| repo_name | path | text |
|---|---|---|
oliverkurth/pmd
|
server/structs.h
|
/*
* Copyright © 2016-2019 VMware, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, without
* warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#pragma once
struct _PMD_SECURITY_CONTEXT_;
typedef struct _PMD_SECURITY_CONTEXT_ *PPMD_SECURITY_CONTEXT;
typedef struct _PMD_REST_CONFIG_
{
int nEnabled;
int nPort;
int nUseKerberos;
int nWorkerThreadCount;
int nClientCount;
char *pszApiSpec;
char *pszSSLCert;
char *pszSSLKey;
char *pszLogFile;
}PMD_REST_CONFIG, *PPMD_REST_CONFIG;
typedef struct _PMD_ROLES_CONFIG_
{
char *pszDir;
char *pszPluginsDir;
}PMD_ROLES_CONFIG, *PPMD_ROLES_CONFIG;
typedef struct _PMD_CONFIG_
{
int nServerType;
char* pszCurrentHash;
char* pszServerUrl;
char* pszComposeServer;
char *pszApiSecurityConf;
PPMD_REST_CONFIG pRestConfig;
char *pszPrivsepPubKeyFile;
PPMD_ROLES_CONFIG pRolesConfig;
}PMD_CONFIG, *PPMD_CONFIG;
typedef struct _HPRIVSEP_TO_HPKG_
{
PPMDHANDLE hPMD;
PPKGHANDLE hPkg;
struct _HPRIVSEP_TO_HPKG_ *pNext;
}HPRIVSEP_TO_HPKG, *PHPRIVSEP_TO_HPKG;
typedef struct _SERVER_ENV_
{
pthread_mutex_t mutexModuleEntries;
PPMD_CONFIG pConfig;
PREST_API_DEF pApiDef;
PREST_MODULE_ENTRY pModuleEntries;
PREST_PROCESSOR pRestProcessor;
PPMD_SECURITY_CONTEXT pSecurityContext;
PVMREST_HANDLE pRestHandle;
pthread_mutex_t mutexPrivSepHandleList;
PHPRIVSEP_TO_HPKG gpPrivSepHandleList;
}SERVER_ENV, *PSERVER_ENV;
|
oliverkurth/pmd
|
server/pkgmgmtrpcapi.c
|
/*
* Copyright © 2016-2017 VMware, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, without
* warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "includes.h"
unsigned32
pkg_rpc_open_handle(
handle_t hBinding,
PTDNF_RPC_CMD_ARGS pRpcArgs,
pkg_handle_t *phPkgHandle
)
{
uint32_t dwError = 0;
PPMDHANDLE hPMD = NULL;
PPKGHANDLE hPkgHandle = NULL;
PTDNF_CMD_ARGS pArgs = NULL;
if(!hBinding || !pRpcArgs || !phPkgHandle)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
CHECK_RPC_ACCESS(hBinding, dwError);
dwError = pkg_rpc_get_cmd_args(pRpcArgs, &pArgs);
BAIL_ON_PMD_ERROR(dwError);
dwError = rpc_open_privsep_internal(PKG_PRIVSEP, &hPMD);
BAIL_ON_PMD_ERROR(dwError);
dwError = pkg_open_handle(hPMD, pArgs, &hPkgHandle);
BAIL_ON_PMD_ERROR(dwError);
dwError = privsep_handle_list_add(hPMD, hPkgHandle);
BAIL_ON_PMD_ERROR(dwError);
*phPkgHandle = hPkgHandle;
cleanup:
pkg_free_cmd_args(pArgs);
return dwError;
error:
if(phPkgHandle)
{
*phPkgHandle = NULL;
}
rpc_free_handle(hPMD);
goto cleanup;
}
unsigned32
pkg_rpc_close_handle(
handle_t hBinding,
pkg_handle_t hPkgHandle
)
{
uint32_t dwError = 0;
PPMDHANDLE hPMD = NULL;
if(!hBinding || !hPkgHandle)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
CHECK_RPC_ACCESS(hBinding, dwError);
dwError = privsep_handle_list_remove(hPkgHandle, &hPMD);
BAIL_ON_PMD_ERROR(dwError);
pkg_close_handle(hPMD, hPkgHandle);
rpc_free_handle(hPMD);
cleanup:
return dwError;
error:
goto cleanup;
}
unsigned32
pkg_rpc_count(
handle_t hBinding,
pkg_handle_t hPkgHandle,
unsigned32* pdwCount
)
{
uint32_t dwError = 0;
PPMDHANDLE hPMD = NULL;
uint32_t dwCount = 0;
if(!hBinding || !hPkgHandle || !pdwCount)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
CHECK_RPC_ACCESS(hBinding, dwError);
dwError = privsep_handle_list_get(hPkgHandle, &hPMD);
BAIL_ON_PMD_ERROR(dwError);
dwError = pkg_count(hPMD, hPkgHandle, &dwCount);
BAIL_ON_PMD_ERROR(dwError);
*pdwCount = dwCount;
cleanup:
return dwError;
error:
goto cleanup;
}
unsigned32
pkg_rpc_list(
handle_t hBinding,
pkg_handle_t hPkgHandle,
unsigned32 nScope,
PPMD_WSTRING_ARRAY pPkgNameSpecs,
PTDNF_RPC_PKGINFO_ARRAY* ppInfo
)
{
uint32_t dwError = 0;
uint32_t dwCount = 0;
uint32_t dwIndex = 0;
PTDNF_RPC_PKGINFO_ARRAY pInfo = NULL;
PTDNF_PKG_INFO pPkgInfo = NULL;
char **ppszPackageNameSpecs = NULL;
PPMDHANDLE hPMD = NULL;
if(!hBinding || !hPkgHandle || !pPkgNameSpecs || !ppInfo)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
CHECK_RPC_ACCESS(hBinding, dwError);
dwError = privsep_handle_list_get(hPkgHandle, &hPMD);
BAIL_ON_PMD_ERROR(dwError);
dwError = pkg_list_w(hPMD,
hPkgHandle,
nScope,
pPkgNameSpecs,
&pInfo);
BAIL_ON_PMD_ERROR(dwError);
*ppInfo = pInfo;
cleanup:
return dwError;
error:
if(ppInfo)
{
*ppInfo = NULL;
}
if(pInfo)
{
PMDRpcServerFreeMemory(pInfo);
}
goto cleanup;
}
unsigned32
pkg_rpc_repolist(
handle_t hBinding,
pkg_handle_t hPkgHandle,
unsigned32 nFilter,
PTDNF_RPC_REPODATA_ARRAY* ppRepoData
)
{
uint32_t dwError = 0;
uint32_t dwCount = 0;
uint32_t dwIndex = 0;
PTDNF_REPO_DATA pRepoData = NULL;
PTDNF_REPO_DATA pRepoDataTemp = NULL;
PTDNF_RPC_REPODATA_ARRAY pRpcRepoDataArray = NULL;
PTDNF_RPC_REPODATA pRpcRepoData = NULL;
wstring_t pwszTemp = NULL;
PPMDHANDLE hPMD = NULL;
if(!hBinding || !hPkgHandle || !ppRepoData)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
CHECK_RPC_ACCESS(hBinding, dwError);
dwError = privsep_handle_list_get(hPkgHandle, &hPMD);
BAIL_ON_PMD_ERROR(dwError);
dwError = pkg_repolist_w(
hPMD,
hPkgHandle,
nFilter,
&pRpcRepoDataArray);
BAIL_ON_PMD_ERROR(dwError);
*ppRepoData = pRpcRepoDataArray;
cleanup:
return dwError;
error:
if(ppRepoData)
{
*ppRepoData = NULL;
}
goto cleanup;
}
unsigned32
pkg_rpc_info(
handle_t hBinding,
pkg_handle_t hPkgHandle,
unsigned32 nScope,
PPMD_WSTRING_ARRAY pPkgNameSpecs,
PTDNF_RPC_PKGINFO_ARRAY* ppInfo
)
{
uint32_t dwError = 0;
printf("Info\n");
return dwError;
}
unsigned32
pkg_rpc_updateinfo_summary(
handle_t hBinding,
pkg_handle_t hPkgHandle,
PTDNF_RPC_UPDATEINFO_SUMMARY_ARRAY* ppRpcUpdateInfoArray
)
{
uint32_t dwError = 0;
PPMDHANDLE hPMD = NULL;
PTDNF_RPC_UPDATEINFO_SUMMARY_ARRAY pRpcUpdateInfoArray = NULL;
if(!hBinding || !hPkgHandle || !ppRpcUpdateInfoArray)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
CHECK_RPC_ACCESS(hBinding, dwError);
dwError = privsep_handle_list_get(hPkgHandle, &hPMD);
BAIL_ON_PMD_ERROR(dwError);
dwError = pkg_updateinfo_summary_w(
hPMD,
hPkgHandle,
AVAIL_AVAILABLE,
&pRpcUpdateInfoArray);
BAIL_ON_PMD_ERROR(dwError);
*ppRpcUpdateInfoArray = pRpcUpdateInfoArray;
cleanup:
return dwError;
error:
if(ppRpcUpdateInfoArray)
{
*ppRpcUpdateInfoArray = NULL;
}
goto cleanup;
}
unsigned32
pkg_rpc_version(
handle_t hBinding,
wstring_t* ppwszVersion
)
{
uint32_t dwError = 0;
wstring_t pwszVersion = NULL;
PPMDHANDLE hPMD = NULL;
if(!hBinding || !ppwszVersion)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
dwError = rpc_open_privsep_internal(PKG_PRIVSEP, &hPMD);
BAIL_ON_PMD_ERROR(dwError);
dwError = pkg_version_w(hPMD, &pwszVersion);
BAIL_ON_PMD_ERROR(dwError);
*ppwszVersion = pwszVersion;
cleanup:
return dwError;
error:
if(ppwszVersion)
{
*ppwszVersion = NULL;
}
PMDRpcServerFreeMemory(pwszVersion);
goto cleanup;
}
unsigned32
pkg_rpc_resolve(
handle_t hBinding,
pkg_handle_t hPkgHandle,
unsigned32 nAlterType,
PTDNF_RPC_SOLVED_PKG_INFO *ppSolvedInfo
)
{
uint32_t dwError = 0;
PTDNF_RPC_SOLVED_PKG_INFO pSolvedInfo = NULL;
PTDNF_SOLVED_PKG_INFO pSolvedInfoA = NULL;
PPMDHANDLE hPMD = NULL;
if(!hBinding || !hPkgHandle || !ppSolvedInfo)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
dwError = privsep_handle_list_get(hPkgHandle, &hPMD);
BAIL_ON_PMD_ERROR(dwError);
dwError = pkg_resolve_w(
hPMD,
hPkgHandle,
nAlterType,
&pSolvedInfo);
BAIL_ON_PMD_ERROR(dwError);
*ppSolvedInfo = pSolvedInfo;
cleanup:
return dwError;
error:
if(ppSolvedInfo)
{
*ppSolvedInfo = NULL;
}
PMDRpcServerFreeSolvedInfo(pSolvedInfo);
goto cleanup;
}
unsigned32
pkg_rpc_alter(
handle_t hBinding,
pkg_handle_t hPkgHandle,
TDNF_ALTERTYPE nAlterType
)
{
uint32_t dwError = 0;
PPMDHANDLE hPMD = NULL;
if(!hBinding || !hPkgHandle)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
CHECK_RPC_ACCESS(hBinding, dwError);
dwError = privsep_handle_list_get(hPkgHandle, &hPMD);
BAIL_ON_PMD_ERROR(dwError);
dwError = pkg_alter_w(
hPMD,
hPkgHandle,
nAlterType);
BAIL_ON_PMD_ERROR(dwError);
cleanup:
return dwError;
error:
goto cleanup;
}
void
pkg_handle_t_rundown(void *handle)
{
PPMDHANDLE hPMD = NULL;
if(privsep_handle_list_remove(handle, &hPMD) == 0)
{
if(hPMD)
{
rpc_free_handle(hPMD);
}
}
}
|
oliverkurth/pmd
|
common/rolemgmt.c
|
/*
* Copyright © 2016-2019 VMware, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, without
* warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "includes.h"
void
rolemgmt_free_role(
PPMD_ROLEMGMT_ROLE pRole
);
uint32_t
rolemgmt_status_from_string(
const char *pszStatus,
PMD_ROLE_STATUS *pnStatus
)
{
uint32_t dwError = 0;
size_t nSize = 0;
size_t i = 0;
PMD_ROLE_STATUS nStatus = ROLE_STATUS_NONE;
struct stLookup
{
PMD_ROLE_STATUS nStatus;
const char *pszStatus;
}arLookup[] =
{
{ROLE_STATUS_SUCCESS, "success"},
{ROLE_STATUS_FAILURE, "failure"},
{ROLE_STATUS_NOT_STARTED, "not started"},
{ROLE_STATUS_IN_PROGRESS, "in progress"}
};
nSize = sizeof(arLookup)/sizeof(arLookup[0]);
if(!pszStatus || !pnStatus)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
for(i = 0; i < nSize; ++i)
{
if(!strcmp(pszStatus, arLookup[i].pszStatus))
{
nStatus = arLookup[i].nStatus;
break;
}
}
if(nStatus == ROLE_STATUS_NONE)
{
dwError = ERROR_PMD_ROLE_BAD_STATUS;
BAIL_ON_PMD_ERROR(dwError);
}
*pnStatus = nStatus;
cleanup:
return dwError;
error:
if(pnStatus)
{
*pnStatus = ROLE_STATUS_NONE;
}
goto cleanup;
}
uint32_t
rolemgmt_status_to_string(
PMD_ROLE_STATUS nStatus,
char **ppszStatus
)
{
uint32_t dwError = 0;
char *pszStatus = NULL;
size_t nSize = 0;
char *pszStrings[] =
{
"none",
"success",
"failure",
"not started",
"in progress"
};
nSize = sizeof(pszStrings)/sizeof(pszStrings[0]);
if(nStatus <= ROLE_STATUS_NONE ||
nStatus >= nSize ||
!ppszStatus)
{
dwError = ERROR_PMD_INVALID_PARAMETER;
BAIL_ON_PMD_ERROR(dwError);
}
dwError = PMDAllocateString(pszStrings[nStatus], &pszStatus);
BAIL_ON_PMD_ERROR(dwError);
*ppszStatus = pszStatus;
cleanup:
return dwError;
error:
if(ppszStatus)
{
*ppszStatus = NULL;
}
PMD_SAFE_FREE_MEMORY(pszStatus);
goto cleanup;
}
void
rolemgmt_free_children(
PPMD_ROLEMGMT_ROLE pRole
)
{
int i = 0;
for(i = 0; i < pRole->nChildCount; ++i)
{
rolemgmt_free_role(pRole->ppChildren[i]);
}
PMD_SAFE_FREE_MEMORY(pRole->ppChildren);
}
void
rolemgmt_free_role(
PPMD_ROLEMGMT_ROLE pRole
)
{
int i = 0;
if(!pRole)
{
return;
}
PMD_SAFE_FREE_MEMORY(pRole->pszName);
PMD_SAFE_FREE_MEMORY(pRole->pszParent);
PMD_SAFE_FREE_MEMORY(pRole->pszDisplayName);
PMD_SAFE_FREE_MEMORY(pRole->pszDescription);
PMD_SAFE_FREE_MEMORY(pRole->pszPlugin);
rolemgmt_free_children(pRole);
PMDFreeMemory(pRole);
}
void
rolemgmt_free_roles(
PPMD_ROLEMGMT_ROLE pRoles
)
{
if(!pRoles)
{
return;
}
PPMD_ROLEMGMT_ROLE pRole = NULL;
while(pRoles)
{
pRole = pRoles->pNext;
rolemgmt_free_role(pRoles);
pRoles = pRole;
}
}
|
intERLab-AIT/libnmea-esp32
|
example/main/nmea_example_main.c
|
/* NMEA parsing example for ESP32.
* Based on "parse_stdin.c" example from libnmea.
* Copyright (c) 2015 <NAME>.
* Additions Copyright (c) 2017 <NAME>.
* See "LICENSE" file in libnmea directory for license.
*/
#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "driver/uart.h"
#include "nmea.h"
#include "gpgll.h"
#include "gpgga.h"
#include "gprmc.h"
#include "gpgsa.h"
#include "gpvtg.h"
#include "gptxt.h"
#include "gpgsv.h"
#define UART_NUM UART_NUM_1
#define UART_RX_PIN 21
#define UART_RX_BUF_SIZE (1024)
static void uart_setup();
static void read_and_parse_nmea();
void app_main()
{
uart_setup();
read_and_parse_nmea();
}
static void uart_setup()
{
uart_config_t uart_config = {
.baud_rate = 9600,
.data_bits = UART_DATA_8_BITS,
.parity = UART_PARITY_DISABLE,
.stop_bits = UART_STOP_BITS_1,
.flow_ctrl = UART_HW_FLOWCTRL_DISABLE
};
ESP_ERROR_CHECK(uart_param_config(UART_NUM, &uart_config));
ESP_ERROR_CHECK(uart_set_pin(UART_NUM,
UART_PIN_NO_CHANGE, UART_RX_PIN,
UART_PIN_NO_CHANGE, UART_PIN_NO_CHANGE));
ESP_ERROR_CHECK(uart_driver_install(UART_NUM, UART_RX_BUF_SIZE * 2, 0, 0, NULL, 0));
}
static void read_and_parse_nmea()
{
// Configure a temporary buffer for the incoming data
char *buffer = (char*) malloc(UART_RX_BUF_SIZE + 1);
char fmt_buf[32];
size_t total_bytes = 0;
while (1) {
// Read data from the UART
int read_bytes = uart_read_bytes(UART_NUM,
(uint8_t*) buffer + total_bytes,
UART_RX_BUF_SIZE - total_bytes, 100 / portTICK_RATE_MS);
if (read_bytes <= 0) {
continue;
}
nmea_s *data;
total_bytes += read_bytes;
/* find start (a dollar sign) */
char* start = memchr(buffer, '$', total_bytes);
if (start == NULL) {
total_bytes = 0;
continue;
}
/* find end of line */
char* end = memchr(start, '\r', total_bytes - (start - buffer));
if (NULL == end || '\n' != *(++end)) {
continue;
}
end[-1] = NMEA_END_CHAR_1;
end[0] = NMEA_END_CHAR_2;
/* handle data */
data = nmea_parse(start, end - start + 1, 0);
if (data == NULL) {
printf("Failed to parse the sentence!\n");
printf(" Type: %.5s (%d)\n", start+1, nmea_get_type(start));
} else {
if (data->errors != 0) {
printf("WARN: The sentence struct contains parse errors!\n");
}
if (NMEA_GPGGA == data->type) {
printf("GPGGA sentence\n");
nmea_gpgga_s *gpgga = (nmea_gpgga_s *) data;
printf("Number of satellites: %d\n", gpgga->n_satellites);
printf("Altitude: %f %c\n", gpgga->altitude,
gpgga->altitude_unit);
}
if (NMEA_GPGLL == data->type) {
printf("GPGLL sentence\n");
nmea_gpgll_s *pos = (nmea_gpgll_s *) data;
printf("Longitude:\n");
printf(" Degrees: %d\n", pos->longitude.degrees);
printf(" Minutes: %f\n", pos->longitude.minutes);
printf(" Cardinal: %c\n", (char) pos->longitude.cardinal);
printf("Latitude:\n");
printf(" Degrees: %d\n", pos->latitude.degrees);
printf(" Minutes: %f\n", pos->latitude.minutes);
printf(" Cardinal: %c\n", (char) pos->latitude.cardinal);
strftime(fmt_buf, sizeof(fmt_buf), "%H:%M:%S", &pos->time);
printf("Time: %s\n", fmt_buf);
}
if (NMEA_GPRMC == data->type) {
printf("GPRMC sentence\n");
nmea_gprmc_s *pos = (nmea_gprmc_s *) data;
printf("Longitude:\n");
printf(" Degrees: %d\n", pos->longitude.degrees);
printf(" Minutes: %f\n", pos->longitude.minutes);
printf(" Cardinal: %c\n", (char) pos->longitude.cardinal);
printf("Latitude:\n");
printf(" Degrees: %d\n", pos->latitude.degrees);
printf(" Minutes: %f\n", pos->latitude.minutes);
printf(" Cardinal: %c\n", (char) pos->latitude.cardinal);
strftime(fmt_buf, sizeof(fmt_buf), "%d %b %T %Y", &pos->date_time);
printf("Date & Time: %s\n", fmt_buf);
printf("Speed, in Knots: %f\n", pos->gndspd_knots);
printf("Track, in degrees: %f\n", pos->track_deg);
printf("Magnetic Variation:\n");
printf(" Degrees: %f\n", pos->magvar_deg);
printf(" Cardinal: %c\n", (char) pos->magvar_cardinal);
double adjusted_course = pos->track_deg;
if (NMEA_CARDINAL_DIR_EAST == pos->magvar_cardinal) {
adjusted_course -= pos->magvar_deg;
} else if (NMEA_CARDINAL_DIR_WEST == pos->magvar_cardinal) {
adjusted_course += pos->magvar_deg;
} else {
printf("Invalid Magnetic Variation Direction!\n");
}
printf("Adjusted Track (heading): %f\n", adjusted_course);
}
if (NMEA_GPGSA == data->type) {
nmea_gpgsa_s *gpgsa = (nmea_gpgsa_s *) data;
printf("GPGSA Sentence:\n");
printf(" Mode: %c\n", gpgsa->mode);
printf(" Fix: %d\n", gpgsa->fixtype);
printf(" PDOP: %.2lf\n", gpgsa->pdop);
printf(" HDOP: %.2lf\n", gpgsa->hdop);
printf(" VDOP: %.2lf\n", gpgsa->vdop);
}
if (NMEA_GPGSV == data->type) {
nmea_gpgsv_s *gpgsv = (nmea_gpgsv_s *) data;
printf("GPGSV Sentence:\n");
printf(" Num: %d\n", gpgsv->sentences);
printf(" ID: %d\n", gpgsv->sentence_number);
printf(" SV: %d\n", gpgsv->satellites);
printf(" #1: %d %d %d %d\n", gpgsv->sat[0].prn, gpgsv->sat[0].elevation, gpgsv->sat[0].azimuth, gpgsv->sat[0].snr);
printf(" #2: %d %d %d %d\n", gpgsv->sat[1].prn, gpgsv->sat[1].elevation, gpgsv->sat[1].azimuth, gpgsv->sat[1].snr);
printf(" #3: %d %d %d %d\n", gpgsv->sat[2].prn, gpgsv->sat[2].elevation, gpgsv->sat[2].azimuth, gpgsv->sat[2].snr);
printf(" #4: %d %d %d %d\n", gpgsv->sat[3].prn, gpgsv->sat[3].elevation, gpgsv->sat[3].azimuth, gpgsv->sat[3].snr);
}
if (NMEA_GPTXT == data->type) {
nmea_gptxt_s *gptxt = (nmea_gptxt_s *) data;
printf("GPTXT Sentence:\n");
printf(" ID: %d %d %d\n", gptxt->id_00, gptxt->id_01, gptxt->id_02);
printf(" %s\n", gptxt->text);
}
if (NMEA_GPVTG == data->type) {
nmea_gpvtg_s *gpvtg = (nmea_gpvtg_s *) data;
printf("GPVTG Sentence:\n");
printf(" Track [deg]: %.2lf\n", gpvtg->track_deg);
printf(" Speed [kmph]: %.2lf\n", gpvtg->gndspd_kmph);
printf(" Speed [knots]: %.2lf\n", gpvtg->gndspd_knots);
}
nmea_free(data);
}
/* buffer empty? */
if (end == buffer + total_bytes) {
total_bytes = 0;
continue;
}
/* copy rest of buffer to beginning */
if (buffer != memmove(buffer, end, total_bytes - (end - buffer))) {
total_bytes = 0;
continue;
}
total_bytes -= end - buffer;
}
free(buffer);
}
|
dvetutnev/boost-di-example
|
legacy/executer.h
|
#pragma once
#include "logger.h"
#include <boost/asio/io_context.hpp>
#include <string>
#include <functional>
#include <thread>
struct Ssid
{
std::string value;
};
struct Id
{
std::string value;
};
class Executer
{
public:
Executer(const Ssid&, const Id&, Logger&);
void process(const std::string&, std::function<void(const std::string&)>);
void stop();
private:
const Ssid ssid;
const Id id;
Logger& logger;
boost::asio::io_context ioContext;
using WorkGuard = boost::asio::executor_work_guard<boost::asio::io_context::executor_type>;
WorkGuard workGuard;
std::thread thread;
void worker();
};
|
dvetutnev/boost-di-example
|
legacy/logger.h
|
#pragma once
#include <iostream>
#include <mutex>
#include <string>
class Logger
{
public:
void log(const std::string& s) {
std::lock_guard lock{mtx};
std::cout << s << std::endl;
}
private:
std::mutex mtx;
};
|
dvetutnev/boost-di-example
|
di/executer.h
|
#pragma once
#include "logger.h"
#include <boost/asio/io_context.hpp>
#include <functional>
#include <thread>
#include <memory>
#include <string>
struct Ssid
{
std::string value;
};
struct Id
{
std::string value;
};
struct IExecuter
{
virtual void process(const std::string&, std::function<void(const std::string&)>) = 0;
virtual void stop() = 0;
virtual const Id& getId() const = 0;
virtual ~IExecuter() = default;
};
class Executer : public IExecuter
{
public:
Executer(Ssid, Id, std::shared_ptr<ILogger>);
void process(const std::string&, std::function<void(const std::string&)>) override;
void stop() override;
const Id& getId() const override;
private:
const Ssid ssid;
const Id id;
const std::shared_ptr<ILogger> logger;
boost::asio::io_context ioContext;
using WorkGuard = boost::asio::executor_work_guard<boost::asio::io_context::executor_type>;
WorkGuard workGuard;
std::thread thread;
void worker();
};
inline bool operator== (const Ssid& a, const Ssid& b) {
return a.value == b.value;
}
inline bool operator== (const Id& a, const Id& b) {
return a.value == b.value;
}
|
dvetutnev/boost-di-example
|
legacy/group.h
|
#pragma once
#include "executer.h"
#include <list>
#include <memory>
class Group
{
public:
Group(const Ssid&, std::size_t, Logger&);
Executer& getExecuter();
void stopAll();
private:
const Ssid ssid;
const std::size_t maxSize;
Logger& logger;
std::size_t currentId;
using Container = std::list<std::unique_ptr<Executer>>;
Container instances;
Container::iterator it;
};
|
dvetutnev/boost-di-example
|
di/logger.h
|
#pragma once
#include <iostream>
#include <mutex>
#include <string>
struct ILogger
{
virtual void log(const std::string&) = 0;
virtual ~ILogger() = default;
};
class Logger : public ILogger
{
public:
void log(const std::string& s) override {
std::lock_guard lock{mtx};
std::cout << s << std::endl;
}
private:
std::mutex mtx;
};
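A minimal wiring sketch for the interface/implementation pair above (not part of the repository), assuming [Boost].DI is available as the single header <boost/di.hpp>:

#include "logger.h"
#include <boost/di.hpp>
#include <memory>

namespace di = boost::di;

int main() {
    // Bind the ILogger interface to the concrete Logger; the injector
    // constructs the instance and hands it out as a shared_ptr.
    auto injector = di::make_injector(di::bind<ILogger>().to<Logger>());
    auto logger = injector.create<std::shared_ptr<ILogger>>();
    logger->log("hello from boost-di");
}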
|
dvetutnev/boost-di-example
|
di/mocks.h
|
#pragma once
#include "executer.h"
#include "group.h"
#include "manager.h"
#include <gmock/gmock.h>
struct MockLogger : ILogger
{
MOCK_METHOD(void, log, (const std::string&), (override));
};
struct MockExecuter : IExecuter
{
MOCK_METHOD(void, process, (const std::string&, std::function<void(const std::string&)>), (override));
MOCK_METHOD(void, stop, (), (override));
MOCK_METHOD(const Id&, getId, (), (const, override));
};
struct MockFactoryExecuter : IFactoryExecuter
{
MOCK_METHOD(std::unique_ptr<IExecuter>, create, (Ssid&&, Id&&), (const, override));
};
struct MockGroup : IGroup
{
MOCK_METHOD(IExecuter&, getExecuter, (), (override));
MOCK_METHOD(void, stopAll, (), (override));
};
struct MockFactoryGroup : IFactoryGroup
{
MOCK_METHOD(std::unique_ptr<IGroup>, create, (Ssid&&), (const, override));
};
|
dvetutnev/boost-di-example
|
di/group.h
|
#pragma once
#include "executer.h"
#include <boost/di/extension/injections/factory.hpp>
#include <list>
struct IGroup
{
virtual IExecuter& getExecuter() = 0;
virtual void stopAll() = 0;
virtual ~IGroup() = default;
};
struct GroupSize
{
std::size_t value;
};
using IFactoryExecuter = boost::di::extension::ifactory<IExecuter, Ssid, Id>;
class Group : public IGroup
{
public:
Group(Ssid, GroupSize, std::shared_ptr<IFactoryExecuter>);
IExecuter& getExecuter() override;
void stopAll() override;
private:
const Ssid ssid;
const std::size_t maxSize;
const std::shared_ptr<IFactoryExecuter> factory;
std::size_t currentId;
using Container = std::list<std::unique_ptr<IExecuter>>;
Container instances;
Container::iterator it;
};
|
dvetutnev/boost-di-example
|
di/manager.h
|
#pragma once
#include "group.h"
#include <boost/di/extension/injections/factory.hpp>
#include <map>
using IFactoryGroup = boost::di::extension::ifactory<IGroup, Ssid>;
class Manager
{
public:
Manager(std::shared_ptr<IFactoryGroup>);
IExecuter& getExecuter(const Ssid&);
void stop(const Ssid&);
void stopAll();
private:
const std::shared_ptr<IFactoryGroup> factory;
struct Compare
{
bool operator()(const Ssid& a, const Ssid& b) const {
return std::less<std::string>{}(a.value, b.value);
}
};
std::map<Ssid, std::unique_ptr<IGroup>, Compare> groups;
};
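A hypothetical composition-root sketch (not from the repository) showing how Manager, the two factories, and the concrete types could be wired with boost-di's ifactory extension; the exact bindings and the GroupSize value below are assumptions:

#include "manager.h"
#include <boost/di.hpp>

namespace di = boost::di;

int main() {
    auto injector = di::make_injector(
        di::bind<ILogger>().to<Logger>(),
        // extension::factory<T>{} satisfies an ifactory<I, Args...> binding:
        // it constructs T, forwarding the create() arguments and injecting
        // T's remaining dependencies from the injector.
        di::bind<IFactoryExecuter>().to(di::extension::factory<Executer>{}),
        di::bind<IFactoryGroup>().to(di::extension::factory<Group>{}),
        di::bind<GroupSize>().to(GroupSize{4}));
    auto manager = injector.create<Manager>();
    IExecuter& executer = manager.getExecuter(Ssid{"net-1"});
    executer.process("ping", [](const std::string&) { /* handle reply */ });
    manager.stopAll();
}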
|
dvetutnev/boost-di-example
|
async_result.h
|
#pragma once
#include <boost/asio/io_context.hpp>
#include <boost/asio/async_result.hpp>
#include <boost/asio/use_future.hpp>
#include <memory>
#include <tuple>
#include <stdexcept>
class IoContextWrapper
{
public:
void addWork() {
workCount++;
if (!workGuard) {
workGuard = std::make_unique<WorkGuard>(ioContext.get_executor());
}
}
void removeWork() {
if (workCount == 0) {
throw std::logic_error{"Work empty"};
}
workCount--;
if (workCount == 0){
workGuard.reset();
}
}
boost::asio::io_context& get() {
return ioContext;
}
void run() {
ioContext.run();
}
private:
boost::asio::io_context ioContext;
using WorkGuard = boost::asio::executor_work_guard<boost::asio::io_context::executor_type>;
std::unique_ptr<WorkGuard> workGuard;
std::size_t workCount = 0;
};
struct AsyncResult
{
using Signature = void (boost::system::error_code, const std::string&);
using CompletionToken = std::decay_t<decltype(boost::asio::use_future)>;
using Result = boost::asio::async_result<CompletionToken, Signature>;
using Handler = Result::completion_handler_type;
static auto create(IoContextWrapper& ioContext) {
Handler asyncHandler{boost::asio::use_future};
Result asyncResult{asyncHandler};
ioContext.addWork();
auto promise = [&ioContext, asyncHandler](const std::string& message) mutable {
auto handler = [asyncHandler, message]() mutable {
return asyncHandler(boost::system::error_code{}, message);
};
ioContext.get().post(handler);
ioContext.removeWork();
};
return std::make_tuple(promise, asyncResult.get());
}
};
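A small usage sketch (not part of the repository): create() hands back a callable "promise" plus the std::future<std::string> that boost::asio::use_future produces; fulfilling the promise posts the completion onto the wrapped io_context, so run() returns once the handler has executed and the work guard is gone.

#include "async_result.h"
#include <iostream>
#include <thread>
#include <utility>

int main() {
    IoContextWrapper wrapper;
    auto handles = AsyncResult::create(wrapper);
    auto resolve = std::get<0>(handles);           // fulfills the result
    auto result = std::move(std::get<1>(handles)); // std::future<std::string>
    // Fulfill the result from another thread; the completion is posted
    // back onto the io_context owned by the wrapper.
    std::thread producer{[resolve]() mutable { resolve("hello"); }};
    wrapper.run();                                 // executes the posted completion
    producer.join();
    std::cout << result.get() << "\n";             // prints "hello"
}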
|
dvetutnev/boost-di-example
|
legacy/manager.h
|
#pragma once
#include "group.h"
#include <map>
class Manager
{
public:
Manager(std::size_t, Logger&);
Executer& getExecuter(const Ssid&);
void stop(const Ssid&);
void stopAll();
private:
const std::size_t groupSize;
Logger& logger;
using Item = std::unique_ptr<Group>;
struct Compare
{
bool operator()(const Ssid& a, const Ssid& b) const {
return std::less<std::string>{}(a.value, b.value);
}
};
std::map<Ssid, Item, Compare> groups;
};
|
kostiakoval/SwiftMiror
|
Pods/Target Support Files/Pods-MirrorTests/Pods-MirrorTests-umbrella.h
|
#import <UIKit/UIKit.h>
FOUNDATION_EXPORT double Pods_MirrorTestsVersionNumber;
FOUNDATION_EXPORT const unsigned char Pods_MirrorTestsVersionString[];
|
kostiakoval/SwiftMiror
|
Mirror/Mirror.h
|
//
// Mirror.h
// Mirror
//
// Created by <NAME> on 10/07/15.
// Copyright (c) 2015 CocoaPods. All rights reserved.
//
#import <UIKit/UIKit.h>
//! Project version number for Mirror.
FOUNDATION_EXPORT double MirrorVersionNumber;
//! Project version string for Mirror.
FOUNDATION_EXPORT const unsigned char MirrorVersionString[];
// In this header, you should import all the public headers of your framework using statements like #import <Mirror/PublicHeader.h>
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/FifthPage/Controller/TTFifthPageViewController.h
|
//
// TTFifthPageViewController.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import "BaseViewController.h"
@interface TTFifthPageViewController : BaseViewController
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/ThirdPage/Controller/TTThirdPageViewController.h
|
//
// TTThirdPageViewController.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import "BaseViewController.h"
@interface TTThirdPageViewController : BaseViewController
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Category/UITextView+PlaceHolder.h
|
//
// UITextView+PlaceHolder.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
#import <objc/runtime.h>
@protocol UITextViewPlaceHolderDelegate <NSObject>
@optional
- (void)finishInputTextWithString:(NSString *)string;
@end
@interface UITextView (PlaceHolder) <UITextViewDelegate>
// Delegate used to handle the completion of text input
@property (weak, nonatomic) id<UITextViewPlaceHolderDelegate> placeHolderDelegate;
// Set the placeholder string
- (void)addPlaceHolder:(NSString *)placeHolder;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Public/TTRefreshBackGifFooter.h
|
//
// TTRefreshBackGifFooter.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/19.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <MJRefresh/MJRefresh.h>
@interface TTRefreshBackGifFooter : MJRefreshBackGifFooter
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Category/UIView+TTDraggable.h
|
//
// UIView+TTDraggable.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
// This category adds a gesture to a view so it can be dragged freely and springs back to its original position on release
@interface UIView (TTDraggable)
/**
* Make view draggable.
*
* @param view Animator reference view, usually is super view.
* @param damping Value from 0.0 to 1.0. 0.0 is the least oscillation. default is 0.4.
*/
- (void)makeDraggable;
- (void)makeDraggableInView:(UIView *)view damping:(CGFloat)damping;
/**
* Disable view draggable.
*/
- (void)removeDraggable;
/**
* If you call make draggable method in the initialize method such as `-initWithFrame:`,
* `-viewDidLoad`, the view may not be layout correctly at that time. So you should
* update snap point in `-layoutSubviews` or `-viewDidLayoutSubviews`.
*
* By the way, you can call make draggable method in `-layoutSubviews` or
* `-viewDidLayoutSubviews` directly instead of update snap point.
*/
- (void)updateSnapPoint;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/FifthPage/View/TTThreeSpecialCell.h
|
//
// TTThreeSpecialCell.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface TTThreeSpecialCell : UITableViewCell
/**
 *  Input text field
 */
@property (nonatomic, strong) UITextField *textField;
/**
 *  Label used only to display a message
 */
@property (strong, nonatomic) UILabel *mesLabel;
/**
 *  Label that displays the leftmost title
 */
@property (strong, nonatomic) UILabel *titleLabel;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Base/View/TTTabBar.h
|
//
// TTTabBar.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface TTTabBar : UITabBar
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Category/UIView+Image.h
|
//
// UIView+Image.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface UIView (Image)
/**
 *  Creates a solid-color image from the given color
 *
 *  @param color the desired color
 *
 *  @return the generated image
 */
- (UIImage *)buttonImageFromColor:(UIColor *)color;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Category/UIView+TTHUDView.h
|
//
// UIView+TTHUDView.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
#import <MBProgressHUD/MBProgressHUD.h> // needed for MBProgressHUDDelegate; framework-style import assumed, matching <MJRefresh/MJRefresh.h> elsewhere in this project
@interface UIView (TTHUDView)<MBProgressHUDDelegate>
- (void)hudShow;
- (void)hiddleHud;
- (void)hudShow:(NSString *)content;
- (void)textHUDHiddle;
// The HUDs below all dismiss automatically
- (void)hudShowWithText:(NSString *)text;
- (void)showHudSuccess:(NSString *)tip;
- (void)showHudFailed:(NSString *)tip;
- (void)showHud:(NSString *)tip andImg:(NSString *)img;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/ForthPage/Controller/TTForthPageViewController.h
|
//
// TTForthPageViewController.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import "BaseViewController.h"
@interface TTForthPageViewController : BaseViewController
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Define/Frame.h
|
//
// Frame.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#ifndef Frame_h
#define Frame_h
// Physical screen width
#define kScreenWidth [UIScreen mainScreen].bounds.size.width
// Physical screen height
#define kScreenHeight [UIScreen mainScreen].bounds.size.height
// Current device's system version (as a float)
#define kCurrentFloatDevice [[[UIDevice currentDevice] systemVersion] floatValue]
// Key window
#define kKeyWindow [UIApplication sharedApplication].keyWindow
#endif
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/FifthPage/View/TTCustomerHeaderView.h
|
//
// TTCustomerHeaderView.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface TTCustomerHeaderView : UIView
/**
 *  Avatar image view
 */
@property (weak, nonatomic) IBOutlet UIImageView *iconView;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Public/TTHUDView.h
|
//
// TTHUDView.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface TTHUDView : UIView
- (void)hudShow;
- (void)hudShow:(NSString *)content;
- (void)hiddleHud;
- (void)textHUDHiddle;
- (void)hudShowWithText:(NSString *)text;
- (void)showHudSuccess:(NSString *)tip;
- (void)showHudFailed:(NSString *)tip;
- (void)showHud:(NSString *)tip andImg:(NSString *)img;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Public/TTRefreshGifHeader.h
|
//
// TTRefreshGifHeader.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <MJRefresh/MJRefresh.h>
@interface TTRefreshGifHeader : MJRefreshGifHeader
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Base/Controller/TTNavigationViewController.h
|
//
// TTNavigationViewController.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface TTNavigationViewController : UINavigationController
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Category/UIBarButtonItem+Item.h
|
//
// UIBarButtonItem+Item.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface UIBarButtonItem (Item)
// Convenience creators for UIBarButtonItem
+ (UIBarButtonItem *)itemWithimage:(UIImage *)image highImage:(UIImage *)highImage target:(id)target action:(SEL)action;
+ (UIBarButtonItem *)backItemWithimage:(UIImage *)image highImage:(UIImage *)highImage target:(id)target action:(SEL)action;
+ (UIBarButtonItem *)itemWithimage:(UIImage *)image selImage:(UIImage *)selImage target:(id)target action:(SEL)action;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Public/TTActionSheet.h
|
//
// TTActionSheet.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
@interface TTActionSheet : NSObject<UIActionSheetDelegate>
/**
 *  Button titles
 */
@property (nonatomic, strong) NSArray *titles;
/**
 *  Defaults to 0 if not set (that button's text is shown in red)
 */
@property (nonatomic, assign) NSInteger destructiveButtonIndex;
/**
 *  Index of the cancel button
 */
@property (nonatomic, assign) NSInteger cancelButtonIndex;
- (id)initWithTitles:(NSArray *)titles;
/**
 *  Shows the sheet and gets the index of the tapped button
 *
 *  @param view the view the sheet is added to
 *
 *  @return the button index
 */
- (NSInteger)showInView:(UIView *)view;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Login/Controller/TTLoginViewController.h
|
//
// TTLoginViewController.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import "BaseViewController.h"
@interface TTLoginViewController : BaseViewController
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Base/Controller/TTTabBarViewController.h
|
//
// TTTabBarViewController.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import "BaseTabBarController.h"
#import "BaseNavigationController.h"
#import "TTFirstPageViewController.h"
#import "TTSecondPageViewController.h"
#import "TTThirdPageViewController.h"
#import "TTForthPageViewController.h"
#import "TTFifthPageViewController.h"
@interface TTTabBarViewController : BaseTabBarController
@property (nonatomic ,strong) UILabel *msgMarkLab;
// Buttons for switching tabs
@property (strong, nonatomic) NSMutableArray *btnArray;
@property (strong, nonatomic) BaseNavigationController *firstNav;
@property (strong, nonatomic) BaseNavigationController *secondNav;
@property (strong, nonatomic) BaseNavigationController *thirdNav;
@property (strong, nonatomic) BaseNavigationController *forthNav;
@property (strong, nonatomic) BaseNavigationController *fifthNav;
@property (strong, nonatomic) TTFirstPageViewController *firstVC;
@property (strong, nonatomic) TTSecondPageViewController *secondVC;
@property (strong, nonatomic) TTThirdPageViewController *thirdVC;
@property (strong, nonatomic) TTForthPageViewController *forthVC;
@property (strong, nonatomic) TTFifthPageViewController *fifthVC;
- (void)SelectedIndex:(NSUInteger)selectedIndex;
+ (void)refreshTabbarCount;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Base/Controller/BaseViewController.h
|
//
// BaseViewController.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface BaseViewController : UIViewController
@property (strong, nonatomic) UIButton *retBtn;
// Custom navigation bar
@property (strong, nonatomic) UIView *navView;
@property (strong, nonatomic) UILabel *titleLab;
@property (nonatomic, strong) UIActivityIndicatorView *spinnerView;
- (void)addNavBarViewAndTitle:(NSString *)title;
- (void)addNavBarViewTitleAndWithActivity:(NSString *)title;
- (void)returnBtnClicked:(UIButton *)button;
- (void)showInView:(UIView *)view WithPoint:(CGPoint)point andHeight:(CGFloat)height;
- (void)activityDismiss;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Public/TTAlertView.h
|
//
// TTAlertView.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface TTAlertView : UIAlertView
- (id)initWithTitle:(NSString *)title message:(NSString *)message clickedBlock:(void (^)(TTAlertView *alertView, BOOL cancelled, NSInteger buttonIndex))clickedBlock cancelButtonTitle:(NSString *)cancelButtonTitle otherButtonTitles:(NSString *)otherButtonTitles, ... NS_REQUIRES_NIL_TERMINATION;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Category/NSString+Size.h
|
//
// NSString+Size.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
// This category computes a string's bounding size (given a constraining size and a font)
@interface NSString (Size)
- (CGSize)boundingRectWithSize:(CGSize)size withFont:(UIFont *)font;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Category/NSString+StringColor.h
|
//
// NSString+StringColor.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/18.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <Foundation/Foundation.h>
@interface NSString (StringColor)
// This method sets the color of the text within a given range of a control's text
- (NSMutableAttributedString *)addStringWithDifferentColor:(UIColor *)color withRange:(NSRange)range;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/FirstPage/Controller/TTJumpPageViewController.h
|
//
// TTJumpPageViewController.h
// TTBasicProject
//
// Created by 赵春浩 on 16/9/26.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import "BaseViewController.h"
@interface TTJumpPageViewController : BaseViewController
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/FirstPage/Controller/TTFirstPageViewController.h
|
//
// TTFirstPageViewController.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import "BaseViewController.h"
@interface TTFirstPageViewController : BaseViewController
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/ThirdPage/View/TTActivityCell.h
|
//
// TTActivityCell.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface TTActivityCell : UITableViewCell
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Category/UIImage+Image.h
|
//
// UIImage+Image.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface UIImage (Image)
+ (instancetype)imageOriginalWithName:(NSString *)imageName;
- (instancetype)us_circleImage;
+ (instancetype)us_circleImageNamed:(NSString *)name;
@end
|
DreamFlyingCow/TTBasicProject
|
TTBasicProject/TTBasicProject/Classes/Define/Color.h
|
//
// Color.h
// TTBasicProject
//
// Created by 赵春浩 on 16/8/29.
// Copyright © 2016年 <NAME>. All rights reserved.
//
#ifndef Color_h
#define Color_h
// All three channels share the same value (grayscale)
#define kCOLOR(a) [UIColor colorWithRed:a/255.0f green:a/255.0f blue:a/255.0f alpha:1.0f]
// Custom values for the three channels
#define kCustomColor(r,g,b) [UIColor colorWithRed:r/255.0f green:g/255.0f blue:b/255.0f alpha:1.0f]
// Custom values for the three channels plus alpha
#define kCustomColorAndAlpha(r,g,b,a) [UIColor colorWithRed:r/255.0f green:g/255.0f blue:b/255.0f alpha:a]
// Takes a 6-digit hex color value and an alpha
#define kColorRGBA(c,a) [UIColor colorWithRed:((c>>16)&0xFF)/255.0 green:((c>>8)&0xFF)/255.0 blue:(c&0xFF)/255.0 alpha:a]
// Takes just a 6-digit hex color value (c: 0x000000)
#define kColorRGB(c) [UIColor colorWithRed:((c>>16)&0xFF)/255.0 green:((c>>8)&0xFF)/255.0 blue:(c&0xFF)/255.0 alpha:1.0]
// Random color
#define kRandomColor [UIColor colorWithRed:arc4random_uniform(256) / 255.0 green:arc4random_uniform(256) / 255.0 blue:arc4random_uniform(256) / 255.0 alpha:1.0]
// Main theme color
#define kMainThemeColor kColorRGB(0x336699)
// Background color
#define kBackgroundColor kColorRGB(0xf0f0f0)
// Separator line
#define kSepLineColor kColorRGB(0xdfdfdd)
// Gray text color
#define kGrayTextColor kColorRGB(0x999999)
// Call-button color
#define kCallPhoneColor kColorRGB(0x66cc99)
// Send-message button color
#define kSendMesColor kColorRGB(0x66cccc)
// Color for the disabled "customer unreachable" button
#define kDisabledColor kColorRGB(0xcccccc)
#endif
|
IT-gMA/test-vc
|
hello-openmp/hello-openmp.c
|
#include <omp.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
int main (int argc, char *argv[])
{
#pragma omp parallel
{
int id = omp_get_thread_num();
int proc = sched_getcpu();
printf("Thread %02i running on processor %i.\n", id, proc);
if (id == 0)
{
printf("There are %i threads in total.\n", omp_get_num_threads());
}
}
return EXIT_SUCCESS;
}
|
IT-gMA/test-vc
|
vector_add_gpu/cpu_vec_add.c
|
#include <stdlib.h>
#include <stdio.h>
//#include "../common/array.h"
/* The helper header above is not available in this snapshot; a minimal
stand-in (assumed behavior: print the first n values of v, cols per line)
is defined below so this file compiles on its own. */
void print_array_terse(float *v, int n, int cols){
for(int i = 0; i < n; i++)
printf("%8.3f%c", v[i], ((i + 1) % cols == 0) ? '\n' : ' ');
printf("\n");
}
#define MALLOC_CHECK_ERROR(X)({\
if ((X) == 0){\
fprintf(stderr, "Malloc error (%s:%d): %i\n", __FILE__, __LINE__, (X));\
exit(1);\
}\
})
// initialise a vector of length n with random values.
void init_vec(float *v, int n){
for(int i = 0; i < n; i++){
v[i] = rand() % 100 * 0.3234f;
}
}
// adds two vectors of length n.
void vector_add(float *a, float *b, float *c, int n){
for(int i = 0; i < n; i++)
c[i] = a[i] + b[i];
}
int main(void){
int n = 10000;
float *A = (float*) malloc(n * sizeof(float));
float *B = (float*) malloc(n * sizeof(float));
float *C = (float*) malloc(n * sizeof(float));
MALLOC_CHECK_ERROR(A && B && C);
init_vec(A, n);
init_vec(B, n);
vector_add(A, B, C, n);
printf("Vector A:\n");
print_array_terse(A, 100, 3);
printf("Vector B:\n");
print_array_terse(B, 100, 3);
printf("Vector C:\n");
print_array_terse(C, 100, 3);
free(A);
free(B);
free(C);
return 0;
}
|
IT-gMA/test-vc
|
hello-mpi/hello-mpi.c
|
#include <mpi.h>
#include <stdio.h>
int main(int argc, char** argv) {
int size;
int rank;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
fprintf(stderr,"%i of %i: Hello, World!\n", rank, size);
MPI_Finalize();
}
|
wenqi-zhang/ack
|
emtest/select.c
|
/* $Id$ */
/*
* (c) copyright 1987 by the Vrije Universiteit, Amsterdam, The Netherlands.
* See the copyright notice in the ACK home directory, in the file "Copyright".
*
*/
#include <stdio.h>
#include <assert.h>
#include <signal.h>
#define LINSIZ 100
int sigs[] = {
SIGHUP,
SIGINT,
SIGQUIT,
SIGTERM,
0
};
char *prog;
char line[LINSIZ];
int nlocals = 0;
int nhol = 0;
int nerrors = 0;
int oknum = 2;
int fflag = 1;
int low = 0;
int high = 999;
FILE *file1;
FILE *file2;
FILE *file3;
char name1[] = "/data/data/com.termux/files/usr/tmp/f1XXXXXX";
char name2[] = "/data/data/com.termux/files/usr/tmp/f2XXXXXX";
char name3[] = "/data/data/com.termux/files/usr/tmp/f3XXXXXX";
char *to3dig();
stop() {
unlink(name1);
unlink(name2);
unlink(name3);
exit(nerrors);
}
main(argc,argv) char **argv; {
register *p;
register char *s;
prog = *argv++; --argc;
mktemp(name1);
mktemp(name2);
mktemp(name3);
for (p = sigs; *p; p++)
if (signal(*p, stop) == SIG_IGN)
signal(*p, SIG_IGN);
while (argc > 0 && argv[0][0] == '-') {
switch (argv[0][1]) {
case 'f':
fflag ^= 1;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
high = atoi(&argv[0][1]);
break;
default:
usage();
break;
}
argc--;
argv++;
}
if (argc > 0 && argv[0][0] >= '0' && argv[0][0] <= '9') {
s = argv[0];
do
low = low*10 + *s++ - '0';
while (*s >= '0' && *s <= '9');
if (*s == 0)
high = low;
else if (*s++ == '-') {
high = atoi(s);
if (high == 0)
high = 999;
} else
fatal("bad range %s", argv[0]);
argc--;
argv++;
}
if (argc > 1)
usage();
if (argc == 1 && freopen(argv[0], "r", stdin) == NULL)
fatal("cannot open %s", argv[0]);
if ((file1 = fopen(name1, "w")) == NULL)
fatal("cannot create %s", name1);
if ((file2 = fopen(name2, "w")) == NULL)
fatal("cannot create %s", name2);
if ((file3 = fopen(name3, "w")) == NULL)
fatal("cannot create %s", name3);
if (getline())
while (select())
;
fclose(file1);
fclose(file2);
fclose(file3);
combine();
stop();
}
select() {
register FILE *f;
int i;
if (sscanf(line, "TEST %d", &i) != 1)
fatal("bad test identification(%s)", line);
if (i < low || i > high) {
while (getline())
if (line[0] == 'T')
return(1);
return(0);
}
fprintf(file2, "; %s\n", line);
if (fflag) {
char *s = to3dig(i);
fprintf(file1, ".%s\n", s);
fprintf(file1, " con \"tst%s\"\n", s);
fprintf(file2, " fil .%s\n", s);
}
f = file1;
while (getline()) {
switch (line[0]) {
case 'T':
return(1);
case 'M':
if (sscanf(line, "MAIN%d", &i) != 1 || i%4 != 0)
break;
if (i > nlocals)
nlocals = i;
f = file2;
continue;
case 'P':
if (strcmp(line, "PROC") != 0)
break;
f = file3;
continue;
case 'H':
if (f != file1 ||
sscanf(line, "HOL%d", &i) != 1 ||
i%4 != 0)
break;
if (i > nhol)
nhol = i;
continue;
case 'O':
if (strcmp(line, "OK") != 0)
break;
fprintf(f, " lin %d\n nop\n", oknum++);
continue;
case 'E':
if (f != file3 || strcmp(line, "ERRLAB") != 0)
break;
fprintf(f, "1\n lin 1\n nop\n loc 1\n loc 1\n mon\n");
continue;
default:
putline(f);
continue;
}
fatal("bad line (%s)", line);
}
return(0);
}
combine() {
printf("#define WS EM_WSIZE\n");
printf("#define PS EM_PSIZE\n");
printf("#include \"test.h\"\n");
printf(" mes 2,WS,PS\n");
printf(" mes 1\n");
printf(" mes 4,300\n");
if (nhol)
printf(" hol %d,0,0\n", nhol);
copy(name1);
printf(" exp $_m_a_i_n\n");
printf(" pro $_m_a_i_n,%d\n", nlocals);
printf(" loc 123\n");
printf(" loc -98\n");
copy(name2);
printf(" loc -98\n");
printf(" bne *1\n");
printf(" loc 123\n");
printf(" bne *1\n");
printf(" lin 0\n");
printf(" nop\n");
printf(" loc 0\n");
printf(" ret WS\n");
printf("1\n");
printf(" lin 1\n");
printf(" nop\n");
printf(" loc 1\n");
printf(" ret WS\n");
printf(" end\n");
copy(name3);
}
copy(s) char *s; {
if (freopen(s, "r", stdin) == NULL)
fatal("cannot reopen %s", s);
while (getline())
putline(stdout);
}
getline() {
register len;
if (fgets(line, LINSIZ, stdin) == NULL)
return(0);
len = strlen(line);
if (line[len-1] != '\n')
fatal("line too long(%s)", line);
line[len-1] = 0;
return(1);
}
putline(f) FILE *f; {
fprintf(f, "%s\n", line);
}
fatal(s, a1, a2, a3, a4) char *s; {
fprintf(stderr, "%s: ", prog);
fprintf(stderr, s, a1, a2, a3, a4);
fprintf(stderr, " (fatal)\n");
nerrors++;
stop();
}
usage() {
fprintf(stderr, "usage: %s -f [[low]-[high]] [testcollection]\n", prog);
nerrors++;
stop();
}
char *
to3dig(i)
register int i;
{
static char buf[4];
register char *s = buf;
*s++ = (i % 1000) / 100 + '0';
*s++ = (i % 100) / 10 + '0';
*s++ = (i % 10) + '0';
*s = '\0';
return buf;
}
|
wenqi-zhang/ack
|
lang/cem/libcc.ansi/headers/ack/emufile.h
|
#ifndef ACK_EMUFILE_H
#define ACK_EMUFILE_H
/*
* Focus point of all stdio activity.
*/
struct FILE {
int _count;
int _fd;
int _flags;
int _bufsiz;
unsigned char *_buf;
unsigned char *_ptr;
};
#define _IOFBF 0x000
#define _IOREAD 0x001
#define _IOWRITE 0x002
#define _IONBF 0x004
#define _IOMYBUF 0x008
#define _IOEOF 0x010
#define _IOERR 0x020
#define _IOLBF 0x040
#define _IOREADING 0x080
#define _IOWRITING 0x100
#define _IOAPPEND 0x200
#define _IOBINARY 0x400
#if !defined BUFSIZ
#define BUFSIZ 1024
#endif
#define FOPEN_MAX 20
extern FILE *__iotab[FOPEN_MAX];
#define stdin (&__stdin)
#define stdout (&__stdout)
#define stderr (&__stderr)
extern FILE __stdin, __stdout, __stderr;
#define FILENAME_MAX 255
#define TMP_MAX 999
#define L_tmpnam (sizeof("/data/data/com.termux/files/usr/tmp/") + 15)
#define ACK_TMP_PREFIX "/data/data/com.termux/files/usr/tmp/tmp."
extern int __fillbuf(FILE *_stream);
extern int __flushbuf(int _c, FILE *_stream);
#define getc(p) (--(p)->_count >= 0 ? (int) (*(p)->_ptr++) : \
__fillbuf(p))
#define putc(c, p) (--(p)->_count >= 0 ? \
(int) (*(p)->_ptr++ = (c)) : \
__flushbuf((c),(p)))
#define feof(p) (((p)->_flags & _IOEOF) != 0)
#define ferror(p) (((p)->_flags & _IOERR) != 0)
#define clearerr(p) ((p)->_flags &= ~(_IOERR|_IOEOF))
/* Non-standard extensions */
extern int fileno(FILE *_stream);
extern FILE* fdopen(int fildes, const char *type);
#define fileno(stream) ((stream)->_fd)
#define io_testflag(p,x) ((p)->_flags & (x))
extern void __register_stdio_cleanup(void);
#endif
|
wenqi-zhang/ack
|
lang/pc/libpc/opn.c
|
/* $Id$ */
/*
* (c) copyright 1983 by the Vrije Universiteit, Amsterdam, The Netherlands.
*
* This product is part of the Amsterdam Compiler Kit.
*
* Permission to use, sell, duplicate or disclose this software must be
* obtained in writing. Requests for such permissions may be sent to
*
* Dr. <NAME>
* Wiskundig Seminarium
* Vrije Universiteit
* Postbox 7161
* 1007 MC Amsterdam
* The Netherlands
*
*/
/* Author: <NAME> */
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include "pc.h"
static int tmpfil(void)
{
static char namebuf[] = "/data/data/com.termux/files/usr/tmp/plf.xxxxx";
int i;
char *p, *q;
i = getpid();
p = namebuf;
q = p + 13;
do
*q++ = (i & 07) + '0';
while (i >>= 3);
*q = '\0';
if ((i = creat(p, 0644)) < 0)
if ((i = creat(p += 4, 0644)) < 0)
if ((i = creat(p += 5, 0644)) < 0)
goto error;
if (close(i) != 0)
goto error;
if ((i = open(p, 2)) < 0)
goto error;
if (unlink(p) != 0)
error:
_trp(EREWR);
return (i);
}
static int initfl(int descr, int sz, struct file* f)
{
int i;
_curfil = f;
if (sz == 0)
{
sz++;
descr |= TXTBIT;
}
for (i = 0; i < _extflc; i++)
if (f == _extfl[i])
break;
if (i >= _extflc)
{ /* local file */
f->fname = "LOCAL";
if ((descr & WRBIT) == 0 && (f->flags & 0377) == MAGIC)
{
_xcls(f);
if (lseek(f->ufd, (long)0, 0) == -1)
_trp(ERESET);
}
else
{
_cls(f);
f->ufd = tmpfil();
}
}
else
{ /* external file */
if (--i <= 0)
return (0);
if (i >= _pargc)
_trp(EARGC);
f->fname = _pargv[i];
_cls(f);
if ((descr & WRBIT) == 0)
{
if ((f->ufd = open(f->fname, 0)) < 0)
_trp(ERESET);
}
else
{
if ((f->ufd = creat(f->fname, 0644)) < 0)
_trp(EREWR);
}
}
f->buflen = (sz > PC_BUFLEN ? sz : PC_BUFLEN - PC_BUFLEN % sz);
f->size = sz;
f->ptr = f->bufadr;
f->flags = descr;
return (1);
}
void _opn(int sz, struct file* f)
{
if (initfl(MAGIC, sz, f))
f->count = 0;
}
void _cre(int sz, struct file* f)
{
if (initfl(WRBIT | EOFBIT | ELNBIT | MAGIC, sz, f))
f->count = f->buflen;
}
|
wenqi-zhang/ack
|
modules/src/system/tmpnam.c
|
/* Copyright (c) 2019. See the file License in
* the root directory for more information.
*
* Created on: 2019-03-13
*
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
/* This has been placed here, because on some famous platforms, this
 * call is completely broken (e.g. Windows up to recent versions of CRT)
*/
char* sys_tmpnam(char *buffer)
{
int fd;
strcpy(buffer,"/data/data/com.termux/files/usr/tmp/XXXXXX");
fd = mkstemp(buffer);
if (fd == -1)
return NULL; /* declared to return char*, so report failure with NULL */
close(fd);
return buffer;
}
|
wenqi-zhang/ack
|
lang/fortran/comp/sysdep.c
|
/****************************************************************
Copyright 1990 by AT&T Bell Laboratories and Bellcore.
Permission to use, copy, modify, and distribute this software
and its documentation for any purpose and without fee is hereby
granted, provided that the above copyright notice appear in all
copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the names of AT&T Bell Laboratories or
Bellcore or any of their entities not be used in advertising or
publicity pertaining to distribution of the software without
specific, written prior permission.
AT&T and Bellcore disclaim all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall AT&T or Bellcore be liable for
any special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
****************************************************************/
#include "defs.h"
#include "usignal.h"
char binread[] = "rb", textread[] = "r";
char binwrite[] = "wb", textwrite[] = "w";
char *c_functions = "c_functions";
char *coutput = "c_output";
char *initfname = "raw_data";
char *initbname = "raw_data.b";
char *blkdfname = "block_data";
char *p1_file = "p1_file";
char *p1_bakfile = "p1_file.BAK";
char *sortfname = "init_file";
char link_msg[] = "-lF77 -lI77 -lm -lc";
#ifndef TMPDIR
#ifdef MSDOS
#define TMPDIR ""
#else
#define TMPDIR "/data/data/com.termux/files/usr/tmp"
#endif
#endif
char *tmpdir = TMPDIR;
void
Un_link_all(cdelete)
{
if (!debugflag) {
unlink(c_functions);
unlink(initfname);
unlink(p1_file);
unlink(sortfname);
unlink(blkdfname);
if (cdelete && coutput)
unlink(coutput);
}
}
void
set_tmp_names()
{
int k;
if (debugflag == 1)
return;
k = strlen(tmpdir) + 16;
c_functions = (char *)ckalloc(7*k);
initfname = c_functions + k;
initbname = initfname + k;
blkdfname = initbname + k;
p1_file = blkdfname + k;
p1_bakfile = p1_file + k;
sortfname = p1_bakfile + k;
{
#ifdef MSDOS
char buf[64], *s, *t;
if (!*tmpdir || *tmpdir == '.' && !tmpdir[1])
t = "";
else {
/* substitute \ for / to avoid confusion with a
* switch indicator in the system("sort ...")
* call in formatdata.c
*/
for(s = tmpdir, t = buf; *s; s++, t++)
if ((*t = *s) == '/')
*t = '\\';
if (t[-1] != '\\')
*t++ = '\\';
*t = 0;
t = buf;
}
sprintf(c_functions, "%sf2c_func", t);
sprintf(initfname, "%sf2c_rd", t);
sprintf(blkdfname, "%sf2c_blkd", t);
sprintf(p1_file, "%sf2c_p1f", t);
sprintf(p1_bakfile, "%sf2c_p1fb", t);
sprintf(sortfname, "%sf2c_sort", t);
#else
int pid = getpid();
sprintf(c_functions, "%s/f2c%d_func", tmpdir, pid);
sprintf(initfname, "%s/f2c%d_rd", tmpdir, pid);
sprintf(blkdfname, "%s/f2c%d_blkd", tmpdir, pid);
sprintf(p1_file, "%s/f2c%d_p1f", tmpdir, pid);
sprintf(p1_bakfile, "%s/f2c%d_p1fb", tmpdir, pid);
sprintf(sortfname, "%s/f2c%d_sort", tmpdir, pid);
#endif
sprintf(initbname, "%s.b", initfname);
}
if (debugflag)
fprintf(diagfile, "%s %s %s %s %s %s\n", c_functions,
initfname, blkdfname, p1_file, p1_bakfile, sortfname);
}
char *
c_name(s,ft)char *s;
{
char *b, *s0;
int c;
b = s0 = s;
while(c = *s++)
if (c == '/')
b = s;
if (--s < s0 + 3 || s[-2] != '.'
|| ((c = *--s) != 'f' && c != 'F')) {
infname = s0;
Fatal("file name must end in .f or .F");
}
*s = ft;
b = copys(b);
*s = c;
return b;
}
static void
killed()
{
signal(SIGINT, SIG_IGN);
#ifdef SIGQUIT
signal(SIGQUIT, SIG_IGN);
#endif
#ifdef SIGHUP
signal(SIGHUP, SIG_IGN);
#endif
signal(SIGTERM, SIG_IGN);
Un_link_all(1);
exit(126);
}
static void
sig1catch(sig) int sig;
{
if (signal(sig, SIG_IGN) != SIG_IGN)
signal(sig, killed);
}
static void
flovflo()
{
Fatal("floating exception during constant evaluation; cannot recover");
/* vax returns a reserved operand that generates
an illegal operand fault on next instruction,
which if ignored causes an infinite loop.
*/
signal(SIGFPE, flovflo);
}
void
sigcatch()
{
sig1catch(SIGINT);
#ifdef SIGQUIT
sig1catch(SIGQUIT);
#endif
#ifdef SIGHUP
sig1catch(SIGHUP);
#endif
sig1catch(SIGTERM);
signal(SIGFPE, flovflo); /* catch overflows */
}
dofork()
{
#ifdef MSDOS
Fatal("Only one Fortran input file allowed under MS-DOS");
#else
int pid, status, w;
extern int retcode;
if (!(pid = fork()))
return 1;
if (pid == -1)
Fatal("bad fork");
while((w = wait(&status)) != pid)
if (w == -1)
Fatal("bad wait code");
retcode |= status >> 8;
#endif
return 0;
}
/* Initialization of tables that change with the character set... */
char escapes[Table_size];
#ifdef non_ASCII
char *str_fmt[Table_size];
static char *str0fmt[127] = { /*}*/
#else
char *str_fmt[Table_size] = {
#endif
"\\000", "\\001", "\\002", "\\003", "\\004", "\\005", "\\006", "\\007",
"\\b", "\\t", "\\n", "\\013", "\\f", "\\r", "\\016", "\\017",
"\\020", "\\021", "\\022", "\\023", "\\024", "\\025", "\\026", "\\027",
"\\030", "\\031", "\\032", "\\033", "\\034", "\\035", "\\036", "\\037",
" ", "!", "\\\"", "#", "$", "%%", "&", "'",
"(", ")", "*", "+", ",", "-", ".", "/",
"0", "1", "2", "3", "4", "5", "6", "7",
"8", "9", ":", ";", "<", "=", ">", "?",
"@", "A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N", "O",
"P", "Q", "R", "S", "T", "U", "V", "W",
"X", "Y", "Z", "[", "\\\\", "]", "^", "_",
"`", "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o",
"p", "q", "r", "s", "t", "u", "v", "w",
"x", "y", "z", "{", "|", "}", "~"
};
#ifdef non_ASCII
char *chr_fmt[Table_size];
static char *chr0fmt[127] = { /*}*/
#else
char *chr_fmt[Table_size] = {
#endif
"\\0", "\\1", "\\2", "\\3", "\\4", "\\5", "\\6", "\\7",
"\\b", "\\t", "\\n", "\\13", "\\f", "\\r", "\\16", "\\17",
"\\20", "\\21", "\\22", "\\23", "\\24", "\\25", "\\26", "\\27",
"\\30", "\\31", "\\32", "\\33", "\\34", "\\35", "\\36", "\\37",
" ", "!", "\"", "#", "$", "%%", "&", "\\'",
"(", ")", "*", "+", ",", "-", ".", "/",
"0", "1", "2", "3", "4", "5", "6", "7",
"8", "9", ":", ";", "<", "=", ">", "?",
"@", "A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N", "O",
"P", "Q", "R", "S", "T", "U", "V", "W",
"X", "Y", "Z", "[", "\\\\", "]", "^", "_",
"`", "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o",
"p", "q", "r", "s", "t", "u", "v", "w",
"x", "y", "z", "{", "|", "}", "~"
};
void
fmt_init()
{
static char *str1fmt[6] =
{ "\\b", "\\t", "\\n", "\\f", "\\r", "\\%03o" };
register int i, j;
register char *s;
/* str_fmt */
#ifdef non_ASCII
i = 0;
#else
i = 127;
#endif
for(; i < Table_size; i++)
str_fmt[i] = "\\%03o";
#ifdef non_ASCII
for(i = 32; i < 127; i++) {
s = str0fmt[i];
str_fmt[*(unsigned char *)s] = s;
}
str_fmt['"'] = "\\\"";
#else
if (Ansi == 1)
str_fmt[7] = chr_fmt[7] = "\\a";
#endif
/* chr_fmt */
#ifdef non_ASCII
for(i = 0; i < 32; i++)
chr_fmt[i] = chr0fmt[i];
#else
i = 127;
#endif
for(; i < Table_size; i++)
chr_fmt[i] = "\\%o";
#ifdef non_ASCII
for(i = 32; i < 127; i++) {
s = chr0fmt[i];
j = *(unsigned char *)s;
if (j == '\\')
j = *(unsigned char *)(s+1);
chr_fmt[j] = s;
}
#endif
/* escapes (used in lex.c) */
for(i = 0; i < Table_size; i++)
escapes[i] = i;
for(s = "btnfr0", i = 0; i < 6; i++)
escapes[*(unsigned char *)s++] = "\b\t\n\f\r"[i];
/* finish str_fmt and chr_fmt */
if (Ansi)
str1fmt[5] = "\\v";
if ('\v' == 'v') { /* ancient C compiler */
str1fmt[5] = "v";
#ifndef non_ASCII
escapes['v'] = 11;
#endif
}
else
escapes['v'] = '\v';
for(s = "\b\t\n\f\r\v", i = 0; j = *(unsigned char *)s++;)
str_fmt[j] = chr_fmt[j] = str1fmt[i++];
/* '\v' = 11 for both EBCDIC and ASCII... */
chr_fmt[11] = Ansi ? "\\v" : "\\13";
}
/* Unless SYSTEM_SORT is defined, the following gives a simple
* in-core version of dsort(). On Fortran source with huge DATA
* statements, the in-core version may exhaust the available memory,
* in which case you might either recompile this source file with
* SYSTEM_SORT defined (if that's reasonable on your system), or
* replace the dsort below with a more elaborate version that
* does a merging sort with the help of auxiliary files.
*/
#ifdef SYSTEM_SORT
dsort(from, to)
char *from, *to;
{
char buf[200];
sprintf(buf, "sort <%s >%s", from, to);
return system(buf) >> 8;
}
#else
static int
#ifdef __STDC__
compare(const void *a, const void *b)
#else
compare(a,b)
char *a, *b;
#endif
{ return strcmp(*(char **)a, *(char **)b); }
dsort(from, to)
char *from, *to;
{
extern char *Alloc();
struct Memb {
struct Memb *next;
int n;
char buf[32000];
};
typedef struct Memb memb;
memb *mb, *mb1;
register char *x, *x0, *xe;
register int c, n;
FILE *f;
char **z, **z0;
int nn = 0;
f = opf(from, textread);
mb = (memb *)Alloc(sizeof(memb));
mb->next = 0;
x0 = x = mb->buf;
xe = x + sizeof(mb->buf);
n = 0;
for(;;) {
c = getc(f);
if (x >= xe && (c != EOF || x != x0)) {
if (!n)
return 126;
nn += n;
mb->n = n;
mb1 = (memb *)Alloc(sizeof(memb));
mb1->next = mb;
mb = mb1;
memcpy(mb->buf, x0, n = x-x0);
x0 = mb->buf;
x = x0 + n;
xe = x0 + sizeof(mb->buf);
n = 0;
}
if (c == EOF)
break;
if (c == '\n') {
++n;
*x++ = 0;
x0 = x;
}
else
*x++ = c;
}
clf(&f, from, 1);
f = opf(to, textwrite);
if (x > x0) { /* shouldn't happen */
*x = 0;
++n;
}
mb->n = n;
nn += n;
if (!nn) /* shouldn't happen */
goto done;
z = z0 = (char **)Alloc(nn*sizeof(char *));
for(mb1 = mb; mb1; mb1 = mb1->next) {
x = mb1->buf;
n = mb1->n;
for(;;) {
*z++ = x;
if (--n <= 0)
break;
while(*x++);
}
}
qsort((char *)z0, nn, sizeof(char *), compare);
for(n = nn, z = z0; n > 0; n--)
fprintf(f, "%s\n", *z++);
free((char *)z0);
done:
clf(&f, to, 1);
do {
mb1 = mb->next;
free((char *)mb);
}
while(mb = mb1);
return 0;
}
#endif
|
arguelles/muSQuIDS
|
inc/loadFlux.h
|
#ifndef _LOADFLUX_H_
#define _LOADFLUX_H_
#include <SQuIDS/const.h>
#include <iostream>
#include <fstream>
#include <nuSQuIDS/marray.h>
#include <boost/tokenizer.hpp>
int LoadData(nusquids::marray<double,1>& ENodes,
nusquids::marray<double,2>& MuFlux,
nusquids::marray<double,3>& NuFlux,
std::string FileName);
#endif
|
arguelles/muSQuIDS
|
inc/muSQuIDS.h
|
#ifndef _MU_NSQ_
#define _MU_NSQ_
#include <vector>
#include <iostream>
#include <nuSQuIDS/nuSQuIDS.h>
#include <gsl/gsl_deriv.h>
#include "exCross.h"
namespace musquids {
using nusquids::nuSQUIDS;
using nusquids::marray;
/// \brief This class implements muon energy loss and decay within nuSQuIDS.
class muSQUIDS: public nuSQUIDS {
private:
std::vector<double> inv_lambda;
marray<double,2> dScalardE;
/// The following variables are used to evaluate the derivative of the scalar flux
std::shared_ptr<gsl_spline> scalar_spline;
std::shared_ptr<gsl_interp_accel> scalar_spline_acc;
std::vector<double> tmp_scalar_state;
protected:
/// \brief Here we will calculate the muon flux derivative
void AddToPreDerive(double x){
for(size_t si=0; si<dScalardE.extent(0); si++){
for(size_t ei=0; ei<dScalardE.extent(1); ei++){
tmp_scalar_state[ei] = state[ei].scalar[si];
}
gsl_spline_init(scalar_spline.get(),E_range.get_data(),tmp_scalar_state.data(),ne);
gsl_interp_accel_reset(scalar_spline_acc.get());
for(size_t ei=0; ei<dScalardE.extent(1); ei++){
dScalardE[si][ei] = gsl_spline_eval_deriv(scalar_spline.get(),E_range[ei],scalar_spline_acc.get());
}
}
}
protected:
double EnergyLoss(double Emuon) const {
// From T. Gaisser Cosmic Ray book
// From http://pdg.lbl.gov/2015/AtomicNuclearProperties/
// Good for Emuon > 10 GeV
double RadDensH=4.405e5; // gr/cm^2
// current_density in gr/cm^3
// coefficients in MeV/cm^2
return (((-1.9-0.08*log(Emuon/params.muon_mass))-(Emuon/params.MeV)/RadDensH)*current_density)*(params.MeV/params.cm);
}
double Fmue(double Emuon, double Enu) const {
// Fits to muon decay spectrum including spin
double z = Enu/Emuon;
return (1.79779466e-02*pow(z,4)+1.20239959e+01*pow(z,3.)-2.38837016e+01*z*z+1.17861335e+01*z+5.85725324e-02)/Emuon;
}
double Fmumu(double Emuon, double Enu) const {
// Fits to muon decay spectrum including spin
double z = Enu/Emuon;
return (-0.24794224*pow(z,4.)+4.51300659*pow(z,3.)-6.2556965*z*z-0.03647084*z+2.02480429)/Emuon;
}
double lambda(double Emuon) const {
return Emuon*params.muon_lifetime/params.muon_mass;
}
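// Editor's note (illustrative): lambda(E) = E*tau_mu/m_mu is the boosted muon
// decay length in natural units; e.g. at E = 100 GeV the dilation factor is
// gamma = E/m_mu ~ 946. The inv_lambda[ei] values filled in the constructor
// below are therefore the decay rates at each energy node.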
protected:
// These scalar functions will manage the muon decay and energy loss
double GammaScalar(unsigned int ei,unsigned int index_scalar) const {
double muon_decay_term=inv_lambda[ei];
return nuSQUIDS::GammaScalar(ei,index_scalar) + muon_decay_term;
}
double InteractionsScalar(unsigned int ei,unsigned int index_scalar) const {
double muon_energy_loss_terms=EnergyLoss(E_range[ei])*dScalardE[index_scalar][ei];
return nuSQUIDS::InteractionsScalar(ei,index_scalar) + muon_energy_loss_terms;
}
// This rho function will add the neutrinos from muon decay
squids::SU_vector InteractionsRho(unsigned int ei,unsigned int index_rho) const {
squids::SU_vector from_muon_decay_terms(nsun);
double muon_decay_to_muon_integral = 0.;
double muon_decay_to_e_integral = 0.;
unsigned int other_index_rho = (index_rho == 0) ? 1 : 0;
for(unsigned int em = ei+1; em < ne; em++){ // loop over the parent muon energies
muon_decay_to_muon_integral += state[em].scalar[index_rho]*Fmumu(E_range[em],E_range[ei])*inv_lambda[em]*delE[em-1];
muon_decay_to_e_integral += state[em].scalar[other_index_rho]*Fmue(E_range[em],E_range[ei])*inv_lambda[em]*delE[em-1];
}
from_muon_decay_terms += evol_b1_proj[index_rho][1][ei]*muon_decay_to_muon_integral;
from_muon_decay_terms += evol_b1_proj[index_rho][0][ei]*muon_decay_to_e_integral;
return nuSQUIDS::InteractionsRho(ei,index_rho) + from_muon_decay_terms;
}
public:
muSQUIDS(){}
muSQUIDS(marray<double,1> E_range,
int numneu=3, nusquids::NeutrinoType NT=nusquids::both,bool iinteraction=true):
nuSQUIDS(E_range,numneu,NT,iinteraction,std::make_shared<nusquids::NeutrinoDISCrossSectionsFromTablesExtended>()),
scalar_spline(gsl_spline_alloc(gsl_interp_cspline,E_range.size()),[](gsl_spline* t){ gsl_spline_free(t);}),
scalar_spline_acc(gsl_interp_accel_alloc(),[](gsl_interp_accel* t){ gsl_interp_accel_free(t);})
{
// resetting squids nodes to the right scalar size
ini(ne,numneu,nrhos,2,Get_t());
// initializing the muon decay length
inv_lambda.resize(ne);
for(unsigned int ei=0; ei<ne; ei++)
inv_lambda[ei] = 1./lambda(E_range[ei]);
// initializing the scalar derivative matrix
dScalardE.resize(std::vector<size_t>{nscalars,ne});
std::fill(dScalardE.begin(),dScalardE.end(),0);
// initializing the scalar temporary state
tmp_scalar_state.resize(ne);
std::fill(tmp_scalar_state.begin(),tmp_scalar_state.end(),0);
}
void Set_initial_state(const marray<double,2>& muon_flux,const marray<double,3>& neutrino_state,nusquids::Basis basis)
{
nuSQUIDS::Set_initial_state(neutrino_state,basis);
for(unsigned int ie = 0; ie < ne; ie++){
for(unsigned int ir = 0; ir < nscalars; ir++){
state[ie].scalar[ir] = muon_flux[ie][ir];
}
}
}
double GetMuonFlux(unsigned int ie, unsigned int irho){
return state[ie].scalar[irho];
}
};
} // close musquids namespace
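// Editor's usage sketch (hypothetical values; follows the public interface
// declared above):
//
//   musquids::muSQUIDS nus(E_nodes);                 // E_nodes: marray<double,1>
//   nus.Set_initial_state(muon_flux, nu_state, nusquids::flavor);
//   // ... evolve as with any other nuSQUIDS object ...
//   double f = nus.GetMuonFlux(/*ie=*/0, /*irho=*/0);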
#endif
|
niexiaobo/ABBPlayerKit
|
ABBPlayer/ABBPlayer/AppDelegate.h
|
//
// AppDelegate.h
// ABBPlayer
//
// Created by beyondsoft-聂小波 on 16/9/20.
// Copyright © 2016年 NieXiaobo. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "Reachability.h"
@interface AppDelegate : UIResponder <UIApplicationDelegate>
@property (strong, nonatomic) UIWindow *window;
@property (strong, nonatomic) Reachability *reach;
- (NetworkStatus)currentReachabilityStatus;
@end
|
niexiaobo/ABBPlayerKit
|
ABBPlayer/ABBPlayer/ViewController/TbaleViewVideo/Controller/VideoListViewController.h
|
//
// VideoListViewController.h
// ABBPlayer
//
// Created by beyondsoft-聂小波 on 16/9/20.
// Copyright © 2016年 NieXiaobo. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface VideoListViewController : UITableViewController
@end
|
niexiaobo/ABBPlayerKit
|
ABBPlayer/ABBPlayer/ViewController/MoviePlayerVC/MessageBaseViewController.h
|
//
// MessageBaseViewController.h
// ABBPlayer
//
// Created by beyondsoft-聂小波 on 16/9/21.
// Copyright © 2016年 NieXiaobo. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "UIView+WHC_Toast.h"
@interface MessageBaseViewController : UIViewController
@end
|
niexiaobo/ABBPlayerKit
|
ABBPlayer/ABBPlayer/ViewController/DownLoadVC/VC/TableBaseViewController.h
|
//
// TableBaseViewController.h
// ABBVideoDownloadPlayer
//
// Created by beyondsoft-聂小波 on 16/9/19.
// Copyright © 2016年 NieXiaobo. All rights reserved.
//
#import <UIKit/UIKit.h>
#define Screen_height [[UIScreen mainScreen] bounds].size.height
#define Screen_width [[UIScreen mainScreen] bounds].size.width
@interface TableBaseViewController : UIViewController
@property(nonatomic, strong) UITableView *tableView;
@end
|
niexiaobo/ABBPlayerKit
|
ABBPlayer/ABBPlayer/ViewController/DownLoadVC/model/DownloadModel.h
|
//
// DownloadModel.h
// ABBPlayer
//
// Created by beyondsoft-聂小波 on 16/9/20.
// Copyright © 2016年 NieXiaobo. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "WHC_DownloadObject.h"
#import <WHCNetWorkKit/WHC_HttpManager.h>
#define WHC_BackgroundDownload (1)
// showMsg
typedef void(^DownloadModelShowMsg)(NSString *message);
@interface DownloadModel : NSObject
/** showMsg */
@property (nonatomic, copy ) DownloadModelShowMsg showModelMssage;
- (void)downLoadWith:(NSString *)playUrl title:(NSString *)title defaultFormat:(NSString *)defaultFormat;
@end
|
niexiaobo/ABBPlayerKit
|
ABBPlayer/ABBPlayer/ViewController/MoviePlayerVC/MoviePlayerViewController.h
|
//
// MoviePlayerViewController.h
// ABBPlayer
//
// Created by beyondsoft-聂小波 on 16/9/20.
// Copyright © 2016年 NieXiaobo. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "MessageBaseViewController.h"
@interface MoviePlayerViewController : MessageBaseViewController
/** Video URL */
@property (nonatomic, strong) NSURL *videoURL;
@end
|
niexiaobo/ABBPlayerKit
|
ABBPlayer/ABBPlayer/ViewController/DownLoadVC/VC/DownLoadListViewController.h
|
//
// DownLoadListViewController.h
// ABBVideoDownloadPlayer
//
// Created by beyondsoft-聂小波 on 16/9/19.
// Copyright © 2016年 NieXiaobo. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "TableBaseViewController.h"
#import "DownLoadListCell.h"
@interface DownLoadListViewController : TableBaseViewController
@end
|
niexiaobo/ABBPlayerKit
|
ABBPlayer/ABBPlayer/ViewController/DownLoadVC/Cell/DownLoadListCell.h
|
//
// DownLoadListCell.h
// ABBVideoDownloadPlayer
//
// Created by beyondsoft-聂小波 on 16/9/19.
// Copyright © 2016年 NieXiaobo. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "WHC_DownloadObject.h"
#import "WHCNetWorkKit.h"
#import "UIView+WHC_ViewProperty.h"
#define kFontSize (15.0)
#define kCellHeight (57.0) // cell height
#define kMinPlaySize (10.0) // minimum playback size
#define kCellName (@"DownLoadListCell") // cell name
#define WHC_BackgroundDownload (1)
@protocol DownLoadListCellDelegate <NSObject>
- (void)videoDownload:(NSError *)error index:(NSInteger)index strUrl:(NSString *)strUrl;
- (void)updateDownloadValue:(WHC_DownloadObject *)downloadObject index:(NSInteger)index;
- (void)videoPlayerIndex:(NSInteger)index;
- (void)refreshTableViewCellUI;
@end
@interface DownLoadListCell : UITableViewCell
@property (nonatomic , weak)id<DownLoadListCellDelegate> delegate;
@property (nonatomic , assign)NSInteger index;
- (void)displayCell:(WHC_DownloadObject *)object index:(NSInteger)index;
@end
|
keflavich/nestfit
|
nestfit/core/fastexp.c
|
/*
* fastexp.c
* This file is part of LIME, the versatile line modeling engine
*
* Copyright (C) 2006-2014 <NAME>
* Copyright (C) 2015-2018 The LIME development team
* Released under the GNU GPL v3
*
*/
#include "fastexp.h"
double EXP_TABLE_2D[128][10];
double EXP_TABLE_3D[256][2][10];
/* I've hard-wired the dimensions of these arrays, but it would be better perhaps to declare them as pointers, and calculate the dimensions with the help of the function call:
calcFastExpRange(FAST_EXP_MAX_TAYLOR, FAST_EXP_NUM_BITS, &numMantissaFields, &lowestExponent, &numExponentsUsed)
*/
double oneOver_i[FAST_EXP_MAX_TAYLOR+1];
double ERF_TABLE[ERF_TABLE_SIZE];
/*....................................................................*/
void fillErfTable() {
int i;
for (i=0;i<ERF_TABLE_SIZE;i++) {
ERF_TABLE[i]=(SQRT_PI/2.)*erf(i*ERF_TABLE_LIMIT/(ERF_TABLE_SIZE-1.));
}
}
/*....................................................................*/
double
geterf(const double x0, const double x1) {
/* table lookup erf thingy */
double val0=0.,val1=0.;
if (fabs(x0)>=ERF_TABLE_LIMIT) val0=(SQRT_PI/2.);
else {
int index = (int)(fabs(x0*IBIN_WIDTH));
double inter_coeff = (fabs(x0*IBIN_WIDTH)-index);
val0=(1-inter_coeff)*ERF_TABLE[index]+inter_coeff*ERF_TABLE[index+1];
}
if (x0<0.) val0=-val0;
if (fabs(x1)>=ERF_TABLE_LIMIT) val1=(SQRT_PI/2.);
else {
int index = (int)(fabs(x1*IBIN_WIDTH));
double inter_coeff = (fabs(x1*IBIN_WIDTH)-index);
val1=(1-inter_coeff)*ERF_TABLE[index]+inter_coeff*ERF_TABLE[index+1];
}
if (x1<0.) val1=-val1;
return fabs((val1-val0)/(x1-x0));
}
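/* Editor's sketch (not part of LIME): geterf(x0,x1) returns the mean value of
 * exp(-t^2) over [x0,x1], i.e. (sqrt(pi)/2)*|erf(x1)-erf(x0)|/|x1-x0|, via
 * table lookup with linear interpolation. E.g., after fillErfTable(),
 * geterf(0.0, 1.0) ~ 0.7468, the average of exp(-t^2) on [0,1].
 */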
/*....................................................................*/
int factorial(const int n){
int i, result;
result = 1;
for(i=1;i<n+1;i++){
result *= i;
}
return result;
}
/*....................................................................*/
double taylor(const int maxOrder, const float x){
double result=1.0;
int i;
for (i=maxOrder;i>0;i--){
result = 1.0 + x*result/(double)i;
}
return result;
}
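/* Editor's note: taylor() evaluates the truncated Taylor series of exp(x) by
 * Horner's rule, 1 + x*(1 + (x/2)*(1 + (x/3)*(...))). For example,
 * taylor(3, 0.1f) = 1 + 0.1 + 0.1^2/2 + 0.1^3/6 ~ 1.10517, close to exp(0.1).
 */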
/*....................................................................*/
void calcFastExpRange(const int maxTaylorOrder, const int maxNumBitsPerMantField, int *numMantissaFields, int *lowestExponent, int *numExponentsUsed){
/*
We want to approximate an exponential call exp(-|x|) (where x is a standard, i.e. IEEE 754-format, float) by a look-up table. The scheme proposed makes use of two arrays: a 2D one of shape (J=2^{B-B0},L) and a 3D one of shape (J=2^B,K-1,L), where B is the input argument maxNumBitsPerMantField. The purpose of the present function is to calculate and return B0, K and L, as well as the value of lowestExponent (explained in section 2 below).
1) The fast-exp lookup algorithm.
=================================
The algorithm makes use of the fact that exp(a+b+c) = exp(a)*exp(b)*exp(c) to return a full precision exponential value via the product of several separate lower-precision lookups. This is done by dividing the mantissa of |x| into K contiguous fields, where K is calculated from
K = ceiling(23/B).
Note that the IEEE 754 standard specifies that the mantissa of a float is 23 bits long. Each of these mantissa fields, considered as an integer, can generate a value for the first index j of the appropriate array. (The 0th field lookup values are in the 2D array, but every other field accesses values in the 3D array.) For field numbers k>0, the second index of the 3D array is found from k-1; and the third index l is taken by treating the bits of the float exponent (bits 1 to 9 in the IEEE 754 standard) as an integer, suitably offset as described below.
The detailed working of the algorithm is illustrated with an example. We construct an example float x equal to -1.3a07b2x * 2^3. This should have a IEEE 754 bit representation as follows:
seee eeee emmm mmmm mmmm mmmm mmmm mmmm
x = 1100 0001 0001 1101 0000 0011 1101 1001
or, in hex,
0xc11d03d9
If we set B=8, that gives the number of fields K=3. We thus divide the mantissa into three fields a, b and c as follows:
00111010000001111011001
aaaaaaabbbbbbbbcccccccc
Note that field a has only 7 bits, not 8: because 8 does not divide evenly into 23. It is this shorter k=0 field which necessitates the use of a separate array for this field. The value B0 is the number of 'missing' bits in this first field - i.e. =1 in the present case.
The example above will yield
a = 0x1d
b = 0x03
c = 0xd9.
Suppose we construct a new IEEE 754 bit representation for each of these. Starting with 'a', this gives
seee eeee emmm mmmm mmmm mmmm mmmm mmmm
A0 = 1100 0001 0001 1101 0000 0000 0000 0000
which is -1.3ax * 2^3 as desired. For 'b' however, if we naively shift the 'b' bit pattern 7 bits to the left, and decrement the exponent by 7, we get
seee eeee emmm mmmm mmmm mmmm mmmm mmmm
A1 = 1011 1101 1000 0001 1000 0000 0000 0000
According to the IEEE 754 rules for encoding floats, this is not -0.03x * 2^-4 as we need, but -1.03x * 2^-4. Similar will hold for 'c'. I.e. we left-shift the 'c' bit pattern by 15 bits, decrementing the exponent also by 15, to give
seee eeee emmm mmmm mmmm mmmm mmmm mmmm
A2 = 1011 1001 1110 1100 1000 0000 0000 0000
This is not -0.d9x * 2^-12 as we need, but -1.d9x * 2^-12. Thus we must add 1 * 2^{e-7} and 1 * 2^{e-15} to the total afterwards, where e is the original exponent (in the present example = 3). Thus we have
x = A0 + (A1 + 1*2^{e-7}) + (A2 + 1*2^{e-15})
and thus
exp(x) = exp(A0)*exp(A1 + 1*2^{e-7})*exp(A2 + 1*2^{e-15}).
2) Calculating the range of exponents.
======================================
Values of |x| which are smaller than a given cutoff are calculated via a Pth-order Taylor expansion; values greater than another cutoff return a value of zero. Ideally, we set the low-x cutoff to the point at which the absolute value of the next-higher-order Taylor term equals the floating-point precision, and the high-x cutoff to the point where exp(-|x|) reaches the same precision.
The fractional precision of the lookup table is clearly epsilon=1/2^23, the same precision as an ordinary floating-point number. This will also be approximately the absolute precision sigma for small values of |x| where exp(-|x|) ~ 1. We should choose the low cutoff value x_lo such that the next higher term (P+1th term) of the Taylor series is equal to sigma, i.e.:
|x_lo|^{P+1} 1
-------------- = ------.
{P+1}! 2^23
Having calculated |x_lo|, we want to find that exponent N_lo of 2 such that 2^N_lo < |x_lo| < 2^{N_lo+1}.
The second part of the calculation is to find N_hi such that exp(-2^N_hi) < sigma < exp(-2^{N_hi-1}). The number of exponents L needed is thus 1+N_hi-N_lo.
*/
int ieee754NumMantBits=23;
double sigma, xLo;
int nLo, nHi;
*numMantissaFields = 1+floor(ieee754NumMantBits/(float)maxNumBitsPerMantField);
sigma = 1.0/pow(2.,ieee754NumMantBits);
xLo = pow((double)factorial(maxTaylorOrder+1)*sigma, 1/(double)(maxTaylorOrder+1));
nLo = floor(log(xLo)/log(2.));
/*
exp(-2^N_hi) < sigma
thus
2^N_hi > -ln(sigma)
thus
N_hi > ln(-ln(sigma))/ln(2).
*/
nHi = 1+floor(log(-log(sigma))/log(2.));
*lowestExponent = nLo;
*numExponentsUsed = 1+nHi-nLo;
}
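/* Editor's sketch of a call (values follow from the defaults in fastexp.h,
 * FAST_EXP_MAX_TAYLOR=3 and FAST_EXP_NUM_BITS=8):
 *
 *   int numMantissaFields, lowestExponent, numExponentsUsed;
 *   calcFastExpRange(FAST_EXP_MAX_TAYLOR, FAST_EXP_NUM_BITS,
 *                    &numMantissaFields, &lowestExponent, &numExponentsUsed);
 *   // -> numMantissaFields == 3 (so B0 = 3*8 - 23 = 1 missing bit in the
 *   //    0th field), numExponentsUsed == 10, matching the hard-wired last
 *   //    dimension of EXP_TABLE_2D and EXP_TABLE_3D above.
 */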
/*....................................................................*/
void calcExpTableEntries(const int maxTaylorOrder, const int maxNumBitsPerMantField){
/*
See description of the lookup algorithm in function calcFastExpRange().
*/
int ieee754ExpOffset=127,ieee754NumMantBits=23;
int negativeSignMask=0x80000000;
int numJs,numJs0,numMantissaFields,lowestExponent,numExponentsUsed,exponentOffset,mantShift,bitOffset0,fieldBitOffset,fieldI,j,k,l,exponentMask;
float argOffset;
union
{
float f;
int m;
} floPo;
// Should raise an exception here #ifndef FASTEXP?
calcFastExpRange(maxTaylorOrder, maxNumBitsPerMantField, &numMantissaFields, &lowestExponent, &numExponentsUsed);
exponentOffset = ieee754ExpOffset + lowestExponent;
mantShift = ieee754NumMantBits - maxNumBitsPerMantField;
bitOffset0 = maxNumBitsPerMantField*numMantissaFields - ieee754NumMantBits;
numJs = (int)pow(2.,maxNumBitsPerMantField);
numJs0 = (int)pow(2.,maxNumBitsPerMantField-bitOffset0);
fieldBitOffset = 0;
for (l=0;l<numExponentsUsed;l++){
argOffset = 0.0;
exponentMask = (l+exponentOffset+fieldBitOffset)<<ieee754NumMantBits;
for (j=0;j<numJs0;j++){
floPo.m = negativeSignMask | exponentMask | (j<<(mantShift+bitOffset0));
EXP_TABLE_2D[j][l] = exp(floPo.f + argOffset);
}
}
for (fieldI=1;fieldI<numMantissaFields;fieldI++){
k = fieldI-1;
fieldBitOffset = bitOffset0 - fieldI*maxNumBitsPerMantField;
for (l=0;l<numExponentsUsed;l++){
argOffset = pow(2.,l+lowestExponent+fieldBitOffset);
exponentMask = (l+exponentOffset+fieldBitOffset)<<ieee754NumMantBits;
for (j=0;j<numJs;j++){
floPo.m = negativeSignMask | exponentMask | (j<<mantShift);
EXP_TABLE_3D[j][k][l] = exp(floPo.f + argOffset);
}
}
}
/*We also construct the table of 1/i to be used for faster calculation of the Taylor approximation.*/
oneOver_i[0]=0.0;
for (j=1;j<=FAST_EXP_MAX_TAYLOR;j++) oneOver_i[j]=1.0/(1.0*j);
}
/*....................................................................*/
double
FastExp(const float negarg){
/*
See description of the lookup algorithm in function calcFastExpRange(). ****NOTE!**** Most numbers here are hard-wired for the sake of speed. If need be, they can be verified (or recalculated for different conditions) via calcExpTableEntries().
*/
int exponentMask=0x7f800000,ieee754NumMantBits=23;
int exponentOffset=122,numExponentsUsed=10;
/*
This value should be calculated from 127+lowestExponent, where 127 is the offset for an exponent of zero laid down in the IEEE 754 standard, and both lowestExponent and numExponentsUsed can be calculated via calcFastExpRange().
exponentOffset = ieee754ExpOffset + lowestExponent;
*/
int mantMask0=0x007f0000, mantMask1=0x0000ff00, mantMask2=0x000000ff;
int mantOffset0=16, mantOffset1=8, mantOffset2=0;
int i,j0,j1,j2,l;
union
{
float f;
int m;
} floPo;
double result;
// Should raise an exception here #ifndef FASTEXP?
if (negarg<0.0) return exp(-negarg);
if (negarg==0.0) return 1.0;
floPo.f = negarg;
l = ((floPo.m & exponentMask)>>ieee754NumMantBits)-exponentOffset;
if (l<0){ // do the Taylor approximation.
result = 1.0;
for (i=FAST_EXP_MAX_TAYLOR;i>0;i--){
result = 1.0 - negarg*result*oneOver_i[i];
}
return result;
}else if(l>=numExponentsUsed){
return 0.0;
}
j0 = (floPo.m & mantMask0)>>mantOffset0;
j1 = (floPo.m & mantMask1)>>mantOffset1;
j2 = (floPo.m & mantMask2)>>mantOffset2;
return (EXP_TABLE_2D[j0] [l]*
EXP_TABLE_3D[j1][0][l]*
EXP_TABLE_3D[j2][1][l]);
}
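/* Editor's usage sketch: the lookup tables must be filled once before the
 * first call, e.g.
 *
 *   calcExpTableEntries(FAST_EXP_MAX_TAYLOR, FAST_EXP_NUM_BITS);
 *   fillErfTable();
 *   double y = FastExp(2.5f);   // approximates exp(-2.5)
 */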
|
keflavich/nestfit
|
nestfit/core/fastexp.h
|
#include <stdlib.h>
#include <math.h>
#define SQRT_PI (sqrt(M_PI)) /* sqrt(pi) */
#define FAST_EXP_MAX_TAYLOR 3
#define FAST_EXP_NUM_BITS 8
#define ERF_TABLE_LIMIT 6.0 /* For x>6, 1-erf(x) is below double precision machine epsilon, so no need to store the values for larger x. */
#define ERF_TABLE_SIZE 6145
#define BIN_WIDTH (ERF_TABLE_LIMIT/(ERF_TABLE_SIZE-1.))
#define IBIN_WIDTH (1./BIN_WIDTH)
extern double EXP_TABLE_2D[128][10];
extern double EXP_TABLE_3D[256][2][10];
extern double oneOver_i[FAST_EXP_MAX_TAYLOR+1];
extern double ERF_TABLE[ERF_TABLE_SIZE];
double geterf(const double, const double);
double FastExp(const float);
void calcExpTableEntries(const int, const int);
void fillErfTable(void);
|
cornelisnetworks/opa-hfi1
|
compat/RH77/compat.c
|
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/export.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include "../hfi1/hfi.h"
#include "compat.h"
/**
* debugfs_use_file_start - mark the beginning of file data access
* @dentry: the dentry object whose data is being accessed.
* @srcu_idx: a pointer to some memory to store a SRCU index in.
*
* Up to a matching call to debugfs_use_file_finish(), any
* successive call into the file removing functions debugfs_remove()
* and debugfs_remove_recursive() will block. Since associated private
* file data may only get freed after a successful return of any of
* the removal functions, you may safely access it after a successful
* call to debugfs_use_file_start() without worrying about
* lifetime issues.
*
* If -%EIO is returned, the file has already been removed and thus,
* it is not safe to access any of its data. If, on the other hand,
* it is allowed to access the file data, zero is returned.
*
* Regardless of the return code, any call to
* debugfs_use_file_start() must be followed by a matching call
* to debugfs_use_file_finish().
*/
int debugfs_use_file_start(struct dentry *dentry, int *srcu_idx)
__acquires(&debugfs_srcu)
{
*srcu_idx = srcu_read_lock(&debugfs_srcu);
barrier();
if (d_unlinked(dentry))
return -EIO;
return 0;
}
EXPORT_SYMBOL(debugfs_use_file_start);
/**
* debugfs_use_file_finish - mark the end of file data access
* @srcu_idx: the SRCU index "created" by a former call to
* debugfs_use_file_start().
*
* Allow any ongoing concurrent call into debugfs_remove() or
* debugfs_remove_recursive() blocked by a former call to
* debugfs_use_file_start() to proceed and return to its caller.
*/
void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu)
{
srcu_read_unlock(&debugfs_srcu, srcu_idx);
}
EXPORT_SYMBOL(debugfs_use_file_finish);
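/*
 * Editor's usage sketch for the pair above (follows the documented contract
 * that the finish call must happen regardless of the return code):
 *
 *   int srcu_idx;
 *   if (!debugfs_use_file_start(dentry, &srcu_idx)) {
 *       // ... safely access the file's private data ...
 *   }
 *   debugfs_use_file_finish(srcu_idx);
 */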
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/tid_rdma.h
|
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef HFI1_TID_RDMA_H
#define HFI1_TID_RDMA_H
#include <linux/circ_buf.h>
#include "common.h"
/* Add a convenience helper */
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
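/*
 * Editor's illustration: with a power-of-two size the mask form wraps without
 * a modulo, e.g. CIRC_NEXT(7, 8) == 0 and CIRC_PREV(0, 8) == 7.
 */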
#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)
#define TID_RDMA_SEGMENT_SHIFT 18
/*
* Bit definitions for priv->s_flags.
* These bit flags overload the bit flags defined for the QP's s_flags.
* Due to the fact that these bit fields are used only for the QP priv
* s_flags, there are no collisions.
*
* HFI1_S_TID_WAIT_INTERLCK - QP is waiting for requester interlock
* HFI1_R_TID_WAIT_INTERLCK - QP is waiting for responder interlock
*/
#define HFI1_S_TID_BUSY_SET BIT(0)
/* BIT(1) reserved for RVT_S_BUSY. */
#define HFI1_R_TID_RSC_TIMER BIT(2)
/* BIT(3) reserved for RVT_S_RESP_PENDING. */
/* BIT(4) reserved for RVT_S_ACK_PENDING. */
#define HFI1_S_TID_WAIT_INTERLCK BIT(5)
#define HFI1_R_TID_WAIT_INTERLCK BIT(6)
/* BIT(7) - BIT(15) reserved for RVT_S_WAIT_*. */
/* BIT(16) reserved for RVT_S_SEND_ONE */
#define HFI1_S_TID_RETRY_TIMER BIT(17)
/* BIT(18) reserved for RVT_S_ECN. */
#define HFI1_R_TID_SW_PSN BIT(19)
/* BIT(26) reserved for HFI1_S_WAIT_HALT */
/* BIT(27) reserved for HFI1_S_WAIT_TID_RESP */
/* BIT(28) reserved for HFI1_S_WAIT_TID_SPACE */
/*
* Unlike regular IB RDMA VERBS, which do not require an entry
* in the s_ack_queue, TID RDMA WRITE requests do because they
* generate responses.
* Therefore, the s_ack_queue needs to be extended by a certain
* amount. The key point is that the queue needs to be extended
* without letting the "user" know, so the user doesn't end up
* using these extra entries.
*/
#define HFI1_TID_RDMA_WRITE_CNT 8
struct tid_rdma_params {
struct rcu_head rcu_head;
u32 qp;
u32 max_len;
u16 jkey;
u8 max_read;
u8 max_write;
u8 timeout;
u8 urg;
u8 version;
};
struct tid_rdma_qp_params {
struct work_struct trigger_work;
struct tid_rdma_params local;
struct tid_rdma_params __rcu *remote;
};
/* Track state for each hardware flow */
struct tid_flow_state {
u32 generation;
u32 psn;
u8 index;
u8 last_index;
};
enum tid_rdma_req_state {
TID_REQUEST_INACTIVE = 0,
TID_REQUEST_INIT,
TID_REQUEST_INIT_RESEND,
TID_REQUEST_ACTIVE,
TID_REQUEST_RESEND,
TID_REQUEST_RESEND_ACTIVE,
TID_REQUEST_QUEUED,
TID_REQUEST_SYNC,
TID_REQUEST_RNR_NAK,
TID_REQUEST_COMPLETE,
};
struct tid_rdma_request {
struct rvt_qp *qp;
struct hfi1_ctxtdata *rcd;
union {
struct rvt_swqe *swqe;
struct rvt_ack_entry *ack;
} e;
struct tid_rdma_flow *flows; /* array of tid flows */
struct rvt_sge_state ss; /* SGE state for TID RDMA requests */
u16 n_flows; /* size of the flow buffer window */
u16 setup_head; /* flow index we are setting up */
u16 clear_tail; /* flow index we are clearing */
u16 flow_idx; /* flow index most recently set up */
u16 acked_tail;
u32 seg_len;
u32 total_len;
u32 r_ack_psn; /* next expected ack PSN */
u32 r_flow_psn; /* IB PSN of next segment start */
u32 r_last_acked; /* IB PSN of last ACK'ed packet */
u32 s_next_psn; /* IB PSN of next segment start for read */
u32 total_segs; /* segments required to complete a request */
u32 cur_seg; /* index of current segment */
u32 comp_seg; /* index of last completed segment */
u32 ack_seg; /* index of last ack'ed segment */
u32 alloc_seg; /* index of next segment to be allocated */
u32 isge; /* index of "current" sge */
u32 ack_pending; /* num acks pending for this request */
enum tid_rdma_req_state state;
};
/*
* When header suppression is used, PSNs associated with a "flow" are
* relevant (and not the PSNs maintained by verbs). Track per-flow
* PSNs here for a TID RDMA segment.
*
*/
struct flow_state {
u32 flags;
u32 resp_ib_psn; /* The IB PSN of the response for this flow */
u32 generation; /* generation of flow */
u32 spsn; /* starting PSN in TID space */
u32 lpsn; /* last PSN in TID space */
u32 r_next_psn; /* next PSN to be received (in TID space) */
/* For tid rdma read */
u32 ib_spsn; /* starting PSN in Verbs space */
u32 ib_lpsn; /* last PSN in Verbs space */
};
struct tid_rdma_pageset {
dma_addr_t addr : 48; /* Only needed for the first page */
u8 idx: 8;
u8 count : 7;
u8 mapped: 1;
};
/**
* kern_tid_node - used for managing TID's in TID groups
*
* @grp: the tid_group tracked by this node
* @map: grp->map captured prior to programming this TID group in HW
* @cnt: Only @cnt of available group entries are actually programmed
*/
struct kern_tid_node {
struct tid_group *grp;
u8 map;
u8 cnt;
};
/* Overall info for a TID RDMA segment */
struct tid_rdma_flow {
/*
* While a TID RDMA segment is being transferred, it uses a QP number
* from the "KDETH section of QP numbers" (which is different from the
* QP number that originated the request). Bits 11-15 of these QP
* numbers identify the "TID flow" for the segment.
*/
struct flow_state flow_state;
struct tid_rdma_request *req;
u32 tid_qpn;
u32 tid_offset;
u32 length;
u32 sent;
u8 tnode_cnt;
u8 tidcnt;
u8 tid_idx;
u8 idx;
u8 npagesets;
u8 npkts;
u8 pkt;
u8 resync_npkts;
struct kern_tid_node tnode[TID_RDMA_MAX_PAGES];
struct tid_rdma_pageset pagesets[TID_RDMA_MAX_PAGES];
u32 tid_entry[TID_RDMA_MAX_PAGES];
};
enum tid_rnr_nak_state {
TID_RNR_NAK_INIT = 0,
TID_RNR_NAK_SEND,
TID_RNR_NAK_SENT,
};
bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data);
bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data);
bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data);
void tid_rdma_conn_error(struct rvt_qp *qp);
void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p);
int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit);
int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
struct rvt_sge_state *ss, bool *last);
int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req);
void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req);
void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
/**
* trdma_clean_swqe - clean flows for swqe if large send queue
* @qp: the qp
* @wqe: the send wqe
*/
static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
if (!wqe->priv)
return;
__trdma_clean_swqe(qp, wqe);
}
void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp);
int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
struct ib_qp_init_attr *init_attr);
void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp);
int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd);
struct cntr_entry;
u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
void *context, int vl, int mode, u64 data);
u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
struct ib_other_headers *ohdr,
u32 *bth1, u32 *bth2, u32 *len);
u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct ib_other_headers *ohdr, u32 *bth1,
u32 *bth2, u32 *len);
void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
struct ib_other_headers *ohdr, u32 *bth0,
u32 *bth1, u32 *bth2, u32 *len, bool *last);
void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet);
bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
struct hfi1_pportdata *ppd,
struct hfi1_packet *packet);
void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
u32 *bth2);
void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp);
bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
struct rvt_swqe *wqe)
{
if (wqe->priv &&
(wqe->wr.opcode == IB_WR_RDMA_READ ||
wqe->wr.opcode == IB_WR_RDMA_WRITE) &&
wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
setup_tid_rdma_wqe(qp, wqe);
}
u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct ib_other_headers *ohdr,
u32 *bth1, u32 *bth2, u32 *len);
void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
struct ib_other_headers *ohdr, u32 *bth1,
u32 bth2, u32 *len,
struct rvt_sge_state **ss);
void hfi1_del_tid_reap_timer(struct rvt_qp *qp);
void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet);
bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
struct ib_other_headers *ohdr,
u32 *bth1, u32 *bth2, u32 *len);
void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
struct ib_other_headers *ohdr, u16 iflow,
u32 *bth1, u32 *bth2);
void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet);
void hfi1_add_tid_retry_timer(struct rvt_qp *qp);
void hfi1_del_tid_retry_timer(struct rvt_qp *qp);
u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct ib_other_headers *ohdr, u32 *bth1,
u32 *bth2, u16 fidx);
void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet);
struct hfi1_pkt_state;
int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
void _hfi1_do_tid_send(struct work_struct *work);
bool hfi1_schedule_tid_send(struct rvt_qp *qp);
bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e);
void hfi1_qp_tid_print(struct seq_file *s, struct rvt_qp *qp);
#endif /* HFI1_TID_RDMA_H */
|
cornelisnetworks/opa-hfi1
|
compat/SLES12SP5/compat.h
|
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Copyright(c) 2019 Intel Corporation.
*/
#if !defined(SLES12SP5_COMPAT_H)
#define SLES12SP5_COMPAT_H
#include <linux/device.h>
#include <rdma/ib_mad.h>
#define CREATE_AH_HAS_UDATA
#define HAVE_ALLOC_RDMA_NETDEV
#define HAVE_NOSPEC_H
#define HAVE_ARRAY_SIZE
#define POST_HAS_CONST
#define CREATE_FLOW_HAS_UDATA
#define HAVE_IB_GID_ATTR
#define HAVE_RDMA_NETDEV_GET_PARAMS
#define HAVE_MAX_SEND_SGE
#define HAVE_KMALLOC_ARRAY_NODE
#define HAVE_SECURITY_H
#define HAVE_IBDEV_DRIVER_ID
#define HAVE_RDMA_COPY_AH_ATTR
#include "compat_common.h"
#undef CONFIG_FAULT_INJECTION
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
#define rdma_create_ah(a, b, c) rdma_create_ah(a, b)
#define rdma_destroy_ah(a, b) rdma_destroy_ah(a)
#define rdma_set_device_sysfs_group(a, b)
#undef access_ok
#define access_ok(addr, size) \
({ \
WARN_ON_IN_IRQ(); \
likely(!__range_not_ok(addr, size, user_addr_max())); \
})
#define _ib_alloc_device ib_alloc_device
#endif // SLES12SP5_COMPAT_H
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/trace_gpu.h
|
/*
* Copyright(c) 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(__HFI1_TRACE_GPU_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_GPU_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_gpu
DECLARE_EVENT_CLASS(pin_gpu_pages,
TP_PROTO(unsigned long base, unsigned long addr,
unsigned long len, unsigned long size,
unsigned int npages),
TP_ARGS(base, addr, len, size, npages),
TP_STRUCT__entry(__field(unsigned long, base)
__field(unsigned long, addr)
__field(unsigned long, len)
__field(unsigned long, size)
__field(unsigned int, npages)
),
TP_fast_assign(__entry->base = base;
__entry->addr = addr;
__entry->len = len;
__entry->size = size;
__entry->npages = npages;
),
TP_printk("NV: base: 0x%lx, addr: 0x%lx, len: %lu, pin size: %lu, num pages: %u",
__entry->base,
__entry->addr,
__entry->len,
__entry->size,
__entry->npages
)
);
DECLARE_EVENT_CLASS(free_gpu_pages,
TP_PROTO(unsigned long addr),
TP_ARGS(addr),
TP_STRUCT__entry(__field(unsigned long, addr)
),
TP_fast_assign(__entry->addr = addr;
),
TP_printk("NV: Unpinning the buffer at address: 0x%lx",
__entry->addr
)
);
DECLARE_EVENT_CLASS(invalidate_gpu_pages,
TP_PROTO(unsigned long addr, unsigned long size),
TP_ARGS(addr, size),
TP_STRUCT__entry(__field(unsigned long, addr)
__field(unsigned long, size)
),
TP_fast_assign(__entry->addr = addr;
__entry->size = size;
),
TP_printk("NV: Invalidating address range: start 0x%lx, len %lu",
__entry->addr,
__entry->size
)
);
DECLARE_EVENT_CLASS(gpu_page_table_info,
TP_PROTO(unsigned int entries, unsigned int page_size),
TP_ARGS(entries, page_size),
TP_STRUCT__entry(__field(unsigned int, entries)
__field(unsigned int, page_size)
),
TP_fast_assign(__entry->entries = entries;
__entry->page_size = page_size;
),
TP_printk("NV: nvidia_p2p_get_pages: num entries: %u, page size: %u",
__entry->entries,
__entry->page_size
)
);
DECLARE_EVENT_CLASS(pin_gpu_pages_fail,
TP_PROTO(int ret, unsigned long addr, unsigned long size),
TP_ARGS(ret, addr, size),
TP_STRUCT__entry(__field(int, ret)
__field(unsigned long, addr)
__field(unsigned long, size)
),
TP_fast_assign(__entry->ret = ret;
__entry->addr = addr;
__entry->size = size;
),
TP_printk("NV: Failed to pin GPU mem pages. ret %d, addr 0x%lx, size %lu",
__entry->ret,
__entry->addr,
__entry->size
)
);
TRACE_EVENT(gpu_page_size_check,
TP_PROTO(unsigned int size_type),
TP_ARGS(size_type),
TP_STRUCT__entry(__field(unsigned int, size_type)),
TP_fast_assign(__entry->size_type = size_type;),
TP_printk("NV: GPU memory page size is not 64KB. Size type: %u",
__entry->size_type)
);
TRACE_EVENT(gpu_page_tbl_check,
TP_PROTO(unsigned int comp, unsigned int inst),
TP_ARGS(comp, inst),
TP_STRUCT__entry(__field(unsigned int, comp)
__field(unsigned int, inst)
),
TP_fast_assign(__entry->comp = comp;
__entry->inst = inst;
),
TP_printk("NV: page table version incompatible. Compiled: 0x%x, Installed: 0x%x",
__entry->comp,
__entry->inst)
);
DEFINE_EVENT(pin_gpu_pages, pin_rcv_pages_gpu,
TP_PROTO(unsigned long base, unsigned long addr,
unsigned long len, unsigned long size, unsigned int npages),
TP_ARGS(base, addr, len, size, npages));
DEFINE_EVENT(pin_gpu_pages, pin_sdma_pages_gpu,
TP_PROTO(unsigned long base, unsigned long addr,
unsigned long len, unsigned long size, unsigned int npages),
TP_ARGS(base, addr, len, size, npages));
DEFINE_EVENT(invalidate_gpu_pages, unpin_rcv_gpu_pages_callback,
TP_PROTO(unsigned long addr, unsigned long len),
TP_ARGS(addr, len));
DEFINE_EVENT(invalidate_gpu_pages, unpin_gpu_pages_callback,
TP_PROTO(unsigned long addr, unsigned long len),
TP_ARGS(addr, len));
DEFINE_EVENT(gpu_page_table_info, recv_gpu_page_table_info,
TP_PROTO(unsigned int entries, unsigned int page_size),
TP_ARGS(entries, page_size));
DEFINE_EVENT(gpu_page_table_info, sdma_gpu_page_table_info,
TP_PROTO(unsigned int entries, unsigned int page_size),
TP_ARGS(entries, page_size));
DEFINE_EVENT(pin_gpu_pages_fail, recv_pin_gpu_pages_fail,
TP_PROTO(int ret, unsigned long addr, unsigned long size),
TP_ARGS(ret, addr, size));
DEFINE_EVENT(pin_gpu_pages_fail, sdma_pin_gpu_pages_fail,
TP_PROTO(int ret, unsigned long addr, unsigned long size),
TP_ARGS(ret, addr, size));
DEFINE_EVENT(free_gpu_pages, free_recv_gpu_pages,
TP_PROTO(unsigned long addr),
TP_ARGS(addr));
DEFINE_EVENT(free_gpu_pages, free_sdma_gpu_pages,
TP_PROTO(unsigned long addr),
TP_ARGS(addr));
#endif /* __HFI1_TRACE_GPU_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_gpu
#include <trace/define_trace.h>
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/trace_rx.h
|
/*
* Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(__HFI1_TRACE_RX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_RX_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "hfi.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_rx
TRACE_EVENT(hfi1_rcvhdr,
TP_PROTO(struct hfi1_packet *packet),
TP_ARGS(packet),
TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->dd)
__field(u64, eflags)
__field(u32, ctxt)
__field(u32, etype)
__field(u32, hlen)
__field(u32, tlen)
__field(u32, updegr)
__field(u32, etail)
),
TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->dd);
__entry->eflags = rhf_err_flags(packet->rhf);
__entry->ctxt = packet->rcd->ctxt;
__entry->etype = packet->etype;
__entry->hlen = packet->hlen;
__entry->tlen = packet->tlen;
__entry->updegr = packet->updegr;
__entry->etail = rhf_egr_index(packet->rhf);
),
TP_printk(
"[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
__get_str(dev),
__entry->ctxt,
__entry->eflags,
__entry->etype, show_packettype(__entry->etype),
__entry->hlen,
__entry->tlen,
__entry->updegr,
__entry->etail
)
);
TRACE_EVENT(hfi1_receive_interrupt,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd),
TP_ARGS(dd, rcd),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(u32, ctxt)
__field(bool, slow_path)
__field(bool, dma_rtail)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = rcd->ctxt;
__entry->slow_path = hfi1_is_slowpath(rcd);
__entry->dma_rtail = get_dma_rtail_setting(rcd);
),
TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
__get_str(dev),
__entry->ctxt,
__entry->slow_path,
__entry->dma_rtail
)
);
TRACE_EVENT(hfi1_mmu_invalidate,
TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
unsigned long start, unsigned long end),
TP_ARGS(ctxt, subctxt, type, start, end),
TP_STRUCT__entry(
__field(unsigned int, ctxt)
__field(u16, subctxt)
__string(type, type)
__field(unsigned long, start)
__field(unsigned long, end)
),
TP_fast_assign(
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
__assign_str(type, type);
__entry->start = start;
__entry->end = end;
),
TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx",
__entry->ctxt,
__entry->subctxt,
__get_str(type),
__entry->start,
__entry->end
)
);
#define SNOOP_PRN \
"slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
"svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
TRACE_EVENT(snoop_capture,
TP_PROTO(struct hfi1_devdata *dd,
int hdr_len,
struct ib_header *hdr,
int data_len,
void *data),
TP_ARGS(dd, hdr_len, hdr, data_len, data),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
__field(u16, slid)
__field(u16, dlid)
__field(u32, qpn)
__field(u8, opcode)
__field(u8, sl)
__field(u16, pkey)
__field(u32, hdr_len)
__field(u32, data_len)
__field(u8, lnh)
__dynamic_array(u8, raw_hdr, hdr_len)
__dynamic_array(u8, raw_pkt, data_len)
),
TP_fast_assign(
struct ib_other_headers *ohdr;
__entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
if (__entry->lnh == HFI1_LRH_BTH)
ohdr = &hdr->u.oth;
else
ohdr = &hdr->u.l.oth;
DD_DEV_ASSIGN(dd);
__entry->slid = be16_to_cpu(hdr->lrh[3]);
__entry->dlid = be16_to_cpu(hdr->lrh[1]);
__entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
__entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
__entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
__entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
__entry->hdr_len = hdr_len;
__entry->data_len = data_len;
memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
memcpy(__get_dynamic_array(raw_pkt), data, data_len);
),
TP_printk(
"[%s] " SNOOP_PRN,
__get_str(dev),
__entry->slid,
__entry->dlid,
__entry->qpn,
__entry->opcode,
show_ib_opcode(__entry->opcode),
__entry->sl,
__entry->pkey,
__entry->hdr_len,
__entry->data_len
)
);
#endif /* __HFI1_TRACE_RX_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_rx
#include <trace/define_trace.h>
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/sdma_txreq.h
|
/*
* Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef HFI1_SDMA_TXREQ_H
#define HFI1_SDMA_TXREQ_H
/* increased for AHG */
#define NUM_DESC 6
/*
* struct sdma_desc - canonical fragment descriptor
*
* This is the descriptor carried in the tx request
* corresponding to each fragment.
*
*/
struct sdma_desc {
/* private: don't use directly */
u64 qw[2];
};
/**
* struct sdma_txreq - the sdma_txreq structure (one per packet)
* @list: for use by user and by queuing for wait
*
 * This is the representation of a packet which consists of some
 * number of fragments. Storage for all fragments is provided
 * within the structure.
 *
 * The storage for the descriptors is automatically extended as
 * needed when the current allocation is exceeded.
*
* The user (Verbs or PSM) may overload this structure with fields
* specific to their use by putting this struct first in their struct.
 * The method of allocation of the overloaded structure is user dependent.
*
* The list is the only public field in the structure.
*
*/
#define SDMA_TXREQ_S_OK 0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED 2
#define SDMA_TXREQ_S_SHUTDOWN 3
/* flags bits */
#define SDMA_TXREQ_F_URGENT 0x0001
#define SDMA_TXREQ_F_AHG_COPY 0x0002
#define SDMA_TXREQ_F_USE_AHG 0x0004
#define SDMA_TXREQ_F_SGE_CORRUPT 0x0008
#define SDMA_TXREQ_F_VIP 0x0010
struct sdma_txreq;
typedef void (*callback_t)(struct sdma_txreq *, int);
struct iowait;
struct sdma_txreq {
struct list_head list;
/* private: */
struct sdma_desc *descp;
/* private: */
void *coalesce_buf;
/* private: */
struct iowait *wait;
/* private: */
callback_t complete;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
u64 sn;
#endif
/* private: - used in coalesce/pad processing */
u16 packet_len;
/* private: - down-counted to trigger last */
u16 tlen;
/* private: */
u16 num_desc;
/* private: */
u16 desc_limit;
/* private: */
u16 next_descq_idx;
/* private: */
u16 coalesce_idx;
/* private: flags */
u16 flags;
/* private: */
struct sdma_desc descs[NUM_DESC];
};
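/*
 * Illustrative sketch (not part of the driver; names are hypothetical)
 * of the overloading pattern described above: a user such as Verbs or
 * PSM embeds struct sdma_txreq as the first member, so a pointer to the
 * outer structure doubles as a pointer to the embedded txreq.
 */
struct example_user_txreq {
	struct sdma_txreq txreq;	/* must be the first member */
	u32 user_cookie;		/* user-specific state */
};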
static inline int sdma_txreq_built(struct sdma_txreq *tx)
{
return tx->num_desc;
}
#endif /* HFI1_SDMA_TXREQ_H */
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/ulp/opa_vnic/opa_vnic_debugfs.c
|
<gh_stars>0
/*
* Copyright(c) 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* This file contains HFI VNIC debug interface
*/
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <linux/module.h>
#include "opa_vnic_internal.h"
#include "opa_vnic_debugfs.h"
/* MAC table string size; 64K is enough for the whole table */
#define OPA_VNIC_MACTBL_STR_SIZE SZ_64K
/* EEPH string size */
#define OPA_VNIC_EEPH_STR_SIZE SZ_1K
/* Vnic stats string size */
#define OPA_VNIC_STATS_STR_SIZE SZ_1K
enum {
OPA_VNIC_DBG_FABRIC_ID,
OPA_VNIC_DBG_VESW_ID,
OPA_VNIC_DBG_PKEY,
OPA_VNIC_DBG_ETH_LINK_STATUS,
OPA_VNIC_DBG_BASE_MAC_ADDR,
OPA_VNIC_DBG_CFG_STATE,
OPA_VNIC_DBG_OPER_STATE,
OPA_VNIC_DBG_ENCAP_SLID,
OPA_VNIC_DBG_DEF_PORT_MASK,
OPA_VNIC_DBG_UC_DLID,
OPA_VNIC_DBG_MC_DLID,
OPA_VNIC_DBG_SC_UC,
OPA_VNIC_DBG_SC_MC,
OPA_VNIC_DBG_PCP_SC_UC,
OPA_VNIC_DBG_PCP_SC_MC,
OPA_VNIC_DBG_VL_UC,
OPA_VNIC_DBG_VL_MC,
OPA_VNIC_DBG_PCP_VL_UC,
OPA_VNIC_DBG_PCP_VL_MC,
OPA_VNIC_DBG_ETH_MTU,
OPA_VNIC_DBG_ENCAP_RC,
OPA_VNIC_DBG_RESET,
OPA_VNIC_DBG_MACTBL_DIGEST,
OPA_VNIC_DBG_NUM_ATTR,
};
static struct dentry *opa_vnic_dbg_root;
#define DEBUGFS_SEQ_FILE_OPS(name) \
static const struct seq_operations _##name##_seq_ops = { \
.start = _##name##_seq_start, \
.next = _##name##_seq_next, \
.stop = _##name##_seq_stop, \
.show = _##name##_seq_show \
}
#define DEBUGFS_SEQ_FILE_OPEN(name) \
static int _##name##_open(struct inode *inode, struct file *file) \
{ \
struct seq_file *seq; \
int ret; \
ret = seq_open(file, &_##name##_seq_ops); \
if (ret) \
return ret; \
seq = file->private_data; \
seq->private = inode->i_private; \
return 0; \
}
#define DEBUGFS_FILE_OPS(name) \
static const struct file_operations _##name##_file_ops = { \
.owner = THIS_MODULE, \
.open = _##name##_open, \
.write = _##name##_write, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = seq_release \
}
#define DEBUGFS_FILE_CREATE(name, parent, data, ops, mode) \
do { \
struct dentry *ent; \
ent = debugfs_create_file(name, mode, parent, data, ops); \
if (!ent) \
pr_warn("create of %s failed\n", name); \
} while (0)
#define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \
DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, 0644)
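/*
 * The macros above generate the per-attribute seq_file boilerplate
 * (the _##name##_ open/ops pattern) and wrap debugfs_create_file();
 * see the vport_state handlers below for how they are instantiated.
 */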
static void *_vport_state_seq_start(struct seq_file *s, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
if (*pos >= OPA_VNIC_DBG_NUM_ATTR)
return NULL;
return pos;
}
static void *_vport_state_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
++*pos;
if (*pos >= OPA_VNIC_DBG_NUM_ATTR)
return NULL;
return pos;
}
static void _vport_state_seq_stop(struct seq_file *s, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int _vport_state_seq_show(struct seq_file *s, void *v)
{
struct opa_vnic_adapter *adapter =
(struct opa_vnic_adapter *)s->private;
struct __opa_veswport_info *info = &adapter->info;
loff_t *spos = v;
int stat;
char *str;
u8 i, *val;
if (v == SEQ_START_TOKEN)
return 0;
stat = *spos;
switch (stat) {
case OPA_VNIC_DBG_FABRIC_ID:
seq_printf(s, "fabric_id (fabric id): 0x%x\n",
info->vesw.fabric_id);
break;
case OPA_VNIC_DBG_VESW_ID:
seq_printf(s, "vesw_id (virtual eth switch id): 0x%x\n",
info->vesw.vesw_id);
break;
case OPA_VNIC_DBG_PKEY:
seq_printf(s, "pkey (partition key): 0x%x\n",
info->vesw.pkey);
break;
case OPA_VNIC_DBG_ETH_LINK_STATUS:
seq_printf(s, "eth_link_status (0=unknown 1=up 2=down): %u\n",
info->vport.eth_link_status);
break;
case OPA_VNIC_DBG_BASE_MAC_ADDR:
val = info->vport.base_mac_addr;
str = "%s: %02x:%02x:%02x:%02x:%02x:%02x\n";
seq_printf(s, str, "base_mac_addr (mac address)",
val[0], val[1], val[2], val[3], val[4], val[5]);
break;
case OPA_VNIC_DBG_CFG_STATE:
seq_printf(s, "config_state (0=nop 1=drop 2=init 3=fwd): %u\n",
info->vport.config_state);
break;
case OPA_VNIC_DBG_OPER_STATE:
seq_printf(s, "oper_state (0=nop 1=drop 2=init 3=fwd): %u\n",
info->vport.oper_state);
break;
case OPA_VNIC_DBG_ENCAP_SLID:
seq_printf(s, "encap_slid (source lid): 0x%x\n",
info->vport.encap_slid);
break;
case OPA_VNIC_DBG_DEF_PORT_MASK:
seq_printf(s, "def_port_mask (default port mask): 0x%04x\n",
info->vesw.def_port_mask);
break;
case OPA_VNIC_DBG_UC_DLID:
seq_puts(s, "u_ucast_dlid (unknown ucast dlid):");
for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
seq_printf(s, " 0x%x", info->vesw.u_ucast_dlid[i]);
seq_puts(s, "\n");
break;
case OPA_VNIC_DBG_MC_DLID:
seq_printf(s, "u_mcast_dlid (unknown mcast dlid): 0x%x\n",
info->vesw.u_mcast_dlid);
break;
case OPA_VNIC_DBG_ETH_MTU:
seq_printf(s, "eth_mtu (ethernet mtu): %u\n",
info->vesw.eth_mtu);
break;
case OPA_VNIC_DBG_SC_UC:
seq_printf(s, "non_vlan_sc_uc (non-vlan ucast sc): 0x%x\n",
info->vport.non_vlan_sc_uc);
break;
case OPA_VNIC_DBG_SC_MC:
seq_printf(s, "non_vlan_sc_mc (non-vlan mcast sc): 0x%x\n",
info->vport.non_vlan_sc_mc);
break;
case OPA_VNIC_DBG_PCP_SC_UC:
val = info->vport.pcp_to_sc_uc;
str = "%s: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n";
seq_printf(s, str, "pcp_to_sc_uc (vlan ucast sc)",
val[0], val[1], val[2], val[3],
val[4], val[5], val[6], val[7]);
break;
case OPA_VNIC_DBG_PCP_SC_MC:
val = info->vport.pcp_to_sc_mc;
str = "%s: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n";
seq_printf(s, str, "pcp_to_sc_mc (vlan mcast sc)",
val[0], val[1], val[2], val[3],
val[4], val[5], val[6], val[7]);
break;
case OPA_VNIC_DBG_VL_UC:
seq_printf(s, "non_vlan_vl_uc (non-vlan ucast vl): 0x%x\n",
info->vport.non_vlan_vl_uc);
break;
case OPA_VNIC_DBG_VL_MC:
seq_printf(s, "non_vlan_vl_mc (non-vlan mcast vl): 0x%x\n",
info->vport.non_vlan_vl_mc);
break;
case OPA_VNIC_DBG_PCP_VL_UC:
val = info->vport.pcp_to_vl_uc;
str = "%s: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n";
seq_printf(s, str, "pcp_to_vl_uc (vlan ucast vl)",
val[0], val[1], val[2], val[3],
val[4], val[5], val[6], val[7]);
break;
case OPA_VNIC_DBG_PCP_VL_MC:
val = info->vport.pcp_to_vl_mc;
str = "%s: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n";
seq_printf(s, str, "pcp_to_vl_mc (vlan mcast vl)",
val[0], val[1], val[2], val[3],
val[4], val[5], val[6], val[7]);
break;
case OPA_VNIC_DBG_ENCAP_RC:
seq_printf(s, "encap_rc (routing control): 0x%08x\n",
info->vesw.rc);
break;
case OPA_VNIC_DBG_MACTBL_DIGEST:
seq_printf(s, "MAC Table Digest: %u\n",
info->vport.mac_tbl_digest);
break;
default:
return SEQ_SKIP;
}
return 0;
}
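/*
 * Write handler for the vport_state file. The accepted input format is
 * inferred from the parsers below; a few hypothetical examples:
 *
 *	echo "vesw_id 0x10"			> vport_state
 *	echo "base_mac_addr 02:00:00:00:00:01"	> vport_state
 *	echo "pcp_to_sc_uc 0 1 2 3 4 5 6 7"	> vport_state
 *	echo "reset"				> vport_state
 *
 * Each write is a single "<attribute> <value...>" line; "reset" restores
 * the power-on default values via vema_get_pod_values().
 */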
static ssize_t _vport_state_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = (struct seq_file *)file->private_data;
struct opa_vnic_adapter *adapter =
(struct opa_vnic_adapter *)s->private;
struct __opa_veswport_info *info = &adapter->info;
char debug_buf[256];
ssize_t len;
u32 value;
int cnt;
if (*ppos != 0)
return 0;
if (count >= sizeof(debug_buf))
return -ENOSPC;
len = simple_write_to_buffer(debug_buf, sizeof(debug_buf) - 1,
ppos, buf, count);
if (len < 0)
return len;
debug_buf[len] = '\0';
if (strncmp(debug_buf, "fabric_id", 9) == 0) {
cnt = kstrtouint(strim(&debug_buf[9]), 0, &value);
if (!cnt)
info->vesw.fabric_id = value;
} else if (strncmp(debug_buf, "vesw_id", 7) == 0) {
cnt = kstrtouint(strim(&debug_buf[7]), 0, &value);
if (!cnt)
info->vesw.vesw_id = value;
} else if (strncmp(debug_buf, "pkey", 4) == 0) {
cnt = kstrtouint(strim(&debug_buf[4]), 0, &value);
if (!cnt)
info->vesw.pkey = value;
} else if (strncmp(debug_buf, "base_mac_addr", 13) == 0) {
u8 i, mac[6];
cnt = sscanf(&debug_buf[13], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
&mac[0], &mac[1], &mac[2],
&mac[3], &mac[4], &mac[5]);
if (cnt == 6)
for (i = 0; i < 6; i++)
info->vport.base_mac_addr[i] = mac[i];
} else if (strncmp(debug_buf, "config_state", 12) == 0) {
cnt = kstrtouint(strim(&debug_buf[12]), 0, &value);
if (!cnt)
info->vport.config_state = value;
} else if (strncmp(debug_buf, "encap_slid", 10) == 0) {
cnt = kstrtouint(strim(&debug_buf[10]), 0, &value);
if (!cnt)
info->vport.encap_slid = value;
} else if (strncmp(debug_buf, "def_port_mask", 13) == 0) {
cnt = kstrtouint(strim(&debug_buf[13]), 16, &value);
if (!cnt)
info->vesw.def_port_mask = value;
} else if (strncmp(debug_buf, "u_ucast_dlid", 12) == 0) {
u32 idx;
int val;
cnt = sscanf(&debug_buf[12], "%u %i", &idx, &val);
if ((cnt == 2) && (idx < OPA_VESW_MAX_NUM_DEF_PORT))
info->vesw.u_ucast_dlid[idx] = (u32)val;
} else if (strncmp(debug_buf, "u_mcast_dlid", 12) == 0) {
cnt = kstrtouint(strim(&debug_buf[12]), 0, &value);
if (!cnt)
info->vesw.u_mcast_dlid = value;
} else if (strncmp(debug_buf, "eth_mtu", 7) == 0) {
cnt = kstrtouint(strim(&debug_buf[7]), 0, &value);
if (!cnt)
info->vesw.eth_mtu = value;
} else if (strncmp(debug_buf, "non_vlan_sc_uc", 14) == 0) {
cnt = kstrtouint(strim(&debug_buf[14]), 0, &value);
if (!cnt)
info->vport.non_vlan_sc_uc = value;
} else if (strncmp(debug_buf, "non_vlan_sc_mc", 14) == 0) {
cnt = kstrtouint(strim(&debug_buf[14]), 0, &value);
if (!cnt)
info->vport.non_vlan_sc_mc = value;
} else if (strncmp(debug_buf, "pcp_to_sc_uc", 12) == 0) {
int i, sc[8];
cnt = sscanf(&debug_buf[12], "%i %i %i %i %i %i %i %i",
&sc[0], &sc[1], &sc[2], &sc[3],
&sc[4], &sc[5], &sc[6], &sc[7]);
if (cnt == 8)
for (i = 0; i < 8; i++)
info->vport.pcp_to_sc_uc[i] = sc[i];
} else if (strncmp(debug_buf, "pcp_to_sc_mc", 12) == 0) {
int i, sc[8];
cnt = sscanf(&debug_buf[12], "%i %i %i %i %i %i %i %i",
&sc[0], &sc[1], &sc[2], &sc[3],
&sc[4], &sc[5], &sc[6], &sc[7]);
if (cnt == 8)
for (i = 0; i < 8; i++)
info->vport.pcp_to_sc_mc[i] = sc[i];
} else if (strncmp(debug_buf, "non_vlan_vl_uc", 14) == 0) {
cnt = kstrtouint(strim(&debug_buf[14]), 0, &value);
if (!cnt)
info->vport.non_vlan_vl_uc = value;
} else if (strncmp(debug_buf, "non_vlan_vl_mc", 14) == 0) {
cnt = kstrtouint(strim(&debug_buf[14]), 0, &value);
if (!cnt)
info->vport.non_vlan_vl_mc = value;
} else if (strncmp(debug_buf, "pcp_to_vl_uc", 12) == 0) {
int i, vl[8];
cnt = sscanf(&debug_buf[12], "%i %i %i %i %i %i %i %i",
&vl[0], &vl[1], &vl[2], &vl[3],
&vl[4], &vl[5], &vl[6], &vl[7]);
if (cnt == 8)
for (i = 0; i < 8; i++)
info->vport.pcp_to_vl_uc[i] = vl[i];
} else if (strncmp(debug_buf, "pcp_to_vl_mc", 12) == 0) {
int i, vl[8];
cnt = sscanf(&debug_buf[12], "%i %i %i %i %i %i %i %i",
&vl[0], &vl[1], &vl[2], &vl[3],
&vl[4], &vl[5], &vl[6], &vl[7]);
if (cnt == 8)
for (i = 0; i < 8; i++)
info->vport.pcp_to_vl_mc[i] = vl[i];
} else if (strncmp(debug_buf, "encap_rc", 8) == 0) {
cnt = kstrtouint(strim(&debug_buf[8]), 16, &value);
if (!cnt)
info->vesw.rc = value;
} else if (strncmp(debug_buf, "reset", 5) == 0) {
struct opa_veswport_info port_info;
vema_get_pod_values(&port_info);
opa_vnic_set_vesw_info(adapter, &port_info.vesw);
opa_vnic_set_per_veswport_info(adapter, &port_info.vport);
opa_vnic_release_mac_tbl(adapter);
}
/* process the new config settings */
opa_vnic_process_vema_config(adapter);
return count;
}
DEBUGFS_SEQ_FILE_OPS(vport_state);
DEBUGFS_SEQ_FILE_OPEN(vport_state)
DEBUGFS_FILE_OPS(vport_state);
static ssize_t mac_tbl_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct opa_vnic_adapter *adapter = file_inode(file)->i_private;
struct opa_veswport_mactable *tbl;
char *debug_buf;
ssize_t len = 0;
int i, extra, rc = 0;
if (*ppos != 0)
return 0;
if (count < OPA_VNIC_MACTBL_STR_SIZE)
return -ENOSPC;
debug_buf = kmalloc(OPA_VNIC_MACTBL_STR_SIZE, GFP_KERNEL);
if (!debug_buf)
return -ENOMEM;
/* allocate veswport mac table */
extra = sizeof(struct opa_veswport_mactable_entry) *
OPA_VNIC_MAC_TBL_MAX_ENTRIES;
tbl = kzalloc(sizeof(*tbl) + extra, GFP_KERNEL);
if (!tbl) {
rc = -ENOMEM;
goto read_done;
}
/* get the whole table */
tbl->offset = cpu_to_be16(0);
tbl->num_entries = cpu_to_be16(OPA_VNIC_MAC_TBL_MAX_ENTRIES);
opa_vnic_query_mac_tbl(adapter, tbl);
len += scnprintf(debug_buf + len, OPA_VNIC_MACTBL_STR_SIZE - len,
"Mac Table Digest: %u\n",
be32_to_cpu(tbl->mac_tbl_digest));
for (i = 0; i < OPA_VNIC_MAC_TBL_MAX_ENTRIES; i++) {
struct opa_veswport_mactable_entry *entry =
&tbl->tbl_entries[i];
u8 *mac_addr = entry->mac_addr;
u8 empty_mac[ETH_ALEN] = { 0 };
/* if the entry is not there (null), skip */
if (!memcmp(mac_addr, empty_mac, ARRAY_SIZE(empty_mac)))
continue;
len += scnprintf(debug_buf + len,
OPA_VNIC_MACTBL_STR_SIZE - len,
"%4d: %02x:%02x:%02x:%02x:%02x:%02x 0x%x\n",
i, mac_addr[0], mac_addr[1], mac_addr[2],
mac_addr[3], mac_addr[4], mac_addr[5],
be32_to_cpu(entry->dlid_sd));
}
kfree(tbl);
rc = simple_read_from_buffer(buf, count, ppos, debug_buf, len);
read_done:
kfree(debug_buf);
return rc;
}
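/*
 * Write handler for the mac_tbl file. Based on the sscanf calls below,
 * the expected input is a header followed by one line per entry:
 *
 *	<digest> <offset> <num_entries>
 *	<aa:bb:cc:dd:ee:ff> <dlid_sd>
 *	...
 *
 * where <dlid_sd> is the combined dlid_sd word parsed as hex.
 */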
static ssize_t mac_tbl_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct opa_vnic_adapter *adapter = file_inode(file)->i_private;
struct opa_veswport_mactable *tbl;
int i, extra, cnt, rc, num_bytes;
u32 offset, num_entries, digest;
char *debug_buf, *buf_ptr;
ssize_t len;
if (*ppos != 0)
return 0;
if (count >= OPA_VNIC_MACTBL_STR_SIZE)
return -ENOSPC;
debug_buf = kmalloc(count + 1, GFP_KERNEL);
if (!debug_buf)
return -ENOMEM;
rc = simple_write_to_buffer(debug_buf, count, ppos, buf, count);
if (rc < 0)
goto write_err;
len = rc;
debug_buf[len] = '\0';
	/* read table digest, offset and number of entries */
buf_ptr = debug_buf;
cnt = sscanf(buf_ptr, "%u %u %u %n", &digest, &offset, &num_entries,
&num_bytes);
if ((cnt != 3) || !num_entries ||
((offset + num_entries) > OPA_VNIC_MAC_TBL_MAX_ENTRIES)) {
v_err("Invalid input\n");
rc = -EINVAL;
goto write_err;
}
/* allocate veswport mac table */
extra = sizeof(struct opa_veswport_mactable_entry) * num_entries;
tbl = kzalloc(sizeof(*tbl) + extra, GFP_KERNEL);
if (!tbl) {
rc = -ENOMEM;
goto write_err;
}
/* build the veswport mac table */
tbl->mac_tbl_digest = cpu_to_be32(digest);
tbl->offset = cpu_to_be16(offset);
tbl->num_entries = cpu_to_be16(num_entries);
for (i = 0; i < num_entries; i++) {
struct opa_veswport_mactable_entry *entry =
&tbl->tbl_entries[i];
u8 *mac_addr = entry->mac_addr;
u32 dlid_sd;
buf_ptr += num_bytes;
cnt = sscanf(buf_ptr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %x%n",
&mac_addr[0], &mac_addr[1], &mac_addr[2],
&mac_addr[3], &mac_addr[4], &mac_addr[5],
&dlid_sd, &num_bytes);
if (cnt != 7) {
v_err("Invalid input\n");
rc = -EINVAL;
goto write_invalid;
}
entry->dlid_sd = cpu_to_be32(dlid_sd);
}
/* update mac table */
rc = opa_vnic_update_mac_tbl(adapter, tbl);
write_invalid:
kfree(tbl);
rc = rc ? : count;
write_err:
kfree(debug_buf);
return rc;
}
static const struct file_operations mac_tbl_file_ops = {
.owner = THIS_MODULE,
.write = mac_tbl_write,
.read = mac_tbl_read,
};
void opa_vnic_dbg_vport_init(struct opa_vnic_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
if (!opa_vnic_dbg_root)
return;
adapter->dentry = debugfs_create_dir(dev->name,
opa_vnic_dbg_root);
if (!adapter->dentry) {
pr_warn("init of opa vnic debugfs failed\n");
return;
}
DEBUGFS_SEQ_FILE_CREATE(vport_state, adapter->dentry, adapter);
DEBUGFS_FILE_CREATE("mac_tbl", adapter->dentry, adapter,
&mac_tbl_file_ops, 0644);
}
void opa_vnic_dbg_vport_exit(struct opa_vnic_adapter *adapter)
{
debugfs_remove_recursive(adapter->dentry);
}
static int ctrl_add_vport_set(void *data, u64 val)
{
struct opa_vnic_vema_port *port = data;
struct opa_vnic_adapter *adapter;
u8 vport = (u8)val;
adapter = vema_add_vport(port, vport);
return IS_ERR(adapter);
}
DEFINE_SIMPLE_ATTRIBUTE(ctrl_add_vport, NULL, ctrl_add_vport_set, "%llu\n");
void opa_vnic_dbg_ctrl_init(struct opa_vnic_ctrl_port *cport)
{
struct opa_vnic_vema_port *port;
int i;
if (!opa_vnic_dbg_root)
return;
for (i = 1; i <= cport->num_ports; i++) {
char name[255];
port = vema_get_port(cport, i);
if (port->dentry)
continue;
snprintf(name, sizeof(name), "%s.%02x",
dev_name(&cport->ibdev->dev), i);
port->dentry = debugfs_create_dir(name, opa_vnic_dbg_root);
if (port->dentry)
DEBUGFS_FILE_CREATE("add_vport", port->dentry, port,
&ctrl_add_vport, 0200);
}
}
void opa_vnic_dbg_ctrl_exit(struct opa_vnic_ctrl_port *cport)
{
struct opa_vnic_vema_port *port;
int i;
for (i = 1; i <= cport->num_ports; i++) {
port = vema_get_port(cport, i);
debugfs_remove_recursive(port->dentry);
port->dentry = NULL;
}
}
void opa_vnic_dbg_init(void)
{
opa_vnic_dbg_root = debugfs_create_dir(opa_vnic_driver_name, NULL);
if (IS_ERR(opa_vnic_dbg_root))
opa_vnic_dbg_root = NULL;
if (!opa_vnic_dbg_root)
pr_warn("init of hfi vnic debugfs failed\n");
}
void opa_vnic_dbg_exit(void)
{
debugfs_remove_recursive(opa_vnic_dbg_root);
opa_vnic_dbg_root = NULL;
}
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/opfn.c
|
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "hfi.h"
#include "trace.h"
#include "qp.h"
#define IB_BTHE_E BIT(IB_BTHE_E_SHIFT)
#define OPFN_CODE(code) BIT((code - 1))
#define OPFN_MASK(code) OPFN_CODE(STL_VERBS_EXTD_##code)
struct hfi1_opfn_type {
bool (*request)(struct rvt_qp *qp, u64 *data);
bool (*response)(struct rvt_qp *qp, u64 *data);
bool (*reply)(struct rvt_qp *qp, u64 data);
void (*error)(struct rvt_qp *qp);
};
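/*
 * Handler table indexed by capability code; entry 0 is a placeholder
 * because valid capcodes start at 1. Only TID RDMA is defined today.
 */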
struct hfi1_opfn_type hfi1_opfn_handlers[STL_VERBS_EXTD_MAX] = {
{ 0 },
{ tid_rdma_conn_req, tid_rdma_conn_resp, tid_rdma_conn_reply,
tid_rdma_conn_error },
};
static void opfn_schedule_conn_request(struct rvt_qp *qp);
static bool hfi1_opfn_extended(u32 bth1)
{
return !!(bth1 & IB_BTHE_E);
}
void opfn_conn_request(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
struct ib_atomic_wr wr;
u16 mask, capcode;
struct hfi1_opfn_type *extd;
u64 data;
unsigned long flags;
int ret = 0;
trace_hfi1_opfn_state_conn_request(qp);
spin_lock_irqsave(&priv->opfn.lock, flags);
/*
* Exit if the extended bit is not set, or if nothing is requested, or
* if we have completed all requests, or if a previous request is in
* progress
*/
if (!priv->opfn.extended || !priv->opfn.requested ||
priv->opfn.requested == priv->opfn.completed || priv->opfn.curr)
goto done;
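	/*
	 * Pick the lowest-numbered capability that is requested but not yet
	 * completed: mask & ~(mask - 1) isolates the low-order set bit and
	 * ilog2() + 1 maps it back to a capability code, the inverse of
	 * OPFN_CODE(code) == BIT(code - 1).
	 */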
mask = priv->opfn.requested & ~priv->opfn.completed;
capcode = ilog2(mask & ~(mask - 1)) + 1;
if (capcode >= STL_VERBS_EXTD_MAX) {
priv->opfn.completed |= OPFN_CODE(capcode);
goto done;
}
extd = &hfi1_opfn_handlers[capcode];
if (!extd || !extd->request || !extd->request(qp, &data)) {
/*
* Either there is no handler for this capability or the request
* packet could not be generated. Either way, mark it as done so
* we don't keep attempting to complete it.
*/
priv->opfn.completed |= OPFN_CODE(capcode);
goto done;
}
trace_hfi1_opfn_data_conn_request(qp, capcode, data);
data = (data & ~0xf) | capcode;
memset(&wr, 0, sizeof(wr));
wr.wr.opcode = IB_WR_OPFN;
wr.remote_addr = HFI1_VERBS_E_ATOMIC_VADDR;
wr.compare_add = data;
priv->opfn.curr = capcode; /* A new request is now in progress */
/* Drop opfn.lock before calling ib_post_send() */
spin_unlock_irqrestore(&priv->opfn.lock, flags);
ret = ib_post_send(&qp->ibqp, &wr.wr, NULL);
if (ret)
goto err;
trace_hfi1_opfn_state_conn_request(qp);
return;
err:
trace_hfi1_msg_opfn_conn_request(qp, "ib_ost_send failed: ret = ",
(u64)ret);
spin_lock_irqsave(&priv->opfn.lock, flags);
/*
* In case of an unexpected error return from ib_post_send
* clear opfn.curr and reschedule to try again
*/
priv->opfn.curr = STL_VERBS_EXTD_NONE;
opfn_schedule_conn_request(qp);
done:
spin_unlock_irqrestore(&priv->opfn.lock, flags);
}
void opfn_send_conn_request(struct work_struct *work)
{
struct hfi1_opfn_data *od;
struct hfi1_qp_priv *qpriv;
od = container_of(work, struct hfi1_opfn_data, opfn_work);
qpriv = container_of(od, struct hfi1_qp_priv, opfn);
opfn_conn_request(qpriv->owner);
}
/*
 * When the QP s_lock is held in the caller, the OPFN request must be
 * scheduled to a different workqueue to avoid double locking the QP
 * s_lock in the call to ib_post_send() in opfn_conn_request().
*/
static void opfn_schedule_conn_request(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
trace_hfi1_opfn_state_sched_conn_request(qp);
/* XXX: should we be scheduling to a different workqueue? */
schedule_work(&priv->opfn.opfn_work);
}
void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e,
struct ib_atomic_eth *ateth)
{
struct hfi1_qp_priv *priv = qp->priv;
u64 data = be64_to_cpu(ateth->compare_data);
struct hfi1_opfn_type *extd;
u8 capcode;
unsigned long flags;
trace_hfi1_opfn_state_conn_response(qp);
capcode = data & 0xf;
trace_hfi1_opfn_data_conn_response(qp, capcode, data);
if (!capcode || capcode >= STL_VERBS_EXTD_MAX)
return;
extd = &hfi1_opfn_handlers[capcode];
if (!extd || !extd->response) {
e->atomic_data = capcode;
return;
}
spin_lock_irqsave(&priv->opfn.lock, flags);
if (priv->opfn.completed & OPFN_CODE(capcode)) {
/*
* We are receiving a request for a feature that has already
		 * been negotiated. This may mean that the other side has reset.
*/
priv->opfn.completed &= ~OPFN_CODE(capcode);
if (extd->error)
extd->error(qp);
}
if (extd->response(qp, &data))
priv->opfn.completed |= OPFN_CODE(capcode);
e->atomic_data = (data & ~0xf) | capcode;
trace_hfi1_opfn_state_conn_response(qp);
spin_unlock_irqrestore(&priv->opfn.lock, flags);
}
void opfn_conn_reply(struct rvt_qp *qp, u64 data)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_opfn_type *extd;
u8 capcode;
unsigned long flags;
trace_hfi1_opfn_state_conn_reply(qp);
capcode = data & 0xf;
trace_hfi1_opfn_data_conn_reply(qp, capcode, data);
if (!capcode || capcode >= STL_VERBS_EXTD_MAX)
return;
spin_lock_irqsave(&priv->opfn.lock, flags);
/*
* Either there is no previous request or the reply is not for the
* current request
*/
if (!priv->opfn.curr || capcode != priv->opfn.curr)
goto done;
extd = &hfi1_opfn_handlers[capcode];
if (!extd || !extd->reply)
goto clear;
if (extd->reply(qp, data))
priv->opfn.completed |= OPFN_CODE(capcode);
clear:
/*
* Clear opfn.curr to indicate that the previous request is no longer in
* progress
*/
priv->opfn.curr = STL_VERBS_EXTD_NONE;
trace_hfi1_opfn_state_conn_reply(qp);
done:
spin_unlock_irqrestore(&priv->opfn.lock, flags);
}
void opfn_conn_error(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_opfn_type *extd = NULL;
unsigned long flags;
u16 capcode;
trace_hfi1_opfn_state_conn_error(qp);
/*
	 * The QP has gone into the Error state. We have to invalidate all
	 * negotiated features, including the one in progress (if any). The RC
* QP handling will clean the WQE for the connection request.
*/
trace_hfi1_msg_opfn_conn_error(qp, "error. qp state ", (u64)qp->state);
spin_lock_irqsave(&priv->opfn.lock, flags);
while (priv->opfn.completed) {
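		/*
		 * capcode momentarily holds the isolated low-order bit mask
		 * of the completed field; ilog2() + 1 recovers the capability
		 * code used to index the handler table.
		 */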
capcode = priv->opfn.completed & ~(priv->opfn.completed - 1);
extd = &hfi1_opfn_handlers[ilog2(capcode) + 1];
if (extd->error)
extd->error(qp);
priv->opfn.completed &= ~OPFN_CODE(capcode);
}
priv->opfn.extended = false;
priv->opfn.requested = 0;
priv->opfn.curr = STL_VERBS_EXTD_NONE;
spin_unlock_irqrestore(&priv->opfn.lock, flags);
}
void opfn_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask)
{
struct ib_qp *ibqp = &qp->ibqp;
struct hfi1_qp_priv *priv = qp->priv;
unsigned long flags;
if (attr_mask & IB_QP_RETRY_CNT)
priv->s_retry = attr->retry_cnt;
spin_lock_irqsave(&priv->opfn.lock, flags);
if (ibqp->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
struct tid_rdma_params *local = &priv->tid_rdma.local;
if (attr_mask & IB_QP_TIMEOUT)
priv->tid_retry_timeout_jiffies = qp->timeout_jiffies;
if (qp->pmtu == enum_to_mtu(OPA_MTU_4096) ||
qp->pmtu == enum_to_mtu(OPA_MTU_8192)) {
tid_rdma_opfn_init(qp, local);
/*
* We only want to set the OPFN requested bit when the
* QP transitions to RTS.
*/
if (attr_mask & IB_QP_STATE &&
attr->qp_state == IB_QPS_RTS) {
priv->opfn.requested |= OPFN_MASK(TID_RDMA);
/*
* If the QP is transitioning to RTS and the
* opfn.completed for TID RDMA has already been
* set, the QP is being moved *back* into RTS.
* We can now renegotiate the TID RDMA
* parameters.
*/
if (priv->opfn.completed &
OPFN_MASK(TID_RDMA)) {
priv->opfn.completed &=
~OPFN_MASK(TID_RDMA);
/*
* Since the opfn.completed bit was
* already set, it is safe to assume
				 * that opfn.extended is also set.
*/
opfn_schedule_conn_request(qp);
}
}
} else {
memset(local, 0, sizeof(*local));
}
}
spin_unlock_irqrestore(&priv->opfn.lock, flags);
}
void opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1)
{
struct hfi1_qp_priv *priv = qp->priv;
if (!priv->opfn.extended && hfi1_opfn_extended(bth1) &&
HFI1_CAP_IS_KSET(OPFN)) {
priv->opfn.extended = true;
if (qp->state == IB_QPS_RTS)
opfn_conn_request(qp);
}
}
|
cornelisnetworks/opa-hfi1
|
compat/common/compat_common.h
|
<reponame>cornelisnetworks/opa-hfi1
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(COMPAT_COMMON_H)
#define COMPAT_COMMON_H
#include <linux/socket.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <rdma/opa_port_info.h>
#include <linux/timer.h>
#define IB_QPN_MASK 0xFFFFFF
#include <rdma/ib_hdrs.h>
#if !defined(BAD_DMA_ADDRESS)
#define BAD_DMA_ADDRESS ((u64)0)
#endif
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#define MAX_NICE 19
#define MIN_NICE -20
#define NICE_WIDTH (MAX_NICE - MIN_NICE + 1)
#define IB_DEVICE_RDMA_NETDEV_OPA_VNIC (1ULL << 35)
#define IB_DEVICE_NODE_DESC_MAX 64
#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field) \
((__entry->__data_loc_##field >> 16) & 0xffff)
#define IB_PORT_OPA_MASK_CHG BIT(4)
#ifdef NEED_CURRENT_TIME
#define current_time(inode) CURRENT_TIME
#endif
/* Address format 0x000FF000 */
#if !defined RDMA_CORE_CAP_AF_IB
#define RDMA_CORE_CAP_AF_IB 0x00001000
#endif
#if !defined RDMA_CORE_CAP_ETH_AH
#define RDMA_CORE_CAP_ETH_AH 0x00002000
#endif
#if !defined RDMA_CORE_CAP_OPA_AH
#define RDMA_CORE_CAP_OPA_AH 0x00000000
#endif
#if !defined(kthread_init_work)
#define kthread_init_work(work, fn) init_kthread_work(work, fn)
#endif
#if !defined(RB_ROOT_CACHED)
#define rb_root_cached rb_root
#define RB_ROOT_CACHED RB_ROOT
#define rb_erase_cached(node, root) rb_erase(node, root)
#define rb_first_cached(root) rb_first(root)
#endif
#ifndef smp_store_mb
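/*
 * Note: this fallback issues only a compiler barrier() after the store,
 * whereas the upstream definition uses smp_mb(); assumed adequate for
 * the kernels this compat layer targets.
 */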
#define smp_store_mb(var, value) \
do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif
extern struct srcu_struct debugfs_srcu;
extern struct ib_dma_mapping_ops rvt_default_dma_mapping_ops;
const char *get_unit_name(int unit);
#ifdef NEED_CDEV_SET_PARENT
void cdev_set_parent(struct cdev *p, struct kobject *kobj);
#endif
#ifdef NEED_PCI_REQUEST_IRQ
int pci_request_irq(struct pci_dev *dev, unsigned int nr,
irq_handler_t handler, irq_handler_t thread_fn,
void *dev_id, const char *fmt, ...);
void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
#endif
#if !defined(IFS_RH80) && !defined(IFS_RH81)
static inline int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
pci_reset_bridge_secondary_bus(dev);
return 0;
}
#endif
#ifdef NEED_MM_HELPER_FUNCTIONS
/**
* mmgrab() - Pin a &struct mm_struct.
* @mm: The &struct mm_struct to pin.
*
* Make sure that @mm will not get freed even after the owning task
* exits. This doesn't guarantee that the associated address space
* will still exist later on and mmget_not_zero() has to be used before
* accessing it.
*
 * This is the preferred way to pin @mm for a longer/unbounded amount
* of time.
*
* Use mmdrop() to release the reference acquired by mmgrab().
*
* See also <Documentation/vm/active_mm.txt> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmgrab(struct mm_struct *mm)
{
atomic_inc(&mm->mm_count);
}
#endif
#ifdef NEED_IB_HELPER_FUNCTIONS
struct opa_class_port_info {
u8 base_version;
u8 class_version;
__be16 cap_mask;
__be32 cap_mask2_resp_time;
u8 redirect_gid[16];
__be32 redirect_tc_fl;
__be32 redirect_lid;
__be32 redirect_sl_qp;
__be32 redirect_qkey;
u8 trap_gid[16];
__be32 trap_tc_fl;
__be32 trap_lid;
__be32 trap_hl_qp;
__be32 trap_qkey;
__be16 trap_pkey;
__be16 redirect_pkey;
u8 trap_sl_rsvd;
u8 reserved[3];
} __packed;
/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
RDMA_NETDEV_OPA_VNIC,
RDMA_NETDEV_IPOIB,
};
enum rdma_ah_attr_type {
RDMA_AH_ATTR_TYPE_IB,
RDMA_AH_ATTR_TYPE_ROCE,
RDMA_AH_ATTR_TYPE_OPA,
};
/**
* struct rdma_netdev - rdma netdev
* For cases where netstack interfacing is required.
*/
struct rdma_netdev {
void *clnt_priv;
struct ib_device *hca;
u8 port_num;
/* control functions */
void (*set_id)(struct net_device *netdev, int id);
/* send packet */
int (*send)(struct net_device *dev, struct sk_buff *skb,
struct ib_ah *address, u32 dqpn);
/* multicast */
int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
union ib_gid *gid, u16 mlid,
int set_qkey, u32 qkey);
int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
union ib_gid *gid, u16 mlid);
};
struct roce_ah_attr {
u8 dmac[ETH_ALEN];
};
struct opa_ah_attr {
u32 dlid;
u8 src_path_bits;
};
#define rdma_ah_attr ib_ah_attr
static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
attr->sl = sl;
}
static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
return attr->sl;
}
static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
attr->port_num = port_num;
}
static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
return attr->port_num;
}
static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
bool make_grd)
{
}
static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
return false;
}
static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
enum ib_ah_flags flag)
{
attr->ah_flags = flag;
}
/* To retrieve and modify the grh */
static inline struct ib_global_route
*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
return &attr->grh;
}
static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
__be64 prefix)
{
struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
grh->dgid.global.subnet_prefix = prefix;
}
static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
__be64 if_id)
{
struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
grh->dgid.global.interface_id = if_id;
}
/*
* rdma_modify_ah - Modifies the address vector associated with an address
* handle.
* @ah: The address handle to modify.
* @ah_attr: The new address vector attributes to associate with the
* address handle.
*/
static inline int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
return (ah->device->modify_ah ?
ah->device->modify_ah(ah, ah_attr) : -ENOSYS);
}
/*
* ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 * In the current implementation the only way to get
 * the 32bit lid is from other sources for OPA.
* For IB, lids will always be 16bits so cast the
* value accordingly.
*
* @lid: A 32bit LID
*/
static inline u16 ib_lid_cpu16(u32 lid)
{
WARN_ON_ONCE(lid & 0xFFFF0000);
return (u16)lid;
}
static inline const struct ib_global_route
*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
return &attr->grh;
}
static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
attr->dlid = (u16)dlid;
}
static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
/* Different implementations for QIB and HFI1. */
#ifdef QIB_DRIVER
return (u32)attr->dlid;
#else
u32 dlid = attr->dlid;
const struct ib_global_route *grh = rdma_ah_read_grh(attr);
/* Modify ah_attr.dlid to be in the 32 bit LID space.
* This is how the address will be laid out:
* Assuming MCAST_NR to be 4,
* 32 bit permissive LID = 0xFFFFFFFF
* Multicast LID range = 0xFFFFFFFE to 0xF0000000
* Unicast LID range = 0xEFFFFFFF to 1
* Invalid LID = 0
*/
if (ib_is_opa_gid(&grh->dgid))
dlid = opa_get_lid_from_gid(&grh->dgid);
else if ((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
(dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
(dlid != be32_to_cpu(OPA_LID_PERMISSIVE)))
dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) +
opa_get_mcast_base(OPA_MCAST_NR);
else if (dlid == be16_to_cpu(IB_LID_PERMISSIVE))
dlid = be32_to_cpu(OPA_LID_PERMISSIVE);
return dlid;
#endif
}
static inline enum ib_ah_flags
rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
return attr->ah_flags;
}
static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
return attr->src_path_bits;
}
static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
u8 static_rate)
{
attr->static_rate = static_rate;
}
static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
return attr->static_rate;
}
/**
* rdma_cap_opa_ah - Check if the port of device supports
* OPA Address handles
* @device: Device to check
* @port_num: Port number to check
*
* Return: true if we are running on an OPA device which supports
* the extended OPA addressing.
*/
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
return false;
}
/* Get AH type */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
u32 port_num)
{
return RDMA_AH_ATTR_TYPE_IB;
}
#endif /* NEED_IB_HELPER_FUNCTIONS */
#ifdef NEED_KTHREAD_HELPER_FUNCTIONS
static inline bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work)
{
return queue_kthread_work(worker, work);
}
static inline void kthread_flush_work(struct kthread_work *work)
{
flush_kthread_work(work);
}
/*
* This API was added in commit f5eabf5e5 and is not yet in any of our distro
* backports. Once it does show up we will get a compiler error and we can
* add appropriate #ifdefs
*/
static inline struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
const char namefmt[], const char *cq_name)
{
struct kthread_worker *worker;
struct task_struct *task;
int node = -1;
worker = kzalloc(sizeof(*worker), GFP_KERNEL);
if (!worker)
return NULL;
init_kthread_worker(worker);
if (cpu >= 0)
node = cpu_to_node(cpu);
task = kthread_create_on_node(
kthread_worker_fn,
worker,
node,
namefmt, cq_name);
if (IS_ERR(task)) {
kfree(worker);
return NULL;
}
if (cpu >= 0)
kthread_bind(task, cpu);
wake_up_process(task);
	/* Flush to allow kthread_worker_fn() to properly set worker->task */
flush_kthread_worker(worker);
return worker;
}
static inline void kthread_destroy_worker(struct kthread_worker *worker)
{
if (!worker)
return;
flush_kthread_worker(worker);
kthread_stop(worker->task);
kfree(worker);
}
#endif
/*
* For SELinux until our patches are accepted by the distro
*/
#ifdef HAVE_IB_GET_CACHED_SUBNET_PREFIX
int ib_get_cached_subnet_prefix(struct ib_device *device,
u8 port_num,
u64 *sn_pfx);
#endif
#ifndef HAVE_KMALLOC_ARRAY_NODE
static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
{
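	/* reject requests whose n * size product would overflow size_t */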
if (size != 0 && n > SIZE_MAX / size)
return NULL;
if (__builtin_constant_p(n) && __builtin_constant_p(size))
return kmalloc_node(n * size, flags, node);
return __kmalloc_node(n * size, flags, node);
}
static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}
#endif
#define rdma_netdev ifs_aip_rdma_netdev
struct ifs_aip_rdma_netdev {
void *clnt_priv;
struct ib_device *hca;
u8 port_num;
int mtu;
/*
* cleanup function must be specified.
* FIXME: This is only used for OPA_VNIC and that usage should be
* removed too.
*/
void (*free_rdma_netdev)(struct net_device *netdev);
/* control functions */
void (*set_id)(struct net_device *netdev, int id);
/* send packet */
int (*send)(struct net_device *dev, struct sk_buff *skb,
struct ib_ah *address, u32 dqpn);
/* multicast */
int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
union ib_gid *gid, u16 mlid,
int set_qkey, u32 qkey);
int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
union ib_gid *gid, u16 mlid);
};
#define IB_DEVICE_RDMA_NETDEV IB_DEVICE_RDMA_NETDEV_OPA_VNIC
#define IB_QP_CREATE_NETDEV_USE (1 << 7)
#ifndef atomic_try_cmpxchg
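/*
 * Compat sketch of atomic_try_cmpxchg(): attempt a cmpxchg of *_p from
 * *_po to _n; on failure, write the value actually observed back into
 * *_po and evaluate to false, matching the upstream helper's semantics.
 */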
#define __atomic_try_cmpxchg(type, _p, _po, _n) \
({ \
typeof(_po) __po = (_po); \
typeof(*(_po)) __r, __o = *__po; \
__r = atomic_cmpxchg##type((_p), __o, (_n)); \
if (unlikely(__r != __o)) \
*__po = __r; \
likely(__r == __o); \
})
#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)
#endif
#ifndef atomic_fetch_add_unless
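/*
 * Compat atomic_fetch_add_unless(): atomically add @a to @v unless @v
 * was @u; returns the value @v held beforehand, as upstream does.
 */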
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c = atomic_read(v);
do {
if (unlikely(c == u))
break;
} while (!atomic_try_cmpxchg(v, &c, c + a));
return c;
}
#endif
static inline bool rdma_core_cap_opa_port(struct ib_device *device,
u32 port_num)
{
if (!device)
return false;
return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_PORT_INTEL_OPA)
== RDMA_CORE_PORT_INTEL_OPA;
}
static inline int opa_mtu_enum_to_int(int mtu)
{
switch (mtu) {
case OPA_MTU_8192:
return 8192;
case OPA_MTU_10240:
return 10240;
default:
return(ib_mtu_enum_to_int(mtu));
}
}
static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
int mtu)
{
if (rdma_core_cap_opa_port(device, port))
return opa_mtu_enum_to_int(mtu);
else
return ib_mtu_enum_to_int((enum ib_mtu)mtu);
}
#ifndef timer_setup
#define timer_setup(timer, callback, flags) \
do { \
__init_timer((timer), (flags)); \
(timer)->function = (void (*)(unsigned long))callback; \
(timer)->data = (unsigned long)(timer); \
} while (0)
#endif
#ifndef from_timer
#define from_timer(var, callback_timer, timer_fieldname) \
container_of(callback_timer, typeof(*(var)), timer_fieldname)
#endif
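/*
 * Illustrative sketch (hypothetical names, comment only) of the pattern
 * the two compat macros above enable: timer_setup() stores the timer's
 * own address in ->data, so a new-style callback taking a
 * struct timer_list * can recover the enclosing structure with
 * from_timer()/container_of():
 *
 *	struct example_ctx {
 *		struct timer_list timer;
 *		int pending;
 *	};
 *
 *	static void example_timeout(struct timer_list *t)
 *	{
 *		struct example_ctx *ctx = from_timer(ctx, t, timer);
 *
 *		ctx->pending = 0;
 *	}
 *
 *	timer_setup(&ctx->timer, example_timeout, 0);
 */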
#ifndef HAVE_ENUM_IB_UVERBS_ADVISE_MR_ADVICE
enum ib_uverbs_advise_mr_advice {
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH,
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
};
#endif
#ifndef HAVE_IB_DEVICE_OPS
struct iw_cm_id;
struct iw_cm_conn_param;
struct uverbs_attr_bundle;
struct ib_flow_action_attrs_esp;
struct ib_dm_mr_attr;
struct ib_dm_alloc_attr;
struct ib_counters_read_attr;
struct rdma_netdev_alloc_params;
struct rdma_restrack_entry;
/**
* struct ib_device_ops - InfiniBand device operations
* This structure defines all the InfiniBand device operations, providers will
* need to define the supported operations, otherwise they will be set to null.
*/
struct ib_device_ops {
#ifdef POST_HAS_CONST
int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
const struct ib_send_wr **bad_send_wr);
int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
#else
int (*post_send)(struct ib_qp *qp, struct ib_send_wr *send_wr,
struct ib_send_wr **bad_send_wr);
int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
#endif
void (*drain_rq)(struct ib_qp *qp);
void (*drain_sq)(struct ib_qp *qp);
int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
#ifdef POST_HAS_CONST
int (*post_srq_recv)(struct ib_srq *srq,
const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
#else
int (*post_srq_recv)(struct ib_srq *srq,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
#endif
int (*process_mad)(struct ib_device *device, int process_mad_flags,
u8 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct ib_mad_hdr *in_mad, size_t in_mad_size,
struct ib_mad_hdr *out_mad, size_t *out_mad_size,
u16 *out_mad_pkey_index);
int (*query_device)(struct ib_device *device,
struct ib_device_attr *device_attr,
struct ib_udata *udata);
int (*modify_device)(struct ib_device *device, int device_modify_mask,
struct ib_device_modify *device_modify);
#ifdef GET_DEV_FW_STR_HAS_LEN
void (*get_dev_fw_str)(struct ib_device *device, char *str,
size_t str_len);
#else
void (*get_dev_fw_str)(struct ib_device *device, char *str);
#endif
#ifdef HAVE_GET_VECTOR_AFFINITY
const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
int comp_vector);
#endif
int (*query_port)(struct ib_device *device, u8 port_num,
struct ib_port_attr *port_attr);
int (*modify_port)(struct ib_device *device, u8 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify);
/**
* The following mandatory functions are used only at device
* registration. Keep functions such as these at the end of this
* structure to avoid cache line misses when accessing struct ib_device
* in fast paths.
*/
int (*get_port_immutable)(struct ib_device *device, u8 port_num,
struct ib_port_immutable *immutable);
enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
u8 port_num);
/**
* When calling get_netdev, the HW vendor's driver should return the
* net device of device @device at port @port_num or NULL if such
* a net device doesn't exist. The vendor driver should call dev_hold
* on this net device. The HW vendor's device driver must guarantee
* that this function returns NULL before the net device has finished
* NETDEV_UNREGISTER state.
*/
struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
/**
* rdma netdev operation
*
* Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
* must return -EOPNOTSUPP if it doesn't support the specified type.
*/
#ifdef HAVE_ALLOC_RDMA_NETDEV
struct net_device *(*alloc_rdma_netdev)(
struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
const char *name, unsigned char name_assign_type,
void (*setup)(struct net_device *));
#endif
#ifdef HAVE_RDMA_NETDEV_GET_PARAMS
int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params);
#endif
/**
	 * query_gid should return the GID value for @device when the
	 * @port_num link layer is either IB or iWARP. It is a no-op if
	 * the @port_num port link layer is RoCE.
*/
int (*query_gid)(struct ib_device *device, u8 port_num, int index,
union ib_gid *gid);
/**
* When calling add_gid, the HW vendor's driver should add the gid
* of device of port at gid index available at @attr. Meta-info of
* that gid (for example, the network device related to this gid) is
* available at @attr. @context allows the HW vendor driver to store
* extra information together with a GID entry. The HW vendor driver may
* allocate memory to contain this information and store it in @context
* when a new GID entry is written to. Params are consistent until the
* next call of add_gid or delete_gid. The function should return 0 on
* success or error otherwise. The function could be called
* concurrently for different ports. This function is only called when
* roce_gid_table is used.
*/
#ifdef HAVE_IB_GID_ATTR
#ifdef ADD_GID_HAS_GID
int (*add_gid)(const union ib_gid *gid,
const struct ib_gid_attr *attr,
void **context);
#else
int (*add_gid)(const struct ib_gid_attr *attr, void **context);
#endif
#else
int (*add_gid)(struct ib_device *device, u8 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr, void **context);
#endif
/**
* When calling del_gid, the HW vendor's driver should delete the
* gid of device @device at gid index gid_index of port port_num
* available in @attr.
* Upon the deletion of a GID entry, the HW vendor must free any
* allocated memory. The caller will clear @context afterwards.
* This function is only called when roce_gid_table is used.
*/
#ifdef HAVE_IB_GID_ATTR
int (*del_gid)(const struct ib_gid_attr *attr, void **context);
#else
int (*del_gid)(struct ib_device *device, u8 port_num,
unsigned int index,
void **context);
#endif
int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
u16 *pkey);
#ifdef ALLOC_UCONTEXT_RETURNS_INT
int (*alloc_ucontext)(struct ib_ucontext *context,
struct ib_udata *udata);
#else
struct ib_ucontext *(*alloc_ucontext)(struct ib_device *ibdev,
struct ib_udata *udata);
#endif
#ifdef DEALLOC_UCONTEXT_RETURNS_VOID
void (*dealloc_ucontext)(struct ib_ucontext *context);
#else
int (*dealloc_ucontext)(struct ib_ucontext *context);
#endif
int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
#ifdef ALLOC_PD_RETURN_INT
int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
#else
struct ib_pd *(*alloc_pd)(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata);
#endif
#ifdef DEALLOC_PD_HAS_UDATA
void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
#else
int (*dealloc_pd)(struct ib_pd *pd);
#endif
#ifdef CREATE_AH_RETURNS_INT
int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
u32 flags, struct ib_udata *udata);
#elif defined(CREATE_AH_HAS_FLAGS)
struct ib_ah *(*create_ah)(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
u32 create_flags,
struct ib_udata *udata);
#elif defined(CREATE_AH_HAS_UDATA)
struct ib_ah *(*create_ah)(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
struct ib_udata *udata);
#else
struct ib_ah *(*create_ah)(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr);
#endif
int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
#ifdef DESTROY_AH_RETURNS_VOID
void (*destroy_ah)(struct ib_ah *ah, u32 flags);
#elif defined(DESTROY_AH_HAS_FLAGS)
int (*destroy_ah)(struct ib_ah *ah, u32 flags);
#else
int (*destroy_ah)(struct ib_ah *ah);
#endif
#ifdef CREATE_SRQ_RETURNS_INT
int (*create_srq)(struct ib_srq *srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
#else
struct ib_srq *(*create_srq)(struct ib_pd *ibpd,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
#endif
int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
#ifdef DESTROY_SRQ_HAS_UDATA
void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
#else
int (*destroy_srq)(struct ib_srq *srq);
#endif
struct ib_qp *(*create_qp)(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
#ifdef DESTROY_QP_HAS_UDATA
int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
#else
int (*destroy_qp)(struct ib_qp *qp);
#endif
#ifdef CREATE_CQ_LACKS_CONTEXT
struct ib_cq *(*create_cq)(struct ib_device *device,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
#else
struct ib_cq *(*create_cq)(struct ib_device *device,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
#endif
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
#ifdef DESTROY_CQ_HAS_UDATA
int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
#else
int (*destroy_cq)(struct ib_cq *cq);
#endif
int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_pd *pd, struct ib_udata *udata);
#ifdef DEREG_MR_HAS_UDATA
int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
#else
int (*dereg_mr)(struct ib_mr *mr);
#endif
#ifdef ALLOC_MR_HAS_UDATA
struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
#else
struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
#endif
int (*advise_mr)(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice, u32 flags,
struct ib_sge *sg_list, u32 num_sge,
struct uverbs_attr_bundle *attrs);
int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
int (*dealloc_mw)(struct ib_mw *mw);
struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
u64 iova);
int (*unmap_fmr)(struct list_head *fmr_list);
int (*dealloc_fmr)(struct ib_fmr *fmr);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
#ifdef HAVE_ALLOC_XRCD
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
struct ib_udata *udata);
#ifdef DEALLOC_XRCD_HAS_UDATA
int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
#else
int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
#endif
#endif
#ifdef CREATE_FLOW_HAS_UDATA
struct ib_flow *(*create_flow)(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain, struct ib_udata *udata);
#else
struct ib_flow *(*create_flow)(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain);
#endif
int (*destroy_flow)(struct ib_flow *flow_id);
struct ib_flow_action *(*create_flow_action_esp)(
struct ib_device *device,
const struct ib_flow_action_attrs_esp *attr,
struct uverbs_attr_bundle *attrs);
int (*destroy_flow_action)(struct ib_flow_action *action);
int (*modify_flow_action_esp)(
struct ib_flow_action *action,
const struct ib_flow_action_attrs_esp *attr,
struct uverbs_attr_bundle *attrs);
int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
int state);
int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
struct ifla_vf_info *ivf);
int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
struct ifla_vf_stats *stats);
int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
int type);
struct ib_wq *(*create_wq)(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
#ifdef DESTROY_WQ_HAS_UDATA
int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
#else
int (*destroy_wq)(struct ib_wq *wq);
#endif
int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *(*create_rwq_ind_table)(
struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *(*alloc_dm)(struct ib_device *device,
struct ib_ucontext *context,
struct ib_dm_alloc_attr *attr,
struct uverbs_attr_bundle *attrs);
int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
struct ib_counters *(*create_counters)(
struct ib_device *device, struct uverbs_attr_bundle *attrs);
int (*destroy_counters)(struct ib_counters *counters);
int (*read_counters)(struct ib_counters *counters,
struct ib_counters_read_attr *counters_read_attr,
struct uverbs_attr_bundle *attrs);
/**
* alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
* driver initialized data. The struct is kfree()'ed by the sysfs
* core when the device is removed. A lifespan of -1 in the return
* struct tells the core to set a default lifespan.
*/
struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
u8 port_num);
/**
* get_hw_stats - Fill in the counter value(s) in the stats struct.
* @index - The index in the value array we wish to have updated, or
* num_counters if we want all stats updated
* Return codes -
* < 0 - Error, no counters updated
* index - Updated the single counter pointed to by index
* num_counters - Updated all counters (will reset the timestamp
* and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
* one given in index at their option
*/
int (*get_hw_stats)(struct ib_device *device,
struct rdma_hw_stats *stats, u8 port, int index);
/*
	 * This function is called once for each port when an ib device is
* registered.
*/
int (*init_port)(struct ib_device *device, u8 port_num,
struct kobject *port_sysfs);
/**
* Allows rdma drivers to add their own restrack attributes.
*/
int (*fill_res_entry)(struct sk_buff *msg,
struct rdma_restrack_entry *entry);
/* Device lifecycle callbacks */
/*
* Called after the device becomes registered, before clients are
* attached
*/
int (*enable_driver)(struct ib_device *dev);
/*
* This is called as part of ib_dealloc_device().
*/
void (*dealloc_driver)(struct ib_device *dev);
/* iWarp CM callbacks */
void (*iw_add_ref)(struct ib_qp *qp);
void (*iw_rem_ref)(struct ib_qp *qp);
struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
int (*iw_connect)(struct iw_cm_id *cm_id,
struct iw_cm_conn_param *conn_param);
int (*iw_accept)(struct iw_cm_id *cm_id,
struct iw_cm_conn_param *conn_param);
int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
u8 pdata_len);
int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
};
#endif
#ifndef HAVE_IB_SET_DEVICE_OPS
void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops);
#endif
#ifndef HAVE_ARRAY_SIZE
#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
#define __unsigned_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a * __b; \
__builtin_constant_p(__b) ? \
__b > 0 && __a > type_max(typeof(__a)) / __b : \
__a > 0 && __b > type_max(typeof(__b)) / __a; \
})
#define check_mul_overflow(a, b, d) __unsigned_mul_overflow(a, b, d)
static inline size_t array_size(size_t a, size_t b)
{
size_t bytes;
	if (check_mul_overflow(a, b, &bytes))
return SIZE_MAX;
return bytes;
}
#endif
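/*
 * Usage sketch (hypothetical, not part of the original file): guarding a
 * multi-element allocation with the array_size() fallback above. On
 * overflow array_size() returns SIZE_MAX, which the slab allocator
 * rejects, so the caller gets a clean failure instead of a short buffer.
 */
#if 0
static int example_alloc_table(size_t nr_entries, u64 **out)
{
	u64 *table = kmalloc(array_size(nr_entries, sizeof(*table)),
			     GFP_KERNEL);

	if (!table)
		return -ENOMEM;
	*out = table;
	return 0;
}
#endif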
#ifndef CREATE_AH_HAS_FLAGS
enum rdma_create_ah_flags {
/* In a sleepable context */
RDMA_CREATE_AH_SLEEPABLE = BIT(0),
};
#endif
#if !defined(DESTROY_AH_HAS_FLAGS) && !defined(DESTROY_AH_RETURNS_VOID)
enum rdma_destroy_ah_flags {
/* In a sleepable context */
RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
};
#endif
#ifndef HAVE_VM_FAULT_T
typedef int vm_fault_t;
#endif
#ifdef IB_MODIFY_QP_IS_OK_HAS_LINK
#define ib_modify_qp_is_ok(a,b,c,d) ib_modify_qp_is_ok(a,b,c,d,IB_LINK_LAYER_INFINIBAND)
#endif
#ifndef HAVE_RDMA_COPY_AH_ATTR
static inline
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah)
{
}
static inline
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
const struct rdma_ah_attr *src)
{
*dest = *src;
}
static inline
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
const struct rdma_ah_attr *new)
{
*old = *new;
}
static inline
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
{
*dest = *src;
}
#endif
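/*
 * Usage sketch (hypothetical): on kernels without rdma_copy_ah_attr()
 * the fallbacks above reduce to plain struct assignment, which is only
 * safe while rdma_ah_attr carries no reference-counted state - exactly
 * what these stubs assume.
 */
#if 0
static void example_cache_ah_attr(struct rdma_ah_attr *dst,
				  const struct rdma_ah_attr *src)
{
	rdma_replace_ah_attr(dst, src);	/* old contents simply overwritten */
}
#endif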
#endif
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/vnic.h
|
#ifndef _HFI1_VNIC_H
#define _HFI1_VNIC_H
/*
* Copyright(c) 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <rdma/opa_vnic.h>
#include "hfi.h"
#include "sdma.h"
#define HFI1_VNIC_MAX_TXQ 16
#define HFI1_VNIC_MAX_PAD 12
/* L4 header definitions */
#define HFI1_VNIC_L4_HDR_OFFSET OPA_VNIC_L2_HDR_LEN
#define HFI1_VNIC_GET_L4_HDR(data) \
(*((u16 *)((u8 *)(data) + HFI1_VNIC_L4_HDR_OFFSET)))
#define HFI1_VNIC_GET_VESWID(data) \
(HFI1_VNIC_GET_L4_HDR(data) & 0xFFF)
/* Service class */
#define HFI1_VNIC_SC_OFFSET_LOW 6
#define HFI1_VNIC_SC_OFFSET_HI 7
#define HFI1_VNIC_SC_SHIFT 4
#define HFI1_VNIC_MAX_QUEUE 16
#define HFI1_NUM_VNIC_CTXT 8
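/*
 * Usage sketch (hypothetical): extracting the virtual switch id from a
 * received VNIC frame with the accessors above (the VESW id is the low
 * 12 bits of the L4 header word).
 */
#if 0
static u16 example_get_veswid(void *l2_frame)
{
	return HFI1_VNIC_GET_VESWID(l2_frame);
}
#endif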
/**
* struct hfi1_vnic_sdma - VNIC per Tx ring SDMA information
 * @dd: device data pointer
 * @sde: sdma engine
 * @vinfo: vnic info pointer
 * @wait: iowait structure
 * @stx: sdma tx request
 * @state: vnic Tx ring SDMA state
 * @q_idx: vnic Tx queue index
 * @pkts_sent: indicator that packets have been sent from this queue
*/
struct hfi1_vnic_sdma {
struct hfi1_devdata *dd;
struct sdma_engine *sde;
struct hfi1_vnic_vport_info *vinfo;
struct iowait wait;
struct sdma_txreq stx;
unsigned int state;
u8 q_idx;
bool pkts_sent;
};
/**
* struct hfi1_vnic_rx_queue - HFI1 VNIC receive queue
* @idx: queue index
* @vinfo: pointer to vport information
* @netdev: network device
* @napi: netdev napi structure
* @skbq: queue of received socket buffers
*/
struct hfi1_vnic_rx_queue {
u8 idx;
struct hfi1_vnic_vport_info *vinfo;
struct net_device *netdev;
struct napi_struct napi;
};
/**
* struct hfi1_vnic_vport_info - HFI1 VNIC virtual port information
* @dd: device data pointer
* @netdev: net device pointer
* @flags: state flags
* @lock: vport lock
* @num_tx_q: number of transmit queues
* @num_rx_q: number of receive queues
* @vesw_id: virtual switch id
* @rxq: Array of receive queues
* @stats: per queue stats
* @sdma: VNIC SDMA structure per TXQ
*/
struct hfi1_vnic_vport_info {
struct hfi1_devdata *dd;
struct net_device *netdev;
unsigned long flags;
/* Lock used around state updates */
struct mutex lock;
u8 num_tx_q;
u8 num_rx_q;
u16 vesw_id;
struct hfi1_vnic_rx_queue rxq[HFI1_NUM_VNIC_CTXT];
struct opa_vnic_stats stats[HFI1_VNIC_MAX_QUEUE];
struct hfi1_vnic_sdma sdma[HFI1_VNIC_MAX_TXQ];
};
#define v_dbg(format, arg...) \
netdev_dbg(vinfo->netdev, format, ## arg)
#define v_err(format, arg...) \
netdev_err(vinfo->netdev, format, ## arg)
#define v_info(format, arg...) \
netdev_info(vinfo->netdev, format, ## arg)
/* vnic hfi1 internal functions */
void hfi1_vnic_setup(struct hfi1_devdata *dd);
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd);
void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd);
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet);
void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo);
bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
u8 q_idx);
/* vnic rdma netdev operations */
struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
u8 port_num,
enum rdma_netdev_t type,
const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *));
int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
struct hfi1_vnic_vport_info *vinfo,
struct sk_buff *skb, u64 pbc, u8 plen);
#endif /* _HFI1_VNIC_H */
|
cornelisnetworks/opa-hfi1
|
compat/RH73/compat.c
|
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/export.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include "../hfi1/hfi.h"
#include "compat.h"
/* Address handles */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
struct ib_ah *ah;
ah = pd->device->create_ah(pd, ah_attr);
if (!IS_ERR(ah)) {
ah->device = pd->device;
ah->pd = pd;
ah->uobject = NULL;
atomic_inc(&pd->usecnt);
}
return ah;
}
EXPORT_SYMBOL(rdma_create_ah);
int pci_alloc_irq_vectors(struct pci_dev *pcidev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags)
{
int i, nvec;
struct hfi1_msix_entry *entries;
if (max_vecs < min_vecs)
return -ERANGE;
nvec = max_vecs;
if (flags & PCI_IRQ_MSIX) {
entries = kcalloc(nvec, sizeof(*entries), GFP_KERNEL);
if (!entries)
return -ENOMEM;
/* 1-1 MSI-X entry assignment */
for (i = 0; i < max_vecs; i++)
entries[i].msix.entry = i;
msix_setup(pcidev, pcidev->msix_cap, &nvec, entries);
return nvec;
}
if (flags & PCI_IRQ_LEGACY) {
hfi1_enable_intx(pcidev);
return 1;
}
return -ENOSPC;
}
EXPORT_SYMBOL(pci_alloc_irq_vectors);
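/*
 * Usage sketch (hypothetical): requesting MSI-X vectors through the
 * compat helper above. Note that on MSI-X failure this implementation
 * falls back to INTx inside msix_setup() and returns a count of 0, so
 * callers must treat 0 as "legacy interrupt in use", unlike upstream.
 */
#if 0
static int example_setup_irqs(struct pci_dev *pdev, unsigned int want)
{
	int nvec = pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);

	if (nvec < 0)
		return nvec;	/* no interrupts available at all */
	return nvec;		/* granted MSI-X count (0 => INTx) */
}
#endif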
void msix_setup(struct pci_dev *pcidev, int pos, u32 *msixcnt,
struct hfi1_msix_entry *hfi1_msix_entry)
{
int ret;
int nvec = *msixcnt;
struct msix_entry *msix_entry;
int i;
/*
* We can't pass hfi1_msix_entry array to msix_setup
* so use a dummy msix_entry array and copy the allocated
* irq back to the hfi1_msix_entry array.
*/
msix_entry = kmalloc_array(nvec, sizeof(*msix_entry), GFP_KERNEL);
if (!msix_entry) {
ret = -ENOMEM;
goto do_intx;
}
for (i = 0; i < nvec; i++)
msix_entry[i] = hfi1_msix_entry[i].msix;
ret = pci_enable_msix_range(pcidev, msix_entry, 1, nvec);
if (ret < 0)
goto free_msix_entry;
nvec = ret;
for (i = 0; i < nvec; i++)
hfi1_msix_entry[i].msix = msix_entry[i];
kfree(msix_entry);
*msixcnt = nvec;
return;
free_msix_entry:
kfree(msix_entry);
do_intx:
*msixcnt = 0;
hfi1_enable_intx(pcidev);
}
EXPORT_SYMBOL(msix_setup);
/**
* bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
 * @list: indicates whether the bitmap should be printed in list format
* @buf: page aligned buffer into which string is placed
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
*
* Output format is a comma-separated list of decimal numbers and
* ranges if list is specified or hex digits grouped into comma-separated
* sets of 8 digits/set. Returns the number of characters written to buf.
*
* It is assumed that @buf is a pointer into a PAGE_SIZE area and that
* sufficient storage remains at @buf to accommodate the
* bitmap_print_to_pagebuf() output.
*/
int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
int nmaskbits)
{
ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
int n = 0;
if (len > 1)
n = list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
return n;
}
EXPORT_SYMBOL(bitmap_print_to_pagebuf);
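/*
 * Usage sketch (hypothetical): a sysfs show() handler printing the
 * online CPU mask in list form (e.g. "0-3,8"), the typical caller of
 * bitmap_print_to_pagebuf().
 */
#if 0
static ssize_t example_cpus_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return bitmap_print_to_pagebuf(true, buf,
				       cpumask_bits(cpu_online_mask),
				       nr_cpu_ids);
}
#endif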
/**
* debugfs_use_file_start - mark the beginning of file data access
* @dentry: the dentry object whose data is being accessed.
* @srcu_idx: a pointer to some memory to store a SRCU index in.
*
* Up to a matching call to debugfs_use_file_finish(), any
* successive call into the file removing functions debugfs_remove()
* and debugfs_remove_recursive() will block. Since associated private
* file data may only get freed after a successful return of any of
* the removal functions, you may safely access it after a successful
* call to debugfs_use_file_start() without worrying about
* lifetime issues.
*
* If -%EIO is returned, the file has already been removed and thus,
* it is not safe to access any of its data. If, on the other hand,
* it is allowed to access the file data, zero is returned.
*
* Regardless of the return code, any call to
* debugfs_use_file_start() must be followed by a matching call
* to debugfs_use_file_finish().
*/
int debugfs_use_file_start(struct dentry *dentry, int *srcu_idx)
__acquires(&debugfs_srcu)
{
*srcu_idx = srcu_read_lock(&debugfs_srcu);
barrier();
if (d_unlinked(dentry))
return -EIO;
return 0;
}
EXPORT_SYMBOL(debugfs_use_file_start);
/**
* debugfs_use_file_finish - mark the end of file data access
* @srcu_idx: the SRCU index "created" by a former call to
* debugfs_use_file_start().
*
* Allow any ongoing concurrent call into debugfs_remove() or
* debugfs_remove_recursive() blocked by a former call to
* debugfs_use_file_start() to proceed and return to its caller.
*/
void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu)
{
srcu_read_unlock(&debugfs_srcu, srcu_idx);
}
EXPORT_SYMBOL(debugfs_use_file_finish);
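/*
 * Usage sketch (hypothetical): the start/finish pair protecting a
 * debugfs read against a racing debugfs_remove(), per the kernel-doc
 * above. A return of -EIO means the file is already gone.
 */
#if 0
static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	ssize_t ret;
	int srcu_idx;

	ret = debugfs_use_file_start(file->f_path.dentry, &srcu_idx);
	if (!ret)
		ret = simple_read_from_buffer(buf, count, ppos, "ok\n", 3);
	debugfs_use_file_finish(srcu_idx);
	return ret;
}
#endif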
int rvt_mapping_error(struct ib_device *dev, u64 dma_addr)
{
return dma_addr == BAD_DMA_ADDRESS;
}
EXPORT_SYMBOL(rvt_mapping_error);
u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr,
size_t size, enum dma_data_direction direction)
{
if (WARN_ON(!valid_dma_direction(direction)))
return BAD_DMA_ADDRESS;
return (u64)cpu_addr;
}
EXPORT_SYMBOL(rvt_dma_map_single);
void rvt_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
enum dma_data_direction direction)
{
/* This is a stub, nothing to be done here */
}
EXPORT_SYMBOL(rvt_dma_unmap_single);
u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
u64 addr;
if (WARN_ON(!valid_dma_direction(direction)))
return BAD_DMA_ADDRESS;
addr = (u64)page_address(page);
if (addr)
addr += offset;
return addr;
}
EXPORT_SYMBOL(rvt_dma_map_page);
void rvt_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
enum dma_data_direction direction)
{
/* This is a stub, nothing to be done here */
}
EXPORT_SYMBOL(rvt_dma_unmap_page);
int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction)
{
struct scatterlist *sg;
u64 addr;
int i;
int ret = nents;
if (WARN_ON(!valid_dma_direction(direction)))
return 0;
for_each_sg(sgl, sg, nents, i) {
addr = (u64)page_address(sg_page(sg));
if (!addr) {
ret = 0;
break;
}
sg->dma_address = addr + sg->offset;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
sg->dma_length = sg->length;
#endif
}
return ret;
}
EXPORT_SYMBOL(rvt_map_sg);
void rvt_unmap_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
/* This is a stub, nothing to be done here */
}
EXPORT_SYMBOL(rvt_unmap_sg);
void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
size_t size, enum dma_data_direction dir)
{
}
EXPORT_SYMBOL(rvt_sync_single_for_cpu);
void rvt_sync_single_for_device(struct ib_device *dev, u64 addr,
size_t size,
enum dma_data_direction dir)
{
}
EXPORT_SYMBOL(rvt_sync_single_for_device);
void *rvt_dma_alloc_coherent(struct ib_device *dev, size_t size,
u64 *dma_handle, gfp_t flag)
{
struct page *p;
void *addr = NULL;
p = alloc_pages(flag, get_order(size));
if (p)
addr = page_address(p);
if (dma_handle)
*dma_handle = (u64)addr;
return addr;
}
EXPORT_SYMBOL(rvt_dma_alloc_coherent);
void rvt_dma_free_coherent(struct ib_device *dev, size_t size,
void *cpu_addr, u64 dma_handle)
{
free_pages((unsigned long)cpu_addr, get_order(size));
}
EXPORT_SYMBOL(rvt_dma_free_coherent);
/*
* We should only need to wait 100ms after FLR, but some devices take longer.
* Wait for up to 1000ms for config space to return something other than -1.
* Intel IGD requires this when an LCD panel is attached. We read the 2nd
* dword because VFs don't implement the 1st dword.
*/
static void pci_flr_wait(struct pci_dev *dev)
{
int i = 0;
u32 id;
do {
msleep(100);
pci_read_config_dword(dev, PCI_COMMAND, &id);
} while (i++ < 10 && id == ~0);
if (id == ~0)
dev_warn(&dev->dev, "Failed to return from FLR\n");
else if (i > 1)
dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
(i - 1) * 100);
}
/**
* pcie_flr - initiate a PCIe function level reset
* @dev: device to reset
*
* Initiate a function level reset on @dev. The caller should ensure the
* device supports FLR before calling this function, e.g. by using the
* pcie_has_flr() helper.
*/
void pcie_flr(struct pci_dev *dev)
{
if (!pci_wait_for_pending_transaction(dev))
dev_err(&dev->dev,
"timed out waiting for pending transaction; performing function level reset anyway\n");
pcie_capability_set_word(dev,
PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_BCR_FLR);
pci_flr_wait(dev);
}
EXPORT_SYMBOL_GPL(pcie_flr);
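/*
 * Usage sketch (hypothetical): check the FLR capability bit before
 * triggering the reset, as the kernel-doc above requires (upstream
 * offers pcie_has_flr(); a raw capability read is shown instead).
 */
#if 0
static void example_try_flr(struct pci_dev *pdev)
{
	u32 cap;

	pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP, &cap);
	if (cap & PCI_EXP_DEVCAP_FLR)
		pcie_flr(pdev);
}
#endif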
struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
.mapping_error = rvt_mapping_error,
.map_single = rvt_dma_map_single,
.unmap_single = rvt_dma_unmap_single,
.map_page = rvt_dma_map_page,
.unmap_page = rvt_dma_unmap_page,
.map_sg = rvt_map_sg,
.unmap_sg = rvt_unmap_sg,
.sync_single_for_cpu = rvt_sync_single_for_cpu,
.sync_single_for_device = rvt_sync_single_for_device,
.alloc_coherent = rvt_dma_alloc_coherent,
.free_coherent = rvt_dma_free_coherent
};
|
cornelisnetworks/opa-hfi1
|
compat/RH81/compat.h
|
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Copyright(c) 2019 Intel Corporation.
*/
#if !defined(RH81_COMPAT_H)
#define RH81_COMPAT_H
#define CREATE_AH_HAS_UDATA
#define HAVE_ALLOC_RDMA_NETDEV
#define CREATE_FLOW_HAS_UDATA
#define HAVE_IB_GID_ATTR
#define ADD_GID_HAS_GID
#define HAVE_RDMA_NETDEV_GET_PARAMS
#define HAVE_ARRAY_SIZE
#define HAVE_NOSPEC_H
#define HAVE_ENUM_IB_UVERBS_ADVISE_MR_ADVICE
#define HAVE_IB_DEVICE_OPS
#define HAVE_IB_SET_DEVICE_OPS
#define POST_HAS_CONST
#define CREATE_AH_HAS_FLAGS
#define DESTROY_AH_HAS_FLAGS
#define HAVE_VM_FAULT_T
#define HAVE_KMALLOC_ARRAY_NODE
#define HAVE_IBDEV_DRIVER_ID
#define HAVE_IB_GET_CACHED_SUBNET_PREFIX
#define HAVE_MAX_SEND_SGE
#define HAVE_SECURITY_H
#define HAVE_RDMA_COPY_AH_ATTR
#include "compat_common.h"
#define rdma_set_device_sysfs_group(a, b)
#endif //RH81_COMPAT_H
|
cornelisnetworks/opa-hfi1
|
compat/SLES15SP1/compat.h
|
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(SLES15SP1_COMPAT_H)
#define SLES15SP1_COMPAT_H
#include <linux/device.h>
#include <rdma/ib_mad.h>
#define CREATE_AH_HAS_UDATA
#define HAVE_ALLOC_RDMA_NETDEV
#define POST_HAS_CONST
#define HAVE_IB_GID_ATTR
#define CREATE_FLOW_HAS_UDATA
#define HAVE_RDMA_NETDEV_GET_PARAMS
#define HAVE_ARRAY_SIZE
#define HAVE_NOSPEC_H
#define HAVE_MAX_SEND_SGE
#define HAVE_IBDEV_DRIVER_ID
#define HAVE_IB_GET_CACHED_SUBNET_PREFIX
#define HAVE_SECURITY_H
#define HAVE_KMALLOC_ARRAY_NODE
#define HAVE_RDMA_COPY_AH_ATTR
#include "compat_common.h"
#define rdma_create_ah(a, b, c) rdma_create_ah(a, b)
#define rdma_destroy_ah(a, b) rdma_destroy_ah(a)
#undef access_ok
#define access_ok(addr, size) \
({ \
WARN_ON_IN_IRQ(); \
likely(!__range_not_ok(addr, size, user_addr_max())); \
})
#define _ib_alloc_device ib_alloc_device
#undef CONFIG_FAULT_INJECTION
#endif //SLES15SP1_COMPAT_H
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/rcva.c
|
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/err.h>
#include <linux/errno.h>
#include "rcva.h"
#include "hfi.h"
#define GROUP_SIZE RCV_INCREMENT
#define MAX_BUFFER_SIZE (256 * 1024)
#define MAX_PER_CONTEXT (2048 / GROUP_SIZE)
#define MIN_NEE_CONTEXTS 2
#define MIN_NEE_BUFFERS 1024
#define MIN_TEE_BUFFERS 2
#define MIN_EE_BUFFERS 1024
/**
* rcva_size - partition the receive array
* @r: the rcva to store size attributes
 * @attr: rcva_create_attr structure for parameters
* @total_ee: the size of the eager entry pool
* @total_ne: the size of the network eager pool
* @total_te: the size of the tid entry pool
*
* All computations and variables reflect groups
* to simplify the computations.
*
* Legend:
* * ee - eager entries
* * ne - network eager entries
* * te - tid eager entries
*
 * The routine assumes that the following dd variables have been
* correctly determined:
* * n_krcv_queues
* * num_vnic_contexts
* * num_user_contexts
* * num_netdev_contexts
*
* The per context size is returned in r.
*
*/
static void rcva_size(struct rcva *r,
struct rcva_create_attr *attr,
u16 *total_ee,
u16 *total_ne,
u16 *total_te)
{
struct hfi1_devdata *dd = r->dd;
u32 groups = chip_rcv_array_count(dd) / GROUP_SIZE;
/* determine number of contexts */
u32 ee_contexts = attr->ee_contexts;
u32 ne_contexts = attr->ne_contexts;
u32 te_contexts = attr->te_contexts;
/* .e_groups - each context group size */
/* total_.e_groups - total size across all contexts */
u32 ee_groups, total_ee_groups;
u32 te_groups, total_te_groups;
u32 ne_groups, total_ne_groups;
u32 extra_groups;
bool multi_packet = attr->multi_packet;
/* first size tid */
te_groups = te_contexts ?
min_t(u32, groups / te_contexts, MAX_PER_CONTEXT) : 0;
total_te_groups = te_groups * te_contexts;
dd_dev_dbg(dd, "ee_contexts %u te_contexts %u ne_contexts %u\n",
ee_contexts, te_contexts, ne_contexts);
/* kernel/PSM with multi-packet */
ee_groups = attr->max_buffers / GROUP_SIZE;
ee_groups = min_t(u32, ee_groups, MAX_PER_CONTEXT);
total_ee_groups = ee_groups * ee_contexts;
ne_groups = MAX_PER_CONTEXT;
total_ne_groups = ne_groups * ne_contexts;
dd_dev_dbg(dd, "initial sizes: ee_groups %u total_ee_groups %u ne_groups %u total_ne_groups %u te_groups %u total_te_groups %u\n",
ee_groups, total_ee_groups,
ne_groups, total_ne_groups,
te_groups, total_te_groups);
while (true) {
if ((total_ee_groups + total_te_groups + total_ne_groups) <=
groups)
break;
/* we hit the floor for buffers per group - cut contexts */
if (ne_contexts > MIN_NEE_CONTEXTS) {
ne_contexts /= 2;
total_ne_groups = ne_groups * ne_contexts;
}
if ((total_ee_groups + total_te_groups + total_ne_groups) <=
groups)
break;
if (ne_groups > MIN_NEE_BUFFERS / GROUP_SIZE) {
ne_groups /= 2;
total_ne_groups = ne_groups * ne_contexts;
}
if ((total_ee_groups + total_te_groups + total_ne_groups) <=
groups)
break;
if (!multi_packet) {
if (ee_groups > MIN_EE_BUFFERS)
ee_groups /= 2;
total_ee_groups = ee_groups * ee_contexts;
}
if ((total_ee_groups + total_te_groups + total_ne_groups) <=
groups)
break;
if (te_groups > MIN_TEE_BUFFERS)
total_te_groups = --te_groups * te_contexts;
if ((total_ee_groups + total_te_groups + total_ne_groups) <=
groups)
break;
dd_dev_dbg(dd, "continue with ee_groups %u ne_groups %u te_groups %u ne_contexts %u\n",
ee_groups, ne_groups, te_groups, ne_contexts);
}
dd_dev_dbg(dd, "after reductions: ee_groups %u total_ee_groups %u ne_groups %u total_ne_groups %u te_groups %u total_te_groups %u\n",
ee_groups, total_ee_groups,
ne_groups, total_ne_groups,
te_groups, total_te_groups);
extra_groups = groups -
(total_ee_groups + total_te_groups + total_ne_groups);
dd_dev_dbg(dd, "extra_groups %u\n", extra_groups);
/* try to increase tid */
if (te_contexts) {
te_groups += extra_groups / te_contexts;
te_groups = min_t(u32, MAX_PER_CONTEXT, te_groups);
total_te_groups = te_groups * te_contexts;
}
extra_groups = groups -
(total_ee_groups + total_te_groups + total_ne_groups);
/* try to increase network */
if (ne_contexts) {
ne_groups += extra_groups / ne_contexts;
ne_groups = min_t(u32, MAX_PER_CONTEXT, ne_groups);
total_ne_groups = ne_groups * ne_contexts;
}
extra_groups = groups -
(total_ee_groups + total_te_groups + total_ne_groups);
ee_groups += extra_groups / ee_contexts;
dd_dev_dbg(dd, "final sizing: ne_contexts %u ee_groups %u total_ee_groups %u ne_groups %u total_ne_groups %u te_groups %u total_te_groups %u extra_groups %u\n",
ne_contexts, ee_groups, total_ee_groups,
ne_groups, total_ne_groups,
te_groups, total_te_groups, extra_groups);
/* return sizes of each pool */
*total_ee = total_ee_groups;
*total_ne = total_ne_groups;
*total_te = total_te_groups;
/* return size of each contexts group */
r->ee_size = ee_groups;
r->ne_size = ne_groups;
r->te_size = te_groups;
r->netdev_contexts = ne_contexts;
}
/**
* rcva_create - returns a rcva structure
* @dd: the device data
 * @attr: rcva_create_attr structure for parameters
*
 * This routine initializes and returns a rcva structure
* for assigning within the dd structure.
*
* The routine sizes and establishes pools that support the allocation of
* eager entries, tid entries, and network entries that are used by
* the various types of contexts.
*
 * The chunks are added as one-relative to distinguish a failure
* of 0 from a successful allocation. The allocate functions and
* deallocate functions make the necessary adjustments.
*
*/
struct rcva *rcva_create(struct hfi1_devdata *dd,
struct rcva_create_attr *attr)
{
struct rcva *r;
u16 total_ee, total_ne, total_te;
/* chunks are one relative */
u16 start_ee = 0, start_ne, start_te;
r = kzalloc_node(sizeof(*r), GFP_KERNEL, dd->node);
if (!r)
return ERR_PTR(-ENOMEM);
r->dd = dd;
rcva_size(r, attr, &total_ee, &total_ne, &total_te);
start_ne = total_ee;
start_te = total_ee + total_ne;
r->ee_pool = gen_pool_create(0, dd->node);
if (!r->ee_pool)
goto bail;
if (total_ee &&
gen_pool_add(r->ee_pool, start_ee + 1, total_ee, dd->node))
goto bail;
if (total_ne) {
r->ne_pool = gen_pool_create(0, dd->node);
if (!r->ne_pool)
goto bail;
if (gen_pool_add(r->ne_pool, start_ne + 1, total_ne, dd->node))
goto bail;
}
if (total_te) {
r->te_pool = gen_pool_create(0, dd->node);
if (!r->te_pool)
goto bail;
if (gen_pool_add(r->te_pool, start_te + 1, total_te, dd->node))
goto bail;
}
return r;
bail:
rcva_destroy(r);
return ERR_PTR(-ENOMEM);
}
/**
* rcva_destroy - tears down a structure returned by rcva_create()
* @r: the rcva structure to be torn down
*/
void rcva_destroy(struct rcva *r)
{
if (!r)
return;
if (r->te_pool)
gen_pool_destroy(r->te_pool);
r->te_pool = NULL;
if (r->ne_pool)
gen_pool_destroy(r->ne_pool);
r->ne_pool = NULL;
if (r->ee_pool)
gen_pool_destroy(r->ee_pool);
r->ee_pool = NULL;
kfree(r);
}
/**
* rvca_alloc_ee_slice - allocate an eager entry slice
* @r: the rcva structure that holds the slices
* @s: the slice structure that will hold the slice
*/
int rvca_alloc_ee_slice(struct rcva *r, struct rcva_slice *s)
{
unsigned long ret;
ret = gen_pool_alloc(r->ee_pool, r->ee_size);
if (!ret)
return -ENOMEM;
/* make zero relative */
s->base = ret - 1;
s->size = r->ee_size;
return 0;
}
/**
 * rvca_alloc_ne_slice - allocate a network device slice
* @r: the rcva structure that holds the slices
* @s: the slice structure that will hold the slice
*/
int rvca_alloc_ne_slice(struct rcva *r, struct rcva_slice *s)
{
unsigned long ret;
ret = gen_pool_alloc(r->ne_pool, r->ne_size);
if (!ret)
return -ENOMEM;
/* make zero relative */
s->base = ret - 1;
s->size = r->ne_size;
return 0;
}
/**
 * rvca_alloc_te_slice - allocate a tid slice
* @r: the rcva structure that holds the slices
* @s: the slice structure that will hold the slice
*/
int rvca_alloc_te_slice(struct rcva *r, struct rcva_slice *s)
{
unsigned long ret;
ret = gen_pool_alloc(r->te_pool, r->te_size);
if (!ret)
return -ENOMEM;
/* make zero relative */
s->base = ret - 1;
s->size = r->te_size;
return 0;
}
/**
* rvca_free_ee_slice - free an eager entry slice
* @r: the rcva structure that holds the slices
* @s: the slice structure that will be returned
*/
void rvca_free_ee_slice(struct rcva *r, struct rcva_slice *s)
{
if (s->size)
gen_pool_free(r->ee_pool, s->base + 1, s->size);
s->size = 0;
}
/**
 * rvca_free_ne_slice - free a network entry slice
* @r: the rcva structure that holds the slices
* @s: the slice structure that will be returned
*/
void rvca_free_ne_slice(struct rcva *r, struct rcva_slice *s)
{
if (s->size)
gen_pool_free(r->ne_pool, s->base + 1, s->size);
s->size = 0;
}
/**
 * rvca_free_te_slice - free a tid entry slice
* @r: the rcva structure that holds the slices
* @s: the slice structure that will be returned
*/
void rvca_free_te_slice(struct rcva *r, struct rcva_slice *s)
{
if (s->size)
gen_pool_free(r->te_pool, s->base + 1, s->size);
s->size = 0;
}
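/*
 * Usage sketch (hypothetical): the intended lifecycle of the allocator
 * above - create the partitioned receive array, carve a per-context
 * eager-entry slice, and tear everything down again.
 */
#if 0
static int example_rcva_lifecycle(struct hfi1_devdata *dd,
				  struct rcva_create_attr *attr)
{
	struct rcva_slice s = { 0 };
	struct rcva *r = rcva_create(dd, attr);
	int ret;

	if (IS_ERR(r))
		return PTR_ERR(r);
	ret = rvca_alloc_ee_slice(r, &s);
	if (!ret) {
		/* s.base: zero-relative first group, s.size: group count */
		rvca_free_ee_slice(r, &s);
	}
	rcva_destroy(r);
	return ret;
}
#endif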
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/ipoib.h
|
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* This file contains HFI1 support for IPOIB functionality
*/
#ifndef HFI1_IPOIB_H
#define HFI1_IPOIB_H
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/if_infiniband.h>
#include "hfi.h"
#include "iowait.h"
#include "netdev.h"
#include <rdma/ib_verbs.h>
#define HFI1_IPOIB_TXREQ_NAME_LEN 32
#define HFI1_IPOIB_PSEUDO_LEN 20
#define HFI1_IPOIB_ENCAP_LEN 4
struct hfi1_ipoib_dev_priv;
union hfi1_ipoib_flow {
u16 as_int;
struct {
u8 tx_queue;
u8 sc5;
} __attribute__((__packed__));
};
/**
* struct hfi1_ipoib_circ_buf - List of items to be processed
* @items: ring of items
* @head: ring head
* @tail: ring tail
* @max_items: max items + 1 that the ring can contain
* @producer_lock: producer sync lock
* @consumer_lock: consumer sync lock
*/
struct hfi1_ipoib_circ_buf {
void **items;
unsigned long head;
unsigned long tail;
unsigned long max_items;
spinlock_t producer_lock; /* head sync lock */
spinlock_t consumer_lock; /* tail sync lock */
};
/**
* struct hfi1_ipoib_txq - IPOIB per Tx queue information
* @priv: private pointer
* @sde: sdma engine
* @tx_list: tx request list
* @sent_txreqs: count of txreqs posted to sdma
* @flow: tracks when list needs to be flushed for a flow change
* @q_idx: ipoib Tx queue index
* @pkts_sent: indicator packets have been sent from this queue
* @wait: iowait structure
* @complete_txreqs: count of txreqs completed by sdma
* @napi: pointer to tx napi interface
* @tx_ring: ring of ipoib txreqs to be reaped by napi callback
*/
struct hfi1_ipoib_txq {
struct hfi1_ipoib_dev_priv *priv;
struct sdma_engine *sde;
struct list_head tx_list;
u64 sent_txreqs;
union hfi1_ipoib_flow flow;
u8 q_idx;
bool pkts_sent;
struct iowait wait;
atomic64_t ____cacheline_aligned_in_smp complete_txreqs;
struct napi_struct *napi;
struct hfi1_ipoib_circ_buf tx_ring;
};
struct hfi1_ipoib_dev_priv {
struct hfi1_devdata *dd;
struct net_device *netdev;
struct ib_device *device;
struct hfi1_ipoib_txq *txqs;
struct kmem_cache *txreq_cache;
struct napi_struct *tx_napis;
u16 pkey;
u16 pkey_index;
u32 qkey;
u8 port_num;
const struct net_device_ops *netdev_ops;
struct rvt_qp *qp;
struct pcpu_sw_netstats __percpu *netstats;
};
/* hfi1 ipoib rdma netdev's private data structure */
struct hfi1_ipoib_rdma_netdev {
struct rdma_netdev rn; /* keep this first */
/* followed by device private data */
struct hfi1_ipoib_dev_priv dev_priv;
};
static inline struct hfi1_ipoib_dev_priv *
hfi1_ipoib_priv(const struct net_device *dev)
{
return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
}
static inline void
hfi1_ipoib_update_rx_netstats(struct hfi1_ipoib_dev_priv *priv,
u64 packets,
u64 bytes)
{
struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
u64_stats_update_begin(&netstats->syncp);
netstats->rx_packets += packets;
netstats->rx_bytes += bytes;
u64_stats_update_end(&netstats->syncp);
}
static inline void
hfi1_ipoib_update_tx_netstats(struct hfi1_ipoib_dev_priv *priv,
u64 packets,
u64 bytes)
{
struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
u64_stats_update_begin(&netstats->syncp);
netstats->tx_packets += packets;
netstats->tx_bytes += bytes;
u64_stats_update_end(&netstats->syncp);
}
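/*
 * Usage sketch (hypothetical): a receive completion bumping the per-CPU
 * counters through the helper above before handing the skb to the
 * stack.
 */
#if 0
static void example_rx_done(struct net_device *dev, struct sk_buff *skb)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);

	hfi1_ipoib_update_rx_netstats(priv, 1, skb->len);
	netif_receive_skb(skb);
}
#endif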
int hfi1_ipoib_send_dma(struct net_device *dev,
struct sk_buff *skb,
struct ib_ah *address,
u32 dqpn);
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);
int hfi1_ipoib_rxq_init(struct net_device *dev);
void hfi1_ipoib_rxq_deinit(struct net_device *dev);
void hfi1_ipoib_napi_tx_enable(struct net_device *dev);
void hfi1_ipoib_napi_tx_disable(struct net_device *dev);
struct net_device *hfi1_ipoib_alloc_rn(struct ib_device *device,
u8 port_num,
enum rdma_netdev_t type,
const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *));
struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
int size, void *data);
#ifdef HAVE_RDMA_NETDEV_GET_PARAMS
int hfi1_ipoib_rn_get_params(struct ib_device *device,
u8 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params);
#endif
#endif /* HFI1_IPOIB_H */
|
cornelisnetworks/opa-hfi1
|
include/rdma/rdma_vt.h
|
#ifndef DEF_RDMA_VT_H
#define DEF_RDMA_VT_H
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* Structure that low level drivers will populate in order to register with the
* rdmavt layer.
*/
#include "compat.h"
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/version.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/rdmavt_mr.h>
#define RVT_MAX_PKEY_VALUES 16
#define RVT_MAX_TRAP_LEN 100 /* Limit pending trap list */
#define RVT_MAX_TRAP_LISTS 5 /*((IB_NOTICE_TYPE_INFO & 0x0F) + 1)*/
#define RVT_TRAP_TIMEOUT 4096 /* 4.096 usec */
struct trap_list {
u32 list_len;
struct list_head list;
};
struct rvt_qp;
struct rvt_qpn_table;
struct rvt_ibport {
struct rvt_qp __rcu *qp[2];
struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
struct rb_root mcast_tree;
spinlock_t lock; /* protect changes in this struct */
/* non-zero when timer is set */
unsigned long mkey_lease_timeout;
unsigned long trap_timeout;
__be64 gid_prefix; /* in network order */
__be64 mkey;
u64 tid;
u32 port_cap_flags;
u16 port_cap3_flags;
u32 pma_sample_start;
u32 pma_sample_interval;
__be16 pma_counter_select[5];
u16 pma_tag;
u16 mkey_lease_period;
u32 sm_lid;
u8 sm_sl;
u8 mkeyprot;
u8 subnet_timeout;
u8 vl_high_limit;
/*
* Driver is expected to keep these up to date. These
* counters are informational only and not required to be
* completely accurate.
*/
u64 n_rc_resends;
u64 n_seq_naks;
u64 n_rdma_seq;
u64 n_rnr_naks;
u64 n_other_naks;
u64 n_loop_pkts;
u64 n_pkt_drops;
u64 n_vl15_dropped;
u64 n_rc_timeouts;
u64 n_dmawait;
u64 n_unaligned;
u64 n_rc_dupreq;
u64 n_rc_seqnak;
u16 pkey_violations;
u16 qkey_violations;
u16 mkey_violations;
/* Hot-path per CPU counters to avoid cacheline trading to update */
u64 z_rc_acks;
u64 z_rc_qacks;
u64 z_rc_delayed_comp;
u64 __percpu *rc_acks;
u64 __percpu *rc_qacks;
u64 __percpu *rc_delayed_comp;
void *priv; /* driver private data */
/*
* The pkey table is allocated and maintained by the driver. Drivers
	 * need to have access to this before registering with rdmavt. However
	 * rdmavt will need access to it so drivers need to provide this during
* the attach port API call.
*/
u16 *pkey_table;
struct rvt_ah *sm_ah;
/*
* Keep a list of traps that have not been repressed. They will be
* resent based on trap_timer.
*/
struct trap_list trap_lists[RVT_MAX_TRAP_LISTS];
struct timer_list trap_timer;
};
#define RVT_CQN_MAX 16 /* maximum length of cq name */
#define RVT_SGE_COPY_MEMCPY 0
#define RVT_SGE_COPY_CACHELESS 1
#define RVT_SGE_COPY_ADAPTIVE 2
/*
* Things that are driver specific, module parameters in hfi1 and qib
*/
struct rvt_driver_params {
struct ib_device_attr props;
/*
* Anything driver specific that is not covered by props
* For instance special module parameters. Goes here.
*/
unsigned int lkey_table_size;
unsigned int qp_table_size;
unsigned int sge_copy_mode;
unsigned int wss_threshold;
unsigned int wss_clean_period;
int qpn_start;
int qpn_inc;
int qpn_res_start;
int qpn_res_end;
int nports;
int npkeys;
int node;
int psn_mask;
int psn_shift;
int psn_modify_mask;
u32 core_cap_flags;
u32 max_mad_size;
u8 qos_shift;
u8 max_rdma_atomic;
u8 extra_rdma_atomic;
u8 reserved_operations;
};
/* Protection domain */
struct rvt_pd {
struct ib_pd ibpd;
bool user;
};
/* Address handle */
struct rvt_ah {
struct ib_ah ibah;
struct rdma_ah_attr attr;
atomic_t refcount;
u8 vl;
u8 log_pmtu;
};
#define HAVE_IB_QP_CREATE_USE_GFP_NOIO \
(defined(IFS_RH73) || \
defined(IFS_RH74) || \
defined(IFS_SLES12SP2) || \
defined(IFS_SLES12SP3))
/*
* This structure is used by rvt_mmap() to validate an offset
* when an mmap() request is made. The vm_area_struct then uses
* this as its vm_private_data.
*/
struct rvt_mmap_info {
struct list_head pending_mmaps;
struct ib_ucontext *context;
void *obj;
__u64 offset;
struct kref ref;
u32 size;
};
/* memory working set size */
struct rvt_wss {
unsigned long *entries;
atomic_t total_count;
atomic_t clean_counter;
atomic_t clean_entry;
int threshold;
int num_entries;
long pages_mask;
unsigned int clean_period;
};
struct rvt_dev_info;
struct rvt_swqe;
struct rvt_driver_provided {
/*
* Which functions are required depends on which verbs rdmavt is
* providing and which verbs the driver is overriding. See
* check_support() for details.
*/
/*
* hot path calldowns in a single cacheline
*
* Add data path calls below
*/
/*
* Give the driver a notice that there is send work to do. It is up to
* the driver to generally push the packets out, this just queues the
* work with the driver. There are two variants here. The no_lock
* version requires the s_lock not to be held. The other assumes the
* s_lock is held.
*/
bool (*schedule_send)(struct rvt_qp *qp);
bool (*schedule_send_no_lock)(struct rvt_qp *qp);
/*
* Driver specific work request setup and checking.
* This function is allowed to perform any setup, checks, or
* adjustments required to the SWQE in order to be usable by
* underlying protocols. This includes private data structure
* allocations.
*/
int (*setup_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
bool *call_send);
/*
* Sometimes rdmavt needs to kick the driver's send progress. That is
* done by this call back.
*/
void (*do_send)(struct rvt_qp *qp);
/* end of hot path calldowns */
	/* Passed to ib core registration. Callback to create sysfs files */
int (*port_callback)(struct ib_device *, u8, struct kobject *);
/*
	 * Returns a pointer to the underlying hardware's PCI device. This is
* used to display information as to what hardware is being referenced
* in an output message
*/
struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);
/*
* Allocate a private queue pair data structure for driver specific
* information which is opaque to rdmavt. Errors are returned via
* ERR_PTR(err). The driver is free to return NULL or a valid
* pointer.
*/
#if HAVE_IB_QP_CREATE_USE_GFP_NOIO
void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
gfp_t gfp);
#else
void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
#endif
/*
	 * Initialize a structure allocated with qp_priv_alloc()
*/
int (*qp_priv_init)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
struct ib_qp_init_attr *init_attr);
/*
* Free the driver's private qp structure.
*/
void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
/*
	 * Inform the driver the particular qp in question has been reset so
* that it can clean up anything it needs to.
*/
void (*notify_qp_reset)(struct rvt_qp *qp);
/*
* Get a path mtu from the driver based on qp attributes.
*/
int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
struct ib_qp_attr *attr);
/*
* Notify driver that it needs to flush any outstanding IO requests that
* are waiting on a qp.
*/
void (*flush_qp_waiters)(struct rvt_qp *qp);
/*
* Notify driver to stop its queue of sending packets. Nothing else
* should be posted to the queue pair after this has been called.
*/
void (*stop_send_queue)(struct rvt_qp *qp);
/*
	 * Have the driver drain any in-progress operations
*/
void (*quiesce_qp)(struct rvt_qp *qp);
/*
	 * Inform the driver a qp has gone to the error state.
*/
void (*notify_error_qp)(struct rvt_qp *qp);
/*
* Get an MTU for a qp.
*/
u32 (*mtu_from_qp)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
u32 pmtu);
/*
* Convert an mtu to a path mtu
*/
int (*mtu_to_path_mtu)(u32 mtu);
/*
* Get the guid of a port in big endian byte order
*/
int (*get_guid_be)(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
int guid_index, __be64 *guid);
/*
* Query driver for the state of the port.
*/
int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
struct ib_port_attr *props);
/*
* Tell driver to shutdown a port
*/
int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);
/* Tell driver to send a trap for changed port capabilities */
void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);
/*
* The following functions can be safely ignored completely. Any use of
* these is checked for NULL before blindly calling. Rdmavt should also
* be functional if drivers omit these.
*/
/* Called to inform the driver that all qps should now be freed. */
unsigned (*free_all_qps)(struct rvt_dev_info *rdi);
/* Driver specific AH validation */
int (*check_ah)(struct ib_device *, struct rdma_ah_attr *);
/* Inform the driver a new AH has been created */
void (*notify_new_ah)(struct ib_device *, struct rdma_ah_attr *,
struct rvt_ah *);
	/* Let the driver pick the next queue pair number */
int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
#if HAVE_IB_QP_CREATE_USE_GFP_NOIO
			 enum ib_qp_type type, u8 port_num, gfp_t gfp);
#else
enum ib_qp_type type, u8 port_num);
#endif
/* Determine if its safe or allowed to modify the qp */
int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
/* Driver specific QP modification/notification-of */
void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
/* Notify driver a mad agent has been created */
void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);
/* Notify driver a mad agent has been removed */
void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx);
/* Notify driver to restart rc */
void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);
/* Get and return CPU to pin CQ processing thread */
int (*comp_vect_cpu_lookup)(struct rvt_dev_info *rdi, int comp_vect);
};
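/*
 * Illustrative sketch (hypothetical driver code, not part of rdmavt):
 * before calling rvt_register_device(), a client driver fills in the
 * callbacks above on its rvt_dev_info, e.g.
 *
 *	rdi->driver_f.get_pci_dev     = my_get_pci_dev;
 *	rdi->driver_f.qp_priv_alloc   = my_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free    = my_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = my_notify_qp_reset;
 *
 * The my_* names are assumptions for illustration only; the callbacks
 * documented above as safely ignorable may be left NULL, the rest must
 * be valid before registration.
 */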
struct rvt_dev_info {
struct ib_device ibdev; /* Keep this first. Nothing above here */
/*
* Prior to calling for registration the driver will be responsible for
* allocating space for this structure.
*
* The driver will also be responsible for filling in certain members of
* dparms.props. The driver needs to fill in dparms exactly as it would
* want values reported to a ULP. This will be returned to the caller
* in rdmavt's device. The driver should also therefore refrain from
* modifying this directly after registration with rdmavt.
*/
/* Driver specific properties */
struct rvt_driver_params dparms;
/* post send table */
const struct rvt_operation_params *post_parms;
/* opcode translation table */
const enum ib_wc_opcode *wc_opcode;
/* Driver specific helper functions */
struct rvt_driver_provided driver_f;
struct rvt_mregion __rcu *dma_mr;
struct rvt_lkey_table lkey_table;
/* Internal use */
int n_pds_allocated;
spinlock_t n_pds_lock; /* Protect pd allocated count */
int n_ahs_allocated;
spinlock_t n_ahs_lock; /* Protect ah allocated count */
u32 n_srqs_allocated;
spinlock_t n_srqs_lock; /* Protect srqs allocated count */
int flags;
struct rvt_ibport **ports;
/* QP */
struct rvt_qp_ibdev *qp_dev;
u32 n_qps_allocated; /* number of QPs allocated for device */
u32 n_rc_qps; /* number of RC QPs allocated for device */
u32 busy_jiffies; /* timeout scaling based on RC QP count */
spinlock_t n_qps_lock; /* protect qps, rc qps and busy jiffy counts */
/* memory maps */
struct list_head pending_mmaps;
spinlock_t mmap_offset_lock; /* protect mmap_offset */
u32 mmap_offset;
spinlock_t pending_lock; /* protect pending mmap list */
/* CQ */
u32 n_cqs_allocated; /* number of CQs allocated for device */
spinlock_t n_cqs_lock; /* protect count of in use cqs */
/* Multicast */
u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
spinlock_t n_mcast_grps_lock;
/* Memory Working Set Size */
struct rvt_wss *wss;
};
/**
* rvt_set_ibdev_name - Craft an IB device name from client info
* @rdi: pointer to the client rvt_dev_info structure
 * @fmt: format string used to build the device name
 * @name: client specific name
 * @unit: client specific unit number.
*/
static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
const char *fmt, const char *name,
const int unit)
{
/*
* FIXME: rvt and its users want to touch the ibdev before
* registration and have things like the name work. We don't have the
* infrastructure in the core to support this directly today, hack it
* to work by setting the name manually here.
*/
dev_set_name(&rdi->ibdev.dev, fmt, name, unit);
strlcpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
}
/**
* rvt_get_ibdev_name - return the IB name
* @rdi: rdmavt device
*
* Return the registered name of the device.
*/
static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
{
return rdi->ibdev.name;
}
static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct rvt_pd, ibpd);
}
static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
{
return container_of(ibah, struct rvt_ah, ibah);
}
static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
{
return container_of(ibdev, struct rvt_dev_info, ibdev);
}
static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
{
/*
* All ports have same number of pkeys.
*/
return rdi->dparms.npkeys;
}
/*
* Return the max atomic suitable for determining
* the size of the ack ring buffer in a QP.
*/
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
return rdi->dparms.max_rdma_atomic +
rdi->dparms.extra_rdma_atomic + 1;
}
static inline unsigned int rvt_size_atomic(struct rvt_dev_info *rdi)
{
return rdi->dparms.max_rdma_atomic +
rdi->dparms.extra_rdma_atomic;
}
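/*
 * Example of the distinction above (a sketch, not the exact rdmavt
 * code): a QP's ACK ring is sized with rvt_max_atomic(), roughly
 *
 *	qp->s_ack_queue = kcalloc(rvt_max_atomic(rdi),
 *				  sizeof(*qp->s_ack_queue), GFP_KERNEL);
 *
 * while rvt_size_atomic() gives the same count without the extra
 * slot. The real allocation in rdmavt is also NUMA-node aware.
 */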
/*
* Return the indexed PKEY from the port PKEY table.
*/
static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
int port_index,
unsigned index)
{
if (index >= rvt_get_npkeys(rdi))
return 0;
else
return rdi->ports[port_index]->pkey_table[index];
}
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
int port_index, u16 *pkey_table);
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
int access);
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
struct rvt_sge *isge, struct rvt_sge *last_sge,
struct ib_sge *sge, int acc);
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
u16 lid);
#endif /* DEF_RDMA_VT_H */
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/user_exp_rcv_gpu.c
|
/*
* Copyright(c) 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "trace.h"
#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "user_exp_rcv_gpu.h"
static void unpin_rcv_gpu_pages_callback(void *data)
{
struct tid_user_buf *tidbuf = (struct tid_user_buf *)data;
if (tidbuf->pages.gpu) {
free_gpu_page_table(tidbuf->pages.gpu);
tidbuf->pages.gpu = NULL;
if (tidbuf->handler) {
trace_unpin_rcv_gpu_pages_callback(tidbuf->vaddr,
tidbuf->length);
hfi1_gpu_cache_invalidate(tidbuf->handler,
tidbuf->vaddr,
(tidbuf->vaddr + tidbuf->length));
}
}
}
/**
 * unpin_rcv_pages_gpu() - Release pinned receive buffer GPU memory pages.
*/
void unpin_rcv_pages_gpu(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
if (atomic_dec_and_test(&node->tidbuf->refcount)) {
/*
* If the refcount decrements to 0, there aren't any
* nodes that refer to a tidbuf. So the tidbuf can be
* freed and the GPU memory pages that have been
* pinned for the tidbuf can be unpinned.
*/
if (node->tidbuf->pages.gpu) {
trace_free_recv_gpu_pages(node->tidbuf->vaddr);
put_gpu_pages(node->tidbuf->vaddr,
node->tidbuf->pages.gpu);
}
fd->tid_n_gpu_pinned -= node->tidbuf->npages;
kfree(node->tidbuf);
}
}
/**
 * pin_rcv_pages_gpu() - Pin receive buffer GPU memory pages.
*/
int pin_rcv_pages_gpu(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
int ret, pinned;
unsigned int max_cache_pages;
unsigned long size, unaligned_vaddr;
struct hfi1_devdata *dd = fd->uctxt->dd;
/* Get the number of pages the user buffer spans */
tidbuf->npages = num_user_pages_gpu(tidbuf->vaddr, tidbuf->length);
if (!tidbuf->npages)
return -EINVAL;
if (tidbuf->npages > fd->uctxt->expected_count) {
dd_dev_err(dd, "Expected buffer too big\n");
return -EINVAL;
}
/*
* Convert gpu_cache_size in MB to bytes and calculate the
* maximum pages allowed in the cache. If the sum of the
	 * number of pages in the cache and the number of pages that need
	 * to be pinned exceeds the maximum pages allowed in the cache,
* return -ENOMEM to user space. User space can request the
* driver to free up some buffers in the cache and then
* pin the new buffers.
*/
max_cache_pages = ((gpu_cache_size << 20) >> NV_GPU_PAGE_SHIFT);
if (fd->tid_n_gpu_pinned + tidbuf->npages > max_cache_pages)
return -ENOMEM;
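	/*
	 * Worked example of the check above (values assumed for
	 * illustration): with gpu_cache_size set to 256 (MB) and 64 KiB
	 * GPU pages, max_cache_pages = (256 << 20) >> 16 = 4096.
	 * Pinning a 1 MB buffer (16 GPU pages) while 4090 pages are
	 * already pinned would fail, since 4090 + 16 > 4096.
	 */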
/*
	 * refcount is used to count the number of nodes (tid_rb_node) that
* refer to a tidbuf (tid_user_buf). There is one node for each
* TID RcvArray mapping of a tidbuf. This refcount is needed
* only for GPU memory pinned buffers as all the pages that have
* been pinned by nvidia_p2p_get_pages should be unpinned in
* one single call to nvidia_p2p_put_pages. We can not unpin a
* subset of the pages that have been pinned with
* nvidia_p2p_get_pages.
*/
atomic_set(&tidbuf->refcount, 0);
/* Starting virtual address has to be aligned to 64K */
unaligned_vaddr = tidbuf->vaddr;
tidbuf->vaddr = unaligned_vaddr & NV_GPU_PAGE_MASK;
size = unaligned_vaddr + tidbuf->length - tidbuf->vaddr;
trace_pin_rcv_pages_gpu(unaligned_vaddr, tidbuf->vaddr, tidbuf->length,
size, tidbuf->npages);
ret = pin_gpu_pages(tidbuf->vaddr, size, &tidbuf->pages.gpu,
unpin_rcv_gpu_pages_callback, tidbuf);
if (ret != 0) {
trace_recv_pin_gpu_pages_fail(ret, tidbuf->vaddr, size);
return ret;
}
tidbuf->length = size;
pinned = tidbuf->pages.gpu->entries;
fd->tid_n_gpu_pinned += pinned;
trace_recv_gpu_page_table_info(pinned, tidbuf->pages.gpu->page_size);
return pinned;
}
u32 find_phys_blocks_gpu(struct tid_user_buf *tidbuf)
{
struct tid_pageset *list = tidbuf->psets;
struct nvidia_p2p_page_table *page_table = tidbuf->pages.gpu;
unsigned int pagecount, pageidx, setcount = 0, i;
unsigned long pfn, this_pfn;
unsigned int npages = page_table->entries;
if (!npages)
return 0;
/*
* Look for sets of physically contiguous pages in the user buffer.
* This will allow us to optimize Expected RcvArray entry usage by
* using the bigger supported sizes.
*/
pfn = GPU_PAGE_TO_PFN(page_table->pages[0]);
for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
this_pfn = i < npages ?
GPU_PAGE_TO_PFN(page_table->pages[i]) : 0;
/*
* If the pfn's are not sequential, pages are not physically
* contiguous.
*/
if (this_pfn != ++pfn) {
/*
* At this point we have to loop over the set of
			 * physically contiguous pages and break them down into
* sizes supported by the HW.
* There are two main constraints:
* 1. The max buffer size is MAX_EXPECTED_BUFFER.
* If the total set size is bigger than that
* program only a MAX_EXPECTED_BUFFER chunk.
* 2. The buffer size has to be a power of two. If
			 *    it is not, round down to the closest power of
* 2 and program that size.
*/
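			/*
			 * Worked example (page counts chosen for
			 * illustration): a run of 7 contiguous 64 KiB GPU
			 * pages is 448 KiB. 448 KiB is not a power of two,
			 * so the first set rounds down to 256 KiB (4 pages);
			 * the remaining 192 KiB rounds down to 128 KiB
			 * (2 pages); the final 64 KiB page forms its own
			 * set. The run is therefore programmed as sets of
			 * 4, 2 and 1 pages (assuming MAX_EXPECTED_BUFFER is
			 * at least 448 KiB).
			 */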
while (pagecount) {
int maxpages = pagecount;
u32 bufsize = pagecount * NV_GPU_PAGE_SIZE;
if (bufsize > MAX_EXPECTED_BUFFER)
maxpages =
MAX_EXPECTED_BUFFER >>
NV_GPU_PAGE_SHIFT;
else if (!is_power_of_2(bufsize))
maxpages =
rounddown_pow_of_two(bufsize) >>
NV_GPU_PAGE_SHIFT;
list[setcount].idx = pageidx;
list[setcount].count = maxpages;
pagecount -= maxpages;
pageidx += maxpages;
setcount++;
}
pageidx = i;
pagecount = 1;
pfn = this_pfn;
} else {
pagecount++;
}
}
return setcount;
}
int set_rcvarray_entry_gpu(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
u32 rcventry, struct tid_group *grp,
u16 pageidx, unsigned int npages)
{
int ret;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct tid_rb_node *node;
struct hfi1_devdata *dd = uctxt->dd;
dma_addr_t phys;
/*
* Allocate the node first so we can handle a potential
* failure before we've programmed anything.
*/
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
node->mmu.len = npages * NV_GPU_PAGE_SIZE;
node->mmu.addr = tbuf->vaddr + (pageidx * NV_GPU_PAGE_SIZE);
phys = tbuf->pages.gpu->pages[pageidx]->physical_address;
node->phys = phys;
node->tidbuf = tbuf;
node->npages = npages;
node->rcventry = rcventry;
/*
* As per GPU Direct documentation, IOMMU must be disabled or configured
* for pass-through translation in order for GPU Direct RDMA to work.
* So physical address and DMA address would be the same.
*/
node->dma_addr = phys;
node->grp = grp;
node->freed = false;
node->ongpu = tbuf->ongpu;
if (!fd->handler_gpu)
ret = tid_rb_insert(fd, &node->mmu);
else
ret = hfi1_mmu_rb_insert(fd->handler_gpu, &node->mmu);
if (ret) {
hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
node->rcventry, node->mmu.addr, node->phys, ret);
kfree(node);
return -EFAULT;
}
/*
* In case of GPU memory, buffer size starts at 64K (GPU mem page size)
*/
hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 5);
atomic_inc(&node->tidbuf->refcount);
trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
node->mmu.addr, node->phys, phys);
return 0;
}
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/user_sdma_gpu.c
|
/*
* Copyright(c) 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "sdma.h"
#include "trace.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "user_sdma_gpu.h"
int user_sdma_txadd_gpu(struct user_sdma_request *req,
struct user_sdma_txreq *tx,
struct user_sdma_iovec *iovec, u32 datalen,
u32 *queued_ptr, u32 *data_sent_ptr,
u64 *iov_offset_ptr)
{
int ret;
unsigned int pageidx, len;
unsigned long base, offset;
u64 iov_offset = *iov_offset_ptr;
u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct nvidia_p2p_page_table *gpu;
base = (unsigned long)iovec->iov.iov_base;
offset = ((base + iovec->offset + iov_offset) & ~NV_GPU_PAGE_MASK);
pageidx = (((iovec->offset + iov_offset + base) -
(base & NV_GPU_PAGE_MASK)) >> NV_GPU_PAGE_SHIFT);
len = offset + req->info.fragsize > NV_GPU_PAGE_SIZE ?
NV_GPU_PAGE_SIZE - offset : req->info.fragsize;
len = min((datalen - queued), len);
gpu = iovec->pages.gpu;
ret = sdma_txadd_daddr(pq->dd, &tx->txreq,
gpu->pages[pageidx]->physical_address + offset,
len);
if (ret) {
SDMA_DBG(req, "NV: base 0x%lx, offset: 0x%lx, idx: %u, len: %u",
base, offset, pageidx, len);
SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
return ret;
}
iov_offset += len;
queued += len;
data_sent += len;
if (unlikely(queued < datalen && pageidx == iovec->npages &&
req->iov_idx < req->data_iovs - 1)) {
iovec->offset += iov_offset;
iovec = &req->iovs[++req->iov_idx];
iov_offset = 0;
}
*queued_ptr = queued;
*data_sent_ptr = data_sent;
*iov_offset_ptr = iov_offset;
return ret;
}
static u32 sdma_cache_evict_gpu(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
struct evict_data evict_data;
evict_data.cleared = 0;
evict_data.target = npages;
hfi1_mmu_rb_evict(pq->handler_gpu, &evict_data);
return evict_data.cleared;
}
static void unpin_gpu_pages_callback(void *data)
{
struct sdma_mmu_node *node = (struct sdma_mmu_node *)data;
if (node->pages.gpu) {
/*
* Any previous SDMA sends from the node buffer should have
* completed by now and the node refcount should be zero.
*/
WARN_ON(atomic_read(&node->refcount));
free_gpu_page_table(node->pages.gpu);
node->pages.gpu = NULL;
trace_unpin_gpu_pages_callback(node->rb.addr, node->rb.len);
hfi1_gpu_cache_invalidate(node->pq->handler_gpu,
node->rb.addr,
(node->rb.addr + node->rb.len));
}
}
static inline void release_gpu_mem_pages(unsigned long addr,
nvidia_p2p_page_table_t *page_table)
{
trace_free_sdma_gpu_pages(addr);
put_gpu_pages(addr, page_table);
}
static void unpin_vector_gpu_mem_pages(struct user_sdma_request *req,
struct user_sdma_iovec *iovec)
{
unsigned long addr = (unsigned long)iovec->iov.iov_base &
NV_GPU_PAGE_MASK;
release_gpu_mem_pages(addr, iovec->pages.gpu);
iovec->pages.gpu = NULL;
iovec->npages = 0;
iovec->offset = 0;
}
int pin_sdma_pages_gpu(struct user_sdma_iovec *iovec,
struct sdma_mmu_node *node,
struct user_sdma_request *req,
int npages, unsigned long size,
unsigned long addr)
{
int ret, pinned, locked;
unsigned int cleared, target;
unsigned long max_cache_pages;
struct hfi1_user_sdma_pkt_q *pq = req->pq;
/*
* It was found that nvidia_p2p_get_pages() handles the cases where
* a buffer that needs to be pinned is already partially pinned.
* So, no additional handling is needed here for such cases.
*/
trace_pin_sdma_pages_gpu((unsigned long)iovec->iov.iov_base, addr,
iovec->iov.iov_len, size, npages);
locked = atomic_read(&pq->n_gpu_locked);
/*
* Convert gpu_cache_size in MB to bytes and divide it by
* GPU memory page size to calculate the maximum pages allowed
* in the GPU buffer cache.
*/
max_cache_pages = ((gpu_cache_size << 20) >> NV_GPU_PAGE_SHIFT);
if (locked + npages > max_cache_pages) {
if (npages > max_cache_pages)
return -EINVAL;
do {
target = npages - (max_cache_pages - locked);
cleared = sdma_cache_evict_gpu(pq, target);
locked = atomic_read(&pq->n_gpu_locked);
} while (cleared < target);
}
retry_pin:
ret = pin_gpu_pages(addr, size, &node->pages.gpu,
unpin_gpu_pages_callback, node);
if (ret) {
/*
* As per NVIDIA's documentation, nvidia_p2p_get_pages API
* returns -EINVAL if an invalid argument was supplied.
* While testing the code, it was found that the API may
* return -EINVAL and not -ENOMEM if there isn't enough GPU
* BAR memory to pin the required number of pages. So if the
* error return is -ENOMEM or -EINVAL, we make an attempt to
* evict npages from the SDMA pinned buffer cache and try
		 * pinning the buffer again. It could also be that -EINVAL
		 * was returned from nvidia_p2p_get_pages because an
		 * argument really is invalid. Even in that case, the cache
		 * is evicted continuously, and once it is empty we return
		 * -ENOMEM to the application without calling
		 * nvidia_p2p_get_pages again. NVIDIA's documentation
		 * doesn't mention any driver API that reports free BAR
		 * memory, which would have helped distinguish an -EINVAL
		 * caused by insufficient BAR memory from one caused by an
		 * invalid argument.
*/
if ((ret == -ENOMEM) || (ret == -EINVAL)) {
target = npages;
cleared = sdma_cache_evict_gpu(pq, target);
/*
* If pinning GPU memory pages fails and there aren't
* any GPU memory pinned pages in the SDMA cache, then
* return -ENOMEM. This can happen when all the GPU
* memory pages are pinned to receive buffers. PSM
* needs to request the driver to unpin some receive
* buffers and re-attempt this failed SDMA transfer.
* If SDMA cache eviction unpins some GPU memory
* pinned buffers or detects some GPU buffers that are
			 * busy, re-attempt pinning the buffer for the SDMA transfer.
* The buffers in the cache that are busy can also be
* unpinned through the callback function invoked by the
* NVidia driver when the user space application frees
* the GPU buffers.
*/
if (cleared == 0 && (atomic_read(&pq->n_gpu_locked) == 0))
ret = -ENOMEM;
else
goto retry_pin;
}
trace_sdma_pin_gpu_pages_fail(ret, addr, size);
return ret;
}
trace_sdma_gpu_page_table_info(node->pages.gpu->entries,
node->pages.gpu->page_size);
pinned = node->pages.gpu->entries;
if (pinned != npages) {
trace_sdma_pin_gpu_pages_fail(ret, addr, size);
unpin_vector_gpu_mem_pages(req, iovec);
return -EFAULT;
}
node->rb.len = size;
atomic_add(pinned, &pq->n_gpu_locked);
return pinned;
}
void unpin_sdma_pages_gpu(struct sdma_mmu_node *node)
{
if (node->npages) {
/*
		 * For GPU buffers that are being removed from the cache via
		 * the callback function, release_gpu_mem_pages() (which in
		 * turn calls nvidia_p2p_put_pages()) shouldn't be called,
		 * as the callback function unpin_gpu_pages_callback() has
		 * already called nvidia_p2p_free_page_table(). The total
		 * number of GPU pages pinned still needs to be adjusted.
*/
if (node->pages.gpu) {
release_gpu_mem_pages(node->rb.addr, node->pages.gpu);
node->pages.gpu = NULL;
}
atomic_sub(node->npages, &node->pq->n_gpu_locked);
}
}
int user_sdma_gpu_cache_evict(struct hfi1_filedata *fd,
unsigned long arg, u32 len)
{
unsigned cleared = 0, target;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_sdma_gpu_cache_evict_params evict_params;
if (sizeof(evict_params) != len)
return -EINVAL;
if (copy_from_user(&evict_params,
(struct hfi1_sdma_gpu_cache_evict_params __user *)arg,
sizeof(evict_params)))
return -EFAULT;
if (evict_params.evict_params_in.version != HFI1_GDR_VERSION)
return -ENODEV;
target = evict_params.evict_params_in.pages_to_evict;
if (target > 0)
cleared = sdma_cache_evict_gpu(pq, target);
evict_params.evict_params_out.pages_evicted = cleared;
evict_params.evict_params_out.pages_in_cache = atomic_read(&pq->n_gpu_locked);
if (copy_to_user((struct hfi1_sdma_gpu_cache_evict_params __user *)arg,
&evict_params,
sizeof(evict_params)))
return -EFAULT;
return 0;
}
|
cornelisnetworks/opa-hfi1
|
distro/RHEL76/ipoib/ipoib_vlan.c
|
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include "ipoib.h"
static ssize_t show_parent(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_device *dev = to_net_dev(d);
struct ipoib_dev_priv *priv = ipoib_priv(dev);
return sprintf(buf, "%s\n", priv->parent->name);
}
static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
u16 pkey, int type)
{
int result;
struct rdma_netdev *rn = netdev_priv(priv->dev);
priv->max_ib_mtu = ppriv->max_ib_mtu;
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
rn->mtu = priv->mcast_mtu;
priv->parent = ppriv->dev;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
ipoib_set_dev_features(priv, ppriv->ca);
priv->pkey = pkey;
memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
priv->dev->broadcast[8] = pkey >> 8;
priv->dev->broadcast[9] = pkey & 0xff;
result = ipoib_dev_init(priv->dev, ppriv->ca, ppriv->port);
if (result < 0) {
ipoib_warn(ppriv, "failed to initialize subinterface: "
"device %s, port %d",
ppriv->ca->name, ppriv->port);
goto err;
}
result = register_netdevice(priv->dev);
if (result) {
ipoib_warn(priv, "failed to initialize; error %i", result);
goto register_failed;
}
	/* RTNL children don't need proprietary sysfs entries */
if (type == IPOIB_LEGACY_CHILD) {
if (ipoib_cm_add_mode_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_umcast_attr(priv->dev))
goto sysfs_failed;
if (device_create_file(&priv->dev->dev, &dev_attr_parent))
goto sysfs_failed;
}
priv->child_type = type;
list_add_tail(&priv->list, &ppriv->child_intfs);
return 0;
sysfs_failed:
result = -ENOMEM;
unregister_netdevice(priv->dev);
register_failed:
ipoib_dev_cleanup(priv->dev);
err:
return result;
}
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv;
char intf_name[IFNAMSIZ];
struct ipoib_dev_priv *tpriv;
int result;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
ppriv = ipoib_priv(pdev);
if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
return -EPERM;
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
if (!mutex_trylock(&ppriv->sysfs_mutex))
return restart_syscall();
if (!rtnl_trylock()) {
mutex_unlock(&ppriv->sysfs_mutex);
return restart_syscall();
}
if (!down_write_trylock(&ppriv->vlan_rwsem)) {
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);
return restart_syscall();
}
priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
if (!priv) {
result = -ENOMEM;
goto out;
}
/*
* First ensure this isn't a duplicate. We check the parent device and
* then all of the legacy child interfaces to make sure the Pkey
* doesn't match.
*/
if (ppriv->pkey == pkey) {
result = -ENOTUNIQ;
goto out;
}
list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
if (tpriv->pkey == pkey &&
tpriv->child_type == IPOIB_LEGACY_CHILD) {
result = -ENOTUNIQ;
goto out;
}
}
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
out:
up_write(&ppriv->vlan_rwsem);
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);
if (result && priv) {
struct rdma_netdev *rn;
rn = netdev_priv(priv->dev);
rn->free_rdma_netdev(priv->dev);
kfree(priv);
}
return result;
}
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv, *tpriv;
struct net_device *dev = NULL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
ppriv = ipoib_priv(pdev);
if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
return -EPERM;
if (!mutex_trylock(&ppriv->sysfs_mutex))
return restart_syscall();
if (!rtnl_trylock()) {
mutex_unlock(&ppriv->sysfs_mutex);
return restart_syscall();
}
if (!down_write_trylock(&ppriv->vlan_rwsem)) {
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);
return restart_syscall();
}
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
list_del(&priv->list);
dev = priv->dev;
break;
}
}
up_write(&ppriv->vlan_rwsem);
if (dev) {
ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
unregister_netdevice(dev);
}
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);
if (dev) {
struct rdma_netdev *rn;
rn = netdev_priv(dev);
rn->free_rdma_netdev(priv->dev);
kfree(priv);
return 0;
}
return -ENODEV;
}
|
cornelisnetworks/opa-hfi1
|
compat/RH73/compat.h
|
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(RH73_COMPAT_H)
#define RH73_COMPAT_H
#include "compat_common.h"
#define __GFP_RECLAIM (__GFP_WAIT)
#define OPA_SM_CLASS_VERSION (OPA_SMI_CLASS_VERSION)
#define OPA_CAP_MASK3_IsEthOnFabricSupported BIT(13)
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
#define PCI_IRQ_LEGACY BIT(0) /* allow legacy interrupts */
#define PCI_IRQ_MSI BIT(1) /* allow MSI interrupts */
#define PCI_IRQ_MSIX BIT(2) /* allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY BIT(3) /* auto-assign affinity */
#define PCI_IRQ_ALL_TYPES \
(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
#define ib_register_device(a, b, c) ib_register_device((a), (c))
#define rdma_set_device_sysfs_group(a, b)
struct hfi1_msix_entry;
struct hfi1_devdata;
/**
 * struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
* the MSI(-X) vector space
*/
struct irq_affinity {
int pre_vectors;
int post_vectors;
};
void pcie_flr(struct pci_dev *dev);
int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags);
void msix_setup(struct pci_dev *pcidev, int pos, u32 *msixcnt,
struct hfi1_msix_entry *hfi1_msix_entry);
int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
int debugfs_use_file_start(struct dentry *dentry, int *srcu_idx)
__acquires(&debugfs_srcu);
void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
static inline long compat_get_user_pages(unsigned long start,
unsigned long nr_pages,
unsigned int gup_flags,
struct page **pages,
struct vm_area_struct **vmas)
{
return get_user_pages(current, current->mm, start,
nr_pages, 1, 1, pages, vmas);
}
#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
compat_get_user_pages(start, nr_pages, gup_flags, pages, vmas)
static inline void inode_lock(struct inode *inode)
{
mutex_lock(&inode->i_mutex);
}
static inline void inode_unlock(struct inode *inode)
{
mutex_unlock(&inode->i_mutex);
}
static inline int simple_positive(struct dentry *dentry)
{
return !d_unhashed(dentry) && dentry->d_inode;
}
static inline void hfi1_enable_intx(struct pci_dev *pdev)
{
/* first, turn on INTx */
pci_intx(pdev, 1);
/* then turn off MSI-X */
pci_disable_msix(pdev);
}
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
pci_disable_msix(dev);
pci_disable_msi(dev);
}
/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc) ((desc)->dev)
#define dev_to_msi_list(dev) (&(dev)->msi_list)
#define first_msi_entry(dev) \
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#ifdef CONFIG_PCI_MSI
/**
* pci_irq_vector - return Linux IRQ number of a device vector
* @dev: PCI device to operate on
* @nr: device-relative interrupt vector index (0-based).
*/
static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
if (dev->msix_enabled) {
struct msi_desc *entry;
int i = 0;
for_each_msi_entry(entry, dev) {
if (i == nr)
return entry->irq;
i++;
}
WARN_ON_ONCE(1);
return -EINVAL;
}
if (dev->msi_enabled) {
struct msi_desc *entry = first_msi_entry(dev);
if (WARN_ON_ONCE(nr >= entry->nvec_used))
return -EINVAL;
} else {
if (WARN_ON_ONCE(nr > 0))
return -EINVAL;
}
return dev->irq + nr;
}
#else
static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
if (WARN_ON_ONCE(nr > 0))
return -EINVAL;
return dev->irq;
}
#endif
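/*
 * Typical use of the shim above (a sketch): after allocating vectors
 * with pci_alloc_irq_vectors(), map a device-relative vector index to
 * a Linux IRQ number before requesting it, e.g.
 *
 *	int irq = pci_irq_vector(pdev, i);
 *
 *	if (irq >= 0)
 *		ret = request_irq(irq, my_handler, 0, "my_dev", my_data);
 *
 * my_handler/my_dev/my_data are placeholders, not names from this
 * driver.
 */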
/**
* cpumap_print_to_pagebuf - copies the cpumask into the buffer either
* as comma-separated list of cpus or hex values of cpumask
 * @list: indicates whether the cpumap must be a list
* @mask: the cpumask to copy
* @buf: the buffer to copy into
*
* Returns the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
static inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
nr_cpu_ids);
}
#endif /* RH73_COMPAT_H */
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/gdr_ops.c
|
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file includes code obtained from: https://github.com/NVIDIA/gdrcopy/
* under the following copyright and license.
*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/aio.h>
#include <linux/bitmap.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <rdma/ib.h>
#include "mmu_rb.h"
#include "hfi.h"
#include "device.h"
#include "trace.h"
#include "nv-p2p.h"
#include "gpu.h"
#include "gdr_ops.h"
static int gdr_major;
static struct class *gdr_class;
static struct device *gdr_device;
static dev_t gdr_dev;
#define GDR_DRIVER_NAME "hfi1_gdr_chr"
#define GDR_CLASS_NAME "hfi1_gdr"
#define GDR_DEV_NAME "hfi1_gdr"
/**
* struct hfi1_gdrdata - Private data for gdr operations driver
 * @ioctl_busy_flag: an atomic used to serialize gdr device ioctl operations.
 * @gdr_handler: Head of a red/black tree of gpu memory buffer descriptors
 * @map_this_mr: A memory descriptor pointer to be mmapped by this driver.
 * @mr_lock: a mutex for resolving races with the Nvidia callback handler.
 * @gdr_wq: A work queue to delete nodes from the delete list.
 * @n_pages_locked: Total number of pages pinned in the cache.
 * @lru_list: Head of the LRU list.
*
 * A file descriptor's private_data field points to one of these structures.
* Most of the state in this structure is manipulated through this driver's
* ioctl() system call handler.
*
* It maintains a red/black tree and mutex lock for all the pinned
* and mmapped gpu buffers created.
*
* The ioctl_busy_flag is used to ensure that only one thread
* at a time is executing in this ioctl() handler. Any thread that enters
* this ioctl() handler while it is "busy", will receive an -EINVAL error code.
* This means there is no concurrent execution of most of the code in this
* driver. This is acceptable because the primary user of this driver is PSM,
* and the core of PSM is single-threaded.
*
* The mr_lock mutex serializes access to this red/black tree as well
* as all state information for each of the pinned GPU buffers.
*
* map_this_mr is used as a way to efficiently communicate across
* the vm_mmap() function call in do_pin_and_mmap_gpu_buf() to this
* driver's mmap handler function hfi1_gdr_mmap(), a pointer to a
* gdr_mr struct that describes the GPU buffer that is to be mmapped
 * into the process's user virtual address space.
*/
struct hfi1_gdrdata {
atomic_t ioctl_busy_flag;
struct mmu_rb_handler *gdr_handler;
void *map_this_mr; /* consider replacing with a linked list */
struct mutex mr_lock;
struct workqueue_struct *gdr_wq;
unsigned long n_pages_locked;
struct list_head lru_list;
};
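/*
 * One plausible realization of the ioctl_busy_flag convention
 * described above (a sketch only; the actual test lives in
 * hfi1_gdr_ioctl(), which is not shown in this excerpt, and may
 * differ):
 *
 *	if (atomic_cmpxchg(&gd->ioctl_busy_flag, 0, 1))
 *		return -EINVAL;   (another thread is in the handler)
 *	... serialized ioctl work ...
 *	atomic_set(&gd->ioctl_busy_flag, 0);
 */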
/**
 * struct gdr_mr - describes a gpu buffer and its pinned and mmapped state.
* @rb_node: This memory region's place holder in the RB tree of cached
* memory region structures. This node contains the starting address
* and length of a gpu buffer.
* @list: List entry for adding to a list.
* @host_addr: if not NULL, it is the host address mapping of this gpu buffer.
 * @mr_handle: a pseudo-random number used by this driver's mmap handler
* @page_table: When a GPU buf is pinned this points to that GPU buf's
* NVidia page table.
* @gd: A pointer to the hfi1_gdrdata this structure is linked to.
* @ref_cnt: A reference count on this structure.
*
* Each struct gdr_mr represents a mmapped and pinned gpu buffer.
* The hfi1_gdrdata structure for this driver contains the head of
 * a red/black tree of these structures. Access to and modification
 * of the red/black tree and the contents of its structures
* are controlled by the mr_lock mutex in the hfi1_gdrdata structure.
*/
struct gdr_mr {
struct mmu_rb_node rb_node;
struct list_head list;
u64 host_addr;
u32 mr_handle;
nvidia_p2p_page_table_t *page_table;
struct hfi1_gdrdata *gd;
struct kref ref_cnt;
};
/**
* gdrdrv_munmap() - unmap a pinned gpu page from process's address space.
* @mr: - A pointer to a struct gdr_mr describing the gpu buf being unmapped.
*
* Unmap host_addr from the user's address space.
*
* This could be called during process exit after a user-mode SEGFAULT.
* Under this circumstance, we must not call vm_munmap(), otherwise the kernel
* will segfault and panic.
*
*/
static inline void gdrdrv_munmap(struct gdr_mr *mr)
{
int unmap_ret = 0;
if (mr->host_addr && !(current->flags & PF_EXITING)) {
unmap_ret = vm_munmap(mr->host_addr, mr->rb_node.len);
WARN_ON(unmap_ret);
}
mr->host_addr = 0;
}
static inline void
mr_complete(struct kref *kref)
{
struct gdr_mr *mr;
struct hfi1_gdrdata *gd;
mr = container_of(kref, struct gdr_mr, ref_cnt);
gd = mr->gd;
WARN_ON(gd->map_this_mr == mr);
hfi1_mmu_rb_remove(mr->gd->gdr_handler, &mr->rb_node);
list_del(&mr->list);
kfree(mr);
return;
}
static inline void
acquire_callback_mr_ref(struct gdr_mr *mr)
{
kref_get(&mr->ref_cnt);
}
static inline void
release_callback_mr_ref(struct gdr_mr *mr)
{
kref_put(&mr->ref_cnt, mr_complete);
}
static inline void
acquire_ioctl_mr_ref(struct gdr_mr *mr)
{
kref_get(&mr->ref_cnt);
}
static inline void
release_ioctl_mr_ref(struct gdr_mr *mr)
{
kref_put(&mr->ref_cnt, mr_complete);
}
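/*
 * Reference lifecycle implied by the helpers above (an illustration
 * derived from the code in this file):
 *
 *	create_mr()                   kref_init -> count 1 ("ioctl" ref)
 *	do_pin_and_mmap_gpu_buf()     kref_get  -> count 2 ("callback" ref)
 *	unpin ioctl / error unwind    kref_put  -> drops the ioctl ref
 *	Nvidia free callback, or a
 *	winning nvidia_p2p_put_pages  kref_put  -> drops the callback ref
 *
 * mr_complete() runs when the count reaches zero; it removes the
 * gdr_mr from the red/black tree and the LRU list before freeing it.
 */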
/**
* handle_to_offset() - convert a 32-bit number to a vm_mmap() offset argument.
 * @handle: an unsigned 32-bit pseudo-random value.
 *
 * The 32-bit pseudo-random handle value is used to uniquely label a
 * struct gdr_mr, for use by this driver's mmap handler.
 *
 * This shifts the 32-bit handle value left to a "page-aligned" value that
 * can be passed as the offset argument to vm_mmap(). The vm_mmap()
 * function then shifts its offset argument 12 bits to the right
 * before assigning it to the vm_pgoff member of the vm_area struct
* that is passed to the mmap handler.
*
* This way, the vm_pgoff member contains the original 32-bit handle
* value.
*
* Return: A value that can be passed as an offset into a vm_mmap() call.
*/
static inline off_t
handle_to_offset(u32 handle)
{
return (off_t)handle << PAGE_SHIFT;
}
/**
* handle_from_vm_pgoff() - convert a vm_pgoff value to a 32-bit handle.
* @pgoff: a page-aligned value passed into this driver's mmap handler.
*
 * Convert the vm_pgoff value from the mmap handler's vm_area_struct into
* a 32-bit handle value.
*
* Return: The unique 32-bit pseudo random value from the vm_mmap() argument.
*/
static inline u32
handle_from_vm_pgoff(unsigned long pgoff)
{
return (u32)pgoff;
}
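/*
 * Worked round trip of the two helpers above (PAGE_SHIFT == 12
 * assumed, as on x86-64): a handle of 0x1234abcd becomes the
 * vm_mmap() offset 0x1234abcd000; vm_mmap() shifts that right by
 * 12 bits into vma->vm_pgoff, so handle_from_vm_pgoff(vma->vm_pgoff)
 * recovers 0x1234abcd in the mmap handler.
 */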
/**
* get_random_handle()
*
* Generate a pseudo-random handle value, to be used during mmap operations.
*
 * Return: a 32-bit pseudo-random value.
*/
static inline u32
get_random_handle(void)
{
return (u32)get_cycles();
}
/*
* File operation functions
*/
static int hfi1_gdr_open(struct inode *inode, struct file *fp);
static int hfi1_gdr_release(struct inode *inode, struct file *fp);
static int hfi1_gdr_mmap(struct file *fp, struct vm_area_struct *vma);
static long hfi1_gdr_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg);
static const struct file_operations hfi1_gdr_ops = {
.owner = THIS_MODULE,
.open = hfi1_gdr_open,
.release = hfi1_gdr_release,
.unlocked_ioctl = hfi1_gdr_ioctl,
.mmap = hfi1_gdr_mmap,
.llseek = noop_llseek,
};
static bool gdr_rb_filter(struct mmu_rb_node *node, unsigned long addr,
unsigned long len)
{
return (bool)(node->addr == addr);
}
static int gdr_rb_insert(void *arg, struct mmu_rb_node *mnode)
{
return 0;
}
static int gdr_rb_evict(void *arg, struct mmu_rb_node *mnode,
void *evict_arg, bool *stop)
{
return 0;
}
static void gdr_rb_remove(void *arg, struct mmu_rb_node *mnode)
{
}
static int gdr_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
return 0;
}
static struct mmu_rb_ops gdr_rb_ops = {
.filter = gdr_rb_filter,
.insert = gdr_rb_insert,
.evict = gdr_rb_evict,
.remove = gdr_rb_remove,
.invalidate = gdr_rb_invalidate
};
/**
* hfi1_gdr_open() - the open file_operations handler for this driver.
* @inode: Pointer to an inode for this special file.
* @filep: Pointer to an open file structure for this open instance.
*
* Allocate and initialize a hfi1_gdrdata structure.
*
* Return:
 * 0 on success, -ENOMEM for a failed memory allocation, or the
 * error returned by hfi1_mmu_rb_register().
*/
static int hfi1_gdr_open(struct inode *inode, struct file *filep)
{
int ret = 0;
struct hfi1_gdrdata *gd;
gd = kzalloc(sizeof(*gd), GFP_KERNEL);
if (!gd)
return -ENOMEM;
filep->private_data = gd;
mutex_init(&gd->mr_lock);
ret = hfi1_mmu_rb_register(gd, NULL, &gdr_rb_ops,
gd->gdr_wq, &gd->gdr_handler);
if (ret) {
filep->private_data = NULL;
kfree(gd);
return ret;
}
INIT_LIST_HEAD(&gd->lru_list);
gd->n_pages_locked = 0;
return ret;
}
/**
* gdrdrv_get_pages_free_callback() - Callback handler for unpinning a GPU buf.
* @data: - A pointer to a struct gdr_mr describing the gpu buf being freed.
*
* This is a callback function that the Nvidia driver calls when
* a user frees a GPU buffer that has been pinned. This function unmaps and
* unpins that gpu buffer.
*
* The GPU buffer can ALSO be unmmapped and unpinned through the
* HFI1_IOCTL_GDR_GPU_MUNMAP_UNPIN ioctl, calling the nvidia_p2p_put_pages()
* function. It's possible for that ioctl() operation to race with this
* callback.
*
* Ultimately, the nvidia_p2p_put_pages() function determines which code
* path wins this race. If nvidia_p2p_put_pages() "wins", then
* this callback function will not be called for that GPU buffer. If this
* callback handler "wins", then nvidia_p2p_put_pages() will fail with an
* -EINVAL error code.
*
* Both this ioctl() function and this callback handler acquire
* the mr_lock mutex to serialize operations on the memory mapping
* of this GPU buf. So it's important that the ioctl() function release
* this mr_lock BEFORE calling nvidia_p2p_put_pages(), otherwise that
* ioctl() function could deadlock on that mutex with this callback handler.
*
* mr->page_table SHOULD NEVER be NULL when this function is entered.
* If that happens, it indicates a bug either in this driver, or in
 * the NVidia driver. But out of a sense of paranoia, we WARN on this
* case, and call free_gpu_page_table() ONLY when this pointer is NOT
* NULL.
*/
static void gdrdrv_get_pages_free_callback(void *data)
{
struct hfi1_gdrdata *gd;
struct gdr_mr *mr = data;
unsigned int npages;
gd = mr->gd;
mutex_lock(&gd->mr_lock);
gdrdrv_munmap(mr);
/*
* mr->page_table SHOULD NEVER be NULL in this code, but see comment
* above about paranoia.
*/
WARN_ON(!mr->page_table);
if (mr->page_table) {
free_gpu_page_table(mr->page_table);
mr->page_table = NULL;
npages = num_user_pages_gpu(mr->rb_node.addr, mr->rb_node.len);
gd->n_pages_locked -= npages;
}
release_callback_mr_ref(mr);
mutex_unlock(&gd->mr_lock);
}
/**
* do_munmap_and_unpin_gpu_buf() - unpin and unmap a gpu buffer.
 * @mr: A pointer to a struct gdr_mr for the gpu buffer to be unpinned/unmapped
*
* This is called when the user has requested a GPU buffer to be unpinned,
* or also at various places in this driver to unwind from an error situation,
* or to "evict" an entry from the cache.
*
* This function can race with gdrdrv_get_pages_free_callback(). See comments
* below for details on how that race is resolved.
*
* The caller of this function must hold an "ioctl" reference on this mr.
* This function releases that "ioctl" reference. It may also under
 * some circumstances release the "callback" reference.
*
* This mr structure MAY be freed during the releasing of these references.
*/
static void
do_munmap_and_unpin_gpu_buf(struct gdr_mr *mr)
{
struct hfi1_gdrdata *gd = mr->gd;
unsigned int npages;
int ret;
mutex_lock(&gd->mr_lock);
gdrdrv_munmap(mr);
mutex_unlock(&gd->mr_lock);
/*
* This function call can race with gdrdrv_get_pages_free_callback().
* The Nvidia driver's nvidia_p2p_put_pages() function serializes these
* events. nvidia_p2p_put_pages() has three expected return values:
*
* 0 - nvidia_p2p_put_pages() is successful, indicating
* that nvidia_p2p_put_pages() has unpinned this GPU buffer,
* and the gdrdrv_get_pages_free_callback() has been
* unregistered for that GPU buffer.
*
* -EINVAL - nvidia_p2p_put_pages() lost the race with
* gdrdrv_get_pages_free_callback(). The callback
* handler unmapped and unpinned the GPU buffer.
*
* -EIO - This is an "unknown" error.
*
* If gdrdrv_get_pages_free_callback() executes first, then it will
* unpin this buffer (it was already unmmapped above), and
* it will release the "callback mr reference". In this case
* the nvidia_p2p_put_pages() will return -EINVAL, so in this
* case we need only release the "ioctl mr reference".
*
* If nvidia_p2p_put_pages() executes first, then it will unpin
* this GPU buffer and also unregister the free callback handler.
* So the free callback handler will never be called for this
	 * gdr_mr. In this case, nvidia_p2p_put_pages() returns 0 for
	 * "success". This means this code path also needs to release the
* "callback mr reference".
*
* In the case where nvidia_p2p_put_pages() succeeds, the "free
* callback handler" does NOT get called for this GPU buffer.
* nvidia_p2p_put_pages() deregisters the "free callback" handler
* from this GPU buffer.
*/
ret = nvidia_p2p_put_pages(0, 0, mr->rb_node.addr, mr->page_table);
mutex_lock(&gd->mr_lock);
npages = num_user_pages_gpu(mr->rb_node.addr, mr->rb_node.len);
gd->n_pages_locked -= npages;
release_ioctl_mr_ref(mr);
if (!ret) {
mr->page_table = NULL;
release_callback_mr_ref(mr);
}
mutex_unlock(&gd->mr_lock);
}
/**
* create_mr() - create a new memory region
* @gd: A pointer to the hfi1_gdrdata for this open file descriptor.
* @gpu_buf_addr: GPU buffer start address
* @gpu_buf_size: GPU buffer size
*
 * kref_init() initializes the reference count on this gdr_mr
* to 1. This is treated as the "ioctl_mr_ref()" for this
* gdr_mr.
*
* Return:
* Pointer to the new mr if successful. NULL otherwise.
*/
static struct gdr_mr *create_mr(struct hfi1_gdrdata *gd,
u64 gpu_buf_addr,
u32 gpu_buf_size)
{
struct gdr_mr *mr;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return mr;
mr->rb_node.addr = gpu_buf_addr;
mr->rb_node.len = gpu_buf_size;
mr->gd = gd;
mr->mr_handle = get_random_handle();
mr->host_addr = 0;
mr->page_table = NULL;
kref_init(&mr->ref_cnt);
return mr;
}
/**
* evict_gpu_cache() - Evict certain number of pages from a cache
* @gd: A pointer to the hfi1_gdrdata for this open file descriptor.
* @target: Target number of pages to be evicted from the cache.
*
* The caller must acquire gd->mr_lock mutex before calling.
*
* Return:
* Number of pages evicted from the cache.
*/
static u32 evict_gpu_cache(struct hfi1_gdrdata *gd, u32 target)
{
struct gdr_mr *mr, *ptr;
unsigned int npages, cleared = 0;
list_for_each_entry_safe_reverse(mr, ptr, &gd->lru_list, list) {
npages = num_user_pages_gpu(mr->rb_node.addr, mr->rb_node.len);
/* this node will be evicted, add its pages to our count */
cleared += npages;
acquire_ioctl_mr_ref(mr);
mutex_unlock(&gd->mr_lock);
do_munmap_and_unpin_gpu_buf(mr);
mutex_lock(&gd->mr_lock);
/* have enough pages been cleared? */
if (cleared >= target)
break;
}
return cleared;
}
/**
* do_pin_and_mmap_gpu_buf() - pin and mmap a gpu buffer
* @fp: A pointer to an open file structure for this file.
* @gd: A pointer to the hfi1_gdrdata for this open file.
* @mr: A pointer to struct gdr_mr which contains the starting address and
* the size of the gpu buffer.
*
* The caller has searched the red/black tree of gdr_mr structures and
* didn't find one that matches the GPU desired buffer. This routine pins and
* mmaps the GPU buffer and adds it to the tree of gdr_mr structures.
*
* Side Effects:
* This function uses gd->map_this_mr to indirectly pass a pointer to a
* gdr_mr struct to this driver's mmap handler, hfi1_gdr_mmap(), outside
* the normal call stack mechanism.
*
* gd->map_this_mr is set prior to calling vm_mmap(). vm_mmap() will
* call hfi1_gdr_mmap() to do this driver-specific memory map operation.
* The gd->map_this_mr pointer allows hfi1_gdr_mmap() to quickly find
* this descriptor for the physical addresses that are to be mmapped.
*
* When vm_mmap() returns, the gd->map_this_mr is cleared as clean up
* after the mmap operation has completed.
*
* Return:
* 0 - success,
* other - unable to pin or mmap the gpu buffer
*/
static int
do_pin_and_mmap_gpu_buf(struct file *fp,
struct hfi1_gdrdata *gd,
struct gdr_mr *mr)
{
unsigned long virtual, max_cache_pages;
unsigned int npages, target, cleared;
int ret = 0;
mutex_lock(&gd->mr_lock);
max_cache_pages = ((gpu_cache_size << 20) >> NV_GPU_PAGE_SHIFT);
npages = num_user_pages_gpu(mr->rb_node.addr, mr->rb_node.len);
if (gd->n_pages_locked + npages > max_cache_pages) {
if (npages > max_cache_pages) {
mutex_unlock(&gd->mr_lock);
return -EINVAL;
}
target = npages - (max_cache_pages - gd->n_pages_locked);
cleared = evict_gpu_cache(gd, target);
WARN_ON(cleared < target);
}
ret = get_gpu_pages(mr->rb_node.addr, mr->rb_node.len,
&mr->page_table,
gdrdrv_get_pages_free_callback,
mr);
if (ret) {
/*
* As per NVIDIA's documentation, nvidia_p2p_get_pages API
* returns -EINVAL if an invalid argument was supplied.
* While testing the code, it was found that the API may
* return -EINVAL and not -ENOMEM if there isn't enough GPU
* BAR memory to pin the required number of pages. So if the
* error return is -ENOMEM or -EINVAL, we make an attempt to
* evict npages from the pinned buffer cache and try
* pinning the buffer again.
*/
if ((ret == -ENOMEM) || (ret == -EINVAL)) {
target = npages;
cleared = evict_gpu_cache(gd, target);
ret = get_gpu_pages(mr->rb_node.addr,
mr->rb_node.len,
&mr->page_table,
gdrdrv_get_pages_free_callback,
mr);
if (!ret)
goto pin_success;
}
mutex_unlock(&gd->mr_lock);
kfree(mr);
return ret;
}
pin_success:
gd->n_pages_locked += npages;
acquire_callback_mr_ref(mr);
/*
* Save this mr so that this driver's mmap handler hfi1_gdr_mmap()
* can find it quickly and process it. On success, the pinned
* GPU memory described by this mr will be mmapped into the user's
* address space.
*/
gd->map_this_mr = mr;
mutex_unlock(&gd->mr_lock);
/*
 * mmap the set of physical pages that are in the mr->page_table
* list of physical page addresses. mr->page_table was
* constructed by the get_gpu_pages() call above.
*/
virtual = vm_mmap(fp, 0, mr->rb_node.len,
PROT_READ|PROT_WRITE,
MAP_SHARED,
handle_to_offset(mr->mr_handle));
mutex_lock(&gd->mr_lock);
/*
* The mmap operation on this pinned GPU buffer has completed,
* so clean up. This cleanup is important, as hfi1_gdr_mmap()
* tests for a NULL gd->map_this_mr to identify cases where the
* application has called the mmap(2) system call.
*/
gd->map_this_mr = NULL;
if (!mr->page_table) {
/*
* In this case, the free callback handler has unpinned
* this gdr_mr structure. But it's POSSIBLE that the
* mapping of that pinned buffer succeeded.
*
* But the virtual address of that mapping isn't available
* until the vm_mmap() call has returned here. So the
* callback handler can not have done the proper unmap.
* So we need to do the munmap here, even though the
* GPU buffer has already been unpinned.
*/
if (!IS_ERR((void *)virtual)) {
int munmap_ret;
munmap_ret = vm_munmap(mr->host_addr, mr->rb_node.len);
WARN_ON(munmap_ret);
}
release_ioctl_mr_ref(mr);
mutex_unlock(&gd->mr_lock);
return -EINVAL;
}
/*
* This is a case where the GPU buffer is still pinned,
* but this driver's mmap handler failed for some other
* reason.
*
* So the mr->host_addr is not set in this error case,
* but we still want to unpin this GPU buffer.
*/
if (IS_ERR((void *)virtual)) {
WARN_ON(1);
mutex_unlock(&gd->mr_lock);
do_munmap_and_unpin_gpu_buf(mr);
return virtual;
}
mr->host_addr = virtual;
ret = hfi1_mmu_rb_insert(gd->gdr_handler, &mr->rb_node);
	WARN_ON(ret);
list_add(&mr->list, &gd->lru_list);
mutex_unlock(&gd->mr_lock);
return ret;
}
/**
* fetch_user_query_ioctl_params - fetch and validate arguments.
 * @arg: A user-space pointer to the query parameter block.
 * @query_params: The kernel copy of the query parameter block.
 *
 * Fetch from user space the query parameter block and validate its content.
 *
 * Return:
 * 0 on success
 * -EFAULT on bad parameter block address
 * -ENODEV on a version mismatch in the parameter block
 * -EINVAL on invalid content of parameter block
*/
int
fetch_user_query_ioctl_params(unsigned long arg,
struct hfi1_gdr_query_params *query_params)
{
if (copy_from_user(query_params,
		   (struct hfi1_gdr_query_params __user *)arg,
sizeof(*query_params)))
return -EFAULT;
if (query_params->query_params_in.version != HFI1_GDR_VERSION)
return -ENODEV;
if ((!query_params->query_params_in.gpu_buf_size) ||
(query_params->query_params_in.gpu_buf_addr & ~NV_GPU_PAGE_MASK) ||
(query_params->query_params_in.gpu_buf_size & ~NV_GPU_PAGE_MASK))
return -EINVAL;
return 0;
}
/**
* ioctl_gpu_buf_cache_evict() - process the HFI1_IOCTL_GDR_GPU_CACHE_EVICT
* @gd: A pointer to the hfi1_gdrdata structure for this file.
* @arg: A pointer to the struct hfi1_gdr_cache_evict_params argument.
*
 * This function handles ioctl() requests to evict a certain number of
 * pages from a GPU buffer cache and/or to query the number of pages
 * in the cache. Requests to evict pages have a non-zero pages_to_evict
 * input parameter. The number of pages evicted and the number of pages
 * remaining in the cache are returned in the output parameters.
 *
 * Return:
 * 0 - success,
 * -ENODEV - The parameter block version did not match HFI1_GDR_VERSION,
 * -EFAULT - The copy_from_user() or copy_to_user() function failed.
*/
static int
ioctl_gpu_buf_cache_evict(struct hfi1_gdrdata *gd,
unsigned long arg)
{
unsigned int cleared = 0, target;
struct hfi1_gdr_cache_evict_params evict_params;
if (copy_from_user(&evict_params,
(struct hfi1_gdr_cache_evict_params __user *)arg,
sizeof(evict_params)))
return -EFAULT;
if (evict_params.evict_params_in.version != HFI1_GDR_VERSION)
return -ENODEV;
target = evict_params.evict_params_in.pages_to_evict;
mutex_lock(&gd->mr_lock);
if (target > 0)
cleared = evict_gpu_cache(gd, target);
evict_params.evict_params_out.pages_evicted = cleared;
evict_params.evict_params_out.pages_in_cache = gd->n_pages_locked;
mutex_unlock(&gd->mr_lock);
if (copy_to_user((struct hfi1_gdr_cache_evict_params __user *)arg,
&evict_params,
sizeof(evict_params)))
return -EFAULT;
return 0;
}
/**
* ioctl_gpu_buf_pin_mmap() - process the HFI1_IOCTL_GDR_GPU_PIN_MMAP
* @fp: A pointer to the open file structure for this file.
* @gd: A pointer to the hfi1_gdrdata structure for this file.
* @arg: A pointer to the struct hfi1_gdr_query_params argument.
*
* This function handles ioctl() requests to return a host address
* for a pinned and mmapped gpu buffer.
*
 * Search the red/black tree for the desired gpu buffer among the tree
 * of struct gdr_mr structures. If it isn't found, then pin and mmap it,
 * and add it to the red/black tree.
 *
 * On success, return to the user the host address of this mapping.
 * The "fast path" through this code is when the gpu buffer is already
 * found in the red/black tree. We want this case to run with minimal
 * overhead.
 *
 * Return:
 * 0 - success,
 * -EFAULT - The copy_from_user() or copy_to_user() function failed,
 * -ENODEV - The parameter block version did not match HFI1_GDR_VERSION,
 * -EINVAL - The gpu buffer described is not properly aligned, too large
 * for the cache, or could not be pinned and mmapped.
*/
static int
ioctl_gpu_buf_pin_mmap(struct file *fp,
struct hfi1_gdrdata *gd,
unsigned long arg)
{
struct hfi1_gdr_query_params query_params;
struct gdr_mr *mr = NULL;
int ret = 0;
struct mmu_rb_node *rb_node;
ret = fetch_user_query_ioctl_params(arg, &query_params);
if (ret)
return ret;
mutex_lock(&gd->mr_lock);
rb_node = hfi1_mmu_rb_search_addr(gd->gdr_handler,
query_params.query_params_in.gpu_buf_addr,
query_params.query_params_in.gpu_buf_size);
if (rb_node) {
/*
		 * A buffer was found in the cache that partially or
		 * completely overlaps the requested address range.
*/
mr = container_of(rb_node, struct gdr_mr, rb_node);
if (rb_node->len >=
query_params.query_params_in.gpu_buf_size) {
/*
* Buffer found in the cache that has the same
* starting address and is the same length as or is
* longer than the requested buffer.
*
* This code path never releases mr_lock after
* the gdr_mr struct has been found in the r/b tree,
* until after it is completely finished with that
* gdr_mr struct.
*
* So while LOGICALLY we could acquire and release
* an ioctl reference in this code path, there is no
* need to actually do that.
*/
query_params.query_params_out.host_buf_addr =
mr->host_addr;
/*
* Since this mr is being "used", move this mr to
* the end of the list.
*/
list_del(&mr->list);
list_add(&mr->list, &gd->lru_list);
mutex_unlock(&gd->mr_lock);
goto skip_pin_mmap;
} else {
/*
* rb_node->len < gpu_buf_size
*
			 * A buffer found in the cache that has the same
			 * starting address but is of lesser length.
			 * Need to unmap and unpin the old, smaller
			 * GPU buffer, freeing the struct gdr_mr in the
			 * process.
*
* A new struct gdr_mr will be allocated, and a
* new, larger GPU buffer will be pinned and
* mmapped below.
*/
acquire_ioctl_mr_ref(mr);
mutex_unlock(&gd->mr_lock);
do_munmap_and_unpin_gpu_buf(mr);
mutex_lock(&gd->mr_lock);
}
}
	mr = create_mr(gd,
		       query_params.query_params_in.gpu_buf_addr,
		       query_params.query_params_in.gpu_buf_size);
	mutex_unlock(&gd->mr_lock);
	if (!mr)	/* assumes create_mr() returns NULL on allocation failure */
		return -ENOMEM;
ret = do_pin_and_mmap_gpu_buf(fp, gd, mr);
if (!ret) {
mutex_lock(&gd->mr_lock);
query_params.query_params_out.host_buf_addr = mr->host_addr;
release_ioctl_mr_ref(mr);
mutex_unlock(&gd->mr_lock);
}
skip_pin_mmap:
	if ((!ret) && (copy_to_user((struct hfi1_gdr_query_params __user *)arg,
&query_params,
sizeof(query_params))))
return -EFAULT;
return ret;
}
/**
 * ioctl_gpu_buf_munmap_unpin() - process the HFI1_IOCTL_GDR_GPU_MUNMAP_UNPIN
* @fp: A pointer to the open file structure for this file.
* @gd: A pointer to the hfi1_gdrdata structure for this file.
* @arg: A pointer to the struct hfi1_gdr_query_params argument.
*
* This function handles the ioctl() request to unpin and unmap a gpu buffer.
*
 * Return:
 * 0 - success,
 * -EFAULT - The copy_from_user() function failed,
 * -ENODEV - The parameter block version did not match HFI1_GDR_VERSION,
 * -EINVAL - The gpu buffer described is not properly aligned,
 * -ENOENT - The desired gpu buffer was not found in the cache.
*/
static int
ioctl_gpu_buf_munmap_unpin(struct file *fp,
struct hfi1_gdrdata *gd,
unsigned long arg)
{
struct hfi1_gdr_query_params query_params;
struct gdr_mr *mr = NULL;
int ret = 0;
struct mmu_rb_node *rb_node;
ret = fetch_user_query_ioctl_params(arg, &query_params);
if (ret)
return ret;
mutex_lock(&gd->mr_lock);
rb_node = hfi1_mmu_rb_search_addr(gd->gdr_handler,
query_params.query_params_in.gpu_buf_addr,
query_params.query_params_in.gpu_buf_size);
if (rb_node) {
mr = container_of(rb_node, struct gdr_mr, rb_node);
acquire_ioctl_mr_ref(mr);
}
mutex_unlock(&gd->mr_lock);
if (mr)
do_munmap_and_unpin_gpu_buf(mr);
else
ret = -ENOENT;
return ret;
}
/**
* hfi1_gdr_ioctl() - This is this driver's ioctl() handler
* @fp: A pointer to the open file structure for this file.
* @cmd: The ioctl() command to execute.
* @arg: The argument to this ioctl() request.
*
* Only one thread at a time is allowed to execute this ioctl() handler.
* If there is a thread executing in this handler, and a second thread
* calls this ioctl(), the second thread will get a -EINVAL failure.
*
 * There are three recognized commands to this ioctl.
 *
 * HFI1_IOCTL_GDR_GPU_PIN_MMAP, a request to pin and mmap a gpu buffer
 * and return the host address of the resulting mapping.
 *
 * HFI1_IOCTL_GDR_GPU_MUNMAP_UNPIN, a request to munmap and unpin
 * a gpu buffer.
 *
 * HFI1_IOCTL_GDR_GPU_CACHE_EVICT, a request to evict pages from the
 * pinned buffer cache and/or query the number of pages in the cache.
*
* Return:
* 0 - success,
* -EINVAL - There is already a thread executing in this ioctl handler,
* -EINVAL - the cmd argument is not a known value.
*/
static long hfi1_gdr_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg)
{
struct hfi1_gdrdata *gd = fp->private_data;
int ret = 0;
if (atomic_cmpxchg(&gd->ioctl_busy_flag, 0, 1))
return -EINVAL;
switch (cmd) {
case HFI1_IOCTL_GDR_GPU_PIN_MMAP:
ret = ioctl_gpu_buf_pin_mmap(fp, gd, arg);
break;
case HFI1_IOCTL_GDR_GPU_MUNMAP_UNPIN:
ret = ioctl_gpu_buf_munmap_unpin(fp, gd, arg);
break;
case HFI1_IOCTL_GDR_GPU_CACHE_EVICT:
ret = ioctl_gpu_buf_cache_evict(gd, arg);
break;
default:
ret = -EINVAL;
}
WARN_ON(atomic_read(&gd->ioctl_busy_flag) != 1);
atomic_set(&gd->ioctl_busy_flag, 0);
return ret;
}
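/*
 * Illustrative user-space sketch (not part of this driver, kept under
 * "#if 0" so this file still compiles): one way a client could drive the
 * ioctl interface above. The device node path "/dev/hfi1_gdr" is an
 * assumption; the real name comes from GDR_DEV_NAME at device creation.
 * The parameter block fields match those referenced by the handlers.
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int gdr_pin_example(uint64_t gpu_addr, uint64_t gpu_len)
{
	struct hfi1_gdr_query_params params = { 0 };
	int ret;
	int fd = open("/dev/hfi1_gdr", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return -1;
	params.query_params_in.version = HFI1_GDR_VERSION;
	/* both must be NV GPU page aligned, or the ioctl fails -EINVAL */
	params.query_params_in.gpu_buf_addr = gpu_addr;
	params.query_params_in.gpu_buf_size = gpu_len;
	ret = ioctl(fd, HFI1_IOCTL_GDR_GPU_PIN_MMAP, &params);
	if (!ret) {
		/* params.query_params_out.host_buf_addr now maps the buffer */
		ioctl(fd, HFI1_IOCTL_GDR_GPU_MUNMAP_UNPIN, &params);
	}
	close(fd);
	return ret;
}
#endif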
/**
* gdr_mmap_phys_mem_wcomb()
* @vma: A pointer to a vm_area_struct for physical memory segment to be mmaped.
* @vaddr: A virtual address to mmap a physical memory segment.
* @paddr: The physical address of the physical memory segment.
* @size: The size of the physical memory segment.
*
 * This function remaps a contiguous physical memory region into the user's
 * address space. The mapping is in write-combining mode.
*
* Return:
* 0 - success,
* -EAGAIN - the mmap request failed
*/
static int gdr_mmap_phys_mem_wcomb(struct vm_area_struct *vma,
unsigned long vaddr,
unsigned long paddr,
size_t size)
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vaddr,
PHYS_PFN(paddr),
size,
vma->vm_page_prot))
return -EAGAIN;
return 0;
}
/**
* hfi1_gdr_mmap() - This driver's mmap handler.
* @filep: A pointer to the open file structure for this file.
* @vma: A pointer to the vma_area struct.
*
* This driver's mmap handler is normally invoked through this driver's
* ioctl handler when it is mmapping a pinned GPU buffer into the user's
* address space.
*
* This driver's mmap handler COULD also be called by the user calling
* the mmap() system call. This is not a valid case for this driver,
* and that mmap() system call will fail.
*
* Return:
* 0 - success,
* -EINVAL - The requested gpu buffer description is invalid
*/
static int hfi1_gdr_mmap(struct file *filep, struct vm_area_struct *vma)
{
int ret = 0;
size_t size = vma->vm_end - vma->vm_start;
struct hfi1_gdrdata *gd = filep->private_data;
struct gdr_mr *mr;
int p = 0;
unsigned long vaddr, prev_page_paddr;
int phys_contiguous = 1;
mutex_lock(&gd->mr_lock);
mr = gd->map_this_mr;
/*
* map_this_mr being NULL indicates that the user has
* called mmap directly on this file descriptor. We want
* to fail this mmap attempt.
*/
if (!mr) {
ret = -EINVAL;
goto out;
}
/*
* If the handle value from vm_pgoff does not match mr_handle,
* this means the user has called mmap() system call and
* raced with the ioctl() calling vm_mmap(). Fail this case as well.
*/
if (mr->mr_handle != handle_from_vm_pgoff(vma->vm_pgoff)) {
ret = -EINVAL;
goto out;
}
/*
* This code path lost a race with the free callback handler,
* which has unpinned and unmapped this GPU buffer.
*/
if (!mr->page_table) {
ret = -EINVAL;
goto out;
}
/*
* check for physically contiguous IO range
*/
vaddr = vma->vm_start;
prev_page_paddr = mr->page_table->pages[0]->physical_address;
phys_contiguous = 1;
for (p = 1; p < mr->page_table->entries; ++p) {
struct nvidia_p2p_page *page = mr->page_table->pages[p];
unsigned long page_paddr = page->physical_address;
if (prev_page_paddr + NV_GPU_PAGE_SIZE != page_paddr) {
phys_contiguous = 0;
break;
}
prev_page_paddr = page_paddr;
}
if (phys_contiguous) {
size_t len = min(size,
NV_GPU_PAGE_SIZE * mr->page_table->entries);
unsigned long page0_paddr =
mr->page_table->pages[0]->physical_address;
ret = gdr_mmap_phys_mem_wcomb(vma,
vaddr,
page0_paddr,
len);
if (ret)
goto out;
} else {
/*
* If not contiguous, map individual GPU pages separately.
		 * In this case, write-combining performance can be
		 * noticeably worse; the cause is not understood.
*/
p = 0;
while (size && p < mr->page_table->entries) {
struct nvidia_p2p_page *page = mr->page_table->pages[p];
unsigned long page_paddr = page->physical_address;
size_t len = min(NV_GPU_PAGE_SIZE, size);
ret = gdr_mmap_phys_mem_wcomb(vma,
vaddr,
page_paddr,
len);
if (ret)
goto out;
vaddr += len;
size -= len;
++p;
}
}
out:
mutex_unlock(&gd->mr_lock);
return ret;
}
/**
 * hfi1_gdr_release() - This driver's release handler.
 * @inode: A pointer to the inode for this file.
 * @filep: A pointer to the open file structure for this file.
*
* This function unpins and unmaps all gpu buffers in the list of
* struct gdr_mr structures.
*
* Return:
* 0 - success
*/
static int hfi1_gdr_release(struct inode *inode, struct file *filep)
{
struct gdr_mr *mr;
struct mmu_rb_node *mnode;
struct hfi1_gdrdata *gd = filep->private_data;
filep->private_data = NULL;
mutex_lock(&gd->mr_lock);
while ((mnode = hfi1_mmu_rb_first_cached(gd->gdr_handler))) {
mr = container_of(mnode, struct gdr_mr, rb_node);
acquire_ioctl_mr_ref(mr);
mutex_unlock(&gd->mr_lock);
do_munmap_and_unpin_gpu_buf(mr);
mutex_lock(&gd->mr_lock);
}
mutex_unlock(&gd->mr_lock);
hfi1_mmu_rb_unregister(gd->gdr_handler);
kfree(gd);
return 0;
}
static char *gdr_devnode(struct device *dev, umode_t *mode)
{
if (mode)
*mode = 0666;
return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
}
static int add_gdr_dev(void)
{
int ret = 0;
gdr_major = register_chrdev(0, GDR_DRIVER_NAME, &hfi1_gdr_ops);
if (gdr_major < 0) {
gdr_major = 0;
ret = -ENODEV;
goto out;
}
gdr_dev = MKDEV(gdr_major, 0);
gdr_class = class_create(THIS_MODULE, GDR_CLASS_NAME);
if (IS_ERR(gdr_class)) {
ret = PTR_ERR(gdr_class);
gdr_class = NULL;
goto out;
}
gdr_class->devnode = gdr_devnode;
gdr_device = device_create(gdr_class, NULL, gdr_dev,
NULL, "%s", GDR_DEV_NAME);
if (IS_ERR(gdr_device)) {
		ret = PTR_ERR(gdr_device);
gdr_device = NULL;
goto out;
}
out:
return ret;
}
static void remove_gdr_dev(void)
{
if (gdr_device) {
device_unregister(gdr_device);
gdr_device = NULL;
}
if (gdr_class) {
class_destroy(gdr_class);
gdr_class = NULL;
}
if (gdr_major) {
unregister_chrdev(gdr_major, GDR_DRIVER_NAME);
gdr_major = 0;
}
}
/**
* hfi1_gdr_device_create() - Create gdr device and its file in /dev
*
 * Return: 0 on success, negative error code on failure
*/
int hfi1_gdr_device_create(void)
{
int ret = 0;
ret = add_gdr_dev();
if (ret)
remove_gdr_dev();
return ret;
}
/**
* hfi1_gdr_device_remove() - Remove gdr device and its file in /dev
*/
void hfi1_gdr_device_remove(void)
{
remove_gdr_dev();
}
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/ipoib_rx.c
|
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "netdev.h"
#include "ipoib.h"
#define HFI1_IPOIB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
{
void *dst_data;
skb_checksum_none_assert(skb);
skb->protocol = *((__be16 *)data);
dst_data = skb_put(skb, size);
memcpy(dst_data, data, size);
skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
}
static struct sk_buff *prepare_frag_skb(struct napi_struct *napi, int size)
{
struct sk_buff *skb;
int skb_size = SKB_DATA_ALIGN(size + HFI1_IPOIB_SKB_PAD);
void *frag;
skb_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
skb_size = SKB_DATA_ALIGN(skb_size);
frag = napi_alloc_frag(skb_size);
if (unlikely(!frag))
return napi_alloc_skb(napi, size);
skb = build_skb(frag, skb_size);
if (unlikely(!skb)) {
skb_free_frag(frag);
return NULL;
}
skb_reserve(skb, HFI1_IPOIB_SKB_PAD);
return skb;
}
struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
int size, void *data)
{
struct napi_struct *napi = &rxq->napi;
int skb_size = size + HFI1_IPOIB_ENCAP_LEN;
struct sk_buff *skb;
	/*
	 * For smaller (4k + skb overhead) allocations we use the napi
	 * skb cache. Otherwise we try the napi frag cache.
	 */
if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE))
skb = napi_alloc_skb(napi, skb_size);
else
skb = prepare_frag_skb(napi, skb_size);
if (unlikely(!skb))
return NULL;
copy_ipoib_buf(skb, data, size);
return skb;
}
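/*
 * Illustrative sketch (an assumption, not a function from this driver):
 * a receive handler would typically wrap hfi1_ipoib_prepare_skb() along
 * these lines, handing the copied frame to NAPI. "data" and "size" are
 * assumed to describe one received IPoIB payload.
 */
static inline void hfi1_ipoib_rx_example(struct hfi1_netdev_rxq *rxq,
					 void *data, int size)
{
	struct sk_buff *skb = hfi1_ipoib_prepare_skb(rxq, size, data);

	/* on allocation failure the packet is simply dropped */
	if (likely(skb))
		napi_gro_receive(&rxq->napi, skb);
}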
int hfi1_ipoib_rxq_init(struct net_device *netdev)
{
struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
struct hfi1_devdata *dd = ipoib_priv->dd;
int ret;
ret = hfi1_netdev_rx_init(dd);
if (ret)
return ret;
hfi1_init_aip_rsm(dd);
return ret;
}
void hfi1_ipoib_rxq_deinit(struct net_device *netdev)
{
struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
struct hfi1_devdata *dd = ipoib_priv->dd;
hfi1_deinit_aip_rsm(dd);
hfi1_netdev_rx_destroy(dd);
}
|
cornelisnetworks/opa-hfi1
|
compat/RH78/compat.h
|
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Copyright(c) 2020 Intel Corporation.
*/
#if !defined(RH78_COMPAT_H)
#define RH78_COMPAT_H
#define HAVE_IB_GID_ATTR
#define POST_HAS_CONST
#define CREATE_FLOW_HAS_UDATA
#define CREATE_AH_HAS_UDATA
#define HAVE_ALLOC_RDMA_NETDEV
#define HAVE_RDMA_NETDEV_GET_PARAMS
#define HAVE_NET_DEVICE_EXTENDED
#define HAVE_ARRAY_SIZE
#define HAVE_NOSPEC_H
#define VM_OPS_FAULT_HAVE_VMA
#define HAVE_MAX_SEND_SGE
#define HAVE_KMALLOC_ARRAY_NODE
#define HAVE_IBDEV_DRIVER_ID
#define HAVE_IB_GET_CACHED_SUBNET_PREFIX
#define HAVE_SECURITY_H
#define HAVE_AIO_WRITE
#define HAVE_DEVICE_RH
#define NEED_KTHREAD_HELPER_FUNCTIONS
#define NEED_CURRENT_TIME
#define HAVE_RDMA_COPY_AH_ATTR
#include "compat_common.h"
#define __GFP_RECLAIM (__GFP_WAIT)
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
#define NET_NAME_UNKNOWN 0
#define rdma_set_device_sysfs_group(a, b)
#define alloc_netdev_mqs(size, name, name_assign_type, setup, sdma, ctxts) \
alloc_netdev_mqs((size), (name), (setup), (sdma), (ctxts))
#define rdma_create_ah(a, b, c) rdma_create_ah(a, b)
#define rdma_destroy_ah(a, b) rdma_destroy_ah(a)
#undef access_ok
#define access_ok(addr, size) \
(likely(__range_not_ok(addr, size, user_addr_max()) == 0))
#define _ib_alloc_device ib_alloc_device
struct hfi1_msix_entry;
struct hfi1_devdata;
void pcie_flr(struct pci_dev *dev);
int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
int debugfs_use_file_start(struct dentry *dentry, int *srcu_idx)
__acquires(&debugfs_srcu);
void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
struct ib_umem *ib_umem_get_hfi(struct ib_ucontext *context, unsigned long addr,
size_t size, int access, int dmasync);
static inline long compat_get_user_pages(unsigned long start,
unsigned long nr_pages,
unsigned int gup_flags,
struct page **pages,
struct vm_area_struct **vmas)
{
return get_user_pages(current, current->mm, start,
nr_pages, 1, 1, pages, vmas);
}
#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
compat_get_user_pages(start, nr_pages, gup_flags, pages, vmas)
static inline int simple_positive(struct dentry *dentry)
{
return !d_unhashed(dentry) && dentry->d_inode;
}
static inline void hfi1_enable_intx(struct pci_dev *pdev)
{
/* first, turn on INTx */
pci_intx(pdev, 1);
/* then turn off MSI-X */
pci_disable_msix(pdev);
}
/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc) ((desc)->dev)
#define dev_to_msi_list(dev) (&(dev)->msi_list)
#define first_msi_entry(dev) \
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#endif /* RH78_COMPAT_H */
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/trace_ctxts.h
|
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(__HFI1_TRACE_CTXTS_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_CTXTS_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "hfi.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_ctxts
#define UCTXT_FMT \
"cred:%u, credaddr:0x%llx, piobase:0x%p, rcvhdr_cnt:%u, " \
"rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx, subctxt_cnt:%u"
TRACE_EVENT(hfi1_uctxtdata,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt,
unsigned int subctxt),
TP_ARGS(dd, uctxt, subctxt),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(unsigned int, ctxt)
__field(unsigned int, subctxt)
__field(u32, credits)
__field(u64, hw_free)
__field(void __iomem *, piobase)
__field(u16, rcvhdrq_cnt)
__field(u64, rcvhdrq_dma)
__field(u32, eager_cnt)
__field(u64, rcvegr_dma)
__field(unsigned int, subctxt_cnt)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = uctxt->ctxt;
__entry->subctxt = subctxt;
__entry->credits = uctxt->sc->credits;
__entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free);
__entry->piobase = uctxt->sc->base_addr;
__entry->rcvhdrq_cnt = get_hdrq_cnt(uctxt);
__entry->rcvhdrq_dma = uctxt->rcvhdrq_dma;
__entry->eager_cnt = uctxt->egrbufs.alloced;
__entry->rcvegr_dma = uctxt->egrbufs.rcvtids[0].dma;
__entry->subctxt_cnt = uctxt->subctxt_cnt;
),
TP_printk("[%s] ctxt %u:%u " UCTXT_FMT,
__get_str(dev),
__entry->ctxt,
__entry->subctxt,
__entry->credits,
__entry->hw_free,
__entry->piobase,
__entry->rcvhdrq_cnt,
__entry->rcvhdrq_dma,
__entry->eager_cnt,
__entry->rcvegr_dma,
__entry->subctxt_cnt
)
);
#define CINFO_FMT \
"egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
TRACE_EVENT(hfi1_ctxt_info,
TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
unsigned int subctxt,
struct hfi1_ctxt_info *cinfo),
TP_ARGS(dd, ctxt, subctxt, cinfo),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(unsigned int, ctxt)
__field(unsigned int, subctxt)
__field(u16, egrtids)
__field(u16, rcvhdrq_cnt)
__field(u16, rcvhdrq_size)
__field(u16, sdma_ring_size)
__field(u32, rcvegr_size)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
__entry->egrtids = cinfo->egrtids;
__entry->rcvhdrq_cnt = cinfo->rcvhdrq_cnt;
__entry->rcvhdrq_size = cinfo->rcvhdrq_entsize;
__entry->sdma_ring_size = cinfo->sdma_ring_size;
__entry->rcvegr_size = cinfo->rcvegr_size;
),
TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
__get_str(dev),
__entry->ctxt,
__entry->subctxt,
__entry->egrtids,
__entry->rcvegr_size,
__entry->rcvhdrq_cnt,
__entry->rcvhdrq_size,
__entry->sdma_ring_size
)
);
#ifdef AIP
const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
TRACE_EVENT(ctxt_rsm_hist,
TP_PROTO(unsigned int ctxt),
TP_ARGS(ctxt),
TP_STRUCT__entry(__field(unsigned int, ctxt)),
TP_fast_assign(__entry->ctxt = ctxt;),
TP_printk("%s", hfi1_trace_print_rsm_hist(p, __entry->ctxt))
);
#endif
#endif /* __HFI1_TRACE_CTXTS_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_ctxts
#include <trace/define_trace.h>
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/rc.h
|
/*
* Copyright(c) 2016-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef HFI1_RC_H
#define HFI1_RC_H
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
{
unsigned int next;
next = n + 1;
if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
next = 0;
qp->s_tail_ack_queue = next;
qp->s_acked_ack_queue = next;
qp->s_ack_state = OP(ACKNOWLEDGE);
}
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
struct rvt_qp *qp)
{
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_NAK;
rvt_get_qp(qp);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
}
static inline struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn,
u8 *prev, u8 *prev_ack,
bool *scheduled)
__must_hold(&qp->s_lock)
{
struct rvt_ack_entry *e = NULL;
u8 i, p;
bool s = true;
for (i = qp->r_head_ack_queue; ; i = p) {
if (i == qp->s_tail_ack_queue)
s = false;
if (i)
p = i - 1;
else
p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
if (p == qp->r_head_ack_queue) {
e = NULL;
break;
}
e = &qp->s_ack_queue[p];
if (!e->opcode) {
e = NULL;
break;
}
if (cmp_psn(psn, e->psn) >= 0) {
if (p == qp->s_tail_ack_queue &&
cmp_psn(psn, e->lpsn) <= 0)
s = false;
break;
}
}
if (prev)
*prev = p;
if (prev_ack)
*prev_ack = i;
if (scheduled)
*scheduled = s;
return e;
}
static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
u32 psn, u32 pmtu)
{
u32 len;
len = delta_psn(psn, wqe->psn) * pmtu;
ss->sge = wqe->sg_list[0];
ss->sg_list = wqe->sg_list + 1;
ss->num_sge = wqe->wr.num_sge;
ss->total_len = wqe->length;
rvt_skip_sge(ss, len, false);
return wqe->length - len;
}
static inline void release_rdma_sge_mr(struct rvt_ack_entry *e)
{
if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
}
int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val,
struct hfi1_ctxtdata *rcd);
struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct hfi1_ibport *ibp);
#endif /* HFI1_RC_H */
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/common.h
|
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _COMMON_H
#define _COMMON_H
#include <rdma/hfi/hfi1_user.h>
/*
* This file contains defines, structures, etc. that are used
* to communicate between kernel and user code.
*/
/* version of protocol header (known to chip also). In the long run,
* we should be able to generate and accept a range of version numbers;
* for now we only accept one, and it's compiled in.
*/
#define IPS_PROTO_VERSION 2
/*
* These are compile time constants that you may want to enable or disable
* if you are trying to debug problems with code or performance.
 * HFI1_VERBOSE_TRACING - define as 1 if you want additional tracing in
 * fast path code.
 * HFI1_TRACE_REGWRITES - define as 1 if you want register writes to be
 * traced in fast path code.
 * _HFI1_TRACING - define as 0 if you want to remove all tracing in a
 * compilation unit.
*/
/* driver/hw feature set bitmask */
#define HFI1_CAP_USER_SHIFT 24
#define HFI1_CAP_MASK ((1UL << HFI1_CAP_USER_SHIFT) - 1)
/* locked flag - if set, only HFI1_CAP_WRITABLE_MASK bits can be set */
#define HFI1_CAP_LOCKED_SHIFT 63
#define HFI1_CAP_LOCKED_MASK 0x1ULL
#define HFI1_CAP_LOCKED_SMASK (HFI1_CAP_LOCKED_MASK << HFI1_CAP_LOCKED_SHIFT)
/* extra bits used between kernel and user processes */
#define HFI1_CAP_MISC_SHIFT (HFI1_CAP_USER_SHIFT * 2)
#define HFI1_CAP_MISC_MASK ((1ULL << (HFI1_CAP_LOCKED_SHIFT - \
HFI1_CAP_MISC_SHIFT)) - 1)
#define HFI1_CAP_KSET(cap) ({ hfi1_cap_mask |= HFI1_CAP_##cap; hfi1_cap_mask; })
#define HFI1_CAP_KCLEAR(cap) \
({ \
hfi1_cap_mask &= ~HFI1_CAP_##cap; \
hfi1_cap_mask; \
})
#define HFI1_CAP_USET(cap) \
({ \
hfi1_cap_mask |= (HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT); \
hfi1_cap_mask; \
})
#define HFI1_CAP_UCLEAR(cap) \
({ \
hfi1_cap_mask &= ~(HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT); \
hfi1_cap_mask; \
})
#define HFI1_CAP_SET(cap) \
({ \
hfi1_cap_mask |= (HFI1_CAP_##cap | (HFI1_CAP_##cap << \
HFI1_CAP_USER_SHIFT)); \
hfi1_cap_mask; \
})
#define HFI1_CAP_CLEAR(cap) \
({ \
hfi1_cap_mask &= ~(HFI1_CAP_##cap | \
(HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT)); \
hfi1_cap_mask; \
})
#define HFI1_CAP_LOCK() \
({ hfi1_cap_mask |= HFI1_CAP_LOCKED_SMASK; hfi1_cap_mask; })
#define HFI1_CAP_LOCKED() (!!(hfi1_cap_mask & HFI1_CAP_LOCKED_SMASK))
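/*
 * Illustrative sketch (an assumption, not part of this header): how the
 * capability macros above compose. After HFI1_CAP_LOCK(), only bits in
 * HFI1_CAP_WRITABLE_MASK may still be changed.
 */
static inline void hfi1_cap_usage_example(void)
{
	HFI1_CAP_SET(HDRSUPP);		/* set kernel and user HDRSUPP bits */
	HFI1_CAP_UCLEAR(HDRSUPP);	/* clear only the user-side bit */
	HFI1_CAP_LOCK();		/* freeze the non-writable bits */
}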
/*
* The set of capability bits that can be changed after initial load
* This set is the same for kernel and user contexts. However, for
* user contexts, the set can be further filtered by using the
* HFI1_CAP_RESERVED_MASK bits.
*/
#define HFI1_CAP_WRITABLE_MASK (HFI1_CAP_SDMA_AHG | \
HFI1_CAP_HDRSUPP | \
HFI1_CAP_MULTI_PKT_EGR | \
HFI1_CAP_NODROP_RHQ_FULL | \
HFI1_CAP_NODROP_EGR_FULL | \
HFI1_CAP_ALLOW_PERM_JKEY | \
HFI1_CAP_STATIC_RATE_CTRL | \
HFI1_CAP_PRINT_UNIMPL | \
HFI1_CAP_TID_UNMAP | \
HFI1_CAP_OPFN)
/*
* A set of capability bits that are "global" and are not allowed to be
* set in the user bitmask.
*/
#define HFI1_CAP_RESERVED_MASK ((HFI1_CAP_SDMA | \
HFI1_CAP_USE_SDMA_HEAD | \
HFI1_CAP_EXTENDED_PSN | \
HFI1_CAP_PRINT_UNIMPL | \
HFI1_CAP_NO_INTEGRITY | \
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_TID_RDMA | \
HFI1_CAP_OPFN) << \
HFI1_CAP_USER_SHIFT)
/*
* Set of capabilities that need to be enabled for kernel context in
* order to be allowed for user contexts, as well.
*/
#define HFI1_CAP_MUST_HAVE_KERN (HFI1_CAP_STATIC_RATE_CTRL)
/* Default enabled capabilities (both kernel and user) */
#define HFI1_CAP_MASK_DEFAULT (HFI1_CAP_HDRSUPP | \
HFI1_CAP_NODROP_RHQ_FULL | \
HFI1_CAP_NODROP_EGR_FULL | \
HFI1_CAP_SDMA | \
HFI1_CAP_PRINT_UNIMPL | \
HFI1_CAP_STATIC_RATE_CTRL | \
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_MULTI_PKT_EGR | \
HFI1_CAP_EXTENDED_PSN | \
((HFI1_CAP_HDRSUPP | \
HFI1_CAP_MULTI_PKT_EGR | \
HFI1_CAP_STATIC_RATE_CTRL | \
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_EARLY_CREDIT_RETURN) << \
HFI1_CAP_USER_SHIFT))
/*
* A bitmask of kernel/global capabilities that should be communicated
* to user level processes.
*/
#define HFI1_CAP_K2U (HFI1_CAP_SDMA | \
HFI1_CAP_EXTENDED_PSN | \
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_NO_INTEGRITY)
#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | \
HFI1_USER_SWMINOR)
#ifndef HFI1_KERN_TYPE
#define HFI1_KERN_TYPE 0
#endif
/*
* Similarly, this is the kernel version going back to the user. It's
 * slightly different, in that we want to tell if the driver was built as
 * part of an Intel release, or from openfabrics.org, kernel.org, or a
 * standard distribution, for support reasons.
* The high bit is 0 for non-Intel and 1 for Intel-built/supplied.
*
* It's returned by the driver to the user code during initialization in the
* spi_sw_version field of hfi1_base_info, so the user code can in turn
* check for compatibility with the kernel.
*/
#define HFI1_KERN_SWVERSION ((HFI1_KERN_TYPE << 31) | HFI1_USER_SWVERSION)
/*
* Define the driver version number. This is something that refers only
* to the driver itself, not the software interfaces it supports.
*/
#ifndef HFI1_DRIVER_VERSION_BASE
#define HFI1_DRIVER_VERSION_BASE "10.10.1.0"
#endif
/* create the final driver version string */
#ifdef HFI1_IDSTR
#define HFI1_DRIVER_VERSION HFI1_DRIVER_VERSION_BASE " " HFI1_IDSTR
#else
#define HFI1_DRIVER_VERSION HFI1_DRIVER_VERSION_BASE
#endif
/*
* Split point for the JKey that separates security index from the user
* identifiable portion.
*/
#define JKEY_SEC_SPLIT 8
#define JKEY_SEC_MASK 0xFF
/*
* Diagnostics can send a packet by writing the following
* struct to the diag packet special file.
*
* This allows a custom PBC qword, so that special modes and deliberate
* changes to CRCs can be used.
*/
#define _DIAG_PKT_VERS 1
struct diag_pkt {
__u16 version; /* structure version */
__u16 unit; /* which device */
__u16 sw_index; /* send sw index to use */
__u16 len; /* data length, in bytes */
__u16 port; /* port number */
__u16 unused;
__u32 flags; /* call flags */
__u64 data; /* user data pointer */
__u64 pbc; /* PBC for the packet */
};
/* diag_pkt flags */
#define F_DIAGPKT_WAIT 0x1 /* wait until packet is sent */
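/*
 * Illustrative user-space sketch (an assumption, kept under "#if 0" so
 * this header still compiles): diagnostics fill a struct diag_pkt and
 * write() it to the diag packet special file. "diag_fd" is a descriptor
 * for that file; its path is not defined in this header. A user-space
 * build would need <unistd.h> and <stdint.h>.
 */
#if 0
static int send_diag_pkt_example(int diag_fd, const void *buf,
				 __u16 len, __u64 pbc)
{
	struct diag_pkt dp = {
		.version = _DIAG_PKT_VERS,
		.unit = 0,			/* first device */
		.sw_index = 0,
		.len = len,			/* data length, in bytes */
		.port = 1,
		.flags = F_DIAGPKT_WAIT,	/* block until sent */
		.data = (__u64)(uintptr_t)buf,
		.pbc = pbc,			/* custom PBC qword */
	};

	return write(diag_fd, &dp, sizeof(dp)) == sizeof(dp) ? 0 : -1;
}
#endif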
/*
* The next set of defines are for packet headers, and chip register
* and memory bits that are visible to and/or used by user-mode software.
*/
/*
* Receive Header Flags
*/
#define RHF_PKT_LEN_SHIFT 0
#define RHF_PKT_LEN_MASK 0xfffull
#define RHF_PKT_LEN_SMASK (RHF_PKT_LEN_MASK << RHF_PKT_LEN_SHIFT)
#define RHF_RCV_TYPE_SHIFT 12
#define RHF_RCV_TYPE_MASK 0x7ull
#define RHF_RCV_TYPE_SMASK (RHF_RCV_TYPE_MASK << RHF_RCV_TYPE_SHIFT)
#define RHF_USE_EGR_BFR_SHIFT 15
#define RHF_USE_EGR_BFR_MASK 0x1ull
#define RHF_USE_EGR_BFR_SMASK (RHF_USE_EGR_BFR_MASK << RHF_USE_EGR_BFR_SHIFT)
#define RHF_EGR_INDEX_SHIFT 16
#define RHF_EGR_INDEX_MASK 0x7ffull
#define RHF_EGR_INDEX_SMASK (RHF_EGR_INDEX_MASK << RHF_EGR_INDEX_SHIFT)
#define RHF_DC_INFO_SHIFT 27
#define RHF_DC_INFO_MASK 0x1ull
#define RHF_DC_INFO_SMASK (RHF_DC_INFO_MASK << RHF_DC_INFO_SHIFT)
#define RHF_RCV_SEQ_SHIFT 28
#define RHF_RCV_SEQ_MASK 0xfull
#define RHF_RCV_SEQ_SMASK (RHF_RCV_SEQ_MASK << RHF_RCV_SEQ_SHIFT)
#define RHF_EGR_OFFSET_SHIFT 32
#define RHF_EGR_OFFSET_MASK 0xfffull
#define RHF_EGR_OFFSET_SMASK (RHF_EGR_OFFSET_MASK << RHF_EGR_OFFSET_SHIFT)
#define RHF_HDRQ_OFFSET_SHIFT 44
#define RHF_HDRQ_OFFSET_MASK 0x1ffull
#define RHF_HDRQ_OFFSET_SMASK (RHF_HDRQ_OFFSET_MASK << RHF_HDRQ_OFFSET_SHIFT)
#define RHF_K_HDR_LEN_ERR (0x1ull << 53)
#define RHF_DC_UNC_ERR (0x1ull << 54)
#define RHF_DC_ERR (0x1ull << 55)
#define RHF_RCV_TYPE_ERR_SHIFT 56
#define RHF_RCV_TYPE_ERR_MASK 0x7ul
#define RHF_RCV_TYPE_ERR_SMASK (RHF_RCV_TYPE_ERR_MASK << RHF_RCV_TYPE_ERR_SHIFT)
#define RHF_TID_ERR (0x1ull << 59)
#define RHF_LEN_ERR (0x1ull << 60)
#define RHF_ECC_ERR (0x1ull << 61)
#define RHF_VCRC_ERR (0x1ull << 62)
#define RHF_ICRC_ERR (0x1ull << 63)
#define RHF_ERROR_SMASK 0xffe0000000000000ull /* bits 63:53 */
/* RHF receive types */
#define RHF_RCV_TYPE_EXPECTED 0
#define RHF_RCV_TYPE_EAGER 1
#define RHF_RCV_TYPE_IB 2 /* normal IB, IB Raw, or IPv6 */
#define RHF_RCV_TYPE_ERROR 3
#define RHF_RCV_TYPE_BYPASS 4
#define RHF_RCV_TYPE_INVALID5 5
#define RHF_RCV_TYPE_INVALID6 6
#define RHF_RCV_TYPE_INVALID7 7
/* RHF receive type error - expected packet errors */
#define RHF_RTE_EXPECTED_FLOW_SEQ_ERR 0x2
#define RHF_RTE_EXPECTED_FLOW_GEN_ERR 0x4
/* RHF receive type error - eager packet errors */
#define RHF_RTE_EAGER_NO_ERR 0x0
/* RHF receive type error - IB packet errors */
#define RHF_RTE_IB_NO_ERR 0x0
/* RHF receive type error - error packet errors */
#define RHF_RTE_ERROR_NO_ERR 0x0
#define RHF_RTE_ERROR_OP_CODE_ERR 0x1
#define RHF_RTE_ERROR_KHDR_MIN_LEN_ERR 0x2
#define RHF_RTE_ERROR_KHDR_HCRC_ERR 0x3
#define RHF_RTE_ERROR_KHDR_KVER_ERR 0x4
#define RHF_RTE_ERROR_CONTEXT_ERR 0x5
#define RHF_RTE_ERROR_KHDR_TID_ERR 0x6
/* RHF receive type error - bypass packet errors */
#define RHF_RTE_BYPASS_NO_ERR 0x0
/* MAX RcvSEQ */
#define RHF_MAX_SEQ 13
/* IB - LRH header constants */
#define HFI1_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
#define HFI1_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
/* misc. */
#define SC15_PACKET 0xF
#define SIZE_OF_CRC 1
#define SIZE_OF_LT 1
#define MAX_16B_PADDING 12 /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
#define LIM_MGMT_P_KEY 0x7FFF
#define FULL_MGMT_P_KEY 0xFFFF
#define DEFAULT_P_KEY LIM_MGMT_P_KEY
#define HFI1_PSM_IOC_BASE_SEQ 0x0
/* Number of BTH.PSN bits used for sequence number in expected rcvs */
#define HFI1_KDETH_BTH_SEQ_SHIFT 11
#define HFI1_KDETH_BTH_SEQ_MASK (BIT(HFI1_KDETH_BTH_SEQ_SHIFT) - 1)
static inline __u64 rhf_to_cpu(const __le32 *rbuf)
{
return __le64_to_cpu(*((__le64 *)rbuf));
}
static inline u64 rhf_err_flags(u64 rhf)
{
return rhf & RHF_ERROR_SMASK;
}
static inline u32 rhf_rcv_type(u64 rhf)
{
return (rhf >> RHF_RCV_TYPE_SHIFT) & RHF_RCV_TYPE_MASK;
}
static inline u32 rhf_rcv_type_err(u64 rhf)
{
return (rhf >> RHF_RCV_TYPE_ERR_SHIFT) & RHF_RCV_TYPE_ERR_MASK;
}
/* return size is in bytes, not DWORDs */
static inline u32 rhf_pkt_len(u64 rhf)
{
return ((rhf & RHF_PKT_LEN_SMASK) >> RHF_PKT_LEN_SHIFT) << 2;
}
static inline u32 rhf_egr_index(u64 rhf)
{
return (rhf >> RHF_EGR_INDEX_SHIFT) & RHF_EGR_INDEX_MASK;
}
static inline u32 rhf_rcv_seq(u64 rhf)
{
return (rhf >> RHF_RCV_SEQ_SHIFT) & RHF_RCV_SEQ_MASK;
}
/* returned offset is in DWORDS */
static inline u32 rhf_hdrq_offset(u64 rhf)
{
return (rhf >> RHF_HDRQ_OFFSET_SHIFT) & RHF_HDRQ_OFFSET_MASK;
}
static inline u64 rhf_use_egr_bfr(u64 rhf)
{
return rhf & RHF_USE_EGR_BFR_SMASK;
}
static inline u64 rhf_dc_info(u64 rhf)
{
return rhf & RHF_DC_INFO_SMASK;
}
static inline u32 rhf_egr_buf_offset(u64 rhf)
{
return (rhf >> RHF_EGR_OFFSET_SHIFT) & RHF_EGR_OFFSET_MASK;
}
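/*
 * Illustrative sketch (an assumption, not a function from this header):
 * decoding one receive header flag word with the accessors above.
 * "rbuf" points at the two little-endian 32-bit RHF words of a receive
 * header queue entry.
 */
static inline u32 rhf_decode_example(const __le32 *rbuf)
{
	u64 rhf = rhf_to_cpu(rbuf);

	if (rhf_err_flags(rhf))
		return 0;			/* an RHF_*_ERR bit is set */
	if (rhf_rcv_type(rhf) == RHF_RCV_TYPE_EAGER)
		return rhf_egr_index(rhf);	/* eager buffer to consume */
	return rhf_pkt_len(rhf);		/* payload length in bytes */
}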
#ifndef HAVE_SECURITY_H
static inline int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
{
return 0;
}
static inline int security_ib_alloc_security(void **sec)
{
return 0;
}
static inline void security_ib_free_security(void *sec)
{
}
#endif
#ifndef HAVE_IB_GET_CACHED_SUBNET_PREFIX
static inline int ib_get_cached_subnet_prefix(struct ib_device *device,
u8 port_num,
u64 *sn_pfx)
{
return 0;
}
#endif
#endif /* _COMMON_H */
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/trace.c
|
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "user_sdma.h"
#include "compat_common.h"
static u8 __get_ib_hdr_len(struct ib_header *hdr)
{
struct ib_other_headers *ohdr;
u8 opcode;
if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
ohdr = &hdr->u.oth;
else
ohdr = &hdr->u.l.oth;
opcode = ib_bth_get_opcode(ohdr);
return hdr_len_by_opcode[opcode] == 0 ?
0 : hdr_len_by_opcode[opcode] - (12 + 8);
}
static u8 __get_16b_hdr_len(struct hfi1_16b_header *hdr)
{
struct ib_other_headers *ohdr = NULL;
u8 opcode;
u8 l4 = hfi1_16B_get_l4(hdr);
if (l4 == OPA_16B_L4_FM) {
opcode = IB_OPCODE_UD_SEND_ONLY;
return (8 + 8); /* No BTH */
}
if (l4 == OPA_16B_L4_IB_LOCAL)
ohdr = &hdr->u.oth;
else
ohdr = &hdr->u.l.oth;
opcode = ib_bth_get_opcode(ohdr);
return hdr_len_by_opcode[opcode] == 0 ?
0 : hdr_len_by_opcode[opcode] - (12 + 8 + 8);
}
u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet)
{
if (packet->etype != RHF_RCV_TYPE_BYPASS)
return __get_ib_hdr_len(packet->hdr);
else
return __get_16b_hdr_len(packet->hdr);
}
u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opa_hdr)
{
if (!opa_hdr->hdr_type)
return __get_ib_hdr_len(&opa_hdr->ibh);
else
return __get_16b_hdr_len(&opa_hdr->opah);
}
const char *hfi1_trace_get_packet_l4_str(u8 l4)
{
if (l4)
return "16B";
else
return "9B";
}
const char *hfi1_trace_get_packet_l2_str(u8 l2)
{
switch (l2) {
case 0:
return "0";
case 1:
return "1";
case 2:
return "16B";
case 3:
return "9B";
}
return "";
}
#define IMM_PRN "imm:%d"
#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
#define DETH_PRN "deth qkey:0x%.8x sqpn:0x%.6x"
#define DETH_ENTROPY_PRN "deth qkey:0x%.8x sqpn:0x%.6x entropy:0x%.2x"
#define IETH_PRN "ieth rkey:0x%.8x"
#define ATOMICACKETH_PRN "origdata:%llx"
#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx"
#define TID_RDMA_KDETH "kdeth0 0x%x kdeth1 0x%x"
#define TID_RDMA_KDETH_DATA "kdeth0 0x%x: kver %u sh %u intr %u tidctrl %u tid %x offset %x kdeth1 0x%x: jkey %x"
#define TID_WRITE_REQ_PRN "original_qp 0x%x"
#define TID_WRITE_RSP_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
#define TID_WRITE_DATA_PRN "verbs_qp 0x%x"
#define TID_READ_REQ_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
#define TID_READ_RSP_PRN "verbs_qp 0x%x"
#define TID_ACK_PRN "tid_flow_psn 0x%x verbs_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
#define TID_RESYNC_PRN "verbs_qp 0x%x"
#define OP(transport, op) IB_OPCODE_## transport ## _ ## op
static const char *parse_syndrome(u8 syndrome)
{
switch (syndrome >> 5) {
case 0:
return "ACK";
case 1:
return "RNRNAK";
case 3:
return "NAK";
}
return "";
}
void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
u8 *ack, bool *becn, bool *fecn, u8 *mig,
u8 *se, u8 *pad, u8 *opcode, u8 *tver,
u16 *pkey, u32 *psn, u32 *qpn)
{
*ack = ib_bth_get_ackreq(ohdr);
*becn = ib_bth_get_becn(ohdr);
*fecn = ib_bth_get_fecn(ohdr);
*mig = ib_bth_get_migreq(ohdr);
*se = ib_bth_get_se(ohdr);
*pad = ib_bth_get_pad(ohdr);
*opcode = ib_bth_get_opcode(ohdr);
*tver = ib_bth_get_tver(ohdr);
*pkey = ib_bth_get_pkey(ohdr);
*psn = mask_psn(ib_bth_get_psn(ohdr));
*qpn = ib_bth_get_qpn(ohdr);
}
void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
u8 *ack, u8 *mig, u8 *opcode,
u8 *pad, u8 *se, u8 *tver,
u32 *psn, u32 *qpn)
{
*ack = ib_bth_get_ackreq(ohdr);
*mig = ib_bth_get_migreq(ohdr);
*opcode = ib_bth_get_opcode(ohdr);
*pad = ib_bth_get_pad(ohdr);
*se = ib_bth_get_se(ohdr);
*tver = ib_bth_get_tver(ohdr);
*psn = mask_psn(ib_bth_get_psn(ohdr));
*qpn = ib_bth_get_qpn(ohdr);
}
void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5,
u8 *lnh, u8 *lver, u8 *sl, u8 *sc,
u16 *len, u32 *dlid, u32 *slid)
{
*lnh = ib_get_lnh(hdr);
*lver = ib_get_lver(hdr);
*sl = ib_get_sl(hdr);
*sc = ib_get_sc(hdr) | (sc5 << 4);
*len = ib_get_len(hdr);
*dlid = ib_get_dlid(hdr);
*slid = ib_get_slid(hdr);
}
void hfi1_trace_parse_16b_hdr(struct hfi1_16b_header *hdr,
u8 *age, bool *becn, bool *fecn,
u8 *l4, u8 *rc, u8 *sc,
u16 *entropy, u16 *len, u16 *pkey,
u32 *dlid, u32 *slid)
{
*age = hfi1_16B_get_age(hdr);
*becn = hfi1_16B_get_becn(hdr);
*fecn = hfi1_16B_get_fecn(hdr);
*l4 = hfi1_16B_get_l4(hdr);
*rc = hfi1_16B_get_rc(hdr);
*sc = hfi1_16B_get_sc(hdr);
*entropy = hfi1_16B_get_entropy(hdr);
*len = hfi1_16B_get_len(hdr);
*pkey = hfi1_16B_get_pkey(hdr);
*dlid = hfi1_16B_get_dlid(hdr);
*slid = hfi1_16B_get_slid(hdr);
}
#define LRH_PRN "len:%d sc:%d dlid:0x%.4x slid:0x%.4x "
#define LRH_9B_PRN "lnh:%d,%s lver:%d sl:%d"
#define LRH_16B_PRN "age:%d becn:%d fecn:%d l4:%d " \
"rc:%d sc:%d pkey:0x%.4x entropy:0x%.4x"
const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
u8 age, bool becn, bool fecn, u8 l4,
u8 lnh, const char *lnh_name, u8 lver,
u8 rc, u8 sc, u8 sl, u16 entropy,
u16 len, u16 pkey, u32 dlid, u32 slid)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, LRH_PRN, len, sc, dlid, slid);
if (bypass)
trace_seq_printf(p, LRH_16B_PRN,
age, becn, fecn, l4, rc, sc, pkey, entropy);
else
trace_seq_printf(p, LRH_9B_PRN,
lnh, lnh_name, lver, sl);
trace_seq_putc(p, 0);
return ret;
}
#define BTH_9B_PRN \
"op:0x%.2x,%s se:%d m:%d pad:%d tver:%d pkey:0x%.4x " \
"f:%d b:%d qpn:0x%.6x a:%d psn:0x%.8x"
#define BTH_16B_PRN \
"op:0x%.2x,%s se:%d m:%d pad:%d tver:%d " \
"qpn:0x%.6x a:%d psn:0x%.8x"
#define L4_FM_16B_PRN \
"op:0x%.2x,%s dest_qpn:0x%.6x src_qpn:0x%.6x"
const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4,
u8 ack, bool becn, bool fecn, u8 mig,
u8 se, u8 pad, u8 opcode, const char *opname,
u8 tver, u16 pkey, u32 psn, u32 qpn,
u32 dest_qpn, u32 src_qpn)
{
const char *ret = trace_seq_buffer_ptr(p);
if (bypass)
if (l4 == OPA_16B_L4_FM)
trace_seq_printf(p, L4_FM_16B_PRN,
opcode, opname, dest_qpn, src_qpn);
else
trace_seq_printf(p, BTH_16B_PRN,
opcode, opname,
se, mig, pad, tver, qpn, ack, psn);
else
trace_seq_printf(p, BTH_9B_PRN,
opcode, opname,
se, mig, pad, tver, pkey, fecn, becn,
qpn, ack, psn);
trace_seq_putc(p, 0);
return ret;
}
const char *parse_everbs_hdrs(
struct trace_seq *p,
u8 opcode, u8 l4, u32 dest_qpn, u32 src_qpn,
void *ehdrs)
{
union ib_ehdrs *eh = ehdrs;
const char *ret = trace_seq_buffer_ptr(p);
if (l4 == OPA_16B_L4_FM) {
trace_seq_printf(p, "mgmt pkt");
goto out;
}
switch (opcode) {
/* imm */
case OP(RC, SEND_LAST_WITH_IMMEDIATE):
case OP(UC, SEND_LAST_WITH_IMMEDIATE):
case OP(RC, SEND_ONLY_WITH_IMMEDIATE):
case OP(UC, SEND_ONLY_WITH_IMMEDIATE):
case OP(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
case OP(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
trace_seq_printf(p, IMM_PRN,
be32_to_cpu(eh->imm_data));
break;
/* reth + imm */
case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
trace_seq_printf(p, RETH_PRN " " IMM_PRN,
get_ib_reth_vaddr(&eh->rc.reth),
be32_to_cpu(eh->rc.reth.rkey),
be32_to_cpu(eh->rc.reth.length),
be32_to_cpu(eh->rc.imm_data));
break;
/* reth */
case OP(RC, RDMA_READ_REQUEST):
case OP(RC, RDMA_WRITE_FIRST):
case OP(UC, RDMA_WRITE_FIRST):
case OP(RC, RDMA_WRITE_ONLY):
case OP(UC, RDMA_WRITE_ONLY):
trace_seq_printf(p, RETH_PRN,
get_ib_reth_vaddr(&eh->rc.reth),
be32_to_cpu(eh->rc.reth.rkey),
be32_to_cpu(eh->rc.reth.length));
break;
case OP(RC, RDMA_READ_RESPONSE_FIRST):
case OP(RC, RDMA_READ_RESPONSE_LAST):
case OP(RC, RDMA_READ_RESPONSE_ONLY):
case OP(RC, ACKNOWLEDGE):
trace_seq_printf(p, AETH_PRN, be32_to_cpu(eh->aeth) >> 24,
parse_syndrome(be32_to_cpu(eh->aeth) >> 24),
be32_to_cpu(eh->aeth) & IB_MSN_MASK);
break;
case OP(TID_RDMA, WRITE_REQ):
trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
TID_WRITE_REQ_PRN,
le32_to_cpu(eh->tid_rdma.w_req.kdeth0),
le32_to_cpu(eh->tid_rdma.w_req.kdeth1),
ib_u64_get(&eh->tid_rdma.w_req.reth.vaddr),
be32_to_cpu(eh->tid_rdma.w_req.reth.rkey),
be32_to_cpu(eh->tid_rdma.w_req.reth.length),
be32_to_cpu(eh->tid_rdma.w_req.verbs_qp));
break;
case OP(TID_RDMA, WRITE_RESP):
trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " "
TID_WRITE_RSP_PRN,
le32_to_cpu(eh->tid_rdma.w_rsp.kdeth0),
le32_to_cpu(eh->tid_rdma.w_rsp.kdeth1),
be32_to_cpu(eh->tid_rdma.w_rsp.aeth) >> 24,
parse_syndrome(/* aeth */
be32_to_cpu(eh->tid_rdma.w_rsp.aeth)
>> 24),
(be32_to_cpu(eh->tid_rdma.w_rsp.aeth) &
IB_MSN_MASK),
be32_to_cpu(eh->tid_rdma.w_rsp.tid_flow_psn),
be32_to_cpu(eh->tid_rdma.w_rsp.tid_flow_qp),
be32_to_cpu(eh->tid_rdma.w_rsp.verbs_qp));
break;
case OP(TID_RDMA, WRITE_DATA_LAST):
case OP(TID_RDMA, WRITE_DATA):
trace_seq_printf(p, TID_RDMA_KDETH_DATA " " TID_WRITE_DATA_PRN,
le32_to_cpu(eh->tid_rdma.w_data.kdeth0),
KDETH_GET(eh->tid_rdma.w_data.kdeth0, KVER),
KDETH_GET(eh->tid_rdma.w_data.kdeth0, SH),
KDETH_GET(eh->tid_rdma.w_data.kdeth0, INTR),
KDETH_GET(eh->tid_rdma.w_data.kdeth0, TIDCTRL),
KDETH_GET(eh->tid_rdma.w_data.kdeth0, TID),
KDETH_GET(eh->tid_rdma.w_data.kdeth0, OFFSET),
le32_to_cpu(eh->tid_rdma.w_data.kdeth1),
KDETH_GET(eh->tid_rdma.w_data.kdeth1, JKEY),
be32_to_cpu(eh->tid_rdma.w_data.verbs_qp));
break;
case OP(TID_RDMA, READ_REQ):
trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
TID_READ_REQ_PRN,
le32_to_cpu(eh->tid_rdma.r_req.kdeth0),
le32_to_cpu(eh->tid_rdma.r_req.kdeth1),
ib_u64_get(&eh->tid_rdma.r_req.reth.vaddr),
be32_to_cpu(eh->tid_rdma.r_req.reth.rkey),
be32_to_cpu(eh->tid_rdma.r_req.reth.length),
be32_to_cpu(eh->tid_rdma.r_req.tid_flow_psn),
be32_to_cpu(eh->tid_rdma.r_req.tid_flow_qp),
be32_to_cpu(eh->tid_rdma.r_req.verbs_qp));
break;
case OP(TID_RDMA, READ_RESP):
trace_seq_printf(p, TID_RDMA_KDETH_DATA " " AETH_PRN " "
TID_READ_RSP_PRN,
le32_to_cpu(eh->tid_rdma.r_rsp.kdeth0),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, KVER),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, SH),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, INTR),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TIDCTRL),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TID),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, OFFSET),
le32_to_cpu(eh->tid_rdma.r_rsp.kdeth1),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth1, JKEY),
be32_to_cpu(eh->tid_rdma.r_rsp.aeth) >> 24,
parse_syndrome(/* aeth */
be32_to_cpu(eh->tid_rdma.r_rsp.aeth)
>> 24),
(be32_to_cpu(eh->tid_rdma.r_rsp.aeth) &
IB_MSN_MASK),
be32_to_cpu(eh->tid_rdma.r_rsp.verbs_qp));
break;
case OP(TID_RDMA, ACK):
trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " "
TID_ACK_PRN,
le32_to_cpu(eh->tid_rdma.ack.kdeth0),
le32_to_cpu(eh->tid_rdma.ack.kdeth1),
be32_to_cpu(eh->tid_rdma.ack.aeth) >> 24,
parse_syndrome(/* aeth */
be32_to_cpu(eh->tid_rdma.ack.aeth)
>> 24),
(be32_to_cpu(eh->tid_rdma.ack.aeth) &
IB_MSN_MASK),
be32_to_cpu(eh->tid_rdma.ack.tid_flow_psn),
be32_to_cpu(eh->tid_rdma.ack.verbs_psn),
be32_to_cpu(eh->tid_rdma.ack.tid_flow_qp),
be32_to_cpu(eh->tid_rdma.ack.verbs_qp));
break;
case OP(TID_RDMA, RESYNC):
trace_seq_printf(p, TID_RDMA_KDETH " " TID_RESYNC_PRN,
le32_to_cpu(eh->tid_rdma.resync.kdeth0),
le32_to_cpu(eh->tid_rdma.resync.kdeth1),
be32_to_cpu(eh->tid_rdma.resync.verbs_qp));
break;
/* aeth + atomicacketh */
case OP(RC, ATOMIC_ACKNOWLEDGE):
trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
be32_to_cpu(eh->at.aeth) >> 24,
parse_syndrome(be32_to_cpu(eh->at.aeth) >> 24),
be32_to_cpu(eh->at.aeth) & IB_MSN_MASK,
ib_u64_get(&eh->at.atomic_ack_eth));
break;
/* atomiceth */
case OP(RC, COMPARE_SWAP):
case OP(RC, FETCH_ADD):
trace_seq_printf(p, ATOMICETH_PRN,
get_ib_ateth_vaddr(&eh->atomic_eth),
eh->atomic_eth.rkey,
get_ib_ateth_swap(&eh->atomic_eth),
get_ib_ateth_compare(&eh->atomic_eth));
break;
/* deth */
case OP(UD, SEND_ONLY):
trace_seq_printf(p, DETH_ENTROPY_PRN,
be32_to_cpu(eh->ud.deth[0]),
be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK,
be32_to_cpu(eh->ud.deth[1]) >>
RVT_AIP_ENTROPY_SHIFT);
break;
case OP(UD, SEND_ONLY_WITH_IMMEDIATE):
trace_seq_printf(p, DETH_PRN,
be32_to_cpu(eh->ud.deth[0]),
be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
break;
/* ieth */
case OP(RC, SEND_LAST_WITH_INVALIDATE):
case OP(RC, SEND_ONLY_WITH_INVALIDATE):
trace_seq_printf(p, IETH_PRN,
be32_to_cpu(eh->ieth));
break;
}
out:
trace_seq_putc(p, 0);
return ret;
}
const char *parse_sdma_flags(
struct trace_seq *p,
u64 desc0, u64 desc1)
{
const char *ret = trace_seq_buffer_ptr(p);
char flags[5] = { 'x', 'x', 'x', 'x', 0 };
flags[0] = (desc1 & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
flags[1] = (desc1 & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-';
flags[2] = (desc0 & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
flags[3] = (desc0 & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
trace_seq_printf(p, "%s", flags);
if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG)
trace_seq_printf(p, " amode:%u aidx:%u alen:%u",
(u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) &
SDMA_DESC1_HEADER_MODE_MASK),
(u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) &
SDMA_DESC1_HEADER_INDEX_MASK),
(u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) &
SDMA_DESC1_HEADER_DWS_MASK));
return ret;
}
const char *print_u32_array(
struct trace_seq *p,
u32 *arr, int len)
{
int i;
const char *ret = trace_seq_buffer_ptr(p);
for (i = 0; i < len ; i++)
trace_seq_printf(p, "%s%#x", i == 0 ? "" : " ", arr[i]);
trace_seq_putc(p, 0);
return ret;
}
u8 hfi1_trace_get_tid_ctrl(u32 ent)
{
return EXP_TID_GET(ent, CTRL);
}
u16 hfi1_trace_get_tid_len(u32 ent)
{
return EXP_TID_GET(ent, LEN);
}
u16 hfi1_trace_get_tid_idx(u32 ent)
{
return EXP_TID_GET(ent, IDX);
}
struct hfi1_ctxt_hist {
atomic_t count;
atomic_t data[255];
};
struct hfi1_ctxt_hist hist = {
.count = ATOMIC_INIT(0)
};
#ifdef AIP
const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt)
{
int i, len = ARRAY_SIZE(hist.data);
const char *ret = trace_seq_buffer_ptr(p);
#ifdef atomic_fetch_inc
unsigned long packet_count = atomic_fetch_inc(&hist.count);
#else
unsigned long packet_count = atomic_inc_return(&hist.count) - 1;
#endif
trace_seq_printf(p, "packet[%lu]", packet_count);
for (i = 0; i < len; ++i) {
unsigned long val;
atomic_t *count = &hist.data[i];
if (ctxt == i)
#ifdef atomic_fetch_inc
val = atomic_fetch_inc(count);
#else
val = atomic_inc_return(count) - 1;
#endif
else
val = atomic_read(count);
if (val)
trace_seq_printf(p, "(%d:%lu)", i, val);
}
trace_seq_putc(p, 0);
return ret;
}
#endif
__hfi1_trace_fn(AFFINITY);
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
__hfi1_trace_fn(SDMA);
__hfi1_trace_fn(LINKVERB);
__hfi1_trace_fn(DEBUG);
__hfi1_trace_fn(SNOOP);
__hfi1_trace_fn(CNTR);
__hfi1_trace_fn(PIO);
__hfi1_trace_fn(DC8051);
__hfi1_trace_fn(FIRMWARE);
__hfi1_trace_fn(RCVCTRL);
__hfi1_trace_fn(TID);
__hfi1_trace_fn(MMU);
__hfi1_trace_fn(IOCTL);
__hfi1_trace_fn(OPFN);
__hfi1_trace_fn(TIDRDMA);
__hfi1_trace_fn(SEL);
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/netdev.h
|
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef HFI1_NETDEV_H
#define HFI1_NETDEV_H
#include "hfi.h"
#include <linux/netdevice.h>
#include <linux/idr.h>
/**
* struct hfi1_netdev_rxq - Receive Queue for HFI
 * dummy netdev. Both IPoIB and VNIC netdevices run on
 * top of this device.
* @napi: napi object
* @priv: ptr to netdev_priv
* @rcd: ptr to receive context data
*/
struct hfi1_netdev_rxq {
struct napi_struct napi;
struct hfi1_netdev_priv *priv;
struct hfi1_ctxtdata *rcd;
};
#define HFI1_NETDEV_MAX 255
/*
* Number of VNIC contexts used. Ensure it is less than or equal to
* max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
*/
#define HFI1_NUM_NETDEV_CTXT 8
/* Number of VNIC RSM entries */
#define NUM_NETDEV_MAP_ENTRIES 8
/**
 * struct hfi1_netdev_priv: data required to setup and run HFI netdev.
 * @dd: hfi1_devdata
 * @rxq: pointer to dummy netdev receive queues.
 * @num_rx_q: number of receive queues
 * @rmt_start: first free index in RMT Array
 * @idr_lock: lock for inserting/destroying IDR entries
 * used in VNIC and IPoIB VLAN implementations.
 * @idr: IDR for unique identifiers of VNIC and IPoIB VLANs.
 * @enabled: atomic counter of netdevs enabling receive queues.
 * When 0 NAPI will be disabled.
 * @netdevs: atomic counter of netdevs using dummy netdev.
 * When 0 receive queues will be freed.
 */
struct hfi1_netdev_priv {
struct hfi1_devdata *dd;
struct hfi1_netdev_rxq *rxq;
int num_rx_q;
int rmt_start;
	/* mutex to protect idr */
struct mutex idr_lock;
struct idr idr;
/* count of enabled napi polls */
atomic_t enabled;
/* count of netdevs on top */
atomic_t netdevs;
};
static inline
struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev)
{
return (struct hfi1_netdev_priv *)&dev[1];
}
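/*
 * Illustrative sketch (compiled out; not part of the original file): the
 * cast above assumes the private data sits immediately after the
 * net_device, so the dummy netdev allocation must be sized for both.
 * The helper name below is hypothetical.
 */
#if 0
static struct net_device *hfi1_example_alloc_dummy_netdev(void)
{
	/* one allocation covering the net_device plus the private area */
	return kzalloc(sizeof(struct net_device) +
		       sizeof(struct hfi1_netdev_priv), GFP_KERNEL);
}
#endif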
static inline
int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
return priv->num_rx_q;
}
static inline
struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
return priv->rxq[ctxt].rcd;
}
static inline
int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
return priv->rmt_start;
}
static inline
void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
priv->rmt_start = rmt_idx;
}
/**
 * hfi1_netdev_enable_queues - NAPI enable function.
 * Enables the NAPI objects associated with the receive queues.
 * Each netdev calling it increments an atomic counter; the matching
 * disable function decrements the counter and, when it reaches 0,
 * calls napi_disable for every queue.
 *
 * @dd: hfi1 dev data
 */
void hfi1_netdev_enable_queues(struct hfi1_devdata *dd);
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
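/*
 * Illustrative sketch (compiled out; not part of the original file): a
 * netdev open/stop pair is expected to bracket its receive path with the
 * two calls above, so NAPI stays enabled while at least one netdev is up.
 * The lookup helper and function names are hypothetical.
 */
#if 0
extern struct hfi1_devdata *hfi1_example_lookup(struct net_device *dev);
static int hfi1_example_open(struct net_device *dev)
{
	/* counter 0 -> 1 enables NAPI on all receive queues */
	hfi1_netdev_enable_queues(hfi1_example_lookup(dev));
	return 0;
}
static int hfi1_example_stop(struct net_device *dev)
{
	/* counter 1 -> 0 disables NAPI again */
	hfi1_netdev_disable_queues(hfi1_example_lookup(dev));
	return 0;
}
#endif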
/**
 * hfi1_netdev_rx_init - Increments the netdevs counter. When called the
 * first time, it allocates receive queue data and calls netif_napi_add
 * for each queue.
*
* @dd: hfi1 dev data
*/
int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
/**
 * hfi1_netdev_rx_destroy - Decrements the netdevs counter; when it reaches 0,
 * NAPI is deleted and receive queue memory is freed.
*
* @dd: hfi1 dev data
*/
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
/**
* hfi1_netdev_alloc - Allocates netdev and private data. It is required
* because RMT index and MSI-X interrupt can be set only
* during driver initialization.
*
* @dd: hfi1 dev data
*/
int hfi1_netdev_alloc(struct hfi1_devdata *dd);
void hfi1_netdev_free(struct hfi1_devdata *dd);
/**
 * hfi1_netdev_add_data - Registers data with a unique identifier to be
 * requested later; this is needed for the VNIC and IPoIB VLAN
 * implementations.
* This call is protected by mutex idr_lock.
*
* @dd: hfi1 dev data
* @id: requested integer id up to INT_MAX
* @data: data to be associated with index
*/
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
/**
* hfi1_netdev_remove_data - Removes data with previously given id.
* Returns the reference to removed entry.
*
* @dd: hfi1 dev data
* @id: requested integer id up to INT_MAX
*/
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
/**
* hfi1_netdev_get_data - Gets data with given id
*
* @dd: hfi1 dev data
* @id: requested integer id up to INT_MAX
*/
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
/**
 * hfi1_netdev_get_first_data - Gets first entry with greater or equal id.
 *
 * @dd: hfi1 dev data
 * @start_id: requested integer id up to INT_MAX
*/
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id);
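/*
 * Illustrative sketch (compiled out; not part of the original file):
 * typical life cycle of an entry in the id registry declared above.
 * The id value 10 is an arbitrary example.
 */
#if 0
static void hfi1_example_registry_use(struct hfi1_devdata *dd, void *vinfo)
{
	void *found;
	hfi1_netdev_add_data(dd, 10, vinfo);	/* register under id 10 */
	found = hfi1_netdev_get_data(dd, 10);	/* found == vinfo */
	hfi1_netdev_remove_data(dd, 10);	/* drop the entry again */
	(void)found;
}
#endif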
/* chip.c */
/**
* hfi1_netdev_rx_napi - NAPI poll function
*
* @napi: pointer to NAPI object
* @budget: budget
*/
int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget);
#endif /* HFI1_NETDEV_H */
|
cornelisnetworks/opa-hfi1
|
compat/common/compat_common.c
|
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/export.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include "../hfi1/hfi.h"
#if !defined (IFS_SLES15) && !defined (IFS_SLES15SP1) && !defined (IFS_SLES12SP4)
#include "compat.h"
#endif
DEFINE_SRCU(debugfs_srcu);
const char *get_unit_name(int unit)
{
static char iname[16];
snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
return iname;
}
EXPORT_SYMBOL(get_unit_name);
void hfi1_vnic_setup(struct hfi1_devdata *dd)
{
}
EXPORT_SYMBOL(hfi1_vnic_setup);
int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
struct hfi1_vnic_vport_info *vinfo,
struct sk_buff *skb, u64 pbc, u8 plen)
{
return -ECOMM;
}
EXPORT_SYMBOL(hfi1_vnic_send_dma);
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
{
}
EXPORT_SYMBOL(hfi1_vnic_bypass_rcv);
void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
{
}
EXPORT_SYMBOL(hfi1_vnic_cleanup);
#ifdef NEED_PCI_REQUEST_IRQ
/*
* pci_request_irq - allocate an interrupt line for a PCI device
* @dev: PCI device to operate on
* @nr: device-relative interrupt vector index (0-based).
* @handler: Function to be called when the IRQ occurs.
* Primary handler for threaded interrupts.
* If NULL and thread_fn != NULL the default primary handler is
* installed.
* @thread_fn: Function called from the IRQ handler thread
* If NULL, no IRQ thread is created
* @dev_id: Cookie passed back to the handler function
* @fmt: Printf-like format string naming the handler
*
* This call allocates interrupt resources and enables the interrupt line and
* IRQ handling. From the point this call is made @handler and @thread_fn may
* be invoked. All interrupts requested using this function might be shared.
*
* @dev_id must not be NULL and must be globally unique.
*/
int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
irq_handler_t thread_fn, void *dev_id, const char *fmt, ...)
{
va_list ap;
int ret;
char *devname;
va_start(ap, fmt);
devname = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap);
ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
IRQF_SHARED, devname, dev_id);
if (ret)
kfree(devname);
return ret;
}
EXPORT_SYMBOL(pci_request_irq);
/*
* pci_free_irq - free an interrupt allocated with pci_request_irq
* @dev: PCI device to operate on
* @nr: device-relative interrupt vector index (0-based).
* @dev_id: Device identity to free
*
* Remove an interrupt handler. The handler is removed and if the interrupt
* line is no longer in use by any driver it is disabled. The caller must
* ensure the interrupt is disabled on the device before calling this function.
* The function does not return until any executing interrupts for this IRQ
* have completed.
*
* This function must not be called from interrupt context.
*/
void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id)
{
free_irq(pci_irq_vector(dev, nr), dev_id);
}
EXPORT_SYMBOL(pci_free_irq);
#endif
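/*
 * Illustrative sketch (compiled out; not part of the original file): a
 * driver is expected to pair pci_request_irq() with pci_free_irq() per
 * vector. The handler and naming below are hypothetical.
 */
#if 0
static irqreturn_t example_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}
static int example_setup_vector0(struct pci_dev *pdev, void *ctx)
{
	int ret;
	ret = pci_request_irq(pdev, 0, example_handler, NULL, ctx,
			      "example_%s", pci_name(pdev));
	if (ret)
		return ret;
	/* ... and later, on teardown: */
	pci_free_irq(pdev, 0, ctx);
	return 0;
}
#endif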
#ifdef NEED_CDEV_SET_PARENT
/**
* cdev_set_parent() - set the parent kobject for a char device
* @p: the cdev structure
* @kobj: the kobject to take a reference to
*
* cdev_set_parent() sets a parent kobject which will be referenced
* appropriately so the parent is not freed before the cdev. This
* should be called before cdev_add.
*/
void cdev_set_parent(struct cdev *p, struct kobject *kobj)
{
WARN_ON(!kobj->state_initialized);
p->kobj.parent = kobj;
}
EXPORT_SYMBOL(cdev_set_parent);
#endif
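/*
 * Illustrative sketch (compiled out; not part of the original file):
 * cdev_set_parent() belongs between cdev_init() and cdev_add() so the
 * parent kobject outlives the cdev. The wrapper name is hypothetical.
 */
#if 0
static int example_add_cdev(struct cdev *cdev, struct kobject *parent,
			    const struct file_operations *fops, dev_t devt)
{
	cdev_init(cdev, fops);
	cdev_set_parent(cdev, parent);	/* take the parent reference first */
	return cdev_add(cdev, devt, 1);
}
#endif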
#ifndef HAVE_IB_SET_DEVICE_OPS
void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
#ifdef IB_DEV_HAS_EMBEDDED_OPS
struct ib_device_ops *dev_ops = &dev->ops;
#else
struct ib_device *dev_ops = dev;
#endif
#define SET_DEVICE_OP(ptr, name) \
	do { \
		if (ops->name && !((ptr)->name)) \
			(ptr)->name = ops->name; \
	} while (0)
#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
SET_DEVICE_OP(dev_ops, add_gid);
#ifdef HAVE_ADVICE_MR
SET_DEVICE_OP(dev_ops, advise_mr);
#endif
#ifdef HAVE_ALLOC_DM
SET_DEVICE_OP(dev_ops, alloc_dm);
#endif
SET_DEVICE_OP(dev_ops, alloc_fmr);
SET_DEVICE_OP(dev_ops, alloc_hw_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
SET_DEVICE_OP(dev_ops, alloc_mw);
SET_DEVICE_OP(dev_ops, alloc_pd);
#ifdef HAVE_ALLOC_RDMA_NETDEV
SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
#endif
SET_DEVICE_OP(dev_ops, alloc_ucontext);
#ifdef HAVE_ALLOC_XRCD
SET_DEVICE_OP(dev_ops, alloc_xrcd);
#endif
SET_DEVICE_OP(dev_ops, attach_mcast);
SET_DEVICE_OP(dev_ops, check_mr_status);
SET_DEVICE_OP(dev_ops, create_ah);
#ifdef HAVE_CREATE_COUNTERS
SET_DEVICE_OP(dev_ops, create_counters);
#endif
SET_DEVICE_OP(dev_ops, create_cq);
SET_DEVICE_OP(dev_ops, create_flow);
#ifdef HAVE_CREATE_FLOW_ACTION_ESP
SET_DEVICE_OP(dev_ops, create_flow_action_esp);
#endif
SET_DEVICE_OP(dev_ops, create_qp);
SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
SET_DEVICE_OP(dev_ops, create_srq);
SET_DEVICE_OP(dev_ops, create_wq);
#ifdef HAVE_ALLOC_DM
SET_DEVICE_OP(dev_ops, dealloc_dm);
#endif
#ifdef HAVE_DEALLOC_DRIVER
SET_DEVICE_OP(dev_ops, dealloc_driver);
#endif
SET_DEVICE_OP(dev_ops, dealloc_fmr);
SET_DEVICE_OP(dev_ops, dealloc_mw);
SET_DEVICE_OP(dev_ops, dealloc_pd);
SET_DEVICE_OP(dev_ops, dealloc_ucontext);
#ifdef HAVE_ALLOC_XRCD
SET_DEVICE_OP(dev_ops, dealloc_xrcd);
#endif
SET_DEVICE_OP(dev_ops, del_gid);
SET_DEVICE_OP(dev_ops, dereg_mr);
SET_DEVICE_OP(dev_ops, destroy_ah);
#ifdef HAVE_CREATE_COUNTERS
SET_DEVICE_OP(dev_ops, destroy_counters);
#endif
SET_DEVICE_OP(dev_ops, destroy_cq);
SET_DEVICE_OP(dev_ops, destroy_flow);
#ifdef HAVE_DESTROY_FLOW_ACTION
SET_DEVICE_OP(dev_ops, destroy_flow_action);
#endif
SET_DEVICE_OP(dev_ops, destroy_qp);
SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
SET_DEVICE_OP(dev_ops, destroy_srq);
SET_DEVICE_OP(dev_ops, destroy_wq);
SET_DEVICE_OP(dev_ops, detach_mcast);
SET_DEVICE_OP(dev_ops, disassociate_ucontext);
SET_DEVICE_OP(dev_ops, drain_rq);
SET_DEVICE_OP(dev_ops, drain_sq);
#ifdef HAVE_ENABLE_DRIVER
SET_DEVICE_OP(dev_ops, enable_driver);
#endif
#ifdef HAVE_FILL_RES_ENTRY
SET_DEVICE_OP(dev_ops, fill_res_entry);
#endif
SET_DEVICE_OP(dev_ops, get_dev_fw_str);
SET_DEVICE_OP(dev_ops, get_dma_mr);
SET_DEVICE_OP(dev_ops, get_hw_stats);
SET_DEVICE_OP(dev_ops, get_link_layer);
SET_DEVICE_OP(dev_ops, get_netdev);
SET_DEVICE_OP(dev_ops, get_port_immutable);
#ifdef HAVE_GET_VECTOR_AFFINITY
SET_DEVICE_OP(dev_ops, get_vector_affinity);
#endif
SET_DEVICE_OP(dev_ops, get_vf_config);
SET_DEVICE_OP(dev_ops, get_vf_stats);
#ifdef HAVE_INIT_PORT
SET_DEVICE_OP(dev_ops, init_port);
#endif
#ifdef HAVE_IW_ACCEPT
SET_DEVICE_OP(dev_ops, iw_accept);
SET_DEVICE_OP(dev_ops, iw_add_ref);
SET_DEVICE_OP(dev_ops, iw_connect);
SET_DEVICE_OP(dev_ops, iw_create_listen);
SET_DEVICE_OP(dev_ops, iw_destroy_listen);
SET_DEVICE_OP(dev_ops, iw_get_qp);
SET_DEVICE_OP(dev_ops, iw_reject);
SET_DEVICE_OP(dev_ops, iw_rem_ref);
#endif
SET_DEVICE_OP(dev_ops, map_mr_sg);
SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
#ifdef HAVE_CREATE_FLOW_ACTION_ESP
SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
#endif
SET_DEVICE_OP(dev_ops, modify_port);
SET_DEVICE_OP(dev_ops, modify_qp);
SET_DEVICE_OP(dev_ops, modify_srq);
SET_DEVICE_OP(dev_ops, modify_wq);
SET_DEVICE_OP(dev_ops, peek_cq);
SET_DEVICE_OP(dev_ops, poll_cq);
SET_DEVICE_OP(dev_ops, post_recv);
SET_DEVICE_OP(dev_ops, post_send);
SET_DEVICE_OP(dev_ops, post_srq_recv);
SET_DEVICE_OP(dev_ops, process_mad);
SET_DEVICE_OP(dev_ops, query_ah);
SET_DEVICE_OP(dev_ops, query_device);
SET_DEVICE_OP(dev_ops, query_gid);
SET_DEVICE_OP(dev_ops, query_pkey);
SET_DEVICE_OP(dev_ops, query_port);
SET_DEVICE_OP(dev_ops, query_qp);
SET_DEVICE_OP(dev_ops, query_srq);
#ifdef HAVE_RDMA_NETDEV_GET_PARAMS
SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
#endif
#ifdef HAVE_CREATE_COUNTERS
SET_DEVICE_OP(dev_ops, read_counters);
#endif
#ifdef HAVE_REG_DM_MR
SET_DEVICE_OP(dev_ops, reg_dm_mr);
#endif
SET_DEVICE_OP(dev_ops, reg_user_mr);
SET_DEVICE_OP(dev_ops, req_ncomp_notif);
SET_DEVICE_OP(dev_ops, req_notify_cq);
SET_DEVICE_OP(dev_ops, rereg_user_mr);
SET_DEVICE_OP(dev_ops, resize_cq);
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, unmap_fmr);
#if 0
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_pd);
SET_OBJ_SIZE(dev_ops, ib_srq);
SET_OBJ_SIZE(dev_ops, ib_ucontext);
#endif
}
EXPORT_SYMBOL(ib_set_device_ops);
#endif
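/*
 * Illustrative sketch (compiled out; not part of the original file): a
 * provider fills a static ops table once and applies it with
 * ib_set_device_ops(); only callbacks left unset on the device are
 * copied. The two callback names below are hypothetical.
 */
#if 0
static const struct ib_device_ops example_dev_ops = {
	.query_device = example_query_device,
	.query_port = example_query_port,
};
static void example_register_ops(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &example_dev_ops);
}
#endif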
|
cornelisnetworks/opa-hfi1
|
compat/RH75/compat.h
|
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(RH75_COMPAT_H)
#define RH75_COMPAT_H
#define CREATE_AH_HAS_UDATA
#define HAVE_ALLOC_RDMA_NETDEV
#define NEED_MM_HELPER_FUNCTIONS
#define IB_MODIFY_QP_IS_OK_HAS_LINK
#define VM_OPS_FAULT_HAVE_VMA
#define HAVE_IB_GET_CACHED_SUBNET_PREFIX
#define HAVE_SECURITY_H
#define HAVE_AIO_WRITE
#define IB_PORT_ATTR_HAS_GRH_REQUIRED
#define HAVE_DEVICE_RH
#define NEED_KTHREAD_HELPER_FUNCTIONS
#define NEED_PCI_REQUEST_IRQ
#define NEED_CURRENT_TIME
#include "compat_common.h"
#define __GFP_RECLAIM (__GFP_WAIT)
#define IB_QP_CREATE_USE_GFP_NOIO (1 << 7)
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
#define NET_NAME_UNKNOWN 0
#define rdma_create_ah(a, b, c) rdma_create_ah(a, b)
#define rdma_destroy_ah(a, b) rdma_destroy_ah(a)
#define ib_register_device(a, b, c) ib_register_device((a), (c))
#define rdma_set_device_sysfs_group(a, b)
#define alloc_netdev_mqs(size, name, name_assign_type, setup, sdma, ctxts) \
alloc_netdev_mqs((size), (name), (setup), (sdma), (ctxts))
#undef access_ok
#define access_ok(addr, size) \
(likely(__range_not_ok(addr, size, user_addr_max()) == 0))
#define _ib_alloc_device ib_alloc_device
struct hfi1_msix_entry;
struct hfi1_devdata;
void pcie_flr(struct pci_dev *dev);
void msix_setup(struct pci_dev *pcidev, int pos, u32 *msixcnt,
struct hfi1_msix_entry *hfi1_msix_entry);
int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
int debugfs_use_file_start(struct dentry *dentry, int *srcu_idx)
__acquires(&debugfs_srcu);
void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
struct ib_umem *ib_umem_get_hfi(struct ib_ucontext *context, unsigned long addr,
size_t size, int access, int dmasync);
static inline long compat_get_user_pages(unsigned long start,
unsigned long nr_pages,
unsigned int gup_flags,
struct page **pages,
struct vm_area_struct **vmas)
{
return get_user_pages(current, current->mm, start,
nr_pages, 1, 1, pages, vmas);
}
#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
compat_get_user_pages(start, nr_pages, gup_flags, pages, vmas)
static inline int simple_positive(struct dentry *dentry)
{
return !d_unhashed(dentry) && dentry->d_inode;
}
static inline void hfi1_enable_intx(struct pci_dev *pdev)
{
/* first, turn on INTx */
pci_intx(pdev, 1);
/* then turn off MSI-X */
pci_disable_msix(pdev);
}
/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc) ((desc)->dev)
#define dev_to_msi_list(dev) (&(dev)->msi_list)
#define first_msi_entry(dev) \
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#ifndef HAVE_NO_SPEC_H
#include <asm/barrier.h>
#undef array_index_mask_nospec
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index
* @size: number of elements in array
*
* When @index is out of bounds (@index >= @size), the sign bit will be
* set. Extend the sign bit to all bits and invert, giving a result of
* zero for an out of bounds index, or ~0 if within bounds [0, @size).
*/
#ifndef array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long index,
unsigned long size)
{
/*
* Always calculate and emit the mask even if the compiler
* thinks the mask is not needed. The compiler does not take
* into account the value of @index under speculation.
*/
OPTIMIZER_HIDE_VAR(index);
return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}
#endif
#undef array_index_nospec
/*
* array_index_nospec - sanitize an array index after a bounds check
*
* For a code sequence like:
*
* if (index < size) {
* index = array_index_nospec(index, size);
* val = array[index];
* }
*
* ...if the CPU speculates past the bounds check then
* array_index_nospec() will clamp the index within the range of [0,
* size).
*/
#define array_index_nospec(index, size) \
({ \
typeof(index) _i = (index); \
typeof(size) _s = (size); \
unsigned long _mask = array_index_mask_nospec(_i, _s); \
\
BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
\
(typeof(_i)) (_i & _mask); \
})
#endif /* !HAVE_NO_SPEC_H */
#endif /* RH75_COMPAT_H */
|
cornelisnetworks/opa-hfi1
|
compat/RH78/compat.c
|
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Copyright(c) 2020 Intel Corporation.
*/
#include <linux/export.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include "../hfi1/hfi.h"
#include "compat.h"
/**
* debugfs_use_file_start - mark the beginning of file data access
* @dentry: the dentry object whose data is being accessed.
* @srcu_idx: a pointer to some memory to store a SRCU index in.
*
* Up to a matching call to debugfs_use_file_finish(), any
* successive call into the file removing functions debugfs_remove()
* and debugfs_remove_recursive() will block. Since associated private
* file data may only get freed after a successful return of any of
* the removal functions, you may safely access it after a successful
* call to debugfs_use_file_start() without worrying about
* lifetime issues.
*
* If -%EIO is returned, the file has already been removed and thus,
* it is not safe to access any of its data. If, on the other hand,
* it is allowed to access the file data, zero is returned.
*
* Regardless of the return code, any call to
* debugfs_use_file_start() must be followed by a matching call
* to debugfs_use_file_finish().
*/
int debugfs_use_file_start(struct dentry *dentry, int *srcu_idx)
__acquires(&debugfs_srcu)
{
*srcu_idx = srcu_read_lock(&debugfs_srcu);
barrier();
if (d_unlinked(dentry))
return -EIO;
return 0;
}
EXPORT_SYMBOL(debugfs_use_file_start);
/**
* debugfs_use_file_finish - mark the end of file data access
* @srcu_idx: the SRCU index "created" by a former call to
* debugfs_use_file_start().
*
* Allow any ongoing concurrent call into debugfs_remove() or
* debugfs_remove_recursive() blocked by a former call to
* debugfs_use_file_start() to proceed and return to its caller.
*/
void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu)
{
srcu_read_unlock(&debugfs_srcu, srcu_idx);
}
EXPORT_SYMBOL(debugfs_use_file_finish);
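/*
 * Illustrative sketch (compiled out; not part of the original file): the
 * two helpers above must always be used as a bracket around accesses to
 * debugfs file data, even when the start call fails. The accessor name
 * is hypothetical.
 */
#if 0
static int example_access_file_data(struct dentry *dentry)
{
	int srcu_idx, ret;
	ret = debugfs_use_file_start(dentry, &srcu_idx);
	if (!ret) {
		/* safe to touch the file's private data here */
	}
	debugfs_use_file_finish(srcu_idx);	/* matching call, always */
	return ret;
}
#endif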
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/qib/qib.h
|
#ifndef _QIB_KERNEL_H
#define _QIB_KERNEL_H
/*
* Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This header file is the base header file for qlogic_ib kernel code
* qib_user.h serves a similar purpose for user code.
*/
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <rdma/ib_hdrs.h>
#include <rdma/rdma_vt.h>
#include "compat.h"
#include "qib_common.h"
#include "qib_verbs.h"
/* only s/w major version of QLogic_IB we can handle */
#define QIB_CHIP_VERS_MAJ 2U
/* don't care about this except printing */
#define QIB_CHIP_VERS_MIN 0U
/* The Organization Unique Identifier (Mfg code), and its position in GUID */
#define QIB_OUI 0x001175
#define QIB_OUI_LSB 40
/*
* per driver stats, either not device nor port-specific, or
* summed over all of the devices and ports.
* They are described by name via ipathfs filesystem, so layout
* and number of elements can change without breaking compatibility.
* If members are added or deleted qib_statnames[] in qib_fs.c must
* change to match.
*/
struct qlogic_ib_stats {
__u64 sps_ints; /* number of interrupts handled */
__u64 sps_errints; /* number of error interrupts */
__u64 sps_txerrs; /* tx-related packet errors */
__u64 sps_rcverrs; /* non-crc rcv packet errors */
__u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
__u64 sps_nopiobufs; /* no pio bufs avail from kernel */
__u64 sps_ctxts; /* number of contexts currently open */
__u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
__u64 sps_buffull;
__u64 sps_hdrfull;
};
extern struct qlogic_ib_stats qib_stats;
extern const struct pci_error_handlers qib_pci_err_handler;
#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
/*
 * First-cut criterion for "device is active" is
* two thousand dwords combined Tx, Rx traffic per
* 5-second interval. SMA packets are 64 dwords,
* and occur "a few per second", presumably each way.
*/
#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
/*
* Struct used to indicate which errors are logged in each of the
* error-counters that are logged to EEPROM. A counter is incremented
* _once_ (saturating at 255) for each event with any bits set in
* the error or hwerror register masks below.
*/
#define QIB_EEP_LOG_CNT (4)
struct qib_eep_log_mask {
u64 errs_to_log;
u64 hwerrs_to_log;
};
/*
* Below contains all data related to a single context (formerly called port).
*/
#ifdef CONFIG_DEBUG_FS
struct qib_opcode_stats_perctx;
#endif
struct qib_ctxtdata {
void **rcvegrbuf;
dma_addr_t *rcvegrbuf_phys;
/* rcvhdrq base, needs mmap before useful */
void *rcvhdrq;
/* kernel virtual address where hdrqtail is updated */
void *rcvhdrtail_kvaddr;
/*
* temp buffer for expected send setup, allocated at open, instead
* of each setup call
*/
void *tid_pg_list;
/*
* Shared page for kernel to signal user processes that send buffers
* need disarming. The process should call QIB_CMD_DISARM_BUFS
* or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
*/
unsigned long *user_event_mask;
/* when waiting for rcv or pioavail */
wait_queue_head_t wait;
/*
	 * rcvegr bufs base, physical, must fit in 44 bits
	 * (so 32 bit programs using mmap64 with 44 bits work)
*/
dma_addr_t rcvegr_phys;
/* mmap of hdrq, must fit in 44 bits */
dma_addr_t rcvhdrq_phys;
dma_addr_t rcvhdrqtailaddr_phys;
/*
* number of opens (including slave sub-contexts) on this instance
* (ignoring forks, dup, etc. for now)
*/
int cnt;
/*
* how much space to leave at start of eager TID entries for
* protocol use, on each TID
*/
/* instead of calculating it */
unsigned ctxt;
/* local node of context */
int node_id;
/* non-zero if ctxt is being shared. */
u16 subctxt_cnt;
/* non-zero if ctxt is being shared. */
u16 subctxt_id;
/* number of eager TID entries. */
u16 rcvegrcnt;
/* index of first eager TID entry. */
u16 rcvegr_tid_base;
/* number of pio bufs for this ctxt (all procs, if shared) */
u32 piocnt;
/* first pio buffer for this ctxt */
u32 pio_base;
/* chip offset of PIO buffers for this ctxt */
u32 piobufs;
/* how many alloc_pages() chunks in rcvegrbuf_pages */
u32 rcvegrbuf_chunks;
/* how many egrbufs per chunk */
u16 rcvegrbufs_perchunk;
/* ilog2 of above */
u16 rcvegrbufs_perchunk_shift;
/* order for rcvegrbuf_pages */
size_t rcvegrbuf_size;
/* rcvhdrq size (for freeing) */
size_t rcvhdrq_size;
/* per-context flags for fileops/intr communication */
unsigned long flag;
/* next expected TID to check when looking for free */
u32 tidcursor;
/* WAIT_RCV that timed out, no interrupt */
u32 rcvwait_to;
/* WAIT_PIO that timed out, no interrupt */
u32 piowait_to;
/* WAIT_RCV already happened, no wait */
u32 rcvnowait;
/* WAIT_PIO already happened, no wait */
u32 pionowait;
/* total number of polled urgent packets */
u32 urgent;
/* saved total number of polled urgent packets for poll edge trigger */
u32 urgent_poll;
/* pid of process using this ctxt */
pid_t pid;
pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
/* same size as task_struct .comm[], command that opened context */
char comm[16];
/* pkeys set by this use of this ctxt */
u16 pkeys[4];
/* so file ops can get at unit */
struct qib_devdata *dd;
/* so funcs that need physical port can get it easily */
struct qib_pportdata *ppd;
/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
void *subctxt_uregbase;
/* An array of pages for the eager receive buffers * N */
void *subctxt_rcvegrbuf;
/* An array of pages for the eager header queue entries * N */
void *subctxt_rcvhdr_base;
/* The version of the library which opened this ctxt */
u32 userversion;
/* Bitmask of active slaves */
u32 active_slaves;
/* Type of packets or conditions we want to poll for */
u16 poll_type;
/* receive packet sequence counter */
u8 seq_cnt;
u8 redirect_seq_cnt;
/* ctxt rcvhdrq head offset */
u32 head;
/* QPs waiting for context processing */
struct list_head qp_wait_list;
#ifdef CONFIG_DEBUG_FS
/* verbs stats per CTX */
struct qib_opcode_stats_perctx *opstats;
#endif
};
struct rvt_sge_state;
struct qib_sdma_txreq {
int flags;
int sg_count;
dma_addr_t addr;
void (*callback)(struct qib_sdma_txreq *, int);
u16 start_idx; /* sdma private */
u16 next_descq_idx; /* sdma private */
struct list_head list; /* sdma private */
};
struct qib_sdma_desc {
__le64 qw[2];
};
struct qib_verbs_txreq {
struct qib_sdma_txreq txreq;
struct rvt_qp *qp;
struct rvt_swqe *wqe;
u32 dwords;
u16 hdr_dwords;
u16 hdr_inx;
struct qib_pio_header *align_buf;
struct rvt_mregion *mr;
struct rvt_sge_state *ss;
};
#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
#define QIB_SDMA_TXREQ_F_INTREQ 0x4
#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
#define QIB_SDMA_TXREQ_S_OK 0
#define QIB_SDMA_TXREQ_S_SENDERROR 1
#define QIB_SDMA_TXREQ_S_ABORTED 2
#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
/*
* Get/Set IB link-level config parameters for f_get/set_ib_cfg()
* Mostly for MADs that set or query link parameters, also ipath
* config interfaces
*/
#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
#define QIB_IB_CFG_SPD 5 /* current Link spd */
#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
#define QIB_IB_CFG_VL_HIGH_LIMIT 19
#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
/*
* for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
* IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
* QIB_IB_CFG_LINKDEFAULT cmd
*/
#define IB_LINKCMD_DOWN (0 << 16)
#define IB_LINKCMD_ARMED (1 << 16)
#define IB_LINKCMD_ACTIVE (2 << 16)
#define IB_LINKINITCMD_NOP 0
#define IB_LINKINITCMD_POLL 1
#define IB_LINKINITCMD_SLEEP 2
#define IB_LINKINITCMD_DISABLE 3
/*
* valid states passed to qib_set_linkstate() user call
*/
#define QIB_IB_LINKDOWN 0
#define QIB_IB_LINKARM 1
#define QIB_IB_LINKACTIVE 2
#define QIB_IB_LINKDOWN_ONLY 3
#define QIB_IB_LINKDOWN_SLEEP 4
#define QIB_IB_LINKDOWN_DISABLE 5
/*
* These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
* negotiation) are used for the 3rd argument to path_f_set_ib_cfg
* with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
 * are also the possible values for qib_link_speed_enabled and active
* The values were chosen to match values used within the IB spec.
*/
#define QIB_IB_SDR 1
#define QIB_IB_DDR 2
#define QIB_IB_QDR 4
#define QIB_DEFAULT_MTU 4096
/* max number of IB ports supported per HCA */
#define QIB_MAX_IB_PORTS 2
/*
* Possible IB config parameters for f_get/set_ib_table()
*/
#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
/*
* Possible "operations" for f_rcvctrl(ppd, op, ctxt)
* these are bits so they can be combined, e.g.
* QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
*/
#define QIB_RCVCTRL_TAILUPD_ENB 0x01
#define QIB_RCVCTRL_TAILUPD_DIS 0x02
#define QIB_RCVCTRL_CTXT_ENB 0x04
#define QIB_RCVCTRL_CTXT_DIS 0x08
#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
#define QIB_RCVCTRL_PKEY_DIS 0x80
#define QIB_RCVCTRL_BP_ENB 0x0100
#define QIB_RCVCTRL_BP_DIS 0x0200
#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
/*
* Possible "operations" for f_sendctrl(ppd, op, var)
* these are bits so they can be combined, e.g.
* QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
* Some operations (e.g. DISARM, ABORT) are known to
* be "one-shot", so do not modify shadow.
*/
#define QIB_SENDCTRL_DISARM (0x1000)
#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
/* available (0x2000) */
#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
#define QIB_SENDCTRL_SEND_DIS (0x20000)
#define QIB_SENDCTRL_SEND_ENB (0x40000)
#define QIB_SENDCTRL_FLUSH (0x80000)
#define QIB_SENDCTRL_CLEAR (0x100000)
#define QIB_SENDCTRL_DISARM_ALL (0x200000)
/*
* These are the generic indices for requesting per-port
* counter values via the f_portcntr function. They
* are always returned as 64 bit values, although most
* are 32 bit counters.
*/
/* send-related counters */
#define QIBPORTCNTR_PKTSEND 0U
#define QIBPORTCNTR_WORDSEND 1U
#define QIBPORTCNTR_PSXMITDATA 2U
#define QIBPORTCNTR_PSXMITPKTS 3U
#define QIBPORTCNTR_PSXMITWAIT 4U
#define QIBPORTCNTR_SENDSTALL 5U
/* receive-related counters */
#define QIBPORTCNTR_PKTRCV 6U
#define QIBPORTCNTR_PSRCVDATA 7U
#define QIBPORTCNTR_PSRCVPKTS 8U
#define QIBPORTCNTR_RCVEBP 9U
#define QIBPORTCNTR_RCVOVFL 10U
#define QIBPORTCNTR_WORDRCV 11U
/* IB link related error counters */
#define QIBPORTCNTR_RXLOCALPHYERR 12U
#define QIBPORTCNTR_RXVLERR 13U
#define QIBPORTCNTR_ERRICRC 14U
#define QIBPORTCNTR_ERRVCRC 15U
#define QIBPORTCNTR_ERRLPCRC 16U
#define QIBPORTCNTR_BADFORMAT 17U
#define QIBPORTCNTR_ERR_RLEN 18U
#define QIBPORTCNTR_IBSYMBOLERR 19U
#define QIBPORTCNTR_INVALIDRLEN 20U
#define QIBPORTCNTR_UNSUPVL 21U
#define QIBPORTCNTR_EXCESSBUFOVFL 22U
#define QIBPORTCNTR_ERRLINK 23U
#define QIBPORTCNTR_IBLINKDOWN 24U
#define QIBPORTCNTR_IBLINKERRRECOV 25U
#define QIBPORTCNTR_LLI 26U
/* other error counters */
#define QIBPORTCNTR_RXDROPPKT 27U
#define QIBPORTCNTR_VL15PKTDROP 28U
#define QIBPORTCNTR_ERRPKEY 29U
#define QIBPORTCNTR_KHDROVFL 30U
/* sampling counters (these are actually control registers) */
#define QIBPORTCNTR_PSINTERVAL 31U
#define QIBPORTCNTR_PSSTART 32U
#define QIBPORTCNTR_PSSTAT 33U
/* how often we check for packet activity for "power on hours" (in seconds) */
#define ACTIVITY_TIMER 5
#define MAX_NAME_SIZE 64
#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify;
#endif
struct qib_msix_entry {
void *arg;
#ifdef CONFIG_INFINIBAND_QIB_DCA
int dca;
int rcv;
struct qib_irq_notify *notifier;
#endif
cpumask_var_t mask;
};
/* Below is an opaque struct. Each chip (device) can maintain
* private data needed for its operation, but not germane to the
* rest of the driver. For convenience, we define another that
* is chip-specific, per-port
*/
struct qib_chip_specific;
struct qib_chipport_specific;
enum qib_sdma_states {
qib_sdma_state_s00_hw_down,
qib_sdma_state_s10_hw_start_up_wait,
qib_sdma_state_s20_idle,
qib_sdma_state_s30_sw_clean_up_wait,
qib_sdma_state_s40_hw_clean_up_wait,
qib_sdma_state_s50_hw_halt_wait,
qib_sdma_state_s99_running,
};
enum qib_sdma_events {
qib_sdma_event_e00_go_hw_down,
qib_sdma_event_e10_go_hw_start,
qib_sdma_event_e20_hw_started,
qib_sdma_event_e30_go_running,
qib_sdma_event_e40_sw_cleaned,
qib_sdma_event_e50_hw_cleaned,
qib_sdma_event_e60_hw_halted,
qib_sdma_event_e70_go_idle,
qib_sdma_event_e7220_err_halted,
qib_sdma_event_e7322_err_halted,
qib_sdma_event_e90_timer_tick,
};
extern char *qib_sdma_state_names[];
extern char *qib_sdma_event_names[];
struct sdma_set_state_action {
unsigned op_enable:1;
unsigned op_intenable:1;
unsigned op_halt:1;
unsigned op_drain:1;
unsigned go_s99_running_tofalse:1;
unsigned go_s99_running_totrue:1;
};
struct qib_sdma_state {
struct kref kref;
struct completion comp;
enum qib_sdma_states current_state;
struct sdma_set_state_action *set_state_action;
unsigned current_op;
unsigned go_s99_running;
unsigned first_sendbuf;
unsigned last_sendbuf; /* really last +1 */
/* debugging/devel */
enum qib_sdma_states previous_state;
unsigned previous_op;
enum qib_sdma_events last_event;
};
struct xmit_wait {
struct timer_list timer;
u64 counter;
u8 flags;
struct cache {
u64 psxmitdata;
u64 psrcvdata;
u64 psxmitpkts;
u64 psrcvpkts;
u64 psxmitwait;
} counter_cache;
};
/*
* The structure below encapsulates data relevant to a physical IB Port.
* Current chips support only one such port, but the separation
* clarifies things a bit. Note that to conform to IB conventions,
* port-numbers are one-based. The first or only port is port1.
*/
struct qib_pportdata {
struct qib_ibport ibport_data;
struct qib_devdata *dd;
struct qib_chippport_specific *cpspec; /* chip-specific per-port */
struct kobject pport_kobj;
struct kobject pport_cc_kobj;
struct kobject sl2vl_kobj;
struct kobject diagc_kobj;
/* GUID for this interface, in network order */
__be64 guid;
/* QIB_POLL, etc. link-state specific flags, per port */
u32 lflags;
/* qib_lflags driver is waiting for */
u32 state_wanted;
spinlock_t lflags_lock;
/* ref count for each pkey */
atomic_t pkeyrefs[4];
/*
* this address is mapped readonly into user processes so they can
* get status cheaply, whenever they want. One qword of status per port
*/
u64 *statusp;
/* SendDMA related entries */
/* read mostly */
struct qib_sdma_desc *sdma_descq;
struct workqueue_struct *qib_wq;
struct qib_sdma_state sdma_state;
dma_addr_t sdma_descq_phys;
volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
dma_addr_t sdma_head_phys;
u16 sdma_descq_cnt;
/* read/write using lock */
spinlock_t sdma_lock ____cacheline_aligned_in_smp;
struct list_head sdma_activelist;
struct list_head sdma_userpending;
u64 sdma_descq_added;
u64 sdma_descq_removed;
u16 sdma_descq_tail;
u16 sdma_descq_head;
u8 sdma_generation;
u8 sdma_intrequest;
struct tasklet_struct sdma_sw_clean_up_task
____cacheline_aligned_in_smp;
wait_queue_head_t state_wait; /* for state_wanted */
/* HoL blocking for SMP replies */
unsigned hol_state;
struct timer_list hol_timer;
/*
* Shadow copies of registers; size indicates read access size.
	 * Most of them are readonly, but some are write-only registers,
* where we manipulate the bits in the shadow copy, and then write
* the shadow copy to qlogic_ib.
*
* We deliberately make most of these 32 bits, since they have
	 * restricted range. For any that we read, we want to generate 32
* bit accesses, since Opteron will generate 2 separate 32 bit HT
* transactions for a 64 bit read, and we want to avoid unnecessary
* bus transactions.
*/
/* This is the 64 bit group */
/* last ibcstatus. opaque outside chip-specific code */
u64 lastibcstat;
/* these are the "32 bit" regs */
/*
* the following two are 32-bit bitmasks, but {test,clear,set}_bit
* all expect bit fields to be "unsigned long"
*/
unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
unsigned long p_sendctrl; /* shadow per-port sendctrl */
u32 ibmtu; /* The MTU programmed for this unit */
/*
* Current max size IB packet (in bytes) including IB headers, that
* we can send. Changes when ibmtu changes.
*/
u32 ibmaxlen;
/*
* ibmaxlen at init time, limited by chip and by receive buffer
* size. Not changed after init.
*/
u32 init_ibmaxlen;
/* LID programmed for this instance */
u16 lid;
/* list of pkeys programmed; 0 if not set */
u16 pkeys[4];
/* LID mask control */
u8 lmc;
u8 link_width_supported;
u8 link_speed_supported;
u8 link_width_enabled;
u8 link_speed_enabled;
u8 link_width_active;
u8 link_speed_active;
u8 vls_supported;
u8 vls_operational;
/* Rx Polarity inversion (compensate for ~tx on partner) */
u8 rx_pol_inv;
u8 hw_pidx; /* physical port index */
u8 port; /* IB port number and index into dd->pports - 1 */
u8 delay_mult;
/* used to override LED behavior */
u8 led_override; /* Substituted for normal value, if non-zero */
u16 led_override_timeoff; /* delta to next timer event */
u8 led_override_vals[2]; /* Alternates per blink-frame */
u8 led_override_phase; /* Just counts, LSB picks from vals[] */
atomic_t led_override_timer_active;
/* Used to flash LEDs in override mode */
struct timer_list led_override_timer;
struct xmit_wait cong_stats;
struct timer_list symerr_clear_timer;
/* Synchronize access between driver writes and sysfs reads */
spinlock_t cc_shadow_lock
____cacheline_aligned_in_smp;
/* Shadow copy of the congestion control table */
struct cc_table_shadow *ccti_entries_shadow;
/* Shadow copy of the congestion control entries */
struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;
/* List of congestion control table entries */
struct ib_cc_table_entry_shadow *ccti_entries;
/* 16 congestion entries with each entry corresponding to a SL */
struct ib_cc_congestion_entry_shadow *congestion_entries;
/* Maximum number of congestion control entries that the agent expects
* the manager to send.
*/
u16 cc_supported_table_entries;
/* Total number of congestion control table entries */
u16 total_cct_entry;
/* Bit map identifying service level */
u16 cc_sl_control_map;
/* maximum congestion control table index */
u16 ccti_limit;
/* CA's max number of 64 entry units in the congestion control table */
u8 cc_max_table_entries;
};
/* Observers. Not to be taken lightly, possibly not to ship. */
/*
* If a diag read or write is to (bottom <= offset <= top),
* the "hoook" is called, allowing, e.g. shadows to be
* updated in sync with the driver. struct diag_observer
* is the "visible" part.
*/
struct diag_observer;
typedef int (*diag_hook) (struct qib_devdata *dd,
const struct diag_observer *op,
u32 offs, u64 *data, u64 mask, int only_32);
struct diag_observer {
diag_hook hook;
u32 bottom;
u32 top;
};
extern int qib_register_observer(struct qib_devdata *dd,
const struct diag_observer *op);
/* Only declared here, not defined. Private to diags */
struct diag_observer_list_elt;
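/*
 * Illustrative sketch (compiled out; not part of the original file):
 * registering an observer for a hypothetical register window. The
 * offsets and the hook body are examples only.
 */
#if 0
static int example_hook(struct qib_devdata *dd,
			const struct diag_observer *op,
			u32 offs, u64 *data, u64 mask, int only_32)
{
	/* mirror the diag access into a driver-private shadow here */
	return 0;
}
static const struct diag_observer example_observer = {
	.hook = example_hook,
	.bottom = 0x1000,	/* hypothetical window: 0x1000..0x10ff */
	.top = 0x10ff,
};
/* at init time: qib_register_observer(dd, &example_observer); */
#endif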
/* device data struct now contains only "general per-device" info.
* fields related to a physical IB port are in a qib_pportdata struct,
 * (described above), while fields only used by a particular chip-type are in
* a qib_chipdata struct, whose contents are opaque to this file.
*/
struct qib_devdata {
struct qib_ibdev verbs_dev; /* must be first */
struct list_head list;
/* pointers to related structs for this device */
/* pci access data structure */
struct pci_dev *pcidev;
struct cdev *user_cdev;
struct cdev *diag_cdev;
struct device *user_device;
struct device *diag_device;
/* mem-mapped pointer to base of chip regs */
u64 __iomem *kregbase;
/* end of mem-mapped chip space excluding sendbuf and user regs */
u64 __iomem *kregend;
/* physical address of chip for io_remap, etc. */
resource_size_t physaddr;
/* qib_cfgctxts pointers */
struct qib_ctxtdata **rcd; /* Receive Context Data */
/* qib_pportdata, points to array of (physical) port-specific
* data structs, indexed by pidx (0..n-1)
*/
struct qib_pportdata *pport;
struct qib_chip_specific *cspec; /* chip-specific */
/* kvirt address of 1st 2k pio buffer */
void __iomem *pio2kbase;
/* kvirt address of 1st 4k pio buffer */
void __iomem *pio4kbase;
/* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
void __iomem *piobase;
/* mem-mapped pointer to base of user chip regs (if using WC PAT) */
u64 __iomem *userbase;
void __iomem *piovl15base; /* base of VL15 buffers, if not WC */
/*
* points to area where PIOavail registers will be DMA'ed.
* Has to be on a page of it's own, because the page will be
* mapped into user program space. This copy is *ONLY* ever
* written by DMA, not by the driver! Need a copy per device
* when we get to multiple devices
*/
volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
/* physical address where updates occur */
dma_addr_t pioavailregs_phys;
/* device-specific implementations of functions needed by
* common code. Contrary to previous consensus, we can't
* really just point to a device-specific table, because we
* may need to "bend", e.g. *_f_put_tid
*/
/* fallback to alternate interrupt type if possible */
int (*f_intr_fallback)(struct qib_devdata *);
/* hard reset chip */
int (*f_reset)(struct qib_devdata *);
void (*f_quiet_serdes)(struct qib_pportdata *);
int (*f_bringup_serdes)(struct qib_pportdata *);
int (*f_early_init)(struct qib_devdata *);
void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
u32, unsigned long);
void (*f_cleanup)(struct qib_devdata *);
void (*f_setextled)(struct qib_pportdata *, u32);
/* fill out chip-specific fields */
int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
/* free irq */
void (*f_free_irq)(struct qib_devdata *);
struct qib_message_header *(*f_get_msgheader)
(struct qib_devdata *, __le32 *);
void (*f_config_ctxts)(struct qib_devdata *);
int (*f_get_ib_cfg)(struct qib_pportdata *, int);
int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
u32 (*f_iblink_state)(u64);
u8 (*f_ibphys_portstate)(u64);
void (*f_xgxs_reset)(struct qib_pportdata *);
/* per chip actions needed for IB Link up/down changes */
int (*f_ib_updown)(struct qib_pportdata *, int, u64);
u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
/* Read/modify/write of GPIO pins (potentially chip-specific) */
int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
u32 mask);
/* Enable writes to config EEPROM (if supported) */
int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
/*
* modify rcvctrl shadow[s] and write to appropriate chip-regs.
* see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
* (ctxt == -1) means "all contexts", only meaningful for
* clearing. Could remove if chip_spec shutdown properly done.
*/
void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
int ctxt);
/* Read/modify/write sendctrl appropriately for op and port. */
void (*f_sendctrl)(struct qib_pportdata *, u32 op);
void (*f_set_intr_state)(struct qib_devdata *, u32);
void (*f_set_armlaunch)(struct qib_devdata *, u32);
void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
int (*f_late_initreg)(struct qib_devdata *);
int (*f_init_sdma_regs)(struct qib_pportdata *);
u16 (*f_sdma_gethead)(struct qib_pportdata *);
int (*f_sdma_busy)(struct qib_pportdata *);
void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
void (*f_sdma_hw_start_up)(struct qib_pportdata *);
void (*f_sdma_init_early)(struct qib_pportdata *);
void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
u32 (*f_hdrqempty)(struct qib_ctxtdata *);
u64 (*f_portcntr)(struct qib_pportdata *, u32);
u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
u64 **);
u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
char **, u64 **);
u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
void (*f_initvl15_bufs)(struct qib_devdata *);
void (*f_init_ctxt)(struct qib_ctxtdata *);
void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
struct qib_ctxtdata *);
void (*f_writescratch)(struct qib_devdata *, u32);
int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
#ifdef CONFIG_INFINIBAND_QIB_DCA
int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
#endif
char *boardname; /* human readable board info */
/* template for writing TIDs */
u64 tidtemplate;
/* value to write to free TIDs */
u64 tidinvalid;
/* number of registers used for pioavail */
u32 pioavregs;
/* device (not port) flags, basically device capabilities */
u32 flags;
/* last buffer for user use */
u32 lastctxt_piobuf;
/* reset value */
u64 z_int_counter;
/* percpu intcounter */
u64 __percpu *int_counter;
/* pio bufs allocated per ctxt */
u32 pbufsctxt;
/* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
u32 ctxts_extrabuf;
/*
* number of ctxts configured as max; zero is set to number chip
* supports, less gives more pio bufs/ctxt, etc.
*/
u32 cfgctxts;
/*
* number of ctxts available for PSM open
*/
u32 freectxts;
/*
* hint that we should update pioavailshadow before
* looking for a PIO buffer
*/
u32 upd_pio_shadow;
/* internal debugging stats */
u32 maxpkts_call;
u32 avgpkts_call;
u64 nopiobufs;
/* PCI Vendor ID (here for NodeInfo) */
u16 vendorid;
/* PCI Device ID (here for NodeInfo) */
u16 deviceid;
/* for write combining settings */
int wc_cookie;
unsigned long wc_base;
unsigned long wc_len;
/* shadow copy of struct page *'s for exp tid pages */
struct page **pageshadow;
/* shadow copy of dma handles for exp tid pages */
dma_addr_t *physshadow;
u64 __iomem *egrtidbase;
spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
/* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
spinlock_t uctxt_lock; /* rcd and user context changes */
/*
* per unit status, see also portdata statusp
* mapped readonly into user processes so they can get unit and
* IB link status cheaply
*/
u64 *devstatusp;
char *freezemsg; /* freeze msg if hw error put chip in freeze */
u32 freezelen; /* max length of freezemsg */
/* timer used to prevent stats overflow, error throttling, etc. */
struct timer_list stats_timer;
/* timer to verify interrupts work, and fallback if possible */
struct timer_list intrchk_timer;
unsigned long ureg_align; /* user register alignment */
/*
* Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
* pio_writing.
*/
spinlock_t pioavail_lock;
/*
* index of last buffer to optimize search for next
*/
u32 last_pio;
/*
* min kernel pio buffer to optimize search
*/
u32 min_kernel_pio;
/*
* Shadow copies of registers; size indicates read access size.
 * Most of them are readonly, but some are write-only registers,
* where we manipulate the bits in the shadow copy, and then write
* the shadow copy to qlogic_ib.
*
* We deliberately make most of these 32 bits, since they have
 * restricted range. For any that we read, we want to generate 32
* bit accesses, since Opteron will generate 2 separate 32 bit HT
* transactions for a 64 bit read, and we want to avoid unnecessary
* bus transactions.
*/
/* This is the 64 bit group */
unsigned long pioavailshadow[6];
/* bitmap of send buffers available for the kernel to use with PIO. */
unsigned long pioavailkernel[6];
/* bitmap of send buffers which need to be disarmed. */
unsigned long pio_need_disarm[3];
/* bitmap of send buffers which are being written to. */
unsigned long pio_writing[3];
/* kr_revision shadow */
u64 revision;
/* Base GUID for device (from eeprom, network order) */
__be64 base_guid;
/*
* kr_sendpiobufbase value (chip offset of pio buffers), and the
 * base of the 2KB buffers (user processes only use 2K)
*/
u64 piobufbase;
u32 pio2k_bufbase;
/* these are the "32 bit" regs */
/* number of GUIDs in the flash for this interface */
u32 nguid;
/*
* the following two are 32-bit bitmasks, but {test,clear,set}_bit
* all expect bit fields to be "unsigned long"
*/
unsigned long rcvctrl; /* shadow per device rcvctrl */
unsigned long sendctrl; /* shadow per device sendctrl */
/* value we put in kr_rcvhdrcnt */
u32 rcvhdrcnt;
/* value we put in kr_rcvhdrsize */
u32 rcvhdrsize;
/* value we put in kr_rcvhdrentsize */
u32 rcvhdrentsize;
/* kr_ctxtcnt value */
u32 ctxtcnt;
/* kr_pagealign value */
u32 palign;
/* number of "2KB" PIO buffers */
u32 piobcnt2k;
/* size in bytes of "2KB" PIO buffers */
u32 piosize2k;
/* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
u32 piosize2kmax_dwords;
/* number of "4KB" PIO buffers */
u32 piobcnt4k;
/* size in bytes of "4KB" PIO buffers */
u32 piosize4k;
/* kr_rcvegrbase value */
u32 rcvegrbase;
/* kr_rcvtidbase value */
u32 rcvtidbase;
/* kr_rcvtidcnt value */
u32 rcvtidcnt;
/* kr_userregbase */
u32 uregbase;
/* shadow the control register contents */
u32 control;
/* chip address space used by 4k pio buffers */
u32 align4k;
/* size of each rcvegrbuffer */
u16 rcvegrbufsize;
/* log2 of above */
u16 rcvegrbufsize_shift;
/* localbus width (1, 2, 4, 8, 16, 32) from config space */
u32 lbus_width;
/* localbus speed in MHz */
u32 lbus_speed;
int unit; /* unit # of this chip */
/* start of CHIP_SPEC move to chipspec, but need code changes */
/* low and high portions of MSI capability/vector */
u32 msi_lo;
/* saved after PCIe init for restore after reset */
u32 msi_hi;
/* MSI data (vector) saved for restore */
u16 msi_data;
/* so we can rewrite it after a chip reset */
u32 pcibar0;
/* so we can rewrite it after a chip reset */
u32 pcibar1;
u64 rhdrhead_intr_off;
/*
* ASCII serial number, from flash, large enough for original
* all digit strings, and longer QLogic serial number format
*/
u8 serial[16];
/* human readable board version */
u8 boardversion[96];
u8 lbus_info[32]; /* human readable localbus info */
/* chip major rev, from qib_revision */
u8 majrev;
/* chip minor rev, from qib_revision */
u8 minrev;
/* Misc small ints */
/* Number of physical ports available */
u8 num_pports;
/* Lowest context number which can be used by user processes */
u8 first_user_ctxt;
u8 n_krcv_queues;
u8 qpn_mask;
u8 skip_kctxt_mask;
u16 rhf_offset; /* offset of RHF within receive header entry */
/*
* GPIO pins for twsi-connected devices, and device code for eeprom
*/
u8 gpio_sda_num;
u8 gpio_scl_num;
u8 twsi_eeprom_dev;
u8 board_atten;
/* Support (including locks) for EEPROM logging of errors and time */
/* control access to actual counters, timer */
spinlock_t eep_st_lock;
/* control high-level access to EEPROM */
struct mutex eep_lock;
uint64_t traffic_wds;
/*
* masks for which bits of errs, hwerrs that cause
* each of the counters to increment.
*/
struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
struct qib_diag_client *diag_client;
spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
struct diag_observer_list_elt *diag_observer_list;
u8 psxmitwait_supported;
/* cycle length of PS* counters in HW (in picoseconds) */
u16 psxmitwait_check_rate;
/* high volume overflow errors deferred to tasklet */
struct tasklet_struct error_tasklet;
int assigned_node_id; /* NUMA node closest to HCA */
};
/* hol_state values */
#define QIB_HOL_UP 0
#define QIB_HOL_INIT 1
#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
/* operation types for f_txchk_change() */
#define TXCHK_CHG_TYPE_DIS1 3
#define TXCHK_CHG_TYPE_ENAB1 2
#define TXCHK_CHG_TYPE_KERN 1
#define TXCHK_CHG_TYPE_USER 0
#define QIB_CHASE_TIME msecs_to_jiffies(145)
#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
/* Private data for file operations */
struct qib_filedata {
struct qib_ctxtdata *rcd;
unsigned subctxt;
unsigned tidcursor;
struct qib_user_sdma_queue *pq;
int rec_cpu_num; /* for cpu affinity; -1 if none */
};
extern struct list_head qib_dev_list;
extern spinlock_t qib_devs_lock;
extern struct qib_devdata *qib_lookup(int unit);
extern u32 qib_cpulist_count;
extern unsigned long *qib_cpulist;
extern unsigned qib_cc_table_size;
int qib_init(struct qib_devdata *, int);
int init_chip_wc_pat(struct qib_devdata *dd, u32);
int qib_enable_wc(struct qib_devdata *dd);
void qib_disable_wc(struct qib_devdata *dd);
int qib_count_units(int *npresentp, int *nupp);
int qib_count_active_units(void);
int qib_cdev_init(int minor, const char *name,
const struct file_operations *fops,
struct cdev **cdevp, struct device **devp);
void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
int qib_dev_init(void);
void qib_dev_cleanup(void);
int qib_diag_add(struct qib_devdata *);
void qib_diag_remove(struct qib_devdata *);
void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
void qib_bad_intrstatus(struct qib_devdata *);
void qib_handle_urcv(struct qib_devdata *, u64);
/* clean up any per-chip chip-specific stuff */
void qib_chip_cleanup(struct qib_devdata *);
/* clean up any chip type-specific stuff */
void qib_chip_done(void);
/* check to see if we have to force ordering for write combining */
int qib_unordered_wc(void);
void qib_pio_copy(void __iomem *to, const void *from, size_t count);
void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
void qib_cancel_sends(struct qib_pportdata *);
int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
int qib_setup_eagerbufs(struct qib_ctxtdata *);
void qib_set_ctxtcnt(struct qib_devdata *);
int qib_create_ctxts(struct qib_devdata *dd);
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
int qib_reset_device(int);
int qib_wait_linkstate(struct qib_pportdata *, u32, int);
int qib_set_linkstate(struct qib_pportdata *, u8);
int qib_set_mtu(struct qib_pportdata *, u16);
int qib_set_lid(struct qib_pportdata *, u32, u8);
void qib_hol_down(struct qib_pportdata *);
void qib_hol_init(struct qib_pportdata *);
void qib_hol_up(struct qib_pportdata *);
void qib_hol_event(struct timer_list *);
void qib_disable_after_error(struct qib_devdata *);
int qib_set_uevent_bits(struct qib_pportdata *, const int);
/* for use in system calls, where we want to know device type, etc. */
#define ctxt_fp(fp) \
(((struct qib_filedata *)(fp)->private_data)->rcd)
#define subctxt_fp(fp) \
(((struct qib_filedata *)(fp)->private_data)->subctxt)
#define tidcursor_fp(fp) \
(((struct qib_filedata *)(fp)->private_data)->tidcursor)
#define user_sdma_queue_fp(fp) \
(((struct qib_filedata *)(fp)->private_data)->pq)
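/*
 * Illustrative use of the accessors above (added commentary, not from
 * the original source): inside a file_operations handler, recover the
 * per-open context data from the struct file:
 *
 *	struct qib_ctxtdata *rcd = ctxt_fp(fp);
 *	unsigned subctxt = subctxt_fp(fp);
 */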
static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
{
return ppd->dd;
}
static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
{
return container_of(dev, struct qib_devdata, verbs_dev);
}
static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
return dd_from_dev(to_idev(ibdev));
}
static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
{
return container_of(ibp, struct qib_pportdata, ibport_data);
}
static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
{
struct qib_devdata *dd = dd_from_ibdev(ibdev);
unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
WARN_ON(pidx >= dd->num_pports);
return &dd->pport[pidx].ibport_data;
}
/*
 * values for dd->flags (_device_ related flags)
*/
#define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */
#define QIB_INITTED 0x2 /* chip and driver up and initted */
#define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */
#define QIB_PRESENT 0x8 /* chip accesses can be done */
#define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */
#define QIB_HAS_THRESH_UPDATE 0x40
#define QIB_HAS_SDMA_TIMEOUT 0x80
#define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */
#define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail register DMA enabled */
#define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */
#define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */
#define QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */
#define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */
#define QIB_BADINTR 0x8000 /* severe interrupt problems */
#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
#define QIB_SHUTDOWN 0x40000 /* device is shutting down */
/*
* values for ppd->lflags (_ib_port_ related flags)
*/
#define QIBL_LINKV 0x1 /* IB link state valid */
#define QIBL_LINKDOWN 0x8 /* IB link is down */
#define QIBL_LINKINIT 0x10 /* IB link level is up */
#define QIBL_LINKARMED 0x20 /* IB link is ARMED */
#define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */
/* leave a gap for more IB-link state */
#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
#define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced,
* Do not try to bring up */
#define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */
/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
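/*
 * Example (added commentary): the send length in dwords is recovered
 * from a PBC value with (pbc & QIB_PBC_LENGTH_MASK).
 */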
/* ctxt_flag bit offsets */
/* waiting for a packet to arrive */
#define QIB_CTXT_WAITING_RCV 2
/* master has not finished initializing */
#define QIB_CTXT_MASTER_UNINIT 4
/* waiting for an urgent packet to arrive */
#define QIB_CTXT_WAITING_URG 5
/* free up any allocated data at close */
void qib_free_data(struct qib_ctxtdata *dd);
void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
u32, struct qib_ctxtdata *);
struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
const struct pci_device_id *);
struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
const struct pci_device_id *);
struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
const struct pci_device_id *);
void qib_free_devdata(struct qib_devdata *);
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
#define QIB_TWSI_NO_DEV 0xFF
/* Below qib_twsi_ functions must be called with eep_lock held */
int qib_twsi_reset(struct qib_devdata *dd);
int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
int len);
int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
const void *buffer, int len);
void qib_get_eeprom_info(struct qib_devdata *);
#define qib_inc_eeprom_err(dd, eidx, incr)
void qib_dump_lookup_output_queue(struct qib_devdata *);
void qib_force_pio_avail_update(struct qib_devdata *);
void qib_clear_symerror_on_linkup(struct timer_list *t);
/*
* Set LED override, only the two LSBs have "public" meaning, but
* any non-zero value substitutes them for the Link and LinkTrain
* LED states.
*/
#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
#define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */
void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
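/*
 * Usage sketch (illustrative, not from the original source): force
 * both LEDs on under override; per the comment above, a non-zero value
 * substitutes for the normal LED states, and writing 0 presumably
 * returns them to automatic control (an assumption):
 *
 *	qib_set_led_override(ppd, QIB_LED_PHYS | QIB_LED_LOG);
 *	...
 *	qib_set_led_override(ppd, 0);
 */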
/* send dma routines */
int qib_setup_sdma(struct qib_pportdata *);
void qib_teardown_sdma(struct qib_pportdata *);
void __qib_sdma_intr(struct qib_pportdata *);
void qib_sdma_intr(struct qib_pportdata *);
void qib_user_sdma_send_desc(struct qib_pportdata *dd,
struct list_head *pktlist);
int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
u32, struct qib_verbs_txreq *);
/* ppd->sdma_lock should be locked before calling this. */
int qib_sdma_make_progress(struct qib_pportdata *dd);
static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
{
return ppd->sdma_descq_added == ppd->sdma_descq_removed;
}
/* must be called under qib_sdma_lock */
static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
{
return ppd->sdma_descq_cnt -
(ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
}
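/*
 * Note (added commentary): the "- 1" above keeps one descriptor slot
 * permanently unused, the usual ring-buffer trick that lets a full
 * queue (added - removed == cnt - 1) be distinguished from an empty
 * one (added == removed).
 */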
static inline int __qib_sdma_running(struct qib_pportdata *ppd)
{
return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
}
int qib_sdma_running(struct qib_pportdata *);
void dump_sdma_state(struct qib_pportdata *ppd);
void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
/*
 * number of words used for protocol header if not set by qib_userinit()
*/
#define QIB_DFLT_RCVHDRSIZE 9
/*
* We need to be able to handle an IB header of at least 24 dwords.
* We need the rcvhdrq large enough to handle largest IB header, but
* still have room for a 2KB MTU standard IB packet.
* Additionally, some processor/memory controller combinations
* benefit quite strongly from having the DMA'ed data be cacheline
* aligned and a cacheline multiple, so we set the size to 32 dwords
* (2 64-byte primary cachelines for pretty much all processors of
* interest). The alignment hurts nothing, other than using somewhat
* more memory.
*/
#define QIB_RCVHDR_ENTSIZE 32
int qib_get_user_pages(unsigned long, size_t, struct page **);
void qib_release_user_pages(struct page **, size_t);
int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
void qib_sendbuf_done(struct qib_devdata *, unsigned);
static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
{
*((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
}
static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
{
/*
* volatile because it's a DMA target from the chip, routine is
* inlined, and don't want register caching or reordering.
*/
return (u32) le64_to_cpu(
*((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
}
static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
{
const struct qib_devdata *dd = rcd->dd;
u32 hdrqtail;
if (dd->flags & QIB_NODMA_RTAIL) {
__le32 *rhf_addr;
u32 seq;
rhf_addr = (__le32 *) rcd->rcvhdrq +
rcd->head + dd->rhf_offset;
seq = qib_hdrget_seq(rhf_addr);
hdrqtail = rcd->head;
if (seq == rcd->seq_cnt)
hdrqtail++;
} else
hdrqtail = qib_get_rcvhdrtail(rcd);
return hdrqtail;
}
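/*
 * Added commentary: when QIB_NODMA_RTAIL is set the chip does not
 * DMA a tail pointer, so the tail is inferred by checking whether the
 * RHF sequence number at the current head matches the expected
 * sequence count; a match means one more entry is ready at the head.
 */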
/*
* sysfs interface.
*/
extern const char ib_qib_version[];
#if defined (IFS_SLES15SP1)
extern const struct attribute_group qib_attr_group;
#endif
int qib_device_create(struct qib_devdata *);
void qib_device_remove(struct qib_devdata *);
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
struct kobject *kobj);
#if !defined (IFS_SLES15SP1)
int qib_verbs_register_sysfs(struct qib_devdata *);
#endif
void qib_verbs_unregister_sysfs(struct qib_devdata *);
/* Hook for sysfs read of QSFP */
extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
int __init qib_init_qibfs(void);
int __exit qib_exit_qibfs(void);
int qibfs_add(struct qib_devdata *);
int qibfs_remove(struct qib_devdata *);
int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
const struct pci_device_id *);
void qib_pcie_ddcleanup(struct qib_devdata *);
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
void qib_free_irq(struct qib_devdata *dd);
int qib_reinit_intr(struct qib_devdata *dd);
void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
/* interrupts for device */
u64 qib_int_counter(struct qib_devdata *);
/* interrupt for all devices */
u64 qib_sps_ints(void);
/*
* dma_addr wrappers - all 0's invalid for hw
*/
int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
/*
* Flush write combining store buffers (if present) and perform a write
* barrier.
*/
static inline void qib_flush_wc(void)
{
#if defined(CONFIG_X86_64)
asm volatile("sfence" : : : "memory");
#else
wmb(); /* no reorder around wc flush */
#endif
}
/* global module parameter variables */
extern unsigned qib_ibmtu;
extern ushort qib_cfgctxts;
extern ushort qib_num_cfg_vls;
extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */
extern unsigned qib_n_krcv_queues;
extern unsigned qib_sdma_fetch_arb;
extern unsigned qib_compat_ddr_negotiate;
extern int qib_special_trigger;
extern unsigned qib_numa_aware;
extern struct mutex qib_mutex;
/* Number of seconds before our card status check... */
#define STATUS_TIMEOUT 60
#define QIB_DRV_NAME "ib_qib"
#define QIB_USER_MINOR_BASE 0
#define QIB_TRACE_MINOR 127
#define QIB_DIAGPKT_MINOR 128
#define QIB_DIAG_MINOR_BASE 129
#define QIB_NMINORS 255
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_VENDOR_ID_QLOGIC 0x1077
#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
/*
* qib_early_err is used (only!) to print early errors before devdata is
* allocated, or when dd->pcidev may not be valid, and at the tail end of
* cleanup when devdata may have been freed, etc. qib_dev_porterr is
* the same as qib_dev_err, but is used when the message really needs
 * the IB port# to be definitive as to what's happening.
* All of these go to the trace log, and the trace log entry is done
* first to avoid possible serial port delays from printk.
*/
#define qib_early_err(dev, fmt, ...) \
dev_err(dev, fmt, ##__VA_ARGS__)
#define qib_dev_err(dd, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define qib_dev_warn(dd, fmt, ...) \
dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define qib_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (dd)->unit, (port), \
##__VA_ARGS__)
#define qib_devinfo(pcidev, fmt, ...) \
dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
/*
* this is used for formatting hw error messages...
*/
struct qib_hwerror_msgs {
u64 mask;
const char *msg;
size_t sz;
};
#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
/* in qib_intr.c... */
void qib_format_hwerrors(u64 hwerrs,
const struct qib_hwerror_msgs *hwerrmsgs,
size_t nhwerrmsgs, char *msg, size_t lmsg);
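/*
 * Illustrative table usage (the mask value, message text, and hwerrs
 * variable are made up for the example, not taken from the driver):
 *
 *	static const struct qib_hwerror_msgs msgs[] = {
 *		QLOGIC_IB_HWE_MSG(0x1ULL, "example hw error"),
 *	};
 *	char buf[128];
 *
 *	qib_format_hwerrors(hwerrs, msgs, ARRAY_SIZE(msgs),
 *			    buf, sizeof(buf));
 */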
void qib_stop_send_queue(struct rvt_qp *qp);
void qib_quiesce_qp(struct rvt_qp *qp);
void qib_flush_qp_waiters(struct rvt_qp *qp);
int qib_mtu_to_path_mtu(u32 mtu);
u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
void qib_notify_error_qp(struct rvt_qp *qp);
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
struct ib_qp_attr *attr);
#endif /* _QIB_KERNEL_H */
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/rcva.h
|
<gh_stars>0
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _RCVA_H
#define _RCVA_H
#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <linux/genalloc.h>
/**
* struct rcva - receive allocation structure
*
* @dd: pointer to devdata
* @ee_pool: pool of receive array for eager entries
* @ne_pool: pool of receive array for network entries
* @te_pool: pool of receive array for tid entries
* @ee_size: number of eager groups for each context
* @ne_size: number of eager groups for netdev contexts
* @te_size: number of eager groups for tid capable contexts
* @netdev_contexts: return for number of netdev contexts
*/
struct hfi1_devdata;
struct rcva {
struct hfi1_devdata *dd;
struct gen_pool *ee_pool;
struct gen_pool *ne_pool;
struct gen_pool *te_pool;
u16 ee_size;
u16 ne_size;
u16 te_size;
u8 netdev_contexts;
};
/**
* struct rcva_create_attr - input to sizing
*
* @max_buffers: max_number of eager buffers
* @te_contexts: contexts with expected receive entries
* @ne_contexts: number of netdev contexts
* @ee_contexts: number of ee_contexts
* @multi_packet: alter algorithm for multi-packet
*/
struct rcva_create_attr {
u16 max_buffers;
u8 te_contexts;
u8 ne_contexts;
u8 ee_contexts;
bool multi_packet;
};
/**
* struct rcva_slice - stores an allocation from one of the pools
* @base: starting group for slice within receive array
* @size: size of array slice
*/
struct rcva_slice {
u16 base;
u16 size;
};
struct rcva *rcva_create(struct hfi1_devdata *dd,
struct rcva_create_attr *attr);
void rcva_destroy(struct rcva *r);
int rvca_alloc_ee_slice(struct rcva *r, struct rcva_slice *s);
int rvca_alloc_ne_slice(struct rcva *r, struct rcva_slice *s);
int rvca_alloc_te_slice(struct rcva *r, struct rcva_slice *s);
void rvca_free_ee_slice(struct rcva *r, struct rcva_slice *s);
void rvca_free_ne_slice(struct rcva *r, struct rcva_slice *s);
void rvca_free_te_slice(struct rcva *r, struct rcva_slice *s);
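/*
 * Usage sketch (illustrative only; the attribute values are
 * assumptions, and note the alloc/free helpers are declared with the
 * "rvca_" spelling above):
 *
 *	struct rcva_create_attr attr = {
 *		.max_buffers = 2048,
 *		.te_contexts = 4,
 *		.ne_contexts = 2,
 *		.ee_contexts = 2,
 *		.multi_packet = true,
 *	};
 *	struct rcva_slice s;
 *	struct rcva *r = rcva_create(dd, &attr);
 *
 *	if (r && !rvca_alloc_ee_slice(r, &s)) {
 *		... use receive array groups [s.base, s.base + s.size) ...
 *		rvca_free_ee_slice(r, &s);
 *	}
 *	rcva_destroy(r);
 */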
#endif /* _RCVA_H */
|
cornelisnetworks/opa-hfi1
|
compat/SLES12SP3/compat.c
|
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/export.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include "../hfi1/hfi.h"
#include "compat.h"
/* Address handles */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, u32 create_flags)
{
struct ib_ah *ah;
ah = pd->device->create_ah(pd, ah_attr, NULL);
if (!IS_ERR(ah)) {
ah->device = pd->device;
ah->pd = pd;
ah->uobject = NULL;
atomic_inc(&pd->usecnt);
}
return ah;
}
EXPORT_SYMBOL(rdma_create_ah);
/**
* rdma_destroy_ah - Destroys an address handle.
* @ah: The address handle to destroy.
*/
int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
struct ib_pd *pd;
int ret;
pd = ah->pd;
ret = ah->device->destroy_ah(ah);
if (!ret)
atomic_dec(&pd->usecnt);
return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah);
/*
* The following functions implement driver specific replacements
* for the ib_dma_*() functions.
*
* These functions return kernel virtual addresses instead of
* device bus addresses since the driver uses the CPU to copy
* data instead of using hardware DMA.
*/
static int rvt_mapping_error(struct ib_device *dev, u64 dma_addr)
{
return dma_addr == BAD_DMA_ADDRESS;
}
static u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr,
size_t size, enum dma_data_direction direction)
{
if (WARN_ON(!valid_dma_direction(direction)))
return BAD_DMA_ADDRESS;
return (u64)cpu_addr;
}
static void rvt_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
enum dma_data_direction direction)
{
/* This is a stub, nothing to be done here */
}
static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
u64 addr;
if (WARN_ON(!valid_dma_direction(direction)))
return BAD_DMA_ADDRESS;
addr = (u64)page_address(page);
if (addr)
addr += offset;
return addr;
}
static void rvt_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
enum dma_data_direction direction)
{
/* This is a stub, nothing to be done here */
}
static int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction)
{
struct scatterlist *sg;
u64 addr;
int i;
int ret = nents;
if (WARN_ON(!valid_dma_direction(direction)))
return 0;
for_each_sg(sgl, sg, nents, i) {
addr = (u64)page_address(sg_page(sg));
if (!addr) {
ret = 0;
break;
}
sg->dma_address = addr + sg->offset;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
sg->dma_length = sg->length;
#endif
}
return ret;
}
static void rvt_unmap_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
/* This is a stub, nothing to be done here */
}
static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
return rvt_map_sg(dev, sgl, nents, direction);
}
static void rvt_unmap_sg_attrs(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
return rvt_unmap_sg(dev, sg, nents, direction);
}
static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
size_t size, enum dma_data_direction dir)
{
/* This is a stub, nothing to be done here */
}
static void rvt_sync_single_for_device(struct ib_device *dev, u64 addr,
size_t size,
enum dma_data_direction dir)
{
/* This is a stub, nothing to be done here */
}
static void *rvt_dma_alloc_coherent(struct ib_device *dev, size_t size,
u64 *dma_handle, gfp_t flag)
{
struct page *p;
void *addr = NULL;
p = alloc_pages(flag, get_order(size));
if (p)
addr = page_address(p);
if (dma_handle)
*dma_handle = (u64)addr;
return addr;
}
static void rvt_dma_free_coherent(struct ib_device *dev, size_t size,
void *cpu_addr, u64 dma_handle)
{
free_pages((unsigned long)cpu_addr, get_order(size));
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,126))
/*
* We should only need to wait 100ms after FLR, but some devices take longer.
* Wait for up to 1000ms for config space to return something other than -1.
* Intel IGD requires this when an LCD panel is attached. We read the 2nd
* dword because VFs don't implement the 1st dword.
*/
static void pci_flr_wait(struct pci_dev *dev)
{
int i = 0;
u32 id;
do {
msleep(100);
pci_read_config_dword(dev, PCI_COMMAND, &id);
} while (i++ < 10 && id == ~0);
if (id == ~0)
dev_warn(&dev->dev, "Failed to return from FLR\n");
else if (i > 1)
dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
(i - 1) * 100);
}
/**
* pcie_flr - initiate a PCIe function level reset
* @dev: device to reset
*
* Initiate a function level reset on @dev. The caller should ensure the
* device supports FLR before calling this function, e.g. by using the
* pcie_has_flr() helper.
*/
void pcie_flr(struct pci_dev *dev)
{
if (!pci_wait_for_pending_transaction(dev))
dev_err(&dev->dev,
"timed out waiting for pending transaction; performing function level reset anyway\n");
pcie_capability_set_word(dev,
PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_BCR_FLR);
pci_flr_wait(dev);
}
EXPORT_SYMBOL_GPL(pcie_flr);
#endif
struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
.mapping_error = rvt_mapping_error,
.map_single = rvt_dma_map_single,
.unmap_single = rvt_dma_unmap_single,
.map_page = rvt_dma_map_page,
.unmap_page = rvt_dma_unmap_page,
.map_sg = rvt_map_sg,
.unmap_sg = rvt_unmap_sg,
.map_sg_attrs = rvt_map_sg_attrs,
.unmap_sg_attrs = rvt_unmap_sg_attrs,
.sync_single_for_cpu = rvt_sync_single_for_cpu,
.sync_single_for_device = rvt_sync_single_for_device,
.alloc_coherent = rvt_dma_alloc_coherent,
.free_coherent = rvt_dma_free_coherent
};
|
cornelisnetworks/opa-hfi1
|
compat/SLES12SP2/compat.h
|
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if !defined(SLES12SP2_COMPAT_H)
#define SLES12SP2_COMPAT_H
#include <linux/version.h>
#include "compat_common.h"
#include <linux/device.h>
#include <rdma/ib_mad.h>
#undef CONFIG_FAULT_INJECTION
#define IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE (IB_OPCODE_RC_FETCH_ADD + 1)
#define IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE (IB_OPCODE_RC_FETCH_ADD + 2)
#define IB_OPCODE_MSP 0xe0
#define OPA_SM_CLASS_VERSION (OPA_SMI_CLASS_VERSION)
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
#define IB_CLASS_PORT_INFO_RESP_TIME_MASK 0x1F
#define IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE 5
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
#define PCI_IRQ_LEGACY BIT(0) /* allow legacy interrupts */
#define PCI_IRQ_MSI BIT(1) /* allow MSI interrupts */
#define PCI_IRQ_MSIX BIT(2) /* allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY BIT(3) /* auto-assign affinity */
#define PCI_IRQ_ALL_TYPES \
(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
#define ib_register_device(a, b, c) ib_register_device((a), (c))
#define rdma_set_device_sysfs_group(a, b)
struct hfi1_msix_entry;
struct hfi1_devdata;
/**
* struct rdma_hw_stats
* @timestamp - Used by the core code to track when the last update was
* @lifespan - Used by the core code to determine how old the counters
* should be before being updated again. Stored in jiffies, defaults
 * to 10 milliseconds, drivers can override the default by specifying
 * their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 * directory.
 * @num_counters - How many hardware counters there are. If names is
 * shorter than this number, a kernel oops will result. Driver authors
 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
* in their code to prevent this.
* @value - Array of u64 counters that are accessed by the sysfs code and
* filled in by the drivers get_stats routine
*/
struct rdma_hw_stats {
unsigned long timestamp;
unsigned long lifespan;
const char * const *names;
int num_counters;
u64 value[];
};
/**
 * struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
* the MSI(-X) vector space
*/
struct irq_affinity {
int pre_vectors;
int post_vectors;
};
#define HAVE_MEMDUP_USER_NUL (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,90))
#if !HAVE_MEMDUP_USER_NUL
void *memdup_user_nul(const void __user *src, size_t len);
#endif
void pcie_flr(struct pci_dev *dev);
int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags);
void msix_setup(struct pci_dev *pcidev, int pos, u32 *msixcnt,
struct hfi1_msix_entry *hfi1_msix_entry);
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
int debugfs_use_file_start(struct dentry *dentry, int *srcu_idx)
__acquires(&debugfs_srcu);
void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
static inline long compat_get_user_pages(unsigned long start,
unsigned long nr_pages,
unsigned int gup_flags,
struct page **pages,
struct vm_area_struct **vmas)
{
return get_user_pages(current, current->mm, start,
nr_pages, 1, 1, pages, vmas);
}
#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
compat_get_user_pages(start, nr_pages, gup_flags, pages, vmas)
/**
* ib_set_cpi_capmask2 - Sets the capmask2 in an
* ib_class_port_info mad.
* @cpi: A struct ib_class_port_info.
* @capmask2: The capmask2 to set.
*/
static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi,
u32 capmask2)
{
cpi->reserved[0] =
(u8)(capmask2 >> (31 - 7 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
}
/**
 * ib_set_cpi_resp_time - Sets the response time in an
* ib_class_port_info mad.
* @cpi: A struct ib_class_port_info.
* @rtime: The response time to set.
*/
static inline void ib_set_cpi_resp_time(struct ib_class_port_info *cpi,
u8 rtime)
{
cpi->resp_time_value = rtime;
}
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
const char * const *names, int num_counters,
unsigned long lifespan)
{
return NULL;
}
static inline void hfi1_enable_intx(struct pci_dev *pdev)
{
/* first, turn on INTx */
pci_intx(pdev, 1);
/* then turn off MSI-X */
pci_disable_msix(pdev);
}
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
pci_disable_msix(dev);
pci_disable_msi(dev);
}
/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc) ((desc)->dev)
#define dev_to_msi_list(dev) (&(dev)->msi_list)
#define first_msi_entry(dev) \
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev) \
for_each_msi_entry((desc), &(pdev)->dev)
static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
if (dev->msix_enabled) {
struct msi_desc *entry;
int i = 0;
for_each_pci_msi_entry(entry, dev) {
if (i == nr)
return entry->irq;
i++;
}
WARN_ON_ONCE(1);
return -EINVAL;
}
if (dev->msi_enabled) {
struct msi_desc *entry = first_pci_msi_entry(dev);
if (WARN_ON_ONCE(nr >= entry->nvec_used))
return -EINVAL;
} else {
if (WARN_ON_ONCE(nr > 0))
return -EINVAL;
}
return dev->irq + nr;
}
#else
static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
if (WARN_ON_ONCE(nr > 0))
return -EINVAL;
return dev->irq;
}
#endif
#endif //SLES12SP2_COMPAT
|
cornelisnetworks/opa-hfi1
|
drivers/infiniband/hw/hfi1/netdev_rx.c
|
<gh_stars>0
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* This file contains HFI1 support for netdev RX functionality
*/
#include "sdma.h"
#include "verbs.h"
#include "netdev.h"
#include "hfi.h"
#include "compat_common.h"
#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
struct hfi1_ctxtdata *uctxt)
{
unsigned int rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS |
HFI1_RCVCTRL_INTRAVAIL_DIS;
struct hfi1_devdata *dd = priv->dd;
int ret;
uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
uctxt->do_interrupt = &handle_receive_interrupt_napi_sp;
/* Now allocate the RcvHdr queue and eager buffers. */
ret = hfi1_create_rcvhdrq(dd, uctxt);
if (ret)
goto done;
ret = hfi1_setup_eagerbufs(uctxt);
if (ret)
goto done;
clear_rcvhdrtail(uctxt);
if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
done:
return ret;
}
static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
struct hfi1_ctxtdata **ctxt)
{
struct hfi1_ctxtdata *uctxt;
int ret;
if (dd->flags & HFI1_FROZEN)
return -EIO;
ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
if (ret < 0) {
dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
return -ENOMEM;
}
uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
HFI1_CAP_KGET(NODROP_RHQ_FULL) |
HFI1_CAP_KGET(NODROP_EGR_FULL) |
HFI1_CAP_KGET(DMA_RTAIL);
/* Netdev contexts are always NO_RDMA_RTAIL */
uctxt->fast_handler = handle_receive_interrupt_napi_fp;
uctxt->slow_handler = handle_receive_interrupt_napi_sp;
hfi1_set_seq_cnt(uctxt, 1);
uctxt->is_vnic = true;
hfi1_stats.sps_ctxts++;
dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
*ctxt = uctxt;
return 0;
}
static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
struct hfi1_ctxtdata *uctxt)
{
flush_wc();
/*
* Disable receive context and interrupt available, reset all
* RcvCtxtCtrl bits to default values.
*/
hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
HFI1_RCVCTRL_TIDFLOW_DIS |
HFI1_RCVCTRL_INTRAVAIL_DIS |
HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS)
msix_free_irq(dd, uctxt->msix_intr);
uctxt->msix_intr = CCE_NUM_MSIX_VECTORS;
uctxt->event_flags = 0;
hfi1_clear_tids(uctxt);
hfi1_clear_ctxt_pkey(dd, uctxt);
hfi1_stats.sps_ctxts--;
hfi1_free_ctxt(uctxt);
}
static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
struct hfi1_ctxtdata **ctxt)
{
int rc;
struct hfi1_devdata *dd = priv->dd;
rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
if (rc) {
dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
return rc;
}
rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
if (rc) {
dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
hfi1_netdev_deallocate_ctxt(dd, *ctxt);
*ctxt = NULL;
}
return rc;
}
static int hfi1_netdev_rxq_init(struct net_device *dev)
{
int i;
int rc;
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
struct hfi1_devdata *dd = priv->dd;
priv->num_rx_q = dd->num_netdev_contexts;
priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
GFP_KERNEL, dd->node);
if (!priv->rxq) {
dd_dev_err(dd, "Unable to allocate netdev queue data\n");
return -ENOMEM;
}
for (i = 0; i < priv->num_rx_q; i++) {
struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
if (rc)
goto bail_context_irq_failure;
hfi1_rcd_get(rxq->rcd);
rxq->priv = priv;
rxq->rcd->napi = &rxq->napi;
dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
i, rxq->rcd->ctxt);
/*
* Disable BUSY_POLL on this NAPI as this is not supported
* right now.
*/
set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
rc = msix_netdev_request_rcd_irq(rxq->rcd);
if (rc)
goto bail_context_irq_failure;
}
return 0;
bail_context_irq_failure:
dd_dev_err(dd, "Unable to allot receive context\n");
for (; i >= 0; i--) {
struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
if (rxq->rcd) {
hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
hfi1_rcd_put(rxq->rcd);
rxq->rcd = NULL;
}
}
kfree(priv->rxq);
priv->rxq = NULL;
return rc;
}
static void hfi1_netdev_rxq_deinit(struct net_device *dev)
{
int i;
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
struct hfi1_devdata *dd = priv->dd;
for (i = 0; i < priv->num_rx_q; i++) {
struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
netif_napi_del(&rxq->napi);
hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
hfi1_rcd_put(rxq->rcd);
rxq->rcd = NULL;
}
kfree(priv->rxq);
priv->rxq = NULL;
priv->num_rx_q = 0;
}
static void enable_queues(struct hfi1_netdev_priv *priv)
{
int i;
for (i = 0; i < priv->num_rx_q; i++) {
struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
rxq->rcd->ctxt);
napi_enable(&rxq->napi);
hfi1_rcvctrl(priv->dd,
HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
rxq->rcd);
}
}
static void disable_queues(struct hfi1_netdev_priv *priv)
{
int i;
msix_netdev_synchronize_irq(priv->dd);
for (i = 0; i < priv->num_rx_q; i++) {
struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
rxq->rcd->ctxt);
/* wait for napi if it was scheduled */
hfi1_rcvctrl(priv->dd,
HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
rxq->rcd);
napi_synchronize(&rxq->napi);
napi_disable(&rxq->napi);
}
}
int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
int res;
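/*
 * Added commentary: only the first netdev user initializes the RX
 * queues; later callers just bump the refcount and return.  The
 * #ifdef below selects atomic_fetch_inc() where the kernel provides
 * it and falls back to the equivalent atomic_inc_return() - 1 idiom.
 */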
#ifdef atomic_fetch_inc
if (atomic_fetch_inc(&priv->netdevs))
#else
if (atomic_inc_return(&priv->netdevs) - 1)
#endif
return 0;
mutex_lock(&hfi1_mutex);
init_dummy_netdev(dd->dummy_netdev);
res = hfi1_netdev_rxq_init(dd->dummy_netdev);
mutex_unlock(&hfi1_mutex);
return res;
}
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
/* destroy the RX queues only if it is the last netdev going away */
if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
mutex_lock(&hfi1_mutex);
hfi1_netdev_rxq_deinit(dd->dummy_netdev);
mutex_unlock(&hfi1_mutex);
}
return 0;
}
int hfi1_netdev_alloc(struct hfi1_devdata *dd)
{
struct hfi1_netdev_priv *priv;
const int netdev_size = sizeof(*dd->dummy_netdev) +
sizeof(struct hfi1_netdev_priv);
dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);
if (!dd->dummy_netdev)
return -ENOMEM;
priv = hfi1_netdev_priv(dd->dummy_netdev);
priv->dd = dd;
idr_init(&priv->idr);
mutex_init(&priv->idr_lock);
atomic_set(&priv->enabled, 0);
atomic_set(&priv->netdevs, 0);
return 0;
}
void hfi1_netdev_free(struct hfi1_devdata *dd)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
if (dd->dummy_netdev) {
dd_dev_info(dd, "hfi1 netdev freed\n");
idr_destroy(&priv->idr);
mutex_destroy(&priv->idr_lock);
kfree(dd->dummy_netdev);
dd->dummy_netdev = NULL;
}
}
void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
{
struct hfi1_netdev_priv *priv;
if (!dd->dummy_netdev)
return;
priv = hfi1_netdev_priv(dd->dummy_netdev);
#ifdef atomic_fetch_inc
if (atomic_fetch_inc(&priv->enabled))
#else
if (atomic_inc_return(&priv->enabled) - 1)
#endif
return;
mutex_lock(&hfi1_mutex);
enable_queues(priv);
mutex_unlock(&hfi1_mutex);
}
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
{
struct hfi1_netdev_priv *priv;
if (!dd->dummy_netdev)
return;
priv = hfi1_netdev_priv(dd->dummy_netdev);
if (atomic_dec_if_positive(&priv->enabled))
return;
mutex_lock(&hfi1_mutex);
disable_queues(priv);
mutex_unlock(&hfi1_mutex);
}
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
int rc;
mutex_lock(&priv->idr_lock);
rc = idr_alloc(&priv->idr, data, id, id + 1, GFP_NOWAIT);
mutex_unlock(&priv->idr_lock);
return rc;
}
void hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
mutex_lock(&priv->idr_lock);
idr_remove(&priv->idr, id);
mutex_unlock(&priv->idr_lock);
}
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
void *p;
rcu_read_lock();
p = idr_find(&priv->idr, id);
rcu_read_unlock();
return p;
}
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
return idr_get_next(&priv->idr, start_id);
}
|