Dataset schema (one record per row below):
code: string (length 6 to 250k)
repo_name: string (length 5 to 70)
path: string (length 3 to 177)
language: string (1 class)
license: string (15 classes)
size: int64 (6 to 250k)
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE191_Integer_Underflow__int_listen_socket_postdec_45.c Label Definition File: CWE191_Integer_Underflow__int.label.xml Template File: sources-sinks-45.tmpl.c */ /* * @description * CWE: 191 Integer Underflow * BadSource: listen_socket Read data using a listen socket (server side) * GoodSource: Set data to a small, non-zero number (negative two) * Sinks: decrement * GoodSink: Ensure there will not be an underflow before decrementing data * BadSink : Decrement data, which can cause an Underflow * Flow Variant: 45 Data flow: data passed as a static global variable from one function to another in the same source file * * */ #include "std_testcase.h" #ifdef _WIN32 #include <winsock2.h> #include <windows.h> #include <direct.h> #pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */ #define CLOSE_SOCKET closesocket #else #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <unistd.h> #define INVALID_SOCKET -1 #define SOCKET_ERROR -1 #define CLOSE_SOCKET close #define SOCKET int #endif #define TCP_PORT 27015 #define LISTEN_BACKLOG 5 #define CHAR_ARRAY_SIZE (3 * sizeof(data) + 2) static int CWE191_Integer_Underflow__int_listen_socket_postdec_45_badData; static int CWE191_Integer_Underflow__int_listen_socket_postdec_45_goodG2BData; static int CWE191_Integer_Underflow__int_listen_socket_postdec_45_goodB2GData; #ifndef OMITBAD static void badSink() { int data = CWE191_Integer_Underflow__int_listen_socket_postdec_45_badData; { /* POTENTIAL FLAW: Decrementing data could cause an underflow */ data--; int result = data; printIntLine(result); } } void CWE191_Integer_Underflow__int_listen_socket_postdec_45_bad() { int data; /* Initialize data */ data = 0; { #ifdef _WIN32 WSADATA wsaData; int wsaDataInit = 0; #endif int recvResult; struct sockaddr_in service; SOCKET listenSocket = INVALID_SOCKET; SOCKET acceptSocket = INVALID_SOCKET; char inputBuffer[CHAR_ARRAY_SIZE]; do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR) { break; } wsaDataInit = 1; #endif /* POTENTIAL FLAW: Read data using a listen socket */ listenSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (listenSocket == INVALID_SOCKET) { break; } memset(&service, 0, sizeof(service)); service.sin_family = AF_INET; service.sin_addr.s_addr = INADDR_ANY; service.sin_port = htons(TCP_PORT); if (bind(listenSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR) { break; } if (listen(listenSocket, LISTEN_BACKLOG) == SOCKET_ERROR) { break; } acceptSocket = accept(listenSocket, NULL, NULL); if (acceptSocket == SOCKET_ERROR) { break; } /* Abort on error or the connection was closed */ recvResult = recv(acceptSocket, inputBuffer, CHAR_ARRAY_SIZE - 1, 0); if (recvResult == SOCKET_ERROR || recvResult == 0) { break; } /* NUL-terminate the string */ inputBuffer[recvResult] = '\0'; /* Convert to int */ data = atoi(inputBuffer); } while (0); if (listenSocket != INVALID_SOCKET) { CLOSE_SOCKET(listenSocket); } if (acceptSocket != INVALID_SOCKET) { CLOSE_SOCKET(acceptSocket); } #ifdef _WIN32 if (wsaDataInit) { WSACleanup(); } #endif } CWE191_Integer_Underflow__int_listen_socket_postdec_45_badData = data; badSink(); } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B() uses the GoodSource with the BadSink */ static void goodG2BSink() { int data = CWE191_Integer_Underflow__int_listen_socket_postdec_45_goodG2BData; { /* POTENTIAL FLAW: Decrementing data could cause an underflow */ data--; int result = data; 
printIntLine(result); } } static void goodG2B() { int data; /* Initialize data */ data = 0; /* FIX: Use a small, non-zero value that will not cause an integer underflow in the sinks */ data = -2; CWE191_Integer_Underflow__int_listen_socket_postdec_45_goodG2BData = data; goodG2BSink(); } /* goodB2G() uses the BadSource with the GoodSink */ static void goodB2GSink() { int data = CWE191_Integer_Underflow__int_listen_socket_postdec_45_goodB2GData; /* FIX: Add a check to prevent an underflow from occurring */ if (data > INT_MIN) { data--; int result = data; printIntLine(result); } else { printLine("data value is too large to perform arithmetic safely."); } } static void goodB2G() { int data; /* Initialize data */ data = 0; { #ifdef _WIN32 WSADATA wsaData; int wsaDataInit = 0; #endif int recvResult; struct sockaddr_in service; SOCKET listenSocket = INVALID_SOCKET; SOCKET acceptSocket = INVALID_SOCKET; char inputBuffer[CHAR_ARRAY_SIZE]; do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR) { break; } wsaDataInit = 1; #endif /* POTENTIAL FLAW: Read data using a listen socket */ listenSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (listenSocket == INVALID_SOCKET) { break; } memset(&service, 0, sizeof(service)); service.sin_family = AF_INET; service.sin_addr.s_addr = INADDR_ANY; service.sin_port = htons(TCP_PORT); if (bind(listenSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR) { break; } if (listen(listenSocket, LISTEN_BACKLOG) == SOCKET_ERROR) { break; } acceptSocket = accept(listenSocket, NULL, NULL); if (acceptSocket == SOCKET_ERROR) { break; } /* Abort on error or the connection was closed */ recvResult = recv(acceptSocket, inputBuffer, CHAR_ARRAY_SIZE - 1, 0); if (recvResult == SOCKET_ERROR || recvResult == 0) { break; } /* NUL-terminate the string */ inputBuffer[recvResult] = '\0'; /* Convert to int */ data = atoi(inputBuffer); } while (0); if (listenSocket != INVALID_SOCKET) { CLOSE_SOCKET(listenSocket); } if (acceptSocket != INVALID_SOCKET) { CLOSE_SOCKET(acceptSocket); } #ifdef _WIN32 if (wsaDataInit) { WSACleanup(); } #endif } CWE191_Integer_Underflow__int_listen_socket_postdec_45_goodB2GData = data; goodB2GSink(); } void CWE191_Integer_Underflow__int_listen_socket_postdec_45_good() { goodG2B(); goodB2G(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE191_Integer_Underflow__int_listen_socket_postdec_45_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE191_Integer_Underflow__int_listen_socket_postdec_45_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
JianpingZeng/xcc
xcc/test/juliet/testcases/CWE191_Integer_Underflow/s04/CWE191_Integer_Underflow__int_listen_socket_postdec_45.c
C
bsd-3-clause
8,543
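The goodB2G flow above is the whole point of the testcase: check against INT_MIN before a post-decrement so the subtraction cannot wrap. A minimal standalone sketch of that guard, outside the Juliet harness (safe_decrement is an illustrative helper, not part of the suite):

#include <limits.h>
#include <stdio.h>

/* Refuse the decrement when it would underflow; same guard as the GoodSink. */
static int safe_decrement(int data, int *out)
{
    if (data > INT_MIN) {
        *out = data - 1;
        return 0;
    }
    return -1; /* INT_MIN - 1 would wrap */
}

int main(void)
{
    int result;
    if (safe_decrement(INT_MIN, &result) != 0)
        puts("decrement refused: would underflow");
    if (safe_decrement(0, &result) == 0)
        printf("0 decremented to %d\n", result);
    return 0;
}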
/* * This file is part of the Soletta Project * * Copyright (C) 2015 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <errno.h> #include <netinet/in.h> #include <stdint.h> #include <stdbool.h> #include <stdio.h> #define SOL_LOG_DOMAIN &_sol_oic_server_log_domain #include "cbor.h" #include "sol-coap.h" #include "sol-json.h" #include "sol-log-internal.h" #include "sol-platform.h" #include "sol-str-slice.h" #include "sol-util.h" #include "sol-vector.h" #include "sol-oic-cbor.h" #include "sol-oic-common.h" #include "sol-oic-server.h" SOL_LOG_INTERNAL_DECLARE(_sol_oic_server_log_domain, "oic-server"); struct sol_oic_server { struct sol_coap_server *server; struct sol_coap_server *dtls_server; struct sol_vector resources; struct sol_oic_server_information *information; int refcnt; }; struct sol_oic_server_resource { struct sol_coap_resource *coap; char *href; char *rt; char *iface; enum sol_oic_resource_flag flags; struct { struct { sol_coap_responsecode_t (*handle)(const struct sol_network_link_addr *cliaddr, const void *data, const struct sol_vector *input, struct sol_vector *output); } get, put, post, delete; const void *data; } callback; }; static struct sol_oic_server oic_server; #define OIC_SERVER_CHECK(ret) \ do { \ if (oic_server.refcnt == 0) { \ SOL_WRN("OIC API used before initialization"); \ return ret; \ } \ } while (0) #define OIC_COAP_SERVER_UDP_PORT 5683 #define OIC_COAP_SERVER_DTLS_PORT 5684 static int _sol_oic_server_d(struct sol_coap_server *server, const struct sol_coap_resource *resource, struct sol_coap_packet *req, const struct sol_network_link_addr *cliaddr, void *data) { const uint8_t format_cbor = SOL_COAP_CONTENTTYPE_APPLICATION_CBOR; CborEncoder encoder, root, map, rep_map; CborError err; struct sol_coap_packet *response; const char *os_version; uint8_t *payload; uint16_t size; OIC_SERVER_CHECK(-ENOTCONN); response = sol_coap_packet_new(req); SOL_NULL_CHECK(response, -ENOMEM); sol_coap_add_option(response, SOL_COAP_OPTION_CONTENT_FORMAT, &format_cbor, sizeof(format_cbor)); if (sol_coap_packet_get_payload(response, &payload, 
&size) < 0) { SOL_WRN("Couldn't obtain payload from CoAP packet"); goto out; } cbor_encoder_init(&encoder, payload, size, 0); err = cbor_encoder_create_array(&encoder, &root, 2); err |= cbor_encode_uint(&root, SOL_OIC_PAYLOAD_PLATFORM); err |= cbor_encoder_create_map(&root, &map, CborIndefiniteLength); err |= cbor_encode_text_stringz(&map, SOL_OIC_KEY_HREF); err |= cbor_encode_text_stringz(&map, "/oic/d"); err |= cbor_encoder_create_map(&map, &rep_map, CborIndefiniteLength); #define APPEND_KEY_VALUE(k, v) \ do { \ err |= cbor_encode_text_stringz(&rep_map, k); \ err |= cbor_encode_text_string(&rep_map, \ oic_server.information->v.data, oic_server.information->v.len); \ } while (0) APPEND_KEY_VALUE(SOL_OIC_KEY_MANUF_NAME, manufacturer_name); APPEND_KEY_VALUE(SOL_OIC_KEY_MANUF_URL, manufacturer_url); APPEND_KEY_VALUE(SOL_OIC_KEY_MODEL_NUM, model_number); APPEND_KEY_VALUE(SOL_OIC_KEY_MANUF_DATE, manufacture_date); APPEND_KEY_VALUE(SOL_OIC_KEY_PLATFORM_VER, platform_version); APPEND_KEY_VALUE(SOL_OIC_KEY_HW_VER, hardware_version); APPEND_KEY_VALUE(SOL_OIC_KEY_FIRMWARE_VER, firmware_version); APPEND_KEY_VALUE(SOL_OIC_KEY_SUPPORT_URL, support_url); #undef APPEND_KEY_VALUE err |= cbor_encode_text_stringz(&rep_map, SOL_OIC_KEY_PLATFORM_ID); err |= cbor_encode_byte_string(&rep_map, (const uint8_t *)oic_server.information->platform_id.data, oic_server.information->platform_id.len); err |= cbor_encode_text_stringz(&rep_map, SOL_OIC_KEY_SYSTEM_TIME); err |= cbor_encode_text_stringz(&rep_map, ""); err |= cbor_encode_text_stringz(&rep_map, SOL_OIC_KEY_OS_VER); os_version = sol_platform_get_os_version(); err |= cbor_encode_text_stringz(&rep_map, os_version ? os_version : "Unknown"); err |= cbor_encoder_close_container(&rep_map, &map); err |= cbor_encoder_close_container(&map, &root); err |= cbor_encoder_close_container(&encoder, &root); if (err == CborNoError) { sol_coap_header_set_type(response, SOL_COAP_TYPE_ACK); sol_coap_header_set_code(response, SOL_COAP_RSPCODE_OK); sol_coap_packet_set_payload_used(response, encoder.ptr - payload); return sol_coap_send_packet(server, response, cliaddr); } SOL_WRN("Error encoding platform CBOR response: %s", cbor_error_string(err)); out: sol_coap_packet_unref(response); return -ENOMEM; } static const struct sol_coap_resource oic_d_coap_resource = { SOL_SET_API_VERSION(.api_version = SOL_COAP_RESOURCE_API_VERSION, ) .path = { SOL_STR_SLICE_LITERAL("oic"), SOL_STR_SLICE_LITERAL("d"), SOL_STR_SLICE_EMPTY }, .get = _sol_oic_server_d, .flags = SOL_COAP_FLAGS_NONE }; static unsigned int as_nibble(const char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; SOL_WRN("Invalid hex character: %d", c); return 0; } static const uint8_t * get_machine_id(void) { static uint8_t machine_id[16] = { 0 }; static bool machine_id_set = false; const char *machine_id_buf; if (unlikely(!machine_id_set)) { machine_id_buf = sol_platform_get_machine_id(); if (!machine_id_buf) { SOL_WRN("Could not get machine ID"); memset(machine_id, 0xFF, sizeof(machine_id)); } else { const char *p; size_t i; for (p = machine_id_buf, i = 0; i < 16; i++, p += 2) machine_id[i] = as_nibble(*p) << 4 | as_nibble(*(p + 1)); } machine_id_set = true; } return machine_id; } static int _sol_oic_server_res(struct sol_coap_server *server, const struct sol_coap_resource *resource, struct sol_coap_packet *req, const struct sol_network_link_addr *cliaddr, void *data) { CborEncoder encoder, array; CborError err; struct 
sol_oic_server_resource *iter; struct sol_coap_packet *resp; uint16_t size; const uint8_t format_cbor = SOL_COAP_CONTENTTYPE_APPLICATION_CBOR; uint8_t *payload; uint16_t idx; const uint8_t *uri_query; uint16_t uri_query_len; uri_query = sol_coap_find_first_option(req, SOL_COAP_OPTION_URI_QUERY, &uri_query_len); if (uri_query && uri_query_len > sizeof("rt=") - 1) { uri_query += sizeof("rt=") - 1; uri_query_len -= 3; } else { uri_query = NULL; uri_query_len = 0; } resp = sol_coap_packet_new(req); SOL_NULL_CHECK(resp, -ENOMEM); sol_coap_header_set_type(resp, SOL_COAP_TYPE_ACK); sol_coap_add_option(resp, SOL_COAP_OPTION_CONTENT_FORMAT, &format_cbor, sizeof(format_cbor)); sol_coap_packet_get_payload(resp, &payload, &size); cbor_encoder_init(&encoder, payload, size, 0); if (uri_query) { err = cbor_encoder_create_array(&encoder, &array, CborIndefiniteLength); } else { err = cbor_encoder_create_array(&encoder, &array, 1 + oic_server.resources.len); } err |= cbor_encode_uint(&array, SOL_OIC_PAYLOAD_DISCOVERY); SOL_VECTOR_FOREACH_IDX (&oic_server.resources, iter, idx) { CborEncoder map, prop_map, policy_map; if (uri_query && iter->rt) { size_t rt_len = strlen(iter->rt); if (rt_len != uri_query_len) continue; if (memcmp(uri_query, iter->rt, rt_len) != 0) continue; } if (!(iter->flags & SOL_OIC_FLAG_DISCOVERABLE)) continue; if (!(iter->flags & SOL_OIC_FLAG_ACTIVE)) continue; err |= cbor_encoder_create_map(&array, &map, 3); err |= cbor_encode_text_stringz(&map, SOL_OIC_KEY_HREF); err |= cbor_encode_text_stringz(&map, iter->href); err |= cbor_encode_text_stringz(&map, SOL_OIC_KEY_DEVICE_ID); err |= cbor_encode_byte_string(&map, get_machine_id(), 16); err |= cbor_encode_text_stringz(&map, SOL_OIC_KEY_PROPERTIES); err |= cbor_encoder_create_map(&map, &prop_map, !!iter->iface + !!iter->rt + 1); if (iter->iface) { CborEncoder if_array; err |= cbor_encode_text_stringz(&prop_map, SOL_OIC_KEY_INTERFACES); err |= cbor_encoder_create_array(&prop_map, &if_array, 1); err |= cbor_encode_text_stringz(&if_array, iter->iface); err |= cbor_encoder_close_container(&prop_map, &if_array); } if (iter->rt) { CborEncoder rt_array; err |= cbor_encode_text_stringz(&prop_map, SOL_OIC_KEY_RESOURCE_TYPES); err |= cbor_encoder_create_array(&prop_map, &rt_array, 1); err |= cbor_encode_text_stringz(&rt_array, iter->rt); err |= cbor_encoder_close_container(&prop_map, &rt_array); } err |= cbor_encode_text_stringz(&prop_map, SOL_OIC_KEY_POLICY); err |= cbor_encoder_create_map(&prop_map, &policy_map, CborIndefiniteLength); err |= cbor_encode_text_stringz(&policy_map, SOL_OIC_KEY_BITMAP); err |= cbor_encode_uint(&policy_map, iter->flags); err |= cbor_encoder_close_container(&prop_map, &policy_map); err |= cbor_encoder_close_container(&map, &prop_map); err |= cbor_encoder_close_container(&array, &map); } err |= cbor_encoder_close_container(&encoder, &array); if (err != CborNoError) { char addr[SOL_INET_ADDR_STRLEN]; sol_network_addr_to_str(cliaddr, addr, sizeof(addr)); SOL_WRN("Error building response for /oc/core, server %p client %s: %s", oic_server.server, addr, cbor_error_string(err)); sol_coap_header_set_code(resp, SOL_COAP_RSPCODE_INTERNAL_ERROR); } else { sol_coap_header_set_code(resp, SOL_COAP_RSPCODE_OK); sol_coap_packet_set_payload_used(resp, encoder.ptr - payload); } return sol_coap_send_packet(server, resp, cliaddr); } static const struct sol_coap_resource oic_res_coap_resource = { SOL_SET_API_VERSION(.api_version = SOL_COAP_RESOURCE_API_VERSION, ) .path = { SOL_STR_SLICE_LITERAL("oic"), SOL_STR_SLICE_LITERAL("res"), 
SOL_STR_SLICE_EMPTY }, .get = _sol_oic_server_res, .flags = SOL_COAP_FLAGS_NONE }; static struct sol_oic_server_information * init_static_info(void) { struct sol_oic_server_information information = { .manufacturer_name = SOL_STR_SLICE_LITERAL(OIC_MANUFACTURER_NAME), .manufacturer_url = SOL_STR_SLICE_LITERAL(OIC_MANUFACTURER_URL), .model_number = SOL_STR_SLICE_LITERAL(OIC_MODEL_NUMBER), .manufacture_date = SOL_STR_SLICE_LITERAL(OIC_MANUFACTURE_DATE), .platform_version = SOL_STR_SLICE_LITERAL(OIC_PLATFORM_VERSION), .hardware_version = SOL_STR_SLICE_LITERAL(OIC_HARDWARE_VERSION), .firmware_version = SOL_STR_SLICE_LITERAL(OIC_FIRMWARE_VERSION), .support_url = SOL_STR_SLICE_LITERAL(OIC_SUPPORT_URL) }; struct sol_oic_server_information *info; information.platform_id = SOL_STR_SLICE_STR((const char *)get_machine_id(), 16); info = sol_util_memdup(&information, sizeof(*info)); SOL_NULL_CHECK(info, NULL); return info; } SOL_API int sol_oic_server_init(void) { struct sol_oic_server_information *info; if (oic_server.refcnt > 0) { oic_server.refcnt++; return 0; } SOL_LOG_INTERNAL_INIT_ONCE; info = init_static_info(); SOL_NULL_CHECK(info, -1); oic_server.server = sol_coap_server_new(OIC_COAP_SERVER_UDP_PORT); if (!oic_server.server) goto error; if (!sol_coap_server_register_resource(oic_server.server, &oic_d_coap_resource, NULL)) goto error; if (!sol_coap_server_register_resource(oic_server.server, &oic_res_coap_resource, NULL)) { sol_coap_server_unregister_resource(oic_server.server, &oic_d_coap_resource); goto error; } oic_server.dtls_server = sol_coap_secure_server_new(OIC_COAP_SERVER_DTLS_PORT); if (!oic_server.dtls_server) { if (errno == ENOSYS) { SOL_INF("DTLS support not built in, OIC server running in insecure mode"); } else { SOL_INF("DTLS server could not be created for OIC server: %s", sol_util_strerrora(errno)); } } else { if (!sol_coap_server_register_resource(oic_server.dtls_server, &oic_d_coap_resource, NULL)) { SOL_WRN("Could not register device info secure resource, OIC server running in insecure mode"); sol_coap_server_unref(oic_server.dtls_server); oic_server.dtls_server = NULL; } } oic_server.information = info; sol_vector_init(&oic_server.resources, sizeof(struct sol_oic_server_resource)); oic_server.refcnt++; return 0; error: free(info); return -1; } SOL_API void sol_oic_server_release(void) { struct sol_oic_server_resource *res; uint16_t idx; OIC_SERVER_CHECK(); if (--oic_server.refcnt > 0) return; SOL_VECTOR_FOREACH_REVERSE_IDX (&oic_server.resources, res, idx) sol_coap_server_unregister_resource(oic_server.server, res->coap); if (oic_server.dtls_server) { SOL_VECTOR_FOREACH_REVERSE_IDX (&oic_server.resources, res, idx) sol_coap_server_unregister_resource(oic_server.dtls_server, res->coap); sol_coap_server_unregister_resource(oic_server.dtls_server, &oic_d_coap_resource); sol_coap_server_unref(oic_server.dtls_server); } sol_vector_clear(&oic_server.resources); sol_coap_server_unregister_resource(oic_server.server, &oic_d_coap_resource); sol_coap_server_unregister_resource(oic_server.server, &oic_res_coap_resource); sol_coap_server_unref(oic_server.server); free(oic_server.information); } static void _clear_repr_vector(struct sol_vector *repr) { struct sol_oic_repr_field *field; uint16_t idx; SOL_VECTOR_FOREACH_IDX (repr, field, idx) { if (field->type == SOL_OIC_REPR_TYPE_TEXT_STRING || field->type == SOL_OIC_REPR_TYPE_BYTE_STRING) { free((char *)field->v_slice.data); } } sol_vector_clear(repr); } static int _sol_oic_resource_type_handle( sol_coap_responsecode_t 
(*handle_fn)(const struct sol_network_link_addr *cliaddr, const void *data, const struct sol_vector *input, struct sol_vector *output), struct sol_coap_server *server, struct sol_coap_packet *req, const struct sol_network_link_addr *cliaddr, struct sol_oic_server_resource *res, bool expect_payload) { const uint8_t format_cbor = SOL_COAP_CONTENTTYPE_APPLICATION_CBOR; struct sol_coap_packet *response; struct sol_vector input = SOL_VECTOR_INIT(struct sol_oic_repr_field); struct sol_vector output = SOL_VECTOR_INIT(struct sol_oic_repr_field); sol_coap_responsecode_t code = SOL_COAP_RSPCODE_INTERNAL_ERROR; OIC_SERVER_CHECK(-ENOTCONN); response = sol_coap_packet_new(req); if (!response) { SOL_WRN("Could not build response packet."); return -1; } if (!handle_fn) { code = SOL_COAP_RSPCODE_NOT_IMPLEMENTED; goto done; } if (expect_payload) { if (!sol_oic_pkt_has_cbor_content(req)) { code = SOL_COAP_RSPCODE_BAD_REQUEST; goto done; } if (sol_oic_decode_cbor_repr(req, &input) != CborNoError) { code = SOL_COAP_RSPCODE_BAD_REQUEST; goto done; } } code = handle_fn(cliaddr, res->callback.data, &input, &output); if (code == SOL_COAP_RSPCODE_CONTENT) { sol_coap_add_option(response, SOL_COAP_OPTION_CONTENT_FORMAT, &format_cbor, sizeof(format_cbor)); if (sol_oic_encode_cbor_repr(response, res->href, &output) != CborNoError) code = SOL_COAP_RSPCODE_INTERNAL_ERROR; } done: sol_coap_header_set_type(response, SOL_COAP_TYPE_ACK); sol_coap_header_set_code(response, code == SOL_COAP_RSPCODE_CONTENT ? SOL_COAP_RSPCODE_OK : code); _clear_repr_vector(&input); /* Output vector is user-built, so it's not safe to call * _clear_repr_vector() on it. Clean the vector itself, but not its * items.*/ sol_vector_clear(&output); return sol_coap_send_packet(server, response, cliaddr); } #define DEFINE_RESOURCE_TYPE_CALLBACK_FOR_METHOD(method, expect_payload) \ static int \ _sol_oic_resource_type_ ## method(struct sol_coap_server *server, \ const struct sol_coap_resource *resource, struct sol_coap_packet *req, \ const struct sol_network_link_addr *cliaddr, void *data) \ { \ struct sol_oic_server_resource *res = data; \ return _sol_oic_resource_type_handle(res->callback.method.handle, \ server, req, cliaddr, res, expect_payload); \ } DEFINE_RESOURCE_TYPE_CALLBACK_FOR_METHOD(get, false) DEFINE_RESOURCE_TYPE_CALLBACK_FOR_METHOD(put, true) DEFINE_RESOURCE_TYPE_CALLBACK_FOR_METHOD(post, true) DEFINE_RESOURCE_TYPE_CALLBACK_FOR_METHOD(delete, true) #undef DEFINE_RESOURCE_TYPE_CALLBACK_FOR_METHOD static struct sol_coap_resource * create_coap_resource(struct sol_oic_server_resource *resource) { const struct sol_str_slice endpoint = sol_str_slice_from_str(resource->href); struct sol_coap_resource *res; unsigned int count = 0; unsigned int current; size_t i; for (i = 0; i < endpoint.len; i++) if (endpoint.data[i] == '/') count++; SOL_INT_CHECK(count, == 0, NULL); if (endpoint.data[0] != '/') { SOL_WRN("Invalid endpoint - Path '%.*s' does not start with '/'", SOL_STR_SLICE_PRINT(endpoint)); return NULL; } if (endpoint.data[endpoint.len - 1] == '/') { SOL_WRN("Invalid endpoint - Path '%.*s' ends with '/'", SOL_STR_SLICE_PRINT(endpoint)); return NULL; } /* alloc space for the path plus empty slice at the end */ res = calloc(1, sizeof(struct sol_coap_resource) + (count + 1) * sizeof(struct sol_str_slice)); SOL_NULL_CHECK(res, NULL); SOL_SET_API_VERSION(res->api_version = SOL_COAP_RESOURCE_API_VERSION; ) res->path[0].data = &endpoint.data[1]; for (i = 1, current = 0; i < endpoint.len; i++) { if (endpoint.data[i] == '/') res->path[++current].data = 
&endpoint.data[i + 1]; else res->path[current].len++; } res->get = _sol_oic_resource_type_get; res->put = _sol_oic_resource_type_put; res->post = _sol_oic_resource_type_post; res->delete = _sol_oic_resource_type_delete; if (resource->flags & SOL_OIC_FLAG_DISCOVERABLE) res->flags |= SOL_COAP_FLAGS_WELL_KNOWN; if (oic_server.dtls_server) resource->flags |= SOL_OIC_FLAG_SECURE; res->iface = sol_str_slice_from_str(resource->iface); res->resource_type = sol_str_slice_from_str(resource->rt); return res; } static char * create_endpoint(void) { static unsigned int id = 0; char *buffer = NULL; int r; r = asprintf(&buffer, "/sol/%x", id++); return r < 0 ? NULL : buffer; } SOL_API struct sol_oic_server_resource * sol_oic_server_add_resource(const struct sol_oic_resource_type *rt, const void *handler_data, enum sol_oic_resource_flag flags) { struct sol_oic_server_resource *res; OIC_SERVER_CHECK(NULL); SOL_NULL_CHECK(rt, NULL); #ifndef SOL_NO_API_VERSION if (unlikely(rt->api_version != SOL_OIC_RESOURCE_TYPE_API_VERSION)) { SOL_WRN("Couldn't add resource_type with " "version '%u'. Expected version '%u'.", rt->api_version, SOL_OIC_RESOURCE_TYPE_API_VERSION); return NULL; } #endif res = sol_vector_append(&oic_server.resources); SOL_NULL_CHECK(res, NULL); res->callback.data = handler_data; res->callback.get.handle = rt->get.handle; res->callback.put.handle = rt->put.handle; res->callback.post.handle = rt->post.handle; res->callback.delete.handle = rt->delete.handle; res->flags = flags; res->rt = strndup(rt->resource_type.data, rt->resource_type.len); SOL_NULL_CHECK_GOTO(res->rt, remove_res); res->iface = strndup(rt->interface.data, rt->interface.len); SOL_NULL_CHECK_GOTO(res->iface, free_rt); res->href = create_endpoint(); SOL_NULL_CHECK_GOTO(res->href, free_iface); res->coap = create_coap_resource(res); SOL_NULL_CHECK_GOTO(res->coap, free_coap); if (!sol_coap_server_register_resource(oic_server.server, res->coap, res)) goto free_coap; if (oic_server.dtls_server) { if (!sol_coap_server_register_resource(oic_server.dtls_server, res->coap, res)) { SOL_WRN("Could not register resource in DTLS server"); goto unregister_resource; } } return res; unregister_resource: sol_coap_server_unregister_resource(oic_server.server, res->coap); free_coap: free(res->coap); free_iface: free(res->iface); free_rt: free(res->rt); remove_res: sol_vector_del(&oic_server.resources, oic_server.resources.len - 1); return NULL; } SOL_API void sol_oic_server_del_resource(struct sol_oic_server_resource *resource) { struct sol_oic_server_resource *iter; uint16_t idx; OIC_SERVER_CHECK(); SOL_NULL_CHECK(resource); sol_coap_server_unregister_resource(oic_server.server, resource->coap); if (oic_server.dtls_server) sol_coap_server_unregister_resource(oic_server.dtls_server, resource->coap); free(resource->coap); free(resource->href); free(resource->iface); free(resource->rt); SOL_VECTOR_FOREACH_REVERSE_IDX (&oic_server.resources, iter, idx) { if (iter == resource) { sol_vector_del(&oic_server.resources, idx); return; } } SOL_ERR("Could not find resource %p in OIC server resource list", resource); } static bool send_notification_to_server(struct sol_oic_server_resource *resource, const struct sol_vector *fields, struct sol_coap_server *server) { const uint8_t format_cbor = SOL_COAP_CONTENTTYPE_APPLICATION_CBOR; struct sol_coap_packet *pkt; pkt = sol_coap_packet_notification_new(oic_server.server, resource->coap); SOL_NULL_CHECK(pkt, false); sol_coap_add_option(pkt, SOL_COAP_OPTION_CONTENT_FORMAT, &format_cbor, sizeof(format_cbor)); if 
(sol_oic_encode_cbor_repr(pkt, resource->href, fields) != CborNoError) { sol_coap_header_set_code(pkt, SOL_COAP_RSPCODE_INTERNAL_ERROR); } else { sol_coap_header_set_code(pkt, SOL_COAP_RSPCODE_OK); } sol_coap_header_set_type(pkt, SOL_COAP_TYPE_ACK); return !sol_coap_packet_send_notification(oic_server.server, resource->coap, pkt); } SOL_API bool sol_oic_notify_observers(struct sol_oic_server_resource *resource, const struct sol_vector *fields) { bool sent_server = false; bool sent_dtls_server = false; SOL_NULL_CHECK(resource, false); sent_server = send_notification_to_server(resource, fields, oic_server.server); if (oic_server.dtls_server) sent_dtls_server = send_notification_to_server(resource, fields, oic_server.dtls_server); return sent_server || sent_dtls_server; }
rchiossi/soletta
src/lib/comms/sol-oic-server.c
C
bsd-3-clause
25,099
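A pattern worth pulling out of the encoder code above: every TinyCBOR call returns a CborError, and the file ORs them all into one err that is checked once at the end instead of testing each call individually. A self-contained sketch of that idiom against stock TinyCBOR (the map key and value are placeholders; note the file above computes the payload length from encoder.ptr, an older TinyCBOR interface, where this sketch assumes the newer cbor_encoder_get_buffer_size()):

#include <stdio.h>
#include <stdint.h>
#include "cbor.h" /* TinyCBOR */

static int encode_one_pair(uint8_t *buf, size_t len, size_t *used)
{
    CborEncoder enc, map;
    CborError err;

    cbor_encoder_init(&enc, buf, len, 0);
    /* Accumulate errors; check once after the last call. */
    err  = cbor_encoder_create_map(&enc, &map, CborIndefiniteLength);
    err |= cbor_encode_text_stringz(&map, "mnmn");  /* placeholder key */
    err |= cbor_encode_text_stringz(&map, "Acme");  /* placeholder value */
    err |= cbor_encoder_close_container(&enc, &map);
    if (err != CborNoError) {
        fprintf(stderr, "encode failed: %s\n", cbor_error_string(err));
        return -1;
    }
    *used = cbor_encoder_get_buffer_size(&enc, buf);
    return 0;
}

int main(void)
{
    uint8_t buf[64];
    size_t used;
    if (encode_one_pair(buf, sizeof(buf), &used) == 0)
        printf("encoded %zu bytes\n", used);
    return 0;
}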
/*
 * Copyright (c) 2006, 2006, Oracle and/or its affiliates. All rights reserved.
 * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
 */

/*
 * ceil(x)
 * Return x rounded toward +inf to integral value
 * Method:
 *	Bit twiddling.
 * Exception:
 *	Inexact flag raised if x not equal to ceil(x).
 */

#include "fdlibm.h"

#ifdef __STDC__
static const double huge = 1.0e300;
#else
static double huge = 1.0e300;
#endif

#ifdef __STDC__
	double ceil(double x)
#else
	double ceil(x)
	double x;
#endif
{
	int i0,i1,j0;
	unsigned i,j;
	i0 =  __HI(x);
	i1 =  __LO(x);
	j0 = ((i0>>20)&0x7ff)-0x3ff;
	if(j0<20) {
	    if(j0<0) { 	/* raise inexact if x != 0 */
		if(huge+x>0.0) {/* return 0*sign(x) if |x|<1 */
		    if(i0<0) {i0=0x80000000;i1=0;}
		    else if((i0|i1)!=0) { i0=0x3ff00000;i1=0;}
		}
	    } else {
		i = (0x000fffff)>>j0;
		if(((i0&i)|i1)==0) return x;	/* x is integral */
		if(huge+x>0.0) {	/* raise inexact flag */
		    if(i0>0) i0 += (0x00100000)>>j0;
		    i0 &= (~i); i1=0;
		}
	    }
	} else if (j0>51) {
	    if(j0==0x400) return x+x;	/* inf or NaN */
	    else return x;		/* x is integral */
	} else {
	    i = ((unsigned)(0xffffffff))>>(j0-20);
	    if((i1&i)==0) return x;	/* x is integral */
	    if(huge+x>0.0) { 		/* raise inexact flag */
		if(i0>0) {
		    if(j0==20) i0+=1;
		    else {
			j = i1 + (1<<(52-j0));
			if(j<i1) i0+=1;	/* got a carry */
			i1 = j;
		    }
		}
		i1 &= (~i);
	    }
	}
	__HI(x) = i0;
	__LO(x) = i1;
	return x;
}
SnakeDoc/GuestVM
guestvm~guestvm/com.oracle.max.ve.native/fdlibm/s_ceil.c
C
bsd-3-clause
1,481
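Everything in ceil() above branches on j0, the unbiased binary exponent pulled from the double's high word. A portable sketch of that extraction, using memcpy in place of fdlibm's __HI() macro (assumes IEEE-754 doubles):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int unbiased_exp(double x)
{
    uint64_t bits;
    memcpy(&bits, &x, sizeof bits);      /* portable type-pun */
    int i0 = (int)(bits >> 32);          /* high word, i.e. __HI(x) */
    return ((i0 >> 20) & 0x7ff) - 0x3ff; /* strip the 1023 bias */
}

int main(void)
{
    /* j0 < 20: all integer bits fit in the high word (first branch of ceil);
     * j0 > 51: x is already integral, or inf/NaN when j0 == 0x400. */
    printf("j0(0.5)=%d j0(1.0)=%d j0(2.5)=%d\n",
           unbiased_exp(0.5), unbiased_exp(1.0), unbiased_exp(2.5));
    return 0;
}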
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END * * Portions Copyright (c) 2008-2009 Stacey Son <sson@FreeBSD.org> * * $FreeBSD$ * */ /* * Copyright 2006 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #include "opt_kdtrace.h" #include <sys/cdefs.h> #include <sys/param.h> #include <sys/systm.h> #include <sys/conf.h> #include <sys/kernel.h> #include <sys/limits.h> #include <sys/lock.h> #include <sys/linker.h> #include <sys/module.h> #include <sys/mutex.h> #include <sys/dtrace.h> #include <sys/lockstat.h> #if defined(__i386__) || defined(__amd64__) || \ defined(__mips__) || defined(__powerpc__) #define LOCKSTAT_AFRAMES 1 #else #error "architecture not supported" #endif static d_open_t lockstat_open; static void lockstat_provide(void *, dtrace_probedesc_t *); static void lockstat_destroy(void *, dtrace_id_t, void *); static void lockstat_enable(void *, dtrace_id_t, void *); static void lockstat_disable(void *, dtrace_id_t, void *); static void lockstat_load(void *); static int lockstat_unload(void); typedef struct lockstat_probe { char *lsp_func; char *lsp_name; int lsp_probe; dtrace_id_t lsp_id; #ifdef __FreeBSD__ int lsp_frame; #endif } lockstat_probe_t; #ifdef __FreeBSD__ lockstat_probe_t lockstat_probes[] = { /* Spin Locks */ { LS_MTX_SPIN_LOCK, LSS_ACQUIRE, LS_MTX_SPIN_LOCK_ACQUIRE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_MTX_SPIN_LOCK, LSS_SPIN, LS_MTX_SPIN_LOCK_SPIN, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_MTX_SPIN_UNLOCK, LSS_RELEASE, LS_MTX_SPIN_UNLOCK_RELEASE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, /* Adaptive Locks */ { LS_MTX_LOCK, LSA_ACQUIRE, LS_MTX_LOCK_ACQUIRE, DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) }, { LS_MTX_LOCK, LSA_BLOCK, LS_MTX_LOCK_BLOCK, DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) }, { LS_MTX_LOCK, LSA_SPIN, LS_MTX_LOCK_SPIN, DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) }, { LS_MTX_UNLOCK, LSA_RELEASE, LS_MTX_UNLOCK_RELEASE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_MTX_TRYLOCK, LSA_ACQUIRE, LS_MTX_TRYLOCK_ACQUIRE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, /* Reader/Writer Locks */ { LS_RW_RLOCK, LSR_ACQUIRE, LS_RW_RLOCK_ACQUIRE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_RW_RLOCK, LSR_BLOCK, LS_RW_RLOCK_BLOCK, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_RW_RLOCK, LSR_SPIN, LS_RW_RLOCK_SPIN, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_RW_RUNLOCK, LSR_RELEASE, LS_RW_RUNLOCK_RELEASE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_RW_WLOCK, LSR_ACQUIRE, LS_RW_WLOCK_ACQUIRE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_RW_WLOCK, LSR_BLOCK, LS_RW_WLOCK_BLOCK, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_RW_WLOCK, LSR_SPIN, LS_RW_WLOCK_SPIN, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_RW_WUNLOCK, LSR_RELEASE, LS_RW_WUNLOCK_RELEASE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_RW_TRYUPGRADE, LSR_UPGRADE, LS_RW_TRYUPGRADE_UPGRADE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { 
LS_RW_DOWNGRADE, LSR_DOWNGRADE, LS_RW_DOWNGRADE_DOWNGRADE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, /* Shared/Exclusive Locks */ { LS_SX_SLOCK, LSX_ACQUIRE, LS_SX_SLOCK_ACQUIRE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_SX_SLOCK, LSX_BLOCK, LS_SX_SLOCK_BLOCK, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_SX_SLOCK, LSX_SPIN, LS_SX_SLOCK_SPIN, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_SX_SUNLOCK, LSX_RELEASE, LS_SX_SUNLOCK_RELEASE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_SX_XLOCK, LSX_ACQUIRE, LS_SX_XLOCK_ACQUIRE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_SX_XLOCK, LSX_BLOCK, LS_SX_XLOCK_BLOCK, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_SX_XLOCK, LSX_SPIN, LS_SX_XLOCK_SPIN, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_SX_XUNLOCK, LSX_RELEASE, LS_SX_XUNLOCK_RELEASE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_SX_TRYUPGRADE, LSX_UPGRADE, LS_SX_TRYUPGRADE_UPGRADE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { LS_SX_DOWNGRADE, LSX_DOWNGRADE, LS_SX_DOWNGRADE_DOWNGRADE, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, /* Thread Locks */ { LS_THREAD_LOCK, LST_SPIN, LS_THREAD_LOCK_SPIN, DTRACE_IDNONE, LOCKSTAT_AFRAMES }, { NULL } }; #else #error "OS not supported" #endif static struct cdevsw lockstat_cdevsw = { .d_version = D_VERSION, .d_open = lockstat_open, .d_name = "lockstat", }; static struct cdev *lockstat_cdev; static dtrace_provider_id_t lockstat_id; /*ARGSUSED*/ static void lockstat_enable(void *arg, dtrace_id_t id, void *parg) { lockstat_probe_t *probe = parg; ASSERT(!lockstat_probemap[probe->lsp_probe]); lockstat_enabled++; lockstat_probemap[probe->lsp_probe] = id; #ifdef DOODAD membar_producer(); #endif lockstat_probe_func = dtrace_probe; #ifdef DOODAD membar_producer(); lockstat_hot_patch(); membar_producer(); #endif } /*ARGSUSED*/ static void lockstat_disable(void *arg, dtrace_id_t id, void *parg) { lockstat_probe_t *probe = parg; int i; ASSERT(lockstat_probemap[probe->lsp_probe]); lockstat_enabled--; lockstat_probemap[probe->lsp_probe] = 0; #ifdef DOODAD lockstat_hot_patch(); membar_producer(); #endif /* * See if we have any probes left enabled. */ for (i = 0; i < LS_NPROBES; i++) { if (lockstat_probemap[i]) { /* * This probe is still enabled. We don't need to deal * with waiting for all threads to be out of the * lockstat critical sections; just return. 
*/ return; } } } /*ARGSUSED*/ static int lockstat_open(struct cdev *dev __unused, int oflags __unused, int devtype __unused, struct thread *td __unused) { return (0); } /*ARGSUSED*/ static void lockstat_provide(void *arg, dtrace_probedesc_t *desc) { int i = 0; for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) { lockstat_probe_t *probe = &lockstat_probes[i]; if (dtrace_probe_lookup(lockstat_id, "kernel", probe->lsp_func, probe->lsp_name) != 0) continue; ASSERT(!probe->lsp_id); #ifdef __FreeBSD__ probe->lsp_id = dtrace_probe_create(lockstat_id, "kernel", probe->lsp_func, probe->lsp_name, probe->lsp_frame, probe); #else probe->lsp_id = dtrace_probe_create(lockstat_id, "kernel", probe->lsp_func, probe->lsp_name, LOCKSTAT_AFRAMES, probe); #endif } } /*ARGSUSED*/ static void lockstat_destroy(void *arg, dtrace_id_t id, void *parg) { lockstat_probe_t *probe = parg; ASSERT(!lockstat_probemap[probe->lsp_probe]); probe->lsp_id = 0; } static dtrace_pattr_t lockstat_attr = { { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, }; static dtrace_pops_t lockstat_pops = { lockstat_provide, NULL, lockstat_enable, lockstat_disable, NULL, NULL, NULL, NULL, NULL, lockstat_destroy }; static void lockstat_load(void *dummy) { /* Create the /dev/dtrace/lockstat entry. */ lockstat_cdev = make_dev(&lockstat_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "dtrace/lockstat"); if (dtrace_register("lockstat", &lockstat_attr, DTRACE_PRIV_USER, NULL, &lockstat_pops, NULL, &lockstat_id) != 0) return; } static int lockstat_unload() { int error = 0; if ((error = dtrace_unregister(lockstat_id)) != 0) return (error); destroy_dev(lockstat_cdev); return (error); } /* ARGSUSED */ static int lockstat_modevent(module_t mod __unused, int type, void *data __unused) { int error = 0; switch (type) { case MOD_LOAD: break; case MOD_UNLOAD: break; case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } SYSINIT(lockstat_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, lockstat_load, NULL); SYSUNINIT(lockstat_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, lockstat_unload, NULL); DEV_MODULE(lockstat, lockstat_modevent, NULL); MODULE_VERSION(lockstat, 1); MODULE_DEPEND(lockstat, dtrace, 1, 1, 1); MODULE_DEPEND(lockstat, opensolaris, 1, 1, 1);
jrobhoward/SCADAbase
sys/cddl/dev/lockstat/lockstat.c
C
bsd-3-clause
8,829
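lockstat_provide() above walks a sentinel-terminated probe table and skips entries that already exist. A stripped-down sketch of that table-walk pattern; probe_t and the id assignment are illustrative stand-ins for the lockstat_probe_t table and dtrace_probe_create():

#include <stdio.h>

typedef struct { const char *func; const char *name; int id; } probe_t;

static probe_t probes[] = {
    { "mtx_lock",   "acquire", 0 },
    { "mtx_unlock", "release", 0 },
    { NULL } /* sentinel terminates the table, like lockstat_probes[] */
};

int main(void)
{
    for (int i = 0; probes[i].func != NULL; i++) {
        if (probes[i].id != 0)
            continue;          /* already registered: the lookup check */
        probes[i].id = i + 1;  /* stand-in for dtrace_probe_create() */
        printf("created %s:%s -> id %d\n",
               probes[i].func, probes[i].name, probes[i].id);
    }
    return 0;
}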
#include <core/Runtime.h>
#include <stdlib.h>
#include <math.h>

int runtime_f32_mul(Stack stack)
{
    Value* operand1 = NULL;
    Value* operand2 = NULL;
    pop_Value(stack, &operand2);
    pop_Value(stack, &operand1);
    if(isnan(operand1->value.f32) || isnan(operand2->value.f32)) {
        /* NaN propagates through multiplication */
        push_Value(stack, new_f32Value(nanf("")));
    } else if(isinf(operand1->value.f32) || isinf(operand2->value.f32)) {
        if(operand1->value.f32 == 0 || operand2->value.f32 == 0) {
            /* inf * 0 is NaN */
            push_Value(stack, new_f32Value(nanf("")));
        } else if(signbit(operand1->value.f32) ^ signbit(operand2->value.f32)) {
            push_Value(stack, new_f32Value(-strtof("INF", NULL)));
        } else {
            push_Value(stack, new_f32Value(strtof("INF", NULL)));
        }
    } else if(operand1->value.f32 == 0 && operand2->value.f32 == 0) {
        /* sign of a zero product follows the XOR of the operand signs */
        if(signbit(operand1->value.f32) ^ signbit(operand2->value.f32)) {
            push_Value(stack, new_f32Value(-0.0f));
        } else {
            push_Value(stack, new_f32Value(+0.0f));
        }
    } else {
        push_Value(stack, new_f32Value(operand1->value.f32 * operand2->value.f32));
    }
    free_Value(operand1);
    free_Value(operand2);
    return 0;
}
LuisHsu/WasmVM
src/lib/core/runtime/f32_mul.c
C
bsd-3-clause
1,224
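The interpreter above spells out the IEEE-754 special cases of 32-bit multiplication by hand: NaN propagation, inf * 0 -> NaN, and the sign of an infinite or zero product taken from the XOR of the operand signs. On conforming hardware the plain * operator already produces the same results, which this quick check confirms:

#include <math.h>
#include <stdio.h>

int main(void)
{
    float inf = INFINITY;
    printf("inf * 0 is NaN?           %d\n", isnan(inf * 0.0f));
    printf("(-0.0f) * 0.0f negative?  %d\n", signbit(-0.0f * 0.0f) != 0);
    printf("inf * -2.0f             = %f\n", (double)(inf * -2.0f));
    return 0;
}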
/*- * Copyright (c) 1996-1999 * Kazutaka YOKOTA (yokota@zodiac.mech.utsunomiya-u.ac.jp) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: atkbdc.c,v 1.1 1999/01/09 02:44:50 yokota Exp $ * from kbdio.c,v 1.13 1998/09/25 11:55:46 yokota Exp */ #include "atkbdc.h" #include "opt_kbd.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/kernel.h> #include <sys/malloc.h> #include <sys/syslog.h> #include <machine/clock.h> #include <dev/kbd/atkbdcreg.h> #ifndef __i386__ #include <isa/isareg.h> #else #include <i386/isa/isa.h> #endif /* constants */ #define MAXKBDC MAX(NATKBDC, 1) /* macros */ #ifndef MAX #define MAX(x, y) ((x) > (y) ? (x) : (y)) #endif #define kbdcp(p) ((atkbdc_softc_t *)(p)) #define nextq(i) (((i) + 1) % KBDQ_BUFSIZE) #define availq(q) ((q)->head != (q)->tail) #if KBDIO_DEBUG >= 2 #define emptyq(q) ((q)->tail = (q)->head = (q)->qcount = 0) #else #define emptyq(q) ((q)->tail = (q)->head = 0) #endif /* local variables */ /* * We always need at least one copy of the kbdc_softc struct for the * low-level console. As the low-level console accesses the keyboard * controller before kbdc, and all other devices, is probed, we * statically allocate one entry. 
XXX */ static atkbdc_softc_t default_kbdc; static atkbdc_softc_t *atkbdc_softc[MAXKBDC] = { &default_kbdc }; static int verbose = KBDIO_DEBUG; /* function prototypes */ static int atkbdc_setup(atkbdc_softc_t *sc, int port); static int addq(kqueue *q, int c); static int removeq(kqueue *q); static int wait_while_controller_busy(atkbdc_softc_t *kbdc); static int wait_for_data(atkbdc_softc_t *kbdc); static int wait_for_kbd_data(atkbdc_softc_t *kbdc); static int wait_for_kbd_ack(atkbdc_softc_t *kbdc); static int wait_for_aux_data(atkbdc_softc_t *kbdc); static int wait_for_aux_ack(atkbdc_softc_t *kbdc); #if NATKBDC > 0 atkbdc_softc_t *atkbdc_get_softc(int unit) { atkbdc_softc_t *sc; if (unit >= sizeof(atkbdc_softc)/sizeof(atkbdc_softc[0])) return NULL; sc = atkbdc_softc[unit]; if (sc == NULL) { sc = atkbdc_softc[unit] = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT); if (sc == NULL) return NULL; bzero(sc, sizeof(*sc)); sc->port = -1; /* XXX */ } return sc; } int atkbdc_probe_unit(atkbdc_softc_t *sc, int unit, int port) { return atkbdc_setup(sc, port); } #endif /* NATKBDC > 0 */ /* the backdoor to the keyboard controller! XXX */ int atkbdc_configure(void) { return atkbdc_setup(atkbdc_softc[0], -1); } static int atkbdc_setup(atkbdc_softc_t *sc, int port) { if (port <= 0) port = IO_KBD; if (sc->port <= 0) { sc->command_byte = -1; sc->command_mask = 0; sc->lock = FALSE; sc->kbd.head = sc->kbd.tail = 0; sc->aux.head = sc->aux.tail = 0; #if KBDIO_DEBUG >= 2 sc->kbd.call_count = 0; sc->kbd.qcount = sc->kbd.max_qcount = 0; sc->aux.call_count = 0; sc->aux.qcount = sc->aux.max_qcount = 0; #endif } sc->port = port; /* may override the previous value */ return 0; } /* associate a port number with a KBDC */ KBDC kbdc_open(int port) { int s; int i; if (port <= 0) port = IO_KBD; s = spltty(); for (i = 0; i < sizeof(atkbdc_softc)/sizeof(atkbdc_softc[0]); ++i) { if (atkbdc_softc[i] == NULL) continue; if (atkbdc_softc[i]->port == port) { splx(s); return (KBDC)atkbdc_softc[i]; } if (atkbdc_softc[i]->port <= 0) { if (atkbdc_setup(atkbdc_softc[i], port)) break; splx(s); return (KBDC)atkbdc_softc[i]; } } splx(s); return NULL; } /* * I/O access arbitration in `kbdio' * * The `kbdio' module uses a simplistic convention to arbitrate * I/O access to the controller/keyboard/mouse. The convention requires * close cooperation of the calling device driver. * * The device driver which utilizes the `kbdio' module are assumed to * have the following set of routines. * a. An interrupt handler (the bottom half of the driver). * b. Timeout routines which may briefly polls the keyboard controller. * c. Routines outside interrupt context (the top half of the driver). * They should follow the rules below: * 1. The interrupt handler may assume that it always has full access * to the controller/keyboard/mouse. * 2. The other routines must issue `spltty()' if they wish to * prevent the interrupt handler from accessing * the controller/keyboard/mouse. * 3. The timeout routines and the top half routines of the device driver * arbitrate I/O access by observing the lock flag in `kbdio'. * The flag is manipulated via `kbdc_lock()'; when one wants to * perform I/O, call `kbdc_lock(kbdc, TRUE)' and proceed only if * the call returns with TRUE. Otherwise the caller must back off. * Call `kbdc_lock(kbdc, FALSE)' when necessary I/O operaion * is finished. This mechanism does not prevent the interrupt * handler from being invoked at any time and carrying out I/O. * Therefore, `spltty()' must be strategically placed in the device * driver code. 
Also note that the timeout routine may interrupt * `kbdc_lock()' called by the top half of the driver, but this * interruption is OK so long as the timeout routine observes the * the rule 4 below. * 4. The interrupt and timeout routines should not extend I/O operation * across more than one interrupt or timeout; they must complete * necessary I/O operation within one invokation of the routine. * This measns that if the timeout routine acquires the lock flag, * it must reset the flag to FALSE before it returns. */ /* set/reset polling lock */ int kbdc_lock(KBDC p, int lock) { int prevlock; prevlock = kbdcp(p)->lock; kbdcp(p)->lock = lock; return (prevlock != lock); } /* check if any data is waiting to be processed */ int kbdc_data_ready(KBDC p) { return (availq(&kbdcp(p)->kbd) || availq(&kbdcp(p)->aux) || (inb(kbdcp(p)->port + KBD_STATUS_PORT) & KBDS_ANY_BUFFER_FULL)); } /* queuing functions */ static int addq(kqueue *q, int c) { if (nextq(q->tail) != q->head) { q->q[q->tail] = c; q->tail = nextq(q->tail); #if KBDIO_DEBUG >= 2 ++q->call_count; ++q->qcount; if (q->qcount > q->max_qcount) q->max_qcount = q->qcount; #endif return TRUE; } return FALSE; } static int removeq(kqueue *q) { int c; if (q->tail != q->head) { c = q->q[q->head]; q->head = nextq(q->head); #if KBDIO_DEBUG >= 2 --q->qcount; #endif return c; } return -1; } /* * device I/O routines */ static int wait_while_controller_busy(struct atkbdc_softc *kbdc) { /* CPU will stay inside the loop for 100msec at most */ int retry = 5000; int port = kbdc->port; int f; while ((f = inb(port + KBD_STATUS_PORT)) & KBDS_INPUT_BUFFER_FULL) { if ((f & KBDS_BUFFER_FULL) == KBDS_KBD_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdc->kbd, inb(port + KBD_DATA_PORT)); } else if ((f & KBDS_BUFFER_FULL) == KBDS_AUX_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdc->aux, inb(port + KBD_DATA_PORT)); } DELAY(KBDC_DELAYTIME); if (--retry < 0) return FALSE; } return TRUE; } /* * wait for any data; whether it's from the controller, * the keyboard, or the aux device. */ static int wait_for_data(struct atkbdc_softc *kbdc) { /* CPU will stay inside the loop for 200msec at most */ int retry = 10000; int port = kbdc->port; int f; while ((f = inb(port + KBD_STATUS_PORT) & KBDS_ANY_BUFFER_FULL) == 0) { DELAY(KBDC_DELAYTIME); if (--retry < 0) return 0; } DELAY(KBDD_DELAYTIME); return f; } /* wait for data from the keyboard */ static int wait_for_kbd_data(struct atkbdc_softc *kbdc) { /* CPU will stay inside the loop for 200msec at most */ int retry = 10000; int port = kbdc->port; int f; while ((f = inb(port + KBD_STATUS_PORT) & KBDS_BUFFER_FULL) != KBDS_KBD_BUFFER_FULL) { if (f == KBDS_AUX_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdc->aux, inb(port + KBD_DATA_PORT)); } DELAY(KBDC_DELAYTIME); if (--retry < 0) return 0; } DELAY(KBDD_DELAYTIME); return f; } /* * wait for an ACK(FAh), RESEND(FEh), or RESET_FAIL(FCh) from the keyboard. * queue anything else. 
*/ static int wait_for_kbd_ack(struct atkbdc_softc *kbdc) { /* CPU will stay inside the loop for 200msec at most */ int retry = 10000; int port = kbdc->port; int f; int b; while (retry-- > 0) { if ((f = inb(port + KBD_STATUS_PORT)) & KBDS_ANY_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); b = inb(port + KBD_DATA_PORT); if ((f & KBDS_BUFFER_FULL) == KBDS_KBD_BUFFER_FULL) { if ((b == KBD_ACK) || (b == KBD_RESEND) || (b == KBD_RESET_FAIL)) return b; addq(&kbdc->kbd, b); } else if ((f & KBDS_BUFFER_FULL) == KBDS_AUX_BUFFER_FULL) { addq(&kbdc->aux, b); } } DELAY(KBDC_DELAYTIME); } return -1; } /* wait for data from the aux device */ static int wait_for_aux_data(struct atkbdc_softc *kbdc) { /* CPU will stay inside the loop for 200msec at most */ int retry = 10000; int port = kbdc->port; int f; while ((f = inb(port + KBD_STATUS_PORT) & KBDS_BUFFER_FULL) != KBDS_AUX_BUFFER_FULL) { if (f == KBDS_KBD_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdc->kbd, inb(port + KBD_DATA_PORT)); } DELAY(KBDC_DELAYTIME); if (--retry < 0) return 0; } DELAY(KBDD_DELAYTIME); return f; } /* * wait for an ACK(FAh), RESEND(FEh), or RESET_FAIL(FCh) from the aux device. * queue anything else. */ static int wait_for_aux_ack(struct atkbdc_softc *kbdc) { /* CPU will stay inside the loop for 200msec at most */ int retry = 10000; int port = kbdc->port; int f; int b; while (retry-- > 0) { if ((f = inb(port + KBD_STATUS_PORT)) & KBDS_ANY_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); b = inb(port + KBD_DATA_PORT); if ((f & KBDS_BUFFER_FULL) == KBDS_AUX_BUFFER_FULL) { if ((b == PSM_ACK) || (b == PSM_RESEND) || (b == PSM_RESET_FAIL)) return b; addq(&kbdc->aux, b); } else if ((f & KBDS_BUFFER_FULL) == KBDS_KBD_BUFFER_FULL) { addq(&kbdc->kbd, b); } } DELAY(KBDC_DELAYTIME); } return -1; } /* write a one byte command to the controller */ int write_controller_command(KBDC p, int c) { if (!wait_while_controller_busy(kbdcp(p))) return FALSE; outb(kbdcp(p)->port + KBD_COMMAND_PORT, c); return TRUE; } /* write a one byte data to the controller */ int write_controller_data(KBDC p, int c) { if (!wait_while_controller_busy(kbdcp(p))) return FALSE; outb(kbdcp(p)->port + KBD_DATA_PORT, c); return TRUE; } /* write a one byte keyboard command */ int write_kbd_command(KBDC p, int c) { if (!wait_while_controller_busy(kbdcp(p))) return FALSE; outb(kbdcp(p)->port + KBD_DATA_PORT, c); return TRUE; } /* write a one byte auxiliary device command */ int write_aux_command(KBDC p, int c) { if (!write_controller_command(p, KBDC_WRITE_TO_AUX)) return FALSE; return write_controller_data(p, c); } /* send a command to the keyboard and wait for ACK */ int send_kbd_command(KBDC p, int c) { int retry = KBD_MAXRETRY; int res = -1; while (retry-- > 0) { if (!write_kbd_command(p, c)) continue; res = wait_for_kbd_ack(kbdcp(p)); if (res == KBD_ACK) break; } return res; } /* send a command to the auxiliary device and wait for ACK */ int send_aux_command(KBDC p, int c) { int retry = KBD_MAXRETRY; int res = -1; while (retry-- > 0) { if (!write_aux_command(p, c)) continue; /* * FIXME: XXX * The aux device may have already sent one or two bytes of * status data, when a command is received. It will immediately * stop data transmission, thus, leaving an incomplete data * packet in our buffer. We have to discard any unprocessed * data in order to remove such packets. Well, we may remove * unprocessed, but necessary data byte as well... 
*/ emptyq(&kbdcp(p)->aux); res = wait_for_aux_ack(kbdcp(p)); if (res == PSM_ACK) break; } return res; } /* send a command and a data to the keyboard, wait for ACKs */ int send_kbd_command_and_data(KBDC p, int c, int d) { int retry; int res = -1; for (retry = KBD_MAXRETRY; retry > 0; --retry) { if (!write_kbd_command(p, c)) continue; res = wait_for_kbd_ack(kbdcp(p)); if (res == KBD_ACK) break; else if (res != KBD_RESEND) return res; } if (retry <= 0) return res; for (retry = KBD_MAXRETRY, res = -1; retry > 0; --retry) { if (!write_kbd_command(p, d)) continue; res = wait_for_kbd_ack(kbdcp(p)); if (res != KBD_RESEND) break; } return res; } /* send a command and a data to the auxiliary device, wait for ACKs */ int send_aux_command_and_data(KBDC p, int c, int d) { int retry; int res = -1; for (retry = KBD_MAXRETRY; retry > 0; --retry) { if (!write_aux_command(p, c)) continue; emptyq(&kbdcp(p)->aux); res = wait_for_aux_ack(kbdcp(p)); if (res == PSM_ACK) break; else if (res != PSM_RESEND) return res; } if (retry <= 0) return res; for (retry = KBD_MAXRETRY, res = -1; retry > 0; --retry) { if (!write_aux_command(p, d)) continue; res = wait_for_aux_ack(kbdcp(p)); if (res != PSM_RESEND) break; } return res; } /* * read one byte from any source; whether from the controller, * the keyboard, or the aux device */ int read_controller_data(KBDC p) { if (availq(&kbdcp(p)->kbd)) return removeq(&kbdcp(p)->kbd); if (availq(&kbdcp(p)->aux)) return removeq(&kbdcp(p)->aux); if (!wait_for_data(kbdcp(p))) return -1; /* timeout */ return inb(kbdcp(p)->port + KBD_DATA_PORT); } #if KBDIO_DEBUG >= 2 static int call = 0; #endif /* read one byte from the keyboard */ int read_kbd_data(KBDC p) { #if KBDIO_DEBUG >= 2 if (++call > 2000) { call = 0; log(LOG_DEBUG, "kbdc: kbd q: %d calls, max %d chars, " "aux q: %d calls, max %d chars\n", kbdcp(p)->kbd.call_count, kbdcp(p)->kbd.max_qcount, kbdcp(p)->aux.call_count, kbdcp(p)->aux.max_qcount); } #endif if (availq(&kbdcp(p)->kbd)) return removeq(&kbdcp(p)->kbd); if (!wait_for_kbd_data(kbdcp(p))) return -1; /* timeout */ return inb(kbdcp(p)->port + KBD_DATA_PORT); } /* read one byte from the keyboard, but return immediately if * no data is waiting */ int read_kbd_data_no_wait(KBDC p) { int f; #if KBDIO_DEBUG >= 2 if (++call > 2000) { call = 0; log(LOG_DEBUG, "kbdc: kbd q: %d calls, max %d chars, " "aux q: %d calls, max %d chars\n", kbdcp(p)->kbd.call_count, kbdcp(p)->kbd.max_qcount, kbdcp(p)->aux.call_count, kbdcp(p)->aux.max_qcount); } #endif if (availq(&kbdcp(p)->kbd)) return removeq(&kbdcp(p)->kbd); f = inb(kbdcp(p)->port + KBD_STATUS_PORT) & KBDS_BUFFER_FULL; if (f == KBDS_AUX_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdcp(p)->aux, inb(kbdcp(p)->port + KBD_DATA_PORT)); f = inb(kbdcp(p)->port + KBD_STATUS_PORT) & KBDS_BUFFER_FULL; } if (f == KBDS_KBD_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); return inb(kbdcp(p)->port + KBD_DATA_PORT); } return -1; /* no data */ } /* read one byte from the aux device */ int read_aux_data(KBDC p) { if (availq(&kbdcp(p)->aux)) return removeq(&kbdcp(p)->aux); if (!wait_for_aux_data(kbdcp(p))) return -1; /* timeout */ return inb(kbdcp(p)->port + KBD_DATA_PORT); } /* read one byte from the aux device, but return immediately if * no data is waiting */ int read_aux_data_no_wait(KBDC p) { int f; if (availq(&kbdcp(p)->aux)) return removeq(&kbdcp(p)->aux); f = inb(kbdcp(p)->port + KBD_STATUS_PORT) & KBDS_BUFFER_FULL; if (f == KBDS_KBD_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdcp(p)->kbd, inb(kbdcp(p)->port + KBD_DATA_PORT)); f = inb(kbdcp(p)->port + 
KBD_STATUS_PORT) & KBDS_BUFFER_FULL; } if (f == KBDS_AUX_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); return inb(kbdcp(p)->port + KBD_DATA_PORT); } return -1; /* no data */ } /* discard data from the keyboard */ void empty_kbd_buffer(KBDC p, int wait) { int t; int b; int f; #if KBDIO_DEBUG >= 2 int c1 = 0; int c2 = 0; #endif int delta = 2; for (t = wait; t > 0; ) { if ((f = inb(kbdcp(p)->port + KBD_STATUS_PORT)) & KBDS_ANY_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); b = inb(kbdcp(p)->port + KBD_DATA_PORT); if ((f & KBDS_BUFFER_FULL) == KBDS_AUX_BUFFER_FULL) { addq(&kbdcp(p)->aux, b); #if KBDIO_DEBUG >= 2 ++c2; } else { ++c1; #endif } t = wait; } else { t -= delta; } DELAY(delta*1000); } #if KBDIO_DEBUG >= 2 if ((c1 > 0) || (c2 > 0)) log(LOG_DEBUG, "kbdc: %d:%d char read (empty_kbd_buffer)\n", c1, c2); #endif emptyq(&kbdcp(p)->kbd); } /* discard data from the aux device */ void empty_aux_buffer(KBDC p, int wait) { int t; int b; int f; #if KBDIO_DEBUG >= 2 int c1 = 0; int c2 = 0; #endif int delta = 2; for (t = wait; t > 0; ) { if ((f = inb(kbdcp(p)->port + KBD_STATUS_PORT)) & KBDS_ANY_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); b = inb(kbdcp(p)->port + KBD_DATA_PORT); if ((f & KBDS_BUFFER_FULL) == KBDS_KBD_BUFFER_FULL) { addq(&kbdcp(p)->kbd, b); #if KBDIO_DEBUG >= 2 ++c1; } else { ++c2; #endif } t = wait; } else { t -= delta; } DELAY(delta*1000); } #if KBDIO_DEBUG >= 2 if ((c1 > 0) || (c2 > 0)) log(LOG_DEBUG, "kbdc: %d:%d char read (empty_aux_buffer)\n", c1, c2); #endif emptyq(&kbdcp(p)->aux); } /* discard any data from the keyboard or the aux device */ void empty_both_buffers(KBDC p, int wait) { int t; int f; #if KBDIO_DEBUG >= 2 int c1 = 0; int c2 = 0; #endif int delta = 2; for (t = wait; t > 0; ) { if ((f = inb(kbdcp(p)->port + KBD_STATUS_PORT)) & KBDS_ANY_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); (void)inb(kbdcp(p)->port + KBD_DATA_PORT); #if KBDIO_DEBUG >= 2 if ((f & KBDS_BUFFER_FULL) == KBDS_KBD_BUFFER_FULL) ++c1; else ++c2; #endif t = wait; } else { t -= delta; } DELAY(delta*1000); } #if KBDIO_DEBUG >= 2 if ((c1 > 0) || (c2 > 0)) log(LOG_DEBUG, "kbdc: %d:%d char read (empty_both_buffers)\n", c1, c2); #endif emptyq(&kbdcp(p)->kbd); emptyq(&kbdcp(p)->aux); } /* keyboard and mouse device control */ /* NOTE: enable the keyboard port but disable the keyboard * interrupt before calling "reset_kbd()". */ int reset_kbd(KBDC p) { int retry = KBD_MAXRETRY; int again = KBD_MAXWAIT; int c = KBD_RESEND; /* keep the compiler happy */ while (retry-- > 0) { empty_both_buffers(p, 10); if (!write_kbd_command(p, KBDC_RESET_KBD)) continue; emptyq(&kbdcp(p)->kbd); c = read_controller_data(p); if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: RESET_KBD return code:%04x\n", c); if (c == KBD_ACK) /* keyboard has agreed to reset itself... */ break; } if (retry < 0) return FALSE; while (again-- > 0) { /* wait awhile, well, in fact we must wait quite loooooooooooong */ DELAY(KBD_RESETDELAY*1000); c = read_controller_data(p); /* RESET_DONE/RESET_FAIL */ if (c != -1) /* wait again if the controller is not ready */ break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: RESET_KBD status:%04x\n", c); if (c != KBD_RESET_DONE) return FALSE; return TRUE; } /* NOTE: enable the aux port but disable the aux interrupt * before calling `reset_aux_dev()'. 
*/ int reset_aux_dev(KBDC p) { int retry = KBD_MAXRETRY; int again = KBD_MAXWAIT; int c = PSM_RESEND; /* keep the compiler happy */ while (retry-- > 0) { empty_both_buffers(p, 10); if (!write_aux_command(p, PSMC_RESET_DEV)) continue; emptyq(&kbdcp(p)->aux); /* NOTE: Compaq Armada laptops require extra delay here. XXX */ for (again = KBD_MAXWAIT; again > 0; --again) { DELAY(KBD_RESETDELAY*1000); c = read_aux_data_no_wait(p); if (c != -1) break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: RESET_AUX return code:%04x\n", c); if (c == PSM_ACK) /* aux dev is about to reset... */ break; } if (retry < 0) return FALSE; for (again = KBD_MAXWAIT; again > 0; --again) { /* wait awhile, well, quite looooooooooooong */ DELAY(KBD_RESETDELAY*1000); c = read_aux_data_no_wait(p); /* RESET_DONE/RESET_FAIL */ if (c != -1) /* wait again if the controller is not ready */ break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: RESET_AUX status:%04x\n", c); if (c != PSM_RESET_DONE) /* reset status */ return FALSE; c = read_aux_data(p); /* device ID */ if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: RESET_AUX ID:%04x\n", c); /* NOTE: we could check the device ID now, but leave it later... */ return TRUE; } /* controller diagnostics and setup */ int test_controller(KBDC p) { int retry = KBD_MAXRETRY; int again = KBD_MAXWAIT; int c = KBD_DIAG_FAIL; while (retry-- > 0) { empty_both_buffers(p, 10); if (write_controller_command(p, KBDC_DIAGNOSE)) break; } if (retry < 0) return FALSE; emptyq(&kbdcp(p)->kbd); while (again-- > 0) { /* wait awhile */ DELAY(KBD_RESETDELAY*1000); c = read_controller_data(p); /* DIAG_DONE/DIAG_FAIL */ if (c != -1) /* wait again if the controller is not ready */ break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: DIAGNOSE status:%04x\n", c); return (c == KBD_DIAG_DONE); } int test_kbd_port(KBDC p) { int retry = KBD_MAXRETRY; int again = KBD_MAXWAIT; int c = -1; while (retry-- > 0) { empty_both_buffers(p, 10); if (write_controller_command(p, KBDC_TEST_KBD_PORT)) break; } if (retry < 0) return FALSE; emptyq(&kbdcp(p)->kbd); while (again-- > 0) { c = read_controller_data(p); if (c != -1) /* try again if the controller is not ready */ break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: TEST_KBD_PORT status:%04x\n", c); return c; } int test_aux_port(KBDC p) { int retry = KBD_MAXRETRY; int again = KBD_MAXWAIT; int c = -1; while (retry-- > 0) { empty_both_buffers(p, 10); if (write_controller_command(p, KBDC_TEST_AUX_PORT)) break; } if (retry < 0) return FALSE; emptyq(&kbdcp(p)->kbd); while (again-- > 0) { c = read_controller_data(p); if (c != -1) /* try again if the controller is not ready */ break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: TEST_AUX_PORT status:%04x\n", c); return c; } int kbdc_get_device_mask(KBDC p) { return kbdcp(p)->command_mask; } void kbdc_set_device_mask(KBDC p, int mask) { kbdcp(p)->command_mask = mask & (KBD_KBD_CONTROL_BITS | KBD_AUX_CONTROL_BITS); } int get_controller_command_byte(KBDC p) { if (kbdcp(p)->command_byte != -1) return kbdcp(p)->command_byte; if (!write_controller_command(p, KBDC_GET_COMMAND_BYTE)) return -1; emptyq(&kbdcp(p)->kbd); kbdcp(p)->command_byte = read_controller_data(p); return kbdcp(p)->command_byte; } int set_controller_command_byte(KBDC p, int mask, int command) { if (get_controller_command_byte(p) == -1) return FALSE; command = (kbdcp(p)->command_byte & ~mask) | (command & mask); if (command & KBD_DISABLE_KBD_PORT) { if (!write_controller_command(p, KBDC_DISABLE_KBD_PORT)) return FALSE; } if 
(!write_controller_command(p, KBDC_SET_COMMAND_BYTE)) return FALSE; if (!write_controller_data(p, command)) return FALSE; kbdcp(p)->command_byte = command; if (verbose) log(LOG_DEBUG, "kbdc: new command byte:%04x (set_controller...)\n", command); return TRUE; }
MarginC/kame
freebsd3/sys/dev/kbd/atkbdc.c
C
bsd-3-clause
26,073
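The atkbdc.c record above repeats one idiom in every send_* routine: write a byte, wait for the device's reply, retry on KBD_RESEND/PSM_RESEND up to KBD_MAXRETRY times, and bail out on anything else. Below is a minimal, host-runnable sketch of that retry/ACK loop; dev_write()/dev_read() and the constants are illustrative stand-ins, not the driver's real API.

```c
/* Sketch of the write-then-wait-for-ACK retry idiom from atkbdc.c.
 * dev_write()/dev_read() simulate a device that asks for one resend. */
#include <stdio.h>

#define DEV_ACK    0xFA   /* byte accepted */
#define DEV_RESEND 0xFE   /* device asks for retransmission */
#define MAXRETRY   5

static int responses[] = { DEV_RESEND, DEV_ACK };  /* scripted replies */
static int ridx;

static int dev_write(int byte) { (void)byte; return 1; } /* always "succeeds" */
static int dev_read(void) { return responses[ridx++]; }

/* Send one byte, retrying on RESEND, giving up on anything else. */
static int send_with_retry(int byte)
{
    int retry, res = -1;

    for (retry = MAXRETRY; retry > 0; --retry) {
        if (!dev_write(byte))
            continue;             /* transport busy: try again */
        res = dev_read();
        if (res == DEV_ACK)
            break;                /* accepted */
        if (res != DEV_RESEND)
            return res;           /* timeout or hard error: give up */
        /* DEV_RESEND: loop and retransmit the same byte */
    }
    return res;
}

int main(void)
{
    printf("result: %#x\n", send_with_retry(0xF4)); /* prints 0xfa */
    return 0;
}
```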
/* $OpenBSD: isp_sbus.c,v 1.7 1999/03/25 22:58:37 mjacob Exp $ */ /* release_03_25_99 */ /* * SBus specific probe and attach routines for Qlogic ISP SCSI adapters. * * Copyright (c) 1997 by Matthew Jacob * NASA AMES Research Center * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include <sys/param.h> #include <sys/systm.h> #include <sys/device.h> #include <sys/kernel.h> #include <sys/malloc.h> #include <sys/queue.h> #include <machine/autoconf.h> #include <machine/cpu.h> #include <machine/param.h> #include <machine/vmparam.h> #include <sparc/sparc/cpuvar.h> #include <dev/ic/isp_openbsd.h> #include <dev/microcode/isp/asm_sbus.h> static u_int16_t isp_sbus_rd_reg __P((struct ispsoftc *, int)); static void isp_sbus_wr_reg __P((struct ispsoftc *, int, u_int16_t)); static int isp_sbus_mbxdma __P((struct ispsoftc *)); static int isp_sbus_dmasetup __P((struct ispsoftc *, struct scsi_xfer *, ispreq_t *, u_int8_t *, u_int8_t)); static void isp_sbus_dmateardown __P((struct ispsoftc *, struct scsi_xfer *, u_int32_t)); static struct ispmdvec mdvec = { isp_sbus_rd_reg, isp_sbus_wr_reg, isp_sbus_mbxdma, isp_sbus_dmasetup, isp_sbus_dmateardown, NULL, NULL, NULL, ISP_RISC_CODE, ISP_CODE_LENGTH, ISP_CODE_ORG, ISP_CODE_VERSION, BIU_BURST_ENABLE, 0 }; struct isp_sbussoftc { struct ispsoftc sbus_isp; sdparam sbus_dev; struct intrhand sbus_ih; volatile u_char *sbus_reg; int sbus_node; int sbus_pri; struct ispmdvec sbus_mdvec; vm_offset_t sbus_kdma_allocs[MAXISPREQUEST]; int16_t sbus_poff[_NREG_BLKS]; }; static int isp_match __P((struct device *, void *, void *)); static void isp_sbus_attach __P((struct device *, struct device *, void *)); struct cfattach isp_sbus_ca = { sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach }; static int isp_match(parent, cfarg, aux) struct device *parent; void *cfarg; void *aux; { int rv; struct cfdata *cf = cfarg; #ifdef DEBUG static int oneshot = 1; #endif struct confargs *ca = aux; register struct romaux *ra = &ca->ca_ra; rv = (strcmp(cf->cf_driver->cd_name, ra->ra_name) == 0 || strcmp("PTI,ptisp", ra->ra_name) == 0 || strcmp("ptisp", ra->ra_name) == 
0 || strcmp("SUNW,isp", ra->ra_name) == 0 || strcmp("QLGC,isp", ra->ra_name) == 0); if (rv == 0) return (rv); #ifdef DEBUG if (rv && oneshot) { oneshot = 0; printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version " "%d.%d Core Version %d.%d\n", ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); } #endif if (ca->ca_bustype == BUS_SBUS) return (1); ra->ra_len = NBPG; return (probeget(ra->ra_vaddr, 1) != -1); } static void isp_sbus_attach(parent, self, aux) struct device *parent, *self; void *aux; { int freq; struct confargs *ca = aux; struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self; struct ispsoftc *isp = &sbc->sbus_isp; ISP_LOCKVAL_DECL; if (ca->ca_ra.ra_nintr != 1) { printf(": expected 1 interrupt, got %d\n", ca->ca_ra.ra_nintr); return; } printf("\n"); sbc->sbus_pri = ca->ca_ra.ra_intr[0].int_pri; sbc->sbus_mdvec = mdvec; if (ca->ca_ra.ra_vaddr) { sbc->sbus_reg = (volatile u_char *) ca->ca_ra.ra_vaddr; } else { sbc->sbus_reg = (volatile u_char *) mapiodev(ca->ca_ra.ra_reg, 0, ca->ca_ra.ra_len); } sbc->sbus_node = ca->ca_ra.ra_node; freq = getpropint(ca->ca_ra.ra_node, "clock-frequency", 0); if (freq) { /* * Convert from HZ to MHz, rounding up. */ freq = (freq + 500000)/1000000; #if 0 printf("%s: %d MHz\n", self->dv_xname, freq); #endif } sbc->sbus_mdvec.dv_clock = freq; /* * XXX: Now figure out what the proper burst sizes, etc., to use. */ sbc->sbus_mdvec.dv_conf1 |= BIU_SBUS_CONF1_FIFO_8; /* * Some early versions of the PTI SBus adapter * would fail in trying to download (via poking) * FW. We give up on them. */ if (strcmp("PTI,ptisp", ca->ca_ra.ra_name) == 0 || strcmp("ptisp", ca->ca_ra.ra_name) == 0) { sbc->sbus_mdvec.dv_fwlen = 0; } isp->isp_mdvec = &sbc->sbus_mdvec; isp->isp_bustype = ISP_BT_SBUS; isp->isp_type = ISP_HA_SCSI_UNKNOWN; isp->isp_param = &sbc->sbus_dev; bzero(isp->isp_param, sizeof (sdparam)); sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF; sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF; sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF; sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; /* Establish interrupt channel */ sbc->sbus_ih.ih_fun = (void *) isp_intr; sbc->sbus_ih.ih_arg = sbc; intr_establish(sbc->sbus_pri, &sbc->sbus_ih); ISP_LOCK(isp); isp_reset(isp); if (isp->isp_state != ISP_RESETSTATE) { ISP_UNLOCK(isp); return; } isp_init(isp); if (isp->isp_state != ISP_INITSTATE) { isp_uninit(isp); ISP_UNLOCK(isp); return; } /* * do generic attach. */ isp_attach(isp); if (isp->isp_state != ISP_RUNSTATE) { isp_uninit(isp); } ISP_UNLOCK(isp); } static u_int16_t isp_sbus_rd_reg(isp, regoff) struct ispsoftc *isp; int regoff; { struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp; int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; offset += (regoff & 0xff); return (*((u_int16_t *) &sbc->sbus_reg[offset])); } static void isp_sbus_wr_reg (isp, regoff, val) struct ispsoftc *isp; int regoff; u_int16_t val; { struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp; int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; offset += (regoff & 0xff); *((u_int16_t *) &sbc->sbus_reg[offset]) = val; } static int isp_sbus_mbxdma(isp) struct ispsoftc *isp; { size_t len; /* * NOTE: Since most Sun machines aren't I/O coherent, * map the mailboxes through kdvma space to force them * to be uncached. */ /* * Allocate and map the request queue. 
*/ len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN); isp->isp_rquest = (volatile caddr_t)malloc(len, M_DEVBUF, M_NOWAIT); if (isp->isp_rquest == 0) return (1); isp->isp_rquest_dma = (u_int32_t)kdvma_mapin((caddr_t)isp->isp_rquest, len, 0); if (isp->isp_rquest_dma == 0) return (1); /* * Allocate and map the result queue. */ len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN); isp->isp_result = (volatile caddr_t)malloc(len, M_DEVBUF, M_NOWAIT); if (isp->isp_result == 0) return (1); isp->isp_result_dma = (u_int32_t)kdvma_mapin((caddr_t)isp->isp_result, len, 0); if (isp->isp_result_dma == 0) return (1); return (0); } /* * TODO: If kdvma_mapin fails, try using multiple smaller chunks.. */ static int isp_sbus_dmasetup(isp, xs, rq, iptrp, optr) struct ispsoftc *isp; struct scsi_xfer *xs; ispreq_t *rq; u_int8_t *iptrp; u_int8_t optr; { struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp; vm_offset_t kdvma; int dosleep = (xs->flags & SCSI_NOSLEEP) != 0; if (xs->datalen == 0) { rq->req_seg_count = 1; return (CMD_QUEUED); } if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) { panic("%s: bad handle (%d) in isp_sbus_dmasetup\n", isp->isp_name, rq->req_handle); /* NOTREACHED */ } if (CPU_ISSUN4M) { kdvma = (vm_offset_t) kdvma_mapin((caddr_t)xs->data, xs->datalen, dosleep); if (kdvma == (vm_offset_t) 0) { XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } } else { kdvma = (vm_offset_t) xs->data; } if (sbc->sbus_kdma_allocs[rq->req_handle - 1] != (vm_offset_t) 0) { panic("%s: kdma handle already allocated\n", isp->isp_name); /* NOTREACHED */ } sbc->sbus_kdma_allocs[rq->req_handle - 1] = kdvma; if (xs->flags & SCSI_DATA_IN) { rq->req_flags |= REQFLAG_DATA_IN; } else { rq->req_flags |= REQFLAG_DATA_OUT; } rq->req_dataseg[0].ds_count = xs->datalen; rq->req_dataseg[0].ds_base = (u_int32_t) kdvma; rq->req_seg_count = 1; return (CMD_QUEUED); } static void isp_sbus_dmateardown(isp, xs, handle) struct ispsoftc *isp; struct scsi_xfer *xs; u_int32_t handle; { struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp; vm_offset_t kdvma; if (xs->flags & SCSI_DATA_IN) { cpuinfo.cache_flush(xs->data, xs->datalen - xs->resid); } if (handle >= RQUEST_QUEUE_LEN) { panic("%s: bad handle (%d) in isp_sbus_dmateardown\n", isp->isp_name, handle); /* NOTREACHED */ } if (sbc->sbus_kdma_allocs[handle] == (vm_offset_t) 0) { panic("%s: kdma handle not already allocated\n", isp->isp_name); /* NOTREACHED */ } kdvma = sbc->sbus_kdma_allocs[handle]; sbc->sbus_kdma_allocs[handle] = (vm_offset_t) 0; if (CPU_ISSUN4M) { dvma_mapout(kdvma, (vm_offset_t) xs->data, xs->datalen); } }
MarginC/kame
openbsd/sys/arch/sparc/dev/isp_sbus.c
C
bsd-3-clause
10,106
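A detail worth noting in the isp_sbus.c record: register access never uses raw offsets. isp_sbus_rd_reg()/isp_sbus_wr_reg() decode a block id from the high bits of the logical register offset and add a per-attachment base looked up in sbus_poff[]. The sketch below reproduces just that translation; the block bases and the shift/mask values are made up for illustration (the real _BLK_REG_SHFT/_BLK_REG_MASK live in the ISP headers).

```c
/* Sketch of the block-offset translation in isp_sbus_rd_reg()/wr_reg():
 * a logical offset encodes a block id in its high bits and a byte offset
 * in its low 8 bits; a per-bus table maps each block to a physical base. */
#include <stdio.h>
#include <stdint.h>

#define BLK_REG_SHFT 8
#define BLK_REG_MASK 0x0300         /* two illustrative block-id bits */

enum { BIU_BLOCK, MBOX_BLOCK, SXP_BLOCK, RISC_BLOCK, NBLOCKS };

/* Hypothetical physical bases for this bus attachment. */
static const int16_t poff[NBLOCKS] = { 0x000, 0x080, 0x200, 0x400 };

static unsigned translate(unsigned regoff)
{
    unsigned blk = (regoff & BLK_REG_MASK) >> BLK_REG_SHFT;
    return poff[blk] + (regoff & 0xff);
}

int main(void)
{
    /* "mailbox register 4" = block 1, byte offset 4 -> 0x84 here */
    unsigned regoff = (MBOX_BLOCK << BLK_REG_SHFT) | 4;
    printf("physical offset: %#x\n", translate(regoff));
    return 0;
}
```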
/** * Copyright (c) 2019, Łukasz Marcin Podkalicki <lpodkalicki@gmail.com> * ESP8266/005 * Blinky example using pure ESP8266 Non-OS SDK. */ #include "ets_sys.h" #include "osapi.h" #include "gpio.h" #include "os_type.h" #include "user_config.h" #define LED_PIN (2) static volatile os_timer_t blinky_timer; static void blinky_timer_handler(void *prv); void ICACHE_FLASH_ATTR user_init() { /* setup */ gpio_init(); // init GPIO subsystem gpio_output_set(0, 0, (1 << LED_PIN), 0); // set LED pin as output with low state uart_div_modify(0, UART_CLK_FREQ / 115200); // set UART baudrate os_printf("\n\nSDK version:%s\n\n", system_get_sdk_version()); /* start timer (500ms) */ os_timer_setfn(&blinky_timer, (os_timer_func_t *)blinky_timer_handler, NULL); os_timer_arm(&blinky_timer, 500, 1); } void blinky_timer_handler(void *prv) { if (GPIO_REG_READ(GPIO_OUT_ADDRESS) & (1 << LED_PIN)) { gpio_output_set(0, (1 << LED_PIN), 0, 0); // LED off } else { gpio_output_set((1 << LED_PIN), 0, 0, 0); // LED on } }
lpodkalicki/blog
esp8266/005_nonos_sdk_blinky/user/main.c
C
bsd-3-clause
1,066
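The blinky record's timer handler is a read-modify-toggle of one output bit. The same logic can be exercised on a host against a fake register, as in this sketch; the register variable and pin number are stand-ins, not SDK symbols.

```c
/* Host-testable sketch of the read-modify-toggle done by
 * blinky_timer_handler(), using a fake output register. */
#include <stdio.h>
#include <stdint.h>

#define LED_PIN 2

static uint32_t gpio_out;  /* fake GPIO output register */

static void toggle_led(void)
{
    if (gpio_out & (1u << LED_PIN))
        gpio_out &= ~(1u << LED_PIN);   /* LED off */
    else
        gpio_out |= (1u << LED_PIN);    /* LED on */
}

int main(void)
{
    for (int i = 0; i < 4; i++) {       /* four simulated timer ticks */
        toggle_led();
        printf("tick %d: LED %s\n", i,
            (gpio_out >> LED_PIN) & 1 ? "on" : "off");
    }
    return 0;
}
```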
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE78_OS_Command_Injection__char_environment_w32_spawnv_61b.c Label Definition File: CWE78_OS_Command_Injection.strings.label.xml Template File: sources-sink-61b.tmpl.c */ /* * @description * CWE: 78 OS Command Injection * BadSource: environment Read input from an environment variable * GoodSource: Fixed string * Sinks: w32_spawnv * BadSink : execute command with spawnv * Flow Variant: 61 Data flow: data returned from one function to another in different source files * * */ #include "std_testcase.h" #include <wchar.h> #ifdef _WIN32 #define COMMAND_INT_PATH "%WINDIR%\\system32\\cmd.exe" #define COMMAND_INT "cmd.exe" #define COMMAND_ARG1 "/c" #define COMMAND_ARG2 "dir " #define COMMAND_ARG3 data #else /* NOT _WIN32 */ #include <unistd.h> #define COMMAND_INT_PATH "/bin/sh" #define COMMAND_INT "sh" #define COMMAND_ARG1 "-c" #define COMMAND_ARG2 "ls " #define COMMAND_ARG3 data #endif #define ENV_VARIABLE "ADD" #ifdef _WIN32 #define GETENV getenv #else #define GETENV getenv #endif #include <process.h> #ifndef OMITBAD char * CWE78_OS_Command_Injection__char_environment_w32_spawnv_61b_badSource(char * data) { { /* Append input from an environment variable to data */ size_t dataLen = strlen(data); char * environment = GETENV(ENV_VARIABLE); /* If there is data in the environment variable */ if (environment != NULL) { /* POTENTIAL FLAW: Read data from an environment variable */ strncat(data+dataLen, environment, 100-dataLen-1); } } return data; } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B() uses the GoodSource with the BadSink */ char * CWE78_OS_Command_Injection__char_environment_w32_spawnv_61b_goodG2BSource(char * data) { /* FIX: Append a fixed string to data (not user / external input) */ strcat(data, "*.*"); return data; } #endif /* OMITGOOD */
JianpingZeng/xcc
xcc/test/juliet/testcases/CWE78_OS_Command_Injection/s03/CWE78_OS_Command_Injection__char_environment_w32_spawnv_61b.c
C
bsd-3-clause
2,024
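The flaw in the CWE78 record above is that getenv() data is appended verbatim to a string that later reaches a command interpreter, so a value such as `& del *` (or `; rm -rf ~` on POSIX) becomes part of the command line. A common mitigation, not part of the Juliet suite itself, is to validate the value against an allowlist before appending; below is a minimal sketch with a hypothetical is_safe_arg() check.

```c
/* Sketch of allowlist validation before appending ENV data to a
 * command string. is_safe_arg() is illustrative, not Juliet code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>

static int is_safe_arg(const char *s)
{
    for (; *s; s++)                       /* allow only [A-Za-z0-9._-] */
        if (!isalnum((unsigned char)*s) && !strchr("._-", *s))
            return 0;
    return 1;
}

int main(void)
{
    char cmd[100] = "ls ";
    const char *env = getenv("ADD");

    if (env != NULL && is_safe_arg(env))  /* append only vetted input */
        strncat(cmd, env, sizeof(cmd) - strlen(cmd) - 1);
    printf("would run: %s\n", cmd);
    return 0;
}
```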
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE194_Unexpected_Sign_Extension__fgets_malloc_03.c Label Definition File: CWE194_Unexpected_Sign_Extension.label.xml Template File: sources-sink-03.tmpl.c */ /* * @description * CWE: 194 Unexpected Sign Extension * BadSource: fgets Read data from the console using fgets() * GoodSource: Positive integer * Sink: malloc * BadSink : Allocate memory using malloc() with the size of data * Flow Variant: 03 Control flow: if(5==5) and if(5!=5) * * */ #include "std_testcase.h" /* Must be at least 8 for atoi() to work properly */ #define CHAR_ARRAY_SIZE 8 #ifndef OMITBAD void CWE194_Unexpected_Sign_Extension__fgets_malloc_03_bad() { short data; /* Initialize data */ data = 0; if(5==5) { { char inputBuffer[CHAR_ARRAY_SIZE] = ""; /* FLAW: Use a value input from the console using fgets() */ if (fgets(inputBuffer, CHAR_ARRAY_SIZE, stdin) != NULL) { /* Convert to short */ data = (short)atoi(inputBuffer); } else { printLine("fgets() failed."); } } } /* Assume we want to allocate a relatively small buffer */ if (data < 100) { /* POTENTIAL FLAW: malloc() takes a size_t (unsigned int) as input and therefore if it is negative, * the conversion will cause malloc() to allocate a very large amount of data or fail */ char * dataBuffer = (char *)malloc(data); if (dataBuffer == NULL) {exit(-1);} /* Do something with dataBuffer */ memset(dataBuffer, 'A', data-1); dataBuffer[data-1] = '\0'; printLine(dataBuffer); free(dataBuffer); } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B1() - use goodsource and badsink by changing the 5==5 to 5!=5 */ static void goodG2B1() { short data; /* Initialize data */ data = 0; if(5!=5) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ printLine("Benign, fixed string"); } else { /* FIX: Use a positive integer less than &InitialDataSize&*/ data = 100-1; } /* Assume we want to allocate a relatively small buffer */ if (data < 100) { /* POTENTIAL FLAW: malloc() takes a size_t (unsigned int) as input and therefore if it is negative, * the conversion will cause malloc() to allocate a very large amount of data or fail */ char * dataBuffer = (char *)malloc(data); if (dataBuffer == NULL) {exit(-1);} /* Do something with dataBuffer */ memset(dataBuffer, 'A', data-1); dataBuffer[data-1] = '\0'; printLine(dataBuffer); free(dataBuffer); } } /* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */ static void goodG2B2() { short data; /* Initialize data */ data = 0; if(5==5) { /* FIX: Use a positive integer less than &InitialDataSize&*/ data = 100-1; } /* Assume we want to allocate a relatively small buffer */ if (data < 100) { /* POTENTIAL FLAW: malloc() takes a size_t (unsigned int) as input and therefore if it is negative, * the conversion will cause malloc() to allocate a very large amount of data or fail */ char * dataBuffer = (char *)malloc(data); if (dataBuffer == NULL) {exit(-1);} /* Do something with dataBuffer */ memset(dataBuffer, 'A', data-1); dataBuffer[data-1] = '\0'; printLine(dataBuffer); free(dataBuffer); } } void CWE194_Unexpected_Sign_Extension__fgets_malloc_03_good() { goodG2B1(); goodG2B2(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. 
*/ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE194_Unexpected_Sign_Extension__fgets_malloc_03_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE194_Unexpected_Sign_Extension__fgets_malloc_03_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
JianpingZeng/xcc
xcc/test/juliet/testcases/CWE194_Unexpected_Sign_Extension/s01/CWE194_Unexpected_Sign_Extension__fgets_malloc_03.c
C
bsd-3-clause
4,693
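The CWE194 record turns on a single conversion: a negative short passes the `data < 100` check, but malloc() takes a size_t, so sign extension turns -2 into an enormous allocation request. A standalone demonstration of that arithmetic, plus the obvious guard:

```c
/* Demonstrates the sign-extension hazard behind the CWE194 testcase:
 * a negative short converted to size_t becomes a huge value. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    short data = -2;                      /* e.g. user typed "-2" */

    /* data < 100 passes the "small buffer" check... */
    if (data < 100)
        printf("as size_t: %zu\n", (size_t)data);  /* ~SIZE_MAX - 1 */

    /* safe version: reject non-positive lengths before converting */
    if (data > 0) {
        char *p = malloc((size_t)data);
        free(p);
    } else {
        printf("rejected negative length\n");
    }
    return 0;
}
```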
/*- * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD: releng/9.3/sys/net80211/ieee80211_ioctl.c 234753 2012-04-28 09:15:01Z dim $"); /* * IEEE 802.11 ioctl support (FreeBSD-specific) */ #include "opt_inet.h" #include "opt_ipx.h" #include "opt_wlan.h" #include <sys/endian.h> #include <sys/param.h> #include <sys/kernel.h> #include <sys/priv.h> #include <sys/socket.h> #include <sys/sockio.h> #include <sys/systm.h> #include <net/if.h> #include <net/if_dl.h> #include <net/if_media.h> #include <net/ethernet.h> #ifdef INET #include <netinet/in.h> #include <netinet/if_ether.h> #endif #ifdef IPX #include <netipx/ipx.h> #include <netipx/ipx_if.h> #endif #include <net80211/ieee80211_var.h> #include <net80211/ieee80211_ioctl.h> #include <net80211/ieee80211_regdomain.h> #include <net80211/ieee80211_input.h> #define IS_UP_AUTO(_vap) \ (IFNET_IS_UP_RUNNING((_vap)->iv_ifp) && \ (_vap)->iv_roaming == IEEE80211_ROAMING_AUTO) static const uint8_t zerobssid[IEEE80211_ADDR_LEN]; static struct ieee80211_channel *findchannel(struct ieee80211com *, int ieee, int mode); static int ieee80211_scanreq(struct ieee80211vap *, struct ieee80211_scan_req *); static __noinline int ieee80211_ioctl_getkey(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; struct ieee80211req_key ik; struct ieee80211_key *wk; const struct ieee80211_cipher *cip; u_int kid; int error; if (ireq->i_len != sizeof(ik)) return EINVAL; error = copyin(ireq->i_data, &ik, sizeof(ik)); if (error) return error; kid = ik.ik_keyix; if (kid == IEEE80211_KEYIX_NONE) { ni = ieee80211_find_vap_node(&ic->ic_sta, vap, ik.ik_macaddr); if (ni == NULL) return ENOENT; wk = &ni->ni_ucastkey; } else { if (kid >= IEEE80211_WEP_NKID) return EINVAL; wk = &vap->iv_nw_keys[kid]; IEEE80211_ADDR_COPY(&ik.ik_macaddr, vap->iv_bss->ni_macaddr); ni = NULL; } cip = wk->wk_cipher; ik.ik_type = cip->ic_cipher; ik.ik_keylen = wk->wk_keylen; ik.ik_flags = wk->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV); if (wk->wk_keyix == vap->iv_def_txkey) ik.ik_flags |= IEEE80211_KEY_DEFAULT; if (priv_check(curthread, PRIV_NET80211_GETKEY) == 0) { /* NB: only root can read key data */ ik.ik_keyrsc = 
wk->wk_keyrsc[IEEE80211_NONQOS_TID]; ik.ik_keytsc = wk->wk_keytsc; memcpy(ik.ik_keydata, wk->wk_key, wk->wk_keylen); if (cip->ic_cipher == IEEE80211_CIPHER_TKIP) { memcpy(ik.ik_keydata+wk->wk_keylen, wk->wk_key + IEEE80211_KEYBUF_SIZE, IEEE80211_MICBUF_SIZE); ik.ik_keylen += IEEE80211_MICBUF_SIZE; } } else { ik.ik_keyrsc = 0; ik.ik_keytsc = 0; memset(ik.ik_keydata, 0, sizeof(ik.ik_keydata)); } if (ni != NULL) ieee80211_free_node(ni); return copyout(&ik, ireq->i_data, sizeof(ik)); } static __noinline int ieee80211_ioctl_getchanlist(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; if (sizeof(ic->ic_chan_active) < ireq->i_len) ireq->i_len = sizeof(ic->ic_chan_active); return copyout(&ic->ic_chan_active, ireq->i_data, ireq->i_len); } static __noinline int ieee80211_ioctl_getchaninfo(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; uint32_t space; space = __offsetof(struct ieee80211req_chaninfo, ic_chans[ic->ic_nchans]); if (space > ireq->i_len) space = ireq->i_len; /* XXX assumes compatible layout */ return copyout(&ic->ic_nchans, ireq->i_data, space); } static __noinline int ieee80211_ioctl_getwpaie(struct ieee80211vap *vap, struct ieee80211req *ireq, int req) { struct ieee80211_node *ni; struct ieee80211req_wpaie2 wpaie; int error; if (ireq->i_len < IEEE80211_ADDR_LEN) return EINVAL; error = copyin(ireq->i_data, wpaie.wpa_macaddr, IEEE80211_ADDR_LEN); if (error != 0) return error; ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, wpaie.wpa_macaddr); if (ni == NULL) return ENOENT; memset(wpaie.wpa_ie, 0, sizeof(wpaie.wpa_ie)); if (ni->ni_ies.wpa_ie != NULL) { int ielen = ni->ni_ies.wpa_ie[1] + 2; if (ielen > sizeof(wpaie.wpa_ie)) ielen = sizeof(wpaie.wpa_ie); memcpy(wpaie.wpa_ie, ni->ni_ies.wpa_ie, ielen); } if (req == IEEE80211_IOC_WPAIE2) { memset(wpaie.rsn_ie, 0, sizeof(wpaie.rsn_ie)); if (ni->ni_ies.rsn_ie != NULL) { int ielen = ni->ni_ies.rsn_ie[1] + 2; if (ielen > sizeof(wpaie.rsn_ie)) ielen = sizeof(wpaie.rsn_ie); memcpy(wpaie.rsn_ie, ni->ni_ies.rsn_ie, ielen); } if (ireq->i_len > sizeof(struct ieee80211req_wpaie2)) ireq->i_len = sizeof(struct ieee80211req_wpaie2); } else { /* compatibility op, may overwrite wpa ie */ /* XXX check ic_flags? 
*/ if (ni->ni_ies.rsn_ie != NULL) { int ielen = ni->ni_ies.rsn_ie[1] + 2; if (ielen > sizeof(wpaie.wpa_ie)) ielen = sizeof(wpaie.wpa_ie); memcpy(wpaie.wpa_ie, ni->ni_ies.rsn_ie, ielen); } if (ireq->i_len > sizeof(struct ieee80211req_wpaie)) ireq->i_len = sizeof(struct ieee80211req_wpaie); } ieee80211_free_node(ni); return copyout(&wpaie, ireq->i_data, ireq->i_len); } static __noinline int ieee80211_ioctl_getstastats(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; uint8_t macaddr[IEEE80211_ADDR_LEN]; const size_t off = __offsetof(struct ieee80211req_sta_stats, is_stats); int error; if (ireq->i_len < off) return EINVAL; error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN); if (error != 0) return error; ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr); if (ni == NULL) return ENOENT; if (ireq->i_len > sizeof(struct ieee80211req_sta_stats)) ireq->i_len = sizeof(struct ieee80211req_sta_stats); /* NB: copy out only the statistics */ error = copyout(&ni->ni_stats, (uint8_t *) ireq->i_data + off, ireq->i_len - off); ieee80211_free_node(ni); return error; } struct scanreq { struct ieee80211req_scan_result *sr; size_t space; }; static size_t scan_space(const struct ieee80211_scan_entry *se, int *ielen) { size_t len; *ielen = se->se_ies.len; /* * NB: ie's can be no more than 255 bytes and the max 802.11 * packet is <3Kbytes so we are sure this doesn't overflow * 16-bits; if this is a concern we can drop the ie's. */ len = sizeof(struct ieee80211req_scan_result) + se->se_ssid[1] + se->se_meshid[1] + *ielen; return roundup(len, sizeof(uint32_t)); } static void get_scan_space(void *arg, const struct ieee80211_scan_entry *se) { struct scanreq *req = arg; int ielen; req->space += scan_space(se, &ielen); } static __noinline void get_scan_result(void *arg, const struct ieee80211_scan_entry *se) { struct scanreq *req = arg; struct ieee80211req_scan_result *sr; int ielen, len, nr, nxr; uint8_t *cp; len = scan_space(se, &ielen); if (len > req->space) return; sr = req->sr; KASSERT(len <= 65535 && ielen <= 65535, ("len %u ssid %u ie %u", len, se->se_ssid[1], ielen)); sr->isr_len = len; sr->isr_ie_off = sizeof(struct ieee80211req_scan_result); sr->isr_ie_len = ielen; sr->isr_freq = se->se_chan->ic_freq; sr->isr_flags = se->se_chan->ic_flags; sr->isr_rssi = se->se_rssi; sr->isr_noise = se->se_noise; sr->isr_intval = se->se_intval; sr->isr_capinfo = se->se_capinfo; sr->isr_erp = se->se_erp; IEEE80211_ADDR_COPY(sr->isr_bssid, se->se_bssid); nr = min(se->se_rates[1], IEEE80211_RATE_MAXSIZE); memcpy(sr->isr_rates, se->se_rates+2, nr); nxr = min(se->se_xrates[1], IEEE80211_RATE_MAXSIZE - nr); memcpy(sr->isr_rates+nr, se->se_xrates+2, nxr); sr->isr_nrates = nr + nxr; /* copy SSID */ sr->isr_ssid_len = se->se_ssid[1]; cp = ((uint8_t *)sr) + sr->isr_ie_off; memcpy(cp, se->se_ssid+2, sr->isr_ssid_len); /* copy mesh id */ cp += sr->isr_ssid_len; sr->isr_meshid_len = se->se_meshid[1]; memcpy(cp, se->se_meshid+2, sr->isr_meshid_len); cp += sr->isr_meshid_len; if (ielen) memcpy(cp, se->se_ies.data, ielen); req->space -= len; req->sr = (struct ieee80211req_scan_result *)(((uint8_t *)sr) + len); } static __noinline int ieee80211_ioctl_getscanresults(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct scanreq req; int error; if (ireq->i_len < sizeof(struct scanreq)) return EFAULT; error = 0; req.space = 0; ieee80211_scan_iterate(vap, get_scan_space, &req); if (req.space > ireq->i_len) req.space = ireq->i_len; if (req.space > 0) { uint32_t space; void *p; space = 
req.space; /* XXX M_WAITOK after driver lock released */ p = malloc(space, M_TEMP, M_NOWAIT | M_ZERO); if (p == NULL) return ENOMEM; req.sr = p; ieee80211_scan_iterate(vap, get_scan_result, &req); ireq->i_len = space - req.space; error = copyout(p, ireq->i_data, ireq->i_len); free(p, M_TEMP); } else ireq->i_len = 0; return error; } struct stainforeq { struct ieee80211vap *vap; struct ieee80211req_sta_info *si; size_t space; }; static size_t sta_space(const struct ieee80211_node *ni, size_t *ielen) { *ielen = ni->ni_ies.len; return roundup(sizeof(struct ieee80211req_sta_info) + *ielen, sizeof(uint32_t)); } static void get_sta_space(void *arg, struct ieee80211_node *ni) { struct stainforeq *req = arg; size_t ielen; if (req->vap != ni->ni_vap) return; if (ni->ni_vap->iv_opmode == IEEE80211_M_HOSTAP && ni->ni_associd == 0) /* only associated stations */ return; req->space += sta_space(ni, &ielen); } static __noinline void get_sta_info(void *arg, struct ieee80211_node *ni) { struct stainforeq *req = arg; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211req_sta_info *si; size_t ielen, len; uint8_t *cp; if (req->vap != ni->ni_vap) return; if (vap->iv_opmode == IEEE80211_M_HOSTAP && ni->ni_associd == 0) /* only associated stations */ return; if (ni->ni_chan == IEEE80211_CHAN_ANYC) /* XXX bogus entry */ return; len = sta_space(ni, &ielen); if (len > req->space) return; si = req->si; si->isi_len = len; si->isi_ie_off = sizeof(struct ieee80211req_sta_info); si->isi_ie_len = ielen; si->isi_freq = ni->ni_chan->ic_freq; si->isi_flags = ni->ni_chan->ic_flags; si->isi_state = ni->ni_flags; si->isi_authmode = ni->ni_authmode; vap->iv_ic->ic_node_getsignal(ni, &si->isi_rssi, &si->isi_noise); vap->iv_ic->ic_node_getmimoinfo(ni, &si->isi_mimo); si->isi_capinfo = ni->ni_capinfo; si->isi_erp = ni->ni_erp; IEEE80211_ADDR_COPY(si->isi_macaddr, ni->ni_macaddr); si->isi_nrates = ni->ni_rates.rs_nrates; if (si->isi_nrates > 15) si->isi_nrates = 15; memcpy(si->isi_rates, ni->ni_rates.rs_rates, si->isi_nrates); si->isi_txrate = ni->ni_txrate; if (si->isi_txrate & IEEE80211_RATE_MCS) { const struct ieee80211_mcs_rates *mcs = &ieee80211_htrates[ni->ni_txrate &~ IEEE80211_RATE_MCS]; if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { if (ni->ni_flags & IEEE80211_NODE_SGI40) si->isi_txmbps = mcs->ht40_rate_800ns; else si->isi_txmbps = mcs->ht40_rate_400ns; } else { if (ni->ni_flags & IEEE80211_NODE_SGI20) si->isi_txmbps = mcs->ht20_rate_800ns; else si->isi_txmbps = mcs->ht20_rate_400ns; } } else si->isi_txmbps = si->isi_txrate; si->isi_associd = ni->ni_associd; si->isi_txpower = ni->ni_txpower; si->isi_vlan = ni->ni_vlan; if (ni->ni_flags & IEEE80211_NODE_QOS) { memcpy(si->isi_txseqs, ni->ni_txseqs, sizeof(ni->ni_txseqs)); memcpy(si->isi_rxseqs, ni->ni_rxseqs, sizeof(ni->ni_rxseqs)); } else { si->isi_txseqs[0] = ni->ni_txseqs[IEEE80211_NONQOS_TID]; si->isi_rxseqs[0] = ni->ni_rxseqs[IEEE80211_NONQOS_TID]; } /* NB: leave all cases in case we relax ni_associd == 0 check */ if (ieee80211_node_is_authorized(ni)) si->isi_inact = vap->iv_inact_run; else if (ni->ni_associd != 0 || (vap->iv_opmode == IEEE80211_M_WDS && (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY))) si->isi_inact = vap->iv_inact_auth; else si->isi_inact = vap->iv_inact_init; si->isi_inact = (si->isi_inact - ni->ni_inact) * IEEE80211_INACT_WAIT; si->isi_localid = ni->ni_mllid; si->isi_peerid = ni->ni_mlpid; si->isi_peerstate = ni->ni_mlstate; if (ielen) { cp = ((uint8_t *)si) + si->isi_ie_off; memcpy(cp, ni->ni_ies.data, ielen); } req->si = (struct 
ieee80211req_sta_info *)(((uint8_t *)si) + len); req->space -= len; } static __noinline int getstainfo_common(struct ieee80211vap *vap, struct ieee80211req *ireq, struct ieee80211_node *ni, size_t off) { struct ieee80211com *ic = vap->iv_ic; struct stainforeq req; size_t space; void *p; int error; error = 0; req.space = 0; req.vap = vap; if (ni == NULL) ieee80211_iterate_nodes(&ic->ic_sta, get_sta_space, &req); else get_sta_space(&req, ni); if (req.space > ireq->i_len) req.space = ireq->i_len; if (req.space > 0) { space = req.space; /* XXX M_WAITOK after driver lock released */ p = malloc(space, M_TEMP, M_NOWAIT | M_ZERO); if (p == NULL) { error = ENOMEM; goto bad; } req.si = p; if (ni == NULL) ieee80211_iterate_nodes(&ic->ic_sta, get_sta_info, &req); else get_sta_info(&req, ni); ireq->i_len = space - req.space; error = copyout(p, (uint8_t *) ireq->i_data+off, ireq->i_len); free(p, M_TEMP); } else ireq->i_len = 0; bad: if (ni != NULL) ieee80211_free_node(ni); return error; } static __noinline int ieee80211_ioctl_getstainfo(struct ieee80211vap *vap, struct ieee80211req *ireq) { uint8_t macaddr[IEEE80211_ADDR_LEN]; const size_t off = __offsetof(struct ieee80211req_sta_req, info); struct ieee80211_node *ni; int error; if (ireq->i_len < sizeof(struct ieee80211req_sta_req)) return EFAULT; error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN); if (error != 0) return error; if (IEEE80211_ADDR_EQ(macaddr, vap->iv_ifp->if_broadcastaddr)) { ni = NULL; } else { ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr); if (ni == NULL) return ENOENT; } return getstainfo_common(vap, ireq, ni, off); } static __noinline int ieee80211_ioctl_getstatxpow(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; struct ieee80211req_sta_txpow txpow; int error; if (ireq->i_len != sizeof(txpow)) return EINVAL; error = copyin(ireq->i_data, &txpow, sizeof(txpow)); if (error != 0) return error; ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, txpow.it_macaddr); if (ni == NULL) return ENOENT; txpow.it_txpow = ni->ni_txpower; error = copyout(&txpow, ireq->i_data, sizeof(txpow)); ieee80211_free_node(ni); return error; } static __noinline int ieee80211_ioctl_getwmeparam(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_wme_state *wme = &ic->ic_wme; struct wmeParams *wmep; int ac; if ((ic->ic_caps & IEEE80211_C_WME) == 0) return EINVAL; ac = (ireq->i_len & IEEE80211_WMEPARAM_VAL); if (ac >= WME_NUM_AC) ac = WME_AC_BE; if (ireq->i_len & IEEE80211_WMEPARAM_BSS) wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac]; else wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac]; switch (ireq->i_type) { case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */ ireq->i_val = wmep->wmep_logcwmin; break; case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */ ireq->i_val = wmep->wmep_logcwmax; break; case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */ ireq->i_val = wmep->wmep_aifsn; break; case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */ ireq->i_val = wmep->wmep_txopLimit; break; case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */ wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac]; ireq->i_val = wmep->wmep_acm; break; case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only)*/ wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac]; ireq->i_val = !wmep->wmep_noackPolicy; break; } return 0; } static __noinline int ieee80211_ioctl_getmaccmd(struct ieee80211vap *vap, struct ieee80211req *ireq) { const struct ieee80211_aclator *acl = vap->iv_acl; return (acl 
== NULL ? EINVAL : acl->iac_getioctl(vap, ireq)); } static __noinline int ieee80211_ioctl_getcurchan(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_channel *c; if (ireq->i_len != sizeof(struct ieee80211_channel)) return EINVAL; /* * vap's may have different operating channels when HT is * in use. When in RUN state report the vap-specific channel. * Otherwise return curchan. */ if (vap->iv_state == IEEE80211_S_RUN) c = vap->iv_bss->ni_chan; else c = ic->ic_curchan; return copyout(c, ireq->i_data, sizeof(*c)); } static int getappie(const struct ieee80211_appie *aie, struct ieee80211req *ireq) { if (aie == NULL) return EINVAL; /* NB: truncate, caller can check length */ if (ireq->i_len > aie->ie_len) ireq->i_len = aie->ie_len; return copyout(aie->ie_data, ireq->i_data, ireq->i_len); } static int ieee80211_ioctl_getappie(struct ieee80211vap *vap, struct ieee80211req *ireq) { uint8_t fc0; fc0 = ireq->i_val & 0xff; if ((fc0 & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) return EINVAL; /* NB: could check iv_opmode and reject but hardly worth the effort */ switch (fc0 & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_BEACON: return getappie(vap->iv_appie_beacon, ireq); case IEEE80211_FC0_SUBTYPE_PROBE_RESP: return getappie(vap->iv_appie_proberesp, ireq); case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: return getappie(vap->iv_appie_assocresp, ireq); case IEEE80211_FC0_SUBTYPE_PROBE_REQ: return getappie(vap->iv_appie_probereq, ireq); case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: return getappie(vap->iv_appie_assocreq, ireq); case IEEE80211_FC0_SUBTYPE_BEACON|IEEE80211_FC0_SUBTYPE_PROBE_RESP: return getappie(vap->iv_appie_wpa, ireq); } return EINVAL; } static __noinline int ieee80211_ioctl_getregdomain(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; if (ireq->i_len != sizeof(ic->ic_regdomain)) return EINVAL; return copyout(&ic->ic_regdomain, ireq->i_data, sizeof(ic->ic_regdomain)); } static __noinline int ieee80211_ioctl_getroam(struct ieee80211vap *vap, const struct ieee80211req *ireq) { size_t len = ireq->i_len; /* NB: accept short requests for backwards compat */ if (len > sizeof(vap->iv_roamparms)) len = sizeof(vap->iv_roamparms); return copyout(vap->iv_roamparms, ireq->i_data, len); } static __noinline int ieee80211_ioctl_gettxparams(struct ieee80211vap *vap, const struct ieee80211req *ireq) { size_t len = ireq->i_len; /* NB: accept short requests for backwards compat */ if (len > sizeof(vap->iv_txparms)) len = sizeof(vap->iv_txparms); return copyout(vap->iv_txparms, ireq->i_data, len); } static __noinline int ieee80211_ioctl_getdevcaps(struct ieee80211com *ic, const struct ieee80211req *ireq) { struct ieee80211_devcaps_req *dc; struct ieee80211req_chaninfo *ci; int maxchans, error; maxchans = 1 + ((ireq->i_len - sizeof(struct ieee80211_devcaps_req)) / sizeof(struct ieee80211_channel)); /* NB: require 1 so we know ic_nchans is accessible */ if (maxchans < 1) return EINVAL; /* constrain max request size, 2K channels is ~24Kbytes */ if (maxchans > 2048) maxchans = 2048; dc = (struct ieee80211_devcaps_req *) malloc(IEEE80211_DEVCAPS_SIZE(maxchans), M_TEMP, M_NOWAIT | M_ZERO); if (dc == NULL) return ENOMEM; dc->dc_drivercaps = ic->ic_caps; dc->dc_cryptocaps = ic->ic_cryptocaps; dc->dc_htcaps = ic->ic_htcaps; ci = &dc->dc_chaninfo; ic->ic_getradiocaps(ic, maxchans, &ci->ic_nchans, ci->ic_chans); KASSERT(ci->ic_nchans <= maxchans, ("nchans %d maxchans %d", ci->ic_nchans, maxchans)); 
ieee80211_sort_channels(ci->ic_chans, ci->ic_nchans); error = copyout(dc, ireq->i_data, IEEE80211_DEVCAPS_SPACE(dc)); free(dc, M_TEMP); return error; } static __noinline int ieee80211_ioctl_getstavlan(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; struct ieee80211req_sta_vlan vlan; int error; if (ireq->i_len != sizeof(vlan)) return EINVAL; error = copyin(ireq->i_data, &vlan, sizeof(vlan)); if (error != 0) return error; if (!IEEE80211_ADDR_EQ(vlan.sv_macaddr, zerobssid)) { ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, vlan.sv_macaddr); if (ni == NULL) return ENOENT; } else ni = ieee80211_ref_node(vap->iv_bss); vlan.sv_vlan = ni->ni_vlan; error = copyout(&vlan, ireq->i_data, sizeof(vlan)); ieee80211_free_node(ni); return error; } /* * Dummy ioctl get handler so the linker set is defined. */ static int dummy_ioctl_get(struct ieee80211vap *vap, struct ieee80211req *ireq) { return ENOSYS; } IEEE80211_IOCTL_GET(dummy, dummy_ioctl_get); static int ieee80211_ioctl_getdefault(struct ieee80211vap *vap, struct ieee80211req *ireq) { ieee80211_ioctl_getfunc * const *get; int error; SET_FOREACH(get, ieee80211_ioctl_getset) { error = (*get)(vap, ireq); if (error != ENOSYS) return error; } return EINVAL; } /* * When building the kernel with -O2 on the i386 architecture, gcc * seems to want to inline this function into ieee80211_ioctl() * (which is the only routine that calls it). When this happens, * ieee80211_ioctl() ends up consuming an additional 2K of stack * space. (Exactly why it needs so much is unclear.) The problem * is that it's possible for ieee80211_ioctl() to invoke other * routines (including driver init functions) which could then find * themselves perilously close to exhausting the stack. * * To avoid this, we deliberately prevent gcc from inlining this * routine. Another way to avoid this is to use less agressive * optimization when compiling this file (i.e. -O instead of -O2) * but special-casing the compilation of this one module in the * build system would be awkward. 
*/ static __noinline int ieee80211_ioctl_get80211(struct ieee80211vap *vap, u_long cmd, struct ieee80211req *ireq) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct ieee80211com *ic = vap->iv_ic; u_int kid, len; uint8_t tmpkey[IEEE80211_KEYBUF_SIZE]; char tmpssid[IEEE80211_NWID_LEN]; int error = 0; switch (ireq->i_type) { case IEEE80211_IOC_SSID: switch (vap->iv_state) { case IEEE80211_S_INIT: case IEEE80211_S_SCAN: ireq->i_len = vap->iv_des_ssid[0].len; memcpy(tmpssid, vap->iv_des_ssid[0].ssid, ireq->i_len); break; default: ireq->i_len = vap->iv_bss->ni_esslen; memcpy(tmpssid, vap->iv_bss->ni_essid, ireq->i_len); break; } error = copyout(tmpssid, ireq->i_data, ireq->i_len); break; case IEEE80211_IOC_NUMSSIDS: ireq->i_val = 1; break; case IEEE80211_IOC_WEP: if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) ireq->i_val = IEEE80211_WEP_OFF; else if (vap->iv_flags & IEEE80211_F_DROPUNENC) ireq->i_val = IEEE80211_WEP_ON; else ireq->i_val = IEEE80211_WEP_MIXED; break; case IEEE80211_IOC_WEPKEY: kid = (u_int) ireq->i_val; if (kid >= IEEE80211_WEP_NKID) return EINVAL; len = (u_int) vap->iv_nw_keys[kid].wk_keylen; /* NB: only root can read WEP keys */ if (priv_check(curthread, PRIV_NET80211_GETKEY) == 0) { bcopy(vap->iv_nw_keys[kid].wk_key, tmpkey, len); } else { bzero(tmpkey, len); } ireq->i_len = len; error = copyout(tmpkey, ireq->i_data, len); break; case IEEE80211_IOC_NUMWEPKEYS: ireq->i_val = IEEE80211_WEP_NKID; break; case IEEE80211_IOC_WEPTXKEY: ireq->i_val = vap->iv_def_txkey; break; case IEEE80211_IOC_AUTHMODE: if (vap->iv_flags & IEEE80211_F_WPA) ireq->i_val = IEEE80211_AUTH_WPA; else ireq->i_val = vap->iv_bss->ni_authmode; break; case IEEE80211_IOC_CHANNEL: ireq->i_val = ieee80211_chan2ieee(ic, ic->ic_curchan); break; case IEEE80211_IOC_POWERSAVE: if (vap->iv_flags & IEEE80211_F_PMGTON) ireq->i_val = IEEE80211_POWERSAVE_ON; else ireq->i_val = IEEE80211_POWERSAVE_OFF; break; case IEEE80211_IOC_POWERSAVESLEEP: ireq->i_val = ic->ic_lintval; break; case IEEE80211_IOC_RTSTHRESHOLD: ireq->i_val = vap->iv_rtsthreshold; break; case IEEE80211_IOC_PROTMODE: ireq->i_val = ic->ic_protmode; break; case IEEE80211_IOC_TXPOWER: /* * Tx power limit is the min of max regulatory * power, any user-set limit, and the max the * radio can do. 
*/ ireq->i_val = 2*ic->ic_curchan->ic_maxregpower; if (ireq->i_val > ic->ic_txpowlimit) ireq->i_val = ic->ic_txpowlimit; if (ireq->i_val > ic->ic_curchan->ic_maxpower) ireq->i_val = ic->ic_curchan->ic_maxpower; break; case IEEE80211_IOC_WPA: switch (vap->iv_flags & IEEE80211_F_WPA) { case IEEE80211_F_WPA1: ireq->i_val = 1; break; case IEEE80211_F_WPA2: ireq->i_val = 2; break; case IEEE80211_F_WPA1 | IEEE80211_F_WPA2: ireq->i_val = 3; break; default: ireq->i_val = 0; break; } break; case IEEE80211_IOC_CHANLIST: error = ieee80211_ioctl_getchanlist(vap, ireq); break; case IEEE80211_IOC_ROAMING: ireq->i_val = vap->iv_roaming; break; case IEEE80211_IOC_PRIVACY: ireq->i_val = (vap->iv_flags & IEEE80211_F_PRIVACY) != 0; break; case IEEE80211_IOC_DROPUNENCRYPTED: ireq->i_val = (vap->iv_flags & IEEE80211_F_DROPUNENC) != 0; break; case IEEE80211_IOC_COUNTERMEASURES: ireq->i_val = (vap->iv_flags & IEEE80211_F_COUNTERM) != 0; break; case IEEE80211_IOC_WME: ireq->i_val = (vap->iv_flags & IEEE80211_F_WME) != 0; break; case IEEE80211_IOC_HIDESSID: ireq->i_val = (vap->iv_flags & IEEE80211_F_HIDESSID) != 0; break; case IEEE80211_IOC_APBRIDGE: ireq->i_val = (vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0; break; case IEEE80211_IOC_WPAKEY: error = ieee80211_ioctl_getkey(vap, ireq); break; case IEEE80211_IOC_CHANINFO: error = ieee80211_ioctl_getchaninfo(vap, ireq); break; case IEEE80211_IOC_BSSID: if (ireq->i_len != IEEE80211_ADDR_LEN) return EINVAL; if (vap->iv_state == IEEE80211_S_RUN) { error = copyout(vap->iv_opmode == IEEE80211_M_WDS ? vap->iv_bss->ni_macaddr : vap->iv_bss->ni_bssid, ireq->i_data, ireq->i_len); } else error = copyout(vap->iv_des_bssid, ireq->i_data, ireq->i_len); break; case IEEE80211_IOC_WPAIE: error = ieee80211_ioctl_getwpaie(vap, ireq, ireq->i_type); break; case IEEE80211_IOC_WPAIE2: error = ieee80211_ioctl_getwpaie(vap, ireq, ireq->i_type); break; case IEEE80211_IOC_SCAN_RESULTS: error = ieee80211_ioctl_getscanresults(vap, ireq); break; case IEEE80211_IOC_STA_STATS: error = ieee80211_ioctl_getstastats(vap, ireq); break; case IEEE80211_IOC_TXPOWMAX: ireq->i_val = vap->iv_bss->ni_txpower; break; case IEEE80211_IOC_STA_TXPOW: error = ieee80211_ioctl_getstatxpow(vap, ireq); break; case IEEE80211_IOC_STA_INFO: error = ieee80211_ioctl_getstainfo(vap, ireq); break; case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */ case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */ case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */ case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */ case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */ case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (bss only) */ error = ieee80211_ioctl_getwmeparam(vap, ireq); break; case IEEE80211_IOC_DTIM_PERIOD: ireq->i_val = vap->iv_dtim_period; break; case IEEE80211_IOC_BEACON_INTERVAL: /* NB: get from ic_bss for station mode */ ireq->i_val = vap->iv_bss->ni_intval; break; case IEEE80211_IOC_PUREG: ireq->i_val = (vap->iv_flags & IEEE80211_F_PUREG) != 0; break; case IEEE80211_IOC_BGSCAN: ireq->i_val = (vap->iv_flags & IEEE80211_F_BGSCAN) != 0; break; case IEEE80211_IOC_BGSCAN_IDLE: ireq->i_val = vap->iv_bgscanidle*hz/1000; /* ms */ break; case IEEE80211_IOC_BGSCAN_INTERVAL: ireq->i_val = vap->iv_bgscanintvl/hz; /* seconds */ break; case IEEE80211_IOC_SCANVALID: ireq->i_val = vap->iv_scanvalid/hz; /* seconds */ break; case IEEE80211_IOC_FRAGTHRESHOLD: ireq->i_val = vap->iv_fragthreshold; break; case IEEE80211_IOC_MACCMD: error = ieee80211_ioctl_getmaccmd(vap, ireq); break; case IEEE80211_IOC_BURST: ireq->i_val = (vap->iv_flags & 
IEEE80211_F_BURST) != 0; break; case IEEE80211_IOC_BMISSTHRESHOLD: ireq->i_val = vap->iv_bmissthreshold; break; case IEEE80211_IOC_CURCHAN: error = ieee80211_ioctl_getcurchan(vap, ireq); break; case IEEE80211_IOC_SHORTGI: ireq->i_val = 0; if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) ireq->i_val |= IEEE80211_HTCAP_SHORTGI20; if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) ireq->i_val |= IEEE80211_HTCAP_SHORTGI40; break; case IEEE80211_IOC_AMPDU: ireq->i_val = 0; if (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX) ireq->i_val |= 1; if (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_RX) ireq->i_val |= 2; break; case IEEE80211_IOC_AMPDU_LIMIT: if (vap->iv_opmode == IEEE80211_M_HOSTAP) ireq->i_val = vap->iv_ampdu_rxmax; else if (vap->iv_state == IEEE80211_S_RUN) ireq->i_val = MS(vap->iv_bss->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU); else ireq->i_val = vap->iv_ampdu_limit; break; case IEEE80211_IOC_AMPDU_DENSITY: if (vap->iv_opmode == IEEE80211_M_STA && vap->iv_state == IEEE80211_S_RUN) ireq->i_val = MS(vap->iv_bss->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY); else ireq->i_val = vap->iv_ampdu_density; break; case IEEE80211_IOC_AMSDU: ireq->i_val = 0; if (vap->iv_flags_ht & IEEE80211_FHT_AMSDU_TX) ireq->i_val |= 1; if (vap->iv_flags_ht & IEEE80211_FHT_AMSDU_RX) ireq->i_val |= 2; break; case IEEE80211_IOC_AMSDU_LIMIT: ireq->i_val = vap->iv_amsdu_limit; /* XXX truncation? */ break; case IEEE80211_IOC_PUREN: ireq->i_val = (vap->iv_flags_ht & IEEE80211_FHT_PUREN) != 0; break; case IEEE80211_IOC_DOTH: ireq->i_val = (vap->iv_flags & IEEE80211_F_DOTH) != 0; break; case IEEE80211_IOC_REGDOMAIN: error = ieee80211_ioctl_getregdomain(vap, ireq); break; case IEEE80211_IOC_ROAM: error = ieee80211_ioctl_getroam(vap, ireq); break; case IEEE80211_IOC_TXPARAMS: error = ieee80211_ioctl_gettxparams(vap, ireq); break; case IEEE80211_IOC_HTCOMPAT: ireq->i_val = (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) != 0; break; case IEEE80211_IOC_DWDS: ireq->i_val = (vap->iv_flags & IEEE80211_F_DWDS) != 0; break; case IEEE80211_IOC_INACTIVITY: ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_INACT) != 0; break; case IEEE80211_IOC_APPIE: error = ieee80211_ioctl_getappie(vap, ireq); break; case IEEE80211_IOC_WPS: ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_WPS) != 0; break; case IEEE80211_IOC_TSN: ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_TSN) != 0; break; case IEEE80211_IOC_DFS: ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_DFS) != 0; break; case IEEE80211_IOC_DOTD: ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_DOTD) != 0; break; case IEEE80211_IOC_DEVCAPS: error = ieee80211_ioctl_getdevcaps(ic, ireq); break; case IEEE80211_IOC_HTPROTMODE: ireq->i_val = ic->ic_htprotmode; break; case IEEE80211_IOC_HTCONF: if (vap->iv_flags_ht & IEEE80211_FHT_HT) { ireq->i_val = 1; if (vap->iv_flags_ht & IEEE80211_FHT_USEHT40) ireq->i_val |= 2; } else ireq->i_val = 0; break; case IEEE80211_IOC_STA_VLAN: error = ieee80211_ioctl_getstavlan(vap, ireq); break; case IEEE80211_IOC_SMPS: if (vap->iv_opmode == IEEE80211_M_STA && vap->iv_state == IEEE80211_S_RUN) { if (vap->iv_bss->ni_flags & IEEE80211_NODE_MIMO_RTS) ireq->i_val = IEEE80211_HTCAP_SMPS_DYNAMIC; else if (vap->iv_bss->ni_flags & IEEE80211_NODE_MIMO_PS) ireq->i_val = IEEE80211_HTCAP_SMPS_ENA; else ireq->i_val = IEEE80211_HTCAP_SMPS_OFF; } else ireq->i_val = vap->iv_htcaps & IEEE80211_HTCAP_SMPS; break; case IEEE80211_IOC_RIFS: if (vap->iv_opmode == IEEE80211_M_STA && vap->iv_state == IEEE80211_S_RUN) ireq->i_val = (vap->iv_bss->ni_flags & IEEE80211_NODE_RIFS) != 0; else ireq->i_val = 
(vap->iv_flags_ht & IEEE80211_FHT_RIFS) != 0; break; default: error = ieee80211_ioctl_getdefault(vap, ireq); break; } return error; #undef MS } static __noinline int ieee80211_ioctl_setkey(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211req_key ik; struct ieee80211_node *ni; struct ieee80211_key *wk; uint16_t kid; int error, i; if (ireq->i_len != sizeof(ik)) return EINVAL; error = copyin(ireq->i_data, &ik, sizeof(ik)); if (error) return error; /* NB: cipher support is verified by ieee80211_crypt_newkey */ /* NB: this also checks ik->ik_keylen > sizeof(wk->wk_key) */ if (ik.ik_keylen > sizeof(ik.ik_keydata)) return E2BIG; kid = ik.ik_keyix; if (kid == IEEE80211_KEYIX_NONE) { /* XXX unicast keys currently must be tx/rx */ if (ik.ik_flags != (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)) return EINVAL; if (vap->iv_opmode == IEEE80211_M_STA) { ni = ieee80211_ref_node(vap->iv_bss); if (!IEEE80211_ADDR_EQ(ik.ik_macaddr, ni->ni_bssid)) { ieee80211_free_node(ni); return EADDRNOTAVAIL; } } else { ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, ik.ik_macaddr); if (ni == NULL) return ENOENT; } wk = &ni->ni_ucastkey; } else { if (kid >= IEEE80211_WEP_NKID) return EINVAL; wk = &vap->iv_nw_keys[kid]; /* * Global slots start off w/o any assigned key index. * Force one here for consistency with IEEE80211_IOC_WEPKEY. */ if (wk->wk_keyix == IEEE80211_KEYIX_NONE) wk->wk_keyix = kid; ni = NULL; } error = 0; ieee80211_key_update_begin(vap); if (ieee80211_crypto_newkey(vap, ik.ik_type, ik.ik_flags, wk)) { wk->wk_keylen = ik.ik_keylen; /* NB: MIC presence is implied by cipher type */ if (wk->wk_keylen > IEEE80211_KEYBUF_SIZE) wk->wk_keylen = IEEE80211_KEYBUF_SIZE; for (i = 0; i < IEEE80211_TID_SIZE; i++) wk->wk_keyrsc[i] = ik.ik_keyrsc; wk->wk_keytsc = 0; /* new key, reset */ memset(wk->wk_key, 0, sizeof(wk->wk_key)); memcpy(wk->wk_key, ik.ik_keydata, ik.ik_keylen); IEEE80211_ADDR_COPY(wk->wk_macaddr, ni != NULL ? 
ni->ni_macaddr : ik.ik_macaddr); if (!ieee80211_crypto_setkey(vap, wk)) error = EIO; else if ((ik.ik_flags & IEEE80211_KEY_DEFAULT)) vap->iv_def_txkey = kid; } else error = ENXIO; ieee80211_key_update_end(vap); if (ni != NULL) ieee80211_free_node(ni); return error; } static __noinline int ieee80211_ioctl_delkey(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211req_del_key dk; int kid, error; if (ireq->i_len != sizeof(dk)) return EINVAL; error = copyin(ireq->i_data, &dk, sizeof(dk)); if (error) return error; kid = dk.idk_keyix; /* XXX uint8_t -> uint16_t */ if (dk.idk_keyix == (uint8_t) IEEE80211_KEYIX_NONE) { struct ieee80211_node *ni; if (vap->iv_opmode == IEEE80211_M_STA) { ni = ieee80211_ref_node(vap->iv_bss); if (!IEEE80211_ADDR_EQ(dk.idk_macaddr, ni->ni_bssid)) { ieee80211_free_node(ni); return EADDRNOTAVAIL; } } else { ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, dk.idk_macaddr); if (ni == NULL) return ENOENT; } /* XXX error return */ ieee80211_node_delucastkey(ni); ieee80211_free_node(ni); } else { if (kid >= IEEE80211_WEP_NKID) return EINVAL; /* XXX error return */ ieee80211_crypto_delkey(vap, &vap->iv_nw_keys[kid]); } return 0; } struct mlmeop { struct ieee80211vap *vap; int op; int reason; }; static void mlmedebug(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], int op, int reason) { #ifdef IEEE80211_DEBUG static const struct { int mask; const char *opstr; } ops[] = { { 0, "op#0" }, { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_ASSOC, "assoc" }, { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_ASSOC, "disassoc" }, { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_AUTH, "deauth" }, { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_AUTH, "authorize" }, { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_AUTH, "unauthorize" }, }; if (op == IEEE80211_MLME_AUTH) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE | IEEE80211_MSG_AUTH, mac, "station authenticate %s via MLME (reason %d)", reason == IEEE80211_STATUS_SUCCESS ? "ACCEPT" : "REJECT", reason); } else if (!(IEEE80211_MLME_ASSOC <= op && op <= IEEE80211_MLME_AUTH)) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, mac, "unknown MLME request %d (reason %d)", op, reason); } else if (reason == IEEE80211_STATUS_SUCCESS) { IEEE80211_NOTE_MAC(vap, ops[op].mask, mac, "station %s via MLME", ops[op].opstr); } else { IEEE80211_NOTE_MAC(vap, ops[op].mask, mac, "station %s via MLME (reason %d)", ops[op].opstr, reason); } #endif /* IEEE80211_DEBUG */ } static void domlme(void *arg, struct ieee80211_node *ni) { struct mlmeop *mop = arg; struct ieee80211vap *vap = ni->ni_vap; if (vap != mop->vap) return; /* * NB: if ni_associd is zero then the node is already cleaned * up and we don't need to do this (we're safely holding a * reference but should otherwise not modify it's state). 
*/ if (ni->ni_associd == 0) return; mlmedebug(vap, ni->ni_macaddr, mop->op, mop->reason); if (mop->op == IEEE80211_MLME_DEAUTH) { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, mop->reason); } else { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC, mop->reason); } ieee80211_node_leave(ni); } static int setmlme_dropsta(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], struct mlmeop *mlmeop) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node_table *nt = &ic->ic_sta; struct ieee80211_node *ni; int error = 0; /* NB: the broadcast address means do 'em all */ if (!IEEE80211_ADDR_EQ(mac, ic->ic_ifp->if_broadcastaddr)) { IEEE80211_NODE_LOCK(nt); ni = ieee80211_find_node_locked(nt, mac); if (ni != NULL) { domlme(mlmeop, ni); ieee80211_free_node(ni); } else error = ENOENT; IEEE80211_NODE_UNLOCK(nt); } else { ieee80211_iterate_nodes(nt, domlme, mlmeop); } return error; } static __noinline int setmlme_common(struct ieee80211vap *vap, int op, const uint8_t mac[IEEE80211_ADDR_LEN], int reason) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node_table *nt = &ic->ic_sta; struct ieee80211_node *ni; struct mlmeop mlmeop; int error; error = 0; switch (op) { case IEEE80211_MLME_DISASSOC: case IEEE80211_MLME_DEAUTH: switch (vap->iv_opmode) { case IEEE80211_M_STA: mlmedebug(vap, vap->iv_bss->ni_macaddr, op, reason); /* XXX not quite right */ ieee80211_new_state(vap, IEEE80211_S_INIT, reason); break; case IEEE80211_M_HOSTAP: mlmeop.vap = vap; mlmeop.op = op; mlmeop.reason = reason; error = setmlme_dropsta(vap, mac, &mlmeop); break; case IEEE80211_M_WDS: /* XXX user app should send raw frame? */ if (op != IEEE80211_MLME_DEAUTH) { error = EINVAL; break; } #if 0 /* XXX accept any address, simplifies user code */ if (!IEEE80211_ADDR_EQ(mac, vap->iv_bss->ni_macaddr)) { error = EINVAL; break; } #endif mlmedebug(vap, vap->iv_bss->ni_macaddr, op, reason); ni = ieee80211_ref_node(vap->iv_bss); IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, reason); ieee80211_free_node(ni); break; default: error = EINVAL; break; } break; case IEEE80211_MLME_AUTHORIZE: case IEEE80211_MLME_UNAUTHORIZE: if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_WDS) { error = EINVAL; break; } IEEE80211_NODE_LOCK(nt); ni = ieee80211_find_vap_node_locked(nt, vap, mac); if (ni != NULL) { mlmedebug(vap, mac, op, reason); if (op == IEEE80211_MLME_AUTHORIZE) ieee80211_node_authorize(ni); else ieee80211_node_unauthorize(ni); ieee80211_free_node(ni); } else error = ENOENT; IEEE80211_NODE_UNLOCK(nt); break; case IEEE80211_MLME_AUTH: if (vap->iv_opmode != IEEE80211_M_HOSTAP) { error = EINVAL; break; } IEEE80211_NODE_LOCK(nt); ni = ieee80211_find_vap_node_locked(nt, vap, mac); if (ni != NULL) { mlmedebug(vap, mac, op, reason); if (reason == IEEE80211_STATUS_SUCCESS) { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, 2); /* * For shared key auth, just continue the * exchange. Otherwise when 802.1x is not in * use mark the port authorized at this point * so traffic can flow. 
*/ if (ni->ni_authmode != IEEE80211_AUTH_8021X && ni->ni_challenge == NULL) ieee80211_node_authorize(ni); } else { vap->iv_stats.is_rx_acl++; ieee80211_send_error(ni, ni->ni_macaddr, IEEE80211_FC0_SUBTYPE_AUTH, 2|(reason<<16)); ieee80211_node_leave(ni); } ieee80211_free_node(ni); } else error = ENOENT; IEEE80211_NODE_UNLOCK(nt); break; default: error = EINVAL; break; } return error; } struct scanlookup { const uint8_t *mac; int esslen; const uint8_t *essid; const struct ieee80211_scan_entry *se; }; /* * Match mac address and any ssid. */ static void mlmelookup(void *arg, const struct ieee80211_scan_entry *se) { struct scanlookup *look = arg; if (!IEEE80211_ADDR_EQ(look->mac, se->se_macaddr)) return; if (look->esslen != 0) { if (se->se_ssid[1] != look->esslen) return; if (memcmp(look->essid, se->se_ssid+2, look->esslen)) return; } look->se = se; } static __noinline int setmlme_assoc_sta(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], int ssid_len, const uint8_t ssid[IEEE80211_NWID_LEN]) { struct scanlookup lookup; KASSERT(vap->iv_opmode == IEEE80211_M_STA, ("expected opmode STA not %s", ieee80211_opmode_name[vap->iv_opmode])); /* NB: this is racey if roaming is !manual */ lookup.se = NULL; lookup.mac = mac; lookup.esslen = ssid_len; lookup.essid = ssid; ieee80211_scan_iterate(vap, mlmelookup, &lookup); if (lookup.se == NULL) return ENOENT; mlmedebug(vap, mac, IEEE80211_MLME_ASSOC, 0); if (!ieee80211_sta_join(vap, lookup.se->se_chan, lookup.se)) return EIO; /* XXX unique but could be better */ return 0; } static __noinline int setmlme_assoc_adhoc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], int ssid_len, const uint8_t ssid[IEEE80211_NWID_LEN]) { struct ieee80211_scan_req sr; KASSERT(vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_AHDEMO, ("expected opmode IBSS or AHDEMO not %s", ieee80211_opmode_name[vap->iv_opmode])); if (ssid_len == 0) return EINVAL; /* NB: IEEE80211_IOC_SSID call missing for ap_scan=2. 
*/ memset(vap->iv_des_ssid[0].ssid, 0, IEEE80211_NWID_LEN); vap->iv_des_ssid[0].len = ssid_len; memcpy(vap->iv_des_ssid[0].ssid, ssid, ssid_len); vap->iv_des_nssid = 1; memset(&sr, 0, sizeof(sr)); sr.sr_flags = IEEE80211_IOC_SCAN_ACTIVE | IEEE80211_IOC_SCAN_ONCE; sr.sr_duration = IEEE80211_IOC_SCAN_FOREVER; memcpy(sr.sr_ssid[0].ssid, ssid, ssid_len); sr.sr_ssid[0].len = ssid_len; sr.sr_nssid = 1; return ieee80211_scanreq(vap, &sr); } static __noinline int ieee80211_ioctl_setmlme(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211req_mlme mlme; int error; if (ireq->i_len != sizeof(mlme)) return EINVAL; error = copyin(ireq->i_data, &mlme, sizeof(mlme)); if (error) return error; if (vap->iv_opmode == IEEE80211_M_STA && mlme.im_op == IEEE80211_MLME_ASSOC) return setmlme_assoc_sta(vap, mlme.im_macaddr, vap->iv_des_ssid[0].len, vap->iv_des_ssid[0].ssid); else if (mlme.im_op == IEEE80211_MLME_ASSOC) return setmlme_assoc_adhoc(vap, mlme.im_macaddr, mlme.im_ssid_len, mlme.im_ssid); else return setmlme_common(vap, mlme.im_op, mlme.im_macaddr, mlme.im_reason); } static __noinline int ieee80211_ioctl_macmac(struct ieee80211vap *vap, struct ieee80211req *ireq) { uint8_t mac[IEEE80211_ADDR_LEN]; const struct ieee80211_aclator *acl = vap->iv_acl; int error; if (ireq->i_len != sizeof(mac)) return EINVAL; error = copyin(ireq->i_data, mac, ireq->i_len); if (error) return error; if (acl == NULL) { acl = ieee80211_aclator_get("mac"); if (acl == NULL || !acl->iac_attach(vap)) return EINVAL; vap->iv_acl = acl; } if (ireq->i_type == IEEE80211_IOC_ADDMAC) acl->iac_add(vap, mac); else acl->iac_remove(vap, mac); return 0; } static __noinline int ieee80211_ioctl_setmaccmd(struct ieee80211vap *vap, struct ieee80211req *ireq) { const struct ieee80211_aclator *acl = vap->iv_acl; switch (ireq->i_val) { case IEEE80211_MACCMD_POLICY_OPEN: case IEEE80211_MACCMD_POLICY_ALLOW: case IEEE80211_MACCMD_POLICY_DENY: case IEEE80211_MACCMD_POLICY_RADIUS: if (acl == NULL) { acl = ieee80211_aclator_get("mac"); if (acl == NULL || !acl->iac_attach(vap)) return EINVAL; vap->iv_acl = acl; } acl->iac_setpolicy(vap, ireq->i_val); break; case IEEE80211_MACCMD_FLUSH: if (acl != NULL) acl->iac_flush(vap); /* NB: silently ignore when not in use */ break; case IEEE80211_MACCMD_DETACH: if (acl != NULL) { vap->iv_acl = NULL; acl->iac_detach(vap); } break; default: if (acl == NULL) return EINVAL; else return acl->iac_setioctl(vap, ireq); } return 0; } static __noinline int ieee80211_ioctl_setchanlist(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; uint8_t *chanlist, *list; int i, nchan, maxchan, error; if (ireq->i_len > sizeof(ic->ic_chan_active)) ireq->i_len = sizeof(ic->ic_chan_active); list = malloc(ireq->i_len + IEEE80211_CHAN_BYTES, M_TEMP, M_NOWAIT | M_ZERO); if (list == NULL) return ENOMEM; error = copyin(ireq->i_data, list, ireq->i_len); if (error) { free(list, M_TEMP); return error; } nchan = 0; chanlist = list + ireq->i_len; /* NB: zero'd already */ maxchan = ireq->i_len * NBBY; for (i = 0; i < ic->ic_nchans; i++) { const struct ieee80211_channel *c = &ic->ic_channels[i]; /* * Calculate the intersection of the user list and the * available channels so users can do things like specify * 1-255 to get all available channels. 
*/ if (c->ic_ieee < maxchan && isset(list, c->ic_ieee)) { setbit(chanlist, c->ic_ieee); nchan++; } } if (nchan == 0) { free(list, M_TEMP); return EINVAL; } if (ic->ic_bsschan != IEEE80211_CHAN_ANYC && /* XXX */ isclr(chanlist, ic->ic_bsschan->ic_ieee)) ic->ic_bsschan = IEEE80211_CHAN_ANYC; memcpy(ic->ic_chan_active, chanlist, IEEE80211_CHAN_BYTES); ieee80211_scan_flush(vap); free(list, M_TEMP); return ENETRESET; } static __noinline int ieee80211_ioctl_setstastats(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; uint8_t macaddr[IEEE80211_ADDR_LEN]; int error; /* * NB: we could copyin ieee80211req_sta_stats so apps * could make selective changes but that's overkill; * just clear all stats for now. */ if (ireq->i_len < IEEE80211_ADDR_LEN) return EINVAL; error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN); if (error != 0) return error; ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr); if (ni == NULL) return ENOENT; /* XXX require ni_vap == vap? */ memset(&ni->ni_stats, 0, sizeof(ni->ni_stats)); ieee80211_free_node(ni); return 0; } static __noinline int ieee80211_ioctl_setstatxpow(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; struct ieee80211req_sta_txpow txpow; int error; if (ireq->i_len != sizeof(txpow)) return EINVAL; error = copyin(ireq->i_data, &txpow, sizeof(txpow)); if (error != 0) return error; ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, txpow.it_macaddr); if (ni == NULL) return ENOENT; ni->ni_txpower = txpow.it_txpow; ieee80211_free_node(ni); return error; } static __noinline int ieee80211_ioctl_setwmeparam(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_wme_state *wme = &ic->ic_wme; struct wmeParams *wmep, *chanp; int isbss, ac; if ((ic->ic_caps & IEEE80211_C_WME) == 0) return EOPNOTSUPP; isbss = (ireq->i_len & IEEE80211_WMEPARAM_BSS); ac = (ireq->i_len & IEEE80211_WMEPARAM_VAL); if (ac >= WME_NUM_AC) ac = WME_AC_BE; if (isbss) { chanp = &wme->wme_bssChanParams.cap_wmeParams[ac]; wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac]; } else { chanp = &wme->wme_chanParams.cap_wmeParams[ac]; wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac]; } switch (ireq->i_type) { case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */ if (isbss) { wmep->wmep_logcwmin = ireq->i_val; if ((wme->wme_flags & WME_F_AGGRMODE) == 0) chanp->wmep_logcwmin = ireq->i_val; } else { wmep->wmep_logcwmin = chanp->wmep_logcwmin = ireq->i_val; } break; case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */ if (isbss) { wmep->wmep_logcwmax = ireq->i_val; if ((wme->wme_flags & WME_F_AGGRMODE) == 0) chanp->wmep_logcwmax = ireq->i_val; } else { wmep->wmep_logcwmax = chanp->wmep_logcwmax = ireq->i_val; } break; case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */ if (isbss) { wmep->wmep_aifsn = ireq->i_val; if ((wme->wme_flags & WME_F_AGGRMODE) == 0) chanp->wmep_aifsn = ireq->i_val; } else { wmep->wmep_aifsn = chanp->wmep_aifsn = ireq->i_val; } break; case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */ if (isbss) { wmep->wmep_txopLimit = ireq->i_val; if ((wme->wme_flags & WME_F_AGGRMODE) == 0) chanp->wmep_txopLimit = ireq->i_val; } else { wmep->wmep_txopLimit = chanp->wmep_txopLimit = ireq->i_val; } break; case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */ wmep->wmep_acm = ireq->i_val; if ((wme->wme_flags & WME_F_AGGRMODE) == 0) chanp->wmep_acm = ireq->i_val; break; case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only)*/ wmep->wmep_noackPolicy = chanp->wmep_noackPolicy = 
		    (ireq->i_val) == 0;
		break;
	}
	ieee80211_wme_updateparams(vap);
	return 0;
}

static int
find11gchannel(struct ieee80211com *ic, int start, int freq)
{
	const struct ieee80211_channel *c;
	int i;

	for (i = start+1; i < ic->ic_nchans; i++) {
		c = &ic->ic_channels[i];
		if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c))
			return 1;
	}
	/* NB: should not be needed but in case things are mis-sorted */
	for (i = 0; i < start; i++) {
		c = &ic->ic_channels[i];
		if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c))
			return 1;
	}
	return 0;
}

static struct ieee80211_channel *
findchannel(struct ieee80211com *ic, int ieee, int mode)
{
	static const u_int chanflags[IEEE80211_MODE_MAX] = {
		[IEEE80211_MODE_AUTO]	  = 0,
		[IEEE80211_MODE_11A]	  = IEEE80211_CHAN_A,
		[IEEE80211_MODE_11B]	  = IEEE80211_CHAN_B,
		[IEEE80211_MODE_11G]	  = IEEE80211_CHAN_G,
		[IEEE80211_MODE_FH]	  = IEEE80211_CHAN_FHSS,
		[IEEE80211_MODE_TURBO_A]  = IEEE80211_CHAN_108A,
		[IEEE80211_MODE_TURBO_G]  = IEEE80211_CHAN_108G,
		[IEEE80211_MODE_STURBO_A] = IEEE80211_CHAN_STURBO,
		[IEEE80211_MODE_HALF]	  = IEEE80211_CHAN_HALF,
		[IEEE80211_MODE_QUARTER]  = IEEE80211_CHAN_QUARTER,
		/* NB: handled specially below */
		[IEEE80211_MODE_11NA]	  = IEEE80211_CHAN_A,
		[IEEE80211_MODE_11NG]	  = IEEE80211_CHAN_G,
	};
	u_int modeflags;
	int i;

	modeflags = chanflags[mode];
	for (i = 0; i < ic->ic_nchans; i++) {
		struct ieee80211_channel *c = &ic->ic_channels[i];

		if (c->ic_ieee != ieee)
			continue;
		if (mode == IEEE80211_MODE_AUTO) {
			/* ignore turbo channels for autoselect */
			if (IEEE80211_IS_CHAN_TURBO(c))
				continue;
			/*
			 * XXX special-case 11b/g channels so we
			 *     always select the g channel if both
			 *     are present.
			 * XXX prefer HT to non-HT?
			 */
			if (!IEEE80211_IS_CHAN_B(c) ||
			    !find11gchannel(ic, i, c->ic_freq))
				return c;
		} else {
			/* must check HT specially */
			if ((mode == IEEE80211_MODE_11NA ||
			    mode == IEEE80211_MODE_11NG) &&
			    !IEEE80211_IS_CHAN_HT(c))
				continue;
			if ((c->ic_flags & modeflags) == modeflags)
				return c;
		}
	}
	return NULL;
}

/*
 * Check the specified channel against any desired mode (aka netband).
 * This is only used (presently) when operating in hostap mode
 * to enforce consistency.
 */
static int
check_mode_consistency(const struct ieee80211_channel *c, int mode)
{
	KASSERT(c != IEEE80211_CHAN_ANYC, ("oops, no channel"));

	switch (mode) {
	case IEEE80211_MODE_11B:
		return (IEEE80211_IS_CHAN_B(c));
	case IEEE80211_MODE_11G:
		return (IEEE80211_IS_CHAN_ANYG(c) && !IEEE80211_IS_CHAN_HT(c));
	case IEEE80211_MODE_11A:
		return (IEEE80211_IS_CHAN_A(c) && !IEEE80211_IS_CHAN_HT(c));
	case IEEE80211_MODE_STURBO_A:
		return (IEEE80211_IS_CHAN_STURBO(c));
	case IEEE80211_MODE_11NA:
		return (IEEE80211_IS_CHAN_HTA(c));
	case IEEE80211_MODE_11NG:
		return (IEEE80211_IS_CHAN_HTG(c));
	}
	return 1;
}

/*
 * Common code to set the current channel.  If the device
 * is up and running this may result in an immediate channel
 * change or a kick of the state machine.
 */
static int
setcurchan(struct ieee80211vap *vap, struct ieee80211_channel *c)
{
	struct ieee80211com *ic = vap->iv_ic;
	int error;

	if (c != IEEE80211_CHAN_ANYC) {
		if (IEEE80211_IS_CHAN_RADAR(c))
			return EBUSY;	/* XXX better code? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
			if (IEEE80211_IS_CHAN_NOHOSTAP(c))
				return EINVAL;
			if (!check_mode_consistency(c, vap->iv_des_mode))
				return EINVAL;
		} else if (vap->iv_opmode == IEEE80211_M_IBSS) {
			if (IEEE80211_IS_CHAN_NOADHOC(c))
				return EINVAL;
		}
		if (vap->iv_state == IEEE80211_S_RUN &&
		    vap->iv_bss->ni_chan == c)
			return 0;	/* NB: nothing to do */
	}
	vap->iv_des_chan = c;

	error = 0;
	if (vap->iv_opmode == IEEE80211_M_MONITOR &&
	    vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
		/*
		 * Monitor mode can switch directly.
		 */
		if (IFNET_IS_UP_RUNNING(vap->iv_ifp)) {
			/* XXX need state machine for other vap's to follow */
			ieee80211_setcurchan(ic, vap->iv_des_chan);
			vap->iv_bss->ni_chan = ic->ic_curchan;
		} else
			ic->ic_curchan = vap->iv_des_chan;
		ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);
	} else {
		/*
		 * Need to go through the state machine in case we
		 * need to reassociate or the like.  The state machine
		 * will pickup the desired channel and avoid scanning.
		 */
		if (IS_UP_AUTO(vap))
			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
		else if (vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
			/*
			 * When not up+running and a real channel has
			 * been specified fix the current channel so
			 * there is immediate feedback; e.g. via ifconfig.
			 */
			ic->ic_curchan = vap->iv_des_chan;
			ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);
		}
	}
	return error;
}

/*
 * Old api for setting the current channel; this is
 * deprecated because channel numbers are ambiguous.
 */
static __noinline int
ieee80211_ioctl_setchannel(struct ieee80211vap *vap,
	const struct ieee80211req *ireq)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_channel *c;

	/* XXX 0xffff overflows 16-bit signed */
	if (ireq->i_val == 0 ||
	    ireq->i_val == (int16_t) IEEE80211_CHAN_ANY) {
		c = IEEE80211_CHAN_ANYC;
	} else {
		struct ieee80211_channel *c2;

		c = findchannel(ic, ireq->i_val, vap->iv_des_mode);
		if (c == NULL) {
			c = findchannel(ic, ireq->i_val,
				IEEE80211_MODE_AUTO);
			if (c == NULL)
				return EINVAL;
		}
		/*
		 * Fine tune channel selection based on desired mode:
		 *   if 11b is requested, find the 11b version of any
		 *      11g channel returned,
		 *   if static turbo, find the turbo version of any
		 *      11a channel returned,
		 *   if 11na is requested, find the ht version of any
		 *      11a channel returned,
		 *   if 11ng is requested, find the ht version of any
		 *      11g channel returned,
		 *   otherwise we should be ok with what we've got.
		 */
		switch (vap->iv_des_mode) {
		case IEEE80211_MODE_11B:
			if (IEEE80211_IS_CHAN_ANYG(c)) {
				c2 = findchannel(ic, ireq->i_val,
					IEEE80211_MODE_11B);
				/* NB: should not happen, =>'s 11g w/o 11b */
				if (c2 != NULL)
					c = c2;
			}
			break;
		case IEEE80211_MODE_TURBO_A:
			if (IEEE80211_IS_CHAN_A(c)) {
				c2 = findchannel(ic, ireq->i_val,
					IEEE80211_MODE_TURBO_A);
				if (c2 != NULL)
					c = c2;
			}
			break;
		case IEEE80211_MODE_11NA:
			if (IEEE80211_IS_CHAN_A(c)) {
				c2 = findchannel(ic, ireq->i_val,
					IEEE80211_MODE_11NA);
				if (c2 != NULL)
					c = c2;
			}
			break;
		case IEEE80211_MODE_11NG:
			if (IEEE80211_IS_CHAN_ANYG(c)) {
				c2 = findchannel(ic, ireq->i_val,
					IEEE80211_MODE_11NG);
				if (c2 != NULL)
					c = c2;
			}
			break;
		default:		/* NB: no static turboG */
			break;
		}
	}
	return setcurchan(vap, c);
}

/*
 * New/current api for setting the current channel; a complete
 * channel description is provided so there is no ambiguity in
 * identifying the channel.
*/ static __noinline int ieee80211_ioctl_setcurchan(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_channel chan, *c; int error; if (ireq->i_len != sizeof(chan)) return EINVAL; error = copyin(ireq->i_data, &chan, sizeof(chan)); if (error != 0) return error; /* XXX 0xffff overflows 16-bit signed */ if (chan.ic_freq == 0 || chan.ic_freq == IEEE80211_CHAN_ANY) { c = IEEE80211_CHAN_ANYC; } else { c = ieee80211_find_channel(ic, chan.ic_freq, chan.ic_flags); if (c == NULL) return EINVAL; } return setcurchan(vap, c); } static __noinline int ieee80211_ioctl_setregdomain(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211_regdomain_req *reg; int nchans, error; nchans = 1 + ((ireq->i_len - sizeof(struct ieee80211_regdomain_req)) / sizeof(struct ieee80211_channel)); if (!(1 <= nchans && nchans <= IEEE80211_CHAN_MAX)) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL, "%s: bad # chans, i_len %d nchans %d\n", __func__, ireq->i_len, nchans); return EINVAL; } reg = (struct ieee80211_regdomain_req *) malloc(IEEE80211_REGDOMAIN_SIZE(nchans), M_TEMP, M_NOWAIT); if (reg == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL, "%s: no memory, nchans %d\n", __func__, nchans); return ENOMEM; } error = copyin(ireq->i_data, reg, IEEE80211_REGDOMAIN_SIZE(nchans)); if (error == 0) { /* NB: validate inline channel count against storage size */ if (reg->chaninfo.ic_nchans != nchans) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL, "%s: chan cnt mismatch, %d != %d\n", __func__, reg->chaninfo.ic_nchans, nchans); error = EINVAL; } else error = ieee80211_setregdomain(vap, reg); } free(reg, M_TEMP); return (error == 0 ? ENETRESET : error); } static int ieee80211_ioctl_setroam(struct ieee80211vap *vap, const struct ieee80211req *ireq) { if (ireq->i_len != sizeof(vap->iv_roamparms)) return EINVAL; /* XXX validate params */ /* XXX? ENETRESET to push to device? */ return copyin(ireq->i_data, vap->iv_roamparms, sizeof(vap->iv_roamparms)); } static int checkrate(const struct ieee80211_rateset *rs, int rate) { int i; if (rate == IEEE80211_FIXED_RATE_NONE) return 1; for (i = 0; i < rs->rs_nrates; i++) if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rate) return 1; return 0; } static int checkmcs(int mcs) { if (mcs == IEEE80211_FIXED_RATE_NONE) return 1; if ((mcs & IEEE80211_RATE_MCS) == 0) /* MCS always have 0x80 set */ return 0; return (mcs & 0x7f) <= 15; /* XXX could search ht rate set */ } static __noinline int ieee80211_ioctl_settxparams(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_txparams_req parms; /* XXX stack use? 
*/ struct ieee80211_txparam *src, *dst; const struct ieee80211_rateset *rs; int error, mode, changed, is11n, nmodes; /* NB: accept short requests for backwards compat */ if (ireq->i_len > sizeof(parms)) return EINVAL; error = copyin(ireq->i_data, &parms, ireq->i_len); if (error != 0) return error; nmodes = ireq->i_len / sizeof(struct ieee80211_txparam); changed = 0; /* validate parameters and check if anything changed */ for (mode = IEEE80211_MODE_11A; mode < nmodes; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; src = &parms.params[mode]; dst = &vap->iv_txparms[mode]; rs = &ic->ic_sup_rates[mode]; /* NB: 11n maps to legacy */ is11n = (mode == IEEE80211_MODE_11NA || mode == IEEE80211_MODE_11NG); if (src->ucastrate != dst->ucastrate) { if (!checkrate(rs, src->ucastrate) && (!is11n || !checkmcs(src->ucastrate))) return EINVAL; changed++; } if (src->mcastrate != dst->mcastrate) { if (!checkrate(rs, src->mcastrate) && (!is11n || !checkmcs(src->mcastrate))) return EINVAL; changed++; } if (src->mgmtrate != dst->mgmtrate) { if (!checkrate(rs, src->mgmtrate) && (!is11n || !checkmcs(src->mgmtrate))) return EINVAL; changed++; } if (src->maxretry != dst->maxretry) /* NB: no bounds */ changed++; } if (changed) { /* * Copy new parameters in place and notify the * driver so it can push state to the device. */ for (mode = IEEE80211_MODE_11A; mode < nmodes; mode++) { if (isset(ic->ic_modecaps, mode)) vap->iv_txparms[mode] = parms.params[mode]; } /* XXX could be more intelligent, e.g. don't reset if setting not being used */ return ENETRESET; } return 0; } /* * Application Information Element support. */ static int setappie(struct ieee80211_appie **aie, const struct ieee80211req *ireq) { struct ieee80211_appie *app = *aie; struct ieee80211_appie *napp; int error; if (ireq->i_len == 0) { /* delete any existing ie */ if (app != NULL) { *aie = NULL; /* XXX racey */ free(app, M_80211_NODE_IE); } return 0; } if (!(2 <= ireq->i_len && ireq->i_len <= IEEE80211_MAX_APPIE)) return EINVAL; /* * Allocate a new appie structure and copy in the user data. * When done swap in the new structure. Note that we do not * guard against users holding a ref to the old structure; * this must be handled outside this code. 
* * XXX bad bad bad */ napp = (struct ieee80211_appie *) malloc( sizeof(struct ieee80211_appie) + ireq->i_len, M_80211_NODE_IE, M_NOWAIT); if (napp == NULL) return ENOMEM; /* XXX holding ic lock */ error = copyin(ireq->i_data, napp->ie_data, ireq->i_len); if (error) { free(napp, M_80211_NODE_IE); return error; } napp->ie_len = ireq->i_len; *aie = napp; if (app != NULL) free(app, M_80211_NODE_IE); return 0; } static void setwparsnie(struct ieee80211vap *vap, uint8_t *ie, int space) { /* validate data is present as best we can */ if (space == 0 || 2+ie[1] > space) return; if (ie[0] == IEEE80211_ELEMID_VENDOR) vap->iv_wpa_ie = ie; else if (ie[0] == IEEE80211_ELEMID_RSN) vap->iv_rsn_ie = ie; } static __noinline int ieee80211_ioctl_setappie_locked(struct ieee80211vap *vap, const struct ieee80211req *ireq, int fc0) { int error; IEEE80211_LOCK_ASSERT(vap->iv_ic); switch (fc0 & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_BEACON: if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_IBSS) { error = EINVAL; break; } error = setappie(&vap->iv_appie_beacon, ireq); if (error == 0) ieee80211_beacon_notify(vap, IEEE80211_BEACON_APPIE); break; case IEEE80211_FC0_SUBTYPE_PROBE_RESP: error = setappie(&vap->iv_appie_proberesp, ireq); break; case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: if (vap->iv_opmode == IEEE80211_M_HOSTAP) error = setappie(&vap->iv_appie_assocresp, ireq); else error = EINVAL; break; case IEEE80211_FC0_SUBTYPE_PROBE_REQ: error = setappie(&vap->iv_appie_probereq, ireq); break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: if (vap->iv_opmode == IEEE80211_M_STA) error = setappie(&vap->iv_appie_assocreq, ireq); else error = EINVAL; break; case (IEEE80211_APPIE_WPA & IEEE80211_FC0_SUBTYPE_MASK): error = setappie(&vap->iv_appie_wpa, ireq); if (error == 0) { /* * Must split single blob of data into separate * WPA and RSN ie's because they go in different * locations in the mgt frames. * XXX use IEEE80211_IOC_WPA2 so user code does split */ vap->iv_wpa_ie = NULL; vap->iv_rsn_ie = NULL; if (vap->iv_appie_wpa != NULL) { struct ieee80211_appie *appie = vap->iv_appie_wpa; uint8_t *data = appie->ie_data; /* XXX ie length validate is painful, cheat */ setwparsnie(vap, data, appie->ie_len); setwparsnie(vap, data + 2 + data[1], appie->ie_len - (2 + data[1])); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { /* * Must rebuild beacon frame as the update * mechanism doesn't handle WPA/RSN ie's. * Could extend it but it doesn't normally * change; this is just to deal with hostapd * plumbing the ie after the interface is up. 
*/ error = ENETRESET; } } break; default: error = EINVAL; break; } return error; } static __noinline int ieee80211_ioctl_setappie(struct ieee80211vap *vap, const struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; int error; uint8_t fc0; fc0 = ireq->i_val & 0xff; if ((fc0 & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) return EINVAL; /* NB: could check iv_opmode and reject but hardly worth the effort */ IEEE80211_LOCK(ic); error = ieee80211_ioctl_setappie_locked(vap, ireq, fc0); IEEE80211_UNLOCK(ic); return error; } static __noinline int ieee80211_ioctl_chanswitch(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_chanswitch_req csr; struct ieee80211_channel *c; int error; if (ireq->i_len != sizeof(csr)) return EINVAL; error = copyin(ireq->i_data, &csr, sizeof(csr)); if (error != 0) return error; /* XXX adhoc mode not supported */ if (vap->iv_opmode != IEEE80211_M_HOSTAP || (vap->iv_flags & IEEE80211_F_DOTH) == 0) return EOPNOTSUPP; c = ieee80211_find_channel(ic, csr.csa_chan.ic_freq, csr.csa_chan.ic_flags); if (c == NULL) return ENOENT; IEEE80211_LOCK(ic); if ((ic->ic_flags & IEEE80211_F_CSAPENDING) == 0) ieee80211_csa_startswitch(ic, c, csr.csa_mode, csr.csa_count); else if (csr.csa_count == 0) ieee80211_csa_cancelswitch(ic); else error = EBUSY; IEEE80211_UNLOCK(ic); return error; } static int ieee80211_scanreq(struct ieee80211vap *vap, struct ieee80211_scan_req *sr) { #define IEEE80211_IOC_SCAN_FLAGS \ (IEEE80211_IOC_SCAN_NOPICK | IEEE80211_IOC_SCAN_ACTIVE | \ IEEE80211_IOC_SCAN_PICK1ST | IEEE80211_IOC_SCAN_BGSCAN | \ IEEE80211_IOC_SCAN_ONCE | IEEE80211_IOC_SCAN_NOBCAST | \ IEEE80211_IOC_SCAN_NOJOIN | IEEE80211_IOC_SCAN_FLUSH | \ IEEE80211_IOC_SCAN_CHECK) struct ieee80211com *ic = vap->iv_ic; int error, i; /* convert duration */ if (sr->sr_duration == IEEE80211_IOC_SCAN_FOREVER) sr->sr_duration = IEEE80211_SCAN_FOREVER; else { if (sr->sr_duration < IEEE80211_IOC_SCAN_DURATION_MIN || sr->sr_duration > IEEE80211_IOC_SCAN_DURATION_MAX) return EINVAL; sr->sr_duration = msecs_to_ticks(sr->sr_duration); if (sr->sr_duration < 1) sr->sr_duration = 1; } /* convert min/max channel dwell */ if (sr->sr_mindwell != 0) { sr->sr_mindwell = msecs_to_ticks(sr->sr_mindwell); if (sr->sr_mindwell < 1) sr->sr_mindwell = 1; } if (sr->sr_maxdwell != 0) { sr->sr_maxdwell = msecs_to_ticks(sr->sr_maxdwell); if (sr->sr_maxdwell < 1) sr->sr_maxdwell = 1; } /* NB: silently reduce ssid count to what is supported */ if (sr->sr_nssid > IEEE80211_SCAN_MAX_SSID) sr->sr_nssid = IEEE80211_SCAN_MAX_SSID; for (i = 0; i < sr->sr_nssid; i++) if (sr->sr_ssid[i].len > IEEE80211_NWID_LEN) return EINVAL; /* cleanse flags just in case, could reject if invalid flags */ sr->sr_flags &= IEEE80211_IOC_SCAN_FLAGS; /* * Add an implicit NOPICK if the vap is not marked UP. This * allows applications to scan without joining a bss (or picking * a channel and setting up a bss) and without forcing manual * roaming mode--you just need to mark the parent device UP. */ if ((vap->iv_ifp->if_flags & IFF_UP) == 0) sr->sr_flags |= IEEE80211_IOC_SCAN_NOPICK; IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s: flags 0x%x%s duration 0x%x mindwell %u maxdwell %u nssid %d\n", __func__, sr->sr_flags, (vap->iv_ifp->if_flags & IFF_UP) == 0 ? 
" (!IFF_UP)" : "", sr->sr_duration, sr->sr_mindwell, sr->sr_maxdwell, sr->sr_nssid); /* * If we are in INIT state then the driver has never had a chance * to setup hardware state to do a scan; we must use the state * machine to get us up to the SCAN state but once we reach SCAN * state we then want to use the supplied params. Stash the * parameters in the vap and mark IEEE80211_FEXT_SCANREQ; the * state machines will recognize this and use the stashed params * to issue the scan request. * * Otherwise just invoke the scan machinery directly. */ IEEE80211_LOCK(ic); if (vap->iv_state == IEEE80211_S_INIT) { /* NB: clobbers previous settings */ vap->iv_scanreq_flags = sr->sr_flags; vap->iv_scanreq_duration = sr->sr_duration; vap->iv_scanreq_nssid = sr->sr_nssid; for (i = 0; i < sr->sr_nssid; i++) { vap->iv_scanreq_ssid[i].len = sr->sr_ssid[i].len; memcpy(vap->iv_scanreq_ssid[i].ssid, sr->sr_ssid[i].ssid, sr->sr_ssid[i].len); } vap->iv_flags_ext |= IEEE80211_FEXT_SCANREQ; IEEE80211_UNLOCK(ic); ieee80211_new_state(vap, IEEE80211_S_SCAN, 0); } else { vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ; IEEE80211_UNLOCK(ic); if (sr->sr_flags & IEEE80211_IOC_SCAN_CHECK) { error = ieee80211_check_scan(vap, sr->sr_flags, sr->sr_duration, sr->sr_mindwell, sr->sr_maxdwell, sr->sr_nssid, /* NB: cheat, we assume structures are compatible */ (const struct ieee80211_scan_ssid *) &sr->sr_ssid[0]); } else { error = ieee80211_start_scan(vap, sr->sr_flags, sr->sr_duration, sr->sr_mindwell, sr->sr_maxdwell, sr->sr_nssid, /* NB: cheat, we assume structures are compatible */ (const struct ieee80211_scan_ssid *) &sr->sr_ssid[0]); } if (error == 0) return EINPROGRESS; } return 0; #undef IEEE80211_IOC_SCAN_FLAGS } static __noinline int ieee80211_ioctl_scanreq(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_scan_req sr; /* XXX off stack? */ int error; /* NB: parent must be running */ if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return ENXIO; if (ireq->i_len != sizeof(sr)) return EINVAL; error = copyin(ireq->i_data, &sr, sizeof(sr)); if (error != 0) return error; return ieee80211_scanreq(vap, &sr); } static __noinline int ieee80211_ioctl_setstavlan(struct ieee80211vap *vap, struct ieee80211req *ireq) { struct ieee80211_node *ni; struct ieee80211req_sta_vlan vlan; int error; if (ireq->i_len != sizeof(vlan)) return EINVAL; error = copyin(ireq->i_data, &vlan, sizeof(vlan)); if (error != 0) return error; if (!IEEE80211_ADDR_EQ(vlan.sv_macaddr, zerobssid)) { ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, vlan.sv_macaddr); if (ni == NULL) return ENOENT; } else ni = ieee80211_ref_node(vap->iv_bss); ni->ni_vlan = vlan.sv_vlan; ieee80211_free_node(ni); return error; } static int isvap11g(const struct ieee80211vap *vap) { const struct ieee80211_node *bss = vap->iv_bss; return bss->ni_chan != IEEE80211_CHAN_ANYC && IEEE80211_IS_CHAN_ANYG(bss->ni_chan); } static int isvapht(const struct ieee80211vap *vap) { const struct ieee80211_node *bss = vap->iv_bss; return bss->ni_chan != IEEE80211_CHAN_ANYC && IEEE80211_IS_CHAN_HT(bss->ni_chan); } /* * Dummy ioctl set handler so the linker set is defined. 
*/ static int dummy_ioctl_set(struct ieee80211vap *vap, struct ieee80211req *ireq) { return ENOSYS; } IEEE80211_IOCTL_SET(dummy, dummy_ioctl_set); static int ieee80211_ioctl_setdefault(struct ieee80211vap *vap, struct ieee80211req *ireq) { ieee80211_ioctl_setfunc * const *set; int error; SET_FOREACH(set, ieee80211_ioctl_setset) { error = (*set)(vap, ireq); if (error != ENOSYS) return error; } return EINVAL; } static __noinline int ieee80211_ioctl_set80211(struct ieee80211vap *vap, u_long cmd, struct ieee80211req *ireq) { struct ieee80211com *ic = vap->iv_ic; int error; const struct ieee80211_authenticator *auth; uint8_t tmpkey[IEEE80211_KEYBUF_SIZE]; char tmpssid[IEEE80211_NWID_LEN]; uint8_t tmpbssid[IEEE80211_ADDR_LEN]; struct ieee80211_key *k; u_int kid; uint32_t flags; error = 0; switch (ireq->i_type) { case IEEE80211_IOC_SSID: if (ireq->i_val != 0 || ireq->i_len > IEEE80211_NWID_LEN) return EINVAL; error = copyin(ireq->i_data, tmpssid, ireq->i_len); if (error) break; memset(vap->iv_des_ssid[0].ssid, 0, IEEE80211_NWID_LEN); vap->iv_des_ssid[0].len = ireq->i_len; memcpy(vap->iv_des_ssid[0].ssid, tmpssid, ireq->i_len); vap->iv_des_nssid = (ireq->i_len > 0); error = ENETRESET; break; case IEEE80211_IOC_WEP: switch (ireq->i_val) { case IEEE80211_WEP_OFF: vap->iv_flags &= ~IEEE80211_F_PRIVACY; vap->iv_flags &= ~IEEE80211_F_DROPUNENC; break; case IEEE80211_WEP_ON: vap->iv_flags |= IEEE80211_F_PRIVACY; vap->iv_flags |= IEEE80211_F_DROPUNENC; break; case IEEE80211_WEP_MIXED: vap->iv_flags |= IEEE80211_F_PRIVACY; vap->iv_flags &= ~IEEE80211_F_DROPUNENC; break; } error = ENETRESET; break; case IEEE80211_IOC_WEPKEY: kid = (u_int) ireq->i_val; if (kid >= IEEE80211_WEP_NKID) return EINVAL; k = &vap->iv_nw_keys[kid]; if (ireq->i_len == 0) { /* zero-len =>'s delete any existing key */ (void) ieee80211_crypto_delkey(vap, k); break; } if (ireq->i_len > sizeof(tmpkey)) return EINVAL; memset(tmpkey, 0, sizeof(tmpkey)); error = copyin(ireq->i_data, tmpkey, ireq->i_len); if (error) break; ieee80211_key_update_begin(vap); k->wk_keyix = kid; /* NB: force fixed key id */ if (ieee80211_crypto_newkey(vap, IEEE80211_CIPHER_WEP, IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV, k)) { k->wk_keylen = ireq->i_len; memcpy(k->wk_key, tmpkey, sizeof(tmpkey)); IEEE80211_ADDR_COPY(k->wk_macaddr, vap->iv_myaddr); if (!ieee80211_crypto_setkey(vap, k)) error = EINVAL; } else error = EINVAL; ieee80211_key_update_end(vap); break; case IEEE80211_IOC_WEPTXKEY: kid = (u_int) ireq->i_val; if (kid >= IEEE80211_WEP_NKID && (uint16_t) kid != IEEE80211_KEYIX_NONE) return EINVAL; vap->iv_def_txkey = kid; break; case IEEE80211_IOC_AUTHMODE: switch (ireq->i_val) { case IEEE80211_AUTH_WPA: case IEEE80211_AUTH_8021X: /* 802.1x */ case IEEE80211_AUTH_OPEN: /* open */ case IEEE80211_AUTH_SHARED: /* shared-key */ case IEEE80211_AUTH_AUTO: /* auto */ auth = ieee80211_authenticator_get(ireq->i_val); if (auth == NULL) return EINVAL; break; default: return EINVAL; } switch (ireq->i_val) { case IEEE80211_AUTH_WPA: /* WPA w/ 802.1x */ vap->iv_flags |= IEEE80211_F_PRIVACY; ireq->i_val = IEEE80211_AUTH_8021X; break; case IEEE80211_AUTH_OPEN: /* open */ vap->iv_flags &= ~(IEEE80211_F_WPA|IEEE80211_F_PRIVACY); break; case IEEE80211_AUTH_SHARED: /* shared-key */ case IEEE80211_AUTH_8021X: /* 802.1x */ vap->iv_flags &= ~IEEE80211_F_WPA; /* both require a key so mark the PRIVACY capability */ vap->iv_flags |= IEEE80211_F_PRIVACY; break; case IEEE80211_AUTH_AUTO: /* auto */ vap->iv_flags &= ~IEEE80211_F_WPA; /* XXX PRIVACY handling? 
*/ /* XXX what's the right way to do this? */ break; } /* NB: authenticator attach/detach happens on state change */ vap->iv_bss->ni_authmode = ireq->i_val; /* XXX mixed/mode/usage? */ vap->iv_auth = auth; error = ENETRESET; break; case IEEE80211_IOC_CHANNEL: error = ieee80211_ioctl_setchannel(vap, ireq); break; case IEEE80211_IOC_POWERSAVE: switch (ireq->i_val) { case IEEE80211_POWERSAVE_OFF: if (vap->iv_flags & IEEE80211_F_PMGTON) { ieee80211_syncflag(vap, -IEEE80211_F_PMGTON); error = ERESTART; } break; case IEEE80211_POWERSAVE_ON: if ((vap->iv_caps & IEEE80211_C_PMGT) == 0) error = EOPNOTSUPP; else if ((vap->iv_flags & IEEE80211_F_PMGTON) == 0) { ieee80211_syncflag(vap, IEEE80211_F_PMGTON); error = ERESTART; } break; default: error = EINVAL; break; } break; case IEEE80211_IOC_POWERSAVESLEEP: if (ireq->i_val < 0) return EINVAL; ic->ic_lintval = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_RTSTHRESHOLD: if (!(IEEE80211_RTS_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_RTS_MAX)) return EINVAL; vap->iv_rtsthreshold = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_PROTMODE: if (ireq->i_val > IEEE80211_PROT_RTSCTS) return EINVAL; ic->ic_protmode = (enum ieee80211_protmode)ireq->i_val; /* NB: if not operating in 11g this can wait */ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC && IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan)) error = ERESTART; break; case IEEE80211_IOC_TXPOWER: if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0) return EOPNOTSUPP; if (!(IEEE80211_TXPOWER_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_TXPOWER_MAX)) return EINVAL; ic->ic_txpowlimit = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_ROAMING: if (!(IEEE80211_ROAMING_DEVICE <= ireq->i_val && ireq->i_val <= IEEE80211_ROAMING_MANUAL)) return EINVAL; vap->iv_roaming = (enum ieee80211_roamingmode)ireq->i_val; /* XXXX reset? */ break; case IEEE80211_IOC_PRIVACY: if (ireq->i_val) { /* XXX check for key state? */ vap->iv_flags |= IEEE80211_F_PRIVACY; } else vap->iv_flags &= ~IEEE80211_F_PRIVACY; /* XXX ERESTART? */ break; case IEEE80211_IOC_DROPUNENCRYPTED: if (ireq->i_val) vap->iv_flags |= IEEE80211_F_DROPUNENC; else vap->iv_flags &= ~IEEE80211_F_DROPUNENC; /* XXX ERESTART? */ break; case IEEE80211_IOC_WPAKEY: error = ieee80211_ioctl_setkey(vap, ireq); break; case IEEE80211_IOC_DELKEY: error = ieee80211_ioctl_delkey(vap, ireq); break; case IEEE80211_IOC_MLME: error = ieee80211_ioctl_setmlme(vap, ireq); break; case IEEE80211_IOC_COUNTERMEASURES: if (ireq->i_val) { if ((vap->iv_flags & IEEE80211_F_WPA) == 0) return EOPNOTSUPP; vap->iv_flags |= IEEE80211_F_COUNTERM; } else vap->iv_flags &= ~IEEE80211_F_COUNTERM; /* XXX ERESTART? 
*/ break; case IEEE80211_IOC_WPA: if (ireq->i_val > 3) return EINVAL; /* XXX verify ciphers available */ flags = vap->iv_flags & ~IEEE80211_F_WPA; switch (ireq->i_val) { case 1: if (!(vap->iv_caps & IEEE80211_C_WPA1)) return EOPNOTSUPP; flags |= IEEE80211_F_WPA1; break; case 2: if (!(vap->iv_caps & IEEE80211_C_WPA2)) return EOPNOTSUPP; flags |= IEEE80211_F_WPA2; break; case 3: if ((vap->iv_caps & IEEE80211_C_WPA) != IEEE80211_C_WPA) return EOPNOTSUPP; flags |= IEEE80211_F_WPA1 | IEEE80211_F_WPA2; break; default: /* Can't set any -> error */ return EOPNOTSUPP; } vap->iv_flags = flags; error = ERESTART; /* NB: can change beacon frame */ break; case IEEE80211_IOC_WME: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_WME) == 0) return EOPNOTSUPP; ieee80211_syncflag(vap, IEEE80211_F_WME); } else ieee80211_syncflag(vap, -IEEE80211_F_WME); error = ERESTART; /* NB: can change beacon frame */ break; case IEEE80211_IOC_HIDESSID: if (ireq->i_val) vap->iv_flags |= IEEE80211_F_HIDESSID; else vap->iv_flags &= ~IEEE80211_F_HIDESSID; error = ERESTART; /* XXX ENETRESET? */ break; case IEEE80211_IOC_APBRIDGE: if (ireq->i_val == 0) vap->iv_flags |= IEEE80211_F_NOBRIDGE; else vap->iv_flags &= ~IEEE80211_F_NOBRIDGE; break; case IEEE80211_IOC_BSSID: if (ireq->i_len != sizeof(tmpbssid)) return EINVAL; error = copyin(ireq->i_data, tmpbssid, ireq->i_len); if (error) break; IEEE80211_ADDR_COPY(vap->iv_des_bssid, tmpbssid); if (IEEE80211_ADDR_EQ(vap->iv_des_bssid, zerobssid)) vap->iv_flags &= ~IEEE80211_F_DESBSSID; else vap->iv_flags |= IEEE80211_F_DESBSSID; error = ENETRESET; break; case IEEE80211_IOC_CHANLIST: error = ieee80211_ioctl_setchanlist(vap, ireq); break; #define OLD_IEEE80211_IOC_SCAN_REQ 23 #ifdef OLD_IEEE80211_IOC_SCAN_REQ case OLD_IEEE80211_IOC_SCAN_REQ: IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s: active scan request\n", __func__); /* * If we are in INIT state then the driver has never * had a chance to setup hardware state to do a scan; * use the state machine to get us up the SCAN state. * Otherwise just invoke the scan machinery to start * a one-time scan. 
*/ if (vap->iv_state == IEEE80211_S_INIT) ieee80211_new_state(vap, IEEE80211_S_SCAN, 0); else (void) ieee80211_start_scan(vap, IEEE80211_SCAN_ACTIVE | IEEE80211_SCAN_NOPICK | IEEE80211_SCAN_ONCE, IEEE80211_SCAN_FOREVER, 0, 0, /* XXX use ioctl params */ vap->iv_des_nssid, vap->iv_des_ssid); break; #endif /* OLD_IEEE80211_IOC_SCAN_REQ */ case IEEE80211_IOC_SCAN_REQ: error = ieee80211_ioctl_scanreq(vap, ireq); break; case IEEE80211_IOC_SCAN_CANCEL: IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s: cancel scan\n", __func__); ieee80211_cancel_scan(vap); break; case IEEE80211_IOC_HTCONF: if (ireq->i_val & 1) ieee80211_syncflag_ht(vap, IEEE80211_FHT_HT); else ieee80211_syncflag_ht(vap, -IEEE80211_FHT_HT); if (ireq->i_val & 2) ieee80211_syncflag_ht(vap, IEEE80211_FHT_USEHT40); else ieee80211_syncflag_ht(vap, -IEEE80211_FHT_USEHT40); error = ENETRESET; break; case IEEE80211_IOC_ADDMAC: case IEEE80211_IOC_DELMAC: error = ieee80211_ioctl_macmac(vap, ireq); break; case IEEE80211_IOC_MACCMD: error = ieee80211_ioctl_setmaccmd(vap, ireq); break; case IEEE80211_IOC_STA_STATS: error = ieee80211_ioctl_setstastats(vap, ireq); break; case IEEE80211_IOC_STA_TXPOW: error = ieee80211_ioctl_setstatxpow(vap, ireq); break; case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */ case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */ case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */ case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */ case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */ case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (bss only) */ error = ieee80211_ioctl_setwmeparam(vap, ireq); break; case IEEE80211_IOC_DTIM_PERIOD: if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_MBSS && vap->iv_opmode != IEEE80211_M_IBSS) return EINVAL; if (IEEE80211_DTIM_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_DTIM_MAX) { vap->iv_dtim_period = ireq->i_val; error = ENETRESET; /* requires restart */ } else error = EINVAL; break; case IEEE80211_IOC_BEACON_INTERVAL: if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_MBSS && vap->iv_opmode != IEEE80211_M_IBSS) return EINVAL; if (IEEE80211_BINTVAL_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_BINTVAL_MAX) { ic->ic_bintval = ireq->i_val; error = ENETRESET; /* requires restart */ } else error = EINVAL; break; case IEEE80211_IOC_PUREG: if (ireq->i_val) vap->iv_flags |= IEEE80211_F_PUREG; else vap->iv_flags &= ~IEEE80211_F_PUREG; /* NB: reset only if we're operating on an 11g channel */ if (isvap11g(vap)) error = ENETRESET; break; case IEEE80211_IOC_BGSCAN: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_BGSCAN) == 0) return EOPNOTSUPP; vap->iv_flags |= IEEE80211_F_BGSCAN; } else vap->iv_flags &= ~IEEE80211_F_BGSCAN; break; case IEEE80211_IOC_BGSCAN_IDLE: if (ireq->i_val >= IEEE80211_BGSCAN_IDLE_MIN) vap->iv_bgscanidle = ireq->i_val*hz/1000; else error = EINVAL; break; case IEEE80211_IOC_BGSCAN_INTERVAL: if (ireq->i_val >= IEEE80211_BGSCAN_INTVAL_MIN) vap->iv_bgscanintvl = ireq->i_val*hz; else error = EINVAL; break; case IEEE80211_IOC_SCANVALID: if (ireq->i_val >= IEEE80211_SCAN_VALID_MIN) vap->iv_scanvalid = ireq->i_val*hz; else error = EINVAL; break; case IEEE80211_IOC_FRAGTHRESHOLD: if ((vap->iv_caps & IEEE80211_C_TXFRAG) == 0 && ireq->i_val != IEEE80211_FRAG_MAX) return EOPNOTSUPP; if (!(IEEE80211_FRAG_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_FRAG_MAX)) return EINVAL; vap->iv_fragthreshold = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_BURST: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_BURST) == 0) return EOPNOTSUPP; 
ieee80211_syncflag(vap, IEEE80211_F_BURST); } else ieee80211_syncflag(vap, -IEEE80211_F_BURST); error = ERESTART; break; case IEEE80211_IOC_BMISSTHRESHOLD: if (!(IEEE80211_HWBMISS_MIN <= ireq->i_val && ireq->i_val <= IEEE80211_HWBMISS_MAX)) return EINVAL; vap->iv_bmissthreshold = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_CURCHAN: error = ieee80211_ioctl_setcurchan(vap, ireq); break; case IEEE80211_IOC_SHORTGI: if (ireq->i_val) { #define IEEE80211_HTCAP_SHORTGI \ (IEEE80211_HTCAP_SHORTGI20 | IEEE80211_HTCAP_SHORTGI40) if (((ireq->i_val ^ vap->iv_htcaps) & IEEE80211_HTCAP_SHORTGI) != 0) return EINVAL; if (ireq->i_val & IEEE80211_HTCAP_SHORTGI20) vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI20; if (ireq->i_val & IEEE80211_HTCAP_SHORTGI40) vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI40; #undef IEEE80211_HTCAP_SHORTGI } else vap->iv_flags_ht &= ~(IEEE80211_FHT_SHORTGI20 | IEEE80211_FHT_SHORTGI40); error = ERESTART; break; case IEEE80211_IOC_AMPDU: if (ireq->i_val && (vap->iv_htcaps & IEEE80211_HTC_AMPDU) == 0) return EINVAL; if (ireq->i_val & 1) vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_TX; else vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_TX; if (ireq->i_val & 2) vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_RX; else vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_RX; /* NB: reset only if we're operating on an 11n channel */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_AMPDU_LIMIT: if (!(IEEE80211_HTCAP_MAXRXAMPDU_8K <= ireq->i_val && ireq->i_val <= IEEE80211_HTCAP_MAXRXAMPDU_64K)) return EINVAL; if (vap->iv_opmode == IEEE80211_M_HOSTAP) vap->iv_ampdu_rxmax = ireq->i_val; else vap->iv_ampdu_limit = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_AMPDU_DENSITY: if (!(IEEE80211_HTCAP_MPDUDENSITY_NA <= ireq->i_val && ireq->i_val <= IEEE80211_HTCAP_MPDUDENSITY_16)) return EINVAL; vap->iv_ampdu_density = ireq->i_val; error = ERESTART; break; case IEEE80211_IOC_AMSDU: if (ireq->i_val && (vap->iv_htcaps & IEEE80211_HTC_AMSDU) == 0) return EINVAL; if (ireq->i_val & 1) vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_TX; else vap->iv_flags_ht &= ~IEEE80211_FHT_AMSDU_TX; if (ireq->i_val & 2) vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_RX; else vap->iv_flags_ht &= ~IEEE80211_FHT_AMSDU_RX; /* NB: reset only if we're operating on an 11n channel */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_AMSDU_LIMIT: /* XXX validate */ vap->iv_amsdu_limit = ireq->i_val; /* XXX truncation? 
*/ break; case IEEE80211_IOC_PUREN: if (ireq->i_val) { if ((vap->iv_flags_ht & IEEE80211_FHT_HT) == 0) return EINVAL; vap->iv_flags_ht |= IEEE80211_FHT_PUREN; } else vap->iv_flags_ht &= ~IEEE80211_FHT_PUREN; /* NB: reset only if we're operating on an 11n channel */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_DOTH: if (ireq->i_val) { #if 0 /* XXX no capability */ if ((vap->iv_caps & IEEE80211_C_DOTH) == 0) return EOPNOTSUPP; #endif vap->iv_flags |= IEEE80211_F_DOTH; } else vap->iv_flags &= ~IEEE80211_F_DOTH; error = ENETRESET; break; case IEEE80211_IOC_REGDOMAIN: error = ieee80211_ioctl_setregdomain(vap, ireq); break; case IEEE80211_IOC_ROAM: error = ieee80211_ioctl_setroam(vap, ireq); break; case IEEE80211_IOC_TXPARAMS: error = ieee80211_ioctl_settxparams(vap, ireq); break; case IEEE80211_IOC_HTCOMPAT: if (ireq->i_val) { if ((vap->iv_flags_ht & IEEE80211_FHT_HT) == 0) return EOPNOTSUPP; vap->iv_flags_ht |= IEEE80211_FHT_HTCOMPAT; } else vap->iv_flags_ht &= ~IEEE80211_FHT_HTCOMPAT; /* NB: reset only if we're operating on an 11n channel */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_DWDS: if (ireq->i_val) { /* NB: DWDS only makes sense for WDS-capable devices */ if ((ic->ic_caps & IEEE80211_C_WDS) == 0) return EOPNOTSUPP; /* NB: DWDS is used only with ap+sta vaps */ if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_STA) return EINVAL; vap->iv_flags |= IEEE80211_F_DWDS; if (vap->iv_opmode == IEEE80211_M_STA) vap->iv_flags_ext |= IEEE80211_FEXT_4ADDR; } else { vap->iv_flags &= ~IEEE80211_F_DWDS; if (vap->iv_opmode == IEEE80211_M_STA) vap->iv_flags_ext &= ~IEEE80211_FEXT_4ADDR; } break; case IEEE80211_IOC_INACTIVITY: if (ireq->i_val) vap->iv_flags_ext |= IEEE80211_FEXT_INACT; else vap->iv_flags_ext &= ~IEEE80211_FEXT_INACT; break; case IEEE80211_IOC_APPIE: error = ieee80211_ioctl_setappie(vap, ireq); break; case IEEE80211_IOC_WPS: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_WPA) == 0) return EOPNOTSUPP; vap->iv_flags_ext |= IEEE80211_FEXT_WPS; } else vap->iv_flags_ext &= ~IEEE80211_FEXT_WPS; break; case IEEE80211_IOC_TSN: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_WPA) == 0) return EOPNOTSUPP; vap->iv_flags_ext |= IEEE80211_FEXT_TSN; } else vap->iv_flags_ext &= ~IEEE80211_FEXT_TSN; break; case IEEE80211_IOC_CHANSWITCH: error = ieee80211_ioctl_chanswitch(vap, ireq); break; case IEEE80211_IOC_DFS: if (ireq->i_val) { if ((vap->iv_caps & IEEE80211_C_DFS) == 0) return EOPNOTSUPP; /* NB: DFS requires 11h support */ if ((vap->iv_flags & IEEE80211_F_DOTH) == 0) return EINVAL; vap->iv_flags_ext |= IEEE80211_FEXT_DFS; } else vap->iv_flags_ext &= ~IEEE80211_FEXT_DFS; break; case IEEE80211_IOC_DOTD: if (ireq->i_val) vap->iv_flags_ext |= IEEE80211_FEXT_DOTD; else vap->iv_flags_ext &= ~IEEE80211_FEXT_DOTD; if (vap->iv_opmode == IEEE80211_M_STA) error = ENETRESET; break; case IEEE80211_IOC_HTPROTMODE: if (ireq->i_val > IEEE80211_PROT_RTSCTS) return EINVAL; ic->ic_htprotmode = ireq->i_val ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_NONE; /* NB: if not operating in 11n this can wait */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_STA_VLAN: error = ieee80211_ioctl_setstavlan(vap, ireq); break; case IEEE80211_IOC_SMPS: if ((ireq->i_val &~ IEEE80211_HTCAP_SMPS) != 0 || ireq->i_val == 0x0008) /* value of 2 is reserved */ return EINVAL; if (ireq->i_val != IEEE80211_HTCAP_SMPS_OFF && (vap->iv_htcaps & IEEE80211_HTC_SMPS) == 0) return EOPNOTSUPP; vap->iv_htcaps = (vap->iv_htcaps &~ IEEE80211_HTCAP_SMPS) | ireq->i_val; /* NB: if not operating in 11n this can wait */ if (isvapht(vap)) error = ERESTART; break; case IEEE80211_IOC_RIFS: if (ireq->i_val != 0) { if ((vap->iv_htcaps & IEEE80211_HTC_RIFS) == 0) return EOPNOTSUPP; vap->iv_flags_ht |= IEEE80211_FHT_RIFS; } else vap->iv_flags_ht &= ~IEEE80211_FHT_RIFS; /* NB: if not operating in 11n this can wait */ if (isvapht(vap)) error = ERESTART; break; default: error = ieee80211_ioctl_setdefault(vap, ireq); break; } /* * The convention is that ENETRESET means an operation * requires a complete re-initialization of the device (e.g. * changing something that affects the association state). * ERESTART means the request may be handled with only a * reload of the hardware state. We hand ERESTART requests * to the iv_reset callback so the driver can decide. If * a device does not fillin iv_reset then it defaults to one * that returns ENETRESET. Otherwise a driver may return * ENETRESET (in which case a full reset will be done) or * 0 to mean there's no need to do anything (e.g. when the * change has no effect on the driver/device). */ if (error == ERESTART) error = IFNET_IS_UP_RUNNING(vap->iv_ifp) ? vap->iv_reset(vap, ireq->i_type) : 0; if (error == ENETRESET) { /* XXX need to re-think AUTO handling */ if (IS_UP_AUTO(vap)) ieee80211_init(vap); error = 0; } return error; } /* * Rebuild the parent's multicast address list after an add/del * of a multicast address for a vap. We have no way to tell * what happened above to optimize the work so we purge the entire * list and rebuild from scratch. This is way expensive. * Note also the half-baked workaround for if_addmulti calling * back to the parent device; there's no way to insert mcast * entries quietly and/or cheaply. */ static void ieee80211_ioctl_updatemulti(struct ieee80211com *ic) { struct ifnet *parent = ic->ic_ifp; struct ieee80211vap *vap; void *ioctl; IEEE80211_LOCK(ic); if_delallmulti(parent); ioctl = parent->if_ioctl; /* XXX WAR if_allmulti */ parent->if_ioctl = NULL; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { struct ifnet *ifp = vap->iv_ifp; struct ifmultiaddr *ifma; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; (void) if_addmulti(parent, ifma->ifma_addr, NULL); } } parent->if_ioctl = ioctl; ieee80211_runtask(ic, &ic->ic_mcast_task); IEEE80211_UNLOCK(ic); } int ieee80211_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; int error = 0; struct ifreq *ifr; struct ifaddr *ifa; /* XXX */ switch (cmd) { case SIOCSIFFLAGS: IEEE80211_LOCK(ic); ieee80211_syncifflag_locked(ic, IFF_PROMISC); ieee80211_syncifflag_locked(ic, IFF_ALLMULTI); if (ifp->if_flags & IFF_UP) { /* * Bring ourself up unless we're already operational. * If we're the first vap and the parent is not up * then it will automatically be brought up as a * side-effect of bringing ourself up. 
		 */
			if (vap->iv_state == IEEE80211_S_INIT)
				ieee80211_start_locked(vap);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/*
			 * Stop ourself.  If we are the last vap to be
			 * marked down the parent will also be taken down.
			 */
			ieee80211_stop_locked(vap);
		}
		IEEE80211_UNLOCK(ic);
		/* Wait for parent ioctl handler if it was queued */
		ieee80211_waitfor_parent(ic);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ieee80211_ioctl_updatemulti(ic);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifr = (struct ifreq *)data;
		error = ifmedia_ioctl(ifp, ifr, &vap->iv_media, cmd);
		break;
	case SIOCG80211:
		error = ieee80211_ioctl_get80211(vap, cmd,
		    (struct ieee80211req *) data);
		break;
	case SIOCS80211:
		error = priv_check(curthread, PRIV_NET80211_MANAGE);
		if (error == 0)
			error = ieee80211_ioctl_set80211(vap, cmd,
			    (struct ieee80211req *) data);
		break;
	case SIOCG80211STATS:
		ifr = (struct ifreq *)data;
		copyout(&vap->iv_stats, ifr->ifr_data, sizeof (vap->iv_stats));
		break;
	case SIOCSIFMTU:
		ifr = (struct ifreq *)data;
		if (!(IEEE80211_MTU_MIN <= ifr->ifr_mtu &&
		    ifr->ifr_mtu <= IEEE80211_MTU_MAX))
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFADDR:
		/*
		 * XXX Handle this directly so we can suppress if_init calls.
		 * XXX This should be done in ether_ioctl but for the moment
		 * XXX there are too many other parts of the system that
		 * XXX set IFF_UP and so suppress if_init being called when
		 * XXX it should be.
		 */
		ifa = (struct ifaddr *) data;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if ((ifp->if_flags & IFF_UP) == 0) {
				ifp->if_flags |= IFF_UP;
				ifp->if_init(ifp->if_softc);
			}
			arp_ifinit(ifp, ifa);
			break;
#endif
#ifdef IPX
		/*
		 * XXX - This code is probably wrong,
		 *	 but has been copied many times.
		 */
		case AF_IPX: {
			struct ipx_addr *ina = &(IA_SIPX(ifa)->sipx_addr);

			if (ipx_nullhost(*ina))
				ina->x_host = *(union ipx_host *)
				    IF_LLADDR(ifp);
			else
				bcopy((caddr_t) ina->x_host.c_host,
				      (caddr_t) IF_LLADDR(ifp),
				      ETHER_ADDR_LEN);
			/* fall thru... */
		}
#endif
		default:
			if ((ifp->if_flags & IFF_UP) == 0) {
				ifp->if_flags |= IFF_UP;
				ifp->if_init(ifp->if_softc);
			}
			break;
		}
		break;
	/* Pass NDIS ioctls up to the driver */
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
	case SIOCGPRIVATE_0: {
		struct ifnet *parent = vap->iv_ic->ic_ifp;
		error = parent->if_ioctl(parent, cmd, data);
		break;
	}
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
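/*
 * Editor's note -- illustrative sketch, not part of the FreeBSD source above:
 * the ENETRESET/ERESTART convention documented in ieee80211_ioctl_set80211()
 * implies a driver-supplied iv_reset callback shaped roughly like the one
 * below (net80211's own fallback simply returns ENETRESET).  The driver name
 * mydrv and the case it handles are hypothetical; only the return-value
 * convention is taken from the code above.
 */
#if 0	/* example only */
static int
mydrv_reset(struct ieee80211vap *vap, u_long cmd)
{
	switch (cmd) {
	case IEEE80211_IOC_RTSTHRESHOLD:
		/* push vap->iv_rtsthreshold to the hardware here */
		return 0;		/* handled live, no re-init needed */
	default:
		return ENETRESET;	/* anything else forces a full re-init */
	}
}
#endif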
dcui/FreeBSD-9.3_kernel
sys/net80211/ieee80211_ioctl.c
C
bsd-3-clause
97,910
/* * Copyright (c) 2012 The Native Client Authors. All rights reserved. * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. */ #include "native_client/src/include/portability_string.h" #include "native_client/src/include/nacl_macros.h" #include "native_client/src/include/nacl_platform.h" #include "native_client/src/shared/platform/nacl_check.h" #include "native_client/src/trusted/service_runtime/nacl_syscall_asm_symbols.h" #include "native_client/src/trusted/service_runtime/nacl_globals.h" #include "native_client/src/trusted/service_runtime/sel_ldr.h" #include "native_client/src/trusted/service_runtime/sel_memory.h" #include "native_client/src/trusted/service_runtime/springboard.h" #include "native_client/src/trusted/service_runtime/arch/x86/sel_ldr_x86.h" #include "native_client/src/trusted/service_runtime/arch/x86_64/tramp_64.h" static uintptr_t AddDispatchThunk(uintptr_t *next_addr, uintptr_t target_routine) { struct NaClPatchInfo patch_info; struct NaClPatch jmp_target; jmp_target.target = (((uintptr_t) &NaClDispatchThunk_jmp_target) - sizeof(uintptr_t)); jmp_target.value = target_routine; NaClPatchInfoCtor(&patch_info); patch_info.abs64 = &jmp_target; patch_info.num_abs64 = 1; patch_info.dst = *next_addr; patch_info.src = (uintptr_t) &NaClDispatchThunk; patch_info.nbytes = ((uintptr_t) &NaClDispatchThunkEnd - (uintptr_t) &NaClDispatchThunk); NaClApplyPatchToMemory(&patch_info); *next_addr += patch_info.nbytes; return patch_info.dst; } int NaClMakeDispatchThunk(struct NaClApp *nap) { int retval = 0; /* fail */ int error; void *thunk_addr = NULL; uintptr_t next_addr; uintptr_t dispatch_thunk = 0; uintptr_t get_tls_fast_path1 = 0; uintptr_t get_tls_fast_path2 = 0; NaClLog(2, "Entered NaClMakeDispatchThunk\n"); if (0 != nap->dispatch_thunk) { NaClLog(LOG_ERROR, " dispatch_thunk already initialized!\n"); return 1; } if (0 != (error = NaCl_page_alloc_randomized(&thunk_addr, NACL_MAP_PAGESIZE))) { NaClLog(LOG_INFO, "NaClMakeDispatchThunk::NaCl_page_alloc failed, errno %d\n", -error); retval = 0; goto cleanup; } NaClLog(2, "NaClMakeDispatchThunk: got addr 0x%"NACL_PRIxPTR"\n", (uintptr_t) thunk_addr); if (0 != (error = NaCl_mprotect(thunk_addr, NACL_MAP_PAGESIZE, PROT_READ | PROT_WRITE))) { NaClLog(LOG_INFO, "NaClMakeDispatchThunk::NaCl_mprotect r/w failed, errno %d\n", -error); retval = 0; goto cleanup; } NaClFillMemoryRegionWithHalt(thunk_addr, NACL_MAP_PAGESIZE); next_addr = (uintptr_t) thunk_addr; dispatch_thunk = AddDispatchThunk(&next_addr, (uintptr_t) &NaClSyscallSeg); get_tls_fast_path1 = AddDispatchThunk(&next_addr, (uintptr_t) &NaClGetTlsFastPath1); get_tls_fast_path2 = AddDispatchThunk(&next_addr, (uintptr_t) &NaClGetTlsFastPath2); if (0 != (error = NaCl_mprotect(thunk_addr, NACL_MAP_PAGESIZE, PROT_EXEC|PROT_READ))) { NaClLog(LOG_INFO, "NaClMakeDispatchThunk::NaCl_mprotect r/x failed, errno %d\n", -error); retval = 0; goto cleanup; } retval = 1; cleanup: if (0 == retval) { if (NULL != thunk_addr) { NaCl_page_free(thunk_addr, NACL_MAP_PAGESIZE); thunk_addr = NULL; } } else { nap->dispatch_thunk = dispatch_thunk; nap->get_tls_fast_path1 = get_tls_fast_path1; nap->get_tls_fast_path2 = get_tls_fast_path2; } return retval; } /* * Install a syscall trampoline at target_addr. NB: Thread-safe. 
*/ void NaClPatchOneTrampolineCall(uintptr_t call_target_addr, uintptr_t target_addr) { struct NaClPatchInfo patch_info; struct NaClPatch call_target; NaClLog(6, "call_target_addr = 0x%"NACL_PRIxPTR"\n", call_target_addr); CHECK(0 != call_target_addr); call_target.target = (((uintptr_t) &NaCl_trampoline_call_target) - sizeof(uintptr_t)); call_target.value = call_target_addr; NaClPatchInfoCtor(&patch_info); patch_info.abs64 = &call_target; patch_info.num_abs64 = 1; patch_info.dst = target_addr; patch_info.src = (uintptr_t) &NaCl_trampoline_code; patch_info.nbytes = ((uintptr_t) &NaCl_trampoline_code_end - (uintptr_t) &NaCl_trampoline_code); NaClApplyPatchToMemory(&patch_info); } void NaClPatchOneTrampoline(struct NaClApp *nap, uintptr_t target_addr) { uintptr_t call_target_addr; call_target_addr = nap->dispatch_thunk; NaClPatchOneTrampolineCall(call_target_addr, target_addr); } void NaClFillMemoryRegionWithHalt(void *start, size_t size) { CHECK(!(size % NACL_HALT_LEN)); /* Tell valgrind that this memory is accessible and undefined */ NACL_MAKE_MEM_UNDEFINED(start, size); memset(start, NACL_HALT_OPCODE, size); } void NaClFillTrampolineRegion(struct NaClApp *nap) { NaClFillMemoryRegionWithHalt( (void *) (nap->mem_start + NACL_TRAMPOLINE_START), NACL_TRAMPOLINE_SIZE); } void NaClLoadSpringboard(struct NaClApp *nap) { /* * There is no springboard for x86-64. */ UNREFERENCED_PARAMETER(nap); }
leighpauls/k2cro4
native_client/src/trusted/service_runtime/arch/x86_64/sel_ldr_x86_64.c
C
bsd-3-clause
5,566
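NaClMakeDispatchThunk above follows the usual write-then-execute discipline: map a page read/write, fill it with halt instructions, patch the thunk code in, then flip the protection to read/execute so the page is never writable and executable at once. A minimal POSIX sketch of that sequence, assuming Linux/BSD mmap/mprotect; the 0xF4 fill byte (x86 HLT) and the placeholder payload are illustrative, and none of the NaCl helpers are used.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long psz = sysconf(_SC_PAGESIZE);
    unsigned char code[] = { 0xc3 };   /* x86 'ret' -- placeholder payload */

    void *p = mmap(NULL, (size_t)psz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    memset(p, 0xF4, (size_t)psz);      /* fill with halts, as the loader does */
    memcpy(p, code, sizeof code);      /* "patch" the thunk into place */

    /* Drop write permission before the region ever becomes executable. */
    if (mprotect(p, (size_t)psz, PROT_READ | PROT_EXEC) != 0) {
        perror("mprotect");
        munmap(p, (size_t)psz);
        return 1;
    }
    printf("thunk page at %p is now R+X\n", p);
    munmap(p, (size_t)psz);
    return 0;
}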
//------------------------------------------------------------------------------ // GB_Descriptor_get: get the status of a descriptor //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // A descriptor modifies the behavior of a GraphBLAS operation. In the // current GraphBLAS spec, the following descriptor fields may be set. // Descriptor field: Descriptor value: // desc->out GxB_DEFAULT or GrB_REPLACE // GrB_REPLACE means that the output matrix C is cleared just // prior to writing results back into it, via C<Mask> = results. This // descriptor does not affect how C is used to compute the results. If // GxB_DEFAULT, then C is not cleared before doing C<Mask>=results. // desc->mask GxB_DEFAULT or GrB_SCMP // An optional 'write mask' defines how the results are to be written back // into C. The boolean Mask matrix has the same size as C (Mask is // typecasted to boolean if it has another type). If the Mask input to // the GraphBLAS method is NULL, then implicitly Mask(i,j)=1 for all i and // j. Let Z be the results to be written into C (the same dimension as // C). If desc->mask is GxB_DEFAULT, and Mask(i,j)=1, then C(i,j) is // over-written with Z(i,j). Otherwise, if Mask(i,j)=0 C(i,j) is left // unmodified (it remains an implicit zero if it is so, or its value is // unchanged if it has one). If desc->mask is GrB_SCMP, then the use of // Mask is negated: Mask(i,j)=0 means that C(i,j) is overwritten with // Z(i,j), and Mask(i,j)=1 means that C(i,j) is left unchanged. // Writing results Z into C via the Mask is written as C<Mask>=Z in // GraphBLAS notation. // Note that it is the value of Mask(i,j) that determines how C(i,j) is // overwritten. If the (i,j) entry is present in the Mask matrix data // structure but has a numerical value of zero, then it is the same as if // (i,j) is not present and thus implicitly zero. Both mean 'Mask(i,j)=0' // in the description above of how the Mask works. // desc->in0 and desc->in1 GxB_DEFAULT or GrB_TRAN // A GrB_Matrix passed as an input parameter to GraphBLAS methods can // optionally be transposed prior to use. desc->in0 always refers // to the first input to the method, and desc->in1 always refers to the // second one. // If the value of this descriptor is GxB_DEFAULT, then the matrix is used // as-is. Otherwise, it is transposed first. That is, the results are // the same as if the transpose of the matrix was passed to the method.
// desc->axb see GraphBLAS.h; can be: // GxB_DEFAULT automatic selection // GxB_AxB_GUSTAVSON gather-scatter saxpy method // GxB_AxB_HEAP heap-based saxpy method // GxB_AxB_DOT dot product #include "GB.h" GrB_Info GB_Descriptor_get // get the contents of a descriptor ( const GrB_Descriptor desc, // descriptor to query, may be NULL bool *C_replace, // if true replace C before C<Mask>=Z bool *Mask_comp, // if true use logical negation of Mask bool *In0_transpose, // if true transpose first input bool *In1_transpose, // if true transpose second input GrB_Desc_Value *AxB_method, // method for C=A*B GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- // desc may be null, but if not NULL it must be initialized GB_RETURN_IF_FAULTY (desc) ; //-------------------------------------------------------------------------- // get the contents of the descriptor //-------------------------------------------------------------------------- // default values if descriptor is NULL GrB_Desc_Value C_desc = GxB_DEFAULT ; GrB_Desc_Value Mask_desc = GxB_DEFAULT ; GrB_Desc_Value In0_desc = GxB_DEFAULT ; GrB_Desc_Value In1_desc = GxB_DEFAULT ; GrB_Desc_Value AxB_desc = GxB_DEFAULT ; // non-default descriptors if (desc != NULL) { // get the contents C_desc = desc->out ; // DEFAULT or REPLACE Mask_desc = desc->mask ; // DEFAULT or SCMP In0_desc = desc->in0 ; // DEFAULT or TRAN In1_desc = desc->in1 ; // DEFAULT or TRAN AxB_desc = desc->axb ; // DEFAULT, GUSTAVSON, HEAP, or DOT } // check for valid values of each descriptor field if (!(C_desc == GxB_DEFAULT || C_desc == GrB_REPLACE) || !(Mask_desc == GxB_DEFAULT || Mask_desc == GrB_SCMP) || !(In0_desc == GxB_DEFAULT || In0_desc == GrB_TRAN) || !(In1_desc == GxB_DEFAULT || In1_desc == GrB_TRAN) || !(AxB_desc == GxB_DEFAULT || AxB_desc == GxB_AxB_GUSTAVSON || AxB_desc == GxB_AxB_DOT || AxB_desc == GxB_AxB_HEAP)) { return (GB_ERROR (GrB_INVALID_OBJECT, (GB_LOG, "Descriptor invalid"))) ; } if (C_replace != NULL) { *C_replace = (C_desc == GrB_REPLACE) ; } if (Mask_comp != NULL) { *Mask_comp = (Mask_desc == GrB_SCMP) ; } if (In0_transpose != NULL) { *In0_transpose = (In0_desc == GrB_TRAN) ; } if (In1_transpose != NULL) { *In1_transpose = (In1_desc == GrB_TRAN) ; } if (AxB_method != NULL) { *AxB_method = AxB_desc ; } return (GrB_SUCCESS) ; }
jlblancoc/suitesparse-metis-for-windows
SuiteSparse/GraphBLAS/Source/GB_Descriptor_get.c
C
bsd-3-clause
5,829
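On the caller side, the fields that GB_Descriptor_get validates are normally set through the descriptor API. A sketch of that usage under the GraphBLAS spec of this era (GrB_Descriptor_new / GrB_Descriptor_set); the exact header name and link flags are assumptions, and error handling is omitted.

#include <stdio.h>
#include "GraphBLAS.h"

int main(void)
{
    GrB_init(GrB_NONBLOCKING);

    GrB_Descriptor desc;
    GrB_Descriptor_new(&desc);
    GrB_Descriptor_set(desc, GrB_OUTP, GrB_REPLACE); /* clear C first */
    GrB_Descriptor_set(desc, GrB_MASK, GrB_SCMP);    /* complement the mask */
    GrB_Descriptor_set(desc, GrB_INP0, GrB_TRAN);    /* use A' instead of A */

    /* Such a descriptor is then passed as the last argument of an
       operation, e.g. GrB_mxm(C, Mask, NULL, semiring, A, B, desc). */
    printf("descriptor configured\n");

    GrB_Descriptor_free(&desc);
    GrB_finalize();
    return 0;
}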
//------------------------------------------------------------------------------ // GB_AxB: hard-coded C=A*B and C<M>=A*B //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. // If this filename has a double underscore in its name ("__") then it has been // automatically constructed from Generator/GB_AxB.c, via the Source/axb*.m // scripts, and should not be edited. Edit the original source file instead. //------------------------------------------------------------------------------ #include "GB.h" #ifndef GBCOMPACT #include "GB_heap.h" #include "GB_AxB__semirings.h" // The C=A*B semiring is defined by the following types and operators: // A*B function (Gustavson): GB_AgusB__times_isgt_int32 // A'*B function (dot): GB_AdotB__times_isgt_int32 // A*B function (heap): GB_AheapB__times_isgt_int32 // Z type: int32_t (the type of C) // X type: int32_t (the type of x for z=mult(x,y)) // Y type: int32_t (the type of y for z=mult(x,y)) // handle flipxy: 0 (0 if mult(x,y) is commutative, 1 otherwise) // Identity: 1 (where cij *= identity does not change cij) // Multiply: z = x > y // Add: cij *= z #define GB_XTYPE \ int32_t #define GB_YTYPE \ int32_t #define GB_HANDLE_FLIPXY \ 0 #define GB_MULTOP(z,x,y) \ z = x > y //------------------------------------------------------------------------------ // C<M>=A*B and C=A*B: gather/scatter saxpy-based method (Gustavson) //------------------------------------------------------------------------------ #define GB_IDENTITY \ 1 // x [i] = y #define GB_COPY_SCALAR_TO_ARRAY(x,i,y,s) \ x [i] = y ; // x = y [i] #define GB_COPY_ARRAY_TO_SCALAR(x,y,i,s) \ GB_btype x = y [i] ; // x [i] = y [i] #define GB_COPY_ARRAY_TO_ARRAY(x,i,y,j,s) \ x [i] = y [j] ; // mult-add operation (no mask) #define GB_MULTADD_NOMASK \ { \ /* Sauna_Work [i] += A(i,k) * B(k,j) */ \ GB_atype aik = Ax [pA] ; \ int32_t t ; \ GB_MULTIPLY (t, aik, bkj) ; \ Sauna_Work [i] *= t ; \ } // mult-add operation (with mask) #define GB_MULTADD_WITH_MASK \ { \ /* Sauna_Work [i] += A(i,k) * B(k,j) */ \ GB_atype aik = Ax [pA] ; \ int32_t t ; \ GB_MULTIPLY (t, aik, bkj) ; \ if (mark == hiwater) \ { \ /* first time C(i,j) seen */ \ Sauna_Mark [i] = hiwater + 1 ; \ Sauna_Work [i] = t ; \ } \ else \ { \ /* C(i,j) seen before, update it */ \ Sauna_Work [i] *= t ; \ } \ } GrB_Info GB_AgusB__times_isgt_int32 ( GrB_Matrix C, const GrB_Matrix M, const GrB_Matrix A, const GrB_Matrix B, bool flipxy, // if true, A and B have been swapped GB_Sauna Sauna, // sparse accumulator GB_Context Context ) { int32_t *restrict Sauna_Work = Sauna->Sauna_Work ; // size C->vlen*zsize int32_t *restrict Cx = C->x ; GrB_Info info = GrB_SUCCESS ; #include "GB_AxB_Gustavson_flipxy.c" return (info) ; } //------------------------------------------------------------------------------ // C<M>=A'*B or C=A'*B: dot product //------------------------------------------------------------------------------ // get A(k,i) #define GB_DOT_GETA(pA) \ GB_atype aki = Ax [pA] ; // get B(k,j) #define GB_DOT_GETB(pB) \ GB_btype bkj = Bx [pB] ; // t = aki*bkj #define GB_DOT_MULT(bkj) \ int32_t t ; \ GB_MULTIPLY (t, aki, bkj) ; // cij += t #define GB_DOT_ADD \ cij *= t ; // cij = t #define GB_DOT_COPY \ cij = t ; // cij is not a pointer but a scalar; nothing to do #define GB_DOT_REACQUIRE ; // clear cij #define GB_DOT_CLEAR \ cij = 1 ; // save the value of C(i,j) #define GB_DOT_SAVE \ Cx
[cnz] = cij ; #define GB_DOT_WORK_TYPE \ GB_btype #define GB_DOT_WORK(k) Work [k] // Work [k] = Bx [pB] #define GB_DOT_SCATTER \ Work [k] = Bx [pB] ; GrB_Info GB_AdotB__times_isgt_int32 ( GrB_Matrix *Chandle, const GrB_Matrix M, const GrB_Matrix A, const GrB_Matrix B, bool flipxy, // if true, A and B have been swapped GB_Context Context ) { GrB_Matrix C = (*Chandle) ; int32_t *restrict Cx = C->x ; int32_t cij ; GrB_Info info = GrB_SUCCESS ; size_t bkj_size = B->type->size ; // no typecasting here #include "GB_AxB_dot_flipxy.c" return (info) ; } //------------------------------------------------------------------------------ // C<M>=A*B and C=A*B: heap saxpy-based method //------------------------------------------------------------------------------ #define GB_CIJ_GETB(pB) \ GB_btype bkj = Bx [pB] ; // C(i,j) = A(i,k) * bkj #define GB_CIJ_MULT(pA) \ { \ GB_atype aik = Ax [pA] ; \ GB_MULTIPLY (cij, aik, bkj) ; \ } // C(i,j) += A(i,k) * B(k,j) #define GB_CIJ_MULTADD(pA,pB) \ { \ GB_atype aik = Ax [pA] ; \ GB_btype bkj = Bx [pB] ; \ int32_t t ; \ GB_MULTIPLY (t, aik, bkj) ; \ cij *= t ; \ } // cij is not a pointer but a scalar; nothing to do #define GB_CIJ_REACQUIRE ; // cij = identity #define GB_CIJ_CLEAR \ cij = 1 ; // save the value of C(i,j) #define GB_CIJ_SAVE \ Cx [cnz] = cij ; GrB_Info GB_AheapB__times_isgt_int32 ( GrB_Matrix *Chandle, const GrB_Matrix M, const GrB_Matrix A, const GrB_Matrix B, bool flipxy, // if true, A and B have been swapped int64_t *restrict List, GB_pointer_pair *restrict pA_pair, GB_Element *restrict Heap, const int64_t bjnz_max, GB_Context Context ) { GrB_Matrix C = (*Chandle) ; int32_t *restrict Cx = C->x ; int32_t cij ; int64_t cvlen = C->vlen ; GrB_Info info = GrB_SUCCESS ; GB_CIJ_CLEAR ; #include "GB_AxB_heap_flipxy.c" return (info) ; } //------------------------------------------------------------------------------ // clear macro definitions //------------------------------------------------------------------------------ #undef GB_XTYPE #undef GB_YTYPE #undef GB_HANDLE_FLIPXY #undef GB_MULTOP #undef GB_IDENTITY #undef GB_COPY_SCALAR_TO_ARRAY #undef GB_COPY_ARRAY_TO_SCALAR #undef GB_COPY_ARRAY_TO_ARRAY #undef GB_MULTADD_NOMASK #undef GB_MULTADD_WITH_MASK #undef GB_DOT_GETA #undef GB_DOT_GETB #undef GB_DOT_MULT #undef GB_DOT_ADD #undef GB_DOT_COPY #undef GB_DOT_REACQUIRE #undef GB_DOT_CLEAR #undef GB_DOT_SAVE #undef GB_DOT_WORK_TYPE #undef GB_DOT_WORK #undef GB_DOT_SCATTER #undef GB_CIJ_GETB #undef GB_CIJ_MULT #undef GB_CIJ_MULTADD #undef GB_CIJ_REACQUIRE #undef GB_CIJ_CLEAR #undef GB_CIJ_SAVE #undef GB_MULTIPLY #endif
jlblancoc/suitesparse-metis-for-windows
SuiteSparse/GraphBLAS/Source/Generated/GB_AxB__times_isgt_int32.c
C
bsd-3-clause
8,225
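All three generated kernels above reduce to one scalar rule: the multiply is z = (x > y), the monoid update is cij *= z, and the identity is 1. A plain-C sketch of a single dot-product entry under that (times, isgt) semiring, without any of the sparse machinery; the input vectors are made up for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int32_t a[4] = { 5, 2, 9, 1 };   /* column i of A */
    int32_t b[4] = { 3, 7, 4, 0 };   /* column j of B */

    int32_t cij = 1;                 /* GB_DOT_CLEAR: cij = identity (1) */
    for (int k = 0; k < 4; k++) {
        int32_t t = (a[k] > b[k]);   /* GB_MULTOP: z = x > y */
        cij *= t;                    /* GB_DOT_ADD: cij *= t */
    }
    /* cij stays 1 only if a[k] > b[k] held for every k;
       here 5>3 holds but 2>7 fails, so the product collapses to 0 */
    printf("cij = %d\n", cij);       /* prints 0 */
    return 0;
}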
/* * Copyright (c) 2016, The OpenThread Authors. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the copyright holder nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * @file alarm.c * Platform abstraction for the alarm */ #include <openthread/platform/alarm-milli.h> #include "platform-da15000.h" #include "hw_timer0.h" static bool sIsRunning = false; static uint32_t sAlarm = 0; static uint32_t sCounter; volatile bool sAlarmFired = false; static void timer0_interrupt_cb(void) { sCounter++; } void da15000AlarmProcess(otInstance *aInstance) { if ((sIsRunning) && (sAlarm <= sCounter)) { sIsRunning = false; otPlatAlarmMilliFired(aInstance); } } void da15000AlarmInit(void) { hw_timer0_init(NULL); hw_timer0_set_clock_source(HW_TIMER0_CLK_SRC_FAST); hw_timer0_set_pwm_mode(HW_TIMER0_MODE_PWM); hw_timer0_set_fast_clock_div(HW_TIMER0_FAST_CLK_DIV_4); hw_timer0_set_t0_reload(0x07D0, 0x07D0); hw_timer0_register_int(timer0_interrupt_cb); hw_timer0_set_on_clock_div(false); } uint32_t otPlatAlarmMilliGetNow(void) { return sCounter; } void otPlatAlarmMilliStartAt(otInstance *aInstance, uint32_t t0, uint32_t dt) { OT_UNUSED_VARIABLE(aInstance); sAlarm = t0 + dt; sIsRunning = true; if (sCounter == 0) { hw_timer0_enable(); } hw_timer0_unfreeze(); } void otPlatAlarmMilliStop(otInstance *aInstance) { OT_UNUSED_VARIABLE(aInstance); sIsRunning = false; hw_timer0_freeze(); }
erja-gp/openthread
examples/platforms/da15000/alarm.c
C
bsd-3-clause
2,963
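The platform contract here is small: the timer ISR only advances a tick counter, and the alarm fires once sAlarm <= sCounter is observed from the main loop, which is exactly the comparison da15000AlarmProcess makes. A host-side simulation of that logic, with printf standing in for otPlatAlarmMilliFired and no OpenThread or DA15000 headers; the tick loop stands in for the hardware interrupt.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool     sIsRunning;
static uint32_t sAlarm;
static uint32_t sCounter;

static void alarm_start_at(uint32_t t0, uint32_t dt)
{
    sAlarm = t0 + dt;     /* same arithmetic as otPlatAlarmMilliStartAt */
    sIsRunning = true;
}

static void alarm_process(void)
{
    if (sIsRunning && (sAlarm <= sCounter)) {
        sIsRunning = false;
        printf("alarm fired at tick %u\n", (unsigned)sCounter);
    }
}

int main(void)
{
    alarm_start_at(0, 5);
    for (int i = 0; i < 10; i++) {
        sCounter++;       /* stands in for timer0_interrupt_cb */
        alarm_process();  /* stands in for the da15000AlarmProcess poll */
    }
    return 0;
}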
/******************************************************************************* * * Module Name: rscalc - Calculate stream and list lengths * ******************************************************************************/ /* * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <contrib/dev/acpica/include/acpi.h> #include <contrib/dev/acpica/include/accommon.h> #include <contrib/dev/acpica/include/acresrc.h> #include <contrib/dev/acpica/include/acnamesp.h> #define _COMPONENT ACPI_RESOURCES ACPI_MODULE_NAME ("rscalc") /* Local prototypes */ static UINT8 AcpiRsCountSetBits ( UINT16 BitField); static ACPI_RS_LENGTH AcpiRsStructOptionLength ( ACPI_RESOURCE_SOURCE *ResourceSource); static UINT32 AcpiRsStreamOptionLength ( UINT32 ResourceLength, UINT32 MinimumTotalLength); /******************************************************************************* * * FUNCTION: AcpiRsCountSetBits * * PARAMETERS: BitField - Field in which to count bits * * RETURN: Number of bits set within the field * * DESCRIPTION: Count the number of bits set in a resource field. Used for * (Short descriptor) interrupt and DMA lists. * ******************************************************************************/ static UINT8 AcpiRsCountSetBits ( UINT16 BitField) { UINT8 BitsSet; ACPI_FUNCTION_ENTRY (); for (BitsSet = 0; BitField; BitsSet++) { /* Zero the least significant bit that is set */ BitField &= (UINT16) (BitField - 1); } return (BitsSet); } /******************************************************************************* * * FUNCTION: AcpiRsStructOptionLength * * PARAMETERS: ResourceSource - Pointer to optional descriptor field * * RETURN: Status * * DESCRIPTION: Common code to handle optional ResourceSourceIndex and * ResourceSource fields in some Large descriptors. 
Used during * list-to-stream conversion * ******************************************************************************/ static ACPI_RS_LENGTH AcpiRsStructOptionLength ( ACPI_RESOURCE_SOURCE *ResourceSource) { ACPI_FUNCTION_ENTRY (); /* * If the ResourceSource string is valid, return the size of the string * (StringLength includes the NULL terminator) plus the size of the * ResourceSourceIndex (1). */ if (ResourceSource->StringPtr) { return ((ACPI_RS_LENGTH) (ResourceSource->StringLength + 1)); } return (0); } /******************************************************************************* * * FUNCTION: AcpiRsStreamOptionLength * * PARAMETERS: ResourceLength - Length from the resource header * MinimumTotalLength - Minimum length of this resource, before * any optional fields. Includes header size * * RETURN: Length of optional string (0 if no string present) * * DESCRIPTION: Common code to handle optional ResourceSourceIndex and * ResourceSource fields in some Large descriptors. Used during * stream-to-list conversion * ******************************************************************************/ static UINT32 AcpiRsStreamOptionLength ( UINT32 ResourceLength, UINT32 MinimumAmlResourceLength) { UINT32 StringLength = 0; ACPI_FUNCTION_ENTRY (); /* * The ResourceSourceIndex and ResourceSource are optional elements of * some Large-type resource descriptors. */ /* * If the length of the actual resource descriptor is greater than the * ACPI spec-defined minimum length, it means that a ResourceSourceIndex * exists and is followed by a (required) null terminated string. The * string length (including the null terminator) is the resource length * minus the minimum length, minus one byte for the ResourceSourceIndex * itself. */ if (ResourceLength > MinimumAmlResourceLength) { /* Compute the length of the optional string */ StringLength = ResourceLength - MinimumAmlResourceLength - 1; } /* * Round the length up to a multiple of the native word in order to * guarantee that the entire resource descriptor is native word aligned */ return ((UINT32) ACPI_ROUND_UP_TO_NATIVE_WORD (StringLength)); } /******************************************************************************* * * FUNCTION: AcpiRsGetAmlLength * * PARAMETERS: Resource - Pointer to the resource linked list * ResourceListSize - Size of the resource linked list * SizeNeeded - Where the required size is returned * * RETURN: Status * * DESCRIPTION: Takes a linked list of internal resource descriptors and * calculates the size buffer needed to hold the corresponding * external resource byte stream. * ******************************************************************************/ ACPI_STATUS AcpiRsGetAmlLength ( ACPI_RESOURCE *Resource, ACPI_SIZE ResourceListSize, ACPI_SIZE *SizeNeeded) { ACPI_SIZE AmlSizeNeeded = 0; ACPI_RESOURCE *ResourceEnd; ACPI_RS_LENGTH TotalSize; ACPI_FUNCTION_TRACE (RsGetAmlLength); /* Traverse entire list of internal resource descriptors */ ResourceEnd = ACPI_ADD_PTR (ACPI_RESOURCE, Resource, ResourceListSize); while (Resource < ResourceEnd) { /* Validate the descriptor type */ if (Resource->Type > ACPI_RESOURCE_TYPE_MAX) { return_ACPI_STATUS (AE_AML_INVALID_RESOURCE_TYPE); } /* Sanity check the length. 
It must not be zero, or we loop forever */ if (!Resource->Length) { return_ACPI_STATUS (AE_AML_BAD_RESOURCE_LENGTH); } /* Get the base size of the (external stream) resource descriptor */ TotalSize = AcpiGbl_AmlResourceSizes [Resource->Type]; /* * Augment the base size for descriptors with optional and/or * variable-length fields */ switch (Resource->Type) { case ACPI_RESOURCE_TYPE_IRQ: /* Length can be 3 or 2 */ if (Resource->Data.Irq.DescriptorLength == 2) { TotalSize--; } break; case ACPI_RESOURCE_TYPE_START_DEPENDENT: /* Length can be 1 or 0 */ if (Resource->Data.Irq.DescriptorLength == 0) { TotalSize--; } break; case ACPI_RESOURCE_TYPE_VENDOR: /* * Vendor Defined Resource: * For a Vendor Specific resource, if the Length is between 1 and 7 * it will be created as a Small Resource data type, otherwise it * is a Large Resource data type. */ if (Resource->Data.Vendor.ByteLength > 7) { /* Base size of a Large resource descriptor */ TotalSize = sizeof (AML_RESOURCE_LARGE_HEADER); } /* Add the size of the vendor-specific data */ TotalSize = (ACPI_RS_LENGTH) (TotalSize + Resource->Data.Vendor.ByteLength); break; case ACPI_RESOURCE_TYPE_END_TAG: /* * End Tag: * We are done -- return the accumulated total size. */ *SizeNeeded = AmlSizeNeeded + TotalSize; /* Normal exit */ return_ACPI_STATUS (AE_OK); case ACPI_RESOURCE_TYPE_ADDRESS16: /* * 16-Bit Address Resource: * Add the size of the optional ResourceSource info */ TotalSize = (ACPI_RS_LENGTH) (TotalSize + AcpiRsStructOptionLength ( &Resource->Data.Address16.ResourceSource)); break; case ACPI_RESOURCE_TYPE_ADDRESS32: /* * 32-Bit Address Resource: * Add the size of the optional ResourceSource info */ TotalSize = (ACPI_RS_LENGTH) (TotalSize + AcpiRsStructOptionLength ( &Resource->Data.Address32.ResourceSource)); break; case ACPI_RESOURCE_TYPE_ADDRESS64: /* * 64-Bit Address Resource: * Add the size of the optional ResourceSource info */ TotalSize = (ACPI_RS_LENGTH) (TotalSize + AcpiRsStructOptionLength ( &Resource->Data.Address64.ResourceSource)); break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: /* * Extended IRQ Resource: * Add the size of each additional optional interrupt beyond the * required 1 (4 bytes for each UINT32 interrupt number) */ TotalSize = (ACPI_RS_LENGTH) (TotalSize + ((Resource->Data.ExtendedIrq.InterruptCount - 1) * 4) + /* Add the size of the optional ResourceSource info */ AcpiRsStructOptionLength ( &Resource->Data.ExtendedIrq.ResourceSource)); break; case ACPI_RESOURCE_TYPE_GPIO: TotalSize = (ACPI_RS_LENGTH) (TotalSize + (Resource->Data.Gpio.PinTableLength * 2) + Resource->Data.Gpio.ResourceSource.StringLength + Resource->Data.Gpio.VendorLength); break; case ACPI_RESOURCE_TYPE_SERIAL_BUS: TotalSize = AcpiGbl_AmlResourceSerialBusSizes [ Resource->Data.CommonSerialBus.Type]; TotalSize = (ACPI_RS_LENGTH) (TotalSize + Resource->Data.I2cSerialBus.ResourceSource.StringLength + Resource->Data.I2cSerialBus.VendorLength); break; default: break; } /* Update the total */ AmlSizeNeeded += TotalSize; /* Point to the next object */ Resource = ACPI_ADD_PTR (ACPI_RESOURCE, Resource, Resource->Length); } /* Did not find an EndTag resource descriptor */ return_ACPI_STATUS (AE_AML_NO_RESOURCE_END_TAG); } /******************************************************************************* * * FUNCTION: AcpiRsGetListLength * * PARAMETERS: AmlBuffer - Pointer to the resource byte stream * AmlBufferLength - Size of AmlBuffer * SizeNeeded - Where the size needed is returned * * RETURN: Status * * DESCRIPTION: Takes an external resource byte stream and 
calculates the size * buffer needed to hold the corresponding internal resource * descriptor linked list. * ******************************************************************************/ ACPI_STATUS AcpiRsGetListLength ( UINT8 *AmlBuffer, UINT32 AmlBufferLength, ACPI_SIZE *SizeNeeded) { ACPI_STATUS Status; UINT8 *EndAml; UINT8 *Buffer; UINT32 BufferSize; UINT16 Temp16; UINT16 ResourceLength; UINT32 ExtraStructBytes; UINT8 ResourceIndex; UINT8 MinimumAmlResourceLength; AML_RESOURCE *AmlResource; ACPI_FUNCTION_TRACE (RsGetListLength); *SizeNeeded = ACPI_RS_SIZE_MIN; /* Minimum size is one EndTag */ EndAml = AmlBuffer + AmlBufferLength; /* Walk the list of AML resource descriptors */ while (AmlBuffer < EndAml) { /* Validate the Resource Type and Resource Length */ Status = AcpiUtValidateResource (NULL, AmlBuffer, &ResourceIndex); if (ACPI_FAILURE (Status)) { /* * Exit on failure. Cannot continue because the descriptor length * may be bogus also. */ return_ACPI_STATUS (Status); } AmlResource = (void *) AmlBuffer; /* Get the resource length and base (minimum) AML size */ ResourceLength = AcpiUtGetResourceLength (AmlBuffer); MinimumAmlResourceLength = AcpiGbl_ResourceAmlSizes[ResourceIndex]; /* * Augment the size for descriptors with optional * and/or variable length fields */ ExtraStructBytes = 0; Buffer = AmlBuffer + AcpiUtGetResourceHeaderLength (AmlBuffer); switch (AcpiUtGetResourceType (AmlBuffer)) { case ACPI_RESOURCE_NAME_IRQ: /* * IRQ Resource: * Get the number of bits set in the 16-bit IRQ mask */ ACPI_MOVE_16_TO_16 (&Temp16, Buffer); ExtraStructBytes = AcpiRsCountSetBits (Temp16); break; case ACPI_RESOURCE_NAME_DMA: /* * DMA Resource: * Get the number of bits set in the 8-bit DMA mask */ ExtraStructBytes = AcpiRsCountSetBits (*Buffer); break; case ACPI_RESOURCE_NAME_VENDOR_SMALL: case ACPI_RESOURCE_NAME_VENDOR_LARGE: /* * Vendor Resource: * Get the number of vendor data bytes */ ExtraStructBytes = ResourceLength; /* * There is already one byte included in the minimum * descriptor size. If there are extra struct bytes, * subtract one from the count. */ if (ExtraStructBytes) { ExtraStructBytes--; } break; case ACPI_RESOURCE_NAME_END_TAG: /* * End Tag: This is the normal exit */ return_ACPI_STATUS (AE_OK); case ACPI_RESOURCE_NAME_ADDRESS32: case ACPI_RESOURCE_NAME_ADDRESS16: case ACPI_RESOURCE_NAME_ADDRESS64: /* * Address Resource: * Add the size of the optional ResourceSource */ ExtraStructBytes = AcpiRsStreamOptionLength ( ResourceLength, MinimumAmlResourceLength); break; case ACPI_RESOURCE_NAME_EXTENDED_IRQ: /* * Extended IRQ Resource: * Using the InterruptTableLength, add 4 bytes for each additional * interrupt. 
Note: at least one interrupt is required and is * included in the minimum descriptor size (reason for the -1) */ ExtraStructBytes = (Buffer[1] - 1) * sizeof (UINT32); /* Add the size of the optional ResourceSource */ ExtraStructBytes += AcpiRsStreamOptionLength ( ResourceLength - ExtraStructBytes, MinimumAmlResourceLength); break; case ACPI_RESOURCE_NAME_GPIO: /* Vendor data is optional */ if (AmlResource->Gpio.VendorLength) { ExtraStructBytes += AmlResource->Gpio.VendorOffset - AmlResource->Gpio.PinTableOffset + AmlResource->Gpio.VendorLength; } else { ExtraStructBytes += AmlResource->LargeHeader.ResourceLength + sizeof (AML_RESOURCE_LARGE_HEADER) - AmlResource->Gpio.PinTableOffset; } break; case ACPI_RESOURCE_NAME_SERIAL_BUS: MinimumAmlResourceLength = AcpiGbl_ResourceAmlSerialBusSizes[ AmlResource->CommonSerialBus.Type]; ExtraStructBytes += AmlResource->CommonSerialBus.ResourceLength - MinimumAmlResourceLength; break; default: break; } /* * Update the required buffer size for the internal descriptor structs * * Important: Round the size up for the appropriate alignment. This * is a requirement on IA64. */ if (AcpiUtGetResourceType (AmlBuffer) == ACPI_RESOURCE_NAME_SERIAL_BUS) { BufferSize = AcpiGbl_ResourceStructSerialBusSizes[ AmlResource->CommonSerialBus.Type] + ExtraStructBytes; } else { BufferSize = AcpiGbl_ResourceStructSizes[ResourceIndex] + ExtraStructBytes; } BufferSize = (UINT32) ACPI_ROUND_UP_TO_NATIVE_WORD (BufferSize); *SizeNeeded += BufferSize; ACPI_DEBUG_PRINT ((ACPI_DB_RESOURCES, "Type %.2X, AmlLength %.2X InternalLength %.2X\n", AcpiUtGetResourceType (AmlBuffer), AcpiUtGetDescriptorLength (AmlBuffer), BufferSize)); /* * Point to the next resource within the AML stream using the length * contained in the resource descriptor header */ AmlBuffer += AcpiUtGetDescriptorLength (AmlBuffer); } /* Did not find an EndTag resource descriptor */ return_ACPI_STATUS (AE_AML_NO_RESOURCE_END_TAG); } /******************************************************************************* * * FUNCTION: AcpiRsGetPciRoutingTableLength * * PARAMETERS: PackageObject - Pointer to the package object * BufferSizeNeeded - UINT32 pointer of the size buffer * needed to properly return the * parsed data * * RETURN: Status * * DESCRIPTION: Given a package representing a PCI routing table, this * calculates the size of the corresponding linked list of * descriptions. * ******************************************************************************/ ACPI_STATUS AcpiRsGetPciRoutingTableLength ( ACPI_OPERAND_OBJECT *PackageObject, ACPI_SIZE *BufferSizeNeeded) { UINT32 NumberOfElements; ACPI_SIZE TempSizeNeeded = 0; ACPI_OPERAND_OBJECT **TopObjectList; UINT32 Index; ACPI_OPERAND_OBJECT *PackageElement; ACPI_OPERAND_OBJECT **SubObjectList; BOOLEAN NameFound; UINT32 TableIndex; ACPI_FUNCTION_TRACE (RsGetPciRoutingTableLength); NumberOfElements = PackageObject->Package.Count; /* * Calculate the size of the return buffer. * The base size is the number of elements * the sizes of the * structures. Additional space for the strings is added below. * The minus one is to subtract the size of the UINT8 Source[1] * member because it is added below. * * But each PRT_ENTRY structure has a pointer to a string and * the size of that string must be found. 
*/ TopObjectList = PackageObject->Package.Elements; for (Index = 0; Index < NumberOfElements; Index++) { /* Dereference the subpackage */ PackageElement = *TopObjectList; /* We must have a valid Package object */ if (!PackageElement || (PackageElement->Common.Type != ACPI_TYPE_PACKAGE)) { return_ACPI_STATUS (AE_AML_OPERAND_TYPE); } /* * The SubObjectList will now point to an array of the * four IRQ elements: Address, Pin, Source and SourceIndex */ SubObjectList = PackageElement->Package.Elements; /* Scan the IrqTableElements for the Source Name String */ NameFound = FALSE; for (TableIndex = 0; TableIndex < PackageElement->Package.Count && !NameFound; TableIndex++) { if (*SubObjectList && /* Null object allowed */ ((ACPI_TYPE_STRING == (*SubObjectList)->Common.Type) || ((ACPI_TYPE_LOCAL_REFERENCE == (*SubObjectList)->Common.Type) && ((*SubObjectList)->Reference.Class == ACPI_REFCLASS_NAME)))) { NameFound = TRUE; } else { /* Look at the next element */ SubObjectList++; } } TempSizeNeeded += (sizeof (ACPI_PCI_ROUTING_TABLE) - 4); /* Was a String type found? */ if (NameFound) { if ((*SubObjectList)->Common.Type == ACPI_TYPE_STRING) { /* * The length String.Length field does not include the * terminating NULL, add 1 */ TempSizeNeeded += ((ACPI_SIZE) (*SubObjectList)->String.Length + 1); } else { TempSizeNeeded += AcpiNsGetPathnameLength ( (*SubObjectList)->Reference.Node); } } else { /* * If no name was found, then this is a NULL, which is * translated as a UINT32 zero. */ TempSizeNeeded += sizeof (UINT32); } /* Round up the size since each element must be aligned */ TempSizeNeeded = ACPI_ROUND_UP_TO_64BIT (TempSizeNeeded); /* Point to the next ACPI_OPERAND_OBJECT */ TopObjectList++; } /* * Add an extra element to the end of the list, essentially a * NULL terminator */ *BufferSizeNeeded = TempSizeNeeded + sizeof (ACPI_PCI_ROUTING_TABLE); return_ACPI_STATUS (AE_OK); }
TigerBSD/TigerBSD
FreeBSD/sys/contrib/dev/acpica/components/resources/rscalc.c
C
isc
23,744
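AcpiRsCountSetBits above is Kernighan's bit-count loop: clearing the least significant set bit each iteration, so the loop runs once per set bit rather than once per bit position. A standalone version for reference, with the ACPICA typedefs replaced by <stdint.h> equivalents.

#include <stdio.h>
#include <stdint.h>

static uint8_t count_set_bits(uint16_t bitfield)
{
    uint8_t bits_set;

    for (bits_set = 0; bitfield; bits_set++) {
        /* Zero the least significant bit that is set */
        bitfield &= (uint16_t)(bitfield - 1);
    }
    return bits_set;
}

int main(void)
{
    printf("%u\n", count_set_bits(0x0029));  /* bits 0, 3, 5 -> prints 3 */
    return 0;
}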
/*- * Copyright (c) 1999 M. Warner Losh <imp@village.org> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Modifications for Megahertz X-Jack Ethernet Card (XJ-10BT) * * Copyright (c) 1996 by Tatsumi Hosokawa <hosokawa@jp.FreeBSD.org> * BSD-nomads, Tokyo, Japan. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/param.h> #include <sys/bus.h> #include <sys/kernel.h> #include <sys/module.h> #include <sys/socket.h> #include <sys/systm.h> #include <net/ethernet.h> #include <net/if.h> #include <net/if_arp.h> #include <machine/bus.h> #include <machine/resource.h> #include <sys/rman.h> #include <dev/pccard/pccardvar.h> #include <dev/pccard/pccard_cis.h> #include <dev/sn/if_snreg.h> #include <dev/sn/if_snvar.h> #include "card_if.h" #include "pccarddevs.h" typedef int sn_get_enaddr_t(device_t dev, u_char *eaddr); typedef int sn_activate_t(device_t dev); struct sn_sw { int type; #define SN_NORMAL 1 #define SN_MEGAHERTZ 2 #define SN_OSITECH 3 #define SN_OSI_SOD 4 #define SN_MOTO_MARINER 5 char *typestr; sn_get_enaddr_t *get_mac; sn_activate_t *activate; }; static sn_get_enaddr_t sn_pccard_normal_get_mac; static sn_activate_t sn_pccard_normal_activate; const static struct sn_sw sn_normal_sw = { SN_NORMAL, "plain", sn_pccard_normal_get_mac, sn_pccard_normal_activate }; static sn_get_enaddr_t sn_pccard_megahertz_get_mac; static sn_activate_t sn_pccard_megahertz_activate; const static struct sn_sw sn_mhz_sw = { SN_MEGAHERTZ, "Megahertz", sn_pccard_megahertz_get_mac, sn_pccard_megahertz_activate }; static const struct sn_product { struct pccard_product prod; const struct sn_sw *sw; } sn_pccard_products[] = { { PCMCIA_CARD(DSPSI, XJEM1144), &sn_mhz_sw }, { PCMCIA_CARD(DSPSI, XJACK), &sn_normal_sw }, /* { PCMCIA_CARD(MOTOROLA, MARINER), SN_MOTO_MARINER }, */ { PCMCIA_CARD(NEWMEDIA, BASICS), &sn_normal_sw }, { PCMCIA_CARD(MEGAHERTZ, VARIOUS), &sn_mhz_sw}, { PCMCIA_CARD(MEGAHERTZ, XJEM3336), &sn_mhz_sw}, /* { PCMCIA_CARD(OSITECH, TRUMP_SOD), SN_OSI_SOD }, */ /* { PCMCIA_CARD(OSITECH, TRUMP_JOH), SN_OSITECH }, */ /* { PCMCIA_CARD(PSION, GOLDCARD), SN_OSITECH }, */ /* { PCMCIA_CARD(PSION, NETGLOBAL), SNI_OSI_SOD }, */ /* { PCMCIA_CARD(PSION, NETGLOBAL2), SN_OSITECH }, */ { PCMCIA_CARD(SMC, 8020BT), &sn_normal_sw }, { PCMCIA_CARD(SMC, SMC91C96), &sn_normal_sw }, { { NULL } } }; static const struct sn_product * 
sn_pccard_lookup(device_t dev) { return ((const struct sn_product *) pccard_product_lookup(dev, (const struct pccard_product *)sn_pccard_products, sizeof(sn_pccard_products[0]), NULL)); } static int sn_pccard_probe(device_t dev) { const struct sn_product *pp; if ((pp = sn_pccard_lookup(dev)) != NULL) { if (pp->prod.pp_name != NULL) device_set_desc(dev, pp->prod.pp_name); return 0; } return EIO; } static int sn_pccard_ascii_enaddr(const char *str, u_char *enet) { uint8_t digit; int i; memset(enet, 0, ETHER_ADDR_LEN); for (i = 0, digit = 0; i < (ETHER_ADDR_LEN * 2); i++) { if (str[i] >= '0' && str[i] <= '9') digit |= str[i] - '0'; else if (str[i] >= 'a' && str[i] <= 'f') digit |= (str[i] - 'a') + 10; else if (str[i] >= 'A' && str[i] <= 'F') digit |= (str[i] - 'A') + 10; else return (0); /* Bogus digit!! */ /* Compensate for ordering of digits. */ if (i & 1) { enet[i >> 1] = digit; digit = 0; } else digit <<= 4; } return (1); } static int sn_pccard_normal_get_mac(device_t dev, u_char *eaddr) { int i, sum; const char *cisstr; pccard_get_ether(dev, eaddr); for (i = 0, sum = 0; i < ETHER_ADDR_LEN; i++) sum |= eaddr[i]; if (sum == 0) { pccard_get_cis3_str(dev, &cisstr); if (cisstr && strlen(cisstr) == ETHER_ADDR_LEN * 2) sum = sn_pccard_ascii_enaddr(cisstr, eaddr); } if (sum == 0) { pccard_get_cis4_str(dev, &cisstr); if (cisstr && strlen(cisstr) == ETHER_ADDR_LEN * 2) sum = sn_pccard_ascii_enaddr(cisstr, eaddr); } return sum; } static int sn_pccard_normal_activate(device_t dev) { int err; err = sn_activate(dev); if (err) sn_deactivate(dev); return (err); } static int sn_pccard_megahertz_mac(const struct pccard_tuple *tuple, void *argp) { uint8_t *enaddr = argp; int i; uint8_t buffer[ETHER_ADDR_LEN * 2]; /* Code 0x81 is Megahertz' special cis node containing the MAC */ if (tuple->code != 0x81) return (0); /* Make sure this is a sane node, as ASCII digits */ if (tuple->length != ETHER_ADDR_LEN * 2 + 1) return (0); /* Copy the MAC ADDR and return success if decoded */ for (i = 0; i < ETHER_ADDR_LEN * 2; i++) buffer[i] = pccard_tuple_read_1(tuple, i); return (sn_pccard_ascii_enaddr(buffer, enaddr)); } static int sn_pccard_megahertz_get_mac(device_t dev, u_char *eaddr) { if (sn_pccard_normal_get_mac(dev, eaddr)) return 1; /* * If that fails, try the special CIS tuple 0x81 that the * '3288 and '3336 cards have. That tuple specifies an ASCII * string, ala CIS3 or CIS4 in the 'normal' cards. */ return (pccard_cis_scan(dev, sn_pccard_megahertz_mac, eaddr)); } static int sn_pccard_megahertz_activate(device_t dev) { int err; struct sn_softc *sc = device_get_softc(dev); u_long start; err = sn_activate(dev); if (err) { sn_deactivate(dev); return (err); } /* * CIS resource is the modem one, so save it away. */ sc->modem_rid = sc->port_rid; sc->modem_res = sc->port_res; /* * The MHz XJEM/CCEM series of cards just need to have any * old resource allocated for the ethernet side of things, * provided bit 0x80 isn't set in the address. That bit is * evidently reserved for modem function and is how the * card steers the addresses internally.
*/ sc->port_res = NULL; start = 0; do { sc->port_rid = 1; sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, start, ~0, SMC_IO_EXTENT, RF_ACTIVE); if (sc->port_res == NULL) break; if (!(rman_get_start(sc->port_res) & 0x80)) break; start = rman_get_start(sc->port_res) + SMC_IO_EXTENT; bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); } while (start < 0xff80); if (sc->port_res == NULL) { sn_deactivate(dev); return ENOMEM; } return 0; } static int sn_pccard_attach(device_t dev) { struct sn_softc *sc = device_get_softc(dev); u_char eaddr[ETHER_ADDR_LEN]; int i, err; uint16_t w; u_char sum; const struct sn_product *pp; pp = sn_pccard_lookup(dev); sum = pp->sw->get_mac(dev, eaddr); /* Allocate resources so we can program the ether addr */ sc->dev = dev; err = pp->sw->activate(dev); if (err != 0) return (err); if (sum) { printf("Programming sn card's addr\n"); SMC_SELECT_BANK(sc, 1); for (i = 0; i < 3; i++) { w = (uint16_t)eaddr[i * 2] | (((uint16_t)eaddr[i * 2 + 1]) << 8); CSR_WRITE_2(sc, IAR_ADDR0_REG_W + i * 2, w); } } err = sn_attach(dev); if (err) sn_deactivate(dev); return (err); } static device_method_t sn_pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sn_pccard_probe), DEVMETHOD(device_attach, sn_pccard_attach), DEVMETHOD(device_detach, sn_detach), { 0, 0 } }; static driver_t sn_pccard_driver = { "sn", sn_pccard_methods, sizeof(struct sn_softc), }; extern devclass_t sn_devclass; DRIVER_MODULE(sn, pccard, sn_pccard_driver, sn_devclass, 0, 0); MODULE_DEPEND(sn, ether, 1, 1, 1); PCCARD_PNP_INFO(sn_pccard_products);
TigerBSD/TigerBSD
FreeBSD/sys/dev/sn/if_sn_pccard.c
C
isc
8,607
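sn_pccard_ascii_enaddr does the interesting work in this driver: twelve hex digits from a CIS string become a six-byte Ethernet address, and any bogus digit rejects the whole string. The same logic as a runnable userland sketch, with ETHER_ADDR_LEN defined locally instead of pulling in the kernel headers.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETHER_ADDR_LEN 6

static int ascii_enaddr(const char *str, uint8_t *enet)
{
    uint8_t digit;
    int i;

    memset(enet, 0, ETHER_ADDR_LEN);
    for (i = 0, digit = 0; i < (ETHER_ADDR_LEN * 2); i++) {
        if (str[i] >= '0' && str[i] <= '9')
            digit |= str[i] - '0';
        else if (str[i] >= 'a' && str[i] <= 'f')
            digit |= (str[i] - 'a') + 10;
        else if (str[i] >= 'A' && str[i] <= 'F')
            digit |= (str[i] - 'A') + 10;
        else
            return 0;                /* bogus digit: reject the string */
        if (i & 1) {                 /* low nibble: byte is complete */
            enet[i >> 1] = digit;
            digit = 0;
        } else                       /* high nibble: shift up, keep going */
            digit <<= 4;
    }
    return 1;
}

int main(void)
{
    uint8_t mac[ETHER_ADDR_LEN];
    if (ascii_enaddr("00A0D1234567", mac))
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}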
/* Copyright (C) 2004 Michael J. Silbersack. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <sys/types.h> #include <sys/stat.h> #include <sys/wait.h> #include <assert.h> #include <err.h> #include <errno.h> #include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> /* * $FreeBSD$ * The goal of this program is to see if fstat reports the correct * data count for a pipe. Prior to revision 1.172 of sys_pipe.c, * 0 would be returned once the pipe entered direct write mode. * * Linux (2.6) always returns zero, so it's not a valuable platform * for comparison. */ int main(void) { char buffer[32768], buffer2[32768], go[] = "go", go2[] = "go2"; int desc[2], ipc_coord[2]; ssize_t error; int successes = 0; struct stat status; pid_t new_pid; error = pipe(desc); if (error == -1) err(1, "Couldn't allocate data pipe"); error = pipe(ipc_coord); if (error == -1) err(1, "Couldn't allocate IPC coordination pipe"); new_pid = fork(); assert(new_pid != -1); close(new_pid == 0 ? desc[0] : desc[1]); #define SYNC_R(i, _buf) do { \ int _error = errno; \ warnx("%d: waiting for synchronization", __LINE__); \ if (read(ipc_coord[i], &_buf, sizeof(_buf)) != sizeof(_buf)) \ err(1, "failed to synchronize (%s)", (i == 0 ? "parent" : "child")); \ errno = _error; \ } while(0) #define SYNC_W(i, _buf) do { \ int _error = errno; \ warnx("%d: sending synchronization", __LINE__); \ if (write(ipc_coord[i], &_buf, sizeof(_buf)) != sizeof(_buf)) \ err(1, "failed to synchronize (%s)", (i == 0 ? "child" : "parent")); \ errno = _error; \ } while(0) #define WRITE(s) do { \ ssize_t _size; \ if ((_size = write(desc[1], &buffer, s)) != s) \ warn("short write; wrote %zd, expected %d", _size, s); \ } while(0) if (new_pid == 0) { SYNC_R(0, go); WRITE(145); SYNC_W(0, go2); SYNC_R(0, go); WRITE(2048); SYNC_W(0, go2); SYNC_R(0, go); WRITE(4096); SYNC_W(0, go2); SYNC_R(0, go); WRITE(8191); SYNC_W(0, go2); SYNC_R(0, go); SYNC_W(0, go2); /* XXX: why is this required? 
*/ WRITE(8192); SYNC_W(0, go2); close(ipc_coord[0]); close(ipc_coord[1]); _exit(0); } while (successes < 5) { SYNC_W(1, go); SYNC_R(1, go2); fstat(desc[0], &status); error = read(desc[0], &buffer2, sizeof(buffer2)); if (status.st_size != error) err(1, "FAILURE: stat size %jd read size %zd", (intmax_t)status.st_size, error); if (error > 0) { printf("SUCCESS at stat size %jd read size %zd\n", (intmax_t)status.st_size, error); successes++; } } exit(0); }
TigerBSD/TigerBSD
FreeBSD/tests/sys/kern/pipe/pipe_fstat_bug_test.c
C
isc
3,787
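The property under test reduces to: after writing N bytes into a pipe, fstat() on the read end should report st_size == N. A minimal single-process illustration of just that check (so direct-write mode is never entered); as the test's own comment notes, this holds on FreeBSD while Linux always reports zero.

#include <stdio.h>
#include <stdint.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    char buf[145] = { 0 };
    struct stat st;

    if (pipe(fds) == -1)
        return 1;
    if (write(fds[1], buf, sizeof buf) != (ssize_t)sizeof buf)
        return 1;
    if (fstat(fds[0], &st) == -1)
        return 1;
    /* On FreeBSD st_size should equal the number of buffered bytes. */
    printf("wrote %zu, fstat reports %jd\n",
           sizeof buf, (intmax_t)st.st_size);
    return 0;
}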
/* * The Yices SMT Solver. Copyright 2014 SRI International. * * This program may only be used subject to the noncommercial end user * license agreement which is downloadable along with this program. */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <inttypes.h> #include "utils/int_stack.h" static int_stack_t stack; static void print_stack(int_stack_t *stack) { iblock_t *b; printf("stack %p\n", stack); printf(" current block = %p\n", stack->current); printf(" free list = %p\n", stack->free); printf(" active blocks:\n"); b = stack->current; while (b != NULL) { printf(" block %p: size = %"PRIu32" ptr = %"PRIu32" data = %p\n", b, b->size, b->ptr, b->data); b = b->next; } printf(" free blocks:\n"); b = stack->free; while (b != NULL) { printf(" block %p: size = %"PRIu32" ptr = %"PRIu32" data = %p\n", b, b->size, b->ptr, b->data); b = b->next; } printf("\n"); } int main(void) { int32_t *a1, *a2, *a3, *a4; printf("=== Initialization ===\n"); init_istack(&stack); print_stack(&stack); printf("=== Allocation a1: size 100 ===\n"); a1 = alloc_istack_array(&stack, 100); printf(" a1 = %p\n", a1); print_stack(&stack); printf("=== Allocation a2: size 500 ===\n"); a2 = alloc_istack_array(&stack, 500); printf(" a2 = %p\n", a2); print_stack(&stack); printf("=== Allocation a3: size 800 ===\n"); a3 = alloc_istack_array(&stack, 800); printf(" a3 = %p\n", a3); print_stack(&stack); printf("=== Allocation a4: size 8000 ===\n"); a4 = alloc_istack_array(&stack, 8000); printf(" a4 = %p\n", a4); print_stack(&stack); printf("=== Free a4 ===\n"); free_istack_array(&stack, a4); print_stack(&stack); printf("=== Allocation a4: size 800 ===\n"); a4 = alloc_istack_array(&stack, 800); printf(" a4 = %p\n", a4); print_stack(&stack); printf("=== Free a4 ===\n"); free_istack_array(&stack, a4); print_stack(&stack); printf("=== Free a3 ===\n"); free_istack_array(&stack, a3); print_stack(&stack); printf("=== Reset ===\n"); reset_istack(&stack); print_stack(&stack); delete_istack(&stack); return 0; }
maelvalais/ocamlyices2
ext/yices/tests/unit/test_int_stack.c
C
isc
2,167
/*- * Copyright (c) 2010, 2012 Konstantin Belousov <kib@FreeBSD.org> * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_vm.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/kernel.h> #include <sys/lock.h> #include <sys/malloc.h> #include <sys/rwlock.h> #include <sys/sysent.h> #include <sys/sysctl.h> #include <sys/vdso.h> #include <vm/vm.h> #include <vm/vm_param.h> #include <vm/pmap.h> #include <vm/vm_extern.h> #include <vm/vm_kern.h> #include <vm/vm_map.h> #include <vm/vm_object.h> #include <vm/vm_page.h> #include <vm/vm_pager.h> static struct sx shared_page_alloc_sx; static vm_object_t shared_page_obj; static int shared_page_free; char *shared_page_mapping; void shared_page_write(int base, int size, const void *data) { bcopy(data, shared_page_mapping + base, size); } static int shared_page_alloc_locked(int size, int align) { int res; res = roundup(shared_page_free, align); if (res + size >= IDX_TO_OFF(shared_page_obj->size)) res = -1; else shared_page_free = res + size; return (res); } int shared_page_alloc(int size, int align) { int res; sx_xlock(&shared_page_alloc_sx); res = shared_page_alloc_locked(size, align); sx_xunlock(&shared_page_alloc_sx); return (res); } int shared_page_fill(int size, int align, const void *data) { int res; sx_xlock(&shared_page_alloc_sx); res = shared_page_alloc_locked(size, align); if (res != -1) shared_page_write(res, size, data); sx_xunlock(&shared_page_alloc_sx); return (res); } static void shared_page_init(void *dummy __unused) { vm_page_t m; vm_offset_t addr; sx_init(&shared_page_alloc_sx, "shpsx"); shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE, VM_PROT_DEFAULT, 0, NULL); VM_OBJECT_WLOCK(shared_page_obj); m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_NOBUSY | VM_ALLOC_ZERO); m->valid = VM_PAGE_BITS_ALL; VM_OBJECT_WUNLOCK(shared_page_obj); addr = kva_alloc(PAGE_SIZE); pmap_qenter(addr, &m, 1); shared_page_mapping = (char *)addr; } SYSINIT(shp, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)shared_page_init, NULL); /* * Push the timehands update to the shared page. 
* * The lockless update scheme is similar to the one used to update the * in-kernel timehands, see sys/kern/kern_tc.c:tc_windup() (which * calls us after the timehands are updated). */ static void timehands_update(struct vdso_sv_tk *svtk) { struct vdso_timehands th; struct vdso_timekeep *tk; uint32_t enabled, idx; enabled = tc_fill_vdso_timehands(&th); th.th_gen = 0; idx = svtk->sv_timekeep_curr; if (++idx >= VDSO_TH_NUM) idx = 0; svtk->sv_timekeep_curr = idx; if (++svtk->sv_timekeep_gen == 0) svtk->sv_timekeep_gen = 1; tk = (struct vdso_timekeep *)(shared_page_mapping + svtk->sv_timekeep_off); tk->tk_th[idx].th_gen = 0; atomic_thread_fence_rel(); if (enabled) tk->tk_th[idx] = th; atomic_store_rel_32(&tk->tk_th[idx].th_gen, svtk->sv_timekeep_gen); atomic_store_rel_32(&tk->tk_current, idx); /* * The ordering of the assignment to tk_enabled relative to * the update of the vdso_timehands is not important. */ tk->tk_enabled = enabled; } #ifdef COMPAT_FREEBSD32 static void timehands_update32(struct vdso_sv_tk *svtk) { struct vdso_timehands32 th; struct vdso_timekeep32 *tk; uint32_t enabled, idx; enabled = tc_fill_vdso_timehands32(&th); th.th_gen = 0; idx = svtk->sv_timekeep_curr; if (++idx >= VDSO_TH_NUM) idx = 0; svtk->sv_timekeep_curr = idx; if (++svtk->sv_timekeep_gen == 0) svtk->sv_timekeep_gen = 1; tk = (struct vdso_timekeep32 *)(shared_page_mapping + svtk->sv_timekeep_off); tk->tk_th[idx].th_gen = 0; atomic_thread_fence_rel(); if (enabled) tk->tk_th[idx] = th; atomic_store_rel_32(&tk->tk_th[idx].th_gen, svtk->sv_timekeep_gen); atomic_store_rel_32(&tk->tk_current, idx); tk->tk_enabled = enabled; } #endif /* * This is hackish, but easiest way to avoid creating list structures * that need to be iterated over from the hardclock interrupt * context. */ static struct vdso_sv_tk *host_svtk; #ifdef COMPAT_FREEBSD32 static struct vdso_sv_tk *compat32_svtk; #endif void timekeep_push_vdso(void) { if (host_svtk != NULL) timehands_update(host_svtk); #ifdef COMPAT_FREEBSD32 if (compat32_svtk != NULL) timehands_update32(compat32_svtk); #endif } struct vdso_sv_tk * alloc_sv_tk(void) { struct vdso_sv_tk *svtk; int tk_base; uint32_t tk_ver; tk_ver = VDSO_TK_VER_CURR; svtk = malloc(sizeof(struct vdso_sv_tk), M_TEMP, M_WAITOK | M_ZERO); tk_base = shared_page_alloc(sizeof(struct vdso_timekeep) + sizeof(struct vdso_timehands) * VDSO_TH_NUM, 16); KASSERT(tk_base != -1, ("tk_base -1 for native")); shared_page_write(tk_base + offsetof(struct vdso_timekeep, tk_ver), sizeof(uint32_t), &tk_ver); svtk->sv_timekeep_off = tk_base; timekeep_push_vdso(); return (svtk); } #ifdef COMPAT_FREEBSD32 struct vdso_sv_tk * alloc_sv_tk_compat32(void) { struct vdso_sv_tk *svtk; int tk_base; uint32_t tk_ver; svtk = malloc(sizeof(struct vdso_sv_tk), M_TEMP, M_WAITOK | M_ZERO); tk_ver = VDSO_TK_VER_CURR; tk_base = shared_page_alloc(sizeof(struct vdso_timekeep32) + sizeof(struct vdso_timehands32) * VDSO_TH_NUM, 16); KASSERT(tk_base != -1, ("tk_base -1 for 32bit")); shared_page_write(tk_base + offsetof(struct vdso_timekeep32, tk_ver), sizeof(uint32_t), &tk_ver); svtk->sv_timekeep_off = tk_base; timekeep_push_vdso(); return (svtk); } #endif void exec_sysvec_init(void *param) { struct sysentvec *sv; sv = (struct sysentvec *)param; if ((sv->sv_flags & SV_SHP) == 0) return; sv->sv_shared_page_obj = shared_page_obj; sv->sv_sigcode_base = sv->sv_shared_page_base + shared_page_fill(*(sv->sv_szsigcode), 16, sv->sv_sigcode); if ((sv->sv_flags & SV_ABI_MASK) != SV_ABI_FREEBSD) return; if ((sv->sv_flags & SV_TIMEKEEP) != 0) { #ifdef
COMPAT_FREEBSD32 if ((sv->sv_flags & SV_ILP32) != 0) { KASSERT(compat32_svtk == NULL, ("Compat32 already registered")); compat32_svtk = alloc_sv_tk_compat32(); sv->sv_timekeep_base = sv->sv_shared_page_base + compat32_svtk->sv_timekeep_off; } else { #endif KASSERT(host_svtk == NULL, ("Host already registered")); host_svtk = alloc_sv_tk(); sv->sv_timekeep_base = sv->sv_shared_page_base + host_svtk->sv_timekeep_off; #ifdef COMPAT_FREEBSD32 } #endif } }
TigerBSD/TigerBSD
FreeBSD/sys/kern/kern_sharedpage.c
C
isc
7,826
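timehands_update above implements a seqlock-style generation protocol: zero th_gen, release-fence, write the payload, then release-store a nonzero generation, so a reader accepts a snapshot only if it sees the same nonzero generation before and after copying it. A userspace sketch of that protocol with C11 atomics, using a single slot instead of the VDSO_TH_NUM ring; the payload is made atomic with relaxed accesses to keep the sketch free of data races.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct th {
    _Atomic uint32_t gen;       /* 0 means "update in progress" */
    _Atomic uint64_t payload;
};

static void writer_update(struct th *t, uint64_t value, uint32_t newgen)
{
    /* Mirror the kernel sequence: th_gen = 0; fence_rel; copy; store_rel. */
    atomic_store_explicit(&t->gen, 0, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&t->payload, value, memory_order_relaxed);
    atomic_store_explicit(&t->gen, newgen, memory_order_release);
}

static int reader_snapshot(struct th *t, uint64_t *out)
{
    uint32_t g1 = atomic_load_explicit(&t->gen, memory_order_acquire);
    if (g1 == 0)
        return 0;                /* update in progress: caller retries */
    *out = atomic_load_explicit(&t->payload, memory_order_relaxed);
    atomic_thread_fence(memory_order_acquire);
    uint32_t g2 = atomic_load_explicit(&t->gen, memory_order_relaxed);
    return g1 == g2;             /* generation moved: snapshot is torn */
}

int main(void)
{
    struct th t = { 0, 0 };
    uint64_t v = 0;
    writer_update(&t, 42, 1);
    printf("ok=%d value=%llu\n", reader_snapshot(&t, &v),
           (unsigned long long)v);
    return 0;
}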
/**************************************************************************** ** ** Name: MicoFileStat.c ** ** Description: ** Implements _fstat, called by Newlib C library file functions ** ** $Revision: $ ** ** Disclaimer: ** ** This source code is intended as a design reference which ** illustrates how these types of functions can be implemented. It ** is the user's responsibility to verify their design for ** consistency and functionality through the use of formal ** verification methods. Lattice Semiconductor provides no warranty ** regarding the use or functionality of this code. ** ** -------------------------------------------------------------------- ** ** Lattice Semiconductor Corporation ** 5555 NE Moore Court ** Hillsboro, OR 97214 ** U.S.A ** ** TEL: 1-800-Lattice (USA and Canada) ** (503)268-8001 (other locations) ** ** web: http://www.latticesemi.com ** email: techsupport@latticesemi.com ** ** -------------------------------------------------------------------------- ** ** Change History (Latest changes on top) ** ** Ver Date Description ** -------------------------------------------------------------------------- ** ** 3.0 Mar-25-2008 Added Header ** **--------------------------------------------------------------------------- *****************************************************************************/ #include <_ansi.h> #include <_syslist.h> #include <sys/types.h> #include <sys/stat.h> #include <errno.h> #include "MicoFileDevices.h" #ifdef __cplusplus extern "C" { #endif /****************************************************************** * * * Implements _fstat required by NewLibC's _fstat_r function * * * ******************************************************************/ int _fstat(int fd, struct stat *pstat) { MicoFileDesc_t *pFD; int retValue =-1; /* given the file-id, fetch the associated file-descriptor */ if(MicoGetFDEntry(fd, &pFD) != 0) return(-1); /* ask the device to report file status if it provides a stat handler */ if(pFD->pFileOpsTable->stat) retValue = pFD->pFileOpsTable->stat(pFD, pstat); /* all done */ return(retValue); } #ifdef __cplusplus } #endif
ptracton/wb_soc_template
rtl/lm32_top/drivers/service/MicoFileStat.c
C
mit
2,525
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include "curl_setup.h" #include <curl/curl.h> #include "urldata.h" #include "transfer.h" #include "url.h" #include "connect.h" #include "progress.h" #include "easyif.h" #include "share.h" #include "psl.h" #include "multiif.h" #include "sendf.h" #include "timeval.h" #include "http.h" #include "select.h" #include "warnless.h" #include "speedcheck.h" #include "conncache.h" #include "multihandle.h" #include "sigpipe.h" #include "vtls/vtls.h" #include "connect.h" #include "http_proxy.h" #include "http2.h" /* The last 3 #include files should be in this order */ #include "curl_printf.h" #include "curl_memory.h" #include "memdebug.h" /* CURL_SOCKET_HASH_TABLE_SIZE should be a prime number. Increasing it from 97 to 911 takes on a 32-bit machine 4 x 814 = 3256 more bytes. Still, every CURL handle takes 45-50 K memory, therefore this 3K is not significant. */ #ifndef CURL_SOCKET_HASH_TABLE_SIZE #define CURL_SOCKET_HASH_TABLE_SIZE 911 #endif #ifndef CURL_CONNECTION_HASH_SIZE #define CURL_CONNECTION_HASH_SIZE 97 #endif #define CURL_MULTI_HANDLE 0x000bab1e #define GOOD_MULTI_HANDLE(x) \ ((x) && (x)->type == CURL_MULTI_HANDLE) static CURLMcode singlesocket(struct Curl_multi *multi, struct Curl_easy *data); static CURLMcode add_next_timeout(struct curltime now, struct Curl_multi *multi, struct Curl_easy *d); static CURLMcode multi_timeout(struct Curl_multi *multi, long *timeout_ms); static void process_pending_handles(struct Curl_multi *multi); static void detach_connnection(struct Curl_easy *data); #ifdef DEBUGBUILD static const char * const statename[]={ "INIT", "CONNECT_PEND", "CONNECT", "WAITRESOLVE", "WAITCONNECT", "WAITPROXYCONNECT", "SENDPROTOCONNECT", "PROTOCONNECT", "DO", "DOING", "DO_MORE", "DO_DONE", "PERFORM", "TOOFAST", "DONE", "COMPLETED", "MSGSENT", }; #endif /* function pointer called once when switching TO a state */ typedef void (*init_multistate_func)(struct Curl_easy *data); static void Curl_init_completed(struct Curl_easy *data) { /* this is a completed transfer */ /* Important: reset the conn pointer so that we don't point to memory that could be freed anytime */ detach_connnection(data); Curl_expire_clear(data); /* stop all timers */ } /* always use this function to change state, to make debugging easier */ static void mstate(struct Curl_easy *data, CURLMstate state #ifdef DEBUGBUILD , int lineno #endif ) { CURLMstate oldstate = data->mstate; static const init_multistate_func finit[CURLM_STATE_LAST] = { NULL, /* INIT */ NULL, /* CONNECT_PEND */ Curl_init_CONNECT, /* CONNECT */ NULL, /* WAITRESOLVE */ NULL, /* WAITCONNECT */ NULL, /* WAITPROXYCONNECT */ NULL, /* SENDPROTOCONNECT */ NULL, /* PROTOCONNECT */ Curl_connect_free, /* DO */
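/* note (editorial): Curl_connect_free runs once a handle enters DO,
   releasing leftover proxy CONNECT state; the states below have no
   init hook until COMPLETED, handled by Curl_init_completed above */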
NULL, /* DOING */ NULL, /* DO_MORE */ NULL, /* DO_DONE */ NULL, /* PERFORM */ NULL, /* TOOFAST */ NULL, /* DONE */ Curl_init_completed, /* COMPLETED */ NULL /* MSGSENT */ }; #if defined(DEBUGBUILD) && defined(CURL_DISABLE_VERBOSE_STRINGS) (void) lineno; #endif if(oldstate == state) /* don't bother when the new state is the same as the old state */ return; data->mstate = state; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) if(data->mstate >= CURLM_STATE_CONNECT_PEND && data->mstate < CURLM_STATE_COMPLETED) { long connection_id = -5000; if(data->conn) connection_id = data->conn->connection_id; infof(data, "STATE: %s => %s handle %p; line %d (connection #%ld)\n", statename[oldstate], statename[data->mstate], (void *)data, lineno, connection_id); } #endif if(state == CURLM_STATE_COMPLETED) /* changing to COMPLETED means there's one less easy handle 'alive' */ data->multi->num_alive--; /* if this state has an init-function, run it */ if(finit[state]) finit[state](data); } #ifndef DEBUGBUILD #define multistate(x,y) mstate(x,y) #else #define multistate(x,y) mstate(x,y, __LINE__) #endif /* * We add one of these structs to the sockhash for each socket */ struct Curl_sh_entry { struct curl_hash transfers; /* hash of transfers using this socket */ unsigned int action; /* what combined action READ/WRITE this socket waits for */ void *socketp; /* settable by users with curl_multi_assign() */ unsigned int users; /* number of transfers using this */ unsigned int readers; /* this many transfers want to read */ unsigned int writers; /* this many transfers want to write */ }; /* bits for 'action' having no bits means this socket is not expecting any action */ #define SH_READ 1 #define SH_WRITE 2 /* look up a given socket in the socket hash, skip invalid sockets */ static struct Curl_sh_entry *sh_getentry(struct curl_hash *sh, curl_socket_t s) { if(s != CURL_SOCKET_BAD) { /* only look for proper sockets */ return Curl_hash_pick(sh, (char *)&s, sizeof(curl_socket_t)); } return NULL; } #define TRHASH_SIZE 13 static size_t trhash(void *key, size_t key_length, size_t slots_num) { size_t keyval = (size_t)*(struct Curl_easy **)key; (void) key_length; return (keyval % slots_num); } static size_t trhash_compare(void *k1, size_t k1_len, void *k2, size_t k2_len) { (void)k1_len; (void)k2_len; return *(struct Curl_easy **)k1 == *(struct Curl_easy **)k2; } static void trhash_dtor(void *nada) { (void)nada; } /* make sure this socket is present in the hash for this handle */ static struct Curl_sh_entry *sh_addentry(struct curl_hash *sh, curl_socket_t s) { struct Curl_sh_entry *there = sh_getentry(sh, s); struct Curl_sh_entry *check; if(there) { /* it is present, return fine */ return there; } /* not present, add it */ check = calloc(1, sizeof(struct Curl_sh_entry)); if(!check) return NULL; /* major failure */ if(Curl_hash_init(&check->transfers, TRHASH_SIZE, trhash, trhash_compare, trhash_dtor)) { free(check); return NULL; } /* make/add new hash entry */ if(!Curl_hash_add(sh, (char *)&s, sizeof(curl_socket_t), check)) { free(check); return NULL; /* major failure */ } return check; /* things are good in sockhash land */ } /* delete the given socket + handle from the hash */ static void sh_delentry(struct Curl_sh_entry *entry, struct curl_hash *sh, curl_socket_t s) { Curl_hash_destroy(&entry->transfers); /* We remove the hash entry. This will end up in a call to sh_freeentry(). 
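(sh_freeentry() is the destructor registered with the sockhash in
sh_init() below, so deleting the key also frees the entry itself.)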
*/ Curl_hash_delete(sh, (char *)&s, sizeof(curl_socket_t)); } /* * free a sockhash entry */ static void sh_freeentry(void *freethis) { struct Curl_sh_entry *p = (struct Curl_sh_entry *) freethis; free(p); } static size_t fd_key_compare(void *k1, size_t k1_len, void *k2, size_t k2_len) { (void) k1_len; (void) k2_len; return (*((curl_socket_t *) k1)) == (*((curl_socket_t *) k2)); } static size_t hash_fd(void *key, size_t key_length, size_t slots_num) { curl_socket_t fd = *((curl_socket_t *) key); (void) key_length; return (fd % slots_num); } /* * sh_init() creates a new socket hash and returns the handle for it. * * Quote from README.multi_socket: * * "Some tests at 7000 and 9000 connections showed that the socket hash lookup * is somewhat of a bottle neck. Its current implementation may be a bit too * limiting. It simply has a fixed-size array, and on each entry in the array * it has a linked list with entries. So the hash only checks which list to * scan through. The code I had used so far used a list with merely 7 slots * (as that is what the DNS hash uses) but with 7000 connections that would * make an average of 1000 nodes in each list to run through. I upped that to * 97 slots (I believe a prime is suitable) and noticed a significant speed * increase. I need to reconsider the hash implementation or use a rather * large default value like this. At 9000 connections I was still below 10us * per call." * */ static int sh_init(struct curl_hash *hash, int hashsize) { return Curl_hash_init(hash, hashsize, hash_fd, fd_key_compare, sh_freeentry); } /* * multi_addmsg() * * Called when a transfer is completed. Adds the given msg pointer to * the list kept in the multi handle. */ static CURLMcode multi_addmsg(struct Curl_multi *multi, struct Curl_message *msg) { Curl_llist_insert_next(&multi->msglist, multi->msglist.tail, msg, &msg->list); return CURLM_OK; } struct Curl_multi *Curl_multi_handle(int hashsize, /* socket hash */ int chashsize) /* connection hash */ { struct Curl_multi *multi = calloc(1, sizeof(struct Curl_multi)); if(!multi) return NULL; multi->type = CURL_MULTI_HANDLE; if(Curl_mk_dnscache(&multi->hostcache)) goto error; if(sh_init(&multi->sockhash, hashsize)) goto error; if(Curl_conncache_init(&multi->conn_cache, chashsize)) goto error; Curl_llist_init(&multi->msglist, NULL); Curl_llist_init(&multi->pending, NULL); multi->multiplexing = CURLPIPE_MULTIPLEX; /* -1 means it is not set by the user, use the default value */ multi->maxconnects = -1; return multi; error: Curl_hash_destroy(&multi->sockhash); Curl_hash_destroy(&multi->hostcache); Curl_conncache_destroy(&multi->conn_cache); Curl_llist_destroy(&multi->msglist, NULL); Curl_llist_destroy(&multi->pending, NULL); free(multi); return NULL; } struct Curl_multi *curl_multi_init(void) { return Curl_multi_handle(CURL_SOCKET_HASH_TABLE_SIZE, CURL_CONNECTION_HASH_SIZE); } CURLMcode curl_multi_add_handle(struct Curl_multi *multi, struct Curl_easy *data) { /* First, make some basic checks that the CURLM handle is a good handle */ if(!GOOD_MULTI_HANDLE(multi)) return CURLM_BAD_HANDLE; /* Verify that we got a somewhat good easy handle too */ if(!GOOD_EASY_HANDLE(data)) return CURLM_BAD_EASY_HANDLE; /* Prevent users from adding the same easy handle more than once and prevent adding to more than one multi stack */ if(data->multi) return CURLM_ADDED_ALREADY; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; /* Initialize timeout list for this handle */ Curl_llist_init(&data->state.timeoutlist, NULL); /* * No failure allowed in this function beyond
this point. And no * modification of easy nor multi handle allowed before this except for * potential multi's connection cache growing which won't be undone in this * function no matter what. */ if(data->set.errorbuffer) data->set.errorbuffer[0] = 0; /* set the easy handle */ multistate(data, CURLM_STATE_INIT); /* for multi interface connections, we share DNS cache automatically if the easy handle's one is currently not set. */ if(!data->dns.hostcache || (data->dns.hostcachetype == HCACHE_NONE)) { data->dns.hostcache = &multi->hostcache; data->dns.hostcachetype = HCACHE_MULTI; } /* Point to the shared or multi handle connection cache */ if(data->share && (data->share->specifier & (1<< CURL_LOCK_DATA_CONNECT))) data->state.conn_cache = &data->share->conn_cache; else data->state.conn_cache = &multi->conn_cache; #ifdef USE_LIBPSL /* Do the same for PSL. */ if(data->share && (data->share->specifier & (1 << CURL_LOCK_DATA_PSL))) data->psl = &data->share->psl; else data->psl = &multi->psl; #endif /* We add the new entry last in the list. */ data->next = NULL; /* end of the line */ if(multi->easyp) { struct Curl_easy *last = multi->easylp; last->next = data; data->prev = last; multi->easylp = data; /* the new last node */ } else { /* first node, make prev NULL! */ data->prev = NULL; multi->easylp = multi->easyp = data; /* both first and last */ } /* make the Curl_easy refer back to this multi handle */ data->multi = multi; /* Set the timeout for this handle to expire really soon so that it will be taken care of even when this handle is added in the midst of operation when only the curl_multi_socket() API is used. During that flow, only sockets that time-out or have actions will be dealt with. Since this handle has no action yet, we make sure it times out to get things to happen. */ Curl_expire(data, 0, EXPIRE_RUN_NOW); /* increase the node-counter */ multi->num_easy++; /* increase the alive-counter */ multi->num_alive++; /* A somewhat crude work-around for a little glitch in Curl_update_timer() that happens if the lastcall time is set to the same time when the handle is removed as when the next handle is added, as then the check in Curl_update_timer() that prevents calling the application multiple times with the same timer info will not trigger and then the new handle's timeout will not be notified to the app. The work-around is thus simply to clear the 'lastcall' variable to force Curl_update_timer() to always trigger a callback to the app when a new easy handle is added */ memset(&multi->timer_lastcall, 0, sizeof(multi->timer_lastcall)); /* The closure handle only ever has default timeouts set. To improve the state somewhat we clone the timeouts from each added handle so that the closure handle always has the same timeouts as the most recently added easy handle. 
*/ data->state.conn_cache->closure_handle->set.timeout = data->set.timeout; data->state.conn_cache->closure_handle->set.server_response_timeout = data->set.server_response_timeout; data->state.conn_cache->closure_handle->set.no_signal = data->set.no_signal; Curl_update_timer(multi); return CURLM_OK; } #if 0 /* Debug-function, used like this: * * Curl_hash_print(multi->sockhash, debug_print_sock_hash); * * Enable the hash print function first by editing hash.c */ static void debug_print_sock_hash(void *p) { struct Curl_sh_entry *sh = (struct Curl_sh_entry *)p; fprintf(stderr, " [easy %p/magic %x/socket %d]", (void *)sh->data, sh->data->magic, (int)sh->socket); } #endif static CURLcode multi_done(struct Curl_easy *data, CURLcode status, /* an error if this is called after an error was detected */ bool premature) { CURLcode result; struct connectdata *conn = data->conn; unsigned int i; DEBUGF(infof(data, "multi_done\n")); if(data->state.done) /* Stop if multi_done() has already been called */ return CURLE_OK; /* Stop the resolver and free its own resources (but not dns_entry yet). */ Curl_resolver_kill(conn); /* Cleanup possible redirect junk */ Curl_safefree(data->req.newurl); Curl_safefree(data->req.location); switch(status) { case CURLE_ABORTED_BY_CALLBACK: case CURLE_READ_ERROR: case CURLE_WRITE_ERROR: /* When we're aborted due to a callback return code it basically have to be counted as premature as there is trouble ahead if we don't. We have many callbacks and protocols work differently, we could potentially do this more fine-grained in the future. */ premature = TRUE; default: break; } /* this calls the protocol-specific function pointer previously set */ if(conn->handler->done) result = conn->handler->done(conn, status, premature); else result = status; if(CURLE_ABORTED_BY_CALLBACK != result) { /* avoid this if we already aborted by callback to avoid this calling another callback */ CURLcode rc = Curl_pgrsDone(conn); if(!result && rc) result = CURLE_ABORTED_BY_CALLBACK; } process_pending_handles(data->multi); /* connection / multiplex */ detach_connnection(data); if(CONN_INUSE(conn)) { /* Stop if still used. */ DEBUGF(infof(data, "Connection still in use %zu, " "no more multi_done now!\n", conn->easyq.size)); return CURLE_OK; } data->state.done = TRUE; /* called just now! */ if(conn->dns_entry) { Curl_resolv_unlock(data, conn->dns_entry); /* done with this */ conn->dns_entry = NULL; } Curl_hostcache_prune(data); Curl_safefree(data->state.ulbuf); /* if the transfer was completed in a paused state there can be buffered data left to free */ for(i = 0; i < data->state.tempcount; i++) { free(data->state.tempwrite[i].buf); } data->state.tempcount = 0; /* if data->set.reuse_forbid is TRUE, it means the libcurl client has forced us to close this connection. This is ignored for requests taking place in a NTLM/NEGOTIATE authentication handshake if conn->bits.close is TRUE, it means that the connection should be closed in spite of all our efforts to be nice, due to protocol restrictions in our or the server's end if premature is TRUE, it means this connection was said to be DONE before the entire request operation is complete and thus we can't know in what state it is for re-using, so we're forced to close it. In a perfect world we can add code that keep track of if we really must close it here or not, but currently we have no such detail knowledge. 
*/ if((data->set.reuse_forbid #if defined(USE_NTLM) && !(conn->http_ntlm_state == NTLMSTATE_TYPE2 || conn->proxy_ntlm_state == NTLMSTATE_TYPE2) #endif #if defined(USE_SPNEGO) && !(conn->http_negotiate_state == GSS_AUTHRECV || conn->proxy_negotiate_state == GSS_AUTHRECV) #endif ) || conn->bits.close || (premature && !(conn->handler->flags & PROTOPT_STREAM))) { CURLcode res2 = Curl_disconnect(data, conn, premature); /* If we had an error already, make sure we return that one. But if we got a new error, return that. */ if(!result && res2) result = res2; } else { char buffer[256]; /* create string before returning the connection */ msnprintf(buffer, sizeof(buffer), "Connection #%ld to host %s left intact", conn->connection_id, conn->bits.socksproxy ? conn->socks_proxy.host.dispname : conn->bits.httpproxy ? conn->http_proxy.host.dispname : conn->bits.conn_to_host ? conn->conn_to_host.dispname : conn->host.dispname); /* the connection is no longer in use by this transfer */ if(Curl_conncache_return_conn(conn)) { /* remember the most recently used connection */ data->state.lastconnect = conn; infof(data, "%s\n", buffer); } else data->state.lastconnect = NULL; } Curl_free_request_state(data); return result; } CURLMcode curl_multi_remove_handle(struct Curl_multi *multi, struct Curl_easy *data) { struct Curl_easy *easy = data; bool premature; bool easy_owns_conn; struct curl_llist_element *e; /* First, make some basic checks that the CURLM handle is a good handle */ if(!GOOD_MULTI_HANDLE(multi)) return CURLM_BAD_HANDLE; /* Verify that we got a somewhat good easy handle too */ if(!GOOD_EASY_HANDLE(data)) return CURLM_BAD_EASY_HANDLE; /* Prevent users from trying to remove same easy handle more than once */ if(!data->multi) return CURLM_OK; /* it is already removed so let's say it is fine! */ if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; premature = (data->mstate < CURLM_STATE_COMPLETED) ? TRUE : FALSE; easy_owns_conn = (data->conn && (data->conn->data == easy)) ? TRUE : FALSE; /* If the 'state' is not INIT or COMPLETED, we might need to do something nice to put the easy_handle in a good known state when this returns. */ if(premature) { /* this handle is "alive" so we need to count down the total number of alive connections when this is removed */ multi->num_alive--; } if(data->conn && data->mstate > CURLM_STATE_DO && data->mstate < CURLM_STATE_COMPLETED) { /* Set connection owner so that the DONE function closes it. We can safely do this here since connection is killed. */ data->conn->data = easy; streamclose(data->conn, "Removed with partial response"); easy_owns_conn = TRUE; } /* The timer must be shut down before data->multi is set to NULL, else the timenode will remain in the splay tree after curl_easy_cleanup is called. */ Curl_expire_clear(data); if(data->conn) { /* we must call multi_done() here (if we still own the connection) so that we don't leave a half-baked one around */ if(easy_owns_conn) { /* multi_done() clears the conn->data field to lose the association between the easy handle and the connection Note that this ignores the return code simply because there's nothing really useful to do with it anyway! 
*/ (void)multi_done(data, data->result, premature); } } if(data->connect_queue.ptr) /* the handle was in the pending list waiting for an available connection, so go ahead and remove it */ Curl_llist_remove(&multi->pending, &data->connect_queue, NULL); if(data->dns.hostcachetype == HCACHE_MULTI) { /* stop using the multi handle's DNS cache, *after* the possible multi_done() call above */ data->dns.hostcache = NULL; data->dns.hostcachetype = HCACHE_NONE; } Curl_wildcard_dtor(&data->wildcard); /* destroy the timeout list that is held in the easy handle, do this *after* multi_done() as that may actually call Curl_expire that uses this */ Curl_llist_destroy(&data->state.timeoutlist, NULL); /* as this was using a shared connection cache we clear the pointer to that since we're not part of that multi handle anymore */ data->state.conn_cache = NULL; /* change state without using multistate(), only to make singlesocket() do what we want */ data->mstate = CURLM_STATE_COMPLETED; singlesocket(multi, easy); /* to let the application know what sockets that vanish with this handle */ /* Remove the association between the connection and the handle */ if(data->conn) { data->conn->data = NULL; detach_connnection(data); } #ifdef USE_LIBPSL /* Remove the PSL association. */ if(data->psl == &multi->psl) data->psl = NULL; #endif data->multi = NULL; /* clear the association to this multi handle */ /* make sure there's no pending message in the queue sent from this easy handle */ for(e = multi->msglist.head; e; e = e->next) { struct Curl_message *msg = e->ptr; if(msg->extmsg.easy_handle == easy) { Curl_llist_remove(&multi->msglist, e, NULL); /* there can only be one from this specific handle */ break; } } /* make the previous node point to our next */ if(data->prev) data->prev->next = data->next; else multi->easyp = data->next; /* point to first node */ /* make our next point to our previous node */ if(data->next) data->next->prev = data->prev; else multi->easylp = data->prev; /* point to last node */ /* NOTE NOTE NOTE We do not touch the easy handle here! */ multi->num_easy--; /* one less to care about now */ Curl_update_timer(multi); return CURLM_OK; } /* Return TRUE if the application asked for multiplexing */ bool Curl_multiplex_wanted(const struct Curl_multi *multi) { return (multi && (multi->multiplexing)); } /* This is the only function that should clear data->conn. This will occasionally be called with the pointer already cleared. 
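Its counterpart, Curl_attach_connnection() below, is the only function
that should set it; together they keep the connection's easyq list of
transfers consistent with data->conn.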
*/ static void detach_connnection(struct Curl_easy *data) { struct connectdata *conn = data->conn; if(conn) Curl_llist_remove(&conn->easyq, &data->conn_queue, NULL); data->conn = NULL; } /* This is the only function that should assign data->conn */ void Curl_attach_connnection(struct Curl_easy *data, struct connectdata *conn) { DEBUGASSERT(!data->conn); DEBUGASSERT(conn); data->conn = conn; Curl_llist_insert_next(&conn->easyq, conn->easyq.tail, data, &data->conn_queue); } static int waitconnect_getsock(struct connectdata *conn, curl_socket_t *sock) { int i; int s = 0; int rc = 0; #ifdef USE_SSL if(CONNECT_FIRSTSOCKET_PROXY_SSL()) return Curl_ssl_getsock(conn, sock); #endif for(i = 0; i<2; i++) { if(conn->tempsock[i] != CURL_SOCKET_BAD) { sock[s] = conn->tempsock[i]; rc |= GETSOCK_WRITESOCK(s); #ifdef ENABLE_QUIC if(conn->transport == TRNSPRT_QUIC) /* when connecting QUIC, we want to read the socket too */ rc |= GETSOCK_READSOCK(s); #endif s++; } } return rc; } static int waitproxyconnect_getsock(struct connectdata *conn, curl_socket_t *sock) { sock[0] = conn->sock[FIRSTSOCKET]; /* when we've sent a CONNECT to a proxy, we should rather wait for the socket to become readable to be able to get the response headers */ if(conn->connect_state) return GETSOCK_READSOCK(0); return GETSOCK_WRITESOCK(0); } static int domore_getsock(struct connectdata *conn, curl_socket_t *socks) { if(conn && conn->handler->domore_getsock) return conn->handler->domore_getsock(conn, socks); return GETSOCK_BLANK; } static int doing_getsock(struct connectdata *conn, curl_socket_t *socks) { if(conn && conn->handler->doing_getsock) return conn->handler->doing_getsock(conn, socks); return GETSOCK_BLANK; } static int protocol_getsock(struct connectdata *conn, curl_socket_t *socks) { if(conn->handler->proto_getsock) return conn->handler->proto_getsock(conn, socks); /* Backup getsock logic. Since there is a live socket in use, we must wait for it or it will be removed from watching when the multi_socket API is used. */ socks[0] = conn->sock[FIRSTSOCKET]; return GETSOCK_READSOCK(0) | GETSOCK_WRITESOCK(0); } /* returns bitmapped flags for this handle and its sockets. The 'socks[]' array contains MAX_SOCKSPEREASYHANDLE entries. */ static int multi_getsock(struct Curl_easy *data, curl_socket_t *socks) { /* The no connection case can happen when this is called from curl_multi_remove_handle() => singlesocket() => multi_getsock(). */ if(!data->conn) return 0; if(data->mstate > CURLM_STATE_CONNECT && data->mstate < CURLM_STATE_COMPLETED) { /* Set up ownership correctly */ data->conn->data = data; } switch(data->mstate) { default: #if 0 /* switch back on these cases to get the compiler to check for all enums to be present */ case CURLM_STATE_TOOFAST: /* returns 0, so will not select. 
*/ case CURLM_STATE_COMPLETED: case CURLM_STATE_MSGSENT: case CURLM_STATE_INIT: case CURLM_STATE_CONNECT: case CURLM_STATE_WAITDO: case CURLM_STATE_DONE: case CURLM_STATE_LAST: /* this will get called with CURLM_STATE_COMPLETED when a handle is removed */ #endif return 0; case CURLM_STATE_WAITRESOLVE: return Curl_resolv_getsock(data->conn, socks); case CURLM_STATE_PROTOCONNECT: case CURLM_STATE_SENDPROTOCONNECT: return protocol_getsock(data->conn, socks); case CURLM_STATE_DO: case CURLM_STATE_DOING: return doing_getsock(data->conn, socks); case CURLM_STATE_WAITPROXYCONNECT: return waitproxyconnect_getsock(data->conn, socks); case CURLM_STATE_WAITCONNECT: return waitconnect_getsock(data->conn, socks); case CURLM_STATE_DO_MORE: return domore_getsock(data->conn, socks); case CURLM_STATE_DO_DONE: /* since is set after DO is completed, we switch to waiting for the same as the *PERFORM states */ case CURLM_STATE_PERFORM: return Curl_single_getsock(data->conn, socks); } } CURLMcode curl_multi_fdset(struct Curl_multi *multi, fd_set *read_fd_set, fd_set *write_fd_set, fd_set *exc_fd_set, int *max_fd) { /* Scan through all the easy handles to get the file descriptors set. Some easy handles may not have connected to the remote host yet, and then we must make sure that is done. */ struct Curl_easy *data; int this_max_fd = -1; curl_socket_t sockbunch[MAX_SOCKSPEREASYHANDLE]; int i; (void)exc_fd_set; /* not used */ if(!GOOD_MULTI_HANDLE(multi)) return CURLM_BAD_HANDLE; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; data = multi->easyp; while(data) { int bitmap = multi_getsock(data, sockbunch); for(i = 0; i< MAX_SOCKSPEREASYHANDLE; i++) { curl_socket_t s = CURL_SOCKET_BAD; if((bitmap & GETSOCK_READSOCK(i)) && VALID_SOCK((sockbunch[i]))) { FD_SET(sockbunch[i], read_fd_set); s = sockbunch[i]; } if((bitmap & GETSOCK_WRITESOCK(i)) && VALID_SOCK((sockbunch[i]))) { FD_SET(sockbunch[i], write_fd_set); s = sockbunch[i]; } if(s == CURL_SOCKET_BAD) /* this socket is unused, break out of loop */ break; if((int)s > this_max_fd) this_max_fd = (int)s; } data = data->next; /* check next handle */ } *max_fd = this_max_fd; return CURLM_OK; } #define NUM_POLLS_ON_STACK 10 static CURLMcode Curl_multi_wait(struct Curl_multi *multi, struct curl_waitfd extra_fds[], unsigned int extra_nfds, int timeout_ms, int *ret, bool extrawait) /* when no socket, wait */ { struct Curl_easy *data; curl_socket_t sockbunch[MAX_SOCKSPEREASYHANDLE]; int bitmap; unsigned int i; unsigned int nfds = 0; unsigned int curlfds; bool ufds_malloc = FALSE; long timeout_internal; int retcode = 0; struct pollfd a_few_on_stack[NUM_POLLS_ON_STACK]; struct pollfd *ufds = &a_few_on_stack[0]; if(!GOOD_MULTI_HANDLE(multi)) return CURLM_BAD_HANDLE; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; /* Count up how many fds we have from the multi handle */ data = multi->easyp; while(data) { bitmap = multi_getsock(data, sockbunch); for(i = 0; i< MAX_SOCKSPEREASYHANDLE; i++) { curl_socket_t s = CURL_SOCKET_BAD; if(bitmap & GETSOCK_READSOCK(i)) { ++nfds; s = sockbunch[i]; } if(bitmap & GETSOCK_WRITESOCK(i)) { ++nfds; s = sockbunch[i]; } if(s == CURL_SOCKET_BAD) { break; } } data = data->next; /* check next handle */ } /* If the internally desired timeout is actually shorter than requested from the outside, then use the shorter time! But only if the internal timer is actually larger than -1! 
*/ (void)multi_timeout(multi, &timeout_internal); if((timeout_internal >= 0) && (timeout_internal < (long)timeout_ms)) timeout_ms = (int)timeout_internal; curlfds = nfds; /* number of internal file descriptors */ nfds += extra_nfds; /* add the externally provided ones */ if(nfds > NUM_POLLS_ON_STACK) { /* 'nfds' is a 32 bit value and 'struct pollfd' is typically 8 bytes big, so at 2^29 sockets this value might wrap. When a process gets the capability to actually handle over 500 million sockets this calculation needs a integer overflow check. */ ufds = malloc(nfds * sizeof(struct pollfd)); if(!ufds) return CURLM_OUT_OF_MEMORY; ufds_malloc = TRUE; } nfds = 0; /* only do the second loop if we found descriptors in the first stage run above */ if(curlfds) { /* Add the curl handles to our pollfds first */ data = multi->easyp; while(data) { bitmap = multi_getsock(data, sockbunch); for(i = 0; i< MAX_SOCKSPEREASYHANDLE; i++) { curl_socket_t s = CURL_SOCKET_BAD; if(bitmap & GETSOCK_READSOCK(i)) { ufds[nfds].fd = sockbunch[i]; ufds[nfds].events = POLLIN; ++nfds; s = sockbunch[i]; } if(bitmap & GETSOCK_WRITESOCK(i)) { ufds[nfds].fd = sockbunch[i]; ufds[nfds].events = POLLOUT; ++nfds; s = sockbunch[i]; } if(s == CURL_SOCKET_BAD) { break; } } data = data->next; /* check next handle */ } } /* Add external file descriptions from poll-like struct curl_waitfd */ for(i = 0; i < extra_nfds; i++) { ufds[nfds].fd = extra_fds[i].fd; ufds[nfds].events = 0; if(extra_fds[i].events & CURL_WAIT_POLLIN) ufds[nfds].events |= POLLIN; if(extra_fds[i].events & CURL_WAIT_POLLPRI) ufds[nfds].events |= POLLPRI; if(extra_fds[i].events & CURL_WAIT_POLLOUT) ufds[nfds].events |= POLLOUT; ++nfds; } if(nfds) { int pollrc; /* wait... */ pollrc = Curl_poll(ufds, nfds, timeout_ms); if(pollrc > 0) { retcode = pollrc; /* copy revents results from the poll to the curl_multi_wait poll struct, the bit values of the actual underlying poll() implementation may not be the same as the ones in the public libcurl API! */ for(i = 0; i < extra_nfds; i++) { unsigned short mask = 0; unsigned r = ufds[curlfds + i].revents; if(r & POLLIN) mask |= CURL_WAIT_POLLIN; if(r & POLLOUT) mask |= CURL_WAIT_POLLOUT; if(r & POLLPRI) mask |= CURL_WAIT_POLLPRI; extra_fds[i].revents = mask; } } } if(ufds_malloc) free(ufds); if(ret) *ret = retcode; if(!extrawait || extra_fds || curlfds) /* if any socket was checked */ ; else { long sleep_ms = 0; /* Avoid busy-looping when there's nothing particular to wait for */ if(!curl_multi_timeout(multi, &sleep_ms) && sleep_ms) { if(sleep_ms > timeout_ms) sleep_ms = timeout_ms; Curl_wait_ms((int)sleep_ms); } } return CURLM_OK; } CURLMcode curl_multi_wait(struct Curl_multi *multi, struct curl_waitfd extra_fds[], unsigned int extra_nfds, int timeout_ms, int *ret) { return Curl_multi_wait(multi, extra_fds, extra_nfds, timeout_ms, ret, FALSE); } CURLMcode curl_multi_poll(struct Curl_multi *multi, struct curl_waitfd extra_fds[], unsigned int extra_nfds, int timeout_ms, int *ret) { return Curl_multi_wait(multi, extra_fds, extra_nfds, timeout_ms, ret, TRUE); } /* * multi_ischanged() is called * * Returns TRUE/FALSE whether the state is changed to trigger a CONNECT_PEND * => CONNECT action. * * Set 'clear' to TRUE to have it also clear the state variable. 
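*/

/*
 * A minimal usage sketch (editorial addition, not code from this file;
 * assumes an already-configured easy handle 'eh'): an application drives
 * the multi interface with the public functions defined above.
 */
#if 0
CURLM *m = curl_multi_init();
CURLMsg *msg;
int running, n;

curl_multi_add_handle(m, eh);
do {
  curl_multi_perform(m, &running);
  if(running)
    /* sleep up to 1000 ms waiting for activity, without busy-looping */
    curl_multi_poll(m, NULL, 0, 1000, &n);
} while(running);

/* harvest the completion message queued via multi_addmsg() */
while((msg = curl_multi_info_read(m, &n)))
  if(msg->msg == CURLMSG_DONE)
    fprintf(stderr, "done: result %d\n", (int)msg->data.result);

curl_multi_remove_handle(m, eh);
curl_multi_cleanup(m);
#endif
/*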
*/ static bool multi_ischanged(struct Curl_multi *multi, bool clear) { bool retval = multi->recheckstate; if(clear) multi->recheckstate = FALSE; return retval; } CURLMcode Curl_multi_add_perform(struct Curl_multi *multi, struct Curl_easy *data, struct connectdata *conn) { CURLMcode rc; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; rc = curl_multi_add_handle(multi, data); if(!rc) { struct SingleRequest *k = &data->req; /* pass in NULL for 'conn' here since we don't want to init the connection, only this transfer */ Curl_init_do(data, NULL); /* take this handle to the perform state right away */ multistate(data, CURLM_STATE_PERFORM); Curl_attach_connnection(data, conn); k->keepon |= KEEP_RECV; /* setup to receive! */ } return rc; } /* * do_complete is called when the DO actions are complete. * * We init chunking and trailer bits to their default values here immediately * before receiving any header data for the current request. */ static void do_complete(struct connectdata *conn) { conn->data->req.chunk = FALSE; Curl_pgrsTime(conn->data, TIMER_PRETRANSFER); } static CURLcode multi_do(struct Curl_easy *data, bool *done) { CURLcode result = CURLE_OK; struct connectdata *conn = data->conn; DEBUGASSERT(conn); DEBUGASSERT(conn->handler); if(conn->handler->do_it) { /* generic protocol-specific function pointer set in curl_connect() */ result = conn->handler->do_it(conn, done); if(!result && *done) /* do_complete must be called after the protocol-specific DO function */ do_complete(conn); } return result; } /* * multi_do_more() is called during the DO_MORE multi state. It is basically a * second stage DO state which (wrongly) was introduced to support FTP's * second connection. * * 'complete' can return 0 for incomplete, 1 for done and -1 to go back to the * DOING state because there's more work to do! */ static CURLcode multi_do_more(struct connectdata *conn, int *complete) { CURLcode result = CURLE_OK; *complete = 0; if(conn->handler->do_more) result = conn->handler->do_more(conn, complete); if(!result && (*complete == 1)) /* do_complete must be called after the protocol-specific DO function */ do_complete(conn); return result; } /* * We are doing protocol-specific connecting and this is being called over and * over from the multi interface until the connection phase is done on the * protocol layer. */ static CURLcode protocol_connecting(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; if(conn && conn->handler->connecting) { *done = FALSE; result = conn->handler->connecting(conn, done); } else *done = TRUE; return result; } /* * We are DOING; this is being called over and over from the multi interface * until the DOING phase is done on the protocol layer. */ static CURLcode protocol_doing(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; if(conn && conn->handler->doing) { *done = FALSE; result = conn->handler->doing(conn, done); } else *done = TRUE; return result; } /* * We have discovered that the TCP connection has been successful, so we can * now proceed with some action. * */ static CURLcode protocol_connect(struct connectdata *conn, bool *protocol_done) { CURLcode result = CURLE_OK; DEBUGASSERT(conn); DEBUGASSERT(protocol_done); *protocol_done = FALSE; if(conn->bits.tcpconnect[FIRSTSOCKET] && conn->bits.protoconnstart) { /* We already are connected, get back. This may happen when the connect worked fine in the first call, like when we connect to a local server or proxy. Note that we don't know if the protocol is actually done.
Unless this protocol doesn't have any protocol-connect callback, as then we know we're done. */ if(!conn->handler->connecting) *protocol_done = TRUE; return CURLE_OK; } if(!conn->bits.protoconnstart) { result = Curl_proxy_connect(conn, FIRSTSOCKET); if(result) return result; if(CONNECT_FIRSTSOCKET_PROXY_SSL()) /* wait for HTTPS proxy SSL initialization to complete */ return CURLE_OK; if(conn->bits.tunnel_proxy && conn->bits.httpproxy && Curl_connect_ongoing(conn)) /* when using an HTTP tunnel proxy, await complete tunnel establishment before proceeding further. Return CURLE_OK so we'll be called again */ return CURLE_OK; if(conn->handler->connect_it) { /* is there a protocol-specific connect() procedure? */ /* Call the protocol-specific connect function */ result = conn->handler->connect_it(conn, protocol_done); } else *protocol_done = TRUE; /* it has started, possibly even completed but that knowledge isn't stored in this bit! */ if(!result) conn->bits.protoconnstart = TRUE; } return result; /* pass back status */ } static CURLMcode multi_runsingle(struct Curl_multi *multi, struct curltime now, struct Curl_easy *data) { struct Curl_message *msg = NULL; bool connected; bool async; bool protocol_connected = FALSE; bool dophase_done = FALSE; bool done = FALSE; CURLMcode rc; CURLcode result = CURLE_OK; timediff_t timeout_ms; timediff_t recv_timeout_ms; timediff_t send_timeout_ms; int control; if(!GOOD_EASY_HANDLE(data)) return CURLM_BAD_EASY_HANDLE; do { /* A "stream" here is a logical stream if the protocol can handle that (HTTP/2), or the full connection for older protocols */ bool stream_error = FALSE; rc = CURLM_OK; DEBUGASSERT((data->mstate <= CURLM_STATE_CONNECT) || (data->mstate >= CURLM_STATE_DONE) || data->conn); if(!data->conn && data->mstate > CURLM_STATE_CONNECT && data->mstate < CURLM_STATE_DONE) { /* In all these states, the code will blindly access 'data->conn' so this is precaution that it isn't NULL. And it silences static analyzers. */ failf(data, "In state %d with no conn, bail out!\n", data->mstate); return CURLM_INTERNAL_ERROR; } if(multi_ischanged(multi, TRUE)) { DEBUGF(infof(data, "multi changed, check CONNECT_PEND queue!\n")); process_pending_handles(multi); /* multiplexed */ } if(data->conn && data->mstate > CURLM_STATE_CONNECT && data->mstate < CURLM_STATE_COMPLETED) { /* Make sure we set the connection's current owner */ data->conn->data = data; } if(data->conn && (data->mstate >= CURLM_STATE_CONNECT) && (data->mstate < CURLM_STATE_COMPLETED)) { /* we need to wait for the connect state as only then is the start time stored, but we must not check already completed handles */ timeout_ms = Curl_timeleft(data, &now, (data->mstate <= CURLM_STATE_DO)? 
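/* 'duringconnect' for Curl_timeleft(): the connect timeout keeps
   applying through the DO state */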
TRUE:FALSE); if(timeout_ms < 0) { /* Handle timed out */ if(data->mstate == CURLM_STATE_WAITRESOLVE) failf(data, "Resolving timed out after %" CURL_FORMAT_TIMEDIFF_T " milliseconds", Curl_timediff(now, data->progress.t_startsingle)); else if(data->mstate == CURLM_STATE_WAITCONNECT) failf(data, "Connection timed out after %" CURL_FORMAT_TIMEDIFF_T " milliseconds", Curl_timediff(now, data->progress.t_startsingle)); else { struct SingleRequest *k = &data->req; if(k->size != -1) { failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %" CURL_FORMAT_CURL_OFF_T " bytes received", Curl_timediff(now, data->progress.t_startsingle), k->bytecount, k->size); } else { failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received", Curl_timediff(now, data->progress.t_startsingle), k->bytecount); } } /* Force connection closed if the connection has indeed been used */ if(data->mstate > CURLM_STATE_DO) { streamclose(data->conn, "Disconnected with pending data"); stream_error = TRUE; } result = CURLE_OPERATION_TIMEDOUT; (void)multi_done(data, result, TRUE); /* Skip the statemachine and go directly to error handling section. */ goto statemachine_end; } } switch(data->mstate) { case CURLM_STATE_INIT: /* init this transfer. */ result = Curl_pretransfer(data); if(!result) { /* after init, go CONNECT */ multistate(data, CURLM_STATE_CONNECT); Curl_pgrsTime(data, TIMER_STARTOP); rc = CURLM_CALL_MULTI_PERFORM; } break; case CURLM_STATE_CONNECT_PEND: /* We will stay here until there is a connection available. Then we try again in the CURLM_STATE_CONNECT state. */ break; case CURLM_STATE_CONNECT: /* Connect. We want to get a connection identifier filled in. */ Curl_pgrsTime(data, TIMER_STARTSINGLE); if(data->set.timeout) Curl_expire(data, data->set.timeout, EXPIRE_TIMEOUT); if(data->set.connecttimeout) Curl_expire(data, data->set.connecttimeout, EXPIRE_CONNECTTIMEOUT); result = Curl_connect(data, &async, &protocol_connected); if(CURLE_NO_CONNECTION_AVAILABLE == result) { /* There was no connection available. We will go to the pending state and wait for an available connection. */ multistate(data, CURLM_STATE_CONNECT_PEND); /* add this handle to the list of connect-pending handles */ Curl_llist_insert_next(&multi->pending, multi->pending.tail, data, &data->connect_queue); result = CURLE_OK; break; } else if(data->state.previouslypending) { /* this transfer comes from the pending queue so try move another */ infof(data, "Transfer was pending, now try another\n"); process_pending_handles(data->multi); } if(!result) { if(async) /* We're now waiting for an asynchronous name lookup */ multistate(data, CURLM_STATE_WAITRESOLVE); else { /* after the connect has been sent off, go WAITCONNECT unless the protocol connect is already done and we can go directly to WAITDO or DO! 
*/ rc = CURLM_CALL_MULTI_PERFORM; if(protocol_connected) multistate(data, CURLM_STATE_DO); else { #ifndef CURL_DISABLE_HTTP if(Curl_connect_ongoing(data->conn)) multistate(data, CURLM_STATE_WAITPROXYCONNECT); else #endif multistate(data, CURLM_STATE_WAITCONNECT); } } } break; case CURLM_STATE_WAITRESOLVE: /* awaiting an asynch name resolve to complete */ { struct Curl_dns_entry *dns = NULL; struct connectdata *conn = data->conn; const char *hostname; DEBUGASSERT(conn); if(conn->bits.httpproxy) hostname = conn->http_proxy.host.name; else if(conn->bits.conn_to_host) hostname = conn->conn_to_host.name; else hostname = conn->host.name; /* check if we have the name resolved by now */ dns = Curl_fetch_addr(conn, hostname, (int)conn->port); if(dns) { #ifdef CURLRES_ASYNCH conn->async.dns = dns; conn->async.done = TRUE; #endif result = CURLE_OK; infof(data, "Hostname '%s' was found in DNS cache\n", hostname); } if(!dns) result = Curl_resolv_check(data->conn, &dns); /* Update sockets here, because the socket(s) may have been closed and the application thus needs to be told, even if it is likely that the same socket(s) will again be used further down. If the name has not yet been resolved, it is likely that new sockets have been opened in an attempt to contact another resolver. */ singlesocket(multi, data); if(dns) { /* Perform the next step in the connection phase, and then move on to the WAITCONNECT state */ result = Curl_once_resolved(data->conn, &protocol_connected); if(result) /* if Curl_once_resolved() returns failure, the connection struct is already freed and gone */ data->conn = NULL; /* no more connection */ else { /* call again please so that we get the next socket setup */ rc = CURLM_CALL_MULTI_PERFORM; if(protocol_connected) multistate(data, CURLM_STATE_DO); else { #ifndef CURL_DISABLE_HTTP if(Curl_connect_ongoing(data->conn)) multistate(data, CURLM_STATE_WAITPROXYCONNECT); else #endif multistate(data, CURLM_STATE_WAITCONNECT); } } } if(result) { /* failure detected */ stream_error = TRUE; break; } } break; #ifndef CURL_DISABLE_HTTP case CURLM_STATE_WAITPROXYCONNECT: /* this is HTTP-specific, but sending CONNECT to a proxy is HTTP... */ DEBUGASSERT(data->conn); result = Curl_http_connect(data->conn, &protocol_connected); if(data->conn->bits.proxy_connect_closed) { rc = CURLM_CALL_MULTI_PERFORM; /* connect back to proxy again */ result = CURLE_OK; multi_done(data, CURLE_OK, FALSE); multistate(data, CURLM_STATE_CONNECT); } else if(!result) { if((data->conn->http_proxy.proxytype != CURLPROXY_HTTPS || data->conn->bits.proxy_ssl_connected[FIRSTSOCKET]) && Curl_connect_complete(data->conn)) { rc = CURLM_CALL_MULTI_PERFORM; /* initiate protocol connect phase */ multistate(data, CURLM_STATE_SENDPROTOCONNECT); } } else if(result) stream_error = TRUE; break; #endif case CURLM_STATE_WAITCONNECT: /* awaiting a completion of an asynch TCP connect */ DEBUGASSERT(data->conn); result = Curl_is_connected(data->conn, FIRSTSOCKET, &connected); if(connected && !result) { #ifndef CURL_DISABLE_HTTP if((data->conn->http_proxy.proxytype == CURLPROXY_HTTPS && !data->conn->bits.proxy_ssl_connected[FIRSTSOCKET]) || Curl_connect_ongoing(data->conn)) { multistate(data, CURLM_STATE_WAITPROXYCONNECT); break; } #endif rc = CURLM_CALL_MULTI_PERFORM; multistate(data, data->conn->bits.tunnel_proxy? 
CURLM_STATE_WAITPROXYCONNECT: CURLM_STATE_SENDPROTOCONNECT); } else if(result) { /* failure detected */ Curl_posttransfer(data); multi_done(data, result, TRUE); stream_error = TRUE; break; } break; case CURLM_STATE_SENDPROTOCONNECT: result = protocol_connect(data->conn, &protocol_connected); if(!result && !protocol_connected) /* switch to waiting state */ multistate(data, CURLM_STATE_PROTOCONNECT); else if(!result) { /* protocol connect has completed, go WAITDO or DO */ multistate(data, CURLM_STATE_DO); rc = CURLM_CALL_MULTI_PERFORM; } else if(result) { /* failure detected */ Curl_posttransfer(data); multi_done(data, result, TRUE); stream_error = TRUE; } break; case CURLM_STATE_PROTOCONNECT: /* protocol-specific connect phase */ result = protocol_connecting(data->conn, &protocol_connected); if(!result && protocol_connected) { /* after the connect has completed, go WAITDO or DO */ multistate(data, CURLM_STATE_DO); rc = CURLM_CALL_MULTI_PERFORM; } else if(result) { /* failure detected */ Curl_posttransfer(data); multi_done(data, result, TRUE); stream_error = TRUE; } break; case CURLM_STATE_DO: if(data->set.connect_only) { /* keep connection open for application to use the socket */ connkeep(data->conn, "CONNECT_ONLY"); multistate(data, CURLM_STATE_DONE); result = CURLE_OK; rc = CURLM_CALL_MULTI_PERFORM; } else { /* Perform the protocol's DO action */ result = multi_do(data, &dophase_done); /* When multi_do() returns failure, data->conn might be NULL! */ if(!result) { if(!dophase_done) { #ifndef CURL_DISABLE_FTP /* some steps needed for wildcard matching */ if(data->state.wildcardmatch) { struct WildcardData *wc = &data->wildcard; if(wc->state == CURLWC_DONE || wc->state == CURLWC_SKIP) { /* skip some states if it is important */ multi_done(data, CURLE_OK, FALSE); multistate(data, CURLM_STATE_DONE); rc = CURLM_CALL_MULTI_PERFORM; break; } } #endif /* DO was not completed in one function call, we must continue DOING... */ multistate(data, CURLM_STATE_DOING); rc = CURLM_OK; } /* after DO, go DO_DONE... or DO_MORE */ else if(data->conn->bits.do_more) { /* we're supposed to do more, but we need to sit down, relax and wait a little while first */ multistate(data, CURLM_STATE_DO_MORE); rc = CURLM_OK; } else { /* we're done with the DO, now DO_DONE */ multistate(data, CURLM_STATE_DO_DONE); rc = CURLM_CALL_MULTI_PERFORM; } } else if((CURLE_SEND_ERROR == result) && data->conn->bits.reuse) { /* * In this situation, a connection that we were trying to use * may have unexpectedly died. If possible, send the connection * back to the CONNECT phase so we can try again. 
*/ char *newurl = NULL; followtype follow = FOLLOW_NONE; CURLcode drc; drc = Curl_retry_request(data->conn, &newurl); if(drc) { /* a failure here pretty much implies an out of memory */ result = drc; stream_error = TRUE; } Curl_posttransfer(data); drc = multi_done(data, result, FALSE); /* When set to retry the connection, we must to go back to * the CONNECT state */ if(newurl) { if(!drc || (drc == CURLE_SEND_ERROR)) { follow = FOLLOW_RETRY; drc = Curl_follow(data, newurl, follow); if(!drc) { multistate(data, CURLM_STATE_CONNECT); rc = CURLM_CALL_MULTI_PERFORM; result = CURLE_OK; } else { /* Follow failed */ result = drc; } } else { /* done didn't return OK or SEND_ERROR */ result = drc; } } else { /* Have error handler disconnect conn if we can't retry */ stream_error = TRUE; } free(newurl); } else { /* failure detected */ Curl_posttransfer(data); if(data->conn) multi_done(data, result, FALSE); stream_error = TRUE; } } break; case CURLM_STATE_DOING: /* we continue DOING until the DO phase is complete */ DEBUGASSERT(data->conn); result = protocol_doing(data->conn, &dophase_done); if(!result) { if(dophase_done) { /* after DO, go DO_DONE or DO_MORE */ multistate(data, data->conn->bits.do_more? CURLM_STATE_DO_MORE: CURLM_STATE_DO_DONE); rc = CURLM_CALL_MULTI_PERFORM; } /* dophase_done */ } else { /* failure detected */ Curl_posttransfer(data); multi_done(data, result, FALSE); stream_error = TRUE; } break; case CURLM_STATE_DO_MORE: /* * When we are connected, DO MORE and then go DO_DONE */ DEBUGASSERT(data->conn); result = multi_do_more(data->conn, &control); if(!result) { if(control) { /* if positive, advance to DO_DONE if negative, go back to DOING */ multistate(data, control == 1? CURLM_STATE_DO_DONE: CURLM_STATE_DOING); rc = CURLM_CALL_MULTI_PERFORM; } else /* stay in DO_MORE */ rc = CURLM_OK; } else { /* failure detected */ Curl_posttransfer(data); multi_done(data, result, FALSE); stream_error = TRUE; } break; case CURLM_STATE_DO_DONE: DEBUGASSERT(data->conn); if(data->conn->bits.multiplex) /* Check if we can move pending requests to send pipe */ process_pending_handles(multi); /* multiplexed */ /* Only perform the transfer if there's a good socket to work with. 
Having both BAD is a signal to skip immediately to DONE */ if((data->conn->sockfd != CURL_SOCKET_BAD) || (data->conn->writesockfd != CURL_SOCKET_BAD)) multistate(data, CURLM_STATE_PERFORM); else { #ifndef CURL_DISABLE_FTP if(data->state.wildcardmatch && ((data->conn->handler->flags & PROTOPT_WILDCARD) == 0)) { data->wildcard.state = CURLWC_DONE; } #endif multistate(data, CURLM_STATE_DONE); } rc = CURLM_CALL_MULTI_PERFORM; break; case CURLM_STATE_TOOFAST: /* limit-rate exceeded in either direction */ DEBUGASSERT(data->conn); /* if both rates are within spec, resume transfer */ if(Curl_pgrsUpdate(data->conn)) result = CURLE_ABORTED_BY_CALLBACK; else result = Curl_speedcheck(data, now); if(!result) { send_timeout_ms = 0; if(data->set.max_send_speed > 0) send_timeout_ms = Curl_pgrsLimitWaitTime(data->progress.uploaded, data->progress.ul_limit_size, data->set.max_send_speed, data->progress.ul_limit_start, now); recv_timeout_ms = 0; if(data->set.max_recv_speed > 0) recv_timeout_ms = Curl_pgrsLimitWaitTime(data->progress.downloaded, data->progress.dl_limit_size, data->set.max_recv_speed, data->progress.dl_limit_start, now); if(!send_timeout_ms && !recv_timeout_ms) { multistate(data, CURLM_STATE_PERFORM); Curl_ratelimit(data, now); } else if(send_timeout_ms >= recv_timeout_ms) Curl_expire(data, send_timeout_ms, EXPIRE_TOOFAST); else Curl_expire(data, recv_timeout_ms, EXPIRE_TOOFAST); } break; case CURLM_STATE_PERFORM: { char *newurl = NULL; bool retry = FALSE; bool comeback = FALSE; /* check if over send speed */ send_timeout_ms = 0; if(data->set.max_send_speed > 0) send_timeout_ms = Curl_pgrsLimitWaitTime(data->progress.uploaded, data->progress.ul_limit_size, data->set.max_send_speed, data->progress.ul_limit_start, now); /* check if over recv speed */ recv_timeout_ms = 0; if(data->set.max_recv_speed > 0) recv_timeout_ms = Curl_pgrsLimitWaitTime(data->progress.downloaded, data->progress.dl_limit_size, data->set.max_recv_speed, data->progress.dl_limit_start, now); if(send_timeout_ms || recv_timeout_ms) { Curl_ratelimit(data, now); multistate(data, CURLM_STATE_TOOFAST); if(send_timeout_ms >= recv_timeout_ms) Curl_expire(data, send_timeout_ms, EXPIRE_TOOFAST); else Curl_expire(data, recv_timeout_ms, EXPIRE_TOOFAST); break; } /* read/write data if it is ready to do so */ result = Curl_readwrite(data->conn, data, &done, &comeback); if(done || (result == CURLE_RECV_ERROR)) { /* If CURLE_RECV_ERROR happens early enough, we assume it was a race * condition and the server closed the re-used connection exactly when * we wanted to use it, so figure out if that is indeed the case. 
*/ CURLcode ret = Curl_retry_request(data->conn, &newurl); if(!ret) retry = (newurl)?TRUE:FALSE; else if(!result) result = ret; if(retry) { /* if we are to retry, set the result to OK and consider the request as done */ result = CURLE_OK; done = TRUE; } } else if((CURLE_HTTP2_STREAM == result) && Curl_h2_http_1_1_error(data->conn)) { CURLcode ret = Curl_retry_request(data->conn, &newurl); if(!ret) { infof(data, "Downgrades to HTTP/1.1!\n"); data->set.httpversion = CURL_HTTP_VERSION_1_1; /* clear the error message bit too as we ignore the one we got */ data->state.errorbuf = FALSE; if(!newurl) /* typically for HTTP_1_1_REQUIRED error on first flight */ newurl = strdup(data->change.url); /* if we are to retry, set the result to OK and consider the request as done */ retry = TRUE; result = CURLE_OK; done = TRUE; } else result = ret; } if(result) { /* * The transfer phase returned error, we mark the connection to get * closed to prevent being re-used. This is because we can't possibly * know if the connection is in a good shape or not now. Unless it is * a protocol which uses two "channels" like FTP, as then the error * happened in the data connection. */ if(!(data->conn->handler->flags & PROTOPT_DUAL) && result != CURLE_HTTP2_STREAM) streamclose(data->conn, "Transfer returned error"); Curl_posttransfer(data); multi_done(data, result, TRUE); } else if(done) { followtype follow = FOLLOW_NONE; /* call this even if the readwrite function returned error */ Curl_posttransfer(data); /* When we follow redirects or is set to retry the connection, we must to go back to the CONNECT state */ if(data->req.newurl || retry) { if(!retry) { /* if the URL is a follow-location and not just a retried request then figure out the URL here */ free(newurl); newurl = data->req.newurl; data->req.newurl = NULL; follow = FOLLOW_REDIR; } else follow = FOLLOW_RETRY; (void)multi_done(data, CURLE_OK, FALSE); /* multi_done() might return CURLE_GOT_NOTHING */ result = Curl_follow(data, newurl, follow); if(!result) { multistate(data, CURLM_STATE_CONNECT); rc = CURLM_CALL_MULTI_PERFORM; } free(newurl); } else { /* after the transfer is done, go DONE */ /* but first check to see if we got a location info even though we're not following redirects */ if(data->req.location) { free(newurl); newurl = data->req.location; data->req.location = NULL; result = Curl_follow(data, newurl, FOLLOW_FAKE); free(newurl); if(result) { stream_error = TRUE; result = multi_done(data, result, TRUE); } } if(!result) { multistate(data, CURLM_STATE_DONE); rc = CURLM_CALL_MULTI_PERFORM; } } } else if(comeback) rc = CURLM_CALL_MULTI_PERFORM; break; } case CURLM_STATE_DONE: /* this state is highly transient, so run another loop after this */ rc = CURLM_CALL_MULTI_PERFORM; if(data->conn) { CURLcode res; if(data->conn->bits.multiplex) /* Check if we can move pending requests to connection */ process_pending_handles(multi); /* multiplexing */ /* post-transfer command */ res = multi_done(data, result, FALSE); /* allow a previously set error code take precedence */ if(!result) result = res; /* * If there are other handles on the connection, multi_done won't set * conn to NULL. 
In such a case, curl_multi_remove_handle() can * access free'd data, if the connection is free'd and the handle * removed before we perform the processing in CURLM_STATE_COMPLETED */ if(data->conn) detach_connnection(data); } #ifndef CURL_DISABLE_FTP if(data->state.wildcardmatch) { if(data->wildcard.state != CURLWC_DONE) { /* if a wildcard is set and we are not ending -> lets start again with CURLM_STATE_INIT */ multistate(data, CURLM_STATE_INIT); break; } } #endif /* after we have DONE what we're supposed to do, go COMPLETED, and it doesn't matter what the multi_done() returned! */ multistate(data, CURLM_STATE_COMPLETED); break; case CURLM_STATE_COMPLETED: break; case CURLM_STATE_MSGSENT: data->result = result; return CURLM_OK; /* do nothing */ default: return CURLM_INTERNAL_ERROR; } statemachine_end: if(data->mstate < CURLM_STATE_COMPLETED) { if(result) { /* * If an error was returned, and we aren't in completed state now, * then we go to completed and consider this transfer aborted. */ /* NOTE: no attempt to disconnect connections must be made in the case blocks above - cleanup happens only here */ /* Check if we can move pending requests to send pipe */ process_pending_handles(multi); /* connection */ if(data->conn) { if(stream_error) { /* Don't attempt to send data over a connection that timed out */ bool dead_connection = result == CURLE_OPERATION_TIMEDOUT; struct connectdata *conn = data->conn; /* This is where we make sure that the conn pointer is reset. We don't have to do this in every case block above where a failure is detected */ detach_connnection(data); /* disconnect properly */ Curl_disconnect(data, conn, dead_connection); } } else if(data->mstate == CURLM_STATE_CONNECT) { /* Curl_connect() failed */ (void)Curl_posttransfer(data); } multistate(data, CURLM_STATE_COMPLETED); rc = CURLM_CALL_MULTI_PERFORM; } /* if there's still a connection to use, call the progress function */ else if(data->conn && Curl_pgrsUpdate(data->conn)) { /* aborted due to progress callback return code must close the connection */ result = CURLE_ABORTED_BY_CALLBACK; streamclose(data->conn, "Aborted by callback"); /* if not yet in DONE state, go there, otherwise COMPLETED */ multistate(data, (data->mstate < CURLM_STATE_DONE)? 
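/* not yet at DONE: go there first so the DONE processing runs;
   otherwise go straight to COMPLETED */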
CURLM_STATE_DONE: CURLM_STATE_COMPLETED); rc = CURLM_CALL_MULTI_PERFORM; } } if(CURLM_STATE_COMPLETED == data->mstate) { if(data->set.fmultidone) { /* signal via callback instead */ data->set.fmultidone(data, result); } else { /* now fill in the Curl_message with this info */ msg = &data->msg; msg->extmsg.msg = CURLMSG_DONE; msg->extmsg.easy_handle = data; msg->extmsg.data.result = result; rc = multi_addmsg(multi, msg); DEBUGASSERT(!data->conn); } multistate(data, CURLM_STATE_MSGSENT); } } while((rc == CURLM_CALL_MULTI_PERFORM) || multi_ischanged(multi, FALSE)); data->result = result; return rc; } CURLMcode curl_multi_perform(struct Curl_multi *multi, int *running_handles) { struct Curl_easy *data; CURLMcode returncode = CURLM_OK; struct Curl_tree *t; struct curltime now = Curl_now(); if(!GOOD_MULTI_HANDLE(multi)) return CURLM_BAD_HANDLE; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; data = multi->easyp; while(data) { CURLMcode result; SIGPIPE_VARIABLE(pipe_st); sigpipe_ignore(data, &pipe_st); result = multi_runsingle(multi, now, data); sigpipe_restore(&pipe_st); if(result) returncode = result; data = data->next; /* operate on next handle */ } /* * Simply remove all expired timers from the splay since handles are dealt * with unconditionally by this function and curl_multi_timeout() requires * that already passed/handled expire times are removed from the splay. * * It is important that the 'now' value is set at the entry of this function * and not for the current time as it may have ticked a little while since * then and then we risk this loop removing timers that actually have not * been handled! */ do { multi->timetree = Curl_splaygetbest(now, multi->timetree, &t); if(t) /* the removed handle may have another timeout in queue */ (void)add_next_timeout(now, multi, t->payload); } while(t); *running_handles = multi->num_alive; if(CURLM_OK >= returncode) Curl_update_timer(multi); return returncode; } CURLMcode curl_multi_cleanup(struct Curl_multi *multi) { struct Curl_easy *data; struct Curl_easy *nextdata; if(GOOD_MULTI_HANDLE(multi)) { if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; multi->type = 0; /* not good anymore */ /* First remove all remaining easy handles */ data = multi->easyp; while(data) { nextdata = data->next; if(!data->state.done && data->conn) /* if DONE was never called for this handle */ (void)multi_done(data, CURLE_OK, TRUE); if(data->dns.hostcachetype == HCACHE_MULTI) { /* clear out the usage of the shared DNS cache */ Curl_hostcache_clean(data, data->dns.hostcache); data->dns.hostcache = NULL; data->dns.hostcachetype = HCACHE_NONE; } /* Clear the pointer to the connection cache */ data->state.conn_cache = NULL; data->multi = NULL; /* clear the association */ #ifdef USE_LIBPSL if(data->psl == &multi->psl) data->psl = NULL; #endif data = nextdata; } /* Close all the connections in the connection cache */ Curl_conncache_close_all_connections(&multi->conn_cache); Curl_hash_destroy(&multi->sockhash); Curl_conncache_destroy(&multi->conn_cache); Curl_llist_destroy(&multi->msglist, NULL); Curl_llist_destroy(&multi->pending, NULL); Curl_hash_destroy(&multi->hostcache); Curl_psl_destroy(&multi->psl); free(multi); return CURLM_OK; } return CURLM_BAD_HANDLE; } /* * curl_multi_info_read() * * This function is the primary way for a multi/multi_socket application to * figure out if a transfer has ended. We MUST make this function as fast as * possible as it will be polled frequently and we MUST NOT scan any lists in * here to figure out things.
We must scale fine to thousands of handles and * beyond. The current design is fully O(1). */ CURLMsg *curl_multi_info_read(struct Curl_multi *multi, int *msgs_in_queue) { struct Curl_message *msg; *msgs_in_queue = 0; /* default to none */ if(GOOD_MULTI_HANDLE(multi) && !multi->in_callback && Curl_llist_count(&multi->msglist)) { /* there is one or more messages in the list */ struct curl_llist_element *e; /* extract the head of the list to return */ e = multi->msglist.head; msg = e->ptr; /* remove the extracted entry */ Curl_llist_remove(&multi->msglist, e, NULL); *msgs_in_queue = curlx_uztosi(Curl_llist_count(&multi->msglist)); return &msg->extmsg; } return NULL; } /* * singlesocket() checks what sockets we deal with and their "action state" * and if we have a different state in any of those sockets from last time we * call the callback accordingly. */ static CURLMcode singlesocket(struct Curl_multi *multi, struct Curl_easy *data) { curl_socket_t socks[MAX_SOCKSPEREASYHANDLE]; int i; struct Curl_sh_entry *entry; curl_socket_t s; int num; unsigned int curraction; int actions[MAX_SOCKSPEREASYHANDLE]; for(i = 0; i< MAX_SOCKSPEREASYHANDLE; i++) socks[i] = CURL_SOCKET_BAD; /* Fill in the 'current' struct with the state as it is now: what sockets to supervise and for what actions */ curraction = multi_getsock(data, socks); /* We have 0 .. N sockets already and we get to know about the 0 .. M sockets we should have from now on. Detect the differences, remove no longer supervised ones and add new ones */ /* walk over the sockets we got right now */ for(i = 0; (i< MAX_SOCKSPEREASYHANDLE) && (curraction & (GETSOCK_READSOCK(i) | GETSOCK_WRITESOCK(i))); i++) { unsigned int action = CURL_POLL_NONE; unsigned int prevaction = 0; unsigned int comboaction; bool sincebefore = FALSE; s = socks[i]; /* get it from the hash */ entry = sh_getentry(&multi->sockhash, s); if(curraction & GETSOCK_READSOCK(i)) action |= CURL_POLL_IN; if(curraction & GETSOCK_WRITESOCK(i)) action |= CURL_POLL_OUT; actions[i] = action; if(entry) { /* check if new for this transfer */ int j; for(j = 0; j< data->numsocks; j++) { if(s == data->sockets[j]) { prevaction = data->actions[j]; sincebefore = TRUE; break; } } } else { /* this is a socket we didn't have before, add it to the hash! */ entry = sh_addentry(&multi->sockhash, s); if(!entry) /* fatal */ return CURLM_OUT_OF_MEMORY; } if(sincebefore && (prevaction != action)) { /* Socket was used already, but different action now */ if(prevaction & CURL_POLL_IN) entry->readers--; if(prevaction & CURL_POLL_OUT) entry->writers--; if(action & CURL_POLL_IN) entry->readers++; if(action & CURL_POLL_OUT) entry->writers++; } else if(!sincebefore) { /* a new user */ entry->users++; if(action & CURL_POLL_IN) entry->readers++; if(action & CURL_POLL_OUT) entry->writers++; /* add 'data' to the transfer hash on this socket! */ if(!Curl_hash_add(&entry->transfers, (char *)&data, /* hash key */ sizeof(struct Curl_easy *), data)) return CURLM_OUT_OF_MEMORY; } comboaction = (entry->writers? CURL_POLL_OUT : 0) | (entry->readers ? 
CURL_POLL_IN : 0); /* socket existed before and has the same action set as before */ if(sincebefore && (entry->action == comboaction)) /* same, continue */ continue; if(multi->socket_cb) multi->socket_cb(data, s, comboaction, multi->socket_userp, entry->socketp); entry->action = comboaction; /* store the current action state */ } num = i; /* number of sockets */ /* when we've walked over all the sockets we should have right now, we must make sure to detect sockets that are removed */ for(i = 0; i< data->numsocks; i++) { int j; bool stillused = FALSE; s = data->sockets[i]; for(j = 0; j < num; j++) { if(s == socks[j]) { /* this is still supervised */ stillused = TRUE; break; } } if(stillused) continue; entry = sh_getentry(&multi->sockhash, s); /* if this is NULL here, the socket has been closed and notified so already by Curl_multi_closed() */ if(entry) { int oldactions = data->actions[i]; /* this socket has been removed. Decrease user count */ entry->users--; if(oldactions & CURL_POLL_OUT) entry->writers--; if(oldactions & CURL_POLL_IN) entry->readers--; if(!entry->users) { if(multi->socket_cb) multi->socket_cb(data, s, CURL_POLL_REMOVE, multi->socket_userp, entry->socketp); sh_delentry(entry, &multi->sockhash, s); } else { /* still users, but remove this handle as a user of this socket */ if(Curl_hash_delete(&entry->transfers, (char *)&data, sizeof(struct Curl_easy *))) { DEBUGASSERT(NULL); } } } } /* for loop over numsocks */ memcpy(data->sockets, socks, num*sizeof(curl_socket_t)); memcpy(data->actions, actions, num*sizeof(int)); data->numsocks = num; return CURLM_OK; } void Curl_updatesocket(struct Curl_easy *data) { singlesocket(data->multi, data); } /* * Curl_multi_closed() * * Used by the connect code to tell the multi_socket code that one of the * sockets we were using is about to be closed. This function will then * remove it from the sockethash for this handle to make the multi_socket API * behave properly, especially for the case when libcurl will create another * socket again and it gets the same file descriptor number. */ void Curl_multi_closed(struct Curl_easy *data, curl_socket_t s) { if(data) { /* if there's still an easy handle associated with this connection */ struct Curl_multi *multi = data->multi; if(multi) { /* this is set if this connection is part of a handle that is added to a multi handle, and only then this is necessary */ struct Curl_sh_entry *entry = sh_getentry(&multi->sockhash, s); if(entry) { if(multi->socket_cb) multi->socket_cb(data, s, CURL_POLL_REMOVE, multi->socket_userp, entry->socketp); /* now remove it from the socket hash */ sh_delentry(entry, &multi->sockhash, s); } } } } /* * add_next_timeout() * * Each Curl_easy has a list of timeouts. The add_next_timeout() is called * when it has just been removed from the splay tree because the timeout has * expired. This function is then to advance in the list to pick the next * timeout to use (skip the already expired ones) and add this node back to * the splay tree again. * * The splay tree only has each sessionhandle as a single node and the nearest * timeout is used to sort it on. 
*/ static CURLMcode add_next_timeout(struct curltime now, struct Curl_multi *multi, struct Curl_easy *d) { struct curltime *tv = &d->state.expiretime; struct curl_llist *list = &d->state.timeoutlist; struct curl_llist_element *e; struct time_node *node = NULL; /* move over the timeout list for this specific handle and remove all timeouts that have now passed, and store the next pending timeout in *tv */ for(e = list->head; e;) { struct curl_llist_element *n = e->next; timediff_t diff; node = (struct time_node *)e->ptr; diff = Curl_timediff(node->time, now); if(diff <= 0) /* remove outdated entry */ Curl_llist_remove(list, e, NULL); else /* the list is sorted so get out on the first mismatch */ break; e = n; } e = list->head; if(!e) { /* clear the expire times within the handles that we remove from the splay tree */ tv->tv_sec = 0; tv->tv_usec = 0; } else { /* copy the first entry to 'tv' */ memcpy(tv, &node->time, sizeof(*tv)); /* Insert this node again into the splay. Keep the timer in the list in case we need to recompute future timers. */ multi->timetree = Curl_splayinsert(*tv, multi->timetree, &d->state.timenode); } return CURLM_OK; } static CURLMcode multi_socket(struct Curl_multi *multi, bool checkall, curl_socket_t s, int ev_bitmask, int *running_handles) { CURLMcode result = CURLM_OK; struct Curl_easy *data = NULL; struct Curl_tree *t; struct curltime now = Curl_now(); if(checkall) { /* *perform() deals with running_handles on its own */ result = curl_multi_perform(multi, running_handles); /* walk through each easy handle and do the socket state change magic and callbacks */ if(result != CURLM_BAD_HANDLE) { data = multi->easyp; while(data && !result) { result = singlesocket(multi, data); data = data->next; } } /* or should we fall-through and do the timer-based stuff? */ return result; } if(s != CURL_SOCKET_TIMEOUT) { struct Curl_sh_entry *entry = sh_getentry(&multi->sockhash, s); if(!entry) /* Unmatched socket, we can't act on it but we ignore this fact. In real-world tests it has been proved that libevent can in fact give the application actions even though the socket was just previously asked to get removed, so we had better survive stray socket actions and just move on. */ ; else { struct curl_hash_iterator iter; struct curl_hash_element *he; /* the socket can be shared by many transfers, iterate */ Curl_hash_start_iterate(&entry->transfers, &iter); for(he = Curl_hash_next_element(&iter); he; he = Curl_hash_next_element(&iter)) { data = (struct Curl_easy *)he->ptr; DEBUGASSERT(data); DEBUGASSERT(data->magic == CURLEASY_MAGIC_NUMBER); if(data->conn && !(data->conn->handler->flags & PROTOPT_DIRLOCK)) /* set socket event bitmask if they're not locked */ data->conn->cselect_bits = ev_bitmask; Curl_expire(data, 0, EXPIRE_RUN_NOW); } /* Now we fall-through and do the timer-based stuff, since we don't want to force the user to have to deal with timeouts as long as at least one connection in fact has traffic. */ data = NULL; /* set data to NULL again to avoid calling multi_runsingle() in case there's no need to */ now = Curl_now(); /* get a newer time since the multi_runsingle() loop may have taken some time */ } } else { /* Asked to run due to time-out. Clear the 'lastcall' variable to force Curl_update_timer() to trigger a callback to the app again even if the same timeout is still the one to run after this call. That handles the case when the application asks libcurl to run the timeout prematurely.
*/ memset(&multi->timer_lastcall, 0, sizeof(multi->timer_lastcall)); } /* * The loop following here will go on as long as there are expire-times left * to process in the splay and 'data' will be re-assigned for every expired * handle we deal with. */ do { /* the first loop lap 'data' can be NULL */ if(data) { SIGPIPE_VARIABLE(pipe_st); sigpipe_ignore(data, &pipe_st); result = multi_runsingle(multi, now, data); sigpipe_restore(&pipe_st); if(CURLM_OK >= result) { /* get the socket(s) and check if the state has been changed since last */ result = singlesocket(multi, data); if(result) return result; } } /* Check if there's one (more) expired timer to deal with! This function extracts a matching node if there is one */ multi->timetree = Curl_splaygetbest(now, multi->timetree, &t); if(t) { data = t->payload; /* assign this for next loop */ (void)add_next_timeout(now, multi, t->payload); } } while(t); *running_handles = multi->num_alive; return result; } #undef curl_multi_setopt CURLMcode curl_multi_setopt(struct Curl_multi *multi, CURLMoption option, ...) { CURLMcode res = CURLM_OK; va_list param; if(!GOOD_MULTI_HANDLE(multi)) return CURLM_BAD_HANDLE; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; va_start(param, option); switch(option) { case CURLMOPT_SOCKETFUNCTION: multi->socket_cb = va_arg(param, curl_socket_callback); break; case CURLMOPT_SOCKETDATA: multi->socket_userp = va_arg(param, void *); break; case CURLMOPT_PUSHFUNCTION: multi->push_cb = va_arg(param, curl_push_callback); break; case CURLMOPT_PUSHDATA: multi->push_userp = va_arg(param, void *); break; case CURLMOPT_PIPELINING: multi->multiplexing = va_arg(param, long) & CURLPIPE_MULTIPLEX; break; case CURLMOPT_TIMERFUNCTION: multi->timer_cb = va_arg(param, curl_multi_timer_callback); break; case CURLMOPT_TIMERDATA: multi->timer_userp = va_arg(param, void *); break; case CURLMOPT_MAXCONNECTS: multi->maxconnects = va_arg(param, long); break; case CURLMOPT_MAX_HOST_CONNECTIONS: multi->max_host_connections = va_arg(param, long); break; case CURLMOPT_MAX_TOTAL_CONNECTIONS: multi->max_total_connections = va_arg(param, long); break; /* options formerly used for pipelining */ case CURLMOPT_MAX_PIPELINE_LENGTH: break; case CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE: break; case CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE: break; case CURLMOPT_PIPELINING_SITE_BL: break; case CURLMOPT_PIPELINING_SERVER_BL: break; default: res = CURLM_UNKNOWN_OPTION; break; } va_end(param); return res; } /* we define curl_multi_socket() in the public multi.h header */ #undef curl_multi_socket CURLMcode curl_multi_socket(struct Curl_multi *multi, curl_socket_t s, int *running_handles) { CURLMcode result; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; result = multi_socket(multi, FALSE, s, 0, running_handles); if(CURLM_OK >= result) Curl_update_timer(multi); return result; } CURLMcode curl_multi_socket_action(struct Curl_multi *multi, curl_socket_t s, int ev_bitmask, int *running_handles) { CURLMcode result; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; result = multi_socket(multi, FALSE, s, ev_bitmask, running_handles); if(CURLM_OK >= result) Curl_update_timer(multi); return result; } CURLMcode curl_multi_socket_all(struct Curl_multi *multi, int *running_handles) { CURLMcode result; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; result = multi_socket(multi, TRUE, CURL_SOCKET_BAD, 0, running_handles); if(CURLM_OK >= result) Curl_update_timer(multi); return result; } static CURLMcode multi_timeout(struct Curl_multi *multi, long *timeout_ms) { 
static struct curltime tv_zero = {0, 0}; if(multi->timetree) { /* we have a tree of expire times */ struct curltime now = Curl_now(); /* splay the lowest to the bottom */ multi->timetree = Curl_splay(tv_zero, multi->timetree); if(Curl_splaycomparekeys(multi->timetree->key, now) > 0) { /* some time left before expiration */ timediff_t diff = Curl_timediff(multi->timetree->key, now); if(diff <= 0) /* * Since we only provide millisecond resolution on the returned value * and the diff might be less than one millisecond here, we don't * return zero as that may cause short bursts of busyloops on fast * processors while the diff is still present but less than one * millisecond! instead we return 1 until the time is ripe. */ *timeout_ms = 1; else /* this should be safe even on 64 bit archs, as we don't use that overly long timeouts */ *timeout_ms = (long)diff; } else /* 0 means immediately */ *timeout_ms = 0; } else *timeout_ms = -1; return CURLM_OK; } CURLMcode curl_multi_timeout(struct Curl_multi *multi, long *timeout_ms) { /* First, make some basic checks that the CURLM handle is a good handle */ if(!GOOD_MULTI_HANDLE(multi)) return CURLM_BAD_HANDLE; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; return multi_timeout(multi, timeout_ms); } /* * Tell the application it should update its timers, if it subscribes to the * update timer callback. */ void Curl_update_timer(struct Curl_multi *multi) { long timeout_ms; if(!multi->timer_cb) return; if(multi_timeout(multi, &timeout_ms)) { return; } if(timeout_ms < 0) { static const struct curltime none = {0, 0}; if(Curl_splaycomparekeys(none, multi->timer_lastcall)) { multi->timer_lastcall = none; /* there's no timeout now but there was one previously, tell the app to disable it */ multi->timer_cb(multi, -1, multi->timer_userp); return; } return; } /* When multi_timeout() is done, multi->timetree points to the node with the * timeout we got the (relative) time-out time for. We can thus easily check * if this is the same (fixed) time as we got in a previous call and then * avoid calling the callback again. */ if(Curl_splaycomparekeys(multi->timetree->key, multi->timer_lastcall) == 0) return; multi->timer_lastcall = multi->timetree->key; multi->timer_cb(multi, timeout_ms, multi->timer_userp); } /* * multi_deltimeout() * * Remove a given timestamp from the list of timeouts. */ static void multi_deltimeout(struct Curl_easy *data, expire_id eid) { struct curl_llist_element *e; struct curl_llist *timeoutlist = &data->state.timeoutlist; /* find and remove the specific node from the list */ for(e = timeoutlist->head; e; e = e->next) { struct time_node *n = (struct time_node *)e->ptr; if(n->eid == eid) { Curl_llist_remove(timeoutlist, e, NULL); return; } } } /* * multi_addtimeout() * * Add a timestamp to the list of timeouts. Keep the list sorted so that head * of list is always the timeout nearest in time. 
* */ static CURLMcode multi_addtimeout(struct Curl_easy *data, struct curltime *stamp, expire_id eid) { struct curl_llist_element *e; struct time_node *node; struct curl_llist_element *prev = NULL; size_t n; struct curl_llist *timeoutlist = &data->state.timeoutlist; node = &data->state.expires[eid]; /* copy the timestamp and id */ memcpy(&node->time, stamp, sizeof(*stamp)); node->eid = eid; /* also marks it as in use */ n = Curl_llist_count(timeoutlist); if(n) { /* find the correct spot in the list */ for(e = timeoutlist->head; e; e = e->next) { struct time_node *check = (struct time_node *)e->ptr; timediff_t diff = Curl_timediff(check->time, node->time); if(diff > 0) break; prev = e; } } /* else this is the first timeout on the list */ Curl_llist_insert_next(timeoutlist, prev, node, &node->list); return CURLM_OK; } /* * Curl_expire() * * given a number of milliseconds from now to use to set the 'act before * this'-time for the transfer, to be extracted by curl_multi_timeout() * * The timeout will be added to a queue of timeouts if it defines a moment in * time that is later than the current head of queue. * * Expire replaces a former timeout using the same id if already set. */ void Curl_expire(struct Curl_easy *data, timediff_t milli, expire_id id) { struct Curl_multi *multi = data->multi; struct curltime *nowp = &data->state.expiretime; struct curltime set; /* this is only interesting while there is still an associated multi struct remaining! */ if(!multi) return; DEBUGASSERT(id < EXPIRE_LAST); set = Curl_now(); set.tv_sec += (time_t)(milli/1000); /* might be a 64 to 32 bit conversion */ set.tv_usec += (unsigned int)(milli%1000)*1000; if(set.tv_usec >= 1000000) { set.tv_sec++; set.tv_usec -= 1000000; } /* Remove any timer with the same id just in case. */ multi_deltimeout(data, id); /* Add it to the timer list. It must stay in the list until it has expired in case we need to recompute the minimum timer later. */ multi_addtimeout(data, &set, id); if(nowp->tv_sec || nowp->tv_usec) { /* This means that the struct is added as a node in the splay tree. Compare if the new time is earlier, and only remove-old/add-new if it is. */ timediff_t diff = Curl_timediff(set, *nowp); int rc; if(diff > 0) { /* The current splay tree entry is sooner than this new expiry time. We don't need to update our splay tree entry. */ return; } /* Since this is an updated time, we must remove the previous entry from the splay tree first and then re-add the new value */ rc = Curl_splayremovebyaddr(multi->timetree, &data->state.timenode, &multi->timetree); if(rc) infof(data, "Internal error removing splay node = %d\n", rc); } /* Indicate that we are in the splay tree and insert the new timer expiry value since it is our local minimum. */ *nowp = set; data->state.timenode.payload = data; multi->timetree = Curl_splayinsert(*nowp, multi->timetree, &data->state.timenode); } /* * Curl_expire_done() * * Removes the expire timer. Marks it as done. * */ void Curl_expire_done(struct Curl_easy *data, expire_id id) { /* remove the timer, if there */ multi_deltimeout(data, id); } /* * Curl_expire_clear() * * Clear ALL timeout values for this handle. */ void Curl_expire_clear(struct Curl_easy *data) { struct Curl_multi *multi = data->multi; struct curltime *nowp = &data->state.expiretime; /* this is only interesting while there is still an associated multi struct remaining! 
*/ if(!multi) return; if(nowp->tv_sec || nowp->tv_usec) { /* Since this is a cleared time, we must remove the previous entry from the splay tree */ struct curl_llist *list = &data->state.timeoutlist; int rc; rc = Curl_splayremovebyaddr(multi->timetree, &data->state.timenode, &multi->timetree); if(rc) infof(data, "Internal error clearing splay node = %d\n", rc); /* flush the timeout list too */ while(list->size > 0) { Curl_llist_remove(list, list->tail, NULL); } #ifdef DEBUGBUILD infof(data, "Expire cleared (transfer %p)\n", data); #endif nowp->tv_sec = 0; nowp->tv_usec = 0; } } CURLMcode curl_multi_assign(struct Curl_multi *multi, curl_socket_t s, void *hashp) { struct Curl_sh_entry *there = NULL; if(multi->in_callback) return CURLM_RECURSIVE_API_CALL; there = sh_getentry(&multi->sockhash, s); if(!there) return CURLM_BAD_SOCKET; there->socketp = hashp; return CURLM_OK; } size_t Curl_multi_max_host_connections(struct Curl_multi *multi) { return multi ? multi->max_host_connections : 0; } size_t Curl_multi_max_total_connections(struct Curl_multi *multi) { return multi ? multi->max_total_connections : 0; } /* * When information about a connection has appeared, call this! */ void Curl_multiuse_state(struct connectdata *conn, int bundlestate) /* use BUNDLE_* defines */ { DEBUGASSERT(conn); DEBUGASSERT(conn->bundle); DEBUGASSERT(conn->data); DEBUGASSERT(conn->data->multi); conn->bundle->multiuse = bundlestate; process_pending_handles(conn->data->multi); } static void process_pending_handles(struct Curl_multi *multi) { struct curl_llist_element *e = multi->pending.head; if(e) { struct Curl_easy *data = e->ptr; DEBUGASSERT(data->mstate == CURLM_STATE_CONNECT_PEND); multistate(data, CURLM_STATE_CONNECT); /* Remove this node from the list */ Curl_llist_remove(&multi->pending, e, NULL); /* Make sure that the handle will be processed soonish. */ Curl_expire(data, 0, EXPIRE_RUN_NOW); /* mark this as having been in the pending queue */ data->state.previouslypending = TRUE; } } void Curl_set_in_callback(struct Curl_easy *data, bool value) { /* might get called when there is no data pointer! */ if(data) { if(data->multi_easy) data->multi_easy->in_callback = value; else if(data->multi) data->multi->in_callback = value; } } bool Curl_is_in_callback(struct Curl_easy *easy) { return ((easy->multi && easy->multi->in_callback) || (easy->multi_easy && easy->multi_easy->in_callback)); } #ifdef DEBUGBUILD void Curl_multi_dump(struct Curl_multi *multi) { struct Curl_easy *data; int i; fprintf(stderr, "* Multi status: %d handles, %d alive\n", multi->num_easy, multi->num_alive); for(data = multi->easyp; data; data = data->next) { if(data->mstate < CURLM_STATE_COMPLETED) { /* only display handles that are not completed */ fprintf(stderr, "handle %p, state %s, %d sockets\n", (void *)data, statename[data->mstate], data->numsocks); for(i = 0; i < data->numsocks; i++) { curl_socket_t s = data->sockets[i]; struct Curl_sh_entry *entry = sh_getentry(&multi->sockhash, s); fprintf(stderr, "%d ", (int)s); if(!entry) { fprintf(stderr, "INTERNAL CONFUSION\n"); continue; } fprintf(stderr, "[%s %s] ", (entry->action&CURL_POLL_IN)?"RECVING":"", (entry->action&CURL_POLL_OUT)?"SENDING":""); } if(data->numsocks) fprintf(stderr, "\n"); } } } #endif
PooyaEimandar/WolfEngine
engine/src/wolf.system/curl/src/multi.c
C
mit
99,453
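The multi.c internals above are only reachable through the public multi API. A minimal sketch of a driving loop, assuming nothing beyond <curl/curl.h> and a placeholder URL, exercises curl_multi_perform(), curl_multi_info_read() and curl_multi_cleanup() as defined in the file:

#include <curl/curl.h>
#include <stdio.h>

int main(void)
{
  CURLM *multi;
  CURL *easy;
  int still_running = 0;
  int msgs_left;
  CURLMsg *msg;

  curl_global_init(CURL_GLOBAL_DEFAULT);
  multi = curl_multi_init();
  easy = curl_easy_init();
  curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/"); /* placeholder */
  curl_multi_add_handle(multi, easy);

  do {
    CURLMcode mc = curl_multi_perform(multi, &still_running);
    if(!mc && still_running)
      /* block until activity or timeout; this consults the same expire
         machinery that multi_timeout() maintains above */
      mc = curl_multi_wait(multi, NULL, 0, 1000, NULL);
    if(mc)
      break;
  } while(still_running);

  /* drain the CURLMSG_DONE messages queued by multi_addmsg() */
  while((msg = curl_multi_info_read(multi, &msgs_left)))
    if(msg->msg == CURLMSG_DONE)
      printf("transfer finished with code %d\n", (int)msg->data.result);

  curl_multi_remove_handle(multi, easy);
  curl_easy_cleanup(easy);
  curl_multi_cleanup(multi);
  curl_global_cleanup();
  return 0;
}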
/** \brief A timer module with only a single compare value. Can be used to replace the "bsp_timer" and "radiotimer" modules with the help of abstimer. \author Xavi Vilajosana <xvilajosana@eecs.berkeley.edu>, May 2012. \author Thomas Watteyne <watteyne@eecs.berkeley.edu>, May 2012. */ #include <string.h> // memset() #include "sctimer.h" #include "msp430x26x.h" //=========================== defines ========================================= //=========================== variables ======================================= typedef struct { uint8_t running; uint16_t taiv; } sctimers_vars_t; sctimers_vars_t sctimers_vars; //=========================== prototypes ====================================== void sctimer_setup(); void sctimer_start(); //=========================== public ========================================== void sctimer_init() { sctimer_setup(); } void sctimer_stop() { sctimer_setup(); } void sctimer_schedule(uint16_t val) { if (sctimers_vars.running==0) { sctimers_vars.running=1; sctimer_start(); } // load when to fire TACCR1 = val; // enable interrupt TACCTL1 = CCIE; } uint16_t sctimer_getValue() { return TAR; } void sctimer_setCb(sctimer_cbt cb){ // does nothing as it is done by IAR -- look at board.c } void sctimer_clearISR() { sctimers_vars.taiv = TAIV; // read TAIV to clear the flags. } //=========================== private ========================================= void sctimer_setup() { // clear local variables memset(&sctimers_vars,0,sizeof(sctimers_vars_t)); // ACLK sources from external 32kHz BCSCTL3 |= LFXT1S_0; // disable all compares TACCTL0 = 0; TACCR0 = 0; // CCR1 in compare mode (disabled for now) TACCTL1 = 0; TACCR1 = 0; // CCR2 in capture mode TACCTL2 = 0; TACCR2 = 0; // reset counter TAR = 0; } void sctimer_start() { // start counting TACTL = MC_2+TASSEL_1; // continuous mode, clocked from ACLK }
barriquello/iotstack
openwsn-fw-work/firmware/openos/bsp/boards/gina/sctimer.c
C
mit
2,026
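Because the module exposes a single absolute compare value against the free-running TAR, periodic behaviour has to be built by the caller re-arming the compare from its own interrupt handler. A sketch under that assumption (the tick count is illustrative, based on the 32 kHz ACLK selected in sctimer_setup(); the ISR hook and its wiring via board.c are hypothetical):

#include "sctimer.h"

#define PERIOD_TICKS 328 /* roughly 10 ms at 32768 Hz */

/* start a hypothetical periodic activity on top of the one-shot compare */
void periodic_start(void) {
   sctimer_init();
   sctimer_schedule(sctimer_getValue() + PERIOD_TICKS);
}

/* called from the TIMERA1 vector that IAR wires up (see board.c):
   acknowledge the interrupt, then re-arm relative to the current count */
void periodic_isr_hook(void) {
   sctimer_clearISR();
   sctimer_schedule(sctimer_getValue() + PERIOD_TICKS);
}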
#include "multi_modelstruct.h" #include "multivar_support.h" #include <math.h> #include <assert.h> #include <gsl/gsl_linalg.h> #include <gsl/gsl_eigen.h> #include <gsl/gsl_blas.h> #include <gsl/gsl_errno.h> /** * ccs, 05.06.2012 * \todo this all needs to be checked carefully against an example that works in R * \todo valgrind for mem-leaks * \bug no error checking in linalg * \bug no error checking in alloc * \bug is there error checking in file handles? * * ccs, 19.06.2012 * if you allocate a multi_modelstruct m * then m->pca_model_array[i]->xmodel == m->xmodel * the value in the pca_model_array is just a pointer to m, do we really want this? */ /** * allocates a multi_modelstruct, like alloc_modelstruct_2, * but for multivariate models with t output values at each location * * @param model_in: (n x d) matrix of the design * @param training_matrix: (n x t) matrix of the values of the training values at each of the n locations * @param cov_fn_index: POWEREXPCOVFN, MATERN32, or MATERN52 * @param regression_order: 0, 1, 2, or 3 * @param varfrac: the minimum fractional variance that should be retained during the PCA decomp * * applies a pca decomp to training_matrix to reduce the dimensionality * */ multi_modelstruct* alloc_multimodelstruct(gsl_matrix *xmodel_in, gsl_matrix *training_matrix_in, int cov_fn_index, int regression_order, double varfrac) { assert(training_matrix_in->size1 == xmodel_in->size1); assert(training_matrix_in->size1 > 0); assert(training_matrix_in->size2 > 0); assert(xmodel_in->size2 > 0); int i; double mean_temp = 0.0; int nt = training_matrix_in->size2; int nmodel_points = xmodel_in->size1; int nparams = xmodel_in->size2; gsl_vector_view col_view; /* use default if out of range */ if (regression_order < 0 || regression_order > 3) regression_order = 0; /* use a sensible default for the variance fraction */ if(varfrac < 0 || varfrac > 1) varfrac = 0.95; /* ntheta is a function of cov_fn_index and nparams */ int nthetas; if ((cov_fn_index == MATERN32) || (cov_fn_index == MATERN52)) { nthetas = 3; } else if (cov_fn_index == POWEREXPCOVFN) { nthetas = nparams + 2; } else { cov_fn_index = POWEREXPCOVFN; nthetas = nparams + 2; } // this doesn't seem to be allocating correctly, seems to be a broken defn of MallocChecked // strangely, code will build in this case... //multi_modelstruct * model = (multi_modelstruct*)MallocChecked(sizeof(multi_modelstruct)); multi_modelstruct * model = (multi_modelstruct*)malloc(sizeof(multi_modelstruct)); // fill in model->nt = nt; model->nr = 0; // init at zero model->nmodel_points = nmodel_points; model->nparams = nparams; model->xmodel = xmodel_in; model->training_matrix = training_matrix_in; model->training_mean = gsl_vector_alloc(nt); model->regression_order = regression_order; model->cov_fn_index = cov_fn_index; /* fill in the mean vector, should probably sum this more carefully... 
*/ for(i = 0; i < nt; i++){ col_view = gsl_matrix_column(model->training_matrix, i); mean_temp = vector_elt_sum(&col_view.vector, nmodel_points); //printf("%lf\n", (mean_temp/((double)nmodel_points))); gsl_vector_set(model->training_mean, i, (mean_temp/((double)nmodel_points)) ); } /* carry out the pca decomp on this model, this is defined in multivar_support for now * this will fill in nr, pca_eigenvalues, pca_eigenvectors, pca_evals_r, pca_evecs_r * * this is making a mess if nt = 1 */ gen_pca_decomp(model, varfrac); /* fill in pca_model_array */ gen_pca_model_array(model); return model; } /** * fill in pca_model_array * requires: * - the pca decomp to have been calculated, so nr and all the pca_... fields are allocated and filled in * - m to have been allocated up to: nt, nmodel_points, and xmodel * * is it possible we could start running out of memory doing all these allocs? */ void gen_pca_model_array(multi_modelstruct *m) { int nr = m->nr; int i; gsl_vector_view col_view; gsl_vector* temp_train_vector = gsl_vector_alloc(m->nmodel_points); //gsl_matrix* temp_xmodel = gsl_matrix_alloc(m->nmodel_points, m->nparams); //gsl_matrix_mempcy(temp_xmodel, m->xmodel); // alloc the array of nr model structs //m->pca_model_array = (modelstruct**)MallocChecked(sizeof(modelstruct*)*nr); m->pca_model_array = (modelstruct**)malloc(sizeof(modelstruct*)*nr); // fill in the modelstructs correctly for(i = 0; i < nr; i++){ col_view = gsl_matrix_column(m->pca_zmatrix, i); gsl_vector_memcpy(temp_train_vector, &(col_view.vector)); // this isn't copying in the training vector correctly for some reason m->pca_model_array[i] = alloc_modelstruct_2(m->xmodel, temp_train_vector, m->cov_fn_index, m->regression_order); // see if brute forcing it will work m->pca_model_array[i]->training_vector = gsl_vector_alloc(m->nmodel_points); gsl_vector_memcpy(m->pca_model_array[i]->training_vector, temp_train_vector); } gsl_vector_free(temp_train_vector); } /** * carries out a pca decomp on m->training_matrix; * setting m->nr, m->pca_eigenvalues, m->pca_eigenvectors and * initializing m->pca_model_array * * the pca decomp is pretty simple: * let Y_ij = m->y_training * let mu_i = (1/nmodel_points) sum_{j=1}^{nmodel_points} Y_{ij} // the sample mean * let ysub_i = Y_i - rep(mu_i,nmodel_points) //subtract the sample means from each column * let sigma_ij = (1/nmodel_points) ( ysub * t(ysub)) // this is the covariance matrix * * then all we need to do is compute the eigendecomposition * * sigma_ij = U^{-1} Lambda U * * where Lambda is a diagonal matrix of t eigenvalues and U is a t x t matrix with the eigenvectors as columns * * requires: * - 0 < vfrac < 1 (0.95 is a good value) * - m to have allocated and filled out, nt, nmodel_points, training_matrix, (doesn't need xmodel explicitly) */ void gen_pca_decomp(multi_modelstruct *m, double vfrac) { FILE *fptr; // for debug output int i,j; int nt = m->nt; int retval; double total_variance = 0.0, frac = 0.0; gsl_matrix *pca_zmatrix_temp; gsl_matrix *y_sub_mat = gsl_matrix_alloc(m->nmodel_points, nt); gsl_matrix *y_temp_mat = gsl_matrix_alloc(m->nmodel_points, nt); gsl_matrix *y_cov_mat = gsl_matrix_alloc(nt, nt); gsl_vector *evals_temp = gsl_vector_alloc(nt); gsl_matrix *evecs_temp = gsl_matrix_alloc(nt,nt); gsl_eigen_symmv_workspace *ework = gsl_eigen_symmv_alloc(nt); gsl_vector_view col_view; // why is this here?
gsl_matrix_memcpy(y_sub_mat, m->training_matrix); // subtract out the mean for(i = 0; i < nt; i++){ //col_view = gsl_matrix_column(y_sub_mat, i); printf("# y(%d) mean: %lf\n", i, gsl_vector_get(m->training_mean, i)); for(j = 0; j < m->nmodel_points; j++){ gsl_matrix_set(y_sub_mat, j, i, gsl_matrix_get(y_sub_mat, j, i) - gsl_vector_get(m->training_mean, i)); } } // compute the sample-variance, by multiplying y_sub_mat, with itself transposed gsl_matrix_memcpy(y_temp_mat, y_sub_mat); gsl_matrix_set_zero(y_cov_mat); /** — Function: int gsl_blas_dgemm (CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB, double alpha, const gsl_matrix * A, const gsl_matrix * B, double beta, gsl_matrix * C) (always forget this one)*/ /* want C (nt x nt ) so we need to do: (nt x nmodel_points) * (nmodel_points x nt) */ /** * this is strange, the upper triangle is not obviously correct */ //retval = gsl_blas_dgemm(CblasTrans, CblasNoTrans, 1.0, y_temp_mat, y_sub_mat, 0.0, y_cov_mat); retval = gsl_blas_dgemm(CblasTrans, CblasNoTrans, 1.0, y_sub_mat, y_temp_mat, 0.0, y_cov_mat); if(retval){ printf("# gen_pca_decomp:gsl_blas_dgemm %s\n", gsl_strerror(retval)); exit(EXIT_FAILURE); } gsl_matrix_scale(y_cov_mat, (1.0/((double)m->nmodel_points))); /** * debug output */ #ifdef DEBUGPCA fptr = fopen("pca-debug.dat","w"); fprintf(fptr, "# ycov:\n"); for(j = 0; j < nt; j++){ for(i = 0; i < nt; i++) fprintf(fptr, "%lf ", gsl_matrix_get(y_cov_mat, i, j)); fprintf(fptr, "\n"); } #endif /** now the eigendecomp * y_cov_mat is symmetric and better be real so we can use gsl_eigen_symmv, * note that the ?upper? triangle of y_cov_mat is borked during this process * also: the evals are not sorted by order, but the evectors are in the order of the evalues. * so we need to sort the evalues and the evectors correctly before we can use them * using * — Function: int gsl_eigen_symmv_sort (gsl_vector * eval, gsl_matrix * evec, gsl_eigen_sort_t sort_type) * sort them into descending order (biggest first) */ gsl_eigen_symmv(y_cov_mat, evals_temp, evecs_temp, ework); gsl_eigen_symmv_sort(evals_temp, evecs_temp, GSL_EIGEN_SORT_VAL_DESC); /** * eigenvectors are stored in columns of pca_evecs*/ total_variance = vector_elt_sum(evals_temp, nt); #ifdef DEBUGPCA fprintf(fptr, "# evals:\n"); for(i = 0; i < nt; i++) fprintf(fptr, "%lf %lf\n", gsl_vector_get(evals_temp, i), gsl_vector_get(evals_temp, i) / total_variance); fprintf(fptr, "# evecs:\n"); for(j = 0; j < nt; j++){ for(i = 0; i < nt; i++) fprintf(fptr, "%lf ", gsl_matrix_get(evecs_temp, i, j)); fprintf(fptr, "\n"); } #endif i=0; while( frac < vfrac && (i+1) < nt){ frac = (1.0/total_variance) * vector_elt_sum(evals_temp, i); i++; } m->nr = i; if(nt == 1){ //printf("# 1d case, nr=1\n"); m->nr = 1; } m->pca_evals_r = gsl_vector_alloc(m->nr); m->pca_evecs_r = gsl_matrix_alloc(m->nt, m->nr); // debug... 
fprintf(stderr, "# nr: %d frac: %lf\n", m->nr, frac); for(i = 0; i < m->nr; i++){ gsl_vector_set(m->pca_evals_r, i, gsl_vector_get(evals_temp, i)); col_view = gsl_matrix_column(evecs_temp, i); gsl_matrix_set_col(m->pca_evecs_r, i, &col_view.vector); } // fill in pca_zmatrix m->pca_zmatrix = gsl_matrix_alloc(m->nmodel_points, m->nr); pca_zmatrix_temp = gsl_matrix_alloc(m->nmodel_points, m->nr); // zmat: (nmodel_points x nr) = (nmodel_points x nt) * ( nt x nr ) /** — Function: int gsl_blas_dgemm (CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB, double alpha, const gsl_matrix * A, const gsl_matrix * B, double beta, gsl_matrix * C) (always forget this one)*/ gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, y_sub_mat, m->pca_evecs_r, 0.0, m->pca_zmatrix); #ifdef DEBUGPCA fprintf(fptr, "# evecs R:\n"); for(i = 0; i < m->nt; i++){ for(j = 0; j < m->nr; j++) fprintf(fptr, "%lf ", gsl_matrix_get(m->pca_evecs_r, i, j)); fprintf(fptr, "\n"); } #endif gsl_matrix_free(y_temp_mat); y_temp_mat = gsl_matrix_alloc(m->nr, m->nr); for(i = 0; i < m->nr; i++) // scale the diagonal by the evalue */ gsl_matrix_set(y_temp_mat, i, i, 1.0/(sqrt(gsl_vector_get(m->pca_evals_r, i)))); //print_matrix(y_temp_mat, m->nr, m->nr); // if nr != nt this won't work! //gsl_matrix_memcpy(y_sub_mat, m->pca_zmatrix); gsl_matrix_memcpy(pca_zmatrix_temp, m->pca_zmatrix); gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, pca_zmatrix_temp, y_temp_mat, 0.0, m->pca_zmatrix); #ifdef DEBUGPCA fprintf(fptr, "# zmat:\n"); for(i = 0; i < 5; i++){ for(j = 0; j < m->nr; j++) fprintf(fptr, "%lf ", gsl_matrix_get(m->pca_zmatrix, i, j)); fprintf(fptr, "\n"); } fclose(fptr); #endif gsl_vector_free(evals_temp); gsl_matrix_free(evecs_temp); gsl_matrix_free(pca_zmatrix_temp); gsl_eigen_symmv_free(ework); gsl_matrix_free(y_sub_mat); gsl_matrix_free(y_temp_mat); gsl_matrix_free(y_cov_mat); } /** * dump the multimodelstruct to fptr, follows from dump_modelstruct_2 * we first dump the new fields and then iterate through the nr additional models which are dumped as before * so we dump a lot of the same info, but this is probably ok, the advantage is that each section defining * a model can be pulled out and worked on separately... */ void dump_multi_modelstruct(FILE* fptr, multi_modelstruct *m){ assert(fptr); int i,j; int nt = m->nt; int nr = m->nr; int nparams = m->nparams; int nmodel_points = m->nmodel_points; int cov_fn_index = m->cov_fn_index; int regression_order = m->regression_order; fprintf(fptr, "%d\n", nt); fprintf(fptr, "%d\n", nr); fprintf(fptr, "%d\n", nparams); fprintf(fptr, "%d\n", nmodel_points); fprintf(fptr, "%d\n", cov_fn_index); fprintf(fptr, "%d\n", regression_order); // multimodel thetas are inside pca_model_array... for(i = 0; i < nmodel_points; i++){ for(j = 0; j < nparams; j++) fprintf(fptr, "%.17lf ", gsl_matrix_get(m->xmodel, i, j)); fprintf(fptr, "\n"); } for(i = 0; i < nmodel_points; i++){ for(j = 0; j < nt; j++) fprintf(fptr, "%.17lf ", gsl_matrix_get(m->training_matrix, i, j)); fprintf(fptr, "\n"); } // now the rest of the pca information for(i = 0; i < nr; i++) fprintf(fptr, "%.17lf ", gsl_vector_get(m->pca_evals_r, i)); fprintf(fptr, "\n"); for(i = 0; i < nt; i++){ for(j = 0; j < nr; j++) fprintf(fptr, "%.17lf ", gsl_matrix_get(m->pca_evecs_r, i, j)); fprintf(fptr, "\n"); } for(i = 0; i < nmodel_points; i++){ for(j = 0; j < nr; j++) fprintf(fptr, "%.17lf ", gsl_matrix_get(m->pca_zmatrix, i, j)); fprintf(fptr, "\n"); } // now the pca_model_array // these could go in separate files... 
for(i = 0; i < nr; i++){ dump_modelstruct_2(fptr, m->pca_model_array[i]); } } /** * loads a multivariate modelstructure from fptr */ multi_modelstruct *load_multi_modelstruct(FILE* fptr){ multi_modelstruct *m = (multi_modelstruct*)malloc(sizeof(multi_modelstruct)); int i,j; int nt, nr; int nparams, nmodel_points; int cov_fn_index; int regression_order; double mean_temp; gsl_vector_view col_view; fscanf(fptr, "%d%*c", & nt); fscanf(fptr, "%d%*c", & nr); fscanf(fptr, "%d%*c", & nparams); fscanf(fptr, "%d%*c", & nmodel_points); fscanf(fptr, "%d%*c", & cov_fn_index); fscanf(fptr, "%d%*c", & regression_order); m->nt = nt; m->nr = nr; m->nparams = nparams; m->nmodel_points = nmodel_points; m->cov_fn_index = cov_fn_index; m->regression_order = regression_order; // now we can allocate everything in m m->xmodel = gsl_matrix_alloc(nmodel_points, nparams); m->training_matrix = gsl_matrix_alloc(nmodel_points, nt); m->training_mean = gsl_vector_alloc(nt); // do we need this? (yes!) m->pca_model_array = (modelstruct**)malloc(sizeof(modelstruct*)*nr); m->pca_evals_r = gsl_vector_alloc(nr); m->pca_evecs_r = gsl_matrix_alloc(nt, nr); m->pca_zmatrix = gsl_matrix_alloc(nmodel_points, nr); for(i = 0; i < nmodel_points; i++) for(j = 0; j < nparams; j++) fscanf(fptr, "%lf%*c", gsl_matrix_ptr(m->xmodel, i, j)); for(i = 0; i < nmodel_points; i++) for(j = 0; j < nt; j++) fscanf(fptr, "%lf%*c", gsl_matrix_ptr(m->training_matrix, i, j)); // now the rest of the pca information for(i = 0; i < nr; i++) fscanf(fptr, "%lf%*c", gsl_vector_ptr(m->pca_evals_r, i)); for(i = 0; i < nt; i++) for(j = 0; j < nr; j++) fscanf(fptr, "%lf%*c", gsl_matrix_ptr(m->pca_evecs_r, i, j)); for(i = 0; i < nmodel_points; i++) for(j = 0; j < nr; j++) fscanf(fptr, "%lf%*c", gsl_matrix_ptr(m->pca_zmatrix, i, j)); for(i = 0; i < nr; i++) m->pca_model_array[i] = load_modelstruct_2(fptr); /* fill in the mean vector */ for(i = 0; i < nt; i++){ col_view = gsl_matrix_column(m->training_matrix, i); mean_temp = vector_elt_sum(&col_view.vector, nmodel_points); gsl_vector_set(m->training_mean, i, (mean_temp/((double)nmodel_points)) ); } return m; } /** * return the sum of the elements of vec from 0:nstop */ double vector_elt_sum(gsl_vector* vec, int nstop) { assert(nstop >= 0); assert((unsigned)nstop <= vec->size); int i; double sum = 0.0; for(i = 0; i < nstop; i++){ sum += gsl_vector_get(vec, i); } return(sum); } /** * this free's everything in m */ void free_multimodelstruct(multi_modelstruct *m) { int i; gsl_vector_free(m->training_mean); for(i = 0; i < m->nr; i++){ free_modelstruct_2(m->pca_model_array[i]); //gsl_matrix_free(m->pca_model_array[i]->xmodel); } free(m->pca_model_array); gsl_matrix_free(m->xmodel); gsl_matrix_free(m->training_matrix); gsl_vector_free(m->pca_evals_r); gsl_matrix_free(m->pca_evecs_r); gsl_matrix_free(m->pca_zmatrix); }
MADAI/MADAIEmulator
src/multi_modelstruct.c
C
mit
16,184
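dump_multi_modelstruct() and load_multi_modelstruct() above are written as plain-text inverses, so a round-trip is the quickest smoke test of the serialisation. A sketch, with a hypothetical file name:

#include <stdio.h>
#include "multi_modelstruct.h"

/* serialise a fitted model and read it back; "model.dat" is a placeholder */
void model_roundtrip(multi_modelstruct *m)
{
  multi_modelstruct *copy;
  FILE *fptr = fopen("model.dat", "w");
  if(!fptr) return;
  dump_multi_modelstruct(fptr, m);
  fclose(fptr);

  fptr = fopen("model.dat", "r");
  if(!fptr) return;
  copy = load_multi_modelstruct(fptr);
  fclose(fptr);

  /* the header fields are re-read from the file; the mean vector is
     recomputed from the training matrix rather than stored */
  printf("# nt=%d nr=%d\n", copy->nt, copy->nr);
  free_multimodelstruct(copy);
}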
/* Generated by CIL v. 1.7.0 */ /* print_CIL_Input is false */ struct _IO_FILE; struct timeval; extern float strtof(char const *str , char const *endptr ) ; extern void signal(int sig , void *func ) ; typedef struct _IO_FILE FILE; extern int atoi(char const *s ) ; extern double strtod(char const *str , char const *endptr ) ; extern int fclose(void *stream ) ; extern void *fopen(char const *filename , char const *mode ) ; extern void abort() ; extern void exit(int status ) ; extern int raise(int sig ) ; extern int fprintf(struct _IO_FILE *stream , char const *format , ...) ; extern int strcmp(char const *a , char const *b ) ; extern int rand() ; extern unsigned long strtoul(char const *str , char const *endptr , int base ) ; void RandomFunc(unsigned int input[1] , unsigned int output[1] ) ; extern int strncmp(char const *s1 , char const *s2 , unsigned long maxlen ) ; extern int gettimeofday(struct timeval *tv , void *tz , ...) ; extern int printf(char const *format , ...) ; int main(int argc , char *argv[] ) ; void megaInit(void) ; extern unsigned long strlen(char const *s ) ; extern long strtol(char const *str , char const *endptr , int base ) ; extern unsigned long strnlen(char const *s , unsigned long maxlen ) ; extern void *memcpy(void *s1 , void const *s2 , unsigned long size ) ; struct timeval { long tv_sec ; long tv_usec ; }; extern void *malloc(unsigned long size ) ; extern int scanf(char const *format , ...) ; void megaInit(void) { { } } void RandomFunc(unsigned int input[1] , unsigned int output[1] ) { unsigned int state[1] ; unsigned int local1 ; char copy11 ; unsigned short copy12 ; { state[0UL] = (input[0UL] + 914778474UL) ^ 3462201355U; local1 = 0UL; while (local1 < input[1UL]) { if (state[0UL] > local1) { copy11 = *((char *)(& state[local1]) + 0); *((char *)(& state[local1]) + 0) = *((char *)(& state[local1]) + 1); *((char *)(& state[local1]) + 1) = copy11; copy12 = *((unsigned short *)(& state[0UL]) + 1); *((unsigned short *)(& state[0UL]) + 1) = *((unsigned short *)(& state[0UL]) + 0); *((unsigned short *)(& state[0UL]) + 0) = copy12; } else { state[0UL] |= (state[0UL] & 31U) << 4UL; state[local1] *= state[local1]; } local1 ++; } output[0UL] = state[0UL] << 5U; } } int main(int argc , char *argv[] ) { unsigned int input[1] ; unsigned int output[1] ; int randomFuns_i5 ; unsigned int randomFuns_value6 ; int randomFuns_main_i7 ; { megaInit(); if (argc != 2) { printf("Call this program with %i arguments\n", 1); exit(-1); } else { } randomFuns_i5 = 0; while (randomFuns_i5 < 1) { randomFuns_value6 = (unsigned int )strtoul(argv[randomFuns_i5 + 1], 0, 10); input[randomFuns_i5] = randomFuns_value6; randomFuns_i5 ++; } RandomFunc(input, output); if (output[0] == 460535040U) { printf("You win!\n"); } else { } randomFuns_main_i7 = 0; while (randomFuns_main_i7 < 1) { printf("%u\n", output[randomFuns_main_i7]); randomFuns_main_i7 ++; } } }
tum-i22/obfuscation-benchmarks
tigress-generated-programs/empty-Seed4-RandomFuns-Type_int-ControlStructures_9-BB2-ForBound_input-Operators_all.c
C
mit
3,118
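The working parts of RandomFunc() above are the two in-place swaps: a byte swap within the low halfword of state[0], then a halfword swap of the whole 32-bit word. (Note also that the loop bound reads input[1UL] on a one-element array, one element past the end, undefined behaviour inherited from the generator.) Isolated, and assuming a little-endian host, the swaps do this:

#include <stdio.h>

int main(void)
{
  unsigned int state = 0x11223344U;
  char copy11;
  unsigned short copy12;

  copy11 = *((char *)(& state) + 0);                   /* swap bytes 0 and 1 */
  *((char *)(& state) + 0) = *((char *)(& state) + 1);
  *((char *)(& state) + 1) = copy11;                   /* state is now 0x11224433 */

  copy12 = *((unsigned short *)(& state) + 1);         /* swap the two halfwords */
  *((unsigned short *)(& state) + 1) = *((unsigned short *)(& state) + 0);
  *((unsigned short *)(& state) + 0) = copy12;         /* state is now 0x44331122 */

  printf("%08x\n", state);
  return 0;
}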
/* MACHINE GENERATED FILE, DO NOT EDIT */ #include <jni.h> #include "extgl.h" typedef GL_APICALL void (GL_APIENTRY *glTexStorage1DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); typedef GL_APICALL void (GL_APIENTRY *glTexStorage2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); typedef GL_APICALL void (GL_APIENTRY *glTexStorage3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); typedef GL_APICALL void (GL_APIENTRY *glTextureStorage1DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); typedef GL_APICALL void (GL_APIENTRY *glTextureStorage2DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); typedef GL_APICALL void (GL_APIENTRY *glTextureStorage3DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); static glTexStorage1DEXTPROC glTexStorage1DEXT; static glTexStorage2DEXTPROC glTexStorage2DEXT; static glTexStorage3DEXTPROC glTexStorage3DEXT; static glTextureStorage1DEXTPROC glTextureStorage1DEXT; static glTextureStorage2DEXTPROC glTextureStorage2DEXT; static glTextureStorage3DEXTPROC glTextureStorage3DEXT; static void JNICALL Java_org_lwjgl_opengles_EXTTextureStorage_nglTexStorage1DEXT(JNIEnv *env, jclass clazz, jint target, jint levels, jint internalformat, jint width) { glTexStorage1DEXT(target, levels, internalformat, width); } static void JNICALL Java_org_lwjgl_opengles_EXTTextureStorage_nglTexStorage2DEXT(JNIEnv *env, jclass clazz, jint target, jint levels, jint internalformat, jint width, jint height) { glTexStorage2DEXT(target, levels, internalformat, width, height); } static void JNICALL Java_org_lwjgl_opengles_EXTTextureStorage_nglTexStorage3DEXT(JNIEnv *env, jclass clazz, jint target, jint levels, jint internalformat, jint width, jint height, jint depth) { glTexStorage3DEXT(target, levels, internalformat, width, height, depth); } static void JNICALL Java_org_lwjgl_opengles_EXTTextureStorage_nglTextureStorage1DEXT(JNIEnv *env, jclass clazz, jint texture, jint target, jint levels, jint internalformat, jint width) { glTextureStorage1DEXT(texture, target, levels, internalformat, width); } static void JNICALL Java_org_lwjgl_opengles_EXTTextureStorage_nglTextureStorage2DEXT(JNIEnv *env, jclass clazz, jint texture, jint target, jint levels, jint internalformat, jint width, jint height) { glTextureStorage2DEXT(texture, target, levels, internalformat, width, height); } static void JNICALL Java_org_lwjgl_opengles_EXTTextureStorage_nglTextureStorage3DEXT(JNIEnv *env, jclass clazz, jint texture, jint target, jint levels, jint internalformat, jint width, jint height, jint depth) { glTextureStorage3DEXT(texture, target, levels, internalformat, width, height, depth); } JNIEXPORT void JNICALL Java_org_lwjgl_opengles_EXTTextureStorage_initNativeStubs(JNIEnv *env, jclass clazz) { JavaMethodAndExtFunction functions[] = { {"nglTexStorage1DEXT", "(IIII)V", (void *)&Java_org_lwjgl_opengles_EXTTextureStorage_nglTexStorage1DEXT, "glTexStorage1DEXT", (void *)&glTexStorage1DEXT, false}, {"nglTexStorage2DEXT", "(IIIII)V", (void *)&Java_org_lwjgl_opengles_EXTTextureStorage_nglTexStorage2DEXT, "glTexStorage2DEXT", (void *)&glTexStorage2DEXT, false}, {"nglTexStorage3DEXT", "(IIIIII)V", (void *)&Java_org_lwjgl_opengles_EXTTextureStorage_nglTexStorage3DEXT, "glTexStorage3DEXT", (void 
*)&glTexStorage3DEXT, false}, {"nglTextureStorage1DEXT", "(IIIII)V", (void *)&Java_org_lwjgl_opengles_EXTTextureStorage_nglTextureStorage1DEXT, "glTextureStorage1DEXT", (void *)&glTextureStorage1DEXT, false}, {"nglTextureStorage2DEXT", "(IIIIII)V", (void *)&Java_org_lwjgl_opengles_EXTTextureStorage_nglTextureStorage2DEXT, "glTextureStorage2DEXT", (void *)&glTextureStorage2DEXT, false}, {"nglTextureStorage3DEXT", "(IIIIIII)V", (void *)&Java_org_lwjgl_opengles_EXTTextureStorage_nglTextureStorage3DEXT, "glTextureStorage3DEXT", (void *)&glTextureStorage3DEXT, false} }; int num_functions = NUMFUNCTIONS(functions); extgl_InitializeClass(env, clazz, num_functions, functions); }
eriqadams/computer-graphics
lib/lwjgl-2.9.1/lwjgl-source-2.9.1/src/native/generated/opengles/org_lwjgl_opengles_EXTTextureStorage.c
C
mit
4,247
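Every stub in the generated file follows the same shape: a static function pointer per GL entry point, a JNICALL forwarder that calls through it, and one row in the name/signature table handed to extgl_InitializeClass() for resolution. A generic sketch of that resolution step (not LWJGL's actual loader; JavaMethodAndExtFunction and extgl_InitializeClass are defined elsewhere in the tree):

#include <stddef.h>

/* minimal stand-in for the table-driven binding pattern above */
typedef void (*voidfn)(void);
struct gl_binding {
  const char *gl_name; /* e.g. "glTexStorage1DEXT" */
  voidfn *target;      /* the static pointer the JNI stub calls through */
};

static int resolve_all(struct gl_binding *tab, int n,
                       voidfn (*lookup)(const char *name))
{
  int i;
  for(i = 0; i < n; i++) {
    *tab[i].target = lookup(tab[i].gl_name);
    if(*tab[i].target == NULL)
      return 0; /* extension missing: leave the stubs unbound */
  }
  return 1;
}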
/* This file contains the sigma-delta driver implementation. */ #include "platform.h" #include "hw_timer.h" #include "task/task.h" #include "c_stdlib.h" #include "pcm.h" static const os_param_t drv_sd_hw_timer_owner = 0x70636D; // "pcm" static void ICACHE_RAM_ATTR drv_sd_timer_isr( os_param_t arg ) { cfg_t *cfg = (cfg_t *)arg; pcm_buf_t *buf = &(cfg->bufs[cfg->rbuf_idx]); if (cfg->isr_throttled) { return; } if (!buf->empty) { uint16_t tmp; // buffer is not empty, continue reading tmp = abs((int16_t)(buf->data[buf->rpos]) - 128); if (tmp > cfg->vu_peak_tmp) { cfg->vu_peak_tmp = tmp; } cfg->vu_samples_tmp++; if (cfg->vu_samples_tmp >= cfg->vu_req_samples) { cfg->vu_peak = cfg->vu_peak_tmp; task_post_low( pcm_data_vu_task, (os_param_t)cfg ); cfg->vu_samples_tmp = 0; cfg->vu_peak_tmp = 0; } platform_sigma_delta_set_target( buf->data[buf->rpos++] ); if (buf->rpos >= buf->len) { // buffer data consumed, request to re-fill it buf->empty = TRUE; cfg->fbuf_idx = cfg->rbuf_idx; task_post_high( pcm_data_play_task, (os_param_t)cfg ); // switch to next buffer cfg->rbuf_idx ^= 1; dbg_platform_gpio_write( PLATFORM_GPIO_LOW ); } } else { // flag ISR throttled cfg->isr_throttled = 1; dbg_platform_gpio_write( PLATFORM_GPIO_LOW ); cfg->fbuf_idx = cfg->rbuf_idx; task_post_high( pcm_data_play_task, (os_param_t)cfg ); } } static uint8_t drv_sd_stop( cfg_t *cfg ) { platform_hw_timer_close( drv_sd_hw_timer_owner ); return TRUE; } static uint8_t drv_sd_close( cfg_t *cfg ) { drv_sd_stop( cfg ); platform_sigma_delta_close( cfg->pin ); dbg_platform_gpio_mode( PLATFORM_GPIO_INPUT, PLATFORM_GPIO_PULLUP ); return TRUE; } static uint8_t drv_sd_play( cfg_t *cfg ) { // VU control: derive callback frequency cfg->vu_req_samples = (uint16_t)((1000000L / (uint32_t)cfg->vu_freq) / (uint32_t)pcm_rate_def[cfg->rate]); cfg->vu_samples_tmp = 0; cfg->vu_peak_tmp = 0; // (re)start hardware timer ISR to feed the sigma-delta if (platform_hw_timer_init( drv_sd_hw_timer_owner, FRC1_SOURCE, TRUE )) { platform_hw_timer_set_func( drv_sd_hw_timer_owner, drv_sd_timer_isr, (os_param_t)cfg ); platform_hw_timer_arm_us( drv_sd_hw_timer_owner, pcm_rate_def[cfg->rate] ); return TRUE; } else { return FALSE; } } static uint8_t drv_sd_init( cfg_t *cfg ) { dbg_platform_gpio_write( PLATFORM_GPIO_HIGH ); dbg_platform_gpio_mode( PLATFORM_GPIO_OUTPUT, PLATFORM_GPIO_PULLUP ); platform_sigma_delta_setup( cfg->pin ); platform_sigma_delta_set_prescale( 9 ); return TRUE; } static uint8_t drv_sd_fail( cfg_t *cfg ) { return FALSE; } const drv_t pcm_drv_sd = { .init = drv_sd_init, .close = drv_sd_close, .play = drv_sd_play, .record = drv_sd_fail, .stop = drv_sd_stop };
devsaurus/nodemcu-firmware
app/pcm/drv_sigma_delta.c
C
mit
2,905
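drv_sd_play() above sizes the VU measurement window as (1000000 / vu_freq) / pcm_rate_def[cfg->rate], i.e. the callback period in microseconds divided by the sample period. A worked instance of that arithmetic with illustrative numbers (the 125 us sample period, i.e. 8 kHz, is an assumption; the real values live in pcm_rate_def[]):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uint32_t vu_freq = 10;   /* requested VU reports per second */
  uint32_t rate_us = 125;  /* assumed sample period: 8 kHz */
  uint16_t vu_req_samples = (uint16_t)((1000000L / vu_freq) / rate_us);
  printf("%u samples per peak report\n", (unsigned)vu_req_samples); /* 800 */
  return 0;
}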
/* * Special implementations of built-in functions and methods. * * Optional optimisations for builtins are in Optimize.c. * * General object operations and protocols are in ObjectHandling.c. */ //////////////////// Globals.proto //////////////////// static PyObject* __Pyx_Globals(void); /*proto*/ //////////////////// Globals //////////////////// //@substitute: naming //@requires: ObjectHandling.c::GetAttr // This is a stub implementation until we have something more complete. // Currently, we only handle the most common case of a read-only dict // of Python names. Supporting cdef names in the module and write // access requires a rewrite as a dedicated class. static PyObject* __Pyx_Globals(void) { Py_ssize_t i; PyObject *names; PyObject *globals = $moddict_cname; Py_INCREF(globals); names = PyObject_Dir($module_cname); if (!names) goto bad; for (i = PyList_GET_SIZE(names)-1; i >= 0; i--) { #if CYTHON_COMPILING_IN_PYPY PyObject* name = PySequence_ITEM(names, i); if (!name) goto bad; #else PyObject* name = PyList_GET_ITEM(names, i); #endif if (!PyDict_Contains(globals, name)) { PyObject* value = __Pyx_GetAttr($module_cname, name); if (!value) { #if CYTHON_COMPILING_IN_PYPY Py_DECREF(name); #endif goto bad; } if (PyDict_SetItem(globals, name, value) < 0) { #if CYTHON_COMPILING_IN_PYPY Py_DECREF(name); #endif Py_DECREF(value); goto bad; } } #if CYTHON_COMPILING_IN_PYPY Py_DECREF(name); #endif } Py_DECREF(names); return globals; bad: Py_XDECREF(names); Py_XDECREF(globals); return NULL; } //////////////////// PyExecGlobals.proto //////////////////// static PyObject* __Pyx_PyExecGlobals(PyObject*); //////////////////// PyExecGlobals //////////////////// //@requires: Globals //@requires: PyExec static PyObject* __Pyx_PyExecGlobals(PyObject* code) { PyObject* result; PyObject* globals = __Pyx_Globals(); if (unlikely(!globals)) return NULL; result = __Pyx_PyExec2(code, globals); Py_DECREF(globals); return result; } //////////////////// PyExec.proto //////////////////// static PyObject* __Pyx_PyExec3(PyObject*, PyObject*, PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyExec2(PyObject*, PyObject*); //////////////////// PyExec //////////////////// //@substitute: naming static CYTHON_INLINE PyObject* __Pyx_PyExec2(PyObject* o, PyObject* globals) { return __Pyx_PyExec3(o, globals, NULL); } static PyObject* __Pyx_PyExec3(PyObject* o, PyObject* globals, PyObject* locals) { PyObject* result; PyObject* s = 0; char *code = 0; if (!globals || globals == Py_None) { globals = $moddict_cname; } else if (!PyDict_Check(globals)) { PyErr_Format(PyExc_TypeError, "exec() arg 2 must be a dict, not %.200s", Py_TYPE(globals)->tp_name); goto bad; } if (!locals || locals == Py_None) { locals = globals; } if (PyDict_GetItem(globals, PYIDENT("__builtins__")) == NULL) { if (PyDict_SetItem(globals, PYIDENT("__builtins__"), PyEval_GetBuiltins()) < 0) goto bad; } if (PyCode_Check(o)) { if (PyCode_GetNumFree((PyCodeObject *)o) > 0) { PyErr_SetString(PyExc_TypeError, "code object passed to exec() may not contain free variables"); goto bad; } #if PY_VERSION_HEX < 0x030200B1 result = PyEval_EvalCode((PyCodeObject *)o, globals, locals); #else result = PyEval_EvalCode(o, globals, locals); #endif } else { PyCompilerFlags cf; cf.cf_flags = 0; if (PyUnicode_Check(o)) { cf.cf_flags = PyCF_SOURCE_IS_UTF8; s = PyUnicode_AsUTF8String(o); if (!s) goto bad; o = s; #if PY_MAJOR_VERSION >= 3 } else if (!PyBytes_Check(o)) { #else } else if (!PyString_Check(o)) { #endif PyErr_Format(PyExc_TypeError, "exec: arg 1 must be string, bytes or code 
object, got %.200s", Py_TYPE(o)->tp_name); goto bad; } #if PY_MAJOR_VERSION >= 3 code = PyBytes_AS_STRING(o); #else code = PyString_AS_STRING(o); #endif if (PyEval_MergeCompilerFlags(&cf)) { result = PyRun_StringFlags(code, Py_file_input, globals, locals, &cf); } else { result = PyRun_String(code, Py_file_input, globals, locals); } Py_XDECREF(s); } return result; bad: Py_XDECREF(s); return 0; } //////////////////// GetAttr3.proto //////////////////// static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /*proto*/ //////////////////// GetAttr3 //////////////////// //@requires: ObjectHandling.c::GetAttr static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; PyErr_Clear(); r = d; Py_INCREF(d); } return r; bad: return NULL; } //////////////////// Intern.proto //////////////////// static PyObject* __Pyx_Intern(PyObject* s); /* proto */ //////////////////// Intern //////////////////// static PyObject* __Pyx_Intern(PyObject* s) { if (!(likely(PyString_CheckExact(s)))) { PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(s)->tp_name); return 0; } Py_INCREF(s); #if PY_MAJOR_VERSION >= 3 PyUnicode_InternInPlace(&s); #else PyString_InternInPlace(&s); #endif return s; } //////////////////// abs_int.proto //////////////////// static CYTHON_INLINE unsigned int __Pyx_abs_int(int x) { if (unlikely(x == -INT_MAX-1)) return ((unsigned int)INT_MAX) + 1U; return (unsigned int) abs(x); } //////////////////// abs_long.proto //////////////////// static CYTHON_INLINE unsigned long __Pyx_abs_long(long x) { if (unlikely(x == -LONG_MAX-1)) return ((unsigned long)LONG_MAX) + 1U; return (unsigned long) labs(x); } //////////////////// abs_longlong.proto //////////////////// static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_abs_longlong(PY_LONG_LONG x) { #ifndef PY_LLONG_MAX #ifdef LLONG_MAX const PY_LONG_LONG PY_LLONG_MAX = LLONG_MAX; #else // copied from pyport.h in CPython 3.3, missing in 2.4 const PY_LONG_LONG PY_LLONG_MAX = (1 + 2 * ((1LL << (CHAR_BIT * sizeof(PY_LONG_LONG) - 2)) - 1)); #endif #endif if (unlikely(x == -PY_LLONG_MAX-1)) return ((unsigned PY_LONG_LONG)PY_LLONG_MAX) + 1U; #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L return (unsigned PY_LONG_LONG) llabs(x); #else return (x<0) ? 
(unsigned PY_LONG_LONG)-x : (unsigned PY_LONG_LONG)x; #endif } //////////////////// pow2.proto //////////////////// #define __Pyx_PyNumber_Power2(a, b) PyNumber_Power(a, b, Py_None) //////////////////// py_dict_keys.proto //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d); /*proto*/ //////////////////// py_dict_keys //////////////////// //@requires: ObjectHandling.c::PyObjectCallMethod1 static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d) { if (PY_MAJOR_VERSION >= 3) return __Pyx_PyObject_CallMethod1((PyObject*)&PyDict_Type, PYIDENT("keys"), d); else return PyDict_Keys(d); } //////////////////// py_dict_values.proto //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d); /*proto*/ //////////////////// py_dict_values //////////////////// //@requires: ObjectHandling.c::PyObjectCallMethod1 static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d) { if (PY_MAJOR_VERSION >= 3) return __Pyx_PyObject_CallMethod1((PyObject*)&PyDict_Type, PYIDENT("values"), d); else return PyDict_Values(d); } //////////////////// py_dict_items.proto //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d); /*proto*/ //////////////////// py_dict_items //////////////////// //@requires: ObjectHandling.c::PyObjectCallMethod1 static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d) { if (PY_MAJOR_VERSION >= 3) return __Pyx_PyObject_CallMethod1((PyObject*)&PyDict_Type, PYIDENT("items"), d); else return PyDict_Items(d); } //////////////////// py_dict_iterkeys.proto //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_IterKeys(PyObject* d); /*proto*/ //////////////////// py_dict_iterkeys //////////////////// //@requires: ObjectHandling.c::PyObjectCallMethod0 static CYTHON_INLINE PyObject* __Pyx_PyDict_IterKeys(PyObject* d) { return __Pyx_PyObject_CallMethod0(d, (PY_MAJOR_VERSION >= 3) ? PYIDENT("keys") : PYIDENT("iterkeys")); } //////////////////// py_dict_itervalues.proto //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_IterValues(PyObject* d); /*proto*/ //////////////////// py_dict_itervalues //////////////////// //@requires: ObjectHandling.c::PyObjectCallMethod0 static CYTHON_INLINE PyObject* __Pyx_PyDict_IterValues(PyObject* d) { return __Pyx_PyObject_CallMethod0(d, (PY_MAJOR_VERSION >= 3) ? PYIDENT("values") : PYIDENT("itervalues")); } //////////////////// py_dict_iteritems.proto //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_IterItems(PyObject* d); /*proto*/ //////////////////// py_dict_iteritems //////////////////// //@requires: ObjectHandling.c::PyObjectCallMethod0 static CYTHON_INLINE PyObject* __Pyx_PyDict_IterItems(PyObject* d) { return __Pyx_PyObject_CallMethod0(d, (PY_MAJOR_VERSION >= 3) ? PYIDENT("items") : PYIDENT("iteritems")); } //////////////////// py_dict_viewkeys.proto //////////////////// #if PY_VERSION_HEX < 0x02070000 #error This module uses dict views, which require Python 2.7 or later #endif static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewKeys(PyObject* d); /*proto*/ //////////////////// py_dict_viewkeys //////////////////// //@requires: ObjectHandling.c::PyObjectCallMethod0 static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewKeys(PyObject* d) { return __Pyx_PyObject_CallMethod0(d, (PY_MAJOR_VERSION >= 3) ? 
PYIDENT("keys") : PYIDENT("viewkeys")); } //////////////////// py_dict_viewvalues.proto //////////////////// #if PY_VERSION_HEX < 0x02070000 #error This module uses dict views, which require Python 2.7 or later #endif static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewValues(PyObject* d); /*proto*/ //////////////////// py_dict_viewvalues //////////////////// //@requires: ObjectHandling.c::PyObjectCallMethod0 static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewValues(PyObject* d) { return __Pyx_PyObject_CallMethod0(d, (PY_MAJOR_VERSION >= 3) ? PYIDENT("values") : PYIDENT("viewvalues")); } //////////////////// py_dict_viewitems.proto //////////////////// #if PY_VERSION_HEX < 0x02070000 #error This module uses dict views, which require Python 2.7 or later #endif static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewItems(PyObject* d); /*proto*/ //////////////////// py_dict_viewitems //////////////////// //@requires: ObjectHandling.c::PyObjectCallMethod0 static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewItems(PyObject* d) { return __Pyx_PyObject_CallMethod0(d, (PY_MAJOR_VERSION >= 3) ? PYIDENT("items") : PYIDENT("viewitems")); } //////////////////// pyfrozenset_new.proto //////////////////// //@substitute: naming static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it) { if (it) { PyObject* result; #if CYTHON_COMPILING_IN_PYPY // PyPy currently lacks PyFrozenSet_CheckExact() and PyFrozenSet_New() PyObject* args; args = PyTuple_Pack(1, it); if (unlikely(!args)) return NULL; result = PyObject_Call((PyObject*)&PyFrozenSet_Type, args, NULL); Py_DECREF(args); return result; #else if (PyFrozenSet_CheckExact(it)) { Py_INCREF(it); return it; } result = PyFrozenSet_New(it); if (unlikely(!result)) return NULL; if (likely(PySet_GET_SIZE(result))) return result; // empty frozenset is a singleton // seems wasteful, but CPython does the same Py_DECREF(result); #endif } #if CYTHON_COMPILING_IN_CPYTHON return PyFrozenSet_Type.tp_new(&PyFrozenSet_Type, $empty_tuple, NULL); #else return PyObject_Call((PyObject*)&PyFrozenSet_Type, $empty_tuple, NULL); #endif }
mollstam/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Cython-0.22.1/Cython/Utility/Builtins.c
C
mit
12,711
/** \file * \brief Scintilla control: Cursor and Zooming * * See Copyright Notice in "iup.h" */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #undef SCI_NAMESPACE #include <Scintilla.h> #include "iup.h" #include "iup_object.h" #include "iup_attrib.h" #include "iup_str.h" #include "iupsci.h" /***** CURSOR **** SCI_SETCURSOR(int curType) SCI_GETCURSOR */ static char* iScintillaGetCursorAttrib(Ihandle *ih) { if(IupScintillaSendMessage(ih, SCI_GETCURSOR, 0, 0) == SC_CURSORWAIT) return "WAIT"; else return "NORMAL"; } static int iScintillaSetCursorAttrib(Ihandle *ih, const char *value) { if (iupStrEqualNoCase(value, "WAIT")) IupScintillaSendMessage(ih, SCI_SETCURSOR, (uptr_t)SC_CURSORWAIT, 0); else /* NORMAL */ IupScintillaSendMessage(ih, SCI_SETCURSOR, (uptr_t)SC_CURSORNORMAL, 0); return 0; } /***** ZOOMING **** SCI_ZOOMIN SCI_ZOOMOUT SCI_SETZOOM(int zoomInPoints) SCI_GETZOOM */ static int iScintillaSetZoomInAttrib(Ihandle *ih, const char *value) { (void)value; IupScintillaSendMessage(ih, SCI_ZOOMIN, 0, 0); return 0; } static int iScintillaSetZoomOutAttrib(Ihandle *ih, const char *value) { (void)value; IupScintillaSendMessage(ih, SCI_ZOOMOUT, 0, 0); return 0; } static int iScintillaSetZoomAttrib(Ihandle *ih, const char *value) { int points; if (!iupStrToInt(value, &points)) return 0; if(points > 20) points = 20; if(points < -10) points = -10; IupScintillaSendMessage(ih, SCI_SETZOOM, points, 0); return 0; } static char* iScintillaGetZoomAttrib(Ihandle* ih) { int points = IupScintillaSendMessage(ih, SCI_GETZOOM, 0, 0); return iupStrReturnInt(points); } void iupScintillaRegisterCursor(Iclass* ic) { iupClassRegisterAttribute(ic, "CURSOR", iScintillaGetCursorAttrib, iScintillaSetCursorAttrib, NULL, NULL, IUPAF_NO_INHERIT); iupClassRegisterAttribute(ic, "ZOOMIN", NULL, iScintillaSetZoomInAttrib, NULL, NULL, IUPAF_WRITEONLY|IUPAF_NO_INHERIT); iupClassRegisterAttribute(ic, "ZOOMOUT", NULL, iScintillaSetZoomOutAttrib, NULL, NULL, IUPAF_WRITEONLY|IUPAF_NO_INHERIT); iupClassRegisterAttribute(ic, "ZOOM", iScintillaGetZoomAttrib, iScintillaSetZoomAttrib, NULL, NULL, IUPAF_NO_INHERIT); }
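/*
 * Illustrative usage sketch (not part of iupsci_cursor.c): how an
 * application might drive the CURSOR and ZOOM attributes registered above.
 * IupSetAttribute()/IupGetAttribute() are the real IUP API; the header name
 * and the surrounding program state (an already-created IupScintilla
 * control `sci`) are assumptions of this example.
 */
#include <stdio.h>
#include "iup.h"
#include "iup_scintilla.h"

static void demo_cursor_and_zoom(Ihandle *sci)
{
    IupSetAttribute(sci, "CURSOR", "WAIT");  /* busy cursor while working */
    IupSetAttribute(sci, "ZOOM", "5");       /* +5 points; setter clamps to [-10, 20] */
    IupSetAttribute(sci, "ZOOMIN", "1");     /* write-only action: zoom in one step */
    printf("zoom is now %s points\n", IupGetAttribute(sci, "ZOOM"));
    IupSetAttribute(sci, "CURSOR", "NORMAL");
}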
ivanceras/iup-mirror
srcscintilla/iupsci_cursor.c
C
mit
2,233
/**************************************************************************//** * Copyright (c) 2016 by Silicon Laboratories Inc. All rights reserved. * * http://developer.silabs.com/legal/version/v11/Silicon_Labs_Software_License_Agreement.txt *****************************************************************************/ #include "uart_1.h" #if EFM8PDL_UART1_AUTO_PAGE == 1 // declare variable needed for autopage enter/exit #define DECL_PAGE uint8_t savedPage // enter autopage section #define SET_PAGE(p) do \ { \ savedPage = SFRPAGE; /* save current SFR page */ \ SFRPAGE = (p); /* set SFR page */ \ } while(0) // exit autopage section #define RESTORE_PAGE do \ { \ SFRPAGE = savedPage; /* restore saved SFR page */ \ } while(0) #else #define DECL_PAGE #define SET_PAGE(x) #define RESTORE_PAGE #endif //EFM8PDL_UART1_AUTO_PAGE // SFR page used to access UART1 registers #define UART1_SFR_PAGE 0x20 // Clock prescaler values for baud rate initialization #define NUM_PRESC 8 static const uint8_t PRESC[NUM_PRESC] = {1, 4, 8, 12, 16, 24, 32, 48}; static const uint8_t PRESC_ENUM[NUM_PRESC] = {SBCON1_BPS__DIV_BY_1, SBCON1_BPS__DIV_BY_4, SBCON1_BPS__DIV_BY_8, SBCON1_BPS__DIV_BY_12, SBCON1_BPS__DIV_BY_16, SBCON1_BPS__DIV_BY_24, SBCON1_BPS__DIV_BY_32, SBCON1_BPS__DIV_BY_48}; static void UART1_initBaudRate(uint32_t sysclk, uint32_t baudrate) { uint8_t i; uint8_t min_presc; uint16_t reload; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); // Calculate baud rate prescaler and baud rate reload // value to maximize precision. // See reference manual for calculation details min_presc = ((*((uint16_t*)(&sysclk)) >> 1) + baudrate) / baudrate; // calculate minimum prescaler necessary for(i = 0; i < NUM_PRESC; ++i) { if(PRESC[i] >= min_presc) // use a prescaler that is equal or just greater than the minimum { reload = ((1 << 16) - (sysclk / (2 * baudrate * PRESC[i]))); // calculate reload value using prescaler SBRL1 = reload; SBCON1 |= (SBCON1_BREN__ENABLED | PRESC_ENUM[i]); // enable baud rate with calculated prescaler RESTORE_PAGE; return; } } // Baud rate is too low to be matched while(1); } //========================================================= // Runtime API //========================================================= #if (EFM8PDL_UART1_AUTO_PAGE == 1) uint8_t UART1_getIntFlags(void) { uint8_t val; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); val = SCON1 & (UART1_TX_IF | UART1_RX_IF); RESTORE_PAGE; return val; } void UART1_clearIntFlags(uint8_t flags) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); SCON1 &= ~(flags); RESTORE_PAGE; } void UART1_enableTxInt(bool enable) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); UART1FCN1_TIE = enable; RESTORE_PAGE; } void UART1_enableRxInt(bool enable) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); UART1FCN1_RIE = enable; RESTORE_PAGE; } void UART1_initTxPolling(void) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); SCON1_TI = 1; RESTORE_PAGE; } void UART1_write(uint8_t value) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); SBUF1 = value; RESTORE_PAGE; } uint8_t UART1_read(void) { uint8_t val; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); val = SBUF1; RESTORE_PAGE; return val; } #endif void UART1_writeWithExtraBit(uint16_t value) { uint8_t shift, mask; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); // Calculate shift and mask for data length shift = ((SMOD1 & SMOD1_SDL__FMASK) >> SMOD1_SDL__SHIFT) + 5; mask = 0xFF >> (8 - shift); SCON1_TBX = (value >> shift) & 0x1; SBUF1 = (value & mask); RESTORE_PAGE; } uint16_t UART1_readWithExtraBit(void) { uint8_t shift, mask; uint16_t val; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); // Calculate shift and mask for data
length shift = ((SMOD1 & SMOD1_SDL__FMASK) >> SMOD1_SDL__SHIFT) + 5; mask = 0xFF >> (8 - shift); val = SCON1_RBX; val = val << shift; val |= (SBUF1 & mask); RESTORE_PAGE; return val; } #if (EFM8PDL_UART1_AUTO_PAGE == 1) uint8_t UART1_getErrFlags(void) { uint8_t val; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); val = SCON1 & (UART1_RXOVR_EF | UART1_PARITY_EF); RESTORE_PAGE; return val; } void UART1_clearErrFlags(uint8_t flags) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); SCON1 &= ~flags; RESTORE_PAGE; } uint8_t UART1_getFifoIntFlags(void) { uint8_t val; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); val = UART1FCN1 & (UART1_TFRQ_IF | UART1_RFRQ_IF); RESTORE_PAGE; return val; } #endif void UART1_enableTxFifoInt(bool enable) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); if(enable) { UART1FCN0 |= UART1FCN0_TFRQE__ENABLED; } else { UART1FCN0 &= ~UART1FCN0_TFRQE__ENABLED; } RESTORE_PAGE; } void UART1_enableRxFifoInt(bool enable) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); if(enable) { UART1FCN0 |= UART1FCN0_RFRQE__ENABLED; } else { UART1FCN0 &= ~UART1FCN0_RFRQE__ENABLED; } RESTORE_PAGE; } #if (EFM8PDL_UART1_AUTO_PAGE == 1) uint8_t UART1_getTxFifoCount(void) { uint8_t txcnt; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); txcnt = (UART1FCT & UART1FCT_TXCNT__FMASK) >> UART1FCT_TXCNT__SHIFT; RESTORE_PAGE; return txcnt; } uint8_t UART1_getRxFifoCount(void) { uint8_t rxcnt; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); rxcnt = (UART1FCT & UART1FCT_RXCNT__FMASK) >> UART1FCT_RXCNT__SHIFT; RESTORE_PAGE; return rxcnt; } #endif bool UART1_isTxFifoFull(void){ bool txfull; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); if(UART1FCN1 & UART1FCN1_TXNF__NOT_FULL) { txfull = false; } else { txfull = true; } RESTORE_PAGE; return txfull; } void UART1_stallTxFifo(bool enable) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); if(enable) { UART1FCN1 |= UART1FCN1_TXHOLD__HOLD; } else { UART1FCN1 &= ~UART1FCN1_TXHOLD__HOLD; } RESTORE_PAGE; } #if (EFM8PDL_UART1_AUTO_PAGE == 1) void UART1_flushTxFifo(void) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); UART1FCN0 |= UART1FCN0_TFLSH__FLUSH; RESTORE_PAGE; } void UART1_flushRxFifo(void) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); UART1FCN0 |= UART1FCN0_RFLSH__FLUSH; RESTORE_PAGE; } uint8_t UART1_getAutobaudIntFlag(void) { uint8_t val; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); val = UART1LIN & UART1_AUTOBAUD_IF; RESTORE_PAGE; return val; } void UART1_clearAutobaudIntFlag(void) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); UART1LIN &= ~UART1_AUTOBAUD_IF; RESTORE_PAGE; } #endif void UART1_enableAutobaud(bool enable) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); if(enable) { UART1LIN |= (UART1LIN_AUTOBDE__ENABLED | UART1LIN_SYNCDIE__ENABLED); } else { UART1LIN &= ~(UART1LIN_AUTOBDE__ENABLED | UART1LIN_SYNCDIE__ENABLED); } RESTORE_PAGE; } //========================================================= // Initialization API //========================================================= void UART1_init(uint32_t sysclk, uint32_t baudrate, UART1_DataLen_t datalen, UART1_StopLen_t stoplen, UART1_FeatureBit_t featbit, UART1_ParityType_t partype, UART1_RxEnable_t rxen, UART1_Multiproc_t mcen) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); UART1_initBaudRate(sysclk, baudrate); SCON1 = rxen; SMOD1 = datalen | stoplen | featbit | partype | mcen; RESTORE_PAGE; } void UART1_initWithAutobaud(UART1_BrPrescaler_t presc, UART1_StopLen_t stoplen, UART1_FeatureBit_t featbit, UART1_ParityType_t partype, UART1_RxEnable_t rxen, UART1_Multiproc_t mcen) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); SCON1 = rxen; SMOD1 = SMOD1_SDL__8_BITS | stoplen | featbit | partype | mcen; UART1LIN = (UART1LIN_AUTOBDE__ENABLED | 
UART1LIN_SYNCDIE__ENABLED); SBCON1 = (SBCON1_BREN__ENABLED | presc); RESTORE_PAGE; } void UART1_reset(void) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); SCON1 = SCON1_OVR__NOT_SET | SCON1_PERR__NOT_SET | SCON1_REN__RECEIVE_DISABLED | SCON1_TBX__LOW | SCON1_RBX__LOW | SCON1_TI__NOT_SET | SCON1_RI__NOT_SET; SMOD1 = SMOD1_MCE__MULTI_DISABLED | SMOD1_SPT__ODD_PARITY | SMOD1_PE__PARITY_DISABLED | SMOD1_SDL__8_BITS | SMOD1_XBE__DISABLED | SMOD1_SBL__SHORT; SBCON1 = SBCON1_BREN__DISABLED | SBCON1_BPS__DIV_BY_1; UART1LIN = (UART1LIN_AUTOBDE__DISABLED | UART1LIN_SYNCDIE__DISABLED); RESTORE_PAGE; } void UART1_initTxFifo(UART1_TxFifoThreshold_t txth, UART1_TxFifoInt_t txint) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); UART1FCN0 &= ~(UART1FCN0_TFRQE__BMASK | UART1FCN0_TFLSH__BMASK | UART1FCN0_TXTH__FMASK | UART1FCN0_TFRQE__BMASK); UART1FCN0 |= (txth | txint); UART1FCN1 &= ~(UART1FCN1_TFRQ__BMASK | UART1FCN1_TXHOLD__BMASK | UART1FCN1_TXNF__BMASK | UART1FCN1_TIE__BMASK); UART1FCN1 |= (UART1FCN1_TFRQ__SET | UART1FCN1_TXHOLD__CONTINUE | UART1FCN1_TIE__DISABLED); RESTORE_PAGE; } void UART1_initRxFifo(UART1_RxFiFoThreshold_t rxth, UART1_RxFifoTimeout_t rxto, UART1_RxFifoInt_t rxint) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); UART1FCN0 &= ~(UART1FCN0_RFRQE__BMASK | UART1FCN0_RFLSH__BMASK | UART1FCN0_RXTH__FMASK | UART1FCN0_RFRQE__BMASK); UART1FCN0 |= (rxth | rxint); UART1FCN1 &= ~(UART1FCN1_RFRQ__BMASK | UART1FCN1_RXTO__FMASK | UART1FCN1_RIE__BMASK); UART1FCN1 |= (UART1FCN1_RFRQ__SET | rxto | UART1FCN1_RIE__DISABLED); RESTORE_PAGE; } //========================================================= // Buffer Access API //========================================================= #if EFM8PDL_UART1_USE_BUFFER == 1 SI_SEGMENT_VARIABLE(txRemaining, static uint8_t, SI_SEG_XDATA) = 0; SI_SEGMENT_VARIABLE(rxRemaining, static uint8_t, SI_SEG_XDATA) = 0; SI_SEGMENT_VARIABLE_SEGMENT_POINTER(txBuffer, static uint8_t, EFM8PDL_UART1_TX_BUFTYPE, SI_SEG_XDATA); SI_SEGMENT_VARIABLE_SEGMENT_POINTER(rxBuffer, static uint8_t, EFM8PDL_UART1_RX_BUFTYPE, SI_SEG_XDATA); SI_INTERRUPT(UART1_ISR, UART1_IRQn) { #if (EFM8PDL_UART1_USE_ERR_CALLBACK == 1) uint8_t discard; uint8_t errors; #endif //EFM8PDL_UART1_USE_ERR_CALLBACK // If the auto-baud sync word was detected and set the baud rate, clear the flag and disable auto-baud detection if (UART1LIN & UART1_AUTOBAUD_IF) { UART1LIN &= ~(UART1_AUTOBAUD_IF | UART1LIN_AUTOBDE__ENABLED | UART1LIN_SYNCDIE__ENABLED); } // If rx fifo request interrupt is set and enabled if ((UART1FCN1 & UART1_RFRQ_IF) && (UART1FCN0 & UART1FCN0_RFRQE__ENABLED)) { // Read bytes as long as rx fifo count is not zero and there // is room in the rx buffer while (rxRemaining && ((UART1FCT & UART1FCT_RXCNT__FMASK) >> UART1FCT_RXCNT__SHIFT)) { #if (EFM8PDL_UART1_USE_ERR_CALLBACK == 1) // If parity or overrun error, clear flags, and call the user callback errors = SCON1 & (UART1_RXOVR_EF | UART1_PARITY_EF); if(errors) { SCON1 &= ~errors; UART1_transferErrorCb(errors); } // Store byte if there is no parity error if (errors & UART1_PARITY_EF) { discard = SBUF1; } else #endif //EFM8PDL_UART1_USE_ERR_CALLBACK { *rxBuffer = SBUF1; ++rxBuffer; --rxRemaining; if (!rxRemaining) { UART1_receiveCompleteCb(); } } } if(!rxRemaining) { // Flush Fifo if there is no room available in rx buffer UART1FCN0 |= UART1FCN0_RFLSH__FLUSH; } } // If tx fifo request interrupt is set and enabled if ((UART1FCN1 & UART1_TFRQ_IF) && (UART1FCN0 & UART1FCN0_TFRQE__ENABLED)) { // Write bytes as long as the tx fifo is not full and there // are bytes remaining in the tx buffer while (txRemaining && (UART1FCN1 &
UART1FCN1_TXNF__NOT_FULL)) { SBUF1 = *txBuffer; ++txBuffer; --txRemaining; } if(!txRemaining) { UART1_transmitCompleteCb(); } } } void UART1_writeBuffer(SI_VARIABLE_SEGMENT_POINTER(buffer, uint8_t, EFM8PDL_UART1_TX_BUFTYPE), uint8_t length) { // Initialize internal data txBuffer = buffer; txRemaining = length; // Enable tx fifo interrupts to kick off transfer UART1FCN0 |= UART1FCN0_TFRQE__ENABLED; } void UART1_readBuffer(SI_VARIABLE_SEGMENT_POINTER(buffer, uint8_t, EFM8PDL_UART1_RX_BUFTYPE), uint8_t length) { // Initialize internal data rxBuffer = buffer; rxRemaining = length; } void UART1_abortTx(void) { txRemaining = 0; UART1_flushTxFifo(); } void UART1_abortRx(void) { rxRemaining = 0; UART1_flushRxFifo(); } uint8_t UART1_txBytesRemaining(void) { return txRemaining; } uint8_t UART1_rxBytesRemaining(void) { return rxRemaining; } #endif //EFM8PDL_UART1_USE_BUFFER //========================================================= // STDIO API //========================================================= #if EFM8PDL_UART1_USE_STDIO == 1 char putchar(char c){ DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); while(!SCON1_TI); SBUF1 = c; SCON1_TI = 0; RESTORE_PAGE; return c; } char _getkey(void){ char val; DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); while(!SCON1_RI); SCON1_RI = 0; val = SBUF1; RESTORE_PAGE; return val; } void UART1_initStdio(uint32_t sysclk, uint32_t baudrate) { DECL_PAGE; SET_PAGE(UART1_SFR_PAGE); SCON1 |= SCON1_REN__RECEIVE_ENABLED | SCON1_TI__SET; SMOD1 |= SMOD1_SDL__8_BITS; UART1_initBaudRate(sysclk, baudrate); RESTORE_PAGE; } #endif //EFM8PDL_UART1_USE_STDIO
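/*
 * Host-side sketch (not part of the driver) of the arithmetic inside
 * UART1_initBaudRate() above: pick the smallest prescaler whose divided
 * count fits the 16-bit SBRL1 down-counter, then program the reload value
 * 65536 - sysclk / (2 * baud * presc). The 24.5 MHz / 115200 figures are
 * illustrative values, not taken from the file.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    static const uint8_t presc[] = {1, 4, 8, 12, 16, 24, 32, 48};
    uint32_t sysclk = 24500000UL, baud = 115200UL;
    unsigned i;

    for (i = 0; i < sizeof(presc) / sizeof(presc[0]); i++) {
        uint32_t count = sysclk / (2UL * baud * presc[i]);
        if (count >= 1 && count <= 0xFFFF) {   /* fits the 16-bit counter */
            printf("prescaler %u -> SBRL1 = 0x%04X\n",
                   presc[i], (unsigned)(0x10000UL - count));
            return 0;
        }
    }
    puts("baud rate too low for any prescaler");
    return 0;
}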
ahtn/keyplus
ports/efm8/efm8/mcu/EFM8UB3/peripheral_driver/src/uart_1.c
C
mit
14,124
/***************************************************************************/ /* */ /* pshinter.c */ /* */ /* FreeType PostScript Hinting module */ /* */ /* Copyright 2001-2015 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #define FT_MAKE_OPTION_SINGLE_OBJECT #include <ft2build.h> #include "pshpic.c" #include "pshrec.c" #include "pshglob.c" #include "pshalgo.c" #include "pshmod.c" /* END */
yapingxin/saturn-gui-lib-workshop
Lib/FreeType/freetype-2.6.2/src/pshinter/pshinter.c
C
mit
1,453
/* * Copyright (c) 2000, 2001, 2002 Fabrice Bellard * Copyright (c) 2007 Mans Rullgard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdarg.h> #include <stdint.h> #include <stdio.h> #include <string.h> #include "config.h" #include "common.h" #include "mem.h" #include "avassert.h" #include "avstring.h" #include "bprint.h" int av_strstart(const char *str, const char *pfx, const char **ptr) { while (*pfx && *pfx == *str) { pfx++; str++; } if (!*pfx && ptr) *ptr = str; return !*pfx; } int av_stristart(const char *str, const char *pfx, const char **ptr) { while (*pfx && av_toupper((unsigned)*pfx) == av_toupper((unsigned)*str)) { pfx++; str++; } if (!*pfx && ptr) *ptr = str; return !*pfx; } char *av_stristr(const char *s1, const char *s2) { if (!*s2) return (char*)(intptr_t)s1; do if (av_stristart(s1, s2, NULL)) return (char*)(intptr_t)s1; while (*s1++); return NULL; } char *av_strnstr(const char *haystack, const char *needle, size_t hay_length) { size_t needle_len = strlen(needle); if (!needle_len) return (char*)haystack; while (hay_length >= needle_len) { hay_length--; if (!memcmp(haystack, needle, needle_len)) return (char*)haystack; haystack++; } return NULL; } size_t av_strlcpy(char *dst, const char *src, size_t size) { size_t len = 0; while (++len < size && *src) *dst++ = *src++; if (len <= size) *dst = 0; return len + strlen(src) - 1; } size_t av_strlcat(char *dst, const char *src, size_t size) { size_t len = strlen(dst); if (size <= len + 1) return len + strlen(src); return len + av_strlcpy(dst + len, src, size - len); } size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) { size_t len = strlen(dst); va_list vl; va_start(vl, fmt); len += vsnprintf(dst + len, size > len ? size - len : 0, fmt, vl); va_end(vl); return len; } char *av_asprintf(const char *fmt, ...) 
{ char *p = NULL; va_list va; int len; va_start(va, fmt); len = vsnprintf(NULL, 0, fmt, va); va_end(va); if (len < 0) goto end; p = av_malloc(len + 1); if (!p) goto end; va_start(va, fmt); len = vsnprintf(p, len + 1, fmt, va); va_end(va); if (len < 0) av_freep(&p); end: return p; } char *av_d2str(double d) { char *str = av_malloc(16); if (str) snprintf(str, 16, "%f", d); return str; } #define WHITESPACES " \n\t" char *av_get_token(const char **buf, const char *term) { char *out = av_malloc(strlen(*buf) + 1); char *ret = out, *end = out; const char *p = *buf; if (!out) return NULL; p += strspn(p, WHITESPACES); while (*p && !strspn(p, term)) { char c = *p++; if (c == '\\' && *p) { *out++ = *p++; end = out; } else if (c == '\'') { while (*p && *p != '\'') *out++ = *p++; if (*p) { p++; end = out; } } else { *out++ = c; } } do *out-- = 0; while (out >= end && strspn(out, WHITESPACES)); *buf = p; return ret; } char *av_strtok(char *s, const char *delim, char **saveptr) { char *tok; if (!s && !(s = *saveptr)) return NULL; /* skip leading delimiters */ s += strspn(s, delim); /* s now points to the first non delimiter char, or to the end of the string */ if (!*s) { *saveptr = NULL; return NULL; } tok = s++; /* skip non delimiters */ s += strcspn(s, delim); if (*s) { *s = 0; *saveptr = s+1; } else { *saveptr = NULL; } return tok; } int av_strcasecmp(const char *a, const char *b) { uint8_t c1, c2; do { c1 = av_tolower(*a++); c2 = av_tolower(*b++); } while (c1 && c1 == c2); return c1 - c2; } int av_strncasecmp(const char *a, const char *b, size_t n) { const char *end = a + n; uint8_t c1, c2; do { c1 = av_tolower(*a++); c2 = av_tolower(*b++); } while (a < end && c1 && c1 == c2); return c1 - c2; } const char *av_basename(const char *path) { char *p = strrchr(path, '/'); #if HAVE_DOS_PATHS char *q = strrchr(path, '\\'); char *d = strchr(path, ':'); p = FFMAX3(p, q, d); #endif if (!p) return path; return p + 1; } const char *av_dirname(char *path) { char *p = strrchr(path, '/'); #if HAVE_DOS_PATHS char *q = strrchr(path, '\\'); char *d = strchr(path, ':'); d = d ? 
d + 1 : d; p = FFMAX3(p, q, d); #endif if (!p) return "."; *p = '\0'; return path; } int av_escape(char **dst, const char *src, const char *special_chars, enum AVEscapeMode mode, int flags) { AVBPrint dstbuf; av_bprint_init(&dstbuf, 1, AV_BPRINT_SIZE_UNLIMITED); av_bprint_escape(&dstbuf, src, special_chars, mode, flags); if (!av_bprint_is_complete(&dstbuf)) { av_bprint_finalize(&dstbuf, NULL); return AVERROR(ENOMEM); } else { av_bprint_finalize(&dstbuf, dst); return dstbuf.len; } } int av_isdigit(int c) { return c >= '0' && c <= '9'; } int av_isgraph(int c) { return c > 32 && c < 127; } int av_isspace(int c) { return c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || c == '\v'; } int av_isxdigit(int c) { c = av_tolower(c); return av_isdigit(c) || (c >= 'a' && c <= 'f'); } int av_match_name(const char *name, const char *names) { const char *p; int len, namelen; if (!name || !names) return 0; namelen = strlen(name); while ((p = strchr(names, ','))) { len = FFMAX(p - names, namelen); if (!av_strncasecmp(name, names, len)) return 1; names = p + 1; } return !av_strcasecmp(name, names); } int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end, unsigned int flags) { const uint8_t *p = *bufp; uint32_t top; uint64_t code; int ret = 0, tail_len; uint32_t overlong_encoding_mins[6] = { 0x00000000, 0x00000080, 0x00000800, 0x00010000, 0x00200000, 0x04000000, }; if (p >= buf_end) return 0; code = *p++; /* first sequence byte starts with 10, or is 1111-1110 or 1111-1111, which is not admitted */ if ((code & 0xc0) == 0x80 || code >= 0xFE) { ret = AVERROR(EILSEQ); goto end; } top = (code & 128) >> 1; tail_len = 0; while (code & top) { int tmp; tail_len++; if (p >= buf_end) { (*bufp) ++; return AVERROR(EILSEQ); /* incomplete sequence */ } /* we assume the byte to be in the form 10xx-xxxx */ tmp = *p++ - 128; /* strip leading 1 */ if (tmp>>6) { (*bufp) ++; return AVERROR(EILSEQ); } code = (code<<6) + tmp; top <<= 5; } code &= (top << 1) - 1; /* check for overlong encodings */ av_assert0(tail_len <= 5); if (code < overlong_encoding_mins[tail_len]) { ret = AVERROR(EILSEQ); goto end; } if (code >= 1<<31) { ret = AVERROR(EILSEQ); /* out-of-range value */ goto end; } *codep = code; if (code > 0x10FFFF && !(flags & AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES)) ret = AVERROR(EILSEQ); if (code < 0x20 && code != 0x9 && code != 0xA && code != 0xD && flags & AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES) ret = AVERROR(EILSEQ); if (code >= 0xD800 && code <= 0xDFFF && !(flags & AV_UTF8_FLAG_ACCEPT_SURROGATES)) ret = AVERROR(EILSEQ); if ((code == 0xFFFE || code == 0xFFFF) && !(flags & AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS)) ret = AVERROR(EILSEQ); end: *bufp = p; return ret; } int av_match_list(const char *name, const char *list, char separator) { const char *p, *q; for (p = name; p && *p; ) { for (q = list; q && *q; ) { int k; for (k = 0; p[k] == q[k] || (p[k]*q[k] == 0 && p[k]+q[k] == separator); k++) if (k && (!p[k] || p[k] == separator)) return 1; q = strchr(q, separator); q += !!q; } p = strchr(p, separator); p += !!p; } return 0; } #ifdef TEST int main(void) { int i; static const char * const strings[] = { "''", "", ":", "\\", "'", " '' :", " '' '' :", "foo '' :", "'foo'", "foo ", " ' foo ' ", "foo\\", "foo': blah:blah", "foo\\: blah:blah", "foo\'", "'foo : ' :blahblah", "\\ :blah", " foo", " foo ", " foo \\ ", "foo ':blah", " foo bar : blahblah", "\\f\\o\\o", "'foo : \\ \\ ' : blahblah", "'\\fo\\o:': blahblah", "\\'fo\\o\\:': foo ' :blahblah" }; printf("Testing av_get_token()\n"); for (i 
= 0; i < FF_ARRAY_ELEMS(strings); i++) { const char *p = strings[i]; char *q; printf("|%s|", p); q = av_get_token(&p, ":"); printf(" -> |%s|", q); printf(" + |%s|\n", p); av_free(q); } return 0; } #endif /* TEST */
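/*
 * Illustrative usage sketch (not part of avstring.c), assuming a program
 * compiled against libavutil: av_strlcpy() follows the BSD strlcpy()
 * contract and returns the length of the untruncated source string, so a
 * return value >= the destination size signals truncation.
 */
#include <stdio.h>
#include <libavutil/avstring.h>

int main(void)
{
    char buf[8];
    size_t n = av_strlcpy(buf, "truncate me", sizeof(buf));

    if (n >= sizeof(buf))
        printf("truncated to \"%s\"; %zu bytes were needed\n", buf, n + 1);
    return 0;
}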
devintaietta/Remote-Player-Audio
dipendenze/dacompilare/ffmpeg-2.6.3/libavutil/avstring.c
C
gpl-2.0
10,363
/* -*- mode: c -*- */ /* Copyright (C) 2005-2016 Alexander Chernov <cher@ejudge.ru> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "ejudge/cpu.h" #include "ejudge/errlog.h" #include <stdlib.h> int cpu_get_bogomips(void) { err("cpu_get_bogomips: not implemented"); return -1; } void cpu_get_performance_info(unsigned char **p_model, unsigned char **p_mhz) { *p_model = NULL; *p_mhz = NULL; }
misty-fungus/ejudge-debian
win32/cpu.c
C
gpl-2.0
878
/* * This file is part of the UCB release of Plan 9. It is subject to the license * terms in the LICENSE file found in the top-level directory of this * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No * part of the UCB release of Plan 9, including this file, may be copied, * modified, propagated, or distributed except according to the terms contained * in the LICENSE file. */ #include <u.h> #include <libc.h> #include <bio.h> #include "pci.h" #include "vga.h" /* * ATI Mach64 family. */ enum { HTotalDisp, HSyncStrtWid, VTotalDisp, VSyncStrtWid, VlineCrntVline, OffPitch, IntCntl, CrtcGenCntl, OvrClr, OvrWidLR, OvrWidTB, CurClr0, CurClr1, CurOffset, CurHVposn, CurHVoff, ScratchReg0, ScratchReg1, /* Scratch Register (BIOS info) */ ClockCntl, BusCntl, MemCntl, ExtMemCntl, MemVgaWpSel, MemVgaRpSel, DacRegs, DacCntl, GenTestCntl, ConfigCntl, /* Configuration control */ ConfigChipId, ConfigStat0, /* Configuration status 0 */ ConfigStat1, /* Configuration status 1 */ ConfigStat2, DspConfig, /* Rage */ DspOnOff, /* Rage */ DpBkgdClr, DpChainMsk, DpFrgdClr, DpMix, DpPixWidth, DpSrc, DpWriteMsk, LcdIndex, LcdData, Nreg, TvIndex = 0x1D, TvData = 0x27, LCD_ConfigPanel = 0, LCD_GenCtrl, LCD_DstnCntl, LCD_HfbPitchAddr, LCD_HorzStretch, LCD_VertStretch, LCD_ExtVertStretch, LCD_LtGio, LCD_PowerMngmnt, LCD_ZvgPio, Nlcd, }; static char* iorname[Nreg] = { "HTotalDisp", "HSyncStrtWid", "VTotalDisp", "VSyncStrtWid", "VlineCrntVline", "OffPitch", "IntCntl", "CrtcGenCntl", "OvrClr", "OvrWidLR", "OvrWidTB", "CurClr0", "CurClr1", "CurOffset", "CurHVposn", "CurHVoff", "ScratchReg0", "ScratchReg1", "ClockCntl", "BusCntl", "MemCntl", "ExtMemCntl", "MemVgaWpSel", "MemVgaRpSel", "DacRegs", "DacCntl", "GenTestCntl", "ConfigCntl", "ConfigChipId", "ConfigStat0", "ConfigStat1", "ConfigStat2", "DspConfig", "DspOnOff", "DpBkgdClr", "DpChainMsk", "DpFrgdClr", "DpMix", "DpPixWidth", "DpSrc", "DpWriteMsk", "LcdIndex", "LcdData", }; static char* lcdname[Nlcd] = { "LCD ConfigPanel", "LCD GenCntl", "LCD DstnCntl", "LCD HfbPitchAddr", "LCD HorzStretch", "LCD VertStretch", "LCD ExtVertStretch", "LCD LtGio", "LCD PowerMngmnt", "LCD ZvgPio" }; /* * Crummy hack: all io register offsets * here get IOREG or'ed in, so that we can * tell the difference between an uninitialized * array entry and HTotalDisp. 
*/ enum { IOREG = 0x10000, }; static ushort ioregs[Nreg] = { [HTotalDisp] IOREG|0x0000, [HSyncStrtWid] IOREG|0x0100, [VTotalDisp] IOREG|0x0200, [VSyncStrtWid] IOREG|0x0300, [VlineCrntVline] IOREG|0x0400, [OffPitch] IOREG|0x0500, [IntCntl] IOREG|0x0600, [CrtcGenCntl] IOREG|0x0700, [OvrClr] IOREG|0x0800, [OvrWidLR] IOREG|0x0900, [OvrWidTB] IOREG|0x0A00, [CurClr0] IOREG|0x0B00, [CurClr1] IOREG|0x0C00, [CurOffset] IOREG|0x0D00, [CurHVposn] IOREG|0x0E00, [CurHVoff] IOREG|0x0F00, [ScratchReg0] IOREG|0x1000, [ScratchReg1] IOREG|0x1100, [ClockCntl] IOREG|0x1200, [BusCntl] IOREG|0x1300, [MemCntl] IOREG|0x1400, [MemVgaWpSel] IOREG|0x1500, [MemVgaRpSel] IOREG|0x1600, [DacRegs] IOREG|0x1700, [DacCntl] IOREG|0x1800, [GenTestCntl] IOREG|0x1900, [ConfigCntl] IOREG|0x1A00, [ConfigChipId] IOREG|0x1B00, [ConfigStat0] IOREG|0x1C00, [ConfigStat1] IOREG|0x1D00, /* [GpIo] IOREG|0x1E00, */ /* [HTotalDisp] IOREG|0x1F00, duplicate, says XFree86 */ }; static ushort pciregs[Nreg] = { [HTotalDisp] 0x00, [HSyncStrtWid] 0x01, [VTotalDisp] 0x02, [VSyncStrtWid] 0x03, [VlineCrntVline] 0x04, [OffPitch] 0x05, [IntCntl] 0x06, [CrtcGenCntl] 0x07, [DspConfig] 0x08, [DspOnOff] 0x09, [OvrClr] 0x10, [OvrWidLR] 0x11, [OvrWidTB] 0x12, [CurClr0] 0x18, [CurClr1] 0x19, [CurOffset] 0x1A, [CurHVposn] 0x1B, [CurHVoff] 0x1C, [ScratchReg0] 0x20, [ScratchReg1] 0x21, [ClockCntl] 0x24, [BusCntl] 0x28, [LcdIndex] 0x29, [LcdData] 0x2A, [ExtMemCntl] 0x2B, [MemCntl] 0x2C, [MemVgaWpSel] 0x2D, [MemVgaRpSel] 0x2E, [DacRegs] 0x30, [DacCntl] 0x31, [GenTestCntl] 0x34, [ConfigCntl] 0x37, [ConfigChipId] 0x38, [ConfigStat0] 0x39, [ConfigStat1] 0x25, /* rsc: was 0x3A, but that's not what the LT manual says */ [ConfigStat2] 0x26, [DpBkgdClr] 0xB0, [DpChainMsk] 0xB3, [DpFrgdClr] 0xB1, [DpMix] 0xB5, [DpPixWidth] 0xB4, [DpSrc] 0xB6, [DpWriteMsk] 0xB2, }; enum { PLLm = 0x02, PLLp = 0x06, PLLn0 = 0x07, PLLn1 = 0x08, PLLn2 = 0x09, PLLn3 = 0x0A, PLLx = 0x0B, /* external divisor (Rage) */ Npll = 32, Ntv = 1, /* actually 256, but not used */ }; typedef struct Mach64xx Mach64xx; struct Mach64xx { ulong io; Pcidev* pci; int bigmem; int lcdon; int lcdpanelid; ulong reg[Nreg]; ulong lcd[Nlcd]; ulong tv[Ntv]; uchar pll[Npll]; ulong (*ior32)(Mach64xx*, int); void (*iow32)(Mach64xx*, int, ulong); }; static ulong portior32(Mach64xx* mp, int r) { if((ioregs[r] & IOREG) == 0) return ~0; return inportl(((ioregs[r] & ~IOREG)<<2)+mp->io); } static void portiow32(Mach64xx* mp, int r, ulong l) { if((ioregs[r] & IOREG) == 0) return; outportl(((ioregs[r] & ~IOREG)<<2)+mp->io, l); } static ulong pciior32(Mach64xx* mp, int r) { return inportl((pciregs[r]<<2)+mp->io); } static void pciiow32(Mach64xx* mp, int r, ulong l) { outportl((pciregs[r]<<2)+mp->io, l); } static uchar pllr(Mach64xx* mp, int r) { int io; if(mp->ior32 == portior32) io = ((ioregs[ClockCntl]&~IOREG)<<2)+mp->io; else io = (pciregs[ClockCntl]<<2)+mp->io; outportb(io+1, r<<2); return inportb(io+2); } static void pllw(Mach64xx* mp, int r, uchar b) { int io; if(mp->ior32 == portior32) io = ((ioregs[ClockCntl]&~IOREG)<<2)+mp->io; else io = (pciregs[ClockCntl]<<2)+mp->io; outportb(io+1, (r<<2)|0x02); outportb(io+2, b); } static ulong lcdr32(Mach64xx *mp, ulong r) { ulong or; or = mp->ior32(mp, LcdIndex); mp->iow32(mp, LcdIndex, (or&~0x0F) | (r&0x0F)); return mp->ior32(mp, LcdData); } static void lcdw32(Mach64xx *mp, ulong r, ulong v) { ulong or; or = mp->ior32(mp, LcdIndex); mp->iow32(mp, LcdIndex, (or&~0x0F) | (r&0x0F)); mp->iow32(mp, LcdData, v); } static ulong tvr32(Mach64xx *mp, ulong r) { outportb(mp->io+(TvIndex<<2), 
r&0x0F); return inportl(mp->io+(TvData<<2)); } static void tvw32(Mach64xx *mp, ulong r, ulong v) { outportb(mp->io+(TvIndex<<2), r&0x0F); outportl(mp->io+(TvData<<2), v); } static int smallmem[] = { 512*1024, 1024*1024, 2*1024*1024, 4*1024*1024, 6*1024*1024, 8*1024*1024, 12*1024*1024, 16*1024*1024, }; static int bigmem[] = { 512*1024, 2*512*1024, 3*512*1024, 4*512*1024, 5*512*1024, 6*512*1024, 7*512*1024, 8*512*1024, 5*1024*1024, 6*1024*1024, 7*1024*1024, 8*1024*1024, 10*1024*1024, 12*1024*1024, 14*1024*1024, 16*1024*1024, }; static void snarf(Vga* vga, Ctlr* ctlr) { Mach64xx *mp; int i; ulong v; if(vga->private == nil){ vga->private = alloc(sizeof(Mach64xx)); mp = vga->private; mp->io = 0x2EC; mp->ior32 = portior32; mp->iow32 = portiow32; mp->pci = pcimatch(0, 0x1002, 0); if (mp->pci) { if(v = mp->pci->mem[1].bar & ~0x3) { mp->io = v; mp->ior32 = pciior32; mp->iow32 = pciiow32; } } } mp = vga->private; for(i = 0; i < Nreg; i++) mp->reg[i] = mp->ior32(mp, i); for(i = 0; i < Npll; i++) mp->pll[i] = pllr(mp, i); switch(mp->reg[ConfigChipId] & 0xFFFF){ default: mp->lcdpanelid = 0; break; case ('L'<<8)|'B': /* 4C42: Rage LTPro AGP */ case ('L'<<8)|'I': /* 4C49: Rage 3D LTPro */ case ('L'<<8)|'M': /* 4C4D: Rage Mobility */ case ('L'<<8)|'P': /* 4C50: Rage 3D LTPro */ for(i = 0; i < Nlcd; i++) mp->lcd[i] = lcdr32(mp, i); if(mp->lcd[LCD_GenCtrl] & 0x02) mp->lcdon = 1; mp->lcdpanelid = ((mp->reg[ConfigStat2]>>14) & 0x1F); break; } /* * Check which memory size map we are using. */ mp->bigmem = 0; switch(mp->reg[ConfigChipId] & 0xFFFF){ case ('G'<<8)|'B': /* 4742: 264GT PRO */ case ('G'<<8)|'D': /* 4744: 264GT PRO */ case ('G'<<8)|'I': /* 4749: 264GT PRO */ case ('G'<<8)|'M': /* 474D: Rage XL */ case ('G'<<8)|'P': /* 4750: 264GT PRO */ case ('G'<<8)|'Q': /* 4751: 264GT PRO */ case ('G'<<8)|'R': /* 4752: */ case ('G'<<8)|'U': /* 4755: 264GT DVD */ case ('G'<<8)|'V': /* 4756: Rage2C */ case ('G'<<8)|'Z': /* 475A: Rage2C */ case ('V'<<8)|'U': /* 5655: 264VT3 */ case ('V'<<8)|'V': /* 5656: 264VT4 */ case ('L'<<8)|'B': /* 4C42: Rage LTPro AGP */ case ('L'<<8)|'I': /* 4C49: Rage 3D LTPro */ case ('L'<<8)|'M': /* 4C4D: Rage Mobility */ case ('L'<<8)|'P': /* 4C50: Rage 3D LTPro */ mp->bigmem = 1; break; case ('G'<<8)|'T': /* 4754: 264GT[B] */ case ('V'<<8)|'T': /* 5654: 264VT/GT/VTB */ /* * Only the VTB and GTB use the new memory encoding, * and they are identified by a nonzero ChipVersion, * apparently. */ if((mp->reg[ConfigChipId] >> 24) & 0x7) mp->bigmem = 1; break; } /* * Memory size and aperture. It's recommended * to use an 8Mb aperture on a 16Mb boundary. */ if(mp->bigmem) vga->vmz = bigmem[mp->reg[MemCntl] & 0x0F]; else vga->vmz = smallmem[mp->reg[MemCntl] & 0x07]; vga->vma = 16*1024*1024; switch(mp->reg[ConfigCntl]&0x3){ case 0: vga->apz = 16*1024*1024; /* empirical -rsc */ break; case 1: vga->apz = 4*1024*1024; break; case 2: vga->apz = 8*1024*1024; break; case 3: vga->apz = 2*1024*1024; /* empirical: mach64GX -rsc */ break; } ctlr->flag |= Fsnarf; } static void options(Vga*, Ctlr* ctlr) { ctlr->flag |= Hlinear|Foptions; } static void clock(Vga* vga, Ctlr* ctlr) { int clk, m, n, p; double f, q; Mach64xx *mp; mp = vga->private; /* * Don't compute clock timings for LCD panels. * Just use what's already there. We can't just use * the frequency in the vgadb for this because * the frequency being programmed into the PLLs * is not the frequency being used to compute the DSP * settings. The DSP-relevant frequency is the one * we keep in /lib/vgadb. 
*/ if(mp->lcdon){ clk = mp->reg[ClockCntl] & 0x03; n = mp->pll[7+clk]; p = (mp->pll[6]>>(clk*2)) & 0x03; p |= (mp->pll[11]>>(2+clk)) & 0x04; switch(p){ case 0: case 1: case 2: case 3: p = 1<<p; break; case 4+0: p = 3; break; case 4+2: p = 6; break; case 4+3: p = 12; break; default: case 4+1: p = -1; break; } m = mp->pll[PLLm]; f = (2.0*RefFreq*n)/(m*p) + 0.5; vga->m[0] = m; vga->p[0] = p; vga->n[0] = n; vga->f[0] = f; return; } if(vga->f[0] == 0) vga->f[0] = vga->mode->frequency; f = vga->f[0]; /* * To generate a specific output frequency, the reference (m), * feedback (n), and post dividers (p) must be loaded with the * appropriate divide-down ratios. In the following r is the * XTALIN frequency (usually RefFreq) and t is the target frequency * (vga->f). * * Use the maximum reference divider left by the BIOS for now, * otherwise MCLK might be a concern. It can be calculated as * follows: * Upper Limit of PLL Lock Range * Minimum PLLREFCLK = ----------------------------- * (2*255) * * XTALIN * m = Floor[-----------------] * Minimum PLLREFCLK * * For an upper limit of 135MHz and XTALIN of 14.318MHz m * would be 54. */ m = mp->pll[PLLm]; vga->m[0] = m; /* * The post divider may be 1, 2, 4 or 8 and is determined by * calculating * t*m * q = ----- * (2*r) * and using the result to look-up p. */ q = (f*m)/(2*RefFreq); if(ctlr->flag&Uenhanced){ if(q > 255 || q < 10.6666666667) error("%s: vclk %lud out of range\n", ctlr->name, vga->f[0]); if(q > 127.5) p = 1; else if(q > 85) p = 2; else if(q > 63.75) p = 3; else if(q > 42.5) p = 4; else if(q > 31.875) p = 6; else if(q > 21.25) p = 8; else p = 12; }else{ if(q > 255 || q < 16) error("%s: vclk %lud out of range\n", ctlr->name, vga->f[0]); if(q >= 127.5) p = 1; else if(q >= 63.5) p = 2; else if(q >= 31.5) p = 4; else p = 8; } vga->p[0] = p; /* * The feedback divider should be kept in the range 0x80 to 0xFF * and is found from * n = q*p * rounded to the nearest whole number. */ vga->n[0] = (q*p)+0.5; } typedef struct Meminfo Meminfo; struct Meminfo { int latency; int latch; int trp; /* filled in from card */ int trcd; /* filled in from card */ int tcrd; /* filled in from card */ int tras; /* filled in from card */ }; enum { Mdram, Medo, Msdram, Mwram, }; /* * The manuals and documentation are silent on which settings * to use for Mwdram, or how to tell which to use. */ static Meminfo meminfo[] = { [Mdram] { 1, 0 }, [Medo] { 1, 2 }, [Msdram] { 3, 1 }, [Mwram] { 1, 3 }, /* non TYPE_A */ }; static ushort looplatencytab[2][2] = { { 8, 6 }, /* DRAM: ≤1M, > 1M */ { 9, 8 }, /* SDRAM: ≤1M, > 1M */ }; static ushort cyclesperqwordtab[2][2] = { { 3, 2 }, /* DRAM: ≤1M, > 1M */ { 2, 1 }, /* SDRAM: ≤1M, > 1M */ }; static int memtype[] = { -1, /* disable memory access */ Mdram, /* basic DRAM */ Medo, /* EDO */ Medo, /* hyper page DRAM or EDO */ Msdram, /* SDRAM */ Msdram, /* SGRAM */ Mwram, Mwram }; /* * Calculate various memory parameters so that the card * fetches the right bytes at the right time. I don't claim to * understand the actual calculations very well. * * This is remarkably useful on laptops, since knowledge of * x lets us find the frequency that the screen is really running * at, which is not necessarily in the VCLKs. 
*/ static void setdsp(Vga* vga, Ctlr*) { Mach64xx *mp; Meminfo *mem; ushort table, memclk, memtyp; int i, prec, xprec, fprec; ulong t; double pw, x, fifosz, fifoon, fifooff; ushort dspon, dspoff; int afifosz, lat, ncycle, pfc, rcc; mp = vga->private; /* * Get video ram configuration from BIOS and chip */ table = *(ushort*)readbios(sizeof table, 0xc0048); trace("rom table offset %uX\n", table); table = *(ushort*)readbios(sizeof table, 0xc0000+table+16); trace("freq table offset %uX\n", table); memclk = *(ushort*)readbios(sizeof memclk, 0xc0000+table+18); trace("memclk %ud\n", memclk); memtyp = memtype[mp->reg[ConfigStat0]&07]; mem = &meminfo[memtyp]; /* * First we need to calculate x, the number of * XCLKs that one QWORD occupies in the display FIFO. * * For some reason, x gets stretched out if LCD stretching * is turned on. */ x = ((double)memclk*640000.0) / ((double)vga->mode->frequency * (double)vga->mode->z); if(mp->lcd[LCD_HorzStretch] & (1<<31)) x *= 4096.0 / (double)(mp->lcd[LCD_HorzStretch] & 0xFFFF); trace("memclk %d... x %f...", memclk, x); /* * We have 14 bits to specify x in. Decide where to * put the decimal (err, binary) point by counting how * many significant bits are in the integer portion of x. */ t = x; for(i=31; i>=0; i--) if(t & (1<<i)) break; xprec = i+1; trace("t %lud... xprec %d...", t, xprec); /* * The maximum FIFO size is the number of XCLKs per QWORD * multiplied by 32, for some reason. We have 11 bits to * specify fifosz. */ fifosz = x * 32.0; trace("fifosz %f...", fifosz); t = fifosz; for(i=31; i>=0; i--) if(t & (1<<i)) break; fprec = i+1; trace("fprec %d...", fprec); /* * Precision is specified as 3 less than the number of bits * in the integer part of x, and 5 less than the number of bits * in the integer part of fifosz. * * It is bounded by zero and seven. */ prec = (xprec-3 > fprec-5) ? xprec-3 : fprec-5; if(prec < 0) prec = 0; if(prec > 7) prec = 7; xprec = prec+3; fprec = prec+5; trace("prec %d...", prec); /* * Actual fifo size */ afifosz = (1<<fprec) / x; if(afifosz > 32) afifosz = 32; fifooff = ceil(x*(afifosz-1)); /* * I am suspicious of this table, lifted from ATI docs, * because it doesn't agree with the Windows drivers. * We always get 0x0A for lat+2 while Windows uses 0x08. */ lat = looplatencytab[memtyp > 1][vga->vmz > 1*1024*1024]; trace("afifosz %d...fifooff %f...", afifosz, fifooff); /* * Page fault clock */ t = mp->reg[MemCntl]; mem->trp = (t>>8)&3; /* RAS precharge time */ mem->trcd = (t>>10)&3; /* RAS to CAS delay */ mem->tcrd = (t>>12)&1; /* CAS to RAS delay */ mem->tras = (t>>16)&7; /* RAS low minimum pulse width */ pfc = mem->trp + 1 + mem->trcd + 1 + mem->tcrd; trace("pfc %d...", pfc); /* * Maximum random access cycle clock. */ ncycle = cyclesperqwordtab[memtyp > 1][vga->vmz > 1*1024*1024]; rcc = mem->trp + 1 + mem->tras + 1; if(rcc < pfc+ncycle) rcc = pfc+ncycle; trace("rcc %d...", rcc); fifoon = (rcc > floor(x)) ? rcc : floor(x); fifoon += (3.0 * rcc) - 1 + pfc + ncycle; trace("fifoon %f...\n", fifoon); /* * Now finally put the bits together. * x is stored in a 14 bit field with xprec bits of integer. */ pw = x * (1<<(14-xprec)); mp->reg[DspConfig] = (ulong)pw | (((lat+2)&0xF)<<16) | ((prec&7)<<20); /* * These are stored in an 11 bit field with fprec bits of integer. 
*/ dspon = (ushort)fifoon << (11-fprec); dspoff = (ushort)fifooff << (11-fprec); mp->reg[DspOnOff] = ((dspon&0x7ff) << 16) | (dspoff&0x7ff); } static void init(Vga* vga, Ctlr* ctlr) { Mode *mode; Mach64xx *mp; int p, x, y; mode = vga->mode; if((mode->x > 640 || mode->y > 480) && mode->z == 1) error("%s: no support for 1-bit mode other than 640x480x1\n", ctlr->name); mp = vga->private; if(mode->z > 8 && mp->pci == nil) error("%s: no support for >8-bit color without PCI\n", ctlr->name); /* * Check for Rage chip */ switch (mp->reg[ConfigChipId]&0xffff) { case ('G'<<8)|'B': /* 4742: 264GT PRO */ case ('G'<<8)|'D': /* 4744: 264GT PRO */ case ('G'<<8)|'I': /* 4749: 264GT PRO */ case ('G'<<8)|'M': /* 474D: Rage XL */ case ('G'<<8)|'P': /* 4750: 264GT PRO */ case ('G'<<8)|'Q': /* 4751: 264GT PRO */ case ('G'<<8)|'R': /* 4752: */ case ('G'<<8)|'U': /* 4755: 264GT DVD */ case ('G'<<8)|'V': /* 4756: Rage2C */ case ('G'<<8)|'Z': /* 475A: Rage2C */ case ('V'<<8)|'U': /* 5655: 264VT3 */ case ('V'<<8)|'V': /* 5656: 264VT4 */ case ('G'<<8)|'T': /* 4754: 264GT[B] */ case ('V'<<8)|'T': /* 5654: 264VT/GT/VTB */ case ('L'<<8)|'B': /* 4C42: Rage LTPro AGP */ case ('L'<<8)|'I': /* 4C49: 264LT PRO */ case ('L'<<8)|'M': /* 4C4D: Rage Mobility */ case ('L'<<8)|'P': /* 4C50: 264LT PRO */ ctlr->flag |= Uenhanced; break; } /* * Always use VCLK2. */ clock(vga, ctlr); mp->pll[PLLn2] = vga->n[0]; mp->pll[PLLp] &= ~(0x03<<(2*2)); switch(vga->p[0]){ case 1: case 3: p = 0; break; case 2: p = 1; break; case 4: case 6: p = 2; break; case 8: case 12: p = 3; break; default: p = 3; break; } mp->pll[PLLp] |= p<<(2*2); if ((1<<p) != vga->p[0]) mp->pll[PLLx] |= 1<<(4+2); else mp->pll[PLLx] &= ~(1<<(4+2)); mp->reg[ClockCntl] = 2; mp->reg[ConfigCntl] = 0; mp->reg[CrtcGenCntl] = 0x02000000|(mp->reg[CrtcGenCntl] & ~0x01400700); switch(mode->z){ default: case 1: mp->reg[CrtcGenCntl] |= 0x00000100; mp->reg[DpPixWidth] = 0x00000000; break; case 8: mp->reg[CrtcGenCntl] |= 0x01000200; mp->reg[DpPixWidth] = 0x00020202; break; case 15: mp->reg[CrtcGenCntl] |= 0x01000300; mp->reg[DpPixWidth] = 0x00030303; break; case 16: mp->reg[CrtcGenCntl] |= 0x01000400; mp->reg[DpPixWidth] = 0x00040404; break; case 24: mp->reg[CrtcGenCntl] |= 0x01000500; mp->reg[DpPixWidth] = 0x00050505; break; case 32: mp->reg[CrtcGenCntl] |= 0x01000600; mp->reg[DpPixWidth] = 0x00060606; break; } mp->reg[HTotalDisp] = (((mode->x>>3)-1)<<16)|((mode->ht>>3)-1); mp->reg[HSyncStrtWid] = (((mode->ehs - mode->shs)>>3)<<16) |((mode->shs>>3)-1); if(mode->hsync == '-') mp->reg[HSyncStrtWid] |= 0x00200000; mp->reg[VTotalDisp] = ((mode->y-1)<<16)|(mode->vt-1); mp->reg[VSyncStrtWid] = ((mode->vre - mode->vrs)<<16)|(mode->vrs-1); if(mode->vsync == '-') mp->reg[VSyncStrtWid] |= 0x00200000; mp->reg[IntCntl] = 0; /* * This used to set it to (mode->x/(8*2))<<22 for depths < 8, * but from the manual that seems wrong to me. -rsc */ mp->reg[OffPitch] = (vga->virtx/8)<<22; mp->reg[OvrClr] = Pblack; if(vga->linear && mode->z != 1) ctlr->flag |= Ulinear; /* * Heuristic fiddling on LT PRO. * Do this before setdsp so the stretching is right. */ if(mp->lcdon){ /* use non-shadowed registers */ mp->lcd[LCD_GenCtrl] &= ~0x00000404; mp->lcd[LCD_ConfigPanel] |= 0x00004000; mp->lcd[LCD_VertStretch] = 0; y = ((mp->lcd[LCD_ExtVertStretch]>>11) & 0x7FF)+1; if(mode->y < y){ x = (mode->y*1024)/y; mp->lcd[LCD_VertStretch] = 0xC0000000|x; } mp->lcd[LCD_ExtVertStretch] &= ~0x00400400; /* * The x value doesn't seem to be available on all * chips so intuit it from the y value which seems to * be reliable. 
*/ mp->lcd[LCD_HorzStretch] &= ~0xC00000FF; x = (mp->lcd[LCD_HorzStretch]>>20) & 0xFF; if(x == 0){ switch(y){ default: break; case 480: x = 640; break; case 600: x = 800; break; case 768: x = 1024; break; case 1024: x = 1280; break; } } else x = (x+1)*8; if(mode->x < x){ x = (mode->x*4096)/x; mp->lcd[LCD_HorzStretch] |= 0xC0000000|x; } } if(ctlr->flag&Uenhanced) setdsp(vga, ctlr); ctlr->flag |= Finit; } static void load(Vga* vga, Ctlr* ctlr) { Mach64xx *mp; int i; mp = vga->private; /* * Unlock the CRTC and LCD registers. */ mp->iow32(mp, CrtcGenCntl, mp->ior32(mp, CrtcGenCntl)&~0x00400000); if(mp->lcdon) lcdw32(mp, LCD_GenCtrl, mp->lcd[LCD_GenCtrl]|0x80000000); /* * Always use an aperture on a 16Mb boundary. */ if(ctlr->flag & Ulinear) mp->reg[ConfigCntl] = ((vga->vmb/(4*1024*1024))<<4)|0x02; mp->iow32(mp, ConfigCntl, mp->reg[ConfigCntl]); mp->iow32(mp, GenTestCntl, 0); mp->iow32(mp, GenTestCntl, 0x100); if((ctlr->flag&Uenhanced) == 0) mp->iow32(mp, MemCntl, mp->reg[MemCntl] & ~0x70000); mp->iow32(mp, BusCntl, mp->reg[BusCntl]); mp->iow32(mp, HTotalDisp, mp->reg[HTotalDisp]); mp->iow32(mp, HSyncStrtWid, mp->reg[HSyncStrtWid]); mp->iow32(mp, VTotalDisp, mp->reg[VTotalDisp]); mp->iow32(mp, VSyncStrtWid, mp->reg[VSyncStrtWid]); mp->iow32(mp, IntCntl, mp->reg[IntCntl]); mp->iow32(mp, OffPitch, mp->reg[OffPitch]); if(mp->lcdon){ for(i=0; i<Nlcd; i++) lcdw32(mp, i, mp->lcd[i]); } mp->iow32(mp, GenTestCntl, mp->reg[GenTestCntl]); mp->iow32(mp, ConfigCntl, mp->reg[ConfigCntl]); mp->iow32(mp, CrtcGenCntl, mp->reg[CrtcGenCntl]); mp->iow32(mp, OvrClr, mp->reg[OvrClr]); mp->iow32(mp, OvrWidLR, mp->reg[OvrWidLR]); mp->iow32(mp, OvrWidTB, mp->reg[OvrWidTB]); if(ctlr->flag&Uenhanced){ mp->iow32(mp, DacRegs, mp->reg[DacRegs]); mp->iow32(mp, DacCntl, mp->reg[DacCntl]); mp->iow32(mp, CrtcGenCntl, mp->reg[CrtcGenCntl]&~0x02000000); mp->iow32(mp, DspOnOff, mp->reg[DspOnOff]); mp->iow32(mp, DspConfig, mp->reg[DspConfig]); mp->iow32(mp, CrtcGenCntl, mp->reg[CrtcGenCntl]); pllw(mp, PLLx, mp->pll[PLLx]); } pllw(mp, PLLn2, mp->pll[PLLn2]); pllw(mp, PLLp, mp->pll[PLLp]); pllw(mp, PLLn3, mp->pll[PLLn3]); mp->iow32(mp, ClockCntl, mp->reg[ClockCntl]); mp->iow32(mp, ClockCntl, 0x40|mp->reg[ClockCntl]); mp->iow32(mp, DpPixWidth, mp->reg[DpPixWidth]); if(vga->mode->z > 8){ int sh, i; /* * We need to initialize the palette, since the DACs use it * in true color modes. First see if the card supports an * 8-bit DAC. */ mp->iow32(mp, DacCntl, mp->reg[DacCntl] | 0x100); if(mp->ior32(mp, DacCntl)&0x100){ /* card appears to support it */ vgactlw("palettedepth", "8"); mp->reg[DacCntl] |= 0x100; } if(mp->reg[DacCntl] & 0x100) sh = 0; /* 8-bit DAC */ else sh = 2; /* 6-bit DAC */ for(i=0; i<256; i++) setpalette(i, i>>sh, i>>sh, i>>sh); } ctlr->flag |= Fload; } static void pixelclock(Vga* vga, Ctlr* ctlr) { Mach64xx *mp; ushort table, s; int memclk, ref_freq, ref_divider, min_freq, max_freq; int feedback, nmult, pd, post, value; int clock; /* * Find the pixel clock from the BIOS and current * settings. Lifted from the ATI-supplied example code. * The clocks stored in the BIOS table are in kHz/10. * * This is the clock LCDs use in vgadb to set the DSP * values. 
*/ mp = vga->private; /* * GetPLLInfo() */ table = *(ushort*)readbios(sizeof table, 0xc0048); trace("rom table offset %uX\n", table); table = *(ushort*)readbios(sizeof table, 0xc0000+table+16); trace("freq table offset %uX\n", table); s = *(ushort*)readbios(sizeof s, 0xc0000+table+18); memclk = s*10000; trace("memclk %ud\n", memclk); s = *(ushort*)readbios(sizeof s, 0xc0000+table+8); ref_freq = s*10000; trace("ref_freq %ud\n", ref_freq); s = *(ushort*)readbios(sizeof s, 0xc0000+table+10); ref_divider = s; trace("ref_divider %ud\n", ref_divider); s = *(ushort*)readbios(sizeof s, 0xc0000+table+2); min_freq = s*10000; trace("min_freq %ud\n", min_freq); s = *(ushort*)readbios(sizeof s, 0xc0000+table+4); max_freq = s*10000; trace("max_freq %ud\n", max_freq); /* * GetDivider() */ pd = mp->pll[PLLp] & 0x03; value = (mp->pll[PLLx] & 0x10)>>2; trace("pd %uX value %uX (|%d)\n", pd, value, value|pd); value |= pd; post = 0; switch(value){ case 0: post = 1; break; case 1: post = 2; break; case 2: post = 4; break; case 3: post = 8; break; case 4: post = 3; break; case 5: post = 0; break; case 6: post = 6; break; case 7: post = 12; break; } trace("post = %d\n", post); feedback = mp->pll[PLLn0]; if(mp->pll[PLLx] & 0x08) nmult = 4; else nmult = 2; clock = (ref_freq/10000)*nmult*feedback; clock /= ref_divider*post; clock *= 10000; Bprint(&stdout, "%s pixel clock = %ud\n", ctlr->name, clock); } static void dumpmach64bios(Mach64xx*); static void dump(Vga* vga, Ctlr* ctlr) { Mach64xx *mp; int i, m, n, p; double f; static int first = 1; if((mp = vga->private) == 0) return; Bprint(&stdout, "%s pci %p io %lux %s\n", ctlr->name, mp->pci, mp->io, mp->ior32 == pciior32 ? "pciregs" : "ioregs"); if(mp->pci) Bprint(&stdout, "%s ccru %ux\n", ctlr->name, mp->pci->ccru); for(i = 0; i < Nreg; i++) Bprint(&stdout, "%s %-*s%.8luX\n", ctlr->name, 20, iorname[i], mp->reg[i]); printitem(ctlr->name, "PLL"); for(i = 0; i < Npll; i++) printreg(mp->pll[i]); Bprint(&stdout, "\n"); switch(mp->reg[ConfigChipId] & 0xFFFF){ default: break; case ('L'<<8)|'B': /* 4C42: Rage LTPro AGP */ case ('L'<<8)|'I': /* 4C49: Rage 3D LTPro */ case ('L'<<8)|'M': /* 4C4D: Rage Mobility */ case ('L'<<8)|'P': /* 4C50: Rage 3D LTPro */ for(i = 0; i < Nlcd; i++) Bprint(&stdout, "%s %-*s%.8luX\n", ctlr->name, 20, lcdname[i], mp->lcd[i]); break; } /* * (2*r*n) * f = ------- * (m*p) */ m = mp->pll[2]; for(i = 0; i < 4; i++){ n = mp->pll[7+i]; p = (mp->pll[6]>>(i*2)) & 0x03; p |= (mp->pll[11]>>(2+i)) & 0x04; switch(p){ case 0: case 1: case 2: case 3: p = 1<<p; break; case 4+0: p = 3; break; case 4+2: p = 6; break; case 4+3: p = 12; break; default: case 4+1: p = -1; break; } if(m*p == 0) Bprint(&stdout, "unknown VCLK%d\n", i); else { f = (2.0*RefFreq*n)/(m*p) + 0.5; Bprint(&stdout, "%s VCLK%d\t%ud\n", ctlr->name, i, (int)f); } } pixelclock(vga, ctlr); if(first) { first = 0; dumpmach64bios(mp); } } enum { ClockFixed=0, ClockIcs2595, ClockStg1703, ClockCh8398, ClockInternal, ClockAtt20c408, ClockIbmrgb514 }; /* * mostly derived from the xfree86 probe routines. */ static void dumpmach64bios(Mach64xx *mp) { int i, romtable, clocktable, freqtable, lcdtable, lcdpanel; uchar bios[0x10000]; memmove(bios, readbios(sizeof bios, 0xC0000), sizeof bios); /* find magic string */ for(i=0; i<1024; i++) if(strncmp((char*)bios+i, " 761295520", 10) == 0) break; if(i==1024) { Bprint(&stdout, "no ATI bios found\n"); return; } /* this is horribly endian dependent. sorry. 
*/ romtable = *(ushort*)(bios+0x48); if(romtable+0x12 > sizeof(bios)) { Bprint(&stdout, "couldn't find ATI rom table\n"); return; } clocktable = *(ushort*)(bios+romtable+0x10); if(clocktable+0x0C > sizeof(bios)) { Bprint(&stdout, "couldn't find ATI clock table\n"); return; } freqtable = *(ushort*)(bios+clocktable-2); if(freqtable+0x20 > sizeof(bios)) { Bprint(&stdout, "couldn't find ATI frequency table\n"); return; } Bprint(&stdout, "ATI BIOS rom 0x%x freq 0x%x clock 0x%x\n", romtable, freqtable, clocktable); Bprint(&stdout, "clocks:"); for(i=0; i<16; i++) Bprint(&stdout, " %d", *(ushort*)(bios+freqtable+2*i)); Bprint(&stdout, "\n"); Bprint(&stdout, "programmable clock: %d\n", bios[clocktable]); Bprint(&stdout, "clock to program: %d\n", bios[clocktable+6]); if(*(ushort*)(bios+clocktable+8) != 1430) { Bprint(&stdout, "reference numerator: %d\n", *(ushort*)(bios+clocktable+8)*10); Bprint(&stdout, "reference denominator: 1\n"); } else { Bprint(&stdout, "default reference numerator: 157500\n"); Bprint(&stdout, "default reference denominator: 11\n"); } switch(bios[clocktable]) { case ClockIcs2595: Bprint(&stdout, "ics2595\n"); Bprint(&stdout, "reference divider: %d\n", *(ushort*)(bios+clocktable+0x0A)); break; case ClockStg1703: Bprint(&stdout, "stg1703\n"); break; case ClockCh8398: Bprint(&stdout, "ch8398\n"); break; case ClockInternal: Bprint(&stdout, "internal clock\n"); Bprint(&stdout, "reference divider in plls\n"); break; case ClockAtt20c408: Bprint(&stdout, "att 20c408\n"); break; case ClockIbmrgb514: Bprint(&stdout, "ibm rgb514\n"); Bprint(&stdout, "clock to program = 7\n"); break; default: Bprint(&stdout, "unknown clock\n"); break; } USED(mp); if(1 || mp->lcdpanelid) { lcdtable = *(ushort*)(bios+0x78); if(lcdtable+5 > sizeof bios || lcdtable+bios[lcdtable+5] > sizeof bios) { Bprint(&stdout, "can't find lcd bios table\n"); goto NoLcd; } lcdpanel = *(ushort*)(bios+lcdtable+0x0A); if(lcdpanel+0x1D > sizeof bios /*|| bios[lcdpanel] != mp->lcdpanelid*/) { Bprint(&stdout, "can't find lcd bios table0\n"); goto NoLcd; } Bprint(&stdout, "panelid %d x %d y %d\n", bios[lcdpanel], *(ushort*)(bios+lcdpanel+0x19), *(ushort*)(bios+lcdpanel+0x1B)); } NoLcd:; } Ctlr mach64xx = { "mach64xx", /* name */ snarf, /* snarf */ 0, /* options */ init, /* init */ load, /* load */ dump, /* dump */ }; Ctlr mach64xxhwgc = { "mach64xxhwgc", /* name */ 0, /* snarf */ 0, /* options */ 0, /* init */ 0, /* load */ 0, /* dump */ };
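/*
 * A minimal standalone sketch, not part of the driver above: the VCLK
 * computation that dump() performs, isolated so the f = (2*r*n)/(m*p)
 * comment can be checked in user space. RefFreq is a constant defined
 * elsewhere in this program, so it is passed in as a parameter here;
 * the numbers in main() are made-up examples, not hardware readings.
 */
#include <stdio.h>

/* decode the 3-bit post-divider code, as in the switch in dump() */
static int
postdiv(int p)
{
	switch(p){
	case 0:
	case 1:
	case 2:
	case 3:
		return 1<<p;		/* 1, 2, 4, 8 */
	case 4+0:
		return 3;
	case 4+2:
		return 6;
	case 4+3:
		return 12;
	default:			/* 4+1 is reserved */
		return -1;
	}
}

/* f = (2*r*n)/(m*p), rounded to the nearest integer; -1 if unknown */
static int
vclk(double reffreq, int m, int n, int p)
{
	int d;

	d = postdiv(p);
	if(m <= 0 || d <= 0)
		return -1;
	return (int)((2.0*reffreq*n)/(m*d) + 0.5);
}

int
main(void)
{
	/* illustrative values: m from PLL reg 2, n from regs 7..10 */
	printf("VCLK %d\n", vclk(14318.0, 46, 190, 1));
	return 0;
}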
brho/plan9
sys/src/cmd/aux/vga/mach64xx.c
C
gpl-2.0
30,679
/* soundeffects.c
 * An example of how to use libmikmod to play sound effects.
 *
 * (C) 2004, Raphael Assenat (raph@raphnet.net)
 *
 * This example is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */
#include <stdlib.h>
#include <stdio.h>
#include <mikmod.h>

#if !defined _WIN32 && !defined _WIN64
#include <unistd.h>		/* for usleep() */
#define MikMod_Sleep(ns)	usleep(ns)
#else
#define MikMod_Sleep(ns)	Sleep(ns / 1000)
#endif

SAMPLE *Load(const char *fn)
{
	char *data_buf;
	long data_len;
	FILE *fptr;

	/* open the file */
	fptr = fopen(fn, "rb");
	if (fptr == NULL) {
		perror("fopen");
		return 0;
	}

	/* calculate the file size */
	fseek(fptr, 0, SEEK_END);
	data_len = ftell(fptr);
	fseek(fptr, 0, SEEK_SET);

	/* allocate a buffer and load the file into it */
	data_buf = (char *) malloc(data_len);
	if (data_buf == NULL) {
		perror("malloc");
		fclose(fptr);
		return 0;
	}
	if (fread(data_buf, data_len, 1, fptr) != 1) {
		perror("fread");
		fclose(fptr);
		free(data_buf);
		return 0;
	}
	fclose(fptr);

	return Sample_LoadMem(data_buf, data_len);
}

int main(void)
{
	/* sound effects */
	SAMPLE *sfx1, *sfx2;
	/* voices */
	int v1, v2;
	int i;

	/* register all the drivers */
	MikMod_RegisterAllDrivers();

	/* initialize the library */
	md_mode |= DMODE_SOFT_SNDFX;
	if (MikMod_Init("")) {
		fprintf(stderr, "Could not initialize sound, reason: %s\n",
			MikMod_strerror(MikMod_errno));
		return 1;
	}

	/* load samples */
	sfx1 = Load("first.wav");
	if (!sfx1) {
		MikMod_Exit();
		fprintf(stderr, "Could not load the first sound, reason: %s\n",
			MikMod_strerror(MikMod_errno));
		return 1;
	}
	sfx2 = Load("second.wav");
	if (!sfx2) {
		Sample_Free(sfx1);
		MikMod_Exit();
		fprintf(stderr, "Could not load the second sound, reason: %s\n",
			MikMod_strerror(MikMod_errno));
		return 1;
	}

	/* reserve 2 voices for sound effects */
	MikMod_SetNumVoices(-1, 2);

	/* get ready to play */
	MikMod_EnableOutput();

	/* play first sample */
	v1 = Sample_Play(sfx1, 0, 0);
	do {
		MikMod_Update();
		MikMod_Sleep(100000);
	} while (!Voice_Stopped(v1));
	for (i = 0; i < 10; i++) {
		MikMod_Update();
		MikMod_Sleep(100000);
	}

	/* about one second later, play the second sample */
	v2 = Sample_Play(sfx2, 0, 0);
	do {
		MikMod_Update();
		MikMod_Sleep(100000);
	} while (!Voice_Stopped(v2));
	for (i = 0; i < 10; i++) {
		MikMod_Update();
		MikMod_Sleep(100000);
	}

	MikMod_DisableOutput();

	Sample_Free(sfx2);
	Sample_Free(sfx1);

	MikMod_Exit();
	return 0;
}
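/*
 * A minimal sketch, not part of the example above: the polling idiom
 * that main() repeats for each sample, factored into a helper. It uses
 * only calls already present in this file (MikMod_Update, MikMod_Sleep,
 * Voice_Stopped); the extra ten updates after the voice stops keep
 * feeding the driver while its output buffer drains.
 */
static void wait_voice(int voice)
{
	int i;

	do {
		MikMod_Update();	/* push mixed audio to the driver */
		MikMod_Sleep(100000);	/* 100 ms */
	} while (!Voice_Stopped(voice));
	for (i = 0; i < 10; i++) {	/* ~1 s of buffer draining */
		MikMod_Update();
		MikMod_Sleep(100000);
	}
}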
gameblabla/methane
source/gcw/libmikmod-3.3.7/examples/soundeffects/soundeffects.c
C
gpl-2.0
2,980
/*++ drivers/i2c/busses/wmt-i2c-bus-3.c Copyright (c) 2013 WonderMedia Technologies, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. WonderMedia Technologies, Inc. 10F, 529, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C. --*/ /* Include your headers here*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> /* #include <linux/i2c-id.h> */ #include <linux/init.h> #include <linux/time.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <mach/hardware.h> #include <asm/irq.h> #include <mach/irqs.h> #include <mach/wmt-i2c-bus.h> #include <linux/slab.h> #include <linux/pm.h> #include <linux/syscore_ops.h> #ifdef __KERNEL__ #ifdef DEBUG #define DPRINTK printk #else #define DPRINTK(x...) #endif #else #define DPRINTK printf #endif #define MAX_BUS_READY_CNT 50 /* jiffy*/ #define MAX_TX_TIMEOUT 500 /* ms*/ #define MAX_RX_TIMEOUT 500 /* ms*/ #define CTRL_GPIO GPIO_CTRL_GP23_I2C3_BYTE_ADDR #define PU_EN_GPIO PULL_EN_GP23_I2C3_BYTE_ADDR #define PU_CTRL_GPIO PULL_CTRL_GP23_I2C3_BYTE_ADDR #define USE_UBOOT_PARA struct wmt_i2c_s { struct i2c_regs_s *regs; int irq_no ; enum i2c_mode_e i2c_mode ; int volatile isr_nack ; int volatile isr_byte_end ; int volatile isr_timeout ; int volatile isr_int_pending ; }; static int i2c_wmt_wait_bus_not_busy(void); extern int wmt_getsyspara(char *varname, unsigned char *varval, int *varlen); static unsigned int speed_mode = 1; static unsigned int is_master = 1;/*master:1, slave:0*/ unsigned int wmt_i2c3_is_master = 1; unsigned int wmt_i2c3_speed_mode = 0; static unsigned int wmt_i2c3_power_state = 0;/*0:power on, 1:suspend, 2:shutdown*/ EXPORT_SYMBOL(wmt_i2c3_is_master); /**/ /* variable*/ /*-------------------------------------------------*/ static volatile struct wmt_i2c_s i2c ; DECLARE_WAIT_QUEUE_HEAD(i2c3_wait); /* spinlock_t i2c3_wmt_irqlock = SPIN_LOCK_UNLOCKED; */ static DEFINE_SPINLOCK(i2c3_wmt_irqlock); static struct list_head wmt_i2c_fifohead; /* static spinlock_t i2c_fifolock = SPIN_LOCK_UNLOCKED; */ static DEFINE_SPINLOCK(i2c_fifolock); static int i2c_wmt_read_buf( unsigned int slave_addr, char *buf, unsigned int length, int restart, int last ); static int i2c_wmt_write_buf( unsigned int slave_addr, char *buf, unsigned int length, int restart, int last ); static void i2c_wmt_set_mode(enum i2c_mode_e mode /*!<; //[IN] mode */) { if (is_master == 0) return; i2c.i2c_mode = mode ; if (i2c.i2c_mode == I2C_STANDARD_MODE) { DPRINTK("I2C: set standard mode \n"); i2c.regs->tr_reg = I2C_TR_STD_VALUE ; /* 0x8041*/ } else if (i2c.i2c_mode == I2C_FAST_MODE) { DPRINTK("I2C: set fast mode \n"); i2c.regs->tr_reg = I2C_TR_FAST_VALUE ; /* 0x8011*/ } } static int i2c_send_request( struct i2c_msg *msg, int msg_num, int non_block, void (*callback)(void *data), void *data ) { struct wmt_i2cbusfifo *i2c_fifo_head; struct i2c_msg *pmsg = NULL; int ret = 0; int restart = 0; int last = 0; unsigned long flags; int slave_addr = msg[0].addr; if 
(slave_addr == WMT_I2C_API_I2C_ADDR) return ret ; if (wmt_i2c3_power_state == 2) { printk("I2C3 has been shutdown\n"); return -EIO; } i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; i2c.isr_int_pending = 0; i2c_fifo_head = kzalloc(sizeof(struct wmt_i2cbusfifo), GFP_ATOMIC); INIT_LIST_HEAD(&i2c_fifo_head->busfifohead); pmsg = &msg[0]; i2c_fifo_head->msg = pmsg; i2c_fifo_head->msg_num = msg_num; spin_lock_irqsave(&i2c_fifolock, flags); if (list_empty(&wmt_i2c_fifohead)) { i2c_wmt_wait_bus_not_busy(); pmsg = &msg[0]; i2c_fifo_head->xfer_length = 1; i2c_fifo_head->xfer_msgnum = 0; i2c_fifo_head->restart = 0; i2c_fifo_head->non_block = non_block; if (non_block == 1) { i2c_fifo_head->callback = callback; i2c_fifo_head->data = data; } else { i2c_fifo_head->callback = 0; i2c_fifo_head->data = 0; } list_add_tail(&i2c_fifo_head->busfifohead, &wmt_i2c_fifohead); if (pmsg->flags & I2C_M_RD) { i2c_fifo_head->xfer_length = 1; ret = i2c_wmt_read_buf(pmsg->addr, pmsg->buf, pmsg->len, restart, last); } else { i2c_fifo_head->xfer_length = 1; if (pmsg->flags & I2C_M_NOSTART) i2c_fifo_head->restart = 1; else i2c_fifo_head->restart = 0; ret = i2c_wmt_write_buf(pmsg->addr, pmsg->buf, pmsg->len, restart, last); } } else { i2c_fifo_head->xfer_length = 0; i2c_fifo_head->xfer_msgnum = 0; i2c_fifo_head->restart = 0; i2c_fifo_head->non_block = non_block; if (non_block == 1) { i2c_fifo_head->callback = callback; i2c_fifo_head->data = data; } else { i2c_fifo_head->callback = 0; i2c_fifo_head->data = 0; } list_add_tail(&i2c_fifo_head->busfifohead, &wmt_i2c_fifohead); } spin_unlock_irqrestore(&i2c_fifolock, flags); if (non_block == 0) { wait_event(i2c3_wait, i2c.isr_int_pending); ret = msg_num; if (i2c.isr_nack == 1) { DPRINTK("i2c_err : write NACK error (rx) \n\r") ; ret = -EIO ; } if (i2c.isr_timeout == 1) { DPRINTK("i2c_err : write SCL timeout error (rx)\n\r") ; ret = -ETIMEDOUT ; } } return ret; } static int i2c_wmt_read_buf( unsigned int slave_addr, char *buf, unsigned int length, int restart, int last ) { unsigned short tcr_value; int ret = 0; DPRINTK("[%s]:length = %d , slave_addr = %x\n", __func__, length , slave_addr); if (length <=0) return -1; i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; /*i2c.isr_int_pending = 0;*/ i2c.regs->cr_reg &= ~(I2C_CR_TX_END); /*clear Tx end*/ i2c.regs->cr_reg &= ~(I2C_CR_TX_NEXT_NO_ACK); /*clear NEXT_NO_ACK*/ if (length <=0) return -1; i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; /*i2c.isr_int_pending = 0;*/ tcr_value = 0 ; if (i2c.i2c_mode == I2C_STANDARD_MODE) tcr_value = (unsigned short)(I2C_TCR_STANDARD_MODE|I2C_TCR_MASTER_READ |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; else if (i2c.i2c_mode == I2C_FAST_MODE) tcr_value = (unsigned short)(I2C_TCR_FAST_MODE|I2C_TCR_MASTER_READ |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; if (length == 1) i2c.regs->cr_reg |= I2C_CR_TX_NEXT_NO_ACK; /*only 8-bit to read*/ i2c.regs->tcr_reg = tcr_value ; return ret; } static int i2c_wmt_write_buf( unsigned int slave_addr, char *buf, unsigned int length, int restart, int last ) { unsigned short tcr_value ; unsigned int xfer_length ; int ret = 0 ; DPRINTK("[%s]length = %d , slave_addr = %x\n", __func__, length , slave_addr); if (slave_addr == WMT_I2C_API_I2C_ADDR) return ret ; if (is_master == 0) return -ENXIO; /* special case allow length:0, for i2c_smbus_xfer*/ /**/ if (length < 0) return -1 ; xfer_length = 0 ; /* for array index and also for checking counting*/ i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; /*i2c.isr_int_pending 
= 0;*/ i2c.regs->cr_reg &= ~(I2C_CR_TX_END); /*clear Tx end*/ i2c.regs->cr_reg &= ~(I2C_CR_TX_NEXT_NO_ACK); /*clear NEXT_NO_ACK*/ if (length == 0) i2c.regs->cdr_reg = 0 ; else i2c.regs->cdr_reg = (unsigned short)(buf[xfer_length] & I2C_CDR_DATA_WRITE_MASK) ; tcr_value = 0 ; if (i2c.i2c_mode == I2C_STANDARD_MODE) tcr_value = (unsigned short)(I2C_TCR_STANDARD_MODE|I2C_TCR_MASTER_WRITE |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; else if (i2c.i2c_mode == I2C_FAST_MODE) tcr_value = (unsigned short)(I2C_TCR_FAST_MODE|I2C_TCR_MASTER_WRITE |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; i2c.regs->tcr_reg = tcr_value ; ret = 0 ; return ret; } static int i2c_wmt_read_msg( unsigned int slave_addr, /*!<; //[IN] Salve address */ char *buf, /*!<; //[OUT] Pointer to data */ unsigned int length, /*!<; //Data length */ int restart, /*!<; //Need to restart after a complete read */ int last /*!<; //Last read */ ) { unsigned short tcr_value ; unsigned int xfer_length ; int is_timeout ; int ret = 0 ; int wait_event_result = 0 ; if (is_master == 0) return -ENXIO; if (length <= 0) return -1 ; xfer_length = 0 ; if (restart == 0) ret = i2c_wmt_wait_bus_not_busy() ; if (ret < 0) return ret ; i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; i2c.isr_int_pending = 0; i2c.regs->cr_reg &= ~(I2C_CR_TX_END); /*clear Tx end*/ i2c.regs->cr_reg &= ~(I2C_CR_TX_NEXT_NO_ACK); /*clear NEXT_NO_ACK*/ if (restart == 0) i2c.regs->cr_reg |= (I2C_CR_CPU_RDY); /*release SCL*/ tcr_value = 0 ; if (i2c.i2c_mode == I2C_STANDARD_MODE) { tcr_value = (unsigned short)(I2C_TCR_STANDARD_MODE|I2C_TCR_MASTER_READ |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; } else if (i2c.i2c_mode == I2C_FAST_MODE) { tcr_value = (unsigned short)(I2C_TCR_FAST_MODE|I2C_TCR_MASTER_READ |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; } if (length == 1) i2c.regs->cr_reg |= I2C_CR_TX_NEXT_NO_ACK; /*only 8-bit to read*/ i2c.regs->tcr_reg = tcr_value ; /*repeat start case*/ if (restart == 1) i2c.regs->cr_reg |= (I2C_CR_CPU_RDY); /*release SCL*/ ret = 0 ; for (; ;) { is_timeout = 0 ; wait_event_result = wait_event_interruptible_timeout(i2c3_wait, i2c.isr_int_pending , (MAX_RX_TIMEOUT * HZ / 1000)) ; if (likely(wait_event_result > 0)) { DPRINTK("I2C: wait interrupted (rx) \n"); ret = 0 ; } else if (likely(i2c.isr_int_pending == 0)) { DPRINTK("I2C: wait timeout (rx) \n"); is_timeout = 1 ; ret = -ETIMEDOUT ; } /**/ /* fail case*/ /**/ if (i2c.isr_nack == 1) { DPRINTK("i2c_err : write NACK error (rx) \n\r") ; ret = -EIO ; break ; } if (i2c.isr_timeout == 1) { DPRINTK("i2c_err : write SCL timeout error (rx)\n\r") ; msleep(10); ret = -ETIMEDOUT ; break ; } if (is_timeout == 1) { DPRINTK("i2c_err: write software timeout error (rx) \n\r") ; ret = -ETIMEDOUT ; break ; } /**/ /* pass case*/ /**/ if (i2c.isr_byte_end == 1) { buf[xfer_length] = (i2c.regs->cdr_reg >> 8) ; ++xfer_length ; DPRINTK("i2c_test: received BYTE_END\n\r"); } i2c.isr_int_pending = 0; i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; if (length > xfer_length) { if ((length - 1) == xfer_length) { /* next read is the last one*/ i2c.regs->cr_reg |= (I2C_CR_TX_NEXT_NO_ACK | I2C_CR_CPU_RDY); DPRINTK("i2c_test: set CPU_RDY & TX_ACK. next data is last.\r\n"); } else { i2c.regs->cr_reg |= I2C_CR_CPU_RDY ; DPRINTK("i2c_test: more data to read. only set CPU_RDY. \r\n"); } } else if (length == xfer_length) { /* end rx xfer*/ if (last == 1) { /* stop case*/ DPRINTK("i2c_test: read completed \r\n"); break ; } else { /* restart case*/ /* ??? 
how to handle the restart after read ?*/ DPRINTK("i2c_test: RX ReStart Case \r\n") ; break ; } } else { DPRINTK("i2c_err : read known error\n\r") ; ret = -EIO ; break ; } } DPRINTK("i2c_test: read sequence completed\n\r"); return ret ; } static int i2c_wmt_write_msg( unsigned int slave_addr, /*!<; //[IN] Salve address */ char *buf, /*!<; //[OUT] Pointer to data */ unsigned int length, /*!<; //Data length */ int restart, /*!<; //Need to restart after a complete write */ int last /*!<; //Last read */ ) { unsigned short tcr_value ; unsigned int xfer_length ; int is_timeout ; int ret = 0 ; int wait_event_result ; DPRINTK("length = %d , slave_addr = %x\n", length , slave_addr); if (slave_addr == WMT_I2C_API_I2C_ADDR) return ret ; if (is_master == 0) return -ENXIO; /* special case allow length:0, for i2c_smbus_xfer*/ /**/ if (length < 0) return -1 ; xfer_length = 0 ; /* for array index and also for checking counting*/ if (restart == 0) ret = i2c_wmt_wait_bus_not_busy() ; if (ret < 0) return ret ; i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; i2c.isr_int_pending = 0; /**/ /* special case allow length:0, for i2c_smbus_xfer*/ /**/ if (length == 0) i2c.regs->cdr_reg = 0 ; else i2c.regs->cdr_reg = (unsigned short)(buf[xfer_length] & I2C_CDR_DATA_WRITE_MASK) ; if (restart == 0) { i2c.regs->cr_reg &= ~(I2C_CR_TX_END); /*clear Tx end*/ i2c.regs->cr_reg |= (I2C_CR_CPU_RDY); /*release SCL*/ } /**/ /* I2C: Set transfer mode [standard/fast]*/ /**/ tcr_value = 0 ; if (i2c.i2c_mode == I2C_STANDARD_MODE) tcr_value = (unsigned short)(I2C_TCR_STANDARD_MODE|I2C_TCR_MASTER_WRITE |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; else if (i2c.i2c_mode == I2C_FAST_MODE) tcr_value = (unsigned short)(I2C_TCR_FAST_MODE|I2C_TCR_MASTER_WRITE |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; i2c.regs->tcr_reg = tcr_value ; if (restart == 1) i2c.regs->cr_reg |= I2C_CR_CPU_RDY ; ret = 0 ; for (; ;) { is_timeout = 0 ; /**/ /* I2C: Wait for interrupt. 
if ( i2c.isr_int_pending == 1 ) ==> an interrupt exsits.*/ /**/ wait_event_result = wait_event_interruptible_timeout(i2c3_wait, i2c.isr_int_pending , (MAX_TX_TIMEOUT * HZ / 1000)) ; if (likely(wait_event_result > 0)) { DPRINTK("I2C: wait interrupted (tx)\n"); ret = 0 ; } else if (likely(i2c.isr_int_pending == 0)) { DPRINTK("I2C: wait timeout (tx) \n"); is_timeout = 1 ; ret = -ETIMEDOUT ; } /**/ /* fail case*/ /**/ if (i2c.isr_nack == 1) { DPRINTK("i2c_err : write NACK error (tx) \n\r") ; ret = -EIO ; break ; } if (i2c.isr_timeout == 1) { DPRINTK("i2c_err : write SCL timeout error (tx)\n\r") ; msleep(10); ret = -ETIMEDOUT ; break ; } if (is_timeout == 1) { DPRINTK("i2c_err : write software timeout error (tx)\n\r") ; ret = -ETIMEDOUT ; break ; } /**/ /* pass case*/ /**/ if (i2c.isr_byte_end == 1) { DPRINTK("i2c: isr end byte (tx)\n\r") ; ++xfer_length ; } i2c.isr_int_pending = 0 ; i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; if ((i2c.regs->csr_reg & I2C_CSR_RCV_ACK_MASK) == I2C_CSR_RCV_NOT_ACK) { DPRINTK("i2c_err : write RCV NACK error\n\r") ; ret = -EIO ; break ; } /**/ /* special case allow length:0, for i2c_smbus_xfer*/ /**/ if (length == 0) { i2c.regs->cr_reg = (I2C_CR_TX_END|I2C_CR_CPU_RDY|I2C_CR_ENABLE) ; break ; } if (length > xfer_length) { i2c.regs->cdr_reg = (unsigned short) (buf[xfer_length] & I2C_CDR_DATA_WRITE_MASK) ; i2c.regs->cr_reg = (I2C_CR_CPU_RDY | I2C_CR_ENABLE) ; DPRINTK("i2c_test: write register data \n\r") ; } else if (length == xfer_length) { /* end tx xfer*/ if (last == 1) { /* stop case*/ i2c.regs->cr_reg = (I2C_CR_TX_END|I2C_CR_CPU_RDY|I2C_CR_ENABLE) ; DPRINTK("i2c_test: finish write \n\r") ; break ; } else { /* restart case*/ /* handle the restart for first write then the next is read*/ i2c.regs->cr_reg = (I2C_CR_ENABLE) ; DPRINTK("i2c_test: tx restart Case \n\r") ; break ; } } else { DPRINTK("i2c_err : write unknown error\n\r") ; ret = -EIO ; break ; } } ; DPRINTK("i2c_test: write sequence completed\n\r"); return ret ; } static int i2c_wmt_wait_bus_not_busy(void) { int ret ; int cnt ; ret = 0 ; cnt = 0 ; while (1) { if ((REG16_VAL(I2C3_CSR_ADDR) & I2C_STATUS_MASK) == I2C_READY) { ret = 0; break ; } cnt++ ; if (cnt > MAX_BUS_READY_CNT) { ret = (-EBUSY) ; printk("i2c_err 3: wait but not ready time-out\n\r") ; cnt = 0; break; } } return ret ; } static void i2c_wmt_reset(void) { unsigned short tmp ; if (is_master == 0) return; /**/ /* software initial*/ /**/ i2c.regs = (struct i2c_regs_s *)I2C3_BASE_ADDR ; i2c.irq_no = IRQ_I2C3 ; if (speed_mode == 0) i2c.i2c_mode = I2C_STANDARD_MODE ; else i2c.i2c_mode = I2C_FAST_MODE ; i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; i2c.isr_int_pending = 0; /**/ /* hardware initial*/ /**/ i2c.regs->cr_reg = 0 ; i2c.regs->div_reg = APB_96M_I2C_DIV ; i2c.regs->isr_reg = I2C_ISR_ALL_WRITE_CLEAR ; /* 0x0007*/ i2c.regs->imr_reg = I2C_IMR_ALL_ENABLE ; /* 0x0007*/ i2c.regs->cr_reg = I2C_CR_ENABLE ; tmp = i2c.regs->csr_reg ; /* read clear*/ i2c.regs->isr_reg = I2C_ISR_ALL_WRITE_CLEAR ; /* 0x0007*/ if (i2c.i2c_mode == I2C_STANDARD_MODE) i2c.regs->tr_reg = I2C_TR_STD_VALUE ; /* 0x8041*/ else if (i2c.i2c_mode == I2C_FAST_MODE) i2c.regs->tr_reg = I2C_TR_FAST_VALUE ; /* 0x8011*/ DPRINTK("Resetting I2C Controller Unit\n"); return ; } static int wmt_i2c_transfer_msg(struct wmt_i2cbusfifo *fifo_head) { int xfer_length = fifo_head->xfer_length; int xfer_msgnum = fifo_head->xfer_msgnum; struct i2c_msg *pmsg = &fifo_head->msg[xfer_msgnum]; int restart = fifo_head->restart; unsigned short tcr_value; unsigned short 
slave_addr = pmsg->addr; int length = pmsg->len; int ret = 0; if (pmsg->flags & I2C_M_RD) { if (restart == 0) i2c_wmt_wait_bus_not_busy(); i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; /*i2c.isr_int_pending = 0;*/ i2c.regs->cr_reg &= ~(I2C_CR_TX_END); /*clear Tx end*/ i2c.regs->cr_reg &= ~(I2C_CR_TX_NEXT_NO_ACK); /*clear NEXT_NO_ACK*/ if (restart == 0) i2c.regs->cr_reg |= (I2C_CR_CPU_RDY); /*release SCL*/ tcr_value = 0 ; if (i2c.i2c_mode == I2C_STANDARD_MODE) { tcr_value = (unsigned short)(I2C_TCR_STANDARD_MODE|I2C_TCR_MASTER_READ |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; } else if (i2c.i2c_mode == I2C_FAST_MODE) { tcr_value = (unsigned short)(I2C_TCR_FAST_MODE|I2C_TCR_MASTER_READ |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; } if (length == 1) i2c.regs->cr_reg |= I2C_CR_TX_NEXT_NO_ACK; /*only 8-bit to read*/ i2c.regs->tcr_reg = tcr_value ; /*repeat start case*/ if (restart == 1) i2c.regs->cr_reg |= (I2C_CR_CPU_RDY); /*release SCL*/ } else { if (restart == 0) i2c_wmt_wait_bus_not_busy(); i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; /*i2c.isr_int_pending = 0;*/ /**/ /* special case allow length:0, for i2c_smbus_xfer*/ /**/ if (length == 0) i2c.regs->cdr_reg = 0 ; else i2c.regs->cdr_reg = (unsigned short)(pmsg->buf[xfer_length] & I2C_CDR_DATA_WRITE_MASK) ; if (restart == 0) { i2c.regs->cr_reg &= ~(I2C_CR_TX_END); /*clear Tx end*/ i2c.regs->cr_reg |= (I2C_CR_CPU_RDY); /*release SCL*/ } /**/ /* I2C: Set transfer mode [standard/fast]*/ /**/ tcr_value = 0 ; if (i2c.i2c_mode == I2C_STANDARD_MODE) tcr_value = (unsigned short)(I2C_TCR_STANDARD_MODE|I2C_TCR_MASTER_WRITE |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; else if (i2c.i2c_mode == I2C_FAST_MODE) tcr_value = (unsigned short)(I2C_TCR_FAST_MODE|I2C_TCR_MASTER_WRITE |\ (slave_addr & I2C_TCR_SLAVE_ADDR_MASK)) ; i2c.regs->tcr_reg = tcr_value ; if (restart == 1) i2c.regs->cr_reg |= I2C_CR_CPU_RDY ; } return ret; } static irqreturn_t i2c_wmt_handler( int this_irq, /*!<; //[IN] IRQ number */ void *dev_id /*!<; //[IN] Pointer to device ID */ ) { int wakeup ; unsigned short isr_status ; unsigned short tmp ; unsigned long flags; struct wmt_i2cbusfifo *fifo_head; int xfer_length = 0; int xfer_msgnum = 0; struct i2c_msg *pmsg; volatile unsigned short csr_reg; spin_lock_irqsave(&i2c3_wmt_irqlock, flags); isr_status = i2c.regs->isr_reg ; csr_reg = i2c.regs->csr_reg; wakeup = 0 ; fifo_head = list_first_entry(&wmt_i2c_fifohead, struct wmt_i2cbusfifo, busfifohead); if (isr_status & I2C_ISR_NACK_ADDR) { DPRINTK("[%s]:i2c NACK\n", __func__); /*spin_lock(&i2c_fifolock);*/ list_del(&fifo_head->busfifohead);/*del request*/ kfree(fifo_head); /*spin_unlock(&i2c_fifolock);*/ xfer_length = 0; i2c.regs->isr_reg = I2C_ISR_NACK_ADDR_WRITE_CLEAR ; tmp = i2c.regs->csr_reg ; /* read clear*/ i2c.isr_nack = 1 ; wakeup = 1 ; } if ((isr_status & I2C_ISR_BYTE_END && ((csr_reg & I2C_CSR_RCV_ACK_MASK) == I2C_CSR_RCV_NOT_ACK))) { /* printk("data rcv nack\n"); */ list_del(&fifo_head->busfifohead);/*del request*/ kfree(fifo_head); xfer_length = 0; i2c.regs->isr_reg = I2C_ISR_BYTE_END_WRITE_CLEAR ; i2c.isr_nack = 1 ; wakeup = 1 ; } else if (isr_status & I2C_ISR_BYTE_END) { i2c.regs->isr_reg = I2C_ISR_BYTE_END_WRITE_CLEAR ; i2c.isr_byte_end = 1 ; xfer_length = fifo_head->xfer_length; xfer_msgnum = fifo_head->xfer_msgnum; pmsg = &fifo_head->msg[xfer_msgnum]; /*read case*/ if (pmsg->flags & I2C_M_RD) { pmsg->buf[xfer_length - 1] = (i2c.regs->cdr_reg >> 8) ; /*the last data in current msg?*/ if (xfer_length == pmsg->len - 1) { /*last msg of the 
current request?*/ /*spin_lock(&i2c_fifolock);*/ if (pmsg->flags & I2C_M_NOSTART) { ++fifo_head->xfer_length; fifo_head->restart = 1; /* ++fifo_head->xfer_msgnum; */ i2c.regs->cr_reg |= I2C_CR_CPU_RDY; } else { ++fifo_head->xfer_length; fifo_head->restart = 0; /* ++fifo_head->xfer_msgnum; */ i2c.regs->cr_reg |= (I2C_CR_CPU_RDY | I2C_CR_TX_NEXT_NO_ACK); } /*spin_unlock(&i2c_fifolock);*/ } else if (xfer_length == pmsg->len) {/*next msg*/ if (xfer_msgnum < fifo_head->msg_num - 1) { /*spin_lock(&i2c_fifolock);*/ fifo_head->xfer_length = 0; ++fifo_head->xfer_msgnum; wmt_i2c_transfer_msg(fifo_head); ++fifo_head->xfer_length; /*spin_unlock(&i2c_fifolock);*/ } else { /*data of this msg has been transfered*/ /*spin_lock(&i2c_fifolock);*/ list_del(&fifo_head->busfifohead);/*del request*/ /*next request exist?*/ if (list_empty(&wmt_i2c_fifohead)) {/*no more reqeust*/ /*kfree(fifo_head);*/ if (fifo_head->non_block == 0) { wakeup = 1; } else { fifo_head->callback(fifo_head->data); } kfree(fifo_head); } else { /*more request*/ if (fifo_head->non_block == 0) { wakeup = 1; } else { fifo_head->callback(fifo_head->data); } kfree(fifo_head); fifo_head = list_first_entry(&wmt_i2c_fifohead, struct wmt_i2cbusfifo, busfifohead); /* if (fifo_head->non_block == 0) wakeup = 1; */ fifo_head->xfer_length = 0; wmt_i2c_transfer_msg(fifo_head); ++fifo_head->xfer_length; /* if (fifo_head->non_block == 0) { printk("2 : non callback\n"); wakeup = 1; } else { printk("2 :callback\n"); fifo_head->callback(fifo_head->data); } */ } /*spin_unlock(&i2c_fifolock);*/ } } else {/*next data*/ /*spin_lock(&i2c_fifolock);*/ ++fifo_head->xfer_length; /*spin_unlock(&i2c_fifolock);*/ i2c.regs->cr_reg |= I2C_CR_CPU_RDY; } } else { /*write case*/ /*the last data in current msg?*/ if (xfer_length == pmsg->len) { /*last msg of the current request?*/ if (xfer_msgnum < fifo_head->msg_num - 1) { /*spin_lock(&i2c_fifolock);*/ if (pmsg->flags & I2C_M_NOSTART) { ++fifo_head->xfer_length; fifo_head->restart = 1; } else { ++fifo_head->xfer_length; fifo_head->restart = 0; i2c.regs->cr_reg &= ~(I2C_CR_TX_END); udelay(2); i2c.regs->cr_reg |= (I2C_CR_TX_END); } /*access next msg*/ fifo_head->xfer_length = 0; ++fifo_head->xfer_msgnum; wmt_i2c_transfer_msg(fifo_head); ++fifo_head->xfer_length; /*spin_unlock(&i2c_fifolock);*/ } else {/*this request finish*/ /*spin_lock(&i2c_fifolock);*/ /*next request exist?*/ list_del(&fifo_head->busfifohead);/*del request*/ if (list_empty(&wmt_i2c_fifohead)) { /*kfree(fifo_head);*/ /* if (fifo_head->non_block == 0) wakeup = 1; */ i2c.regs->cr_reg &= ~(I2C_CR_TX_END); udelay(2); i2c.regs->cr_reg |= (I2C_CR_TX_END); if (fifo_head->non_block == 0) { wakeup = 1; } else { fifo_head->callback(fifo_head->data); } kfree(fifo_head); } else { i2c.regs->cr_reg &= ~(I2C_CR_TX_END); udelay(2); i2c.regs->cr_reg |= (I2C_CR_TX_END); if (fifo_head->non_block == 0) { wakeup = 1; } else { fifo_head->callback(fifo_head->data); } kfree(fifo_head); fifo_head = list_first_entry(&wmt_i2c_fifohead, struct wmt_i2cbusfifo, busfifohead); /* if (fifo_head->non_block == 0) wakeup = 1; */ /*next msg*/ fifo_head->xfer_length = 0; ++fifo_head->xfer_msgnum; wmt_i2c_transfer_msg(fifo_head); ++fifo_head->xfer_length; /* if (fifo_head->non_block == 0) { printk("4:non callback\n"); wakeup = 1; } else { printk("4:callback\n"); fifo_head->callback(fifo_head->data); } */ } /*spin_unlock(&i2c_fifolock);*/ } } else {/*next data*/ i2c.regs->cdr_reg = (unsigned short) (pmsg->buf[fifo_head->xfer_length] & I2C_CDR_DATA_WRITE_MASK); /*spin_lock(&i2c_fifolock);*/ 
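	/* the next byte was latched into CDR just above; advance the FIFO
	 * cursor and set CPU_RDY|ENABLE so the controller clocks it out */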
++fifo_head->xfer_length; /*spin_unlock(&i2c_fifolock);*/ i2c.regs->cr_reg |= (I2C_CR_CPU_RDY | I2C_CR_ENABLE); } } } if (isr_status & I2C_ISR_SCL_TIME_OUT) { DPRINTK("[%s]SCL timeout\n", __func__); #if 0 i2c.regs->cr_reg |= BIT7;/*reset status*/ /*spin_lock(&i2c_fifolock);*/ list_del(&fifo_head->busfifohead);/*del request*/ /*spin_unlock(&i2c_fifolock);*/ xfer_length = 0; i2c.regs->isr_reg = I2C_ISR_SCL_TIME_OUT_WRITE_CLEAR | I2C_ISR_BYTE_END_WRITE_CLEAR; i2c.isr_timeout = 1 ; wakeup = 1; #endif i2c.regs->isr_reg = I2C_ISR_SCL_TIME_OUT_WRITE_CLEAR ; } if (wakeup) { /*spin_lock_irqsave(&i2c_wmt_irqlock, flags);*/ i2c.isr_int_pending = 1; /*spin_unlock_irqrestore(&i2c_wmt_irqlock, flags);*/ wake_up(&i2c3_wait); } else DPRINTK("i2c_err : unknown I2C ISR Handle 0x%4.4X" , isr_status) ; spin_unlock_irqrestore(&i2c3_wmt_irqlock, flags); return IRQ_HANDLED; } static int i2c_wmt_resource_init(void) { if (is_master == 0) return 0; if (request_irq(i2c.irq_no , &i2c_wmt_handler, IRQF_DISABLED, "i2c", 0) < 0) { DPRINTK(KERN_INFO "I2C: Failed to register I2C irq %i\n", i2c.irq_no); return -ENODEV; } return 0; } static void i2c_wmt_resource_release(void) { if (is_master == 0) return; free_irq(i2c.irq_no, 0); } static struct i2c_algo_wmt_data i2c_wmt_data = { write_msg: i2c_wmt_write_msg, read_msg: i2c_wmt_read_msg, send_request: i2c_send_request, wait_bus_not_busy: i2c_wmt_wait_bus_not_busy, reset: i2c_wmt_reset, set_mode: i2c_wmt_set_mode, udelay: I2C_ALGO_UDELAY, timeout: I2C_ALGO_TIMEOUT, }; static struct i2c_adapter i2c_wmt_ops = { .owner = THIS_MODULE, /* .id = I2C_ALGO_WMT, */ .algo_data = &i2c_wmt_data, .name = "wmt_i2c3_adapter", .retries = I2C_ADAPTER_RETRIES, .nr = 3, }; #ifdef CONFIG_PM static struct i2c_regs_s wmt_i2c_reg ; static void i2c_shutdown(void) { printk("i2c3 shutdown\n"); wmt_i2c3_power_state = 2; while (!list_empty(&wmt_i2c_fifohead)) msleep(1); while (1) {/*wait busy clear*/ if ((REG16_VAL(I2C3_CSR_ADDR) & I2C_STATUS_MASK) == I2C_READY) break ; msleep(1); } return; } static int i2c_suspend(void) { printk("i2c3 suspend\n"); wmt_i2c_reg.imr_reg = i2c.regs->imr_reg; wmt_i2c_reg.tr_reg = i2c.regs->tr_reg; wmt_i2c_reg.div_reg = i2c.regs->div_reg; return 0; } static void i2c_resume(void) { printk("i2c3 resume\n"); GPIO_CTRL_GP23_I2C3_BYTE_VAL &= ~(BIT0 | BIT1); PULL_EN_GP23_I2C3_BYTE_VAL |= (BIT0 | BIT1); PULL_CTRL_GP23_I2C3_BYTE_VAL |= (BIT0 | BIT1); PIN_SHARING_SEL_4BYTE_VAL &= ~BIT28; auto_pll_divisor(DEV_I2C3, CLK_ENABLE, 0, 0); auto_pll_divisor(DEV_I2C3, SET_DIV, 2, 20);/*20M Hz*/ i2c.regs->cr_reg = 0 ; i2c.regs->div_reg = wmt_i2c_reg.div_reg; i2c.regs->imr_reg = wmt_i2c_reg.imr_reg; i2c.regs->tr_reg = wmt_i2c_reg.tr_reg ; i2c.regs->cr_reg = 0x001 ; } #else #define i2c_suspend NULL #define i2c_resume NULL #define i2c_shutdown NULL #endif extern int wmt_i2c_add_bus(struct i2c_adapter *); extern int wmt_i2c_del_bus(struct i2c_adapter *); #ifdef CONFIG_PM static struct syscore_ops wmt_i2c_syscore_ops = { .suspend = i2c_suspend, .resume = i2c_resume, .shutdown = i2c_shutdown, }; #endif static int __init i2c_adap_wmt_init(void) { unsigned short tmp ; char varname[] = "wmt.i2c.param"; #ifdef CONFIG_I2C_SLAVE_WMT char varname1[] = "wmt.bus.i2c.slave_port"; #endif unsigned char buf[80]; int ret; unsigned int port_num; int idx = 0; int varlen = 80; unsigned int pllb_freq = 0; unsigned int tr_val = 0; #ifdef CONFIG_I2C_SLAVE_WMT #ifdef USE_UBOOT_PARA ret = wmt_getsyspara(varname1, buf, &varlen); #else ret = 1; #endif is_master = 1; if (ret == 0) { ret = sscanf(buf, "%x", &port_num); while 
(ret) { if (port_num != 0) is_master = 1; else { is_master = 0; break; } idx += ret; ret = sscanf(buf + idx, ",%x", &port_num); } } else is_master = 1; #endif wmt_i2c3_is_master = is_master; if (is_master == 1) { #ifdef USE_UBOOT_PARA ret = wmt_getsyspara(varname, buf, &varlen); #else ret = 1; #endif if (ret == 0) { ret = sscanf(buf, "%x:%x", &port_num, &speed_mode); idx += 3; while (ret) { if (ret < 2) speed_mode = 0; else { if (port_num != 3) speed_mode = 0; else break; } ret = sscanf(buf + idx, ",%x:%x", &port_num, &speed_mode); idx += 4; } } if (speed_mode > 1) speed_mode = 0; wmt_i2c3_speed_mode = speed_mode; /**/ /* software initial*/ /**/ i2c.regs = (struct i2c_regs_s *)I2C3_BASE_ADDR ; i2c.irq_no = IRQ_I2C3 ; printk("PORT 3 speed_mode = %d\n", speed_mode); if (speed_mode == 0) i2c.i2c_mode = I2C_STANDARD_MODE ; else if (speed_mode == 1) i2c.i2c_mode = I2C_FAST_MODE ; i2c.isr_nack = 0 ; i2c.isr_byte_end = 0 ; i2c.isr_timeout = 0 ; i2c.isr_int_pending = 0; /**/ /* hardware initial*/ /**/ auto_pll_divisor(DEV_I2C3, CLK_ENABLE, 0, 0); pllb_freq = auto_pll_divisor(DEV_I2C3, SET_DIV, 2, 20);/*20M Hz*/ printk("pllb_freq = %d\n", pllb_freq); if ((pllb_freq%(1000*2*100)) != 0) tr_val = pllb_freq/(1000*2*100) + 1; else tr_val = pllb_freq/(1000*2*100); *(volatile unsigned char *)CTRL_GPIO &= ~(BIT0 | BIT1); *(volatile unsigned char *)PU_EN_GPIO |= (BIT0 | BIT1); *(volatile unsigned char *)PU_CTRL_GPIO |= (BIT0 | BIT1); PIN_SHARING_SEL_4BYTE_VAL &= ~BIT28; i2c.regs->cr_reg = 0 ; i2c.regs->div_reg = APB_96M_I2C_DIV ; i2c.regs->isr_reg = I2C_ISR_ALL_WRITE_CLEAR ; /* 0x0007*/ i2c.regs->imr_reg = I2C_IMR_ALL_ENABLE ; /* 0x0007*/ i2c.regs->cr_reg = I2C_CR_ENABLE ; tmp = i2c.regs->csr_reg ; /* read clear*/ i2c.regs->isr_reg = I2C_ISR_ALL_WRITE_CLEAR ; /* 0x0007*/ if (i2c.i2c_mode == I2C_STANDARD_MODE) i2c.regs->tr_reg = 0xff00|tr_val; else if (i2c.i2c_mode == I2C_FAST_MODE) { tr_val /= 4; i2c.regs->tr_reg = 0xff00|tr_val ; } } if (i2c_wmt_resource_init() == 0) { if (wmt_i2c_add_bus(&i2c_wmt_ops) < 0) { i2c_wmt_resource_release(); printk(KERN_INFO "i2c: Failed to add bus\n"); return -ENODEV; } } else return -ENODEV; INIT_LIST_HEAD(&wmt_i2c_fifohead); #ifdef CONFIG_PM register_syscore_ops(&wmt_i2c_syscore_ops); #endif printk(KERN_INFO "i2c: successfully added bus\n"); #ifdef I2C_REG_TEST printk("i2c.regs->cr_reg= 0x%08x\n\r", i2c.regs->cr_reg); printk("i2c.regs->tcr_reg= 0x%08x\n\r", i2c.regs->tcr_reg); printk("i2c.regs->csr_reg= 0x%08x\n\r", i2c.regs->csr_reg); printk("i2c.regs->isr_reg= 0x%08x\n\r", i2c.regs->isr_reg); printk("i2c.regs->imr_reg= 0x%08x\n\r", i2c.regs->imr_reg); printk("i2c.regs->cdr_reg= 0x%08x\n\r", i2c.regs->cdr_reg); printk("i2c.regs->tr_reg= 0x%08x\n\r", i2c.regs->tr_reg); printk("i2c.regs->div_reg= 0x%08x\n\r", i2c.regs->div_reg); #endif return 0; } subsys_initcall(i2c_adap_wmt_init); static void i2c_adap_wmt_exit(void) { wmt_i2c_del_bus(&i2c_wmt_ops); i2c_wmt_resource_release(); printk(KERN_INFO "i2c: successfully removed bus\n"); } MODULE_AUTHOR("WonderMedia Technologies, Inc."); MODULE_DESCRIPTION("WMT I2C Adapter Driver"); MODULE_LICENSE("GPL"); module_exit(i2c_adap_wmt_exit);
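/*
 * A minimal standalone sketch, not part of the driver above: the TR
 * register arithmetic from i2c_adap_wmt_init(), isolated so it can be
 * checked in user space. The round-up and the /4 for fast mode are the
 * driver's own; reading the low byte as an SCL divisor and 0xff00 as a
 * time-out field is an assumption based on how the value is used.
 */
#include <stdio.h>

static unsigned short tr_value(unsigned int pllb_freq, int fast_mode)
{
	unsigned int tr_val;

	/* divisor for a 100 kHz SCL, rounded up exactly as the driver does */
	tr_val = pllb_freq / (1000 * 2 * 100);
	if (pllb_freq % (1000 * 2 * 100) != 0)
		tr_val++;
	if (fast_mode)
		tr_val /= 4;	/* fast mode runs SCL four times faster */
	return (unsigned short)(0xff00 | tr_val);
}

int main(void)
{
	/* 20 MHz, the rate the driver requests via auto_pll_divisor() */
	printf("standard TR = 0x%04x\n", tr_value(20000000, 0));
	printf("fast     TR = 0x%04x\n", tr_value(20000000, 1));
	return 0;
}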
FOSSEE/FOSSEE-netbook-kernel-source
drivers/i2c/busses/wmt-i2c-bus-3.c
C
gpl-2.0
32,741
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2010 Nokia Corporation Copyright (c) 2011-2012 The Linux Foundation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI Management interface */ #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/module.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/mgmt.h> #include <net/bluetooth/smp.h> #define MGMT_VERSION 0 #define MGMT_REVISION 1 #define SCAN_IDLE 0x00 #define SCAN_LE 0x01 #define SCAN_BR 0x02 struct pending_cmd { struct list_head list; __u16 opcode; int index; void *param; struct sock *sk; void *user_data; }; struct mgmt_pending_free_work { struct work_struct work; struct sock *sk; }; LIST_HEAD(cmd_list); static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) { struct sk_buff *skb; struct mgmt_hdr *hdr; struct mgmt_ev_cmd_status *ev; BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC); if (!skb) return -ENOMEM; hdr = (void *) skb_put(skb, sizeof(*hdr)); hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); hdr->index = cpu_to_le16(index); hdr->len = cpu_to_le16(sizeof(*ev)); ev = (void *) skb_put(skb, sizeof(*ev)); ev->status = status; put_unaligned_le16(cmd, &ev->opcode); if (sock_queue_rcv_skb(sk, skb) < 0) kfree_skb(skb); return 0; } static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, size_t rp_len) { struct sk_buff *skb; struct mgmt_hdr *hdr; struct mgmt_ev_cmd_complete *ev; BT_DBG("sock %p", sk); skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC); if (!skb) return -ENOMEM; hdr = (void *) skb_put(skb, sizeof(*hdr)); hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); hdr->index = cpu_to_le16(index); hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); ev = (void *) skb_put(skb, sizeof(*ev) + rp_len); put_unaligned_le16(cmd, &ev->opcode); if (rp) memcpy(ev->data, rp, rp_len); if (sock_queue_rcv_skb(sk, skb) < 0) kfree_skb(skb); return 0; } static int read_version(struct sock *sk) { struct mgmt_rp_read_version rp; BT_DBG("sock %p", sk); rp.version = MGMT_VERSION; put_unaligned_le16(MGMT_REVISION, &rp.revision); return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp, sizeof(rp)); } static int read_index_list(struct sock *sk) { struct mgmt_rp_read_index_list *rp; struct list_head *p; size_t rp_len; u16 count; int i, err; BT_DBG("sock %p", sk); read_lock(&hci_dev_list_lock); count = 0; list_for_each(p, &hci_dev_list) { struct hci_dev *d = list_entry(p, struct hci_dev, list); if (d->dev_type != 
HCI_BREDR) continue; count++; } rp_len = sizeof(*rp) + (2 * count); rp = kmalloc(rp_len, GFP_ATOMIC); if (!rp) { read_unlock(&hci_dev_list_lock); return -ENOMEM; } put_unaligned_le16(0, &rp->num_controllers); i = 0; list_for_each(p, &hci_dev_list) { struct hci_dev *d = list_entry(p, struct hci_dev, list); hci_del_off_timer(d); if (d->dev_type != HCI_BREDR) continue; set_bit(HCI_MGMT, &d->flags); if (test_bit(HCI_SETUP, &d->flags)) continue; put_unaligned_le16(d->id, &rp->index[i++]); put_unaligned_le16((u16)i, &rp->num_controllers); BT_DBG("Added hci%u", d->id); } read_unlock(&hci_dev_list_lock); err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp, rp_len); kfree(rp); return err; } static int read_controller_info(struct sock *sk, u16 index) { struct mgmt_rp_read_info rp; struct hci_dev *hdev; BT_DBG("sock %p hci%u", sk, index); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV); hci_del_off_timer(hdev); hci_dev_lock_bh(hdev); set_bit(HCI_MGMT, &hdev->flags); memset(&rp, 0, sizeof(rp)); rp.type = hdev->dev_type; rp.powered = test_bit(HCI_UP, &hdev->flags); rp.connectable = test_bit(HCI_PSCAN, &hdev->flags); rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags); rp.pairable = test_bit(HCI_PSCAN, &hdev->flags); if (test_bit(HCI_AUTH, &hdev->flags)) rp.sec_mode = 3; else if (hdev->ssp_mode > 0) rp.sec_mode = 4; else rp.sec_mode = 2; bacpy(&rp.bdaddr, &hdev->bdaddr); memcpy(rp.features, hdev->features, 8); memcpy(rp.dev_class, hdev->dev_class, 3); put_unaligned_le16(hdev->manufacturer, &rp.manufacturer); rp.hci_ver = hdev->hci_ver; put_unaligned_le16(hdev->hci_rev, &rp.hci_rev); memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); rp.le_white_list_size = hdev->le_white_list_size; hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); } static void mgmt_pending_free_worker(struct work_struct *work) { struct mgmt_pending_free_work *free_work = container_of(work, struct mgmt_pending_free_work, work); BT_DBG("sk %p", free_work->sk); sock_put(free_work->sk); kfree(free_work); } static void mgmt_pending_free(struct pending_cmd *cmd) { struct mgmt_pending_free_work *free_work; struct sock *sk = cmd->sk; BT_DBG("opcode %d, sk %p", cmd->opcode, sk); kfree(cmd->param); kfree(cmd); free_work = kzalloc(sizeof(*free_work), GFP_ATOMIC); if (free_work) { INIT_WORK(&free_work->work, mgmt_pending_free_worker); free_work->sk = sk; if (!schedule_work(&free_work->work)) kfree(free_work); } } static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, u16 index, void *data, u16 len) { struct pending_cmd *cmd; BT_DBG("%d", opcode); cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return NULL; cmd->opcode = opcode; cmd->index = index; cmd->param = kmalloc(len, GFP_ATOMIC); if (!cmd->param) { kfree(cmd); return NULL; } if (data) memcpy(cmd->param, data, len); cmd->sk = sk; sock_hold(sk); list_add(&cmd->list, &cmd_list); return cmd; } static void mgmt_pending_foreach(u16 opcode, int index, void (*cb)(struct pending_cmd *cmd, void *data), void *data) { struct list_head *p, *n; BT_DBG(" %d", opcode); list_for_each_safe(p, n, &cmd_list) { struct pending_cmd *cmd; cmd = list_entry(p, struct pending_cmd, list); if (opcode > 0 && cmd->opcode != opcode) continue; if (index >= 0 && cmd->index != index) continue; cb(cmd, data); } } static struct pending_cmd *mgmt_pending_find(u16 opcode, int index) { struct list_head *p; BT_DBG(" %d", opcode); list_for_each(p, &cmd_list) { struct pending_cmd *cmd; 
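	/* scan the global cmd_list for a pending command with this opcode;
	 * a negative index acts as a wildcard matching any controller */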
cmd = list_entry(p, struct pending_cmd, list); if (cmd->opcode != opcode) continue; if (index >= 0 && cmd->index != index) continue; return cmd; } return NULL; } static void mgmt_pending_remove(struct pending_cmd *cmd) { BT_DBG(" %d", cmd->opcode); list_del(&cmd->list); mgmt_pending_free(cmd); } static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_mode *cp; struct hci_dev *hdev; struct pending_cmd *cmd; int err, up; cp = (void *) data; BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV); hci_dev_lock_bh(hdev); up = test_bit(HCI_UP, &hdev->flags); if ((cp->val && up) || (!cp->val && !up)) { err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY); goto failed; } if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) { err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; } hci_dev_unlock_bh(hdev); if (cp->val) queue_work(hdev->workqueue, &hdev->power_on); else queue_work(hdev->workqueue, &hdev->power_off); err = 0; hci_dev_put(hdev); return err; failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static u8 get_service_classes(struct hci_dev *hdev) { struct list_head *p; u8 val = 0; list_for_each(p, &hdev->uuids) { struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list); val |= uuid->svc_hint; } return val; } static int update_class(struct hci_dev *hdev) { u8 cod[3]; int err = 0; BT_DBG("%s", hdev->name); if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) return 0; cod[0] = hdev->minor_class; cod[1] = hdev->major_class; cod[2] = get_service_classes(hdev); if (memcmp(cod, hdev->dev_class, 3) == 0) return 0; err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); if (err == 0) memcpy(hdev->dev_class, cod, 3); return err; } static int set_limited_discoverable(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_mode *cp; struct hci_dev *hdev; struct pending_cmd *cmd; struct hci_cp_write_current_iac_lap dcp; int update_cod; int err = 0; /* General Inquiry LAP: 0x9E8B33, Limited Inquiry LAP: 0x9E8B00 */ u8 lap[] = { 0x33, 0x8b, 0x9e, 0x00, 0x8b, 0x9e }; cp = (void *) data; BT_DBG("hci%u discoverable: %d", index, cp->val); if (!cp || len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE, ENETDOWN); goto failed; } if (mgmt_pending_find(MGMT_OP_SET_LIMIT_DISCOVERABLE, index)) { err = cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE, EBUSY); goto failed; } if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) && test_bit(HCI_PSCAN, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_SET_LIMIT_DISCOVERABLE, EALREADY); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LIMIT_DISCOVERABLE, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; } memset(&dcp, 0, sizeof(dcp)); dcp.num_current_iac = cp->val ? 
2 : 1; memcpy(&dcp.lap, lap, dcp.num_current_iac * 3); update_cod = 1; if (cp->val) { if (hdev->major_class & MGMT_MAJOR_CLASS_LIMITED) update_cod = 0; hdev->major_class |= MGMT_MAJOR_CLASS_LIMITED; } else { if (!(hdev->major_class & MGMT_MAJOR_CLASS_LIMITED)) update_cod = 0; hdev->major_class &= ~MGMT_MAJOR_CLASS_LIMITED; } if (update_cod) err = update_class(hdev); if (err >= 0) err = hci_send_cmd(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP, sizeof(dcp), &dcp); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_mode *cp; struct hci_dev *hdev; struct pending_cmd *cmd; u8 scan; int err; cp = (void *) data; BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN); goto failed; } if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY); goto failed; } if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) && test_bit(HCI_PSCAN, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; } scan = SCAN_PAGE; if (cp->val) scan |= SCAN_INQUIRY; err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int set_connectable(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_mode *cp; struct hci_dev *hdev; struct pending_cmd *cmd; u8 scan; int err; cp = (void *) data; BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN); goto failed; } if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY); goto failed; } if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; } if (cp->val) scan = SCAN_PAGE; else scan = 0; err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int mgmt_event(u16 event, u16 index, void *data, u16 data_len, struct sock *skip_sk) { struct sk_buff *skb; struct mgmt_hdr *hdr; BT_DBG("hci%d %d", index, event); skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC); if (!skb) return -ENOMEM; bt_cb(skb)->channel = HCI_CHANNEL_CONTROL; hdr = (void *) skb_put(skb, sizeof(*hdr)); hdr->opcode = cpu_to_le16(event); hdr->index = cpu_to_le16(index); hdr->len = cpu_to_le16(data_len); if (data) memcpy(skb_put(skb, data_len), data, data_len); hci_send_to_sock(NULL, skb, skip_sk); 
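	/* hci_send_to_sock() has queued copies to every listening control
	 * socket except skip_sk; the original skb is no longer needed */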
kfree_skb(skb); return 0; } static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val) { struct mgmt_mode rp; rp.val = val; return cmd_complete(sk, index, opcode, &rp, sizeof(rp)); } static int set_pairable(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_mode *cp, ev; struct hci_dev *hdev; int err; cp = (void *) data; BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV); hci_dev_lock_bh(hdev); if (cp->val) set_bit(HCI_PAIRABLE, &hdev->flags); else clear_bit(HCI_PAIRABLE, &hdev->flags); err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val); if (err < 0) goto failed; ev.val = cp->val; err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } #define EIR_FLAGS 0x01 /* flags */ #define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */ #define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */ #define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */ #define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */ #define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */ #define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */ #define EIR_NAME_SHORT 0x08 /* shortened local name */ #define EIR_NAME_COMPLETE 0x09 /* complete local name */ #define EIR_TX_POWER 0x0A /* transmit power level */ #define EIR_DEVICE_ID 0x10 /* device ID */ #define PNP_INFO_SVCLASS_ID 0x1200 static u8 bluetooth_base_uuid[] = { 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; static u16 get_uuid16(u8 *uuid128) { u32 val; int i; for (i = 0; i < 12; i++) { if (bluetooth_base_uuid[i] != uuid128[i]) return 0; } memcpy(&val, &uuid128[12], 4); val = le32_to_cpu(val); if (val > 0xffff) return 0; return (u16) val; } static void create_eir(struct hci_dev *hdev, u8 *data) { u8 *ptr = data; u16 eir_len = 0; u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)]; int i, truncated = 0; struct list_head *p; size_t name_len; name_len = strnlen(hdev->dev_name, HCI_MAX_EIR_LENGTH); if (name_len > 0) { /* EIR Data type */ if (name_len > 48) { name_len = 48; ptr[1] = EIR_NAME_SHORT; } else ptr[1] = EIR_NAME_COMPLETE; /* EIR Data length */ ptr[0] = name_len + 1; memcpy(ptr + 2, hdev->dev_name, name_len); eir_len += (name_len + 2); ptr += (name_len + 2); } memset(uuid16_list, 0, sizeof(uuid16_list)); /* Group all UUID16 types */ list_for_each(p, &hdev->uuids) { struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list); u16 uuid16; uuid16 = get_uuid16(uuid->uuid); if (uuid16 == 0) return; if (uuid16 < 0x1100) continue; if (uuid16 == PNP_INFO_SVCLASS_ID) continue; /* Stop if not enough space to put next UUID */ if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) { truncated = 1; break; } /* Check for duplicates */ for (i = 0; uuid16_list[i] != 0; i++) if (uuid16_list[i] == uuid16) break; if (uuid16_list[i] == 0) { uuid16_list[i] = uuid16; eir_len += sizeof(u16); } } if (uuid16_list[0] != 0) { u8 *length = ptr; /* EIR Data type */ ptr[1] = truncated ? 
EIR_UUID16_SOME : EIR_UUID16_ALL; ptr += 2; eir_len += 2; for (i = 0; uuid16_list[i] != 0; i++) { *ptr++ = (uuid16_list[i] & 0x00ff); *ptr++ = (uuid16_list[i] & 0xff00) >> 8; } /* EIR Data length */ *length = (i * sizeof(u16)) + 1; } } static int update_eir(struct hci_dev *hdev) { struct hci_cp_write_eir cp; if (!(hdev->features[6] & LMP_EXT_INQ)) return 0; if (hdev->ssp_mode == 0) return 0; if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) return 0; memset(&cp, 0, sizeof(cp)); create_eir(hdev, cp.data); if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) return 0; memcpy(hdev->eir, cp.data, sizeof(cp.data)); return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); } static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_cp_add_uuid *cp; struct hci_dev *hdev; struct bt_uuid *uuid; int err; cp = (void *) data; BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV); hci_dev_lock_bh(hdev); uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); if (!uuid) { err = -ENOMEM; goto failed; } memcpy(uuid->uuid, cp->uuid, 16); uuid->svc_hint = cp->svc_hint; list_add(&uuid->list, &hdev->uuids); if (test_bit(HCI_UP, &hdev->flags)) { err = update_class(hdev); if (err < 0) goto failed; err = update_eir(hdev); if (err < 0) goto failed; } else err = 0; err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct list_head *p, *n; struct mgmt_cp_remove_uuid *cp; struct hci_dev *hdev; u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; int err, found; cp = (void *) data; BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV); hci_dev_lock_bh(hdev); if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { err = hci_uuids_clear(hdev); goto unlock; } found = 0; list_for_each_safe(p, n, &hdev->uuids) { struct bt_uuid *match = list_entry(p, struct bt_uuid, list); if (memcmp(match->uuid, cp->uuid, 16) != 0) continue; list_del(&match->list); kfree(match); found++; } if (found == 0) { err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT); goto unlock; } if (test_bit(HCI_UP, &hdev->flags)) { err = update_class(hdev); if (err < 0) goto unlock; err = update_eir(hdev); if (err < 0) goto unlock; } else err = 0; err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); unlock: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int set_dev_class(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_set_dev_class *cp; int err; cp = (void *) data; BT_DBG("request for hci%u", index); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV); hci_dev_lock_bh(hdev); hdev->major_class &= ~MGMT_MAJOR_CLASS_MASK; hdev->major_class |= cp->major & MGMT_MAJOR_CLASS_MASK; hdev->minor_class = cp->minor; if (test_bit(HCI_UP, &hdev->flags)) { err = update_class(hdev); if (err == 0) err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, hdev->dev_class, sizeof(u8)*3); } else err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); hci_dev_unlock_bh(hdev); 
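	/* drop the reference taken by hci_dev_get() at the top of the function */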
hci_dev_put(hdev); return err; } static int set_service_cache(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_set_service_cache *cp; int err; cp = (void *) data; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV); hci_dev_lock_bh(hdev); BT_DBG("hci%u enable %d", index, cp->enable); if (cp->enable) { set_bit(HCI_SERVICE_CACHE, &hdev->flags); err = 0; } else { clear_bit(HCI_SERVICE_CACHE, &hdev->flags); if (test_bit(HCI_UP, &hdev->flags)) { err = update_class(hdev); if (err == 0) err = update_eir(hdev); } else err = 0; } if (err == 0) err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, 0); hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_load_keys *cp; u16 key_count, expected_len; int i, err; cp = (void *) data; if (len < sizeof(*cp)) return -EINVAL; key_count = get_unaligned_le16(&cp->key_count); expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info); if (expected_len > len) { BT_ERR("load_keys: expected at least %u bytes, got %u bytes", expected_len, len); return -EINVAL; } hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV); BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, key_count); hci_dev_lock_bh(hdev); hci_link_keys_clear(hdev); set_bit(HCI_LINK_KEYS, &hdev->flags); if (cp->debug_keys) set_bit(HCI_DEBUG_KEYS, &hdev->flags); else clear_bit(HCI_DEBUG_KEYS, &hdev->flags); len -= sizeof(*cp); i = 0; while (i < len) { struct mgmt_key_info *key = (void *) cp->keys + i; i += sizeof(*key); if (key->key_type == KEY_TYPE_LTK) { struct key_master_id *id = (void *) key->data; if (key->dlen != sizeof(struct key_master_id)) continue; hci_add_ltk(hdev, 0, &key->bdaddr, key->addr_type, key->pin_len, key->auth, id->ediv, id->rand, key->val); continue; } hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->key_type, key->pin_len); } err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0); hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_remove_key *cp; struct hci_conn *conn; int err; cp = (void *) data; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV); hci_dev_lock_bh(hdev); err = hci_remove_link_key(hdev, &cp->bdaddr); if (err < 0) { err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err); goto unlock; } err = 0; if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) goto unlock; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (conn) { struct hci_cp_disconnect dc; put_unaligned_le16(conn->handle, &dc.handle); dc.reason = 0x13; /* Remote User Terminated Connection */ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL); } unlock: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_disconnect *cp; struct hci_cp_disconnect dc; struct pending_cmd *cmd; struct hci_conn *conn; int err; BT_DBG(""); cp = (void *) data; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL); hdev = 
hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN); goto failed; } if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) { err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY); goto failed; } conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (!conn) { conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr); if (!conn) { err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN); goto failed; } } cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; } put_unaligned_le16(conn->handle, &dc.handle); dc.reason = 0x13; /* Remote User Terminated Connection */ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static u8 link_to_mgmt(u8 link_type, u8 addr_type) { switch (link_type) { case LE_LINK: switch (addr_type) { case ADDR_LE_DEV_PUBLIC: return MGMT_ADDR_LE_PUBLIC; case ADDR_LE_DEV_RANDOM: return MGMT_ADDR_LE_RANDOM; default: return MGMT_ADDR_INVALID; } case ACL_LINK: return MGMT_ADDR_BREDR; default: return MGMT_ADDR_INVALID; } } static int get_connections(struct sock *sk, u16 index) { struct mgmt_rp_get_connections *rp; struct hci_dev *hdev; struct list_head *p; size_t rp_len; u16 count; int i, err; BT_DBG(""); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV); hci_dev_lock_bh(hdev); count = 0; list_for_each(p, &hdev->conn_hash.list) { count++; } rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t)); rp = kmalloc(rp_len, GFP_ATOMIC); if (!rp) { err = -ENOMEM; goto unlock; } put_unaligned_le16(count, &rp->conn_count); read_lock(&hci_dev_list_lock); i = 0; list_for_each(p, &hdev->conn_hash.list) { struct hci_conn *c = list_entry(p, struct hci_conn, list); bacpy(&rp->conn[i++], &c->dst); } read_unlock(&hci_dev_list_lock); err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); unlock: kfree(rp); hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_pin_code_reply *cp; struct hci_cp_pin_code_reply reply; struct pending_cmd *cmd; int err; BT_DBG(""); cp = (void *) data; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; } bacpy(&reply.bdaddr, &cp->bdaddr); reply.pin_len = cp->pin_len; memcpy(reply.pin_code, cp->pin_code, 16); err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int encrypt_link(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_encrypt_link *cp; struct hci_cp_set_conn_encrypt enc; struct hci_conn *conn; int err = 0; BT_DBG(""); cp = (void *) data; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, 
MGMT_OP_ENCRYPT_LINK, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, ENETDOWN); goto done; } conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (!conn) { err = cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, ENOTCONN); goto done; } if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { err = cmd_status(sk, index, MGMT_OP_ENCRYPT_LINK, EINPROGRESS); goto done; } if (conn->link_mode & HCI_LM_AUTH) { enc.handle = cpu_to_le16(conn->handle); enc.encrypt = cp->enable; err = hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(enc), &enc); } else { conn->auth_initiator = 1; if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { struct hci_cp_auth_requested cp; cp.handle = cpu_to_le16(conn->handle); err = hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); } } done: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_pin_code_neg_reply *cp; struct pending_cmd *cmd; int err; BT_DBG(""); cp = (void *) data; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, ENETDOWN); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; } err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr), &cp->bdaddr); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int le_add_dev_white_list(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_le_add_dev_white_list *cp; int err = 0; BT_DBG(""); cp = (void *) data; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_LE_ADD_DEV_WHITE_LIST, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_LE_ADD_DEV_WHITE_LIST, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_LE_ADD_DEV_WHITE_LIST, ENETDOWN); goto failed; } hci_le_add_dev_white_list(hdev, &cp->bdaddr); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int le_remove_dev_white_list(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_le_remove_dev_white_list *cp; int err = 0; BT_DBG(""); cp = (void *) data; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_LE_REMOVE_DEV_WHITE_LIST, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_LE_REMOVE_DEV_WHITE_LIST, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_LE_REMOVE_DEV_WHITE_LIST, ENETDOWN); goto failed; } hci_le_remove_dev_white_list(hdev, &cp->bdaddr); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int le_create_conn_white_list(struct sock *sk, u16 index) { struct hci_dev *hdev; struct hci_conn *conn; u8 sec_level, auth_type; struct pending_cmd *cmd; bdaddr_t bdaddr; int err = 0; BT_DBG(""); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_LE_CREATE_CONN_WHITE_LIST, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, 
MGMT_OP_LE_CREATE_CONN_WHITE_LIST, ENETDOWN); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_LE_CREATE_CONN_WHITE_LIST, index, NULL, 0); if (!cmd) { err = -ENOMEM; goto failed; } sec_level = BT_SECURITY_MEDIUM; auth_type = HCI_AT_GENERAL_BONDING; memset(&bdaddr, 0, sizeof(bdaddr)); conn = hci_le_connect(hdev, 0, BDADDR_ANY, sec_level, auth_type, NULL); if (IS_ERR(conn)) { err = PTR_ERR(conn); mgmt_pending_remove(cmd); } failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int le_cancel_create_conn_white_list(struct sock *sk, u16 index) { struct hci_dev *hdev; int err = 0; BT_DBG(""); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST, ENETDOWN); goto failed; } hci_le_cancel_create_connect(hdev, BDADDR_ANY); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int le_clear_white_list(struct sock *sk, u16 index) { struct hci_dev *hdev; int err; BT_DBG(""); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_LE_CLEAR_WHITE_LIST, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_LE_CLEAR_WHITE_LIST, ENETDOWN); goto failed; } err = hci_send_cmd(hdev, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_set_io_capability *cp; BT_DBG(""); cp = (void *) data; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV); hci_dev_lock_bh(hdev); hdev->io_capability = cp->io_capability; BT_DBG("%s IO capability set to 0x%02x", hdev->name, hdev->io_capability); hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); } static inline struct pending_cmd *find_pairing(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct list_head *p; list_for_each(p, &cmd_list) { struct pending_cmd *cmd; cmd = list_entry(p, struct pending_cmd, list); if (cmd->opcode != MGMT_OP_PAIR_DEVICE) continue; if (cmd->index != hdev->id) continue; if (cmd->user_data != conn) continue; return cmd; } return NULL; } static void pairing_complete(struct pending_cmd *cmd, u8 status) { struct mgmt_rp_pair_device rp; struct hci_conn *conn = cmd->user_data; BT_DBG(" %u", status); bacpy(&rp.bdaddr, &conn->dst); rp.status = status; cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp)); /* So we don't get further callbacks for this connection */ conn->connect_cfm_cb = NULL; conn->security_cfm_cb = NULL; conn->disconn_cfm_cb = NULL; mgmt_pending_remove(cmd); } static void pairing_complete_cb(struct hci_conn *conn, u8 status) { struct pending_cmd *cmd; BT_DBG(" %u", status); cmd = find_pairing(conn); if (!cmd) { BT_DBG("Unable to find a pending command"); return; } pairing_complete(cmd, status); hci_conn_put(conn); } static void pairing_security_complete_cb(struct hci_conn *conn, u8 status) { struct pending_cmd *cmd; BT_DBG(" %u", status); cmd = find_pairing(conn); if (!cmd) { BT_DBG("Unable to find a pending command"); return; } if (conn->type == LE_LINK) smp_link_encrypt_cmplt(conn->l2cap_data, status, status ? 
0 : 1);
	else
		pairing_complete(cmd, status);
}

static void pairing_connect_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("conn: %p %u", conn, status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	if (status || conn->pending_sec_level < BT_SECURITY_MEDIUM)
		pairing_complete(cmd, status);

	hci_conn_put(conn);
}

static void discovery_terminated(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev;
	struct mgmt_mode ev = {0};

	BT_DBG("");

	hdev = hci_dev_get(cmd->index);
	if (!hdev)
		goto not_found;

	del_timer(&hdev->disco_le_timer);
	del_timer(&hdev->disco_timer);
	hci_dev_put(hdev);

not_found:
	mgmt_event(MGMT_EV_DISCOVERING, cmd->index, &ev, sizeof(ev), NULL);

	list_del(&cmd->list);

	mgmt_pending_free(cmd);
}

static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_pair_device *cp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type, io_cap;
	struct hci_conn *conn;
	struct adv_entry *entry;
	int err;

	BT_DBG("");

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);

	hci_dev_lock_bh(hdev);

	io_cap = cp->io_cap;
	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	entry = hci_find_adv_entry(hdev, &cp->bdaddr);
	if (entry && entry->flags & 0x04) {
		conn = hci_le_connect(hdev, 0, &cp->bdaddr, sec_level,
							auth_type, NULL);
	} else {
		/* ACL-SSP does not support io_cap 0x04 (KeyboardDisplay) */
		if (io_cap == 0x04)
			io_cap = 0x01;
		conn = hci_connect(hdev, ACL_LINK, 0, &cp->bdaddr, sec_level,
								auth_type);
		/* hci_connect() returns an ERR_PTR on failure; only touch
		 * the connection once we know it is a valid pointer. */
		if (!IS_ERR(conn))
			conn->auth_initiator = 1;
	}

	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_put(conn);
		err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_put(conn);
		goto unlock;
	}

	conn->connect_cfm_cb = pairing_connect_complete_cb;
	conn->security_cfm_cb = pairing_security_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = io_cap;
	cmd->user_data = conn;

	if (conn->state == BT_CONNECTED &&
			hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);

	return err;
}

static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
							u16 len, u16 opcode)
{
	struct mgmt_cp_user_confirm_reply *cp = (void *) data;
	u16 mgmt_op = opcode, hci_op;
	struct pending_cmd *cmd;
	struct hci_dev *hdev;
	struct hci_conn *le_conn;
	int err;

	BT_DBG("%d", mgmt_op);

	if (mgmt_op == MGMT_OP_USER_CONFIRM_NEG_REPLY)
		hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
	else
		hci_op = HCI_OP_USER_CONFIRM_REPLY;

	if (len < sizeof(*cp))
		return cmd_status(sk, index, mgmt_op, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, mgmt_op, ENODEV);

	hci_dev_lock_bh(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, index, mgmt_op, ENETDOWN);
		goto done;
	}

	le_conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
	if (le_conn) {
		err = le_user_confirm_reply(le_conn, mgmt_op, (void *) cp);
		goto done;
	}

	BT_DBG("BR/EDR: %s", mgmt_op == MGMT_OP_USER_CONFIRM_NEG_REPLY ?
"Reject" : "Accept"); cmd = mgmt_pending_add(sk, mgmt_op, index, data, len); if (!cmd) { err = -ENOMEM; goto done; } err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr); if (err < 0) mgmt_pending_remove(cmd); done: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int resolve_name(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_cp_resolve_name *mgmt_cp = (void *) data; struct hci_cp_remote_name_req hci_cp; struct hci_dev *hdev; struct pending_cmd *cmd; int err; BT_DBG(""); if (len != sizeof(*mgmt_cp)) return cmd_status(sk, index, MGMT_OP_RESOLVE_NAME, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_RESOLVE_NAME, ENODEV); hci_dev_lock_bh(hdev); cmd = mgmt_pending_add(sk, MGMT_OP_RESOLVE_NAME, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; } memset(&hci_cp, 0, sizeof(hci_cp)); bacpy(&hci_cp.bdaddr, &mgmt_cp->bdaddr); err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(hci_cp), &hci_cp); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int cancel_resolve_name(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_cp_cancel_resolve_name *mgmt_cp = (void *) data; struct hci_cp_remote_name_req_cancel hci_cp; struct hci_dev *hdev; int err; BT_DBG(""); if (len != sizeof(*mgmt_cp)) return cmd_status(sk, index, MGMT_OP_CANCEL_RESOLVE_NAME, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_CANCEL_RESOLVE_NAME, ENODEV); hci_dev_lock_bh(hdev); memset(&hci_cp, 0, sizeof(hci_cp)); bacpy(&hci_cp.bdaddr, &mgmt_cp->bdaddr); err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(hci_cp), &hci_cp); hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int set_connection_params(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_cp_set_connection_params *cp = (void *) data; struct hci_dev *hdev; struct hci_conn *conn; int err; BT_DBG(""); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS, ENODEV); hci_dev_lock_bh(hdev); conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr); if (!conn) { err = cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS, ENOTCONN); goto failed; } hci_le_conn_update(conn, le16_to_cpu(cp->interval_min), le16_to_cpu(cp->interval_max), le16_to_cpu(cp->slave_latency), le16_to_cpu(cp->timeout_multiplier)); err = cmd_status(sk, index, MGMT_OP_SET_CONNECTION_PARAMS, 0); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int read_tx_power_level(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_read_tx_power_level *cp = (void *) data; struct hci_cp_read_tx_power hci_cp; struct pending_cmd *cmd; struct hci_conn *conn; int err; BT_DBG("hci%u", index); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_READ_TX_POWER_LEVEL, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_READ_TX_POWER_LEVEL, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_READ_TX_POWER_LEVEL, ENETDOWN); goto unlock; } if (mgmt_pending_find(MGMT_OP_READ_TX_POWER_LEVEL, index)) { err = cmd_status(sk, index, MGMT_OP_READ_TX_POWER_LEVEL, EBUSY); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_READ_TX_POWER_LEVEL, index, data, len); if (!cmd) { err = -ENOMEM; goto 
unlock; } conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (!conn) conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr); if (!conn) { err = cmd_status(sk, index, MGMT_OP_READ_TX_POWER_LEVEL, ENOTCONN); mgmt_pending_remove(cmd); goto unlock; } put_unaligned_le16(conn->handle, &hci_cp.handle); put_unaligned_le16(cp->type, &hci_cp.type); err = hci_send_cmd(hdev, HCI_OP_READ_TX_POWER, sizeof(hci_cp), &hci_cp); if (err < 0) mgmt_pending_remove(cmd); unlock: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int set_rssi_reporter(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_cp_set_rssi_reporter *cp = (void *) data; struct hci_dev *hdev; struct hci_conn *conn; int err = 0; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_SET_RSSI_REPORTER, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_RSSI_REPORTER, ENODEV); hci_dev_lock_bh(hdev); conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr); if (!conn) { err = cmd_status(sk, index, MGMT_OP_SET_RSSI_REPORTER, ENOTCONN); goto failed; } BT_DBG("updateOnThreshExceed %d ", cp->updateOnThreshExceed); hci_conn_set_rssi_reporter(conn, cp->rssi_threshold, __le16_to_cpu(cp->interval), cp->updateOnThreshExceed); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int unset_rssi_reporter(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_cp_unset_rssi_reporter *cp = (void *) data; struct hci_dev *hdev; struct hci_conn *conn; int err = 0; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_UNSET_RSSI_REPORTER, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_UNSET_RSSI_REPORTER, ENODEV); hci_dev_lock_bh(hdev); conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr); if (!conn) { err = cmd_status(sk, index, MGMT_OP_UNSET_RSSI_REPORTER, ENOTCONN); goto failed; } hci_conn_unset_rssi_reporter(conn); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int le_cancel_create_conn(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_cp_le_cancel_create_conn *cp = (void *) data; struct hci_dev *hdev; int err = 0; if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_LE_CANCEL_CREATE_CONN, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_LE_CANCEL_CREATE_CONN, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_LE_CANCEL_CREATE_CONN, ENETDOWN); goto failed; } hci_le_cancel_create_connect(hdev, &cp->bdaddr); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int set_local_name(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct mgmt_cp_set_local_name *mgmt_cp = (void *) data; struct hci_cp_write_local_name hci_cp; struct hci_dev *hdev; struct pending_cmd *cmd; int err; BT_DBG(""); if (len != sizeof(*mgmt_cp)) return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV); hci_dev_lock_bh(hdev); cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len); if (!cmd) { err = -ENOMEM; goto failed; } memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name)); err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp), &hci_cp); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static void discovery_rsp(struct pending_cmd *cmd, void *data) { 
struct mgmt_mode ev;

	BT_DBG("");

	if (cmd->opcode == MGMT_OP_START_DISCOVERY) {
		ev.val = 1;
		cmd_status(cmd->sk, cmd->index, MGMT_OP_START_DISCOVERY, 0);
	} else {
		ev.val = 0;
		cmd_complete(cmd->sk, cmd->index, MGMT_OP_STOP_DISCOVERY,
								NULL, 0);
		if (cmd->opcode == MGMT_OP_STOP_DISCOVERY) {
			struct hci_dev *hdev = hci_dev_get(cmd->index);
			if (hdev) {
				del_timer(&hdev->disco_le_timer);
				del_timer(&hdev->disco_timer);
				hci_dev_put(hdev);
			}
		}
	}

	mgmt_event(MGMT_EV_DISCOVERING, cmd->index, &ev, sizeof(ev), NULL);

	list_del(&cmd->list);

	mgmt_pending_free(cmd);
}

void mgmt_inquiry_started(u16 index)
{
	BT_DBG("");
	mgmt_pending_foreach(MGMT_OP_START_DISCOVERY, index,
						discovery_rsp, NULL);
}

void mgmt_inquiry_complete_evt(u16 index, u8 status)
{
	struct hci_dev *hdev;
	struct hci_cp_le_set_scan_enable le_cp = {1, 0};
	struct mgmt_mode cp = {0};
	int err = -1;

	hdev = hci_dev_get(index);
	if (hdev)
		BT_DBG("disco_state: %d", hdev->disco_state);

	if (!hdev || !lmp_le_capable(hdev)) {
		mgmt_pending_foreach(MGMT_OP_STOP_DISCOVERY, index,
						discovery_terminated, NULL);
		mgmt_event(MGMT_EV_DISCOVERING, index, &cp, sizeof(cp), NULL);
		/* hdev may be NULL on this path; only dereference it when
		 * the lookup above actually succeeded. */
		if (!hdev)
			return;
		hdev->disco_state = SCAN_IDLE;
		goto done;
	}

	if (hdev->disco_state != SCAN_IDLE) {
		err = hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
						sizeof(le_cp), &le_cp);
		if (err >= 0) {
			mod_timer(&hdev->disco_le_timer, jiffies +
				msecs_to_jiffies(hdev->disco_int_phase * 1000));
			hdev->disco_state = SCAN_LE;
		} else
			hdev->disco_state = SCAN_IDLE;
	}

	if (hdev->disco_state == SCAN_IDLE)
		mgmt_event(MGMT_EV_DISCOVERING, index, &cp, sizeof(cp), NULL);

	if (err < 0)
		mgmt_pending_foreach(MGMT_OP_STOP_DISCOVERY, index,
						discovery_terminated, NULL);

done:
	hci_dev_put(hdev);
}

void mgmt_disco_timeout(unsigned long data)
{
	struct hci_dev *hdev = (void *) data;
	struct pending_cmd *cmd;
	struct mgmt_mode cp = {0};

	BT_DBG("hci%d", hdev->id);

	hdev = hci_dev_get(hdev->id);
	if (!hdev)
		return;

	hci_dev_lock_bh(hdev);
	del_timer(&hdev->disco_le_timer);

	if (hdev->disco_state != SCAN_IDLE) {
		struct hci_cp_le_set_scan_enable le_cp = {0, 0};

		if (test_bit(HCI_UP, &hdev->flags)) {
			if (hdev->disco_state == SCAN_LE)
				hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
						sizeof(le_cp), &le_cp);
			else
				hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL,
								0, NULL);
		}
		hdev->disco_state = SCAN_IDLE;
	}

	mgmt_event(MGMT_EV_DISCOVERING, hdev->id, &cp, sizeof(cp), NULL);

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev->id);
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
}

void mgmt_disco_le_timeout(unsigned long data)
{
	struct hci_dev *hdev = (void *)data;
	struct hci_cp_le_set_scan_enable le_cp = {0, 0};

	BT_DBG("hci%d", hdev->id);

	hdev = hci_dev_get(hdev->id);
	if (!hdev)
		return;

	hci_dev_lock_bh(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		if (hdev->disco_state == SCAN_LE)
			hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
						sizeof(le_cp), &le_cp);

		/* re-start BR scan */
		if (hdev->disco_state != SCAN_IDLE) {
			struct hci_cp_inquiry cp = {{0x33, 0x8b, 0x9e}, 4, 0};

			hdev->disco_int_phase *= 2;
			hdev->disco_int_count = 0;
			cp.num_rsp = (u8) hdev->disco_int_phase;
			hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
			hdev->disco_state = SCAN_BR;
		}
	}

	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
}

static int start_discovery(struct sock *sk, u16 index)
{
	struct hci_cp_inquiry cp = {{0x33, 0x8b, 0x9e}, 8, 0};
	struct hci_dev *hdev;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);

	BT_DBG("disco_state: %d", hdev->disco_state);
hci_dev_lock_bh(hdev); if (hdev->disco_state && timer_pending(&hdev->disco_timer)) { err = -EBUSY; goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0); if (!cmd) { err = -ENOMEM; goto failed; } /* If LE Capable, we will alternate between BR/EDR and LE */ if (lmp_le_capable(hdev)) { struct hci_cp_le_set_scan_parameters le_cp; /* Shorten BR scan params */ cp.num_rsp = 1; cp.length /= 2; /* Setup LE scan params */ memset(&le_cp, 0, sizeof(le_cp)); le_cp.type = 0x01; /* Active scanning */ /* The recommended value for scan interval and window is * 11.25 msec. It is calculated by: time = n * 0.625 msec */ le_cp.interval = cpu_to_le16(0x0012); le_cp.window = cpu_to_le16(0x0012); le_cp.own_bdaddr_type = 0; /* Public address */ le_cp.filter = 0; /* Accept all adv packets */ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAMETERS, sizeof(le_cp), &le_cp); } err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); if (err < 0) { mgmt_pending_remove(cmd); hdev->disco_state = SCAN_IDLE; } else if (lmp_le_capable(hdev)) { cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index); if (!cmd) mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0); hdev->disco_int_phase = 1; hdev->disco_int_count = 0; hdev->disco_state = SCAN_BR; del_timer(&hdev->disco_le_timer); del_timer(&hdev->disco_timer); mod_timer(&hdev->disco_timer, jiffies + msecs_to_jiffies(20000)); } else hdev->disco_state = SCAN_BR; failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); if (err < 0) return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, -err); return err; } static int stop_discovery(struct sock *sk, u16 index) { struct hci_cp_le_set_scan_enable le_cp = {0, 0}; struct mgmt_mode mode_cp = {0}; struct hci_dev *hdev; struct pending_cmd *cmd = NULL; int err = -EPERM; u8 state; BT_DBG(""); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV); BT_DBG("disco_state: %d", hdev->disco_state); hci_dev_lock_bh(hdev); state = hdev->disco_state; hdev->disco_state = SCAN_IDLE; del_timer(&hdev->disco_le_timer); del_timer(&hdev->disco_timer); if (state == SCAN_LE) { err = hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(le_cp), &le_cp); if (err >= 0) { mgmt_pending_foreach(MGMT_OP_STOP_DISCOVERY, index, discovery_terminated, NULL); err = cmd_complete(sk, index, MGMT_OP_STOP_DISCOVERY, NULL, 0); } } else if (state == SCAN_BR) err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index); if (err < 0 && cmd) mgmt_pending_remove(cmd); mgmt_event(MGMT_EV_DISCOVERING, index, &mode_cp, sizeof(mode_cp), NULL); hci_dev_unlock_bh(hdev); hci_dev_put(hdev); if (err < 0) return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, -err); else return err; } static int read_local_oob_data(struct sock *sk, u16 index) { struct hci_dev *hdev; struct pending_cmd *cmd; int err; BT_DBG("hci%u", index); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, ENODEV); hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, ENETDOWN); goto unlock; } if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EOPNOTSUPP); goto unlock; } if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) { err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0); if (!cmd) { err = -ENOMEM; goto unlock; } err = hci_send_cmd(hdev, 
HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); if (err < 0) mgmt_pending_remove(cmd); unlock: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_add_remote_oob_data *cp = (void *) data; int err; BT_DBG("hci%u ", index); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, ENODEV); hci_dev_lock_bh(hdev); err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, cp->randomizer); if (err < 0) err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err); else err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, 0); hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int remove_remote_oob_data(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; struct mgmt_cp_remove_remote_oob_data *cp = (void *) data; int err; BT_DBG("hci%u ", index); if (len != sizeof(*cp)) return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, EINVAL); hdev = hci_dev_get(index); if (!hdev) return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, ENODEV); hci_dev_lock_bh(hdev); err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); if (err < 0) err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, -err); else err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, NULL, 0); hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) { unsigned char *buf; struct mgmt_hdr *hdr; u16 opcode, index, len; int err; BT_DBG("got %zu bytes", msglen); if (msglen < sizeof(*hdr)) return -EINVAL; buf = kmalloc(msglen, GFP_KERNEL); if (!buf) return -ENOMEM; if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) { err = -EFAULT; goto done; } hdr = (struct mgmt_hdr *) buf; opcode = get_unaligned_le16(&hdr->opcode); index = get_unaligned_le16(&hdr->index); len = get_unaligned_le16(&hdr->len); if (len != msglen - sizeof(*hdr)) { err = -EINVAL; goto done; } BT_DBG("got opcode %x", opcode); switch (opcode) { case MGMT_OP_READ_VERSION: err = read_version(sk); break; case MGMT_OP_READ_INDEX_LIST: err = read_index_list(sk); break; case MGMT_OP_READ_INFO: err = read_controller_info(sk, index); break; case MGMT_OP_SET_POWERED: err = set_powered(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_SET_DISCOVERABLE: err = set_discoverable(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_SET_LIMIT_DISCOVERABLE: err = set_limited_discoverable(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_SET_CONNECTABLE: err = set_connectable(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_SET_PAIRABLE: err = set_pairable(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_ADD_UUID: err = add_uuid(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_REMOVE_UUID: err = remove_uuid(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_SET_DEV_CLASS: err = set_dev_class(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_SET_SERVICE_CACHE: err = set_service_cache(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_LOAD_KEYS: err = load_keys(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_REMOVE_KEY: err = remove_key(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_DISCONNECT: err = disconnect(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_GET_CONNECTIONS: err = get_connections(sk, index); break; case 
MGMT_OP_PIN_CODE_REPLY: err = pin_code_reply(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_PIN_CODE_NEG_REPLY: err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_SET_IO_CAPABILITY: err = set_io_capability(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_PAIR_DEVICE: err = pair_device(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_USER_CONFIRM_REPLY: case MGMT_OP_USER_PASSKEY_REPLY: case MGMT_OP_USER_CONFIRM_NEG_REPLY: err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, opcode); break; case MGMT_OP_SET_LOCAL_NAME: err = set_local_name(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_START_DISCOVERY: err = start_discovery(sk, index); break; case MGMT_OP_STOP_DISCOVERY: err = stop_discovery(sk, index); break; case MGMT_OP_RESOLVE_NAME: err = resolve_name(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_CANCEL_RESOLVE_NAME: err = cancel_resolve_name(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_SET_CONNECTION_PARAMS: err = set_connection_params(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_SET_RSSI_REPORTER: err = set_rssi_reporter(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_UNSET_RSSI_REPORTER: err = unset_rssi_reporter(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_READ_LOCAL_OOB_DATA: err = read_local_oob_data(sk, index); break; case MGMT_OP_ADD_REMOTE_OOB_DATA: err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_REMOVE_REMOTE_OOB_DATA: err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_ENCRYPT_LINK: err = encrypt_link(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_LE_ADD_DEV_WHITE_LIST: err = le_add_dev_white_list(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_LE_REMOVE_DEV_WHITE_LIST: err = le_remove_dev_white_list(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_LE_CLEAR_WHITE_LIST: err = le_clear_white_list(sk, index); break; case MGMT_OP_LE_CREATE_CONN_WHITE_LIST: err = le_create_conn_white_list(sk, index); break; case MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST: err = le_cancel_create_conn_white_list(sk, index); break; case MGMT_OP_LE_CANCEL_CREATE_CONN: err = le_cancel_create_conn(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_READ_TX_POWER_LEVEL: err = read_tx_power_level(sk, index, buf + sizeof(*hdr), len); break; default: BT_DBG("Unknown op %u", opcode); err = cmd_status(sk, index, opcode, 0x01); break; } if (err < 0) goto done; err = msglen; done: kfree(buf); return err; } static void cmd_status_rsp(struct pending_cmd *cmd, void *data) { u8 *status = data; cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); mgmt_pending_remove(cmd); } int mgmt_index_added(u16 index) { BT_DBG("%d", index); return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL); } int mgmt_index_removed(u16 index) { u8 status = ENODEV; BT_DBG("%d", index); mgmt_pending_foreach(0, index, cmd_status_rsp, &status); return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL); } struct cmd_lookup { u8 val; struct sock *sk; }; static void mode_rsp(struct pending_cmd *cmd, void *data) { struct mgmt_mode *cp = cmd->param; struct cmd_lookup *match = data; if (cp->val != match->val) return; send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val); list_del(&cmd->list); if (match->sk == NULL) { match->sk = cmd->sk; sock_hold(match->sk); } mgmt_pending_free(cmd); } int mgmt_powered(u16 index, u8 powered) { struct mgmt_mode ev; struct cmd_lookup match = { powered, NULL }; int ret; 
BT_DBG("hci%u %d", index, powered); mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match); if (!powered) { u8 status = ENETDOWN; mgmt_pending_foreach(0, index, cmd_status_rsp, &status); } ev.val = powered; ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk); if (match.sk) sock_put(match.sk); return ret; } int mgmt_set_powered_failed(struct hci_dev *hdev, int err) { struct pending_cmd *cmd; u8 status; cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev->id); if (!cmd) return -ENOENT; if (err == -ERFKILL) status = MGMT_STATUS_RFKILLED; else status = MGMT_STATUS_FAILED; err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); mgmt_pending_remove(cmd); return err; } int mgmt_discoverable(u16 index, u8 discoverable) { struct mgmt_mode ev; struct cmd_lookup match = { discoverable, NULL }; int ret; mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match); ev.val = discoverable; ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev), match.sk); if (match.sk) sock_put(match.sk); return ret; } int mgmt_connectable(u16 index, u8 connectable) { struct mgmt_mode ev; struct cmd_lookup match = { connectable, NULL }; int ret; mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match); ev.val = connectable; ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk); if (match.sk) sock_put(match.sk); return ret; } int mgmt_new_key(u16 index, struct link_key *key, u8 bonded) { struct mgmt_ev_new_key *ev; int err, total; total = sizeof(struct mgmt_ev_new_key) + key->dlen; ev = kzalloc(total, GFP_ATOMIC); if (!ev) return -ENOMEM; bacpy(&ev->key.bdaddr, &key->bdaddr); ev->key.addr_type = key->addr_type; ev->key.key_type = key->key_type; memcpy(ev->key.val, key->val, 16); ev->key.pin_len = key->pin_len; ev->key.auth = key->auth; ev->store_hint = bonded; ev->key.dlen = key->dlen; memcpy(ev->key.data, key->data, key->dlen); err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL); kfree(ev); return err; } int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 le) { struct mgmt_ev_connected ev; struct pending_cmd *cmd; struct hci_dev *hdev; BT_DBG("hci%u", index); hdev = hci_dev_get(index); if (!hdev) return -ENODEV; bacpy(&ev.bdaddr, bdaddr); ev.le = le; cmd = mgmt_pending_find(MGMT_OP_LE_CREATE_CONN_WHITE_LIST, index); if (cmd) { BT_ERR("mgmt_connected remove mgmt pending white_list"); mgmt_pending_remove(cmd); } return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL); } int mgmt_le_conn_params(u16 index, bdaddr_t *bdaddr, u16 interval, u16 latency, u16 timeout) { struct mgmt_ev_le_conn_params ev; bacpy(&ev.bdaddr, bdaddr); ev.interval = interval; ev.latency = latency; ev.timeout = timeout; return mgmt_event(MGMT_EV_LE_CONN_PARAMS, index, &ev, sizeof(ev), NULL); } static void disconnect_rsp(struct pending_cmd *cmd, void *data) { struct mgmt_cp_disconnect *cp = cmd->param; struct sock **sk = data; struct mgmt_rp_disconnect rp; bacpy(&rp.bdaddr, &cp->bdaddr); cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp)); *sk = cmd->sk; sock_hold(*sk); mgmt_pending_remove(cmd); } int mgmt_disconnected(u16 index, bdaddr_t *bdaddr, u8 reason) { struct mgmt_ev_disconnected ev; struct sock *sk = NULL; int err; bacpy(&ev.bdaddr, bdaddr); ev.reason = reason; err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk); if (sk) sock_put(sk); mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk); return err; } int mgmt_disconnect_failed(u16 index) { struct pending_cmd *cmd; int err; cmd = 
mgmt_pending_find(MGMT_OP_DISCONNECT, index); if (!cmd) return -ENOENT; err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO); mgmt_pending_remove(cmd); return err; } int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status) { struct mgmt_ev_connect_failed ev; bacpy(&ev.bdaddr, bdaddr); ev.status = status; return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL); } int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr) { struct mgmt_ev_pin_code_request ev; BT_DBG("hci%u", index); bacpy(&ev.bdaddr, bdaddr); ev.secure = 0; return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev), NULL); } int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) { struct pending_cmd *cmd; struct mgmt_rp_pin_code_reply rp; int err; cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index); if (!cmd) return -ENOENT; bacpy(&rp.bdaddr, bdaddr); rp.status = status; err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp, sizeof(rp)); mgmt_pending_remove(cmd); return err; } int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) { struct pending_cmd *cmd; struct mgmt_rp_pin_code_reply rp; int err; cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index); if (!cmd) return -ENOENT; bacpy(&rp.bdaddr, bdaddr); rp.status = status; err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp, sizeof(rp)); mgmt_pending_remove(cmd); return err; } int mgmt_user_confirm_request(u16 index, u8 event, bdaddr_t *bdaddr, __le32 value) { struct mgmt_ev_user_confirm_request ev; struct hci_conn *conn = NULL; struct hci_dev *hdev; u8 loc_cap, rem_cap, loc_mitm, rem_mitm; BT_DBG("hci%u", index); hdev = hci_dev_get(index); if (!hdev) return -ENODEV; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr); ev.auto_confirm = 0; if (!conn || event != HCI_EV_USER_CONFIRM_REQUEST) goto no_auto_confirm; loc_cap = (conn->io_capability == 0x04) ? 
0x01 : conn->io_capability; rem_cap = conn->remote_cap; loc_mitm = conn->auth_type & 0x01; rem_mitm = conn->remote_auth & 0x01; if ((conn->auth_type & HCI_AT_DEDICATED_BONDING) && conn->auth_initiator && rem_cap == 0x03) ev.auto_confirm = 1; else if (loc_cap == 0x01 && (rem_cap == 0x00 || rem_cap == 0x03)) { if (!loc_mitm && !rem_mitm) value = 0; goto no_auto_confirm; } /* Show bonding dialog if neither side requires no bonding */ if ((conn->auth_type > 0x01) && (conn->remote_auth > 0x01)) { if (!loc_mitm && !rem_mitm) value = 0; goto no_auto_confirm; } if ((!loc_mitm || rem_cap == 0x03) && (!rem_mitm || loc_cap == 0x03)) ev.auto_confirm = 1; no_auto_confirm: bacpy(&ev.bdaddr, bdaddr); ev.event = event; put_unaligned_le32(value, &ev.value); hci_dev_put(hdev); return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev), NULL); } int mgmt_user_passkey_request(u16 index, bdaddr_t *bdaddr) { struct mgmt_ev_user_passkey_request ev; BT_DBG("hci%u", index); bacpy(&ev.bdaddr, bdaddr); return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, index, &ev, sizeof(ev), NULL); } static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status, u8 opcode) { struct pending_cmd *cmd; struct mgmt_rp_user_confirm_reply rp; int err; cmd = mgmt_pending_find(opcode, index); if (!cmd) return -ENOENT; bacpy(&rp.bdaddr, bdaddr); rp.status = status; err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp)); mgmt_pending_remove(cmd); return err; } int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) { return confirm_reply_complete(index, bdaddr, status, MGMT_OP_USER_CONFIRM_REPLY); } int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) { return confirm_reply_complete(index, bdaddr, status, MGMT_OP_USER_CONFIRM_NEG_REPLY); } int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status) { struct mgmt_ev_auth_failed ev; bacpy(&ev.bdaddr, bdaddr); ev.status = status; return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL); } int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status) { struct pending_cmd *cmd; struct hci_dev *hdev; struct mgmt_cp_set_local_name ev; int err; memset(&ev, 0, sizeof(ev)); memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index); if (!cmd) goto send_event; if (status) { err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO); goto failed; } hdev = hci_dev_get(index); if (hdev) { update_eir(hdev); hci_dev_put(hdev); } err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev, sizeof(ev)); if (err < 0) goto failed; send_event: err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev), cmd ? 
cmd->sk : NULL);

failed:
	if (cmd)
		mgmt_pending_remove(cmd);

	return err;
}

int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash,
						u8 *randomizer, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("hci%u status %u", index, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
	if (!cmd)
		return -ENOENT;

	if (status) {
		err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
									EIO);
	} else {
		struct mgmt_rp_read_local_oob_data rp;

		memcpy(rp.hash, hash, sizeof(rp.hash));
		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));

		err = cmd_complete(cmd->sk, index,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					&rp, sizeof(rp));
	}

	mgmt_pending_remove(cmd);

	return err;
}

void mgmt_read_rssi_complete(u16 index, s8 rssi, bdaddr_t *bdaddr,
						u16 handle, u8 status)
{
	struct mgmt_ev_rssi_update ev;
	struct hci_conn *conn;
	struct hci_dev *hdev;

	if (status)
		return;

	hdev = hci_dev_get(index);
	/* Guard against a vanished controller and release the reference
	 * on every exit path. */
	if (!hdev)
		return;

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		hci_dev_put(hdev);
		return;
	}

	BT_DBG("rssi_update_thresh_exceed : %d ",
					conn->rssi_update_thresh_exceed);
	BT_DBG("RSSI Threshold : %d , recvd RSSI : %d ",
					conn->rssi_threshold, rssi);

	if (conn->rssi_update_thresh_exceed == 1) {
		BT_DBG("rssi_update_thresh_exceed == 1");
		if (rssi > conn->rssi_threshold) {
			memset(&ev, 0, sizeof(ev));
			bacpy(&ev.bdaddr, bdaddr);
			ev.rssi = rssi;
			mgmt_event(MGMT_EV_RSSI_UPDATE, index, &ev,
							sizeof(ev), NULL);
		} else {
			hci_conn_set_rssi_reporter(conn, conn->rssi_threshold,
					conn->rssi_update_interval,
					conn->rssi_update_thresh_exceed);
		}
	} else {
		BT_DBG("rssi_update_thresh_exceed == 0");
		if (rssi < conn->rssi_threshold) {
			memset(&ev, 0, sizeof(ev));
			bacpy(&ev.bdaddr, bdaddr);
			ev.rssi = rssi;
			mgmt_event(MGMT_EV_RSSI_UPDATE, index, &ev,
							sizeof(ev), NULL);
		} else {
			hci_conn_set_rssi_reporter(conn, conn->rssi_threshold,
					conn->rssi_update_interval,
					conn->rssi_update_thresh_exceed);
		}
	}

	hci_dev_put(hdev);
}

int mgmt_read_tx_power_failed(u16 index)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_READ_TX_POWER_LEVEL, index);
	if (!cmd)
		return -ENOENT;

	err = cmd_status(cmd->sk, index, MGMT_OP_READ_TX_POWER_LEVEL, EIO);

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_read_tx_power_complete(u16 index, bdaddr_t *bdaddr, s8 level,
								u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_read_tx_power_level rp;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_READ_TX_POWER_LEVEL, index);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.bdaddr, bdaddr);
	rp.status = status;
	rp.level = level;

	err = cmd_complete(cmd->sk, index, MGMT_OP_READ_TX_POWER_LEVEL,
							&rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 link_type,
			u8 addr_type, u8 le, u8 *dev_class, s8 rssi,
			u8 eir_len, u8 *eir)
{
	struct mgmt_ev_device_found ev;
	struct hci_dev *hdev;
	int err;

	BT_DBG("le: %d", le);

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_mgmt(link_type, addr_type);
	ev.rssi = rssi;
	ev.le = le;

	if (dev_class)
		memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));

	if (eir && eir_len)
		memcpy(ev.eir, eir, eir_len);

	err = mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
	if (err < 0)
		return err;

	hdev = hci_dev_get(index);
	if (!hdev)
		return 0;

	if (hdev->disco_state == SCAN_IDLE)
		goto done;

	hdev->disco_int_count++;

	if (hdev->disco_int_count >= hdev->disco_int_phase) {
		/* Inquiry scan for General Discovery LAP */
		struct hci_cp_inquiry cp = {{0x33, 0x8b, 0x9e}, 4, 0};
		struct hci_cp_le_set_scan_enable le_cp = {0, 0};

		hdev->disco_int_phase *= 2;
		hdev->disco_int_count = 0;
		if (hdev->disco_state == SCAN_LE) {
			/* cancel
LE scan */ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(le_cp), &le_cp); /* start BR scan */ cp.num_rsp = (u8) hdev->disco_int_phase; hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); hdev->disco_state = SCAN_BR; del_timer_sync(&hdev->disco_le_timer); } } done: hci_dev_put(hdev); return 0; } int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 status, u8 *name) { struct mgmt_ev_remote_name ev; memset(&ev, 0, sizeof(ev)); bacpy(&ev.bdaddr, bdaddr); ev.status = status; memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL); } int mgmt_encrypt_change(u16 index, bdaddr_t *bdaddr, u8 status) { struct mgmt_ev_encrypt_change ev; BT_DBG("hci%u", index); bacpy(&ev.bdaddr, bdaddr); ev.status = status; return mgmt_event(MGMT_EV_ENCRYPT_CHANGE, index, &ev, sizeof(ev), NULL); } int mgmt_remote_class(u16 index, bdaddr_t *bdaddr, u8 dev_class[3]) { struct mgmt_ev_remote_class ev; memset(&ev, 0, sizeof(ev)); bacpy(&ev.bdaddr, bdaddr); memcpy(ev.dev_class, dev_class, 3); return mgmt_event(MGMT_EV_REMOTE_CLASS, index, &ev, sizeof(ev), NULL); } int mgmt_remote_version(u16 index, bdaddr_t *bdaddr, u8 ver, u16 mnf, u16 sub_ver) { struct mgmt_ev_remote_version ev; memset(&ev, 0, sizeof(ev)); bacpy(&ev.bdaddr, bdaddr); ev.lmp_ver = ver; ev.manufacturer = mnf; ev.lmp_subver = sub_ver; return mgmt_event(MGMT_EV_REMOTE_VERSION, index, &ev, sizeof(ev), NULL); } int mgmt_remote_features(u16 index, bdaddr_t *bdaddr, u8 features[8]) { struct mgmt_ev_remote_features ev; memset(&ev, 0, sizeof(ev)); bacpy(&ev.bdaddr, bdaddr); memcpy(ev.features, features, sizeof(ev.features)); return mgmt_event(MGMT_EV_REMOTE_FEATURES, index, &ev, sizeof(ev), NULL); }
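The dispatcher in mgmt_control() above implies a simple wire format for management commands: a six-byte header of three little-endian u16 fields (opcode, controller index, parameter length) read with get_unaligned_le16(), followed by exactly len bytes of parameters. The standalone userspace sketch below frames such a command; it is illustrative only and not part of mgmt.c, and the helper name build_mgmt_cmd and the one-byte mode payload are assumptions for the example.

#include <stdint.h>
#include <string.h>

/* Mirrors the header fields mgmt_control() reads with get_unaligned_le16(). */
struct mgmt_hdr_le {
	uint8_t opcode[2];	/* management opcode, little-endian */
	uint8_t index[2];	/* controller index, little-endian */
	uint8_t len[2];		/* parameter length, little-endian */
};

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;	/* low byte first: little-endian */
	p[1] = v >> 8;
}

/* Frame a command carrying a single one-byte mode parameter, as the
 * mgmt_mode-style commands above expect. Returns the total length. */
static size_t build_mgmt_cmd(uint8_t *buf, uint16_t opcode, uint16_t index,
								uint8_t val)
{
	struct mgmt_hdr_le hdr;

	put_le16(hdr.opcode, opcode);
	put_le16(hdr.index, index);
	put_le16(hdr.len, 1);	/* one byte of parameters */

	memcpy(buf, &hdr, sizeof(hdr));
	buf[sizeof(hdr)] = val;

	/* mgmt_control() rejects the message unless len == msglen - sizeof(hdr) */
	return sizeof(hdr) + 1;
}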
BrateloSlava/kernel_apq8064
net/bluetooth/mgmt.c
C
gpl-2.0
77,823
/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2008 Luis R. Rodriguez <lrodriguz@atheros.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /** * DOC: Wireless regulatory infrastructure * * The usual implementation is for a driver to read a device EEPROM to * determine which regulatory domain it should be operating under, then * looking up the allowable channels in a driver-local table and finally * registering those channels in the wiphy structure. * * Another set of compliance enforcement is for drivers to use their * own compliance limits which can be stored on the EEPROM. The host * driver or firmware may ensure these are used. * * In addition to all this we provide an extra layer of regulatory * conformance. For drivers which do not have any regulatory * information CRDA provides the complete regulatory solution. * For others it provides a community effort on further restrictions * to enhance compliance. * * Note: When number of rules --> infinity we will not be able to * index on alpha2 any more, instead we'll probably have to * rely on some SHA1 checksum of the regdomain for example. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/random.h> #include <linux/ctype.h> #include <linux/nl80211.h> #include <linux/platform_device.h> #include <linux/moduleparam.h> #include <net/cfg80211.h> #include "core.h" #include "reg.h" #include "regdb.h" #include "nl80211.h" #ifdef CONFIG_CFG80211_REG_DEBUG #define REG_DBG_PRINT(format, args...) \ printk(KERN_DEBUG pr_fmt(format), ##args) #else #define REG_DBG_PRINT(args...) 
#endif static struct regulatory_request core_request_world = { .initiator = NL80211_REGDOM_SET_BY_CORE, .alpha2[0] = '0', .alpha2[1] = '0', .intersect = false, .processed = true, .country_ie_env = ENVIRON_ANY, }; /* Receipt of information from last regulatory request */ static struct regulatory_request *last_request = &core_request_world; /* To trigger userspace events */ static struct platform_device *reg_pdev; static struct device_type reg_device_type = { .uevent = reg_device_uevent, }; /* * Central wireless core regulatory domains, we only need two, * the current one and a world regulatory domain in case we have no * information to give us an alpha2 */ const struct ieee80211_regdomain *cfg80211_regdomain; /* * Protects static reg.c components: * - cfg80211_world_regdom * - cfg80211_regdom * - last_request */ static DEFINE_MUTEX(reg_mutex); static inline void assert_reg_lock(void) { lockdep_assert_held(&reg_mutex); } /* Used to queue up regulatory hints */ static LIST_HEAD(reg_requests_list); static spinlock_t reg_requests_lock; /* Used to queue up beacon hints for review */ static LIST_HEAD(reg_pending_beacons); static spinlock_t reg_pending_beacons_lock; /* Used to keep track of processed beacon hints */ static LIST_HEAD(reg_beacon_list); struct reg_beacon { struct list_head list; struct ieee80211_channel chan; }; static void reg_todo(struct work_struct *work); static DECLARE_WORK(reg_work, reg_todo); static void reg_timeout_work(struct work_struct *work); static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work); /* We keep a static world regulatory domain in case of the absence of CRDA */ static const struct ieee80211_regdomain world_regdom = { .n_reg_rules = 5, .alpha2 = "00", .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), /* IEEE 802.11b/g, channels 12..13. No HT40 * channel fits here. 
	 */
	REG_RULE(2467-10, 2472+10, 20, 6, 20,
		NL80211_RRF_PASSIVE_SCAN |
		NL80211_RRF_NO_IBSS),
	/* IEEE 802.11 channel 14 - Only JP enables
	 * this and for 802.11b only */
	REG_RULE(2484-10, 2484+10, 20, 6, 20,
		NL80211_RRF_PASSIVE_SCAN |
		NL80211_RRF_NO_IBSS |
		NL80211_RRF_NO_OFDM),
	/* IEEE 802.11a, channel 36..48 */
	REG_RULE(5180-10, 5240+10, 40, 6, 20,
		NL80211_RRF_PASSIVE_SCAN |
		NL80211_RRF_NO_IBSS),

	/* NB: 5260 MHz - 5700 MHz requires DFS */

	/* IEEE 802.11a, channel 149..165 */
	REG_RULE(5745-10, 5825+10, 40, 6, 20,
		NL80211_RRF_PASSIVE_SCAN |
		NL80211_RRF_NO_IBSS),
	}
};

static const struct ieee80211_regdomain *cfg80211_world_regdom =
	&world_regdom;

static char *ieee80211_regdom = "00";
static char user_alpha2[2];

module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");

static void reset_regdomains(bool full_reset)
{
	/* avoid freeing static information or freeing something twice */
	if (cfg80211_regdomain == cfg80211_world_regdom)
		cfg80211_regdomain = NULL;
	if (cfg80211_world_regdom == &world_regdom)
		cfg80211_world_regdom = NULL;
	if (cfg80211_regdomain == &world_regdom)
		cfg80211_regdomain = NULL;

	kfree(cfg80211_regdomain);
	kfree(cfg80211_world_regdom);

	cfg80211_world_regdom = &world_regdom;
	cfg80211_regdomain = NULL;

	if (!full_reset)
		return;

	if (last_request != &core_request_world)
		kfree(last_request);

	last_request = &core_request_world;
}

/*
 * Dynamic world regulatory domain requested by the wireless
 * core upon initialization
 */
static void update_world_regdomain(const struct ieee80211_regdomain *rd)
{
	BUG_ON(!last_request);

	reset_regdomains(false);

	cfg80211_world_regdom = rd;
	cfg80211_regdomain = rd;
}

bool is_world_regdom(const char *alpha2)
{
	if (!alpha2)
		return false;
	if (alpha2[0] == '0' && alpha2[1] == '0')
		return true;
	return false;
}

static bool is_alpha2_set(const char *alpha2)
{
	if (!alpha2)
		return false;
	if (alpha2[0] != 0 && alpha2[1] != 0)
		return true;
	return false;
}

static bool is_unknown_alpha2(const char *alpha2)
{
	if (!alpha2)
		return false;
	/*
	 * Special case where regulatory domain was built by driver
	 * but a specific alpha2 cannot be determined
	 */
	if (alpha2[0] == '9' && alpha2[1] == '9')
		return true;
	return false;
}

static bool is_intersected_alpha2(const char *alpha2)
{
	if (!alpha2)
		return false;
	/*
	 * Special case where regulatory domain is the
	 * result of an intersection between two regulatory domain
	 * structures
	 */
	if (alpha2[0] == '9' && alpha2[1] == '8')
		return true;
	return false;
}

static bool is_an_alpha2(const char *alpha2)
{
	if (!alpha2)
		return false;
	if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
		return true;
	return false;
}

static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)
{
	if (!alpha2_x || !alpha2_y)
		return false;
	if (alpha2_x[0] == alpha2_y[0] && alpha2_x[1] == alpha2_y[1])
		return true;
	return false;
}

static bool regdom_changes(const char *alpha2)
{
	assert_cfg80211_lock();

	if (!cfg80211_regdomain)
		return true;
	if (alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
		return false;
	return true;
}

/*
 * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached, this lets
 * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER
 * has ever been issued.
*/ static bool is_user_regdom_saved(void) { if (user_alpha2[0] == '9' && user_alpha2[1] == '7') return false; /* This would indicate a mistake on the design */ if (WARN((!is_world_regdom(user_alpha2) && !is_an_alpha2(user_alpha2)), "Unexpected user alpha2: %c%c\n", user_alpha2[0], user_alpha2[1])) return false; return true; } static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd, const struct ieee80211_regdomain *src_regd) { struct ieee80211_regdomain *regd; int size_of_regd = 0; unsigned int i; size_of_regd = sizeof(struct ieee80211_regdomain) + ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule)); regd = kzalloc(size_of_regd, GFP_KERNEL); if (!regd) return -ENOMEM; memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); for (i = 0; i < src_regd->n_reg_rules; i++) memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i], sizeof(struct ieee80211_reg_rule)); *dst_regd = regd; return 0; } #ifdef CONFIG_CFG80211_INTERNAL_REGDB struct reg_regdb_search_request { char alpha2[2]; struct list_head list; }; static LIST_HEAD(reg_regdb_search_list); static DEFINE_MUTEX(reg_regdb_search_mutex); static void reg_regdb_search(struct work_struct *work) { struct reg_regdb_search_request *request; const struct ieee80211_regdomain *curdom, *regdom; int i, r; mutex_lock(&reg_regdb_search_mutex); while (!list_empty(&reg_regdb_search_list)) { request = list_first_entry(&reg_regdb_search_list, struct reg_regdb_search_request, list); list_del(&request->list); for (i=0; i<reg_regdb_size; i++) { curdom = reg_regdb[i]; if (!memcmp(request->alpha2, curdom->alpha2, 2)) { r = reg_copy_regd(&regdom, curdom); if (r) break; mutex_lock(&cfg80211_mutex); set_regdom(regdom); mutex_unlock(&cfg80211_mutex); break; } } kfree(request); } mutex_unlock(&reg_regdb_search_mutex); } static DECLARE_WORK(reg_regdb_work, reg_regdb_search); static void reg_regdb_query(const char *alpha2) { struct reg_regdb_search_request *request; if (!alpha2) return; request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL); if (!request) return; memcpy(request->alpha2, alpha2, 2); mutex_lock(&reg_regdb_search_mutex); list_add_tail(&request->list, &reg_regdb_search_list); mutex_unlock(&reg_regdb_search_mutex); schedule_work(&reg_regdb_work); } /* Feel free to add any other sanity checks here */ static void reg_regdb_size_check(void) { /* We should ideally BUILD_BUG_ON() but then random builds would fail */ WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it..."); } #else static inline void reg_regdb_size_check(void) {} static inline void reg_regdb_query(const char *alpha2) {} #endif /* CONFIG_CFG80211_INTERNAL_REGDB */ /* * This lets us keep regulatory code which is updated on a regulatory * basis in userspace. 
Country information is filled in by * reg_device_uevent */ static int call_crda(const char *alpha2) { if (!is_world_regdom((char *) alpha2)) pr_info("Calling CRDA for country: %c%c\n", alpha2[0], alpha2[1]); else pr_info("Calling CRDA to update world regulatory domain\n"); /* query internal regulatory database (if it exists) */ reg_regdb_query(alpha2); return kobject_uevent(&reg_pdev->dev.kobj, KOBJ_CHANGE); } /* Used by nl80211 before kmalloc'ing our regulatory domain */ bool reg_is_valid_request(const char *alpha2) { assert_cfg80211_lock(); if (!last_request) return false; return alpha2_equal(last_request->alpha2, alpha2); } /* Sanity check on a regulatory rule */ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule) { const struct ieee80211_freq_range *freq_range = &rule->freq_range; u32 freq_diff; if (freq_range->start_freq_khz <= 0 || freq_range->end_freq_khz <= 0) return false; if (freq_range->start_freq_khz > freq_range->end_freq_khz) return false; freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; if (freq_range->end_freq_khz <= freq_range->start_freq_khz || freq_range->max_bandwidth_khz > freq_diff) return false; return true; } static bool is_valid_rd(const struct ieee80211_regdomain *rd) { const struct ieee80211_reg_rule *reg_rule = NULL; unsigned int i; if (!rd->n_reg_rules) return false; if (WARN_ON(rd->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) return false; for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; if (!is_valid_reg_rule(reg_rule)) return false; } return true; } static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range, u32 center_freq_khz, u32 bw_khz) { u32 start_freq_khz, end_freq_khz; start_freq_khz = center_freq_khz - (bw_khz/2); end_freq_khz = center_freq_khz + (bw_khz/2); if (start_freq_khz >= freq_range->start_freq_khz && end_freq_khz <= freq_range->end_freq_khz) return true; return false; } /** * freq_in_rule_band - tells us if a frequency is in a frequency band * @freq_range: frequency rule we want to query * @freq_khz: frequency we are inquiring about * * This lets us know if a specific frequency rule is or is not relevant to * a specific frequency's band. Bands are device specific and artificial * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is * safe for now to assume that a frequency rule should not be part of a * frequency's band if the start freq or end freq are off by more than 2 GHz. * This resolution can be lowered and should be considered as we add * regulatory rule support for other "bands". 
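 *
 * Example: a 2.4 GHz channel at 2412 MHz (2412000 kHz) is within 2 GHz of a
 * rule spanning 2402000-2482000 kHz, so that rule counts as being in the
 * channel's band.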
 **/
static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
			      u32 freq_khz)
{
#define ONE_GHZ_IN_KHZ 1000000
	if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
		return true;
	if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
		return true;
	return false;
#undef ONE_GHZ_IN_KHZ
}

/*
 * Helper for regdom_intersect(), this does the real
 * mathematical intersection fun
 */
static int reg_rules_intersect(
	const struct ieee80211_reg_rule *rule1,
	const struct ieee80211_reg_rule *rule2,
	struct ieee80211_reg_rule *intersected_rule)
{
	const struct ieee80211_freq_range *freq_range1, *freq_range2;
	struct ieee80211_freq_range *freq_range;
	const struct ieee80211_power_rule *power_rule1, *power_rule2;
	struct ieee80211_power_rule *power_rule;
	u32 freq_diff;

	freq_range1 = &rule1->freq_range;
	freq_range2 = &rule2->freq_range;
	freq_range = &intersected_rule->freq_range;

	power_rule1 = &rule1->power_rule;
	power_rule2 = &rule2->power_rule;
	power_rule = &intersected_rule->power_rule;

	freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
		freq_range2->start_freq_khz);
	freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
		freq_range2->end_freq_khz);
	freq_range->max_bandwidth_khz = min(freq_range1->max_bandwidth_khz,
		freq_range2->max_bandwidth_khz);

	freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
	if (freq_range->max_bandwidth_khz > freq_diff)
		freq_range->max_bandwidth_khz = freq_diff;

	power_rule->max_eirp = min(power_rule1->max_eirp,
		power_rule2->max_eirp);
	power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain,
		power_rule2->max_antenna_gain);

	intersected_rule->flags = (rule1->flags | rule2->flags);

	if (!is_valid_reg_rule(intersected_rule))
		return -EINVAL;

	return 0;
}

/**
 * regdom_intersect - do the intersection between two regulatory domains
 * @rd1: first regulatory domain
 * @rd2: second regulatory domain
 *
 * Use this function to get the intersection between two regulatory domains.
 * Once completed we will mark the alpha2 for the rd as intersected, "98",
 * as no one single alpha2 can represent this regulatory domain.
 *
 * Returns a pointer to the regulatory domain structure which will hold the
 * resulting intersection of rules between rd1 and rd2. We will
 * kzalloc() this structure for you.
 */
static struct ieee80211_regdomain *regdom_intersect(
	const struct ieee80211_regdomain *rd1,
	const struct ieee80211_regdomain *rd2)
{
	int r, size_of_regd;
	unsigned int x, y;
	unsigned int num_rules = 0, rule_idx = 0;
	const struct ieee80211_reg_rule *rule1, *rule2;
	struct ieee80211_reg_rule *intersected_rule;
	struct ieee80211_regdomain *rd;
	/* This is just a dummy holder to help us count */
	struct ieee80211_reg_rule irule;

	/* Uses the stack temporarily for counter arithmetic */
	intersected_rule = &irule;

	memset(intersected_rule, 0, sizeof(struct ieee80211_reg_rule));

	if (!rd1 || !rd2)
		return NULL;

	/*
	 * First we get a count of the rules we'll need, then we actually
	 * build them. This is so we can malloc() and free() a
	 * regdomain once. The reason we use reg_rules_intersect() here
	 * is that it will return -EINVAL if the rule computed makes no sense.
	 * All rules that do check out OK are valid.
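	 *
	 * Example: intersecting a rule of 2402-2482 MHz @ 40 MHz with one of
	 * 2402-2472 MHz @ 20 MHz yields 2402-2472 MHz @ 20 MHz, with the
	 * lower of the two power limits.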
	 */
	for (x = 0; x < rd1->n_reg_rules; x++) {
		rule1 = &rd1->reg_rules[x];
		for (y = 0; y < rd2->n_reg_rules; y++) {
			rule2 = &rd2->reg_rules[y];
			if (!reg_rules_intersect(rule1, rule2,
					intersected_rule))
				num_rules++;
			memset(intersected_rule, 0,
					sizeof(struct ieee80211_reg_rule));
		}
	}

	if (!num_rules)
		return NULL;

	size_of_regd = sizeof(struct ieee80211_regdomain) +
		((num_rules + 1) * sizeof(struct ieee80211_reg_rule));

	rd = kzalloc(size_of_regd, GFP_KERNEL);
	if (!rd)
		return NULL;

	for (x = 0; x < rd1->n_reg_rules; x++) {
		rule1 = &rd1->reg_rules[x];
		for (y = 0; y < rd2->n_reg_rules; y++) {
			rule2 = &rd2->reg_rules[y];
			/*
			 * This time around, instead of using the stack, let's
			 * write to the target rule directly, saving ourselves
			 * a memcpy()
			 */
			intersected_rule = &rd->reg_rules[rule_idx];
			r = reg_rules_intersect(rule1, rule2,
				intersected_rule);
			/*
			 * No need to memset the intersected rule here as
			 * we're not using the stack anymore
			 */
			if (r)
				continue;
			rule_idx++;
		}
	}

	if (rule_idx != num_rules) {
		kfree(rd);
		return NULL;
	}

	rd->n_reg_rules = num_rules;
	rd->alpha2[0] = '9';
	rd->alpha2[1] = '8';

	return rd;
}

/*
 * XXX: add support for the rest of enum nl80211_reg_rule_flags, we may
 * want to just have the channel structure use these
 */
static u32 map_regdom_flags(u32 rd_flags)
{
	u32 channel_flags = 0;
	if (rd_flags & NL80211_RRF_PASSIVE_SCAN)
		channel_flags |= IEEE80211_CHAN_PASSIVE_SCAN;
	if (rd_flags & NL80211_RRF_NO_IBSS)
		channel_flags |= IEEE80211_CHAN_NO_IBSS;
	if (rd_flags & NL80211_RRF_DFS)
		channel_flags |= IEEE80211_CHAN_RADAR;
	return channel_flags;
}

static int freq_reg_info_regd(struct wiphy *wiphy,
			      u32 center_freq,
			      u32 desired_bw_khz,
			      const struct ieee80211_reg_rule **reg_rule,
			      const struct ieee80211_regdomain *custom_regd)
{
	int i;
	bool band_rule_found = false;
	const struct ieee80211_regdomain *regd;
	bool bw_fits = false;

	if (!desired_bw_khz)
		desired_bw_khz = MHZ_TO_KHZ(20);

	regd = custom_regd ?
		custom_regd : cfg80211_regdomain;

	/*
	 * Follow the driver's regulatory domain, if present, unless a country
	 * IE has been processed or a user wants to help compliance further
	 */
	if (!custom_regd &&
	    last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    last_request->initiator != NL80211_REGDOM_SET_BY_USER &&
	    wiphy->regd)
		regd = wiphy->regd;

	if (!regd)
		return -EINVAL;

	for (i = 0; i < regd->n_reg_rules; i++) {
		const struct ieee80211_reg_rule *rr;
		const struct ieee80211_freq_range *fr = NULL;

		rr = &regd->reg_rules[i];
		fr = &rr->freq_range;

		/*
		 * We only need to know if one frequency rule was
		 * in center_freq's band, that's enough, so let's
		 * not overwrite it once found
		 */
		if (!band_rule_found)
			band_rule_found = freq_in_rule_band(fr, center_freq);

		bw_fits = reg_does_bw_fit(fr, center_freq, desired_bw_khz);

		if (band_rule_found && bw_fits) {
			*reg_rule = rr;
			return 0;
		}
	}

	if (!band_rule_found)
		return -ERANGE;

	return -EINVAL;
}

int freq_reg_info(struct wiphy *wiphy,
		  u32 center_freq,
		  u32 desired_bw_khz,
		  const struct ieee80211_reg_rule **reg_rule)
{
	assert_cfg80211_lock();
	return freq_reg_info_regd(wiphy,
				  center_freq,
				  desired_bw_khz,
				  reg_rule,
				  NULL);
}
EXPORT_SYMBOL(freq_reg_info);

#ifdef CONFIG_CFG80211_REG_DEBUG
static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
{
	switch (initiator) {
	case NL80211_REGDOM_SET_BY_CORE:
		return "Set by core";
	case NL80211_REGDOM_SET_BY_USER:
		return "Set by user";
	case NL80211_REGDOM_SET_BY_DRIVER:
		return "Set by driver";
	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
		return "Set by country IE";
	default:
		WARN_ON(1);
		return "Set by bug";
	}
}

static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
				    u32 desired_bw_khz,
				    const struct ieee80211_reg_rule *reg_rule)
{
	const struct ieee80211_power_rule *power_rule;
	const struct ieee80211_freq_range *freq_range;
	char max_antenna_gain[32];

	power_rule = &reg_rule->power_rule;
	freq_range = &reg_rule->freq_range;

	if (!power_rule->max_antenna_gain)
		snprintf(max_antenna_gain, 32, "N/A");
	else
		snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain);

	REG_DBG_PRINT("Updating information on frequency %d MHz "
		      "for a %d MHz width channel with regulatory rule:\n",
		      chan->center_freq,
		      KHZ_TO_MHZ(desired_bw_khz));

	REG_DBG_PRINT("(%d KHz - %d KHz @ %d KHz), (%s mBi, %d mBm)\n",
		      freq_range->start_freq_khz,
		      freq_range->end_freq_khz,
		      freq_range->max_bandwidth_khz,
		      max_antenna_gain,
		      power_rule->max_eirp);
}
#else
static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
				    u32 desired_bw_khz,
				    const struct ieee80211_reg_rule *reg_rule)
{
	return;
}
#endif

/*
 * Note that right now we assume the desired channel bandwidth
 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
 * per channel, the primary and the extension channel). To support
 * smaller custom bandwidths such as 5 MHz or 10 MHz we'll need a
 * new ieee80211_channel.target_bw and re-run the regulatory check
 * on the wiphy with the target_bw specified. Then we can simply use
 * that for the desired_bw_khz below.
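 *
 * For example, with desired_bw_khz fixed at 20 MHz a rule whose
 * max_bandwidth_khz is below 40 MHz still matches, but handle_channel()
 * below then flags the channel with IEEE80211_CHAN_NO_HT40.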
*/ static void handle_channel(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, enum ieee80211_band band, unsigned int chan_idx) { int r; u32 flags, bw_flags = 0; u32 desired_bw_khz = MHZ_TO_KHZ(20); const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_power_rule *power_rule = NULL; const struct ieee80211_freq_range *freq_range = NULL; struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; struct wiphy *request_wiphy = NULL; assert_cfg80211_lock(); request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); sband = wiphy->bands[band]; BUG_ON(chan_idx >= sband->n_channels); chan = &sband->channels[chan_idx]; flags = chan->orig_flags; r = freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq), desired_bw_khz, &reg_rule); if (r) { /* * We will disable all channels that do not match our * received regulatory rule unless the hint is coming * from a Country IE and the Country IE had no information * about a band. The IEEE 802.11 spec allows for an AP * to send only a subset of the regulatory rules allowed, * so an AP in the US that only supports 2.4 GHz may only send * a country IE with information for the 2.4 GHz band * while 5 GHz is still supported. */ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && r == -ERANGE) return; REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq); chan->flags = IEEE80211_CHAN_DISABLED; return; } chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule); power_rule = &reg_rule->power_rule; freq_range = &reg_rule->freq_range; if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40)) bw_flags = IEEE80211_CHAN_NO_HT40; if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && request_wiphy && request_wiphy == wiphy && request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) { /* * This guarantees the driver's requested regulatory domain * will always be used as a base for further regulatory * settings */ chan->flags = chan->orig_flags = map_regdom_flags(reg_rule->flags) | bw_flags; chan->max_antenna_gain = chan->orig_mag = (int) MBI_TO_DBI(power_rule->max_antenna_gain); chan->max_power = chan->orig_mpwr = (int) MBM_TO_DBM(power_rule->max_eirp); return; } chan->beacon_found = false; chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); chan->max_antenna_gain = min(chan->orig_mag, (int) MBI_TO_DBI(power_rule->max_antenna_gain)); if (chan->orig_mpwr) chan->max_power = min(chan->orig_mpwr, (int) MBM_TO_DBM(power_rule->max_eirp)); else chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); } static void handle_band(struct wiphy *wiphy, enum ieee80211_band band, enum nl80211_reg_initiator initiator) { unsigned int i; struct ieee80211_supported_band *sband; BUG_ON(!wiphy->bands[band]); sband = wiphy->bands[band]; for (i = 0; i < sband->n_channels; i++) handle_channel(wiphy, initiator, band, i); } static bool ignore_reg_update(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { if (!last_request) { REG_DBG_PRINT("Ignoring regulatory request %s since " "last_request is not set\n", reg_initiator_name(initiator)); return true; } if (initiator == NL80211_REGDOM_SET_BY_CORE && wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) { REG_DBG_PRINT("Ignoring regulatory request %s " "since the driver uses its own custom " "regulatory domain\n", reg_initiator_name(initiator)); return true; } /* * wiphy->regd will be set once the device has its own * desired regulatory domain set */ if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd && initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && 
	    !is_world_regdom(last_request->alpha2)) {
		REG_DBG_PRINT("Ignoring regulatory request %s "
			      "since the driver requires its own regulatory "
			      "domain to be set first\n",
			      reg_initiator_name(initiator));
		return true;
	}

	return false;
}

static void handle_reg_beacon(struct wiphy *wiphy,
			      unsigned int chan_idx,
			      struct reg_beacon *reg_beacon)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	bool channel_changed = false;
	struct ieee80211_channel chan_before;

	assert_cfg80211_lock();

	sband = wiphy->bands[reg_beacon->chan.band];
	chan = &sband->channels[chan_idx];

	if (likely(chan->center_freq != reg_beacon->chan.center_freq))
		return;

	if (chan->beacon_found)
		return;

	chan->beacon_found = true;

	if (wiphy->flags & WIPHY_FLAG_DISABLE_BEACON_HINTS)
		return;

	chan_before.center_freq = chan->center_freq;
	chan_before.flags = chan->flags;

	if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
		chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
		channel_changed = true;
	}

	if (chan->flags & IEEE80211_CHAN_NO_IBSS) {
		chan->flags &= ~IEEE80211_CHAN_NO_IBSS;
		channel_changed = true;
	}

	if (channel_changed)
		nl80211_send_beacon_hint_event(wiphy, &chan_before, chan);
}

/*
 * Called when a scan on a wiphy finds a beacon on
 * a new channel
 */
static void wiphy_update_new_beacon(struct wiphy *wiphy,
				    struct reg_beacon *reg_beacon)
{
	unsigned int i;
	struct ieee80211_supported_band *sband;

	assert_cfg80211_lock();

	if (!wiphy->bands[reg_beacon->chan.band])
		return;

	sband = wiphy->bands[reg_beacon->chan.band];

	for (i = 0; i < sband->n_channels; i++)
		handle_reg_beacon(wiphy, i, reg_beacon);
}

/*
 * Called upon reg changes or when a new wiphy is added
 */
static void wiphy_update_beacon_reg(struct wiphy *wiphy)
{
	unsigned int i;
	struct ieee80211_supported_band *sband;
	struct reg_beacon *reg_beacon;

	assert_cfg80211_lock();

	if (list_empty(&reg_beacon_list))
		return;

	list_for_each_entry(reg_beacon, &reg_beacon_list, list) {
		if (!wiphy->bands[reg_beacon->chan.band])
			continue;
		sband = wiphy->bands[reg_beacon->chan.band];
		for (i = 0; i < sband->n_channels; i++)
			handle_reg_beacon(wiphy, i, reg_beacon);
	}
}

static bool reg_is_world_roaming(struct wiphy *wiphy)
{
	if (is_world_regdom(cfg80211_regdomain->alpha2) ||
	    (wiphy->regd && is_world_regdom(wiphy->regd->alpha2)))
		return true;
	if (last_request &&
	    last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
		return true;
	return false;
}

/* Reap the advantages of previously found beacons */
static void reg_process_beacons(struct wiphy *wiphy)
{
	/*
	 * Means we are just firing up cfg80211, so no beacons would
	 * have been processed yet.
*/ if (!last_request) return; if (!reg_is_world_roaming(wiphy)) return; wiphy_update_beacon_reg(wiphy); } static bool is_ht40_not_allowed(struct ieee80211_channel *chan) { if (!chan) return true; if (chan->flags & IEEE80211_CHAN_DISABLED) return true; /* This would happen when regulatory rules disallow HT40 completely */ if (IEEE80211_CHAN_NO_HT40 == (chan->flags & (IEEE80211_CHAN_NO_HT40))) return true; return false; } static void reg_process_ht_flags_channel(struct wiphy *wiphy, enum ieee80211_band band, unsigned int chan_idx) { struct ieee80211_supported_band *sband; struct ieee80211_channel *channel; struct ieee80211_channel *channel_before = NULL, *channel_after = NULL; unsigned int i; assert_cfg80211_lock(); sband = wiphy->bands[band]; BUG_ON(chan_idx >= sband->n_channels); channel = &sband->channels[chan_idx]; if (is_ht40_not_allowed(channel)) { channel->flags |= IEEE80211_CHAN_NO_HT40; return; } /* * We need to ensure the extension channels exist to * be able to use HT40- or HT40+, this finds them (or not) */ for (i = 0; i < sband->n_channels; i++) { struct ieee80211_channel *c = &sband->channels[i]; if (c->center_freq == (channel->center_freq - 20)) channel_before = c; if (c->center_freq == (channel->center_freq + 20)) channel_after = c; } /* * Please note that this assumes target bandwidth is 20 MHz, * if that ever changes we also need to change the below logic * to include that as well. */ if (is_ht40_not_allowed(channel_before)) channel->flags |= IEEE80211_CHAN_NO_HT40MINUS; else channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; if (is_ht40_not_allowed(channel_after)) channel->flags |= IEEE80211_CHAN_NO_HT40PLUS; else channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; } static void reg_process_ht_flags_band(struct wiphy *wiphy, enum ieee80211_band band) { unsigned int i; struct ieee80211_supported_band *sband; BUG_ON(!wiphy->bands[band]); sband = wiphy->bands[band]; for (i = 0; i < sband->n_channels; i++) reg_process_ht_flags_channel(wiphy, band, i); } static void reg_process_ht_flags(struct wiphy *wiphy) { enum ieee80211_band band; if (!wiphy) return; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (wiphy->bands[band]) reg_process_ht_flags_band(wiphy, band); } } static void wiphy_update_regulatory(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { enum ieee80211_band band; assert_reg_lock(); if (ignore_reg_update(wiphy, initiator)) return; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (wiphy->bands[band]) handle_band(wiphy, band, initiator); } reg_process_beacons(wiphy); reg_process_ht_flags(wiphy); if (wiphy->reg_notifier) wiphy->reg_notifier(wiphy, last_request); } void regulatory_update(struct wiphy *wiphy, enum nl80211_reg_initiator setby) { mutex_lock(&reg_mutex); wiphy_update_regulatory(wiphy, setby); mutex_unlock(&reg_mutex); } static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) { struct cfg80211_registered_device *rdev; list_for_each_entry(rdev, &cfg80211_rdev_list, list) wiphy_update_regulatory(&rdev->wiphy, initiator); } static void handle_channel_custom(struct wiphy *wiphy, enum ieee80211_band band, unsigned int chan_idx, const struct ieee80211_regdomain *regd) { int r; u32 desired_bw_khz = MHZ_TO_KHZ(20); u32 bw_flags = 0; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_power_rule *power_rule = NULL; const struct ieee80211_freq_range *freq_range = NULL; struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; assert_reg_lock(); sband = wiphy->bands[band]; BUG_ON(chan_idx >= 
	       sband->n_channels);
	chan = &sband->channels[chan_idx];

	r = freq_reg_info_regd(wiphy,
			       MHZ_TO_KHZ(chan->center_freq),
			       desired_bw_khz,
			       &reg_rule,
			       regd);

	if (r) {
		REG_DBG_PRINT("Disabling freq %d MHz as custom "
			      "regd has no rule that fits a %d MHz "
			      "wide channel\n",
			      chan->center_freq,
			      KHZ_TO_MHZ(desired_bw_khz));
		chan->flags = IEEE80211_CHAN_DISABLED;
		return;
	}

	chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);

	power_rule = &reg_rule->power_rule;
	freq_range = &reg_rule->freq_range;

	if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
		bw_flags = IEEE80211_CHAN_NO_HT40;

	chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
	chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
	chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
}

static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band,
			       const struct ieee80211_regdomain *regd)
{
	unsigned int i;
	struct ieee80211_supported_band *sband;

	BUG_ON(!wiphy->bands[band]);
	sband = wiphy->bands[band];

	for (i = 0; i < sband->n_channels; i++)
		handle_channel_custom(wiphy, band, i, regd);
}

/* Used by drivers prior to wiphy registration */
void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
				   const struct ieee80211_regdomain *regd)
{
	enum ieee80211_band band;
	unsigned int bands_set = 0;

	mutex_lock(&reg_mutex);
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		if (!wiphy->bands[band])
			continue;
		handle_band_custom(wiphy, band, regd);
		bands_set++;
	}
	mutex_unlock(&reg_mutex);

	/*
	 * No point in calling this if it won't have any effect
	 * on your device's supported bands.
	 */
	WARN_ON(!bands_set);
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);

/*
 * Return value which can be used by ignore_request() to indicate
 * it has been determined we should intersect two regulatory domains
 */
#define REG_INTERSECT	1

/* This has the logic which determines when a new request
 * should be ignored. */
static int ignore_request(struct wiphy *wiphy,
			  struct regulatory_request *pending_request)
{
	struct wiphy *last_wiphy = NULL;

	assert_cfg80211_lock();

	/* All initial requests are respected */
	if (!last_request)
		return 0;

	switch (pending_request->initiator) {
	case NL80211_REGDOM_SET_BY_CORE:
		return 0;
	case NL80211_REGDOM_SET_BY_COUNTRY_IE:

		last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);

		if (unlikely(!is_an_alpha2(pending_request->alpha2)))
			return -EINVAL;
		if (last_request->initiator ==
		    NL80211_REGDOM_SET_BY_COUNTRY_IE) {
			if (last_wiphy != wiphy) {
				/*
				 * Two cards with two APs claiming different
				 * Country IE alpha2s. We could
				 * intersect them, but that seems unlikely
				 * to be correct. Reject second one for now.
				 */
				if (regdom_changes(pending_request->alpha2))
					return -EOPNOTSUPP;
				return -EALREADY;
			}
			/*
			 * Two consecutive Country IE hints on the same wiphy.
			 * This should be picked up early by the driver/stack
			 */
			if (WARN_ON(regdom_changes(pending_request->alpha2)))
				return 0;
			return -EALREADY;
		}
		return 0;
	case NL80211_REGDOM_SET_BY_DRIVER:
		if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
			if (regdom_changes(pending_request->alpha2))
				return 0;
			return -EALREADY;
		}

		/*
		 * This would happen if you unplug and plug your card
		 * back in or if you add a new device for which the previously
		 * loaded card also agrees on the regulatory domain.
*/ if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && !regdom_changes(pending_request->alpha2)) return -EALREADY; return REG_INTERSECT; case NL80211_REGDOM_SET_BY_USER: if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) return REG_INTERSECT; /* * If the user knows better the user should set the regdom * to their country before the IE is picked up */ if (last_request->initiator == NL80211_REGDOM_SET_BY_USER && last_request->intersect) return -EOPNOTSUPP; /* * Process user requests only after previous user/driver/core * requests have been processed */ if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE || last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER || last_request->initiator == NL80211_REGDOM_SET_BY_USER) { if (regdom_changes(last_request->alpha2)) return -EAGAIN; } if (!regdom_changes(pending_request->alpha2)) return -EALREADY; return 0; } return -EINVAL; } static void reg_set_request_processed(void) { bool need_more_processing = false; last_request->processed = true; spin_lock(&reg_requests_lock); if (!list_empty(&reg_requests_list)) need_more_processing = true; spin_unlock(&reg_requests_lock); if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) cancel_delayed_work_sync(&reg_timeout); if (need_more_processing) schedule_work(&reg_work); } /** * __regulatory_hint - hint to the wireless core a regulatory domain * @wiphy: if the hint comes from country information from an AP, this * is required to be set to the wiphy that received the information * @pending_request: the regulatory request currently being processed * * The Wireless subsystem can use this function to hint to the wireless core * what it believes should be the current regulatory domain. * * Returns zero if all went fine, %-EALREADY if a regulatory domain had * already been set or other standard error codes. 
 *
 * Caller must hold &cfg80211_mutex and &reg_mutex
 */
static int __regulatory_hint(struct wiphy *wiphy,
			     struct regulatory_request *pending_request)
{
	bool intersect = false;
	int r = 0;

	assert_cfg80211_lock();

	r = ignore_request(wiphy, pending_request);

	if (r == REG_INTERSECT) {
		if (pending_request->initiator ==
		    NL80211_REGDOM_SET_BY_DRIVER) {
			r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain);
			if (r) {
				kfree(pending_request);
				return r;
			}
		}
		intersect = true;
	} else if (r) {
		/*
		 * If the regulatory domain being requested by the
		 * driver has already been set just copy it to the
		 * wiphy
		 */
		if (r == -EALREADY &&
		    pending_request->initiator ==
		    NL80211_REGDOM_SET_BY_DRIVER) {
			r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain);
			if (r) {
				kfree(pending_request);
				return r;
			}
			r = -EALREADY;
			goto new_request;
		}
		kfree(pending_request);
		return r;
	}

new_request:
	if (last_request != &core_request_world)
		kfree(last_request);

	last_request = pending_request;
	last_request->intersect = intersect;

	pending_request = NULL;

	if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) {
		user_alpha2[0] = last_request->alpha2[0];
		user_alpha2[1] = last_request->alpha2[1];
	}

	/* When r == REG_INTERSECT we do need to call CRDA */
	if (r < 0) {
		/*
		 * Since CRDA will not be called in this case (we have already
		 * applied the requested regulatory domain), just inform
		 * userspace that we have processed the request
		 */
		if (r == -EALREADY) {
			nl80211_send_reg_change_event(last_request);
			reg_set_request_processed();
		}
		return r;
	}

	return call_crda(last_request->alpha2);
}

/* This processes *all* regulatory hints */
static void reg_process_hint(struct regulatory_request *reg_request)
{
	int r = 0;
	struct wiphy *wiphy = NULL;
	enum nl80211_reg_initiator initiator = reg_request->initiator;

	BUG_ON(!reg_request->alpha2);

	if (wiphy_idx_valid(reg_request->wiphy_idx))
		wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);

	if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
	    !wiphy) {
		kfree(reg_request);
		return;
	}

	r = __regulatory_hint(wiphy, reg_request);
	/* This is required so that the orig_* parameters are saved */
	if (r == -EALREADY && wiphy &&
	    wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
		wiphy_update_regulatory(wiphy, initiator);
		return;
	}

	/*
	 * We only time out user hints, given that they should be the only
	 * source of bogus requests.
	 */
	if (r != -EALREADY &&
	    reg_request->initiator == NL80211_REGDOM_SET_BY_USER)
		schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
}

/*
 * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
 * Regulatory hints come on a first come first serve basis and we
 * must process each one atomically.
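 * They are queued by queue_regulatory_request() and drained one at a
 * time from the reg_work workqueue via reg_todo() below.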
*/ static void reg_process_pending_hints(void) { struct regulatory_request *reg_request; mutex_lock(&cfg80211_mutex); mutex_lock(&reg_mutex); /* When last_request->processed becomes true this will be rescheduled */ if (last_request && !last_request->processed) { REG_DBG_PRINT("Pending regulatory request, waiting " "for it to be processed...\n"); goto out; } spin_lock(&reg_requests_lock); if (list_empty(&reg_requests_list)) { spin_unlock(&reg_requests_lock); goto out; } reg_request = list_first_entry(&reg_requests_list, struct regulatory_request, list); list_del_init(&reg_request->list); spin_unlock(&reg_requests_lock); reg_process_hint(reg_request); out: mutex_unlock(&reg_mutex); mutex_unlock(&cfg80211_mutex); } /* Processes beacon hints -- this has nothing to do with country IEs */ static void reg_process_pending_beacon_hints(void) { struct cfg80211_registered_device *rdev; struct reg_beacon *pending_beacon, *tmp; /* * No need to hold the reg_mutex here as we just touch wiphys * and do not read or access regulatory variables. */ mutex_lock(&cfg80211_mutex); /* This goes through the _pending_ beacon list */ spin_lock_bh(&reg_pending_beacons_lock); if (list_empty(&reg_pending_beacons)) { spin_unlock_bh(&reg_pending_beacons_lock); goto out; } list_for_each_entry_safe(pending_beacon, tmp, &reg_pending_beacons, list) { list_del_init(&pending_beacon->list); /* Applies the beacon hint to current wiphys */ list_for_each_entry(rdev, &cfg80211_rdev_list, list) wiphy_update_new_beacon(&rdev->wiphy, pending_beacon); /* Remembers the beacon hint for new wiphys or reg changes */ list_add_tail(&pending_beacon->list, &reg_beacon_list); } spin_unlock_bh(&reg_pending_beacons_lock); out: mutex_unlock(&cfg80211_mutex); } static void reg_todo(struct work_struct *work) { reg_process_pending_hints(); reg_process_pending_beacon_hints(); } static void queue_regulatory_request(struct regulatory_request *request) { if (isalpha(request->alpha2[0])) request->alpha2[0] = toupper(request->alpha2[0]); if (isalpha(request->alpha2[1])) request->alpha2[1] = toupper(request->alpha2[1]); spin_lock(&reg_requests_lock); list_add_tail(&request->list, &reg_requests_list); spin_unlock(&reg_requests_lock); schedule_work(&reg_work); } /* * Core regulatory hint -- happens during cfg80211_init() * and when we restore regulatory settings. 
 */
static int regulatory_hint_core(const char *alpha2)
{
	struct regulatory_request *request;

	request = kzalloc(sizeof(struct regulatory_request),
			  GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->alpha2[0] = alpha2[0];
	request->alpha2[1] = alpha2[1];
	request->initiator = NL80211_REGDOM_SET_BY_CORE;

	queue_regulatory_request(request);

	return 0;
}

/* User hints */
int regulatory_hint_user(const char *alpha2)
{
	struct regulatory_request *request;

	BUG_ON(!alpha2);

	request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->wiphy_idx = WIPHY_IDX_STALE;
	request->alpha2[0] = alpha2[0];
	request->alpha2[1] = alpha2[1];
	request->initiator = NL80211_REGDOM_SET_BY_USER;

	queue_regulatory_request(request);

	return 0;
}

/* Driver hints */
int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
{
	struct regulatory_request *request;

	BUG_ON(!alpha2);
	BUG_ON(!wiphy);

	request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->wiphy_idx = get_wiphy_idx(wiphy);

	/* Must have registered wiphy first */
	BUG_ON(!wiphy_idx_valid(request->wiphy_idx));

	request->alpha2[0] = alpha2[0];
	request->alpha2[1] = alpha2[1];
	request->initiator = NL80211_REGDOM_SET_BY_DRIVER;

	queue_regulatory_request(request);

	return 0;
}
EXPORT_SYMBOL(regulatory_hint);

/*
 * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and
 * therefore cannot iterate over the rdev list here.
 */
void regulatory_hint_11d(struct wiphy *wiphy,
			 enum ieee80211_band band,
			 u8 *country_ie,
			 u8 country_ie_len)
{
	char alpha2[2];
	enum environment_cap env = ENVIRON_ANY;
	struct regulatory_request *request;

	mutex_lock(&reg_mutex);

	if (unlikely(!last_request))
		goto out;

	/* IE len must be evenly divisible by 2 */
	if (country_ie_len & 0x01)
		goto out;

	if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
		goto out;

	alpha2[0] = country_ie[0];
	alpha2[1] = country_ie[1];

	if (country_ie[2] == 'I')
		env = ENVIRON_INDOOR;
	else if (country_ie[2] == 'O')
		env = ENVIRON_OUTDOOR;

	/*
	 * We will run this only upon a successful connection on cfg80211.
	 * We leave conflict resolution to the workqueue, where we can hold
	 * cfg80211_mutex.
	 */
	if (likely(last_request->initiator ==
	    NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    wiphy_idx_valid(last_request->wiphy_idx)))
		goto out;

	request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
	if (!request)
		goto out;

	request->wiphy_idx = get_wiphy_idx(wiphy);
	request->alpha2[0] = alpha2[0];
	request->alpha2[1] = alpha2[1];
	request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE;
	request->country_ie_env = env;

	mutex_unlock(&reg_mutex);

	queue_regulatory_request(request);

	return;

out:
	mutex_unlock(&reg_mutex);
}

static void restore_alpha2(char *alpha2, bool reset_user)
{
	/* indicates there is no alpha2 to consider for restoration */
	alpha2[0] = '9';
	alpha2[1] = '7';

	/* The user setting has precedence over the module parameter */
	if (is_user_regdom_saved()) {
		/* Unless we're asked to ignore it and reset it */
		if (reset_user) {
			REG_DBG_PRINT("Restoring regulatory settings "
			       "including user preference\n");
			user_alpha2[0] = '9';
			user_alpha2[1] = '7';

			/*
			 * If we're ignoring user settings, we still need to
			 * check the module parameter to ensure we put things
			 * back as they were for a full restore.
			 */
			if (!is_world_regdom(ieee80211_regdom)) {
				REG_DBG_PRINT("Keeping preference on "
				       "module parameter ieee80211_regdom: %c%c\n",
				       ieee80211_regdom[0],
				       ieee80211_regdom[1]);
				alpha2[0] = ieee80211_regdom[0];
				alpha2[1] = ieee80211_regdom[1];
			}
		} else {
			REG_DBG_PRINT("Restoring regulatory settings "
			       "while preserving user preference for: %c%c\n",
			       user_alpha2[0],
			       user_alpha2[1]);
			alpha2[0] = user_alpha2[0];
			alpha2[1] = user_alpha2[1];
		}
	} else if (!is_world_regdom(ieee80211_regdom)) {
		REG_DBG_PRINT("Keeping preference on "
		       "module parameter ieee80211_regdom: %c%c\n",
		       ieee80211_regdom[0],
		       ieee80211_regdom[1]);
		alpha2[0] = ieee80211_regdom[0];
		alpha2[1] = ieee80211_regdom[1];
	} else
		REG_DBG_PRINT("Restoring regulatory settings\n");
}

/*
 * Restoring regulatory settings involves ignoring any
 * possibly stale country IE information and user regulatory
 * settings if so desired, this includes any beacon hints
 * learned as we could have traveled outside to another country
 * after disconnection. To restore regulatory settings we do
 * exactly what we did at bootup:
 *
 *   - send a core regulatory hint
 *   - send a user regulatory hint if applicable
 *
 * Device drivers that send a regulatory hint for a specific country
 * keep their own regulatory domain on wiphy->regd so that it does
 * not need to be remembered.
 */
static void restore_regulatory_settings(bool reset_user)
{
	char alpha2[2];
	struct reg_beacon *reg_beacon, *btmp;
	struct regulatory_request *reg_request, *tmp;
	LIST_HEAD(tmp_reg_req_list);

	mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);

	reset_regdomains(true);
	restore_alpha2(alpha2, reset_user);

	/*
	 * If there are any pending requests we simply
	 * stash them to a temporary pending queue and
	 * add them back after we've restored regulatory
	 * settings.
	 */
	spin_lock(&reg_requests_lock);
	if (!list_empty(&reg_requests_list)) {
		list_for_each_entry_safe(reg_request, tmp,
					 &reg_requests_list, list) {
			if (reg_request->initiator !=
			    NL80211_REGDOM_SET_BY_USER)
				continue;
			list_del(&reg_request->list);
			list_add_tail(&reg_request->list, &tmp_reg_req_list);
		}
	}
	spin_unlock(&reg_requests_lock);

	/* Clear beacon hints */
	spin_lock_bh(&reg_pending_beacons_lock);
	if (!list_empty(&reg_pending_beacons)) {
		list_for_each_entry_safe(reg_beacon, btmp,
					 &reg_pending_beacons, list) {
			list_del(&reg_beacon->list);
			kfree(reg_beacon);
		}
	}
	spin_unlock_bh(&reg_pending_beacons_lock);

	if (!list_empty(&reg_beacon_list)) {
		list_for_each_entry_safe(reg_beacon, btmp,
					 &reg_beacon_list, list) {
			list_del(&reg_beacon->list);
			kfree(reg_beacon);
		}
	}

	/* First restore to the basic regulatory settings */
	cfg80211_regdomain = cfg80211_world_regdom;

	mutex_unlock(&reg_mutex);
	mutex_unlock(&cfg80211_mutex);

	regulatory_hint_core(cfg80211_regdomain->alpha2);

	/*
	 * This restores the ieee80211_regdom module parameter
	 * preference or the last user requested regulatory
	 * settings, user regulatory settings take precedence.
	 */
	if (is_an_alpha2(alpha2))
		regulatory_hint_user(user_alpha2);

	if (list_empty(&tmp_reg_req_list))
		return;

	mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);

	spin_lock(&reg_requests_lock);
	list_for_each_entry_safe(reg_request, tmp, &tmp_reg_req_list, list) {
		REG_DBG_PRINT("Adding request for country %c%c back "
			      "into the queue\n",
			      reg_request->alpha2[0],
			      reg_request->alpha2[1]);
		list_del(&reg_request->list);
		list_add_tail(&reg_request->list, &reg_requests_list);
	}
	spin_unlock(&reg_requests_lock);

	mutex_unlock(&reg_mutex);
	mutex_unlock(&cfg80211_mutex);

	REG_DBG_PRINT("Kicking the queue\n");

	schedule_work(&reg_work);
}

void regulatory_hint_disconnect(void)
{
	REG_DBG_PRINT("All devices are disconnected, going to "
		      "restore regulatory settings\n");
	restore_regulatory_settings(false);
}

static bool freq_is_chan_12_13_14(u16 freq)
{
	if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) ||
	    freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) ||
	    freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ))
		return true;
	return false;
}

int regulatory_hint_found_beacon(struct wiphy *wiphy,
				 struct ieee80211_channel *beacon_chan,
				 gfp_t gfp)
{
	struct reg_beacon *reg_beacon;

	if (likely((beacon_chan->beacon_found ||
	    (beacon_chan->flags & IEEE80211_CHAN_RADAR) ||
	    (beacon_chan->band == IEEE80211_BAND_2GHZ &&
	     !freq_is_chan_12_13_14(beacon_chan->center_freq)))))
		return 0;

	reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp);
	if (!reg_beacon)
		return -ENOMEM;

	REG_DBG_PRINT("Found new beacon on "
		      "frequency: %d MHz (Ch %d) on %s\n",
		      beacon_chan->center_freq,
		      ieee80211_frequency_to_channel(beacon_chan->center_freq),
		      wiphy_name(wiphy));

	memcpy(&reg_beacon->chan, beacon_chan,
		sizeof(struct ieee80211_channel));

	/*
	 * Since we can be called from BH or non-BH context
	 * we must use spin_lock_bh()
	 */
	spin_lock_bh(&reg_pending_beacons_lock);
	list_add_tail(&reg_beacon->list, &reg_pending_beacons);
	spin_unlock_bh(&reg_pending_beacons_lock);

	schedule_work(&reg_work);

	return 0;
}

static void print_rd_rules(const struct ieee80211_regdomain *rd)
{
	unsigned int i;
	const struct ieee80211_reg_rule *reg_rule = NULL;
	const struct ieee80211_freq_range *freq_range = NULL;
	const struct ieee80211_power_rule *power_rule = NULL;

	pr_info("	(start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");

	for (i = 0; i < rd->n_reg_rules; i++) {
		reg_rule = &rd->reg_rules[i];
		freq_range = &reg_rule->freq_range;
		power_rule = &reg_rule->power_rule;

		/*
		 * There may not be documentation for max antenna gain
		 * in certain regions
		 */
		if (power_rule->max_antenna_gain)
			pr_info("	(%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
				freq_range->start_freq_khz,
				freq_range->end_freq_khz,
				freq_range->max_bandwidth_khz,
				power_rule->max_antenna_gain,
				power_rule->max_eirp);
		else
			pr_info("	(%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
				freq_range->start_freq_khz,
				freq_range->end_freq_khz,
				freq_range->max_bandwidth_khz,
				power_rule->max_eirp);
	}
}

static void print_regdomain(const struct ieee80211_regdomain *rd)
{
	if (is_intersected_alpha2(rd->alpha2)) {
		if (last_request->initiator ==
		    NL80211_REGDOM_SET_BY_COUNTRY_IE) {
			struct cfg80211_registered_device *rdev;
			rdev = cfg80211_rdev_by_wiphy_idx(
				last_request->wiphy_idx);
			if (rdev) {
				pr_info("Current regulatory domain updated by AP to: %c%c\n",
					rdev->country_ie_alpha2[0],
					rdev->country_ie_alpha2[1]);
			} else
				pr_info("Current regulatory domain intersected:\n");
		} else
			pr_info("Current regulatory domain intersected:\n");
	} else if (is_world_regdom(rd->alpha2))
		pr_info("World
regulatory domain updated:\n"); else { if (is_unknown_alpha2(rd->alpha2)) pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n"); else pr_info("Regulatory domain changed to country: %c%c\n", rd->alpha2[0], rd->alpha2[1]); } print_rd_rules(rd); } static void print_regdomain_info(const struct ieee80211_regdomain *rd) { pr_info("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_rd_rules(rd); } /* Takes ownership of rd only if it doesn't fail */ static int __set_regdom(const struct ieee80211_regdomain *rd) { const struct ieee80211_regdomain *intersected_rd = NULL; struct cfg80211_registered_device *rdev = NULL; struct wiphy *request_wiphy; /* Some basic sanity checks first */ if (is_world_regdom(rd->alpha2)) { if (WARN_ON(!reg_is_valid_request(rd->alpha2))) return -EINVAL; update_world_regdomain(rd); return 0; } if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) && !is_unknown_alpha2(rd->alpha2)) return -EINVAL; if (!last_request) return -EINVAL; /* * Lets only bother proceeding on the same alpha2 if the current * rd is non static (it means CRDA was present and was used last) * and the pending request came in from a country IE */ if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) { /* * If someone else asked us to change the rd lets only bother * checking if the alpha2 changes if CRDA was already called */ if (!regdom_changes(rd->alpha2)) return -EINVAL; } /* * Now lets set the regulatory domain, update all driver channels * and finally inform them of what we have done, in case they want * to review or adjust their own settings based on their own * internal EEPROM data */ if (WARN_ON(!reg_is_valid_request(rd->alpha2))) return -EINVAL; if (!is_valid_rd(rd)) { pr_err("Invalid regulatory domain detected:\n"); print_regdomain_info(rd); return -EINVAL; } request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); if (!request_wiphy && (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER || last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) { schedule_delayed_work(&reg_timeout, 0); return -ENODEV; } if (!last_request->intersect) { int r; if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) { reset_regdomains(false); cfg80211_regdomain = rd; return 0; } /* * For a driver hint, lets copy the regulatory domain the * driver wanted to the wiphy to deal with conflicts */ /* * Userspace could have sent two replies with only * one kernel request. */ if (request_wiphy->regd) return -EALREADY; r = reg_copy_regd(&request_wiphy->regd, rd); if (r) return r; reset_regdomains(false); cfg80211_regdomain = rd; return 0; } /* Intersection requires a bit more work */ if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) { intersected_rd = regdom_intersect(rd, cfg80211_regdomain); if (!intersected_rd) return -EINVAL; /* * We can trash what CRDA provided now. 
	 * However if a driver requested this specific regulatory
	 * domain we keep it for its private use
	 */
	if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER)
		request_wiphy->regd = rd;
	else
		kfree(rd);

	rd = NULL;

	reset_regdomains(false);
	cfg80211_regdomain = intersected_rd;

	return 0;
	}

	if (!intersected_rd)
		return -EINVAL;

	rdev = wiphy_to_dev(request_wiphy);

	rdev->country_ie_alpha2[0] = rd->alpha2[0];
	rdev->country_ie_alpha2[1] = rd->alpha2[1];
	rdev->env = last_request->country_ie_env;

	BUG_ON(intersected_rd == rd);

	kfree(rd);
	rd = NULL;

	reset_regdomains(false);
	cfg80211_regdomain = intersected_rd;

	return 0;
}

/*
 * Use this call to set the current regulatory domain. Conflicts with
 * multiple drivers can be ironed out later. Caller must've already
 * kmalloc'd the rd structure. Caller must hold cfg80211_mutex
 */
int set_regdom(const struct ieee80211_regdomain *rd)
{
	int r;

	assert_cfg80211_lock();

	mutex_lock(&reg_mutex);

	/* Note that this doesn't update the wiphys, this is done below */
	r = __set_regdom(rd);
	if (r) {
		kfree(rd);
		mutex_unlock(&reg_mutex);
		return r;
	}

	/* This would make this whole thing pointless */
	if (!last_request->intersect)
		BUG_ON(rd != cfg80211_regdomain);

	/* update all wiphys now with the new established regulatory domain */
	update_all_wiphy_regulatory(last_request->initiator);

	print_regdomain(cfg80211_regdomain);

	nl80211_send_reg_change_event(last_request);

	reg_set_request_processed();

	mutex_unlock(&reg_mutex);

	return r;
}

#ifdef CONFIG_HOTPLUG
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	if (last_request && !last_request->processed) {
		if (add_uevent_var(env, "COUNTRY=%c%c",
				   last_request->alpha2[0],
				   last_request->alpha2[1]))
			return -ENOMEM;
	}

	return 0;
}
#else
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return -ENODEV;
}
#endif /* CONFIG_HOTPLUG */

/* Caller must hold cfg80211_mutex */
void reg_device_remove(struct wiphy *wiphy)
{
	struct wiphy *request_wiphy = NULL;

	assert_cfg80211_lock();

	mutex_lock(&reg_mutex);

	kfree(wiphy->regd);

	if (last_request)
		request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);

	if (!request_wiphy || request_wiphy != wiphy)
		goto out;

	last_request->wiphy_idx = WIPHY_IDX_STALE;
	last_request->country_ie_env = ENVIRON_ANY;
out:
	mutex_unlock(&reg_mutex);
}

static void reg_timeout_work(struct work_struct *work)
{
	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
		      "restoring regulatory settings\n");
	restore_regulatory_settings(true);
}

int __init regulatory_init(void)
{
	int err = 0;

	reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
	if (IS_ERR(reg_pdev))
		return PTR_ERR(reg_pdev);

	reg_pdev->dev.type = &reg_device_type;

	spin_lock_init(&reg_requests_lock);
	spin_lock_init(&reg_pending_beacons_lock);

	reg_regdb_size_check();

	cfg80211_regdomain = cfg80211_world_regdom;

	user_alpha2[0] = '9';
	user_alpha2[1] = '7';

	/* We always try to get an update for the static regdomain */
	err = regulatory_hint_core(cfg80211_regdomain->alpha2);
	if (err) {
		if (err == -ENOMEM)
			return err;
		/*
		 * N.B. kobject_uevent_env() can fail mainly for when we're out
		 * of memory which is handled and propagated appropriately above
		 * but it can also fail during a netlink_broadcast() or during
		 * early boot for call_usermodehelper(). For now treat these
		 * errors as non-fatal.
*/ pr_err("kobject_uevent_env() was unable to call CRDA during init\n"); #ifdef CONFIG_CFG80211_REG_DEBUG /* We want to find out exactly why when debugging */ WARN_ON(err); #endif } /* * Finally, if the user set the module parameter treat it * as a user hint. */ if (!is_world_regdom(ieee80211_regdom)) regulatory_hint_user(ieee80211_regdom); return 0; } void /* __init_or_exit */ regulatory_exit(void) { struct regulatory_request *reg_request, *tmp; struct reg_beacon *reg_beacon, *btmp; cancel_work_sync(&reg_work); cancel_delayed_work_sync(&reg_timeout); mutex_lock(&cfg80211_mutex); mutex_lock(&reg_mutex); reset_regdomains(true); dev_set_uevent_suppress(&reg_pdev->dev, true); platform_device_unregister(reg_pdev); spin_lock_bh(&reg_pending_beacons_lock); if (!list_empty(&reg_pending_beacons)) { list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } } spin_unlock_bh(&reg_pending_beacons_lock); if (!list_empty(&reg_beacon_list)) { list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } } spin_lock(&reg_requests_lock); if (!list_empty(&reg_requests_list)) { list_for_each_entry_safe(reg_request, tmp, &reg_requests_list, list) { list_del(&reg_request->list); kfree(reg_request); } } spin_unlock(&reg_requests_lock); mutex_unlock(&reg_mutex); mutex_unlock(&cfg80211_mutex); }
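/*
 * Illustrative sketch (not part of reg.c): how a driver might use the two
 * exported entry points above. A driver that knows its programmed country
 * calls regulatory_hint() after wiphy registration; a driver with fixed,
 * firmware-enforced rules builds its own regdomain and applies it with
 * wiphy_apply_custom_regulatory() before registration. The regdomain and
 * the my_* names below are hypothetical.
 */
static const struct ieee80211_regdomain my_custom_regd = {
	.n_reg_rules = 1,
	.alpha2 = "99",	/* built by driver, no specific alpha2 */
	.reg_rules = {
		/* IEEE 802.11b/g, channels 1..11 */
		REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
	},
};

static void my_apply_regulatory(struct wiphy *wiphy, const char *alpha2)
{
	if (alpha2) {
		/* e.g. "US" read from EEPROM; wiphy must be registered */
		regulatory_hint(wiphy, alpha2);
	} else {
		/* only valid before wiphy_register() */
		wiphy_apply_custom_regulatory(wiphy, &my_custom_regd);
	}
}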
/* Source file above: net/wireless/reg.c from maurillo71/beaglebone-linux (C, gpl-2.0, 61,442 bytes) */
/* * IMG Pistachio USB PHY driver * * Copyright (C) 2015 Google, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <dt-bindings/phy/phy-pistachio-usb.h> #define USB_PHY_CONTROL0 0x00 #define USB_PHY_CONTROL0_OTG_DRVVBUS_SHIFT 11 #define USB_PHY_CONTROL0_OTG_DRVVBUS_MASK 1 #define USB_PHY_CONTROL1 0x04 #define USB_PHY_CONTROL1_FSEL_SHIFT 2 #define USB_PHY_CONTROL1_FSEL_MASK 0x7 #define USB_PHY_STRAP_CONTROL 0x10 #define USB_PHY_STRAP_CONTROL_REFCLK_SHIFT 4 #define USB_PHY_STRAP_CONTROL_REFCLK_MASK 0x3 #define USB_PHY_STATUS 0x14 #define USB_PHY_STATUS_RX_PHY_CLK BIT(9) #define USB_PHY_STATUS_RX_UTMI_CLK BIT(8) #define USB_PHY_STATUS_VBUS_FAULT BIT(7) struct pistachio_usb_phy { struct device *dev; struct regmap *cr_top; struct clk *phy_clk; bool vbus_drive; unsigned int refclk; }; static const unsigned long fsel_rate_map[] = { 9600000, 10000000, 12000000, 19200000, 20000000, 24000000, 0, 50000000, }; static int pistachio_usb_phy_power_on(struct phy *phy) { struct pistachio_usb_phy *p_phy = phy_get_drvdata(phy); unsigned long timeout, rate; unsigned int i; int ret; ret = clk_prepare_enable(p_phy->phy_clk); if (ret < 0) { dev_err(p_phy->dev, "Failed to enable PHY clock: %d\n", ret); return ret; } regmap_update_bits(p_phy->cr_top, USB_PHY_STRAP_CONTROL, USB_PHY_STRAP_CONTROL_REFCLK_MASK << USB_PHY_STRAP_CONTROL_REFCLK_SHIFT, p_phy->refclk << USB_PHY_STRAP_CONTROL_REFCLK_SHIFT); if (p_phy->vbus_drive) { /* allow USB block to control VBUS */ regmap_update_bits(p_phy->cr_top, USB_PHY_CONTROL0, USB_PHY_CONTROL0_OTG_DRVVBUS_MASK << USB_PHY_CONTROL0_OTG_DRVVBUS_SHIFT, 1 << USB_PHY_CONTROL0_OTG_DRVVBUS_SHIFT); } rate = clk_get_rate(p_phy->phy_clk); if (p_phy->refclk == REFCLK_XO_CRYSTAL && rate != 12000000) { dev_err(p_phy->dev, "Unsupported rate for XO crystal: %ld\n", rate); ret = -EINVAL; goto disable_clk; } for (i = 0; i < ARRAY_SIZE(fsel_rate_map); i++) { if (rate == fsel_rate_map[i]) break; } if (i == ARRAY_SIZE(fsel_rate_map)) { dev_err(p_phy->dev, "Unsupported clock rate: %lu\n", rate); ret = -EINVAL; goto disable_clk; } regmap_update_bits(p_phy->cr_top, USB_PHY_CONTROL1, USB_PHY_CONTROL1_FSEL_MASK << USB_PHY_CONTROL1_FSEL_SHIFT, i << USB_PHY_CONTROL1_FSEL_SHIFT); timeout = jiffies + msecs_to_jiffies(200); while (time_before(jiffies, timeout)) { unsigned int val; regmap_read(p_phy->cr_top, USB_PHY_STATUS, &val); if (val & USB_PHY_STATUS_VBUS_FAULT) { dev_err(p_phy->dev, "VBUS fault detected\n"); ret = -EIO; goto disable_clk; } if ((val & USB_PHY_STATUS_RX_PHY_CLK) && (val & USB_PHY_STATUS_RX_UTMI_CLK)) return 0; usleep_range(1000, 1500); } dev_err(p_phy->dev, "Timed out waiting for PHY to power on\n"); ret = -ETIMEDOUT; disable_clk: clk_disable_unprepare(p_phy->phy_clk); return ret; } static int pistachio_usb_phy_power_off(struct phy *phy) { struct pistachio_usb_phy *p_phy = phy_get_drvdata(phy); clk_disable_unprepare(p_phy->phy_clk); return 0; } static const struct phy_ops pistachio_usb_phy_ops = { .power_on = pistachio_usb_phy_power_on, .power_off = pistachio_usb_phy_power_off, .owner = THIS_MODULE, }; static int pistachio_usb_phy_probe(struct platform_device *pdev) { 
struct pistachio_usb_phy *p_phy; struct phy_provider *provider; struct phy *phy; int ret; p_phy = devm_kzalloc(&pdev->dev, sizeof(*p_phy), GFP_KERNEL); if (!p_phy) return -ENOMEM; p_phy->dev = &pdev->dev; platform_set_drvdata(pdev, p_phy); p_phy->cr_top = syscon_regmap_lookup_by_phandle(p_phy->dev->of_node, "img,cr-top"); if (IS_ERR(p_phy->cr_top)) { dev_err(p_phy->dev, "Failed to get CR_TOP registers: %ld\n", PTR_ERR(p_phy->cr_top)); return PTR_ERR(p_phy->cr_top); } p_phy->phy_clk = devm_clk_get(p_phy->dev, "usb_phy"); if (IS_ERR(p_phy->phy_clk)) { dev_err(p_phy->dev, "Failed to get usb_phy clock: %ld\n", PTR_ERR(p_phy->phy_clk)); return PTR_ERR(p_phy->phy_clk); } ret = of_property_read_u32(p_phy->dev->of_node, "img,refclk", &p_phy->refclk); if (ret < 0) { dev_err(p_phy->dev, "No reference clock selector specified\n"); return ret; } p_phy->vbus_drive = of_property_read_bool(p_phy->dev->of_node, "enable-vbus-drive"); phy = devm_phy_create(p_phy->dev, NULL, &pistachio_usb_phy_ops); if (IS_ERR(phy)) { dev_err(p_phy->dev, "Failed to create PHY: %ld\n", PTR_ERR(phy)); return PTR_ERR(phy); } phy_set_drvdata(phy, p_phy); provider = devm_of_phy_provider_register(p_phy->dev, of_phy_simple_xlate); if (IS_ERR(provider)) { dev_err(p_phy->dev, "Failed to register PHY provider: %ld\n", PTR_ERR(provider)); return PTR_ERR(provider); } return 0; } static const struct of_device_id pistachio_usb_phy_of_match[] = { { .compatible = "img,pistachio-usb-phy", }, { }, }; MODULE_DEVICE_TABLE(of, pistachio_usb_phy_of_match); static struct platform_driver pistachio_usb_phy_driver = { .probe = pistachio_usb_phy_probe, .driver = { .name = "pistachio-usb-phy", .of_match_table = pistachio_usb_phy_of_match, }, }; module_platform_driver(pistachio_usb_phy_driver); MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>"); MODULE_DESCRIPTION("IMG Pistachio USB2.0 PHY driver"); MODULE_LICENSE("GPL v2");
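/*
 * Illustrative sketch (not part of this driver): how a host-controller
 * driver might consume the PHY registered above through the generic PHY
 * framework. The "usb" con_id and the my_* name are hypothetical;
 * devm_phy_get() and phy_power_on() are the standard consumer calls.
 */
static int my_host_enable_phy(struct device *dev)
{
	struct phy *usb_phy;
	int ret;

	usb_phy = devm_phy_get(dev, "usb");
	if (IS_ERR(usb_phy))
		return PTR_ERR(usb_phy);

	/* ends up in pistachio_usb_phy_power_on() via pistachio_usb_phy_ops */
	ret = phy_power_on(usb_phy);
	if (ret)
		dev_err(dev, "Failed to power on USB PHY: %d\n", ret);

	return ret;
}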
/* Source file above: drivers/phy/phy-pistachio-usb.c from chrisdearman/kernel-common (C, gpl-2.0, 5,778 bytes) */
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- * * Copyright (C) 2013 Richard Hughes <richard@hughsie.com> * * Licensed under the GNU General Public License Version 2 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /** * SECTION:huey-enum * @short_description: Types used by huey and libhuey * * These helper functions provide a way to marshal enumerated values to * text and back again. * * See also: #CdClient, #CdDevice */ #include "config.h" #include <glib.h> #include "huey-enum.h" /** * huey_rc_to_string: * * Since: 0.1.29 **/ const gchar * huey_rc_to_string (guchar value) { if (value == HUEY_RC_SUCCESS) return "success"; if (value == HUEY_RC_LOCKED) return "locked"; if (value == HUEY_RC_ERROR) return "error"; if (value == HUEY_RC_RETRY) return "retry"; if (value == HUEY_RC_UNKNOWN_5A) return "unknown5a"; if (value == HUEY_RC_UNKNOWN_81) return "unknown81"; return NULL; } /** * huey_cmd_code_to_string: * * Since: 0.1.29 **/ const gchar * huey_cmd_code_to_string (guchar value) { if (value == HUEY_CMD_GET_STATUS) return "get-status"; if (value == HUEY_CMD_READ_GREEN) return "read-green"; if (value == HUEY_CMD_READ_BLUE) return "read-blue"; if (value == HUEY_CMD_SET_INTEGRATION_TIME) return "set-integration-time"; if (value == HUEY_CMD_GET_INTEGRATION_TIME) return "get-integration-time"; if (value == HUEY_CMD_REGISTER_WRITE) return "reg-write"; if (value == HUEY_CMD_REGISTER_READ) return "reg-read"; if (value == HUEY_CMD_UNLOCK) return "unlock"; if (value == HUEY_CMD_UNKNOWN_0F) return "unknown0f"; if (value == HUEY_CMD_UNKNOWN_10) return "unknown10"; if (value == HUEY_CMD_UNKNOWN_11) return "unknown11"; if (value == HUEY_CMD_UNKNOWN_12) return "unknown12"; if (value == HUEY_CMD_SENSOR_MEASURE_RGB_CRT) return "measure-rgb-crt"; if (value == HUEY_CMD_UNKNOWN_15) return "unknown15(status?)"; if (value == HUEY_CMD_SENSOR_MEASURE_RGB) return "measure-rgb"; if (value == HUEY_CMD_UNKNOWN_21) return "unknown21"; if (value == HUEY_CMD_GET_AMBIENT) return "get-ambient"; if (value == HUEY_CMD_SET_LEDS) return "set-leds"; if (value == HUEY_CMD_SENSOR_MEASURE_RGB_ALT) return "measure-rgb-alt"; return NULL; }
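/*
 * Illustrative sketch (not part of huey-enum.c): the to_string helpers
 * above are handy when reporting protocol failures. The function below is
 * a hypothetical example of turning a command and its return code into a
 * readable message with GLib; both helpers may return NULL for values they
 * do not know.
 */
static gchar *
my_describe_failure (guchar cmd, guchar rc)
{
	const gchar *cmd_str = huey_cmd_code_to_string (cmd);
	const gchar *rc_str = huey_rc_to_string (rc);
	return g_strdup_printf ("command %s failed: %s",
				cmd_str != NULL ? cmd_str : "unknown",
				rc_str != NULL ? rc_str : "unknown");
}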
/* Source file above: lib/huey/huey-enum.c from heysion/colord-clone (C, gpl-2.0, 2,937 bytes) */
/* * Copyright (C) 2001-2004 by David Brownell * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* this file is part of ehci-hcd.c */ /*-------------------------------------------------------------------------*/ /* * EHCI Root Hub ... the nonsharable stuff * * Registers don't need cpu_to_le32, that happens transparently */ #ifdef CONFIG_ARCH_MXS #define MXS_USB_HOST_HACK #include <linux/fsl_devices.h> #endif /*-------------------------------------------------------------------------*/ #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) #ifdef CONFIG_PM static int ehci_hub_control( struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength ); /* After a power loss, ports that were owned by the companion must be * reset so that the companion can still own them. */ static void ehci_handover_companion_ports(struct ehci_hcd *ehci) { u32 __iomem *reg; u32 status; int port; __le32 buf; struct usb_hcd *hcd = ehci_to_hcd(ehci); if (!ehci->owned_ports) return; /* Give the connections some time to appear */ msleep(20); port = HCS_N_PORTS(ehci->hcs_params); while (port--) { if (test_bit(port, &ehci->owned_ports)) { reg = &ehci->regs->port_status[port]; status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; /* Port already owned by companion? */ if (status & PORT_OWNER) clear_bit(port, &ehci->owned_ports); else if (test_bit(port, &ehci->companion_ports)) ehci_writel(ehci, status & ~PORT_PE, reg); else ehci_hub_control(hcd, SetPortFeature, USB_PORT_FEAT_RESET, port + 1, NULL, 0); } } if (!ehci->owned_ports) return; msleep(90); /* Wait for resets to complete */ port = HCS_N_PORTS(ehci->hcs_params); while (port--) { if (test_bit(port, &ehci->owned_ports)) { ehci_hub_control(hcd, GetPortStatus, 0, port + 1, (char *) &buf, sizeof(buf)); /* The companion should now own the port, * but if something went wrong the port must not * remain enabled. */ reg = &ehci->regs->port_status[port]; status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; if (status & PORT_OWNER) ehci_writel(ehci, status | PORT_CSC, reg); else { ehci_dbg(ehci, "failed handover port %d: %x\n", port + 1, status); ehci_writel(ehci, status & ~PORT_PE, reg); } } } ehci->owned_ports = 0; } static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, bool suspending) { int port; u32 temp; /* If remote wakeup is enabled for the root hub but disabled * for the controller, we must adjust all the port wakeup flags * when the controller is suspended or resumed. In all other * cases they don't need to be changed. 
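 * (Suspending clears PORT_WAKE_BITS; resuming sets the WKOC and either
 * WKDISC or WKCONN bits depending on whether a device is connected, as
 * the loop below shows.)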
*/ if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || device_may_wakeup(ehci_to_hcd(ehci)->self.controller)) return; /* clear phy low-power mode before changing wakeup flags */ if (ehci->has_hostpc) { port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *hostpc_reg; hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs + HOSTPC0 + 4 * port); temp = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); } msleep(5); } port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *reg = &ehci->regs->port_status[port]; u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; u32 t2 = t1 & ~PORT_WAKE_BITS; /* If we are suspending the controller, clear the flags. * If we are resuming the controller, set the wakeup flags. */ if (!suspending) { if (t1 & PORT_CONNECT) t2 |= PORT_WKOC_E | PORT_WKDISC_E; else t2 |= PORT_WKOC_E | PORT_WKCONN_E; } ehci_vdbg(ehci, "port %d, %08x -> %08x\n", port + 1, t1, t2); ehci_writel(ehci, t2, reg); } /* enter phy low-power mode again */ if (ehci->has_hostpc) { port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *hostpc_reg; hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs + HOSTPC0 + 4 * port); temp = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg); } } } static int ehci_bus_suspend (struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); int port; int mask; int changed; ehci_dbg(ehci, "suspend root hub\n"); if (time_before (jiffies, ehci->next_statechange)) msleep(5); del_timer_sync(&ehci->watchdog); del_timer_sync(&ehci->iaa_watchdog); spin_lock_irq (&ehci->lock); /* Once the controller is stopped, port resumes that are already * in progress won't complete. Hence if remote wakeup is enabled * for the root hub and any ports are in the middle of a resume or * remote wakeup, we must fail the suspend. */ if (hcd->self.root_hub->do_remote_wakeup) { port = HCS_N_PORTS(ehci->hcs_params); while (port--) { if (ehci->reset_done[port] != 0) { spin_unlock_irq(&ehci->lock); ehci_dbg(ehci, "suspend failed because " "port %d is resuming\n", port + 1); return -EBUSY; } } } /* stop schedules, clean any completed work */ if (HC_IS_RUNNING(hcd->state)) { ehci_quiesce (ehci); hcd->state = HC_STATE_QUIESCING; } ehci->command = ehci_readl(ehci, &ehci->regs->command); ehci_work(ehci); /* Unlike other USB host controller types, EHCI doesn't have * any notion of "global" or bus-wide suspend. The driver has * to manually suspend all the active unsuspended ports, and * then manually resume them in the bus_resume() routine. */ ehci->bus_suspended = 0; ehci->owned_ports = 0; changed = 0; port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *reg = &ehci->regs->port_status [port]; u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; u32 t2 = t1 & ~PORT_WAKE_BITS; /* keep track of which ports we suspend */ if (t1 & PORT_OWNER) set_bit(port, &ehci->owned_ports); else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) { t2 |= PORT_SUSPEND; set_bit(port, &ehci->bus_suspended); } /* enable remote wakeup on all ports, if told to do so */ if (hcd->self.root_hub->do_remote_wakeup) { /* only enable appropriate wake bits, otherwise the * hardware can not go phy low power mode. If a race * condition happens here(connection change during bits * set), the port change detection will finally fix it. 
*/ if (t1 & PORT_CONNECT) t2 |= PORT_WKOC_E | PORT_WKDISC_E; else t2 |= PORT_WKOC_E | PORT_WKCONN_E; } if (t1 != t2) { ehci_vdbg (ehci, "port %d, %08x -> %08x\n", port + 1, t1, t2); ehci_writel(ehci, t2, reg); changed = 1; } } if (changed && ehci->has_hostpc) { spin_unlock_irq(&ehci->lock); msleep(5); /* 5 ms for HCD to enter low-power mode */ spin_lock_irq(&ehci->lock); port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *hostpc_reg; u32 t3; hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs + HOSTPC0 + 4 * port); t3 = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); t3 = ehci_readl(ehci, hostpc_reg); ehci_dbg(ehci, "Port %d phy low-power mode %s\n", port, (t3 & HOSTPC_PHCD) ? "succeeded" : "failed"); } } /* Apparently some devices need a >= 1-uframe delay here */ if (ehci->bus_suspended) udelay(150); /* turn off now-idle HC */ ehci_halt (ehci); hcd->state = HC_STATE_SUSPENDED; if (ehci->reclaim) end_unlink_async(ehci); /* allow remote wakeup */ mask = INTR_MASK; if (!hcd->self.root_hub->do_remote_wakeup) mask &= ~STS_PCD; ehci_writel(ehci, mask, &ehci->regs->intr_enable); ehci_readl(ehci, &ehci->regs->intr_enable); ehci->next_statechange = jiffies + msecs_to_jiffies(10); spin_unlock_irq (&ehci->lock); /* ehci_work() may have re-enabled the watchdog timer, which we do not * want, and so we must delete any pending watchdog timer events. */ del_timer_sync(&ehci->watchdog); return 0; } /* caller has locked the root hub, and should reset/reinit on error */ static int ehci_bus_resume (struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); u32 temp; u32 power_okay; int i; u8 resume_needed = 0; if (time_before (jiffies, ehci->next_statechange)) msleep(5); spin_lock_irq (&ehci->lock); if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) { spin_unlock_irq(&ehci->lock); return -ESHUTDOWN; } if (unlikely(ehci->debug)) { if (!dbgp_reset_prep()) ehci->debug = NULL; else dbgp_external_startup(); } /* Ideally and we've got a real resume here, and no port's power * was lost. (For PCI, that means Vaux was maintained.) But we * could instead be restoring a swsusp snapshot -- so that BIOS was * the last user of the controller, not reset/pm hardware keeping * state we gave to it. */ power_okay = ehci_readl(ehci, &ehci->regs->intr_enable); ehci_dbg(ehci, "resume root hub%s\n", power_okay ? "" : " after power loss"); /* at least some APM implementations will try to deliver * IRQs right away, so delay them until we're ready. */ ehci_writel(ehci, 0, &ehci->regs->intr_enable); /* re-init operational registers */ ehci_writel(ehci, 0, &ehci->regs->segment); ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list); ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next); /* restore CMD_RUN, framelist size, and irq threshold */ ehci_writel(ehci, ehci->command, &ehci->regs->command); /* Some controller/firmware combinations need a delay during which * they set up the port statuses. See Bugzilla #8190. 
*/ spin_unlock_irq(&ehci->lock); msleep(8); spin_lock_irq(&ehci->lock); /* clear phy low-power mode before resume */ if (ehci->bus_suspended && ehci->has_hostpc) { i = HCS_N_PORTS(ehci->hcs_params); while (i--) { if (test_bit(i, &ehci->bus_suspended)) { u32 __iomem *hostpc_reg; hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs + HOSTPC0 + 4 * i); temp = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); } } spin_unlock_irq(&ehci->lock); msleep(5); spin_lock_irq(&ehci->lock); } /* manually resume the ports we suspended during bus_suspend() */ i = HCS_N_PORTS (ehci->hcs_params); while (i--) { temp = ehci_readl(ehci, &ehci->regs->port_status [i]); temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); if (test_bit(i, &ehci->bus_suspended) && (temp & PORT_SUSPEND)) { temp |= PORT_RESUME; resume_needed = 1; } ehci_writel(ehci, temp, &ehci->regs->port_status [i]); } /* msleep for 20ms only if code is trying to resume port */ if (resume_needed) { spin_unlock_irq(&ehci->lock); msleep(20); #ifdef MXS_USB_HOST_HACK { struct fsl_usb2_platform_data *pdata; pdata = hcd->self.controller->platform_data; if (pdata && pdata->platform_resume) pdata->platform_resume(pdata); } #endif spin_lock_irq(&ehci->lock); } i = HCS_N_PORTS (ehci->hcs_params); while (i--) { temp = ehci_readl(ehci, &ehci->regs->port_status [i]); if (test_bit(i, &ehci->bus_suspended) && (temp & PORT_SUSPEND)) { temp &= ~(PORT_RWC_BITS | PORT_RESUME); ehci_writel(ehci, temp, &ehci->regs->port_status [i]); ehci_vdbg (ehci, "resumed port %d\n", i + 1); } } (void) ehci_readl(ehci, &ehci->regs->command); /* maybe re-activate the schedule(s) */ temp = 0; if (ehci->async->qh_next.qh) temp |= CMD_ASE; if (ehci->periodic_sched) temp |= CMD_PSE; if (temp) { ehci->command |= temp; ehci_writel(ehci, ehci->command, &ehci->regs->command); } ehci->next_statechange = jiffies + msecs_to_jiffies(5); hcd->state = HC_STATE_RUNNING; /* Now we can safely re-enable irqs */ ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable); spin_unlock_irq (&ehci->lock); ehci_handover_companion_ports(ehci); return 0; } #else #define ehci_bus_suspend NULL #define ehci_bus_resume NULL #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ /* Display the ports dedicated to the companion controller */ static ssize_t show_companion(struct device *dev, struct device_attribute *attr, char *buf) { struct ehci_hcd *ehci; int nports, index, n; int count = PAGE_SIZE; char *ptr = buf; ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev))); nports = HCS_N_PORTS(ehci->hcs_params); for (index = 0; index < nports; ++index) { if (test_bit(index, &ehci->companion_ports)) { n = scnprintf(ptr, count, "%d\n", index + 1); ptr += n; count -= n; } } return ptr - buf; } /* * Sets the owner of a port */ static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner) { u32 __iomem *status_reg; u32 port_status; int try; status_reg = &ehci->regs->port_status[portnum]; /* * The controller won't set the OWNER bit if the port is * enabled, so this loop will sometimes require at least two * iterations: one to disable the port and one to set OWNER. 
*/ for (try = 4; try > 0; --try) { spin_lock_irq(&ehci->lock); port_status = ehci_readl(ehci, status_reg); if ((port_status & PORT_OWNER) == new_owner || (port_status & (PORT_OWNER | PORT_CONNECT)) == 0) try = 0; else { port_status ^= PORT_OWNER; port_status &= ~(PORT_PE | PORT_RWC_BITS); ehci_writel(ehci, port_status, status_reg); } spin_unlock_irq(&ehci->lock); if (try > 1) msleep(5); } } /* * Dedicate or undedicate a port to the companion controller. * Syntax is "[-]portnum", where a leading '-' sign means * return control of the port to the EHCI controller. */ static ssize_t store_companion(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ehci_hcd *ehci; int portnum, new_owner; ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev))); new_owner = PORT_OWNER; /* Owned by companion */ if (sscanf(buf, "%d", &portnum) != 1) return -EINVAL; if (portnum < 0) { portnum = - portnum; new_owner = 0; /* Owned by EHCI */ } if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params)) return -ENOENT; portnum--; if (new_owner) set_bit(portnum, &ehci->companion_ports); else clear_bit(portnum, &ehci->companion_ports); set_owner(ehci, portnum, new_owner); return count; } static DEVICE_ATTR(companion, 0644, show_companion, store_companion); static inline void create_companion_file(struct ehci_hcd *ehci) { int i; /* with integrated TT there is no companion! */ if (!ehci_is_TDI(ehci)) i = device_create_file(ehci_to_hcd(ehci)->self.controller, &dev_attr_companion); } static inline void remove_companion_file(struct ehci_hcd *ehci) { /* with integrated TT there is no companion! */ if (!ehci_is_TDI(ehci)) device_remove_file(ehci_to_hcd(ehci)->self.controller, &dev_attr_companion); } /*-------------------------------------------------------------------------*/ static int check_reset_complete ( struct ehci_hcd *ehci, int index, u32 __iomem *status_reg, int port_status ) { if (!(port_status & PORT_CONNECT)) return port_status; /* if reset finished and it's still not enabled -- handoff */ if (!(port_status & PORT_PE)) { /* with integrated TT, there's nobody to hand it to! */ if (ehci_is_TDI(ehci)) { ehci_dbg (ehci, "Failed to enable port %d on root hub TT\n", index+1); return port_status; } ehci_dbg (ehci, "port %d full speed --> companion\n", index + 1); // what happens if HCS_N_CC(params) == 0 ? port_status |= PORT_OWNER; port_status &= ~PORT_RWC_BITS; ehci_writel(ehci, port_status, status_reg); /* ensure 440EPX ohci controller state is operational */ if (ehci->has_amcc_usb23) set_ohci_hcfs(ehci, 1); } else { ehci_dbg (ehci, "port %d high speed\n", index + 1); /* ensure 440EPx ohci controller state is suspended */ if (ehci->has_amcc_usb23) set_ohci_hcfs(ehci, 0); } return port_status; } /*-------------------------------------------------------------------------*/ /* build "status change" packet (one or two bytes) from HC registers */ static int ehci_hub_status_data (struct usb_hcd *hcd, char *buf) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); u32 temp, status = 0; u32 mask; int ports, i, retval = 1; unsigned long flags; /* if !USB_SUSPEND, root hub timers won't get shut down ... */ if (!HC_IS_RUNNING(hcd->state)) return 0; /* init status to no-changes */ buf [0] = 0; ports = HCS_N_PORTS (ehci->hcs_params); if (ports > 7) { buf [1] = 0; retval++; } /* Some boards (mostly VIA?) report bogus overcurrent indications, * causing massive log spam unless we completely ignore them. 
It * may be relevant that VIA VT8235 controllers, where PORT_POWER is * always set, seem to clear PORT_OCC and PORT_CSC when writing to * PORT_POWER; that's surprising, but maybe within-spec. */ if (!ignore_oc) mask = PORT_CSC | PORT_PEC | PORT_OCC; else mask = PORT_CSC | PORT_PEC; // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND /* no hub change reports (bit 0) for now (power, ...) */ /* port N changes (bit N)? */ spin_lock_irqsave (&ehci->lock, flags); for (i = 0; i < ports; i++) { temp = ehci_readl(ehci, &ehci->regs->port_status [i]); /* * Return status information even for ports with OWNER set. * Otherwise khubd wouldn't see the disconnect event when a * high-speed device is switched over to the companion * controller by the user. */ if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend) || (ehci->reset_done[i] && time_after_eq( jiffies, ehci->reset_done[i]))) { if (i < 7) buf [0] |= 1 << (i + 1); else buf [1] |= 1 << (i - 7); status = STS_PCD; } } /* FIXME autosuspend idle root hubs */ spin_unlock_irqrestore (&ehci->lock, flags); return status ? retval : 0; } /*-------------------------------------------------------------------------*/ static void ehci_hub_descriptor ( struct ehci_hcd *ehci, struct usb_hub_descriptor *desc ) { int ports = HCS_N_PORTS (ehci->hcs_params); u16 temp; desc->bDescriptorType = 0x29; desc->bPwrOn2PwrGood = 10; /* ehci 1.0, 2.3.9 says 20ms max */ desc->bHubContrCurrent = 0; desc->bNbrPorts = ports; temp = 1 + (ports / 8); desc->bDescLength = 7 + 2 * temp; /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */ memset (&desc->bitmap [0], 0, temp); memset (&desc->bitmap [temp], 0xff, temp); temp = 0x0008; /* per-port overcurrent reporting */ if (HCS_PPC (ehci->hcs_params)) temp |= 0x0001; /* per-port power control */ else temp |= 0x0002; /* no power switching */ #if 0 // re-enable when we support USB_PORT_FEAT_INDICATOR below. if (HCS_INDICATOR (ehci->hcs_params)) temp |= 0x0080; /* per-port indicators (LEDs) */ #endif desc->wHubCharacteristics = cpu_to_le16(temp); } /*-------------------------------------------------------------------------*/ static int ehci_hub_control ( struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength ) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); int ports = HCS_N_PORTS (ehci->hcs_params); u32 __iomem *status_reg = &ehci->regs->port_status[ (wIndex & 0xff) - 1]; u32 __iomem *hostpc_reg = NULL; u32 temp, temp1, status; unsigned long flags; int retval = 0; unsigned selector; /* * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. * HCS_INDICATOR may say we can change LEDs to off/amber/green. * (track current state ourselves) ... blink for diagnostics, * power, "this is the one", etc. EHCI spec supports this. */ if (ehci->has_hostpc) hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs + HOSTPC0 + 4 * ((wIndex & 0xff) - 1)); spin_lock_irqsave (&ehci->lock, flags); switch (typeReq) { case ClearHubFeature: switch (wValue) { case C_HUB_LOCAL_POWER: case C_HUB_OVER_CURRENT: /* no hub-wide feature/status flags */ break; default: goto error; } break; case ClearPortFeature: if (!wIndex || wIndex > ports) goto error; wIndex--; temp = ehci_readl(ehci, status_reg); /* * Even if OWNER is set, so the port is owned by the * companion controller, khubd needs to be able to clear * the port-change status bits (especially * USB_PORT_STAT_C_CONNECTION). 
*/ switch (wValue) { case USB_PORT_FEAT_ENABLE: ehci_writel(ehci, temp & ~PORT_PE, status_reg); break; case USB_PORT_FEAT_C_ENABLE: ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg); break; case USB_PORT_FEAT_SUSPEND: if (temp & PORT_RESET) goto error; if (ehci->no_selective_suspend) break; if (!(temp & PORT_SUSPEND)) break; if ((temp & PORT_PE) == 0) goto error; /* clear phy low-power mode before resume */ if (hostpc_reg) { temp1 = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp1 & ~HOSTPC_PHCD, hostpc_reg); spin_unlock_irqrestore(&ehci->lock, flags); msleep(5);/* wait to leave low-power mode */ spin_lock_irqsave(&ehci->lock, flags); } /* resume signaling for 20 msec */ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); ehci_writel(ehci, temp | PORT_RESUME, status_reg); ehci->reset_done[wIndex] = jiffies + msecs_to_jiffies(20); break; case USB_PORT_FEAT_C_SUSPEND: clear_bit(wIndex, &ehci->port_c_suspend); break; case USB_PORT_FEAT_POWER: if (HCS_PPC (ehci->hcs_params)) ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_POWER), status_reg); break; case USB_PORT_FEAT_C_CONNECTION: ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg); break; case USB_PORT_FEAT_C_OVER_CURRENT: ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg); break; case USB_PORT_FEAT_C_RESET: /* GetPortStatus clears reset */ break; default: goto error; } ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */ break; case GetHubDescriptor: ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *) buf); break; case GetHubStatus: /* no hub-wide feature/status flags */ memset (buf, 0, 4); //cpu_to_le32s ((u32 *) buf); break; case GetPortStatus: if (!wIndex || wIndex > ports) goto error; wIndex--; status = 0; temp = ehci_readl(ehci, status_reg); // wPortChange bits if (temp & PORT_CSC) status |= USB_PORT_STAT_C_CONNECTION << 16; if (temp & PORT_PEC) status |= USB_PORT_STAT_C_ENABLE << 16; if ((temp & PORT_OCC) && !ignore_oc){ status |= USB_PORT_STAT_C_OVERCURRENT << 16; /* * Hubs should disable port power on over-current. * However, not all EHCI implementations do this * automatically, even if they _do_ support per-port * power switching; they're allowed to just limit the * current. khubd will turn the power back on. */ if (HCS_PPC (ehci->hcs_params)){ ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_POWER), status_reg); } } /* whoever resumes must GetPortStatus to complete it!! */ if (temp & PORT_RESUME) { /* Remote Wakeup received? */ if (!ehci->reset_done[wIndex]) { /* resume signaling for 20 msec */ ehci->reset_done[wIndex] = jiffies + msecs_to_jiffies(20); /* check the port again */ mod_timer(&ehci_to_hcd(ehci)->rh_timer, ehci->reset_done[wIndex]); } /* resume completed? */ else if (time_after_eq(jiffies, ehci->reset_done[wIndex])) { clear_bit(wIndex, &ehci->suspended_ports); set_bit(wIndex, &ehci->port_c_suspend); ehci->reset_done[wIndex] = 0; /* stop resume signaling */ temp = ehci_readl(ehci, status_reg); ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESUME), status_reg); retval = handshake(ehci, status_reg, PORT_RESUME, 0, 2000 /* 2msec */); if (retval != 0) { ehci_err(ehci, "port %d resume error %d\n", wIndex + 1, retval); goto error; } temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10)); } } /* whoever resets must GetPortStatus to complete it!! 
*/ if ((temp & PORT_RESET) && time_after_eq(jiffies, ehci->reset_done[wIndex])) { status |= USB_PORT_STAT_C_RESET << 16; ehci->reset_done [wIndex] = 0; /* force reset to complete */ ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET), status_reg); /* REVISIT: some hardware needs 550+ usec to clear * this bit; seems too long to spin routinely... */ retval = handshake(ehci, status_reg, PORT_RESET, 0, 1000); if (retval != 0) { ehci_err (ehci, "port %d reset error %d\n", wIndex + 1, retval); goto error; } /* see what we found out */ temp = check_reset_complete (ehci, wIndex, status_reg, ehci_readl(ehci, status_reg)); } if (!(temp & (PORT_RESUME|PORT_RESET))) ehci->reset_done[wIndex] = 0; /* transfer dedicated ports to the companion hc */ if ((temp & PORT_CONNECT) && test_bit(wIndex, &ehci->companion_ports)) { temp &= ~PORT_RWC_BITS; temp |= PORT_OWNER; ehci_writel(ehci, temp, status_reg); ehci_dbg(ehci, "port %d --> companion\n", wIndex + 1); temp = ehci_readl(ehci, status_reg); } /* * Even if OWNER is set, there's no harm letting khubd * see the wPortStatus values (they should all be 0 except * for PORT_POWER anyway). */ if (temp & PORT_CONNECT) { status |= USB_PORT_STAT_CONNECTION; // status may be from integrated TT if (ehci->has_hostpc) { temp1 = ehci_readl(ehci, hostpc_reg); status |= ehci_port_speed(ehci, temp1); } else status |= ehci_port_speed(ehci, temp); } if (temp & PORT_PE) status |= USB_PORT_STAT_ENABLE; /* maybe the port was unsuspended without our knowledge */ if (temp & (PORT_SUSPEND|PORT_RESUME)) { status |= USB_PORT_STAT_SUSPEND; } else if (test_bit(wIndex, &ehci->suspended_ports)) { clear_bit(wIndex, &ehci->suspended_ports); ehci->reset_done[wIndex] = 0; if (temp & PORT_PE) set_bit(wIndex, &ehci->port_c_suspend); } if (temp & PORT_OC) status |= USB_PORT_STAT_OVERCURRENT; if (temp & PORT_RESET) status |= USB_PORT_STAT_RESET; if (temp & PORT_POWER) status |= USB_PORT_STAT_POWER; if (test_bit(wIndex, &ehci->port_c_suspend)) status |= USB_PORT_STAT_C_SUSPEND << 16; #ifndef VERBOSE_DEBUG if (status & ~0xffff) /* only if wPortChange is interesting */ #endif dbg_port (ehci, "GetStatus", wIndex + 1, temp); put_unaligned_le32(status, buf); break; case SetHubFeature: switch (wValue) { case C_HUB_LOCAL_POWER: case C_HUB_OVER_CURRENT: /* no hub-wide feature/status flags */ break; default: goto error; } break; case SetPortFeature: selector = wIndex >> 8; wIndex &= 0xff; if (unlikely(ehci->debug)) { /* If the debug port is active any port * feature requests should get denied */ if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) && (readl(&ehci->debug->control) & DBGP_ENABLED)) { retval = -ENODEV; goto error_exit; } } if (!wIndex || wIndex > ports) goto error; wIndex--; temp = ehci_readl(ehci, status_reg); if (temp & PORT_OWNER) break; temp &= ~PORT_RWC_BITS; switch (wValue) { case USB_PORT_FEAT_SUSPEND: if (ehci->no_selective_suspend) break; if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) goto error; /* After above check the port must be connected. 
* Set appropriate bit thus could put phy into low power * mode if we have hostpc feature */ temp &= ~PORT_WKCONN_E; temp |= PORT_WKDISC_E | PORT_WKOC_E; ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); #ifdef MXS_USB_HOST_HACK spin_unlock_irqrestore(&ehci->lock, flags); { struct fsl_usb2_platform_data *pdata; pdata = hcd->self.controller->platform_data; if (pdata && pdata->platform_suspend) pdata->platform_suspend(pdata); } spin_lock_irqsave(&ehci->lock, flags); #endif if (hostpc_reg) { spin_unlock_irqrestore(&ehci->lock, flags); msleep(5);/* 5ms for HCD enter low pwr mode */ spin_lock_irqsave(&ehci->lock, flags); temp1 = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp1 | HOSTPC_PHCD, hostpc_reg); temp1 = ehci_readl(ehci, hostpc_reg); ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", wIndex, (temp1 & HOSTPC_PHCD) ? "succeeded" : "failed"); } set_bit(wIndex, &ehci->suspended_ports); break; case USB_PORT_FEAT_POWER: if (HCS_PPC (ehci->hcs_params)) ehci_writel(ehci, temp | PORT_POWER, status_reg); break; case USB_PORT_FEAT_RESET: if (temp & PORT_RESUME) goto error; /* line status bits may report this as low speed, * which can be fine if this root hub has a * transaction translator built in. */ if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT && !ehci_is_TDI(ehci) && PORT_USB11 (temp)) { ehci_dbg (ehci, "port %d low speed --> companion\n", wIndex + 1); temp |= PORT_OWNER; } else { ehci_vdbg (ehci, "port %d reset\n", wIndex + 1); temp |= PORT_RESET; temp &= ~PORT_PE; /* * caller must wait, then call GetPortStatus * usb 2.0 spec says 50 ms resets on root */ ehci->reset_done [wIndex] = jiffies + msecs_to_jiffies (50); } ehci_writel(ehci, temp, status_reg); break; /* For downstream facing ports (these): one hub port is put * into test mode according to USB2 11.24.2.13, then the hub * must be reset (which for root hub now means rmmod+modprobe, * or else system reboot). See EHCI 2.3.9 and 4.14 for info * about the EHCI-specific stuff. */ case USB_PORT_FEAT_TEST: if (!selector || selector > 5) goto error; ehci_quiesce(ehci); ehci_halt(ehci); temp |= selector << 16; ehci_writel(ehci, temp, status_reg); break; default: goto error; } ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ break; default: error: /* "stall" on error */ retval = -EPIPE; } error_exit: spin_unlock_irqrestore (&ehci->lock, flags); return retval; } static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); if (ehci_is_TDI(ehci)) return; set_owner(ehci, --portnum, PORT_OWNER); } static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); u32 __iomem *reg; if (ehci_is_TDI(ehci)) return 0; reg = &ehci->regs->port_status[portnum - 1]; return ehci_readl(ehci, reg) & PORT_OWNER; }
btolfa/kernel_tion_pro28
drivers/usb/host/ehci-hub.c
C
gpl-2.0
31,050
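ehci_hub_status_data() in the ehci-hub.c record above packs per-port change flags into the one- or two-byte "status change" report defined by the USB hub class: bit 0 of the first byte is reserved for hub-wide changes, port N reports in bit N, and ports 8-15 wrap into the second byte. Below is a standalone sketch of just that packing, detached from the EHCI register reads; pack_hub_change_bitmap() is an illustrative name, not a kernel function.

#include <string.h>

/* Pack "port changed" flags into a USB hub status-change bitmap.
 * port_changed[i] is nonzero if port i+1 has a pending change; buf must
 * hold 2 bytes. Returns the number of bytes used, mirroring the
 * driver's retval: 1 byte for up to 7 ports, 2 otherwise (EHCI allows
 * at most 15 root-hub ports). */
static int pack_hub_change_bitmap(const int *port_changed, int nports,
				  unsigned char *buf)
{
	int bytes = (nports > 7) ? 2 : 1;
	int i;

	memset(buf, 0, bytes);
	for (i = 0; i < nports; i++) {
		if (!port_changed[i])
			continue;
		if (i < 7)
			buf[0] |= 1 << (i + 1);	/* ports 1..7 -> byte 0, bits 1..7 */
		else
			buf[1] |= 1 << (i - 7);	/* ports 8..15 -> byte 1 */
	}
	return bytes;
}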
/** * @file error.c Error functions * * purple * * Purple is the legal property of its developers, whose names are too numerous * to list here. Please refer to the COPYRIGHT file distributed with this * source distribution. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "nateon.h" #include "error.h" const char * nateon_error_get_text(unsigned int type, gboolean *debug) { static char msg[NATEON_BUF_LEN]; *debug = FALSE; switch (type) { // case 0: // g_snprintf(msg, sizeof(msg), // _("Unable to parse message")); // *debug = TRUE; // break; // case 200: // g_snprintf(msg, sizeof(msg), // _("Syntax Error (probably a client bug)")); // *debug = TRUE; // break; // case 201: // g_snprintf(msg, sizeof(msg), // _("Invalid e-mail address")); // break; // case 205: // g_snprintf(msg, sizeof(msg), _("User does not exist")); // break; // case 206: // g_snprintf(msg, sizeof(msg), // _("Fully qualified domain name missing")); // break; // case 207: // g_snprintf(msg, sizeof(msg), _("Already logged in")); // break; // case 208: // g_snprintf(msg, sizeof(msg), _("Invalid screen name")); // break; // case 209: // g_snprintf(msg, sizeof(msg), _("Invalid friendly name")); // break; // case 210: // g_snprintf(msg, sizeof(msg), _("List full")); // break; // case 215: // g_snprintf(msg, sizeof(msg), _("Already there")); // *debug = TRUE; // break; // case 216: // g_snprintf(msg, sizeof(msg), _("Not on list")); // break; // case 217: // g_snprintf(msg, sizeof(msg), _("User is offline")); // break; // case 218: // g_snprintf(msg, sizeof(msg), _("Already in the mode")); // *debug = TRUE; // break; // case 219: // g_snprintf(msg, sizeof(msg), _("Already in opposite list")); // *debug = TRUE; // break; // case 223: // g_snprintf(msg, sizeof(msg), _("Too many groups")); // break; // case 224: // g_snprintf(msg, sizeof(msg), _("Invalid group")); // break; // case 225: // g_snprintf(msg, sizeof(msg), _("User not in group")); // break; // case 229: // g_snprintf(msg, sizeof(msg), _("Group name too long")); // break; // case 230: // g_snprintf(msg, sizeof(msg), _("Cannot remove group zero")); // *debug = TRUE; // break; // case 231: // g_snprintf(msg, sizeof(msg), // _("Tried to add a user to a group " // "that doesn't exist")); // break; // case 280: // g_snprintf(msg, sizeof(msg), _("Switchboard failed")); // *debug = TRUE; // break; // case 281: // g_snprintf(msg, sizeof(msg), _("Notify transfer failed")); // *debug = TRUE; // break; // // case 300: // g_snprintf(msg, sizeof(msg), _("Required fields missing")); // *debug = TRUE; // break; // case 301: // g_snprintf(msg, sizeof(msg), _("Too many hits to a FND")); // *debug = TRUE; // break; // case 302: // g_snprintf(msg, sizeof(msg), _("Not logged in")); // break; // // case 500: // g_snprintf(msg, sizeof(msg), _("Service temporarily unavailable")); // break; // case 501: // g_snprintf(msg, sizeof(msg), _("Database 
server error")); // *debug = TRUE; // break; // case 502: // g_snprintf(msg, sizeof(msg), _("Command disabled")); // *debug = TRUE; // break; // case 510: // g_snprintf(msg, sizeof(msg), _("File operation error")); // *debug = TRUE; // break; // case 520: // g_snprintf(msg, sizeof(msg), _("Memory allocation error")); // *debug = TRUE; // break; // case 540: // g_snprintf(msg, sizeof(msg), _("Wrong CHL value sent to server")); // *debug = TRUE; // break; // // case 600: // g_snprintf(msg, sizeof(msg), _("Server busy")); // break; // case 601: // g_snprintf(msg, sizeof(msg), _("Server unavailable")); // break; // case 602: // g_snprintf(msg, sizeof(msg), _("Peer notification server down")); // *debug = TRUE; // break; // case 603: // g_snprintf(msg, sizeof(msg), _("Database connect error")); // *debug = TRUE; // break; // case 604: // g_snprintf(msg, sizeof(msg), // _("Server is going down (abandon ship)")); // break; // case 605: // g_snprintf(msg, sizeof(msg), _("Server unavailable")); // break; // // case 707: // g_snprintf(msg, sizeof(msg), _("Error creating connection")); // *debug = TRUE; // break; // case 710: // g_snprintf(msg, sizeof(msg), // _("CVR parameters are either unknown or not allowed")); // *debug = TRUE; // break; // case 711: // g_snprintf(msg, sizeof(msg), _("Unable to write")); // break; // case 712: // g_snprintf(msg, sizeof(msg), _("Session overload")); // *debug = TRUE; // break; // case 713: // g_snprintf(msg, sizeof(msg), _("User is too active")); // break; // case 714: // g_snprintf(msg, sizeof(msg), _("Too many sessions")); // break; // case 715: // g_snprintf(msg, sizeof(msg), _("Passport not verified")); // break; // case 717: // g_snprintf(msg, sizeof(msg), _("Bad friend file")); // *debug = TRUE; // break; // case 731: // g_snprintf(msg, sizeof(msg), _("Not expected")); // *debug = TRUE; // break; // // case 800: // g_snprintf(msg, sizeof(msg), // _("Friendly name changes too rapidly")); // break; // // case 910: // case 912: // case 918: // case 919: // case 921: // case 922: // g_snprintf(msg, sizeof(msg), _("Server too busy")); // break; // case 911: // case 917: // g_snprintf(msg, sizeof(msg), _("Authentication failed")); // break; // case 913: // g_snprintf(msg, sizeof(msg), _("Not allowed when offline")); // break; // case 914: // case 915: // case 916: // g_snprintf(msg, sizeof(msg), _("Server unavailable")); // break; // case 920: // g_snprintf(msg, sizeof(msg), _("Not accepting new users")); // break; // case 923: // g_snprintf(msg, sizeof(msg), // _("Kids Passport without parental consent")); // break; // case 924: // g_snprintf(msg, sizeof(msg), // _("Passport account not yet verified")); // break; // case 928: // g_snprintf(msg, sizeof(msg), _("Bad ticket")); // *debug = TRUE; // break; default: g_snprintf(msg, sizeof(msg), _("Unknown Error Code %d"), type); *debug = TRUE; break; } return msg; } void nateon_error_handle(NateonSession *session, unsigned int type) { char buf[NATEON_BUF_LEN]; gboolean debug; g_snprintf(buf, sizeof(buf), _("NATEON Error: %s\n"), nateon_error_get_text(type, &debug)); if (debug) purple_debug_warning("nateon", "error %d: %s\n", type, buf); else purple_notify_error(session->account->gc, NULL, buf, NULL); }
difro/pidgin-nateon
src/error.c
C
gpl-2.0
7,255
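The body of nateon_error_get_text() above is a switch whose cases are all commented out (carried over from the MSN plugin), so every code currently falls through to "Unknown Error Code %d". If those cases are ever revived, a table-driven layout is easier to maintain than a 200-line switch. A sketch follows; the type and function names are illustrative, and the entries shown are a handful of code/text pairs taken from the commented-out cases in the file (gettext wrapping omitted).

#include <glib.h>

typedef struct {
	unsigned int code;
	const char *text;
	gboolean debug;
} NateonErrorEntry;

static const NateonErrorEntry nateon_errors[] = {
	{ 205, "User does not exist", FALSE },
	{ 207, "Already logged in", FALSE },
	{ 217, "User is offline", FALSE },
	{ 500, "Service temporarily unavailable", FALSE },
	{ 600, "Server busy", FALSE },
};

/* Look up an error code; NULL means "unknown", which the caller should
 * format itself. Unknown codes are debug-only, as in the original. */
static const char *
nateon_error_lookup(unsigned int type, gboolean *debug)
{
	gsize i;
	for (i = 0; i < G_N_ELEMENTS(nateon_errors); i++) {
		if (nateon_errors[i].code == type) {
			*debug = nateon_errors[i].debug;
			return nateon_errors[i].text;
		}
	}
	*debug = TRUE;
	return NULL;
}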
/*
 * rtl_tcp_andro is a library that uses libusb and librtlsdr to
 * turn your Realtek RTL2832 based DVB dongle into a SDR receiver.
 * It independently implements the rtl-tcp API protocol for native Android usage.
 * Copyright (C) 2016 by Martin Marinov <martintzvetomirov@gmail.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "queue.h"

#include <stdlib.h>	/* malloc, free */
#include <string.h>
#include <stdio.h>

typedef struct queue_element queue_element_t;

struct queue_element {
	queue_element_t * next;
	queue_element_t * prev;
	queue_element_t * last;	/* valid only in the head element: tail pointer */
	void * payload;
};

void queue_add(queue_t * queue, void * ptr) {
	if (*queue == NULL) {
		/* empty queue: the new element is both head and tail */
		queue_element_t * new_el = (queue_element_t *) malloc(sizeof(queue_element_t));
		new_el->last = new_el;
		new_el->next = NULL;
		new_el->prev = NULL;
		new_el->payload = ptr;
		*queue = (void *) new_el;
	} else {
		/* append after the current tail, then update the head's tail pointer */
		queue_element_t * qe = (queue_element_t *) *queue;
		queue_element_t * new_el = (queue_element_t *) malloc(sizeof(queue_element_t));
		new_el->last = NULL;
		new_el->next = NULL;
		new_el->prev = qe->last;
		new_el->payload = ptr;
		qe->last->next = new_el;
		qe->last = new_el;
	}
}

void * queue_pop(queue_t * queue) {
	if (*queue == NULL) return NULL;
	queue_element_t * qe = (queue_element_t *) *queue;
	if (qe->next != NULL) {
		/* the next element becomes the head and inherits the tail pointer */
		qe->next->prev = NULL;
		qe->next->last = qe->last;
	}
	void * result = qe->payload;
	*queue = (void *) qe->next;
	free(qe);
	return result;
}

/* Usage:
void queue_unit_test(void) {
	queue_t queue = NULL;
	int a = 1;
	int b = 3;
	int c = 5;
	int d = 18;
	queue_add(&queue, (void *) &a);
	queue_add(&queue, (void *) &b);
	queue_add(&queue, (void *) &c);
	queue_add(&queue, (void *) &d);
	c = 12;
	int * ans;
	while ((ans = (int *) queue_pop(&queue)))
		printf("%d ", *ans);
	// prints out 1 3 12 18
}
*/
AnthonyQuan/AndroidRTLPower
app/src/main/jni/queue.c
C
gpl-2.0
2,396
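queue_pop() in queue.c above frees one queue_element_t per call, so a queue abandoned while non-empty leaks its remaining nodes. A drain helper built only on the public API could look like the sketch below; queue_clear() is an illustrative addition, not part of the library, and it assumes NULL is never stored as a payload (true of the unit test shown in the file).

#include <stddef.h>

#include "queue.h"

/* Hypothetical helper: pop until empty so no queue_element_t leaks.
 * Payloads are owned by the caller and are not freed here; the loop
 * relies on NULL never being a valid payload. */
static void queue_clear(queue_t * queue)
{
	while (queue_pop(queue) != NULL)
		;	/* discard payload pointers */
}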
/* =========================================================================== Copyright (C) 1999-2005 Id Software, Inc. This file is part of Quake III Arena source code. Quake III Arena source code is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Quake III Arena source code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Foobar; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA =========================================================================== */ /***************************************************************************** * name: be_aas_file.c * * desc: AAS file loading/writing * * $Archive: /MissionPack/code/botlib/be_aas_file.c $ * *****************************************************************************/ #include "../game/q_shared.h" #include "l_memory.h" #include "l_script.h" #include "l_precomp.h" #include "l_struct.h" #include "l_libvar.h" #include "l_utils.h" #include "aasfile.h" #include "../game/botlib.h" #include "../game/be_aas.h" #include "be_aas_funcs.h" #include "be_interface.h" #include "be_aas_def.h" //#define AASFILEDEBUG //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void AAS_SwapAASData(void) { int i, j; //bounding boxes for (i = 0; i < aasworld.numbboxes; i++) { aasworld.bboxes[i].presencetype = LittleLong(aasworld.bboxes[i].presencetype); aasworld.bboxes[i].flags = LittleLong(aasworld.bboxes[i].flags); for (j = 0; j < 3; j++) { aasworld.bboxes[i].mins[j] = LittleLong(aasworld.bboxes[i].mins[j]); aasworld.bboxes[i].maxs[j] = LittleLong(aasworld.bboxes[i].maxs[j]); } //end for } //end for //vertexes for (i = 0; i < aasworld.numvertexes; i++) { for (j = 0; j < 3; j++) aasworld.vertexes[i][j] = LittleFloat(aasworld.vertexes[i][j]); } //end for //planes for (i = 0; i < aasworld.numplanes; i++) { for (j = 0; j < 3; j++) aasworld.planes[i].normal[j] = LittleFloat(aasworld.planes[i].normal[j]); aasworld.planes[i].dist = LittleFloat(aasworld.planes[i].dist); aasworld.planes[i].type = LittleLong(aasworld.planes[i].type); } //end for //edges for (i = 0; i < aasworld.numedges; i++) { aasworld.edges[i].v[0] = LittleLong(aasworld.edges[i].v[0]); aasworld.edges[i].v[1] = LittleLong(aasworld.edges[i].v[1]); } //end for //edgeindex for (i = 0; i < aasworld.edgeindexsize; i++) { aasworld.edgeindex[i] = LittleLong(aasworld.edgeindex[i]); } //end for //faces for (i = 0; i < aasworld.numfaces; i++) { aasworld.faces[i].planenum = LittleLong(aasworld.faces[i].planenum); aasworld.faces[i].faceflags = LittleLong(aasworld.faces[i].faceflags); aasworld.faces[i].numedges = LittleLong(aasworld.faces[i].numedges); aasworld.faces[i].firstedge = LittleLong(aasworld.faces[i].firstedge); aasworld.faces[i].frontarea = LittleLong(aasworld.faces[i].frontarea); aasworld.faces[i].backarea = LittleLong(aasworld.faces[i].backarea); } //end for //face index for (i = 0; i < aasworld.faceindexsize; i++) { aasworld.faceindex[i] = 
LittleLong(aasworld.faceindex[i]); } //end for //convex areas for (i = 0; i < aasworld.numareas; i++) { aasworld.areas[i].areanum = LittleLong(aasworld.areas[i].areanum); aasworld.areas[i].numfaces = LittleLong(aasworld.areas[i].numfaces); aasworld.areas[i].firstface = LittleLong(aasworld.areas[i].firstface); for (j = 0; j < 3; j++) { aasworld.areas[i].mins[j] = LittleFloat(aasworld.areas[i].mins[j]); aasworld.areas[i].maxs[j] = LittleFloat(aasworld.areas[i].maxs[j]); aasworld.areas[i].center[j] = LittleFloat(aasworld.areas[i].center[j]); } //end for } //end for //area settings for (i = 0; i < aasworld.numareasettings; i++) { aasworld.areasettings[i].contents = LittleLong(aasworld.areasettings[i].contents); aasworld.areasettings[i].areaflags = LittleLong(aasworld.areasettings[i].areaflags); aasworld.areasettings[i].presencetype = LittleLong(aasworld.areasettings[i].presencetype); aasworld.areasettings[i].cluster = LittleLong(aasworld.areasettings[i].cluster); aasworld.areasettings[i].clusterareanum = LittleLong(aasworld.areasettings[i].clusterareanum); aasworld.areasettings[i].numreachableareas = LittleLong(aasworld.areasettings[i].numreachableareas); aasworld.areasettings[i].firstreachablearea = LittleLong(aasworld.areasettings[i].firstreachablearea); } //end for //area reachability for (i = 0; i < aasworld.reachabilitysize; i++) { aasworld.reachability[i].areanum = LittleLong(aasworld.reachability[i].areanum); aasworld.reachability[i].facenum = LittleLong(aasworld.reachability[i].facenum); aasworld.reachability[i].edgenum = LittleLong(aasworld.reachability[i].edgenum); for (j = 0; j < 3; j++) { aasworld.reachability[i].start[j] = LittleFloat(aasworld.reachability[i].start[j]); aasworld.reachability[i].end[j] = LittleFloat(aasworld.reachability[i].end[j]); } //end for aasworld.reachability[i].traveltype = LittleLong(aasworld.reachability[i].traveltype); aasworld.reachability[i].traveltime = LittleShort(aasworld.reachability[i].traveltime); } //end for //nodes for (i = 0; i < aasworld.numnodes; i++) { aasworld.nodes[i].planenum = LittleLong(aasworld.nodes[i].planenum); aasworld.nodes[i].children[0] = LittleLong(aasworld.nodes[i].children[0]); aasworld.nodes[i].children[1] = LittleLong(aasworld.nodes[i].children[1]); } //end for //cluster portals for (i = 0; i < aasworld.numportals; i++) { aasworld.portals[i].areanum = LittleLong(aasworld.portals[i].areanum); aasworld.portals[i].frontcluster = LittleLong(aasworld.portals[i].frontcluster); aasworld.portals[i].backcluster = LittleLong(aasworld.portals[i].backcluster); aasworld.portals[i].clusterareanum[0] = LittleLong(aasworld.portals[i].clusterareanum[0]); aasworld.portals[i].clusterareanum[1] = LittleLong(aasworld.portals[i].clusterareanum[1]); } //end for //cluster portal index for (i = 0; i < aasworld.portalindexsize; i++) { aasworld.portalindex[i] = LittleLong(aasworld.portalindex[i]); } //end for //cluster for (i = 0; i < aasworld.numclusters; i++) { aasworld.clusters[i].numareas = LittleLong(aasworld.clusters[i].numareas); aasworld.clusters[i].numreachabilityareas = LittleLong(aasworld.clusters[i].numreachabilityareas); aasworld.clusters[i].numportals = LittleLong(aasworld.clusters[i].numportals); aasworld.clusters[i].firstportal = LittleLong(aasworld.clusters[i].firstportal); } //end for } //end of the function AAS_SwapAASData //=========================================================================== // dump the current loaded aas file // // Parameter: - // Returns: - // Changes Globals: - 
//=========================================================================== void AAS_DumpAASData(void) { aasworld.numbboxes = 0; if (aasworld.bboxes) FreeMemory(aasworld.bboxes); aasworld.bboxes = NULL; aasworld.numvertexes = 0; if (aasworld.vertexes) FreeMemory(aasworld.vertexes); aasworld.vertexes = NULL; aasworld.numplanes = 0; if (aasworld.planes) FreeMemory(aasworld.planes); aasworld.planes = NULL; aasworld.numedges = 0; if (aasworld.edges) FreeMemory(aasworld.edges); aasworld.edges = NULL; aasworld.edgeindexsize = 0; if (aasworld.edgeindex) FreeMemory(aasworld.edgeindex); aasworld.edgeindex = NULL; aasworld.numfaces = 0; if (aasworld.faces) FreeMemory(aasworld.faces); aasworld.faces = NULL; aasworld.faceindexsize = 0; if (aasworld.faceindex) FreeMemory(aasworld.faceindex); aasworld.faceindex = NULL; aasworld.numareas = 0; if (aasworld.areas) FreeMemory(aasworld.areas); aasworld.areas = NULL; aasworld.numareasettings = 0; if (aasworld.areasettings) FreeMemory(aasworld.areasettings); aasworld.areasettings = NULL; aasworld.reachabilitysize = 0; if (aasworld.reachability) FreeMemory(aasworld.reachability); aasworld.reachability = NULL; aasworld.numnodes = 0; if (aasworld.nodes) FreeMemory(aasworld.nodes); aasworld.nodes = NULL; aasworld.numportals = 0; if (aasworld.portals) FreeMemory(aasworld.portals); aasworld.portals = NULL; aasworld.numportals = 0; if (aasworld.portalindex) FreeMemory(aasworld.portalindex); aasworld.portalindex = NULL; aasworld.portalindexsize = 0; if (aasworld.clusters) FreeMemory(aasworld.clusters); aasworld.clusters = NULL; aasworld.numclusters = 0; // aasworld.loaded = qfalse; aasworld.initialized = qfalse; aasworld.savefile = qfalse; } //end of the function AAS_DumpAASData //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== #ifdef AASFILEDEBUG void AAS_FileInfo(void) { int i, n, optimized; botimport.Print(PRT_MESSAGE, "version = %d\n", AASVERSION); botimport.Print(PRT_MESSAGE, "numvertexes = %d\n", aasworld.numvertexes); botimport.Print(PRT_MESSAGE, "numplanes = %d\n", aasworld.numplanes); botimport.Print(PRT_MESSAGE, "numedges = %d\n", aasworld.numedges); botimport.Print(PRT_MESSAGE, "edgeindexsize = %d\n", aasworld.edgeindexsize); botimport.Print(PRT_MESSAGE, "numfaces = %d\n", aasworld.numfaces); botimport.Print(PRT_MESSAGE, "faceindexsize = %d\n", aasworld.faceindexsize); botimport.Print(PRT_MESSAGE, "numareas = %d\n", aasworld.numareas); botimport.Print(PRT_MESSAGE, "numareasettings = %d\n", aasworld.numareasettings); botimport.Print(PRT_MESSAGE, "reachabilitysize = %d\n", aasworld.reachabilitysize); botimport.Print(PRT_MESSAGE, "numnodes = %d\n", aasworld.numnodes); botimport.Print(PRT_MESSAGE, "numportals = %d\n", aasworld.numportals); botimport.Print(PRT_MESSAGE, "portalindexsize = %d\n", aasworld.portalindexsize); botimport.Print(PRT_MESSAGE, "numclusters = %d\n", aasworld.numclusters); // for (n = 0, i = 0; i < aasworld.numareasettings; i++) { if (aasworld.areasettings[i].areaflags & AREA_GROUNDED) n++; } //end for botimport.Print(PRT_MESSAGE, "num grounded areas = %d\n", n); // botimport.Print(PRT_MESSAGE, "planes size %d bytes\n", aasworld.numplanes * sizeof(aas_plane_t)); botimport.Print(PRT_MESSAGE, "areas size %d bytes\n", aasworld.numareas * sizeof(aas_area_t)); botimport.Print(PRT_MESSAGE, "areasettings size %d bytes\n", aasworld.numareasettings * sizeof(aas_areasettings_t)); 
botimport.Print(PRT_MESSAGE, "nodes size %d bytes\n", aasworld.numnodes * sizeof(aas_node_t)); botimport.Print(PRT_MESSAGE, "reachability size %d bytes\n", aasworld.reachabilitysize * sizeof(aas_reachability_t)); botimport.Print(PRT_MESSAGE, "portals size %d bytes\n", aasworld.numportals * sizeof(aas_portal_t)); botimport.Print(PRT_MESSAGE, "clusters size %d bytes\n", aasworld.numclusters * sizeof(aas_cluster_t)); optimized = aasworld.numplanes * sizeof(aas_plane_t) + aasworld.numareas * sizeof(aas_area_t) + aasworld.numareasettings * sizeof(aas_areasettings_t) + aasworld.numnodes * sizeof(aas_node_t) + aasworld.reachabilitysize * sizeof(aas_reachability_t) + aasworld.numportals * sizeof(aas_portal_t) + aasworld.numclusters * sizeof(aas_cluster_t); botimport.Print(PRT_MESSAGE, "optimzed size %d KB\n", optimized >> 10); } //end of the function AAS_FileInfo #endif //AASFILEDEBUG //=========================================================================== // allocate memory and read a lump of a AAS file // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== char *AAS_LoadAASLump(fileHandle_t fp, int offset, int length, int *lastoffset, int size) { char *buf; // if (!length) { //just alloc a dummy return (char *) GetClearedHunkMemory(size+1); } //end if //seek to the data if (offset != *lastoffset) { botimport.Print(PRT_WARNING, "AAS file not sequentially read\n"); if (botimport.FS_Seek(fp, offset, FS_SEEK_SET)) { AAS_Error("can't seek to aas lump\n"); AAS_DumpAASData(); botimport.FS_FCloseFile(fp); return NULL; } //end if } //end if //allocate memory buf = (char *) GetClearedHunkMemory(length+1); //read the data if (length) { botimport.FS_Read(buf, length, fp ); *lastoffset += length; } //end if return buf; } //end of the function AAS_LoadAASLump //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void AAS_DData(unsigned char *data, int size) { int i; for (i = 0; i < size; i++) { data[i] ^= (unsigned char) i * 119; } //end for } //end of the function AAS_DData //=========================================================================== // load an aas file // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== int AAS_LoadAASFile(char *filename) { fileHandle_t fp; aas_header_t header; int offset, length, lastoffset; botimport.Print(PRT_MESSAGE, "trying to load %s\n", filename); //dump current loaded aas file AAS_DumpAASData(); //open the file botimport.FS_FOpenFile( filename, &fp, FS_READ ); if (!fp) { AAS_Error("can't open %s\n", filename); return BLERR_CANNOTOPENAASFILE; } //end if //read the header botimport.FS_Read(&header, sizeof(aas_header_t), fp ); lastoffset = sizeof(aas_header_t); //check header identification header.ident = LittleLong(header.ident); if (header.ident != AASID) { AAS_Error("%s is not an AAS file\n", filename); botimport.FS_FCloseFile(fp); return BLERR_WRONGAASFILEID; } //end if //check the version header.version = LittleLong(header.version); // if (header.version != AASVERSION_OLD && header.version != AASVERSION) { AAS_Error("aas file %s is version %i, not %i\n", filename, header.version, AASVERSION); botimport.FS_FCloseFile(fp); return BLERR_WRONGAASFILEVERSION; } //end if // if (header.version == AASVERSION) { AAS_DData((unsigned char *) &header + 8, 
sizeof(aas_header_t) - 8); } //end if // aasworld.bspchecksum = atoi(LibVarGetString( "sv_mapChecksum")); if (LittleLong(header.bspchecksum) != aasworld.bspchecksum) { AAS_Error("aas file %s is out of date\n", filename); botimport.FS_FCloseFile(fp); return BLERR_WRONGAASFILEVERSION; } //end if //load the lumps: //bounding boxes offset = LittleLong(header.lumps[AASLUMP_BBOXES].fileofs); length = LittleLong(header.lumps[AASLUMP_BBOXES].filelen); aasworld.bboxes = (aas_bbox_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_bbox_t)); aasworld.numbboxes = length / sizeof(aas_bbox_t); if (aasworld.numbboxes && !aasworld.bboxes) return BLERR_CANNOTREADAASLUMP; //vertexes offset = LittleLong(header.lumps[AASLUMP_VERTEXES].fileofs); length = LittleLong(header.lumps[AASLUMP_VERTEXES].filelen); aasworld.vertexes = (aas_vertex_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_vertex_t)); aasworld.numvertexes = length / sizeof(aas_vertex_t); if (aasworld.numvertexes && !aasworld.vertexes) return BLERR_CANNOTREADAASLUMP; //planes offset = LittleLong(header.lumps[AASLUMP_PLANES].fileofs); length = LittleLong(header.lumps[AASLUMP_PLANES].filelen); aasworld.planes = (aas_plane_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_plane_t)); aasworld.numplanes = length / sizeof(aas_plane_t); if (aasworld.numplanes && !aasworld.planes) return BLERR_CANNOTREADAASLUMP; //edges offset = LittleLong(header.lumps[AASLUMP_EDGES].fileofs); length = LittleLong(header.lumps[AASLUMP_EDGES].filelen); aasworld.edges = (aas_edge_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_edge_t)); aasworld.numedges = length / sizeof(aas_edge_t); if (aasworld.numedges && !aasworld.edges) return BLERR_CANNOTREADAASLUMP; //edgeindex offset = LittleLong(header.lumps[AASLUMP_EDGEINDEX].fileofs); length = LittleLong(header.lumps[AASLUMP_EDGEINDEX].filelen); aasworld.edgeindex = (aas_edgeindex_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_edgeindex_t)); aasworld.edgeindexsize = length / sizeof(aas_edgeindex_t); if (aasworld.edgeindexsize && !aasworld.edgeindex) return BLERR_CANNOTREADAASLUMP; //faces offset = LittleLong(header.lumps[AASLUMP_FACES].fileofs); length = LittleLong(header.lumps[AASLUMP_FACES].filelen); aasworld.faces = (aas_face_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_face_t)); aasworld.numfaces = length / sizeof(aas_face_t); if (aasworld.numfaces && !aasworld.faces) return BLERR_CANNOTREADAASLUMP; //faceindex offset = LittleLong(header.lumps[AASLUMP_FACEINDEX].fileofs); length = LittleLong(header.lumps[AASLUMP_FACEINDEX].filelen); aasworld.faceindex = (aas_faceindex_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_faceindex_t)); aasworld.faceindexsize = length / sizeof(aas_faceindex_t); if (aasworld.faceindexsize && !aasworld.faceindex) return BLERR_CANNOTREADAASLUMP; //convex areas offset = LittleLong(header.lumps[AASLUMP_AREAS].fileofs); length = LittleLong(header.lumps[AASLUMP_AREAS].filelen); aasworld.areas = (aas_area_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_area_t)); aasworld.numareas = length / sizeof(aas_area_t); if (aasworld.numareas && !aasworld.areas) return BLERR_CANNOTREADAASLUMP; //area settings offset = LittleLong(header.lumps[AASLUMP_AREASETTINGS].fileofs); length = LittleLong(header.lumps[AASLUMP_AREASETTINGS].filelen); aasworld.areasettings = (aas_areasettings_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_areasettings_t)); aasworld.numareasettings = 
length / sizeof(aas_areasettings_t); if (aasworld.numareasettings && !aasworld.areasettings) return BLERR_CANNOTREADAASLUMP; //reachability list offset = LittleLong(header.lumps[AASLUMP_REACHABILITY].fileofs); length = LittleLong(header.lumps[AASLUMP_REACHABILITY].filelen); aasworld.reachability = (aas_reachability_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_reachability_t)); aasworld.reachabilitysize = length / sizeof(aas_reachability_t); if (aasworld.reachabilitysize && !aasworld.reachability) return BLERR_CANNOTREADAASLUMP; //nodes offset = LittleLong(header.lumps[AASLUMP_NODES].fileofs); length = LittleLong(header.lumps[AASLUMP_NODES].filelen); aasworld.nodes = (aas_node_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_node_t)); aasworld.numnodes = length / sizeof(aas_node_t); if (aasworld.numnodes && !aasworld.nodes) return BLERR_CANNOTREADAASLUMP; //cluster portals offset = LittleLong(header.lumps[AASLUMP_PORTALS].fileofs); length = LittleLong(header.lumps[AASLUMP_PORTALS].filelen); aasworld.portals = (aas_portal_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_portal_t)); aasworld.numportals = length / sizeof(aas_portal_t); if (aasworld.numportals && !aasworld.portals) return BLERR_CANNOTREADAASLUMP; //cluster portal index offset = LittleLong(header.lumps[AASLUMP_PORTALINDEX].fileofs); length = LittleLong(header.lumps[AASLUMP_PORTALINDEX].filelen); aasworld.portalindex = (aas_portalindex_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_portalindex_t)); aasworld.portalindexsize = length / sizeof(aas_portalindex_t); if (aasworld.portalindexsize && !aasworld.portalindex) return BLERR_CANNOTREADAASLUMP; //clusters offset = LittleLong(header.lumps[AASLUMP_CLUSTERS].fileofs); length = LittleLong(header.lumps[AASLUMP_CLUSTERS].filelen); aasworld.clusters = (aas_cluster_t *) AAS_LoadAASLump(fp, offset, length, &lastoffset, sizeof(aas_cluster_t)); aasworld.numclusters = length / sizeof(aas_cluster_t); if (aasworld.numclusters && !aasworld.clusters) return BLERR_CANNOTREADAASLUMP; //swap everything AAS_SwapAASData(); //aas file is loaded aasworld.loaded = qtrue; //close the file botimport.FS_FCloseFile(fp); // #ifdef AASFILEDEBUG AAS_FileInfo(); #endif //AASFILEDEBUG // return BLERR_NOERROR; } //end of the function AAS_LoadAASFile //=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== static int AAS_WriteAASLump_offset; int AAS_WriteAASLump(fileHandle_t fp, aas_header_t *h, int lumpnum, void *data, int length) { aas_lump_t *lump; lump = &h->lumps[lumpnum]; lump->fileofs = LittleLong(AAS_WriteAASLump_offset); //LittleLong(ftell(fp)); lump->filelen = LittleLong(length); if (length > 0) { botimport.FS_Write(data, length, fp ); } //end if AAS_WriteAASLump_offset += length; return qtrue; } //end of the function AAS_WriteAASLump //=========================================================================== // aas data is useless after writing to file because it is byte swapped // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== qboolean AAS_WriteAASFile(char *filename) { aas_header_t header; fileHandle_t fp; botimport.Print(PRT_MESSAGE, "writing %s\n", filename); //swap the aas data AAS_SwapAASData(); //initialize the file header Com_Memset(&header, 0, sizeof(aas_header_t)); header.ident = 
LittleLong(AASID); header.version = LittleLong(AASVERSION); header.bspchecksum = LittleLong(aasworld.bspchecksum); //open a new file botimport.FS_FOpenFile( filename, &fp, FS_WRITE ); if (!fp) { botimport.Print(PRT_ERROR, "error opening %s\n", filename); return qfalse; } //end if //write the header botimport.FS_Write(&header, sizeof(aas_header_t), fp); AAS_WriteAASLump_offset = sizeof(aas_header_t); //add the data lumps to the file if (!AAS_WriteAASLump(fp, &header, AASLUMP_BBOXES, aasworld.bboxes, aasworld.numbboxes * sizeof(aas_bbox_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_VERTEXES, aasworld.vertexes, aasworld.numvertexes * sizeof(aas_vertex_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_PLANES, aasworld.planes, aasworld.numplanes * sizeof(aas_plane_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_EDGES, aasworld.edges, aasworld.numedges * sizeof(aas_edge_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_EDGEINDEX, aasworld.edgeindex, aasworld.edgeindexsize * sizeof(aas_edgeindex_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_FACES, aasworld.faces, aasworld.numfaces * sizeof(aas_face_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_FACEINDEX, aasworld.faceindex, aasworld.faceindexsize * sizeof(aas_faceindex_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_AREAS, aasworld.areas, aasworld.numareas * sizeof(aas_area_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_AREASETTINGS, aasworld.areasettings, aasworld.numareasettings * sizeof(aas_areasettings_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_REACHABILITY, aasworld.reachability, aasworld.reachabilitysize * sizeof(aas_reachability_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_NODES, aasworld.nodes, aasworld.numnodes * sizeof(aas_node_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_PORTALS, aasworld.portals, aasworld.numportals * sizeof(aas_portal_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_PORTALINDEX, aasworld.portalindex, aasworld.portalindexsize * sizeof(aas_portalindex_t))) return qfalse; if (!AAS_WriteAASLump(fp, &header, AASLUMP_CLUSTERS, aasworld.clusters, aasworld.numclusters * sizeof(aas_cluster_t))) return qfalse; //rewrite the header with the added lumps botimport.FS_Seek(fp, 0, FS_SEEK_SET); AAS_DData((unsigned char *) &header + 8, sizeof(aas_header_t) - 8); botimport.FS_Write(&header, sizeof(aas_header_t), fp); //close the file botimport.FS_FCloseFile(fp); return qtrue; } //end of the function AAS_WriteAASFile
entdark/q3mme
trunk/code/botlib/be_aas_file.c
C
gpl-2.0
24,509
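A minimal standalone sketch of the two-pass lump-writing pattern that AAS_WriteAASFile uses above: write a placeholder header, append each lump while recording its offset and length in the header, then seek back and rewrite the finished header. The types and names below are simplified stand-ins for illustration, not the engine's API, and the engine's extra obfuscation pass over the header bytes (AAS_DData) is omitted.

/* Two-pass lump writing: placeholder header, data lumps, final header. */
#include <stdio.h>
#include <string.h>

typedef struct { int fileofs; int filelen; } lump_t;
typedef struct { int ident; int version; lump_t lumps[2]; } header_t;

/* Append one lump, record where it landed, and return the new running
 * offset -- playing the role of AAS_WriteAASLump_offset. */
static long write_lump(FILE *fp, header_t *h, int n,
                       const void *data, int len, long offset)
{
    h->lumps[n].fileofs = (int)offset;
    h->lumps[n].filelen = len;
    if (len > 0)
        fwrite(data, 1, (size_t)len, fp);
    return offset + len;
}

static int write_demo_file(const char *name)
{
    int areas[4] = { 1, 2, 3, 4 };            /* stand-in lump payloads */
    float reach[2] = { 0.5f, 1.5f };
    header_t h;
    long offset;
    FILE *fp = fopen(name, "wb");
    if (!fp)
        return 0;
    memset(&h, 0, sizeof h);
    fwrite(&h, sizeof h, 1, fp);              /* placeholder header */
    offset = (long)sizeof h;
    offset = write_lump(fp, &h, 0, areas, (int)sizeof areas, offset);
    offset = write_lump(fp, &h, 1, reach, (int)sizeof reach, offset);
    (void)offset;
    h.ident   = 0x53414145;                   /* arbitrary demo magic */
    h.version = 1;
    fseek(fp, 0, SEEK_SET);                   /* rewrite the finished header */
    fwrite(&h, sizeof h, 1, fp);
    fclose(fp);
    return 1;
}

int main(void)
{
    return write_demo_file("demo.aas") ? 0 : 1;
}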
/* Core Hardware driver for Hx4700 (ASIC3, EGPIOs) * * Copyright (c) 2005 SDG Systems, LLC * * 2005-03-29 Todd Blumer Converted basic structure to support hx4700 * 2005-04-30 Todd Blumer Add IRDA code from H2200 */ #include <linux/module.h> #include <linux/version.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/dpm.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/mach/irq.h> #include <asm/arch/pxa-regs.h> #include <asm/arch/pxa-pm_ll.h> #include <asm/arch/hx4700-gpio.h> #include <asm/arch/hx4700-asic.h> #include <asm/arch/hx4700-core.h> #include <linux/mfd/asic3_base.h> #include <asm/hardware/ipaq-asic3.h> #define EGPIO_OFFSET 0 #define EGPIO_BASE (PXA_CS5_PHYS+EGPIO_OFFSET) volatile u_int16_t *egpios; u_int16_t egpio_reg; static int htc_bootloader = 0; /* Is the stock HTC bootloader installed? */ static u32 save[4]; static u32 save2[13]; /* * may make sense to put egpios elsewhere, but they're here now * since they share some of the same address space with the TI WLAN * * EGPIO register is write-only */ void hx4700_egpio_enable( u_int16_t bits ) { unsigned long flags; local_irq_save(flags); egpio_reg |= bits; *egpios = egpio_reg; local_irq_restore(flags); } EXPORT_SYMBOL(hx4700_egpio_enable); void hx4700_egpio_disable( u_int16_t bits ) { unsigned long flags; local_irq_save(flags); egpio_reg &= ~bits; *egpios = egpio_reg; local_irq_restore(flags); } EXPORT_SYMBOL(hx4700_egpio_disable); #ifdef CONFIG_PM static int hx4700_suspend(struct platform_device *pdev, pm_message_t state) { /* Turn off external clocks here, because hx4700_power and asic3_mmc * scared to do so to not hurt each other. (-5 mA) */ #if 0 asic3_set_clock_cdex(&hx4700_asic3.dev, CLOCK_CDEX_EX0 | CLOCK_CDEX_EX1, 0 | 0); #endif /* 0x20c2 is HTC clock value * CLOCK_CDEX_SOURCE 2 * CLOCK_CDEX_SPI 0 * CLOCK_CDEX_OWM 0 * * CLOCK_CDEX_PWM0 0 * CLOCK_CDEX_PWM1 0 * CLOCK_CDEX_LED0 1 * CLOCK_CDEX_LED1 1 * * CLOCK_CDEX_LED2 0 * CLOCK_CDEX_SD_HOST 0 * CLOCK_CDEX_SD_BUS 0 * CLOCK_CDEX_SMBUS 0 * * CLOCK_CDEX_CONTROL_CX 0 * CLOCK_CDEX_EX0 1 * CLOCK_CDEX_EX1 0 * */ asic3_set_clock_cdex(&hx4700_asic3.dev, 0xffff, 0x21c2); *egpios = 0; /* turn off all egpio power */ /* * Note that WEP1 wake up event is used by bootldr to set the * LEDS when power is applied/removed for charging. */ PWER = PWER_RTC | PWER_GPIO0 | PWER_GPIO1 | PWER_GPIO12 | PWER_WEP1; // rtc + power + reset + asic3 + wep1 PFER = PWER_GPIO1; // Falling Edge Detect PRER = PWER_GPIO0 | PWER_GPIO12; // Rising Edge Detect PGSR0 = 0x080DC01C; PGSR1 = 0x34CF0002; PGSR2 = 0x0123C18C; /* PGSR3 = 0x00104202; */ PGSR3 = 0x00100202; /* These next checks are specifically for charging. We want to enable * it if it is already enabled */ /* Check for charge enable, GPIO 72 */ if(GPLR2 & (1 << 8)) { /* Set it */ PGSR2 |= (1U << 8); } else { /* Clear it */ PGSR2 &= ~(1U << 8); } /* Check for USB_CHARGE_RATE, GPIO 96 */ if(GPLR3 & (1 << 0)) { /* Set it */ PGSR3 |= (1U << 0); } else { /* Clear it */ PGSR3 &= ~(1U << 0); } PCFR = PCFR_GPROD|PCFR_DC_EN|PCFR_GPR_EN|PCFR_OPDE |PCFR_FP|PCFR_PI2CEN; /* was 0x1091; */ /* The 2<<2 below turns on the Power Island state preservation * and counters. This allows us to wake up bootldr after a * period of time, and it can set the LEDs correctly based on * the power state. The bootldr turns it off when it's * charged. 
*/ PSLR=0xc8000000 | (2 << 2); /* * If we're using bootldr and not the stock HTC bootloader, * we want to wake up periodically to see if the charge is full while * it is suspended. We do this with the OS timer 4 in the pxa270. */ if (!htc_bootloader) { OMCR4 = 0x4b; /* Periodic, self-resetting, 1-second timer */ OSMR4 = 5; /* Wake up bootldr after x seconds so it can figure out what to do with the LEDs. */ OIER |= 0x10; /* Enable interrupt source for Timer 4 */ OSCR4 = 0; /* This starts the timer */ } asic3_set_extcf_select(&hx4700_asic3.dev, ASIC3_EXTCF_OWM_EN, 0); return 0; } static int hx4700_resume(struct platform_device *pdev) { hx4700_egpio_enable(0); return 0; } #else # define hx4700_suspend NULL # define hx4700_resume NULL #endif static void hx4700_pxa_ll_pm_suspend(unsigned long resume_addr) { int i; u32 csum, tmp, *p; /* Save the 13 words at 0xa0038000. */ for (p = phys_to_virt(0xa0038000), i = 0; i < 13; i++) save2[i] = p[i]; /* Save the first four words at 0xa0000000. */ for (p = phys_to_virt(0xa0000000), i = 0; i < 4; i++) save[i] = p[i]; /* Set the first four words at 0xa0000000 to: * resume address; MMU control; TLB base addr; domain id */ p[0] = resume_addr; asm( "mrc\tp15, 0, %0, c1, c0, 0" : "=r" (tmp) ); p[1] = tmp & ~(0x3987); /* mmu off */ asm( "mrc\tp15, 0, %0, c2, c0, 0" : "=r" (tmp) ); p[2] = tmp; /* Shouldn't matter, since MMU will be off. */ asm( "mrc\tp15, 0, %0, c3, c0, 0" : "=r" (tmp) ); p[3] = tmp; /* Shouldn't matter, since MMU will be off. */ /* Set PSPR to the checksum the HTC bootloader wants to see. */ for (csum = 0, i = 0; i < 52; i++) { tmp = p[i] & 0x1; tmp = tmp << 31; tmp |= tmp >> 1; csum += tmp; } PSPR = csum; } static void hx4700_pxa_ll_pm_resume(void) { int i; u32 *p; /* Restore the first four words at 0xa0000000. */ for (p = phys_to_virt(0xa0000000), i = 0; i < 4; i++) p[i] = save[i]; /* Restore the 13 words at 0xa0038000. */ for (p = phys_to_virt(0xa0038000), i = 0; i < 13; i++) p[i] = save2[i]; /* XXX Do we need to flush the cache? */ } struct pxa_ll_pm_ops hx4700_ll_pm_ops = { .suspend = hx4700_pxa_ll_pm_suspend, .resume = hx4700_pxa_ll_pm_resume, }; /* automatic backlight brightness control */ static ssize_t auto_brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", GET_HX4700_GPIO(AUTO_SENSE) ? 1 : 0); } static ssize_t auto_brightness_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int auto_brightness = simple_strtoul(buf, NULL, 10) ? 1 : 0; SET_HX4700_GPIO(AUTO_SENSE, auto_brightness); return count; } static DEVICE_ATTR(auto_brightness, 0644, auto_brightness_show, auto_brightness_store); static int hx4700_core_probe( struct platform_device *pdev ) { u32 *bootldr; int i; int ret = 0; printk( KERN_NOTICE "hx4700 Core Hardware Driver\n" ); egpios = (volatile u_int16_t *)ioremap_nocache( EGPIO_BASE, sizeof *egpios ); if (!egpios) return -ENODEV; /* Is the stock HTC bootloader installed? */ bootldr = (u32 *) ioremap(PXA_CS0_PHYS, 1024 * 1024); /* Windows Mobile 2003 Second Edition v. 4.21.1088 Build 15045.2.6.0 * ROM date 4/13/05, rev 1.10.08 ENG, bootloader 1.01, * XIP v4.21.15045.0 */ i = 0x000414dc / 4; if (bootldr[i] == 0xe59f1360 && /* ldr r1, [pc, #864] ; power base */ bootldr[i+1] == 0xe5914008 && /* ldr r4, [r1, #8] ; PSPR */ bootldr[i+2] == 0xe1320004) { /* teq r2, r4 */ printk("Stock HTC WM2003 bootloader detected\n"); htc_bootloader = 1; pxa_pm_set_ll_ops(&hx4700_ll_pm_ops); } /* XXX Which version of WM2005 is this? 
*/ i = 0x00041d68 / 4; if (bootldr[i] == 0xe59f1354 && /* ldr r1, [pc, #852] ; power base */ bootldr[i+1] == 0xe5914008 && /* ldr r4, [r1, #8] ; PSPR */ bootldr[i+2] == 0xe1320004) { /* teq r2, r4 */ printk("Stock HTC WM2005 bootloader detected\n"); htc_bootloader = 1; pxa_pm_set_ll_ops(&hx4700_ll_pm_ops); } /* WM 5.0 OS 5.1.70 Build 14406.1.1.1 */ i = 0x00041340 / 4; if (bootldr[i] == 0xe59f1354 && /* ldr r1, [pc, #852] ; power base */ bootldr[i+1] == 0xe5914008 && /* ldr r4, [r1, #8] ; PSPR */ bootldr[i+2] == 0xe1320004) { /* teq r2, r4 */ printk("Stock HTC WM2005 bootloader detected\n"); htc_bootloader = 1; pxa_pm_set_ll_ops(&hx4700_ll_pm_ops); } iounmap(bootldr); ret = device_create_file(&pdev->dev, &dev_attr_auto_brightness); if (ret) iounmap(egpios); return ret; } static int hx4700_core_remove( struct platform_device *pdev ) { struct hx4700_core_funcs *funcs = pdev->dev.platform_data; device_remove_file(&pdev->dev, &dev_attr_auto_brightness); if (egpios != NULL) iounmap( (void *)egpios ); funcs->udc_detect = NULL; return 0; } static struct platform_driver hx4700_core_driver = { .driver = { .name = "hx4700-core", }, .probe = hx4700_core_probe, .remove = hx4700_core_remove, .suspend = hx4700_suspend, .resume = hx4700_resume, }; static int __init hx4700_core_init( void ) { return platform_driver_register( &hx4700_core_driver ); } static void __exit hx4700_core_exit( void ) { platform_driver_unregister( &hx4700_core_driver ); } module_init( hx4700_core_init ); module_exit( hx4700_core_exit ); MODULE_AUTHOR("Todd Blumer, SDG Systems, LLC"); MODULE_DESCRIPTION("hx4700 Core Hardware Driver"); MODULE_LICENSE("GPL"); /* vim600: set noexpandtab sw=8 ts=8 :*/
janrinze/loox7xxport.loox2-6-22
arch/arm/mach-pxa/hx4700/hx4700_core.c
C
gpl-2.0
9,113
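A standalone sketch of the PSPR wake-up checksum computed in hx4700_pxa_ll_pm_suspend above: the low bit of each of the 52 words at the resume area is spread into the top two bits and summed, and the result is what the stock HTC bootloader expects to find in PSPR. The buffer and sample values here are stand-ins for the memory mapped at 0xa0000000.

#include <stdint.h>
#include <stdio.h>

static uint32_t resume_checksum(const uint32_t *p)
{
    uint32_t csum = 0;
    int i;
    for (i = 0; i < 52; i++) {
        uint32_t tmp = p[i] & 0x1;  /* low bit of the word */
        tmp = tmp << 31;            /* move it to bit 31 */
        tmp |= tmp >> 1;            /* duplicate it into bit 30 */
        csum += tmp;                /* wraps mod 2^32, as in the driver */
    }
    return csum;
}

int main(void)
{
    uint32_t words[52] = { 0xa0040000, 0x0000397f }; /* made-up sample data */
    printf("PSPR checksum: 0x%08x\n", resume_checksum(words));
    return 0;
}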
/* Distributed 12x12 matrix multiply example using XcalableMP pragmas. */
#include <stdio.h>
#include <xmp.h>

#define NUM_ROWS_A    12 // rows of input [A]
#define NUM_COLUMNS_A 12 // columns of input [A]
#define NUM_ROWS_B    12 // rows of input [B]
#define NUM_COLUMNS_B 12 // columns of input [B]

#pragma xmp nodes p(*)
#pragma xmp template t(0:11)
#pragma xmp distribute t(block) onto p

double a[NUM_ROWS_A][NUM_COLUMNS_A]; // declare input [A] (replicated)
double b[NUM_ROWS_B][NUM_COLUMNS_B];
double c[NUM_ROWS_A][NUM_COLUMNS_B];
#pragma xmp align b[i][*] with t(i)
#pragma xmp align c[i][*] with t(i)

int main(void)
{
#pragma xmp loop on t(j)
    for (int j = 0; j < NUM_COLUMNS_B; j++) {
        for (int i = 0; i < NUM_ROWS_A; i++) {
            for (int k = 0; k < NUM_COLUMNS_A; k++) {
                c[j][i] = c[j][i] + a[k][i] * b[j][k];
                printf("Process %d is computing c[%d][%d]\n",
                       xmp_node_num(), j, i);
            }
        }
    }
    return 0;
}
tempbottle/pop-cpp
examples/xmp-matrix/xmp-matrix.c
C
gpl-2.0
871
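A plain-C serial reference for the XMP example above, useful for checking the distributed result on a single node: it performs the same accumulation c[j][i] += a[k][i] * b[j][k] without any XMP pragmas. The identity-matrix input and the spot check are illustrative choices, not part of the original example.

#include <stdio.h>

#define N 12

static void matmul_ref(double c[N][N], const double a[N][N],
                       const double b[N][N])
{
    for (int j = 0; j < N; j++)
        for (int i = 0; i < N; i++) {
            c[j][i] = 0.0;
            for (int k = 0; k < N; k++)
                c[j][i] += a[k][i] * b[j][k];   /* same index convention */
        }
}

int main(void)
{
    static double a[N][N], b[N][N], c[N][N];
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            a[i][j] = (i == j) ? 1.0 : 0.0;     /* identity input: c == b */
            b[i][j] = i * N + j;
        }
    matmul_ref(c, a, b);
    printf("c[2][3] = %g (expect 27)\n", c[2][3]);
    return 0;
}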
/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* Written by Sergei A. Golubchik, who has a shared copyright to this code */ #include "ftdefs.h" typedef struct st_ft_docstat { FT_WORD *list; uint uniq; double sum; } FT_DOCSTAT; typedef struct st_my_ft_parser_param { TREE *wtree; MEM_ROOT *mem_root; } MY_FT_PARSER_PARAM; static int FT_WORD_cmp(CHARSET_INFO* cs, FT_WORD *w1, FT_WORD *w2) { return ha_compare_text(cs, (uchar*) w1->pos, w1->len, (uchar*) w2->pos, w2->len, 0); } static int walk_and_copy(FT_WORD *word,uint32 count,FT_DOCSTAT *docstat) { word->weight=LWS_IN_USE; docstat->sum+=word->weight; memcpy((docstat->list)++, word, sizeof(FT_WORD)); return 0; } /* transforms tree of words into the array, applying normalization */ FT_WORD * ft_linearize(TREE *wtree, MEM_ROOT *mem_root) { FT_WORD *wlist,*p; FT_DOCSTAT docstat; DBUG_ENTER("ft_linearize"); if ((wlist=(FT_WORD *) alloc_root(mem_root, sizeof(FT_WORD)* (1+wtree->elements_in_tree)))) { docstat.list=wlist; docstat.uniq=wtree->elements_in_tree; docstat.sum=0; tree_walk(wtree,(tree_walk_action)&walk_and_copy,&docstat,left_root_right); } delete_tree(wtree, 0); if (!wlist) DBUG_RETURN(NULL); docstat.list->pos=NULL; for (p=wlist;p->pos;p++) { p->weight=PRENORM_IN_USE; } for (p=wlist;p->pos;p++) { p->weight/=NORM_IN_USE; } DBUG_RETURN(wlist); } my_bool ft_boolean_check_syntax_string(const uchar *str) { uint i, j; if (!str || (strlen((char*) str)+1 != sizeof(DEFAULT_FTB_SYNTAX)) || (str[0] != ' ' && str[1] != ' ')) return 1; for (i=0; i<sizeof(DEFAULT_FTB_SYNTAX); i++) { /* limiting to 7-bit ascii only */ if ((unsigned char)(str[i]) > 127 || my_isalnum(default_charset_info, str[i])) return 1; for (j=0; j<i; j++) if (str[i] == str[j] && (i != 11 || j != 10)) return 1; } return 0; } /* RETURN VALUE 0 - eof 1 - word found 2 - left bracket 3 - right bracket 4 - stopword found */ uchar ft_get_word(CHARSET_INFO *cs, const uchar **start, const uchar *end, FT_WORD *word, MYSQL_FTPARSER_BOOLEAN_INFO *param) { const uchar *doc=*start; int ctype; uint mwc, length; int mbl; param->yesno=(FTB_YES==' ') ? 1 : (param->quot != 0); param->weight_adjust= param->wasign= 0; param->type= FT_TOKEN_EOF; while (doc<end) { for (; doc < end; doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1))) { mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end); if (true_word_char(ctype, *doc)) break; if (*doc == FTB_RQUOT && param->quot) { *start=doc+1; param->type= FT_TOKEN_RIGHT_PAREN; goto ret; } if (!param->quot) { if (*doc == FTB_LBR || *doc == FTB_RBR || *doc == FTB_LQUOT) { /* param->prev=' '; */ *start=doc+1; if (*doc == FTB_LQUOT) param->quot= (char*) 1; param->type= (*doc == FTB_RBR ? 
FT_TOKEN_RIGHT_PAREN : FT_TOKEN_LEFT_PAREN); goto ret; } if (param->prev == ' ') { if (*doc == FTB_YES ) { param->yesno=+1; continue; } else if (*doc == FTB_EGAL) { param->yesno= 0; continue; } else if (*doc == FTB_NO ) { param->yesno=-1; continue; } else if (*doc == FTB_INC ) { param->weight_adjust++; continue; } else if (*doc == FTB_DEC ) { param->weight_adjust--; continue; } else if (*doc == FTB_NEG ) { param->wasign= !param->wasign; continue; } } } param->prev=*doc; param->yesno=(FTB_YES==' ') ? 1 : (param->quot != 0); param->weight_adjust= param->wasign= 0; } mwc=length=0; for (word->pos= doc; doc < end; length++, doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1))) { mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end); if (true_word_char(ctype, *doc)) mwc=0; else if (!misc_word_char(*doc) || mwc) break; else mwc++; } param->prev='A'; /* be sure *prev is true_word_char */ word->len= (uint)(doc-word->pos) - mwc; if ((param->trunc=(doc<end && *doc == FTB_TRUNC))) doc++; if (((length >= ft_min_word_len && !is_stopword((char*) word->pos, word->len)) || param->trunc) && length < ft_max_word_len) { *start=doc; param->type= FT_TOKEN_WORD; goto ret; } else if (length) /* make sure length > 0 (if start contains spaces only) */ { *start= doc; param->type= FT_TOKEN_STOPWORD; goto ret; } } if (param->quot) { *start= doc; param->type= 3; /* FT_RBR */ goto ret; } ret: return param->type; } uchar ft_simple_get_word(CHARSET_INFO *cs, uchar **start, const uchar *end, FT_WORD *word, my_bool skip_stopwords) { uchar *doc= *start; uint mwc, length; int mbl; int ctype; DBUG_ENTER("ft_simple_get_word"); do { for (;; doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1))) { if (doc >= end) DBUG_RETURN(0); mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end); if (true_word_char(ctype, *doc)) break; } mwc= length= 0; for (word->pos= doc; doc < end; length++, doc+= (mbl > 0 ? mbl : (mbl < 0 ? 
-mbl : 1))) { mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end); if (true_word_char(ctype, *doc)) mwc= 0; else if (!misc_word_char(*doc) || mwc) break; else mwc++; } word->len= (uint)(doc-word->pos) - mwc; if (skip_stopwords == FALSE || (length >= ft_min_word_len && length < ft_max_word_len && !is_stopword((char*) word->pos, word->len))) { *start= doc; DBUG_RETURN(1); } } while (doc < end); DBUG_RETURN(0); } void ft_parse_init(TREE *wtree, CHARSET_INFO *cs) { DBUG_ENTER("ft_parse_init"); if (!is_tree_inited(wtree)) init_tree(wtree, 0, 0, sizeof(FT_WORD), (qsort_cmp2)&FT_WORD_cmp, 0, (void*)cs, MYF(0)); DBUG_VOID_RETURN; } static int ft_add_word(MYSQL_FTPARSER_PARAM *param, const char *word, int word_len, MYSQL_FTPARSER_BOOLEAN_INFO *boolean_info __attribute__((unused))) { TREE *wtree; FT_WORD w; MY_FT_PARSER_PARAM *ft_param=param->mysql_ftparam; DBUG_ENTER("ft_add_word"); wtree= ft_param->wtree; if (param->flags & MYSQL_FTFLAGS_NEED_COPY) { uchar *ptr; DBUG_ASSERT(wtree->with_delete == 0); ptr= (uchar *)alloc_root(ft_param->mem_root, word_len); memcpy(ptr, word, word_len); w.pos= ptr; } else w.pos= (uchar*) word; w.len= word_len; if (!tree_insert(wtree, &w, 0, wtree->custom_arg)) { delete_tree(wtree, 0); DBUG_RETURN(1); } DBUG_RETURN(0); } static int ft_parse_internal(MYSQL_FTPARSER_PARAM *param, const char *doc_arg, int doc_len) { uchar *doc= (uchar*) doc_arg; uchar *end= doc + doc_len; MY_FT_PARSER_PARAM *ft_param=param->mysql_ftparam; TREE *wtree= ft_param->wtree; FT_WORD w; DBUG_ENTER("ft_parse_internal"); while (ft_simple_get_word(wtree->custom_arg, &doc, end, &w, TRUE)) if (param->mysql_add_word(param, (char*) w.pos, w.len, 0)) DBUG_RETURN(1); DBUG_RETURN(0); } int ft_parse(TREE *wtree, uchar *doc, int doclen, struct st_mysql_ftparser *parser, MYSQL_FTPARSER_PARAM *param, MEM_ROOT *mem_root) { MY_FT_PARSER_PARAM my_param; DBUG_ENTER("ft_parse"); DBUG_ASSERT(parser); my_param.wtree= wtree; my_param.mem_root= mem_root; param->mysql_parse= ft_parse_internal; param->mysql_add_word= ft_add_word; param->mysql_ftparam= &my_param; param->cs= wtree->custom_arg; param->doc= (char*) doc; param->length= doclen; param->mode= MYSQL_FTPARSER_SIMPLE_MODE; DBUG_RETURN(parser->parse(param)); } #define MAX_PARAM_NR 2 MYSQL_FTPARSER_PARAM* ftparser_alloc_param(MI_INFO *info) { if (!info->ftparser_param) { /* . info->ftparser_param can not be zero after the initialization, because it always includes built-in fulltext parser. And built-in parser can be called even if the table has no fulltext indexes and no varchar/text fields. ftb_find_relevance... parser (ftb_find_relevance_parse, ftb_find_relevance_add_word) calls ftb_check_phrase... parser (ftb_check_phrase_internal, ftb_phrase_add_word). Thus MAX_PARAM_NR=2. */ info->ftparser_param= (MYSQL_FTPARSER_PARAM *) my_malloc(MAX_PARAM_NR * sizeof(MYSQL_FTPARSER_PARAM) * info->s->ftkeys, MYF(MY_WME | MY_ZEROFILL)); init_alloc_root(&info->ft_memroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0, MYF(0)); } return info->ftparser_param; } MYSQL_FTPARSER_PARAM *ftparser_call_initializer(MI_INFO *info, uint keynr, uint paramnr) { uint32 ftparser_nr; struct st_mysql_ftparser *parser; if (!ftparser_alloc_param(info)) return 0; if (keynr == NO_SUCH_KEY) { ftparser_nr= 0; parser= &ft_default_parser; } else { ftparser_nr= info->s->keyinfo[keynr].ftkey_nr; parser= info->s->keyinfo[keynr].parser; } DBUG_ASSERT(paramnr < MAX_PARAM_NR); ftparser_nr= ftparser_nr*MAX_PARAM_NR + paramnr; if (! 
info->ftparser_param[ftparser_nr].mysql_add_word) { /* Note, that mysql_add_word is used here as a flag: mysql_add_word == 0 - parser is not initialized mysql_add_word != 0 - parser is initialized, or no initialization needed. */ info->ftparser_param[ftparser_nr].mysql_add_word= (int (*)(struct st_mysql_ftparser_param *, const char *, int, MYSQL_FTPARSER_BOOLEAN_INFO *)) 1; if (parser->init && parser->init(&info->ftparser_param[ftparser_nr])) return 0; } return &info->ftparser_param[ftparser_nr]; } void ftparser_call_deinitializer(MI_INFO *info) { uint i, j, keys= info->s->state.header.keys; free_root(&info->ft_memroot, MYF(0)); if (! info->ftparser_param) return; for (i= 0; i < keys; i++) { MI_KEYDEF *keyinfo= &info->s->keyinfo[i]; for (j=0; j < MAX_PARAM_NR; j++) { MYSQL_FTPARSER_PARAM *ftparser_param= &info->ftparser_param[keyinfo->ftkey_nr * MAX_PARAM_NR + j]; if (keyinfo->flag & HA_FULLTEXT && ftparser_param->mysql_add_word) { if (keyinfo->parser->deinit) keyinfo->parser->deinit(ftparser_param); ftparser_param->mysql_add_word= 0; } else break; } } }
tempesta-tech/mariadb_10.2
storage/myisam/ft_parser.c
C
gpl-2.0
11,421
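A simplified, self-contained sketch of the scanning loop that ft_simple_get_word implements above: skip separator bytes, collect a run of word characters, and accept the token only if its length is within bounds. This handles single-byte ASCII only; the real parser asks the charset for (possibly multi-byte) character types, tolerates "misc" word characters inside a token, and also filters stopwords. The length limits are illustrative stand-ins for ft_min_word_len / ft_max_word_len.

#include <ctype.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

enum { MIN_WORD_LEN = 4, MAX_WORD_LEN = 84 };  /* hypothetical limits */

static int get_word(const char **start, const char *end,
                    const char **word, size_t *len)
{
    const char *doc = *start;
    for (;;) {
        while (doc < end && !isalnum((unsigned char)*doc))
            doc++;                              /* skip separators */
        if (doc >= end) {
            *start = doc;
            return 0;                           /* no more words */
        }
        *word = doc;
        while (doc < end && isalnum((unsigned char)*doc))
            doc++;                              /* collect the token */
        *len = (size_t)(doc - *word);
        if (*len >= MIN_WORD_LEN && *len < MAX_WORD_LEN) {
            *start = doc;
            return 1;                           /* acceptable word */
        }                                       /* too short/long: retry */
    }
}

int main(void)
{
    const char *doc = "a quick full-text indexing example";
    const char *end = doc + strlen(doc);
    const char *w;
    size_t n;
    while (get_word(&doc, end, &w, &n))
        printf("word: %.*s\n", (int)n, w);      /* quick, full, text, ... */
    return 0;
}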
/* * linux/fs/ext4/ialloc.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * BSD ufs-inspired inode and directory allocation by * Stephen Tweedie (sct@redhat.com), 1993 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include <linux/random.h> #include <linux/bitops.h> #include <linux/blkdev.h> #include <asm/byteorder.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include <trace/events/ext4.h> /* * ialloc.c contains the inodes allocation and deallocation routines */ /* * The free inodes are managed by bitmaps. A file system contains several * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap * block for inodes, N blocks for the inode table and data blocks. * * The file system contains group descriptors which are located after the * super block. Each descriptor contains the number of the bitmap block and * the free blocks count in the block. */ /* * To avoid calling the atomic setbit hundreds or thousands of times, we only * need to use it within a single byte (to ensure we get endianness right). * We can use memset for the rest of the bitmap as there are no other users. */ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap) { int i; if (start_bit >= end_bit) return; ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit); for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++) ext4_set_bit(i, bitmap); if (i < end_bit) memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3); } /* Initializes an uninitialized inode bitmap */ static unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh, ext4_group_t block_group, struct ext4_group_desc *gdp) { struct ext4_sb_info *sbi = EXT4_SB(sb); J_ASSERT_BH(bh, buffer_locked(bh)); /* If checksum is bad mark all blocks and inodes use to prevent * allocation, essentially implementing a per-group read-only flag. */ if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { ext4_error(sb, "Checksum bad for group %u", block_group); ext4_free_group_clusters_set(sb, gdp, 0); ext4_free_inodes_set(sb, gdp, 0); ext4_itable_unused_set(sb, gdp, 0); memset(bh->b_data, 0xff, sb->s_blocksize); return 0; } memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, bh->b_data); return EXT4_INODES_PER_GROUP(sb); } void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate) { if (uptodate) { set_buffer_uptodate(bh); set_bitmap_uptodate(bh); } unlock_buffer(bh); put_bh(bh); } /* * Read the inode allocation bitmap for a given block_group, reading * into the specified slot in the superblock's bitmap cache. * * Return buffer_head of bitmap on success or NULL. 
*/ static struct buffer_head * ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) { struct ext4_group_desc *desc; struct buffer_head *bh = NULL; ext4_fsblk_t bitmap_blk; desc = ext4_get_group_desc(sb, block_group, NULL); if (!desc) return NULL; bitmap_blk = ext4_inode_bitmap(sb, desc); bh = sb_getblk(sb, bitmap_blk); if (unlikely(!bh)) { ext4_error(sb, "Cannot read inode bitmap - " "block_group = %u, inode_bitmap = %llu", block_group, bitmap_blk); return NULL; } if (bitmap_uptodate(bh)) return bh; lock_buffer(bh); if (bitmap_uptodate(bh)) { unlock_buffer(bh); return bh; } ext4_lock_group(sb, block_group); if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { ext4_init_inode_bitmap(sb, bh, block_group, desc); set_bitmap_uptodate(bh); set_buffer_uptodate(bh); ext4_unlock_group(sb, block_group); unlock_buffer(bh); return bh; } ext4_unlock_group(sb, block_group); if (buffer_uptodate(bh)) { /* * if not uninit if bh is uptodate, * bitmap is also uptodate */ set_bitmap_uptodate(bh); unlock_buffer(bh); return bh; } /* * submit the buffer_head for reading */ trace_ext4_load_inode_bitmap(sb, block_group); bh->b_end_io = ext4_end_bitmap_read; get_bh(bh); submit_bh(READ, bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) { put_bh(bh); ext4_error(sb, "Cannot read inode bitmap - " "block_group = %u, inode_bitmap = %llu", block_group, bitmap_blk); return NULL; } return bh; } /* * NOTE! When we get the inode, we're the only people * that have access to it, and as such there are no * race conditions we have to worry about. The inode * is not on the hash-lists, and it cannot be reached * through the filesystem because the directory entry * has been deleted earlier. * * HOWEVER: we must make sure that we get no aliases, * which means that we have to call "clear_inode()" * _before_ we mark the inode not in use in the inode * bitmaps. Otherwise a newly created file might use * the same inode number (not actually the same pointer * though), and then we'd have two inodes sharing the * same inode number and space on the harddisk. */ void ext4_free_inode(handle_t *handle, struct inode *inode) { struct super_block *sb = inode->i_sb; int is_directory; unsigned long ino; struct buffer_head *bitmap_bh = NULL; struct buffer_head *bh2; ext4_group_t block_group; unsigned long bit; struct ext4_group_desc *gdp; struct ext4_super_block *es; struct ext4_sb_info *sbi; int fatal = 0, err, count, cleared; if (!sb) { printk(KERN_ERR "EXT4-fs: %s:%d: inode on " "nonexistent device\n", __func__, __LINE__); return; } if (atomic_read(&inode->i_count) > 1) { ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d", __func__, __LINE__, inode->i_ino, atomic_read(&inode->i_count)); return; } if (inode->i_nlink) { ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n", __func__, __LINE__, inode->i_ino, inode->i_nlink); return; } sbi = EXT4_SB(sb); ino = inode->i_ino; ext4_debug("freeing inode %lu\n", ino); trace_ext4_free_inode(inode); /* * Note: we must free any quota before locking the superblock, * as writing the quota to disk may need the lock as well. 
*/ dquot_initialize(inode); ext4_xattr_delete_inode(handle, inode); dquot_free_inode(inode); dquot_drop(inode); is_directory = S_ISDIR(inode->i_mode); /* Do this BEFORE marking the inode not in use or returning an error */ ext4_clear_inode(inode); es = EXT4_SB(sb)->s_es; if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { ext4_error(sb, "reserved or nonexistent inode %lu", ino); goto error_return; } block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); bitmap_bh = ext4_read_inode_bitmap(sb, block_group); if (!bitmap_bh) goto error_return; BUFFER_TRACE(bitmap_bh, "get_write_access"); fatal = ext4_journal_get_write_access(handle, bitmap_bh); if (fatal) goto error_return; fatal = -ESRCH; gdp = ext4_get_group_desc(sb, block_group, &bh2); if (gdp) { BUFFER_TRACE(bh2, "get_write_access"); fatal = ext4_journal_get_write_access(handle, bh2); } ext4_lock_group(sb, block_group); cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data); if (fatal || !cleared) { ext4_unlock_group(sb, block_group); goto out; } count = ext4_free_inodes_count(sb, gdp) + 1; ext4_free_inodes_set(sb, gdp, count); if (is_directory) { count = ext4_used_dirs_count(sb, gdp) - 1; ext4_used_dirs_set(sb, gdp, count); percpu_counter_dec(&sbi->s_dirs_counter); } gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); ext4_unlock_group(sb, block_group); percpu_counter_inc(&sbi->s_freeinodes_counter); if (sbi->s_log_groups_per_flex) { ext4_group_t f = ext4_flex_group(sbi, block_group); atomic_inc(&sbi->s_flex_groups[f].free_inodes); if (is_directory) atomic_dec(&sbi->s_flex_groups[f].used_dirs); } BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata"); fatal = ext4_handle_dirty_metadata(handle, NULL, bh2); out: if (cleared) { BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); if (!fatal) fatal = err; ext4_mark_super_dirty(sb); } else { /* for debugging, sangwoo2.lee */ print_bh(sb, bitmap_bh, 0, EXT4_BLOCK_SIZE(sb)); /* for debugging */ ext4_error(sb, "bit already cleared for inode %lu", ino); } error_return: brelse(bitmap_bh); ext4_std_error(sb, fatal); } struct orlov_stats { __u64 free_clusters; __u32 free_inodes; __u32 used_dirs; }; /* * Helper function for Orlov's allocator; returns critical information * for a particular block group or flex_bg. If flex_size is 1, then g * is a block group number; otherwise it is flex_bg number. */ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, int flex_size, struct orlov_stats *stats) { struct ext4_group_desc *desc; struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups; if (flex_size > 1) { stats->free_inodes = atomic_read(&flex_group[g].free_inodes); stats->free_clusters = atomic64_read(&flex_group[g].free_clusters); stats->used_dirs = atomic_read(&flex_group[g].used_dirs); return; } desc = ext4_get_group_desc(sb, g, NULL); if (desc) { stats->free_inodes = ext4_free_inodes_count(sb, desc); stats->free_clusters = ext4_free_group_clusters(sb, desc); stats->used_dirs = ext4_used_dirs_count(sb, desc); } else { stats->free_inodes = 0; stats->free_clusters = 0; stats->used_dirs = 0; } } /* * Orlov's allocator for directories. * * We always try to spread first-level directories. * * If there are blockgroups with both free inodes and free blocks counts * not worse than average we return one with smallest directory count. * Otherwise we simply return a random group. 
* * For the rest rules look so: * * It's OK to put directory into a group unless * it has too many directories already (max_dirs) or * it has too few free inodes left (min_inodes) or * it has too few free blocks left (min_blocks) or * Parent's group is preferred, if it doesn't satisfy these * conditions we search cyclically through the rest. If none * of the groups look good we just look for a group with more * free inodes than average (starting at parent's group). */ static int find_group_orlov(struct super_block *sb, struct inode *parent, ext4_group_t *group, umode_t mode, const struct qstr *qstr) { ext4_group_t parent_group = EXT4_I(parent)->i_block_group; struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_group_t real_ngroups = ext4_get_groups_count(sb); int inodes_per_group = EXT4_INODES_PER_GROUP(sb); unsigned int freei, avefreei, grp_free; ext4_fsblk_t freeb, avefreec; unsigned int ndirs; int max_dirs, min_inodes; ext4_grpblk_t min_clusters; ext4_group_t i, grp, g, ngroups; struct ext4_group_desc *desc; struct orlov_stats stats; int flex_size = ext4_flex_bg_size(sbi); struct dx_hash_info hinfo; ngroups = real_ngroups; if (flex_size > 1) { ngroups = (real_ngroups + flex_size - 1) >> sbi->s_log_groups_per_flex; parent_group >>= sbi->s_log_groups_per_flex; } freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); avefreei = freei / ngroups; freeb = EXT4_C2B(sbi, percpu_counter_read_positive(&sbi->s_freeclusters_counter)); avefreec = freeb; do_div(avefreec, ngroups); ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); if (S_ISDIR(mode) && ((parent == sb->s_root->d_inode) || (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) { int best_ndir = inodes_per_group; int ret = -1; if (qstr) { hinfo.hash_version = DX_HASH_HALF_MD4; hinfo.seed = sbi->s_hash_seed; ext4fs_dirhash(qstr->name, qstr->len, &hinfo); grp = hinfo.hash; } else get_random_bytes(&grp, sizeof(grp)); parent_group = (unsigned)grp % ngroups; for (i = 0; i < ngroups; i++) { g = (parent_group + i) % ngroups; get_orlov_stats(sb, g, flex_size, &stats); if (!stats.free_inodes) continue; if (stats.used_dirs >= best_ndir) continue; if (stats.free_inodes < avefreei) continue; if (stats.free_clusters < avefreec) continue; grp = g; ret = 0; best_ndir = stats.used_dirs; } if (ret) goto fallback; found_flex_bg: if (flex_size == 1) { *group = grp; return 0; } /* * We pack inodes at the beginning of the flexgroup's * inode tables. Block allocation decisions will do * something similar, although regular files will * start at 2nd block group of the flexgroup. See * ext4_ext_find_goal() and ext4_find_near(). 
*/ grp *= flex_size; for (i = 0; i < flex_size; i++) { if (grp+i >= real_ngroups) break; desc = ext4_get_group_desc(sb, grp+i, NULL); if (desc && ext4_free_inodes_count(sb, desc)) { *group = grp+i; return 0; } } goto fallback; } max_dirs = ndirs / ngroups + inodes_per_group / 16; min_inodes = avefreei - inodes_per_group*flex_size / 4; if (min_inodes < 1) min_inodes = 1; min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4; /* * Start looking in the flex group where we last allocated an * inode for this parent directory */ if (EXT4_I(parent)->i_last_alloc_group != ~0) { parent_group = EXT4_I(parent)->i_last_alloc_group; if (flex_size > 1) parent_group >>= sbi->s_log_groups_per_flex; } for (i = 0; i < ngroups; i++) { grp = (parent_group + i) % ngroups; get_orlov_stats(sb, grp, flex_size, &stats); if (stats.used_dirs >= max_dirs) continue; if (stats.free_inodes < min_inodes) continue; if (stats.free_clusters < min_clusters) continue; goto found_flex_bg; } fallback: ngroups = real_ngroups; avefreei = freei / ngroups; fallback_retry: parent_group = EXT4_I(parent)->i_block_group; for (i = 0; i < ngroups; i++) { grp = (parent_group + i) % ngroups; desc = ext4_get_group_desc(sb, grp, NULL); if (desc) { grp_free = ext4_free_inodes_count(sb, desc); if (grp_free && grp_free >= avefreei) { *group = grp; return 0; } } } if (avefreei) { /* * The free-inodes counter is approximate, and for really small * filesystems the above test can fail to find any blockgroups */ avefreei = 0; goto fallback_retry; } return -1; } static int find_group_other(struct super_block *sb, struct inode *parent, ext4_group_t *group, umode_t mode) { ext4_group_t parent_group = EXT4_I(parent)->i_block_group; ext4_group_t i, last, ngroups = ext4_get_groups_count(sb); struct ext4_group_desc *desc; int flex_size = ext4_flex_bg_size(EXT4_SB(sb)); /* * Try to place the inode is the same flex group as its * parent. If we can't find space, use the Orlov algorithm to * find another flex group, and store that information in the * parent directory's inode information so that use that flex * group for future allocations. */ if (flex_size > 1) { int retry = 0; try_again: parent_group &= ~(flex_size-1); last = parent_group + flex_size; if (last > ngroups) last = ngroups; for (i = parent_group; i < last; i++) { desc = ext4_get_group_desc(sb, i, NULL); if (desc && ext4_free_inodes_count(sb, desc)) { *group = i; return 0; } } if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) { retry = 1; parent_group = EXT4_I(parent)->i_last_alloc_group; goto try_again; } /* * If this didn't work, use the Orlov search algorithm * to find a new flex group; we pass in the mode to * avoid the topdir algorithms. */ *group = parent_group + flex_size; if (*group > ngroups) *group = 0; return find_group_orlov(sb, parent, group, mode, NULL); } /* * Try to place the inode in its parent directory */ *group = parent_group; desc = ext4_get_group_desc(sb, *group, NULL); if (desc && ext4_free_inodes_count(sb, desc) && ext4_free_group_clusters(sb, desc)) return 0; /* * We're going to place this inode in a different blockgroup from its * parent. We want to cause files in a common directory to all land in * the same blockgroup. But we want files which are in a different * directory which shares a blockgroup with our parent to land in a * different blockgroup. * * So add our directory's i_ino into the starting point for the hash. */ *group = (*group + parent->i_ino) % ngroups; /* * Use a quadratic hash to find a group with a free inode and some free * blocks. 
*/ for (i = 1; i < ngroups; i <<= 1) { *group += i; if (*group >= ngroups) *group -= ngroups; desc = ext4_get_group_desc(sb, *group, NULL); if (desc && ext4_free_inodes_count(sb, desc) && ext4_free_group_clusters(sb, desc)) return 0; } /* * That failed: try linear search for a free inode, even if that group * has no free blocks. */ *group = parent_group; for (i = 0; i < ngroups; i++) { if (++*group >= ngroups) *group = 0; desc = ext4_get_group_desc(sb, *group, NULL); if (desc && ext4_free_inodes_count(sb, desc)) return 0; } return -1; } /* * There are two policies for allocating an inode. If the new inode is * a directory, then a forward search is made for a block group with both * free space and a low directory-to-inode ratio; if that fails, then of * the groups with above-average free space, that group with the fewest * directories already is chosen. * * For other inodes, search forward from the parent directory's block * group to find a free inode. */ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode, const struct qstr *qstr, __u32 goal, uid_t *owner, int nblocks) { struct super_block *sb; struct buffer_head *inode_bitmap_bh = NULL; struct buffer_head *group_desc_bh; ext4_group_t ngroups, group = 0; unsigned long ino = 0; struct inode *inode; struct ext4_group_desc *gdp = NULL; struct ext4_inode_info *ei; struct ext4_sb_info *sbi; int ret2, err = 0; struct inode *ret; ext4_group_t i; ext4_group_t flex_group; /* Cannot create files in a deleted directory */ if (!dir || !dir->i_nlink) return ERR_PTR(-EPERM); sb = dir->i_sb; ngroups = ext4_get_groups_count(sb); trace_ext4_request_inode(dir, mode); inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); ei = EXT4_I(inode); sbi = EXT4_SB(sb); if (!goal) goal = sbi->s_inode_goal; if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) { group = (goal - 1) / EXT4_INODES_PER_GROUP(sb); ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb); ret2 = 0; goto got_group; } if (S_ISDIR(mode)) ret2 = find_group_orlov(sb, dir, &group, mode, qstr); else ret2 = find_group_other(sb, dir, &group, mode); got_group: EXT4_I(dir)->i_last_alloc_group = group; err = -ENOSPC; if (ret2 == -1) goto out; /* * Normally we will only go through one pass of this loop, * unless we get unlucky and it turns out the group we selected * had its last inode grabbed by someone else. 
*/ for (i = 0; i < ngroups; i++, ino = 0) { err = -EIO; gdp = ext4_get_group_desc(sb, group, &group_desc_bh); if (!gdp) { ext4_debug("ext4_get_group_desc error: %d\n", group); print_bh(sb, group_desc_bh, 0, EXT4_BLOCK_SIZE(sb)); goto fail; } if (inode_bitmap_bh) { ext4_handle_release_buffer(handle, inode_bitmap_bh); brelse(inode_bitmap_bh); } inode_bitmap_bh = ext4_read_inode_bitmap(sb, group); if (!inode_bitmap_bh) { ext4_debug("ext4_read_inode_bitmap error: %d\n", group); goto fail; } repeat_in_this_group: ino = ext4_find_next_zero_bit((unsigned long *) inode_bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino); if (ino >= EXT4_INODES_PER_GROUP(sb)) goto next_group; if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) { ext4_error(sb, "reserved inode found cleared - " "inode=%lu", ino + 1); continue; } if (!handle) { BUG_ON(nblocks <= 0); handle = ext4_journal_start_sb(dir->i_sb, nblocks); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto fail; } } BUFFER_TRACE(inode_bitmap_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, inode_bitmap_bh); if (err) goto fail; ext4_lock_group(sb, group); ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data); ext4_unlock_group(sb, group); ino++; /* the inode bitmap is zero-based */ if (!ret2) goto got; /* we grabbed the inode! */ if (ino < EXT4_INODES_PER_GROUP(sb)) goto repeat_in_this_group; next_group: if (++group == ngroups) group = 0; } ext4_handle_release_buffer(handle, inode_bitmap_bh); err = -ENOSPC; goto out; got: BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh); if (err) { ext4_debug("ext4_handle_dirty_metadata error\n"); goto fail; } /* We may have to initialize the block bitmap if it isn't already */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) && gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { struct buffer_head *block_bitmap_bh; block_bitmap_bh = ext4_read_block_bitmap(sb, group); if (!block_bitmap_bh) { err = -EIO; goto out; } BUFFER_TRACE(block_bitmap_bh, "get block bitmap access"); err = ext4_journal_get_write_access(handle, block_bitmap_bh); if (err) { brelse(block_bitmap_bh); ext4_debug("ext4_journal_get_write_access error\n"); goto fail; } BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap"); err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh); /* recheck and clear flag under lock if we still need to */ ext4_lock_group(sb, group); if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); ext4_free_group_clusters_set(sb, gdp, ext4_free_clusters_after_init(sb, group, gdp)); gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); } ext4_unlock_group(sb, group); brelse(block_bitmap_bh); if (err) { ext4_debug("ext4_handle_dirty_metadata error\n"); goto fail; } } BUFFER_TRACE(group_desc_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, group_desc_bh); if (err) { ext4_debug("ext4_journal_get_write_access error\n"); goto fail; } /* Update the relevant bg descriptor fields */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { int free; struct ext4_group_info *grp = ext4_get_group_info(sb, group); down_read(&grp->alloc_sem); /* protect vs itable lazyinit */ ext4_lock_group(sb, group); /* while we modify the bg desc */ free = EXT4_INODES_PER_GROUP(sb) - ext4_itable_unused_count(sb, gdp); if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT); free = 0; } /* * Check the 
relative inode number against the last used * relative inode number in this group. if it is greater * we need to update the bg_itable_unused count */ if (ino > free) ext4_itable_unused_set(sb, gdp, (EXT4_INODES_PER_GROUP(sb) - ino)); up_read(&grp->alloc_sem); } else { ext4_lock_group(sb, group); } ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1); if (S_ISDIR(mode)) { ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1); if (sbi->s_log_groups_per_flex) { ext4_group_t f = ext4_flex_group(sbi, group); atomic_inc(&sbi->s_flex_groups[f].used_dirs); } } if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); } ext4_unlock_group(sb, group); BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh); if (err) { ext4_debug("ext4_handle_dirty_metadata error\n"); goto fail; } percpu_counter_dec(&sbi->s_freeinodes_counter); if (S_ISDIR(mode)) percpu_counter_inc(&sbi->s_dirs_counter); ext4_mark_super_dirty(sb); if (sbi->s_log_groups_per_flex) { flex_group = ext4_flex_group(sbi, group); atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes); } if (owner) { inode->i_mode = mode; inode->i_uid = owner[0]; inode->i_gid = owner[1]; } else if (test_opt(sb, GRPID)) { inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = dir->i_gid; } else inode_init_owner(inode, dir, mode); inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb); /* This is the optimal IO size (for stat), not the fs block size */ inode->i_blocks = 0; inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime = ext4_current_time(inode); memset(ei->i_data, 0, sizeof(ei->i_data)); ei->i_dir_start_lookup = 0; ei->i_disksize = 0; /* Don't inherit extent flag from directory, amongst others. */ ei->i_flags = ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED); ei->i_file_acl = 0; ei->i_dtime = 0; ei->i_block_group = group; ei->i_last_alloc_group = ~0; ext4_set_inode_flags(inode); if (IS_DIRSYNC(inode)) ext4_handle_sync(handle); if (insert_inode_locked(inode) < 0) { /* * Likely a bitmap corruption causing inode to be allocated * twice. 
*/ ext4_debug("insert_inode_locked error\n"); if(inode_bitmap_bh) print_bh(sb, inode_bitmap_bh, 0, EXT4_BLOCK_SIZE(sb)); err = -EIO; goto fail; } spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ext4_set_inode_state(inode, EXT4_STATE_NEW); ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; ret = inode; dquot_initialize(inode); err = dquot_alloc_inode(inode); if (err) goto fail_drop; err = ext4_init_acl(handle, inode, dir); if (err) goto fail_free_drop; err = ext4_init_security(handle, inode, dir, qstr); if (err) goto fail_free_drop; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { /* set extent flag only for directory, file and normal symlink*/ if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) { ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS); ext4_ext_tree_init(handle, inode); } } if (ext4_handle_valid(handle)) { ei->i_sync_tid = handle->h_transaction->t_tid; ei->i_datasync_tid = handle->h_transaction->t_tid; } err = ext4_mark_inode_dirty(handle, inode); if (err) { ext4_std_error(sb, err); goto fail_free_drop; } ext4_debug("allocating inode %lu\n", inode->i_ino); trace_ext4_allocate_inode(inode, dir, mode); goto really_out; fail: ext4_std_error(sb, err); out: iput(inode); ret = ERR_PTR(err); really_out: brelse(inode_bitmap_bh); return ret; fail_free_drop: dquot_free_inode(inode); fail_drop: dquot_drop(inode); inode->i_flags |= S_NOQUOTA; clear_nlink(inode); unlock_new_inode(inode); iput(inode); brelse(inode_bitmap_bh); return ERR_PTR(err); } /* Verify that we are loading a valid orphan from disk */ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) { unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count); ext4_group_t block_group; int bit; struct buffer_head *bitmap_bh; struct inode *inode = NULL; long err = -EIO; /* Error cases - e2fsck has already cleaned up for us */ if (ino > max_ino) { ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino); goto error; } block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); bitmap_bh = ext4_read_inode_bitmap(sb, block_group); if (!bitmap_bh) { ext4_warning(sb, "inode bitmap error for orphan %lu", ino); goto error; } /* Having the inode bit set should be a 100% indicator that this * is a valid orphan (no e2fsck run on fs). Orphans also include * inodes that were being truncated, so we can't check i_nlink==0. */ if (!ext4_test_bit(bit, bitmap_bh->b_data)) goto bad_orphan; inode = ext4_iget(sb, ino); if (IS_ERR(inode)) goto iget_failed; /* * If the orphans has i_nlinks > 0 then it should be able to be * truncated, otherwise it won't be removed from the orphan list * during processing and an infinite loop will result. */ if (inode->i_nlink && !ext4_can_truncate(inode)) goto bad_orphan; if (NEXT_ORPHAN(inode) > max_ino) goto bad_orphan; brelse(bitmap_bh); return inode; iget_failed: err = PTR_ERR(inode); inode = NULL; bad_orphan: ext4_warning(sb, "bad orphan inode %lu! 
e2fsck was run?", ino); printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", bit, (unsigned long long)bitmap_bh->b_blocknr, ext4_test_bit(bit, bitmap_bh->b_data)); printk(KERN_NOTICE "inode=%p\n", inode); if (inode) { printk(KERN_NOTICE "is_bad_inode(inode)=%d\n", is_bad_inode(inode)); printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", NEXT_ORPHAN(inode)); printk(KERN_NOTICE "max_ino=%lu\n", max_ino); printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink); /* Avoid freeing blocks if we got a bad deleted inode */ if (inode->i_nlink == 0) inode->i_blocks = 0; iput(inode); } brelse(bitmap_bh); error: return ERR_PTR(err); } unsigned long ext4_count_free_inodes(struct super_block *sb) { unsigned long desc_count; struct ext4_group_desc *gdp; ext4_group_t i, ngroups = ext4_get_groups_count(sb); #ifdef EXT4FS_DEBUG struct ext4_super_block *es; unsigned long bitmap_count, x; struct buffer_head *bitmap_bh = NULL; es = EXT4_SB(sb)->s_es; desc_count = 0; bitmap_count = 0; gdp = NULL; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; desc_count += ext4_free_inodes_count(sb, gdp); brelse(bitmap_bh); bitmap_bh = ext4_read_inode_bitmap(sb, i); if (!bitmap_bh) continue; x = ext4_count_free(bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb) / 8); printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n", (unsigned long) i, ext4_free_inodes_count(sb, gdp), x); bitmap_count += x; } brelse(bitmap_bh); printk(KERN_DEBUG "ext4_count_free_inodes: " "stored = %u, computed = %lu, %lu\n", le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count); return desc_count; #else desc_count = 0; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; desc_count += ext4_free_inodes_count(sb, gdp); cond_resched(); } return desc_count; #endif } /* Called at mount-time, super-block is locked */ unsigned long ext4_count_dirs(struct super_block * sb) { unsigned long count = 0; ext4_group_t i, ngroups = ext4_get_groups_count(sb); for (i = 0; i < ngroups; i++) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; count += ext4_used_dirs_count(sb, gdp); } return count; } /* * Zeroes not yet zeroed inode table - just write zeroes through the whole * inode table. Must be called without any spinlock held. The only place * where it is called from on active part of filesystem is ext4lazyinit * thread, so we do not need any special locks, however we have to prevent * inode allocation from the current group, so we take alloc_sem lock, to * block ext4_new_inode() until we are finished. */ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, int barrier) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp = NULL; struct buffer_head *group_desc_bh; handle_t *handle; ext4_fsblk_t blk; int num, ret = 0, used_blks = 0; /* This should not happen, but just to be sure check this */ if (sb->s_flags & MS_RDONLY) { ret = 1; goto out; } gdp = ext4_get_group_desc(sb, group, &group_desc_bh); if (!gdp) goto out; /* * We do not need to lock this, because we are the only one * handling this flag. */ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) goto out; handle = ext4_journal_start_sb(sb, 1); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } down_write(&grp->alloc_sem); /* * If inode bitmap was already initialized there may be some * used inodes so we need to skip blocks with used inodes in * inode table. 
*/ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) - ext4_itable_unused_count(sb, gdp)), sbi->s_inodes_per_block); if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) { ext4_error(sb, "Something is wrong with group %u: " "used itable blocks: %d; " "itable unused count: %u", group, used_blks, ext4_itable_unused_count(sb, gdp)); ret = 1; goto err_out; } blk = ext4_inode_table(sb, gdp) + used_blks; num = sbi->s_itb_per_group - used_blks; BUFFER_TRACE(group_desc_bh, "get_write_access"); ret = ext4_journal_get_write_access(handle, group_desc_bh); if (ret) goto err_out; /* * Skip zeroout if the inode table is full. But we set the ZEROED * flag anyway, because obviously, when it is full it does not need * further zeroing. */ if (unlikely(num == 0)) goto skip_zeroout; ext4_debug("going to zero out inode table in group %d\n", group); ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS); if (ret < 0) goto err_out; if (barrier) blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL); skip_zeroout: ext4_lock_group(sb, group); gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED); gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); ext4_unlock_group(sb, group); BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata"); ret = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh); err_out: up_write(&grp->alloc_sem); ext4_journal_stop(handle); out: return ret; }
CurtisMJ/g800f_custom_kernel
fs/ext4/ialloc.c
C
gpl-2.0
33,970
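A standalone sketch of the quadratic probe used by find_group_other above: starting from a hashed group number, step by successive powers of two (mod ngroups) until a group passes the predicate, with the caller falling back to a linear scan if nothing is found. The predicate is a stand-in for "this group has free inodes and free clusters".

#include <stdio.h>

static int demo_group_ok(unsigned int g)
{
    return g == 5;  /* pretend only group 5 has free inodes and clusters */
}

static int quadratic_search(unsigned int start, unsigned int ngroups,
                            int (*group_ok)(unsigned int))
{
    unsigned int group = start;
    for (unsigned int i = 1; i < ngroups; i <<= 1) { /* strides 1, 2, 4, ... */
        group += i;
        if (group >= ngroups)
            group -= ngroups;    /* wrap around, as the kernel loop does */
        if (group_ok(group))
            return (int)group;
    }
    return -1;                   /* caller falls back to a linear scan */
}

int main(void)
{
    /* probes group 3, then group 5, and returns 5 */
    printf("found group %d\n", quadratic_search(2, 16, demo_group_ok));
    return 0;
}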
/* *************************************************************************** * Ralink Tech Inc. * 4F, No. 2 Technology 5th Rd. * Science-based Industrial Park * Hsin-chu, Taiwan, R.O.C. * * (c) Copyright 2002, Ralink Technology, Inc. * * All rights reserved. Ralink's source code is an unpublished work and the * use of a copyright notice does not imply otherwise. This source code * contains confidential trade secret material of Ralink Tech. Any attemp * or participation in deciphering, decoding, reverse engineering or in any * way altering the source code is stricitly prohibited, unless the prior * written consent of Ralink Technology, Inc. is obtained. *************************************************************************** Module Name: rbus_prop_dev.c Abstract: Create and register network interface for RBUS based chipsets in linux platform. Revision History: Who When What -------- ---------- ---------------------------------------------- */ #ifdef RTMP_RBUS_SUPPORT #define RTMP_MODULE_OS #include "rt_config.h" #if defined(CONFIG_RA_CLASSIFIER) && (!defined(CONFIG_RA_CLASSIFIER_MODULE)) extern int (*ra_classifier_init_func)(void); extern void (*ra_classifier_release_func)(void); extern struct proc_dir_entry *proc_ptr, *proc_ralink_wl_video; #endif #ifdef MEM_ALLOC_INFO_SUPPORT extern MEM_INFO_LIST MemInfoList; extern MEM_INFO_LIST PktInfoList; #endif /*MEM_ALLOC_INFO_SUPPORT*/ static struct pci_device_id mt_rbus_tbl[] DEVINITDATA = { #ifdef MT7622 {PCI_DEVICE(0x14c3, 0x7622)}, #endif /* MT7622 */ {} /* terminate list */ }; MODULE_DEVICE_TABLE(pci, mt_rbus_tbl); #define RBUS_TSSI_CTRL_OFFSET 0x34 #define RBUS_PA_LNA_CTRL_OFFSET 0x38 int rbus_tssi_set(struct _RTMP_ADAPTER *ad, UCHAR mode) { struct pci_dev *dev = ((POS_COOKIE)ad->OS_Cookie)->pci_dev; pci_write_config_byte(dev, RBUS_TSSI_CTRL_OFFSET, mode); return 0; } int rbus_pa_lna_set(struct _RTMP_ADAPTER *ad, UINT32 mode) { struct pci_dev *dev = ((POS_COOKIE)ad->OS_Cookie)->pci_dev; pci_write_config_dword(dev, RBUS_PA_LNA_CTRL_OFFSET, mode); return 0; } static int DEVINIT mt_rbus_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct net_device *net_dev; ULONG csr_addr; INT rv; void *handle = NULL; RTMP_ADAPTER *pAd; RTMP_OS_NETDEV_OP_HOOK netDevHook; UINT32 Value; MTWF_LOG(DBG_CAT_HIF, CATHIF_PCI, DBG_LVL_TRACE, ("===> rt2880_probe\n")); if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { /* * pci_set_consistent_dma_mask() will always be able to set the same * or a smaller mask as pci_set_dma_mask() */ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); } else { MTWF_LOG(DBG_CAT_HIF, CATHIF_PCI, DBG_LVL_ERROR, ("set DMA mask failed\n")); goto err_out; } #ifdef MEM_ALLOC_INFO_SUPPORT MemInfoListInital(); #endif /* MEM_ALLOC_INFO_SUPPORT */ /* map physical address to virtual address for accessing register */ csr_addr = (unsigned long)ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); /* Allocate RTMP_ADAPTER adapter structure */ os_alloc_mem(NULL, (UCHAR **)&handle, sizeof(struct os_cookie)); if (!handle) { MTWF_LOG(DBG_CAT_HIF, CATHIF_PCI, DBG_LVL_ERROR, ("Allocate memory for os_cookie failed!\n")); goto err_out; } os_zero_mem(handle, sizeof(struct os_cookie)); #ifdef OS_ABL_FUNC_SUPPORT /* get DRIVER operations */ RTMP_DRV_OPS_FUNCTION(pRtmpDrvOps, NULL, NULL, NULL); #endif /* OS_ABL_FUNC_SUPPORT */ rv = RTMPAllocAdapterBlock(handle, (VOID **)&pAd); if (rv != NDIS_STATUS_SUCCESS) { MTWF_LOG(DBG_CAT_HIF, CATHIF_PCI, DBG_LVL_ERROR, (" RTMPAllocAdapterBlock != NDIS_STATUS_SUCCESS\n")); os_free_mem(handle); 
goto err_out; } /* Here are the RTMP_ADAPTER structure with rbus-bus specific parameters. */ pAd->PciHif.CSRBaseAddress = (PUCHAR)csr_addr; RTMP_IO_READ32(pAd, TOP_HCR, &Value); pAd->ChipID = Value; /*is not a regular method*/ ((POS_COOKIE)handle)->pci_dev = (VOID *)pdev; ((POS_COOKIE)handle)->pDev = &pdev->dev; RtmpRaDevCtrlInit(pAd, RTMP_DEV_INF_RBUS); net_dev = RtmpPhyNetDevInit(pAd, &netDevHook); if (net_dev == NULL) goto err_out_free_radev; /*assign net_dev as pdev's privdate*/ pci_set_drvdata(pdev, net_dev); /* Here are the net_device structure with pci-bus specific parameters. */ net_dev->irq = pdev->irq; /* Interrupt IRQ number */ net_dev->base_addr = csr_addr; /* Save CSR virtual address and irq to device structure */ RTMP_DRIVER_CHIP_PREPARE(pAd); /*All done, it's time to register the net device to kernel. */ /* Register this device */ rv = RtmpOSNetDevAttach(pAd->OpMode, net_dev, &netDevHook); if (rv) { MTWF_LOG(DBG_CAT_HIF, CATHIF_PCI, DBG_LVL_ERROR, ("failed to call RtmpOSNetDevAttach(), rv=%d!\n", rv)); goto err_out_free_netdev; } MTWF_LOG(DBG_CAT_HIF, CATHIF_PCI, DBG_LVL_TRACE, ("%s: at CSR addr 0x%lx, IRQ %ld.\n", net_dev->name, (ULONG)csr_addr, (long int)net_dev->irq)); MTWF_LOG(DBG_CAT_HIF, CATHIF_PCI, DBG_LVL_TRACE, ("<=== %s()\n", __func__)); #if defined(CONFIG_RA_CLASSIFIER) && (!defined(CONFIG_RA_CLASSIFIER_MODULE)) proc_ptr = proc_ralink_wl_video; if (ra_classifier_init_func != NULL) ra_classifier_init_func(); #endif return 0; err_out_free_netdev: RtmpOSNetDevFree(net_dev); #ifdef MEM_ALLOC_INFO_SUPPORT { UINT32 memalctotal, pktalctotal; memalctotal = ShowMemAllocInfo(); pktalctotal = ShowPktAllocInfo(); if ((memalctotal != 0) || (pktalctotal != 0)) { MTWF_LOG(DBG_CAT_INIT, DBG_SUBCAT_ALL, DBG_LVL_ERROR, ("Error: Memory leak!!\n")); ASSERT(0); } MIListExit(&MemInfoList); MIListExit(&PktInfoList); } #endif /* MEM_ALLOC_INFO_SUPPORT */ err_out_free_radev: /* free RTMP_ADAPTER strcuture and os_cookie*/ RTMPFreeAdapter(pAd); err_out: return -ENODEV; } static VOID DEVEXIT mt_rbus_remove(struct pci_dev *pci_dev) { struct net_device *net_dev = pci_get_drvdata(pci_dev); RTMP_ADAPTER *pAd; if (net_dev == NULL) return; /* pAd = net_dev->priv; */ GET_PAD_FROM_NET_DEV(pAd, net_dev); if (pAd != NULL) { RtmpPhyNetDevExit(pAd, net_dev); RtmpRaDevCtrlExit(pAd); } else RtmpOSNetDevDetach(net_dev); /* Free the root net_device. 
*/ RtmpOSNetDevFree(net_dev); #if defined(CONFIG_RA_CLASSIFIER) && (!defined(CONFIG_RA_CLASSIFIER_MODULE)) proc_ptr = proc_ralink_wl_video; if (ra_classifier_release_func != NULL) ra_classifier_release_func(); #endif #ifdef MEM_ALLOC_INFO_SUPPORT { UINT32 memalctotal, pktalctotal; memalctotal = ShowMemAllocInfo(); pktalctotal = ShowPktAllocInfo(); if ((memalctotal != 0) || (pktalctotal != 0)) { MTWF_LOG(DBG_CAT_INIT, DBG_SUBCAT_ALL, DBG_LVL_ERROR, ("Error: Memory leak!!\n")); ASSERT(0); } MIListExit(&MemInfoList); MIListExit(&PktInfoList); } #endif /* MEM_ALLOC_INFO_SUPPORT */ } /* * Our PCI driver structure */ static struct pci_driver mt_rbus_driver = { .name = "mt_rbus", .id_table = mt_rbus_tbl, .probe = mt_rbus_probe, .remove = DEVEXIT_P(mt_rbus_remove), }; /* * Driver module load/unload functions */ int __init wbsys_module_init(void) { MTWF_LOG(DBG_CAT_HIF, CATHIF_PCI, DBG_LVL_ERROR, ("register %s\n", RTMP_DRV_NAME)); #ifdef MEM_ALLOC_INFO_SUPPORT MemInfoListInital(); #endif /* MEM_ALLOC_INFO_SUPPORT */ return pci_register_driver(&mt_rbus_driver); } void __exit wbsys_module_exit(void) { pci_unregister_driver(&mt_rbus_driver); } /** @} */ /** @} */ #ifndef MULTI_INF_SUPPORT module_init(wbsys_module_init); module_exit(wbsys_module_exit); #endif /* MULTI_INF_SUPPORT */ #endif /* RTMP_RBUS_SUPPORT */
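/*
 * A minimal, generic sketch of the probe/remove + pci_register_driver()
 * pattern the file above follows, written with standard C99 designated
 * initializers. This is illustration only, not part of the driver:
 * demo_probe/demo_remove and the "demo" name are hypothetical; only the
 * stock <linux/pci.h> and <linux/module.h> APIs are assumed.
 */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_tbl[] = {
	{ PCI_DEVICE(0x14c3, 0x7622) },	/* vendor/device as in the table above */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_tbl);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Map BAR0, allocate per-device state, register a net_device, ... */
	return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
	/* Undo everything done in demo_probe(), in reverse order. */
}

static struct pci_driver demo_driver = {
	.name     = "demo",
	.id_table = demo_tbl,
	.probe    = demo_probe,
	.remove   = demo_remove,
};
module_pci_driver(demo_driver);	/* expands to module_init()/module_exit() */
MODULE_LICENSE("GPL");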
tossp/lede-k3
package/lean/mt/drivers/mt7615d/src/mt_wifi/os/linux/rbus_prop_dev.c
C
gpl-2.0
7,562
/* Copyright 2007-2011 David Robillard <http://drobilla.net> Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define _XOPEN_SOURCE 500 #include <assert.h> #include <locale.h> #include <stdlib.h> #include <string.h> #include "lilv_internal.h" static void lilv_node_set_numerics_from_string(LilvNode* val) { char* locale; char* endptr; switch (val->type) { case LILV_VALUE_URI: case LILV_VALUE_BLANK: case LILV_VALUE_STRING: break; case LILV_VALUE_INT: // FIXME: locale kludge, need a locale independent strtol locale = lilv_strdup(setlocale(LC_NUMERIC, NULL)); setlocale(LC_NUMERIC, "POSIX"); val->val.int_val = strtol(val->str_val, &endptr, 10); setlocale(LC_NUMERIC, locale); free(locale); break; case LILV_VALUE_FLOAT: // FIXME: locale kludge, need a locale independent strtod locale = lilv_strdup(setlocale(LC_NUMERIC, NULL)); setlocale(LC_NUMERIC, "POSIX"); val->val.float_val = strtod(val->str_val, &endptr); setlocale(LC_NUMERIC, locale); free(locale); break; case LILV_VALUE_BOOL: val->val.bool_val = (!strcmp(val->str_val, "true")); break; } } /** Note that if @a type is numeric or boolean, the returned value is corrupt * until lilv_node_set_numerics_from_string is called. It is not * automatically called from here to avoid overhead and imprecision when the * exact string value is known. 
*/ LilvNode* lilv_node_new(LilvWorld* world, LilvNodeType type, const char* str) { LilvNode* val = malloc(sizeof(struct LilvNodeImpl)); val->world = world; val->type = type; switch (type) { case LILV_VALUE_URI: val->val.uri_val = sord_new_uri(world->world, (const uint8_t*)str); val->str_val = (char*)sord_node_get_string(val->val.uri_val); break; case LILV_VALUE_BLANK: val->val.uri_val = sord_new_blank(world->world, (const uint8_t*)str); val->str_val = (char*)sord_node_get_string(val->val.uri_val); break; /* don't fall through and clobber str_val with a leaked strdup */ case LILV_VALUE_STRING: case LILV_VALUE_INT: case LILV_VALUE_FLOAT: case LILV_VALUE_BOOL: val->str_val = lilv_strdup(str); break; } return val; } /** Create a new LilvNode from @a node, or return NULL if impossible */ LilvNode* lilv_node_new_from_node(LilvWorld* world, const SordNode* node) { LilvNode* result = NULL; SordNode* datatype_uri = NULL; LilvNodeType type = LILV_VALUE_STRING; switch (sord_node_get_type(node)) { case SORD_URI: result = malloc(sizeof(struct LilvNodeImpl)); result->world = (LilvWorld*)world; result->type = LILV_VALUE_URI; result->val.uri_val = sord_node_copy(node); result->str_val = (char*)sord_node_get_string(result->val.uri_val); break; case SORD_BLANK: result = malloc(sizeof(struct LilvNodeImpl)); result->world = (LilvWorld*)world; result->type = LILV_VALUE_BLANK; result->val.uri_val = sord_node_copy(node); result->str_val = (char*)sord_node_get_string(result->val.uri_val); break; case SORD_LITERAL: datatype_uri = sord_node_get_datatype(node); if (datatype_uri) { if (sord_node_equals(datatype_uri, world->xsd_boolean_node)) type = LILV_VALUE_BOOL; else if (sord_node_equals(datatype_uri, world->xsd_decimal_node) || sord_node_equals(datatype_uri, world->xsd_double_node)) type = LILV_VALUE_FLOAT; else if (sord_node_equals(datatype_uri, world->xsd_integer_node)) type = LILV_VALUE_INT; else LILV_ERRORF("Unknown datatype `%s'\n", sord_node_get_string(datatype_uri)); } result = lilv_node_new(world, type, (const char*)sord_node_get_string(node)); switch (result->type) { case LILV_VALUE_INT: case LILV_VALUE_FLOAT: case LILV_VALUE_BOOL: lilv_node_set_numerics_from_string(result); default: break; } break; default: assert(false); } return result; } LILV_API LilvNode* lilv_new_uri(LilvWorld* world, const char* uri) { return lilv_node_new(world, LILV_VALUE_URI, uri); } LILV_API LilvNode* lilv_new_string(LilvWorld* world, const char* str) { return lilv_node_new(world, LILV_VALUE_STRING, str); } LILV_API LilvNode* lilv_new_int(LilvWorld* world, int val) { char str[32]; snprintf(str, sizeof(str), "%d", val); LilvNode* ret = lilv_node_new(world, LILV_VALUE_INT, str); ret->val.int_val = val; return ret; } LILV_API LilvNode* lilv_new_float(LilvWorld* world, float val) { char str[32]; snprintf(str, sizeof(str), "%f", val); LilvNode* ret = lilv_node_new(world, LILV_VALUE_FLOAT, str); ret->val.float_val = val; return ret; } LILV_API LilvNode* lilv_new_bool(LilvWorld* world, bool val) { LilvNode* ret = lilv_node_new(world, LILV_VALUE_BOOL, val ?
"true" : "false"); ret->val.bool_val = val; return ret; } LILV_API LilvNode* lilv_node_duplicate(const LilvNode* val) { if (val == NULL) return NULL; LilvNode* result = malloc(sizeof(struct LilvNodeImpl)); result->world = val->world; result->type = val->type; switch (val->type) { case LILV_VALUE_URI: case LILV_VALUE_BLANK: result->val.uri_val = sord_node_copy(val->val.uri_val); result->str_val = (char*)sord_node_get_string(result->val.uri_val); break; default: result->str_val = lilv_strdup(val->str_val); result->val = val->val; } return result; } LILV_API void lilv_node_free(LilvNode* val) { if (val) { switch (val->type) { case LILV_VALUE_URI: case LILV_VALUE_BLANK: sord_node_free(val->world->world, val->val.uri_val); break; default: free(val->str_val); } free(val); } } LILV_API bool lilv_node_equals(const LilvNode* value, const LilvNode* other) { if (value == NULL && other == NULL) return true; else if (value == NULL || other == NULL) return false; else if (value->type != other->type) return false; switch (value->type) { case LILV_VALUE_URI: return sord_node_equals(value->val.uri_val, other->val.uri_val); case LILV_VALUE_BLANK: case LILV_VALUE_STRING: return !strcmp(value->str_val, other->str_val); case LILV_VALUE_INT: return (value->val.int_val == other->val.int_val); case LILV_VALUE_FLOAT: return (value->val.float_val == other->val.float_val); case LILV_VALUE_BOOL: return (value->val.bool_val == other->val.bool_val); } return false; /* shouldn't get here */ } LILV_API char* lilv_node_get_turtle_token(const LilvNode* value) { size_t len = 0; char* result = NULL; char* locale = NULL; switch (value->type) { case LILV_VALUE_URI: len = strlen(value->str_val) + 3; result = calloc(len, 1); snprintf(result, len, "<%s>", value->str_val); break; case LILV_VALUE_BLANK: len = strlen(value->str_val) + 3; result = calloc(len, 1); snprintf(result, len, "_:%s", value->str_val); break; case LILV_VALUE_STRING: case LILV_VALUE_BOOL: result = lilv_strdup(value->str_val); break; case LILV_VALUE_INT: // INT64_MAX is 9223372036854775807 (19 digits) + 1 for sign // FIXME: locale kludge, need a locale independent snprintf locale = lilv_strdup(setlocale(LC_NUMERIC, NULL)); len = 20; result = calloc(len, 1); setlocale(LC_NUMERIC, "POSIX"); snprintf(result, len, "%d", value->val.int_val); setlocale(LC_NUMERIC, locale); break; case LILV_VALUE_FLOAT: // FIXME: locale kludge, need a locale independent snprintf locale = lilv_strdup(setlocale(LC_NUMERIC, NULL)); len = 20; // FIXME: proper maximum value? 
result = calloc(len, 1); setlocale(LC_NUMERIC, "POSIX"); snprintf(result, len, "%f", value->val.float_val); setlocale(LC_NUMERIC, locale); break; } free(locale); return result; } LILV_API bool lilv_node_is_uri(const LilvNode* value) { return (value && value->type == LILV_VALUE_URI); } LILV_API const char* lilv_node_as_uri(const LilvNode* value) { assert(lilv_node_is_uri(value)); return value->str_val; } const SordNode* lilv_node_as_node(const LilvNode* value) { assert(lilv_node_is_uri(value)); return value->val.uri_val; } LILV_API bool lilv_node_is_blank(const LilvNode* value) { return (value && value->type == LILV_VALUE_BLANK); } LILV_API const char* lilv_node_as_blank(const LilvNode* value) { assert(lilv_node_is_blank(value)); return value->str_val; } LILV_API bool lilv_node_is_literal(const LilvNode* value) { if (!value) return false; switch (value->type) { case LILV_VALUE_STRING: case LILV_VALUE_INT: case LILV_VALUE_FLOAT: return true; default: return false; } } LILV_API bool lilv_node_is_string(const LilvNode* value) { return (value && value->type == LILV_VALUE_STRING); } LILV_API const char* lilv_node_as_string(const LilvNode* value) { return value->str_val; } LILV_API bool lilv_node_is_int(const LilvNode* value) { return (value && value->type == LILV_VALUE_INT); } LILV_API int lilv_node_as_int(const LilvNode* value) { assert(value); assert(lilv_node_is_int(value)); return value->val.int_val; } LILV_API bool lilv_node_is_float(const LilvNode* value) { return (value && value->type == LILV_VALUE_FLOAT); } LILV_API float lilv_node_as_float(const LilvNode* value) { assert(lilv_node_is_float(value) || lilv_node_is_int(value)); if (lilv_node_is_float(value)) return value->val.float_val; else // lilv_node_is_int(value) return (float)value->val.int_val; } LILV_API bool lilv_node_is_bool(const LilvNode* value) { return (value && value->type == LILV_VALUE_BOOL); } LILV_API bool lilv_node_as_bool(const LilvNode* value) { assert(value); assert(lilv_node_is_bool(value)); return value->val.bool_val; }
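/* A short usage sketch for the node API above. Assumptions: lilv_world_new()
 * and lilv_world_free() from the public lilv API, which live outside this
 * file. It exercises the typed constructors, the predicate/accessor pairs,
 * and lilv_node_get_turtle_token(), whose result the caller must free(). */
#include <stdio.h>
#include <stdlib.h>
#include "lilv/lilv.h"

int main(void)
{
	LilvWorld* world = lilv_world_new();
	LilvNode*  i     = lilv_new_int(world, 42);
	LilvNode*  f     = lilv_new_float(world, 1.5f);

	if (lilv_node_is_int(i))
		printf("int: %d\n", lilv_node_as_int(i));

	/* lilv_node_as_float() also accepts ints, per its assertion above */
	printf("as float: %f\n", lilv_node_as_float(i));
	printf("equal: %d\n", lilv_node_equals(i, f));	/* 0: types differ */

	char* token = lilv_node_get_turtle_token(i);	/* "42" */
	printf("turtle: %s\n", token);
	free(token);

	lilv_node_free(i);
	lilv_node_free(f);
	lilv_world_free(world);
	return 0;
}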
ccherrett/oom
dependencies/lilvbundle/lilv-0.5.0/src/node.c
C
gpl-2.0
10,040
/* Operating system support for run-time dynamic linker. Hurd version. Copyright (C) 1995-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* In the static library, this is all handled by dl-support.c or by the vanilla definitions in the rest of the C library. */ #ifdef SHARED #include <hurd.h> #include <link.h> #include <unistd.h> #include <fcntl.h> #include <stdlib.h> #include <sys/mman.h> #include <ldsodefs.h> #include <sys/wait.h> #include <assert.h> #include <sysdep.h> #include <mach/mig_support.h> #include "hurdstartup.h" #include <hurd/lookup.h> #include <hurd/auth.h> #include <hurd/term.h> #include <stdarg.h> #include <ctype.h> #include <sys/stat.h> #include <sys/uio.h> #include <entry.h> #include <dl-machine.h> #include <dl-procinfo.h> extern void __mach_init (void); extern int _dl_argc; extern char **_dl_argv; extern char **_environ; int __libc_enable_secure = 0; INTVARDEF(__libc_enable_secure) int __libc_multiple_libcs = 0; /* Defining this here avoids the inclusion of init-first. */ /* This variable contains the lowest stack address ever used. */ void *__libc_stack_end; #if HP_TIMING_AVAIL hp_timing_t _dl_cpuclock_offset; #endif /* TODO: this is never properly initialized in here. */ void *_dl_random attribute_relro = NULL; struct hurd_startup_data *_dl_hurd_data; #define FMH defined(__i386__) #if ! FMH # define fmh() ((void)0) # define unfmh() ((void)0) #else /* XXX loser kludge for vm_map kernel bug */ #undef ELF_MACHINE_USER_ADDRESS_MASK #define ELF_MACHINE_USER_ADDRESS_MASK 0 static vm_address_t fmha; static vm_size_t fmhs; static void unfmh(void){ __vm_deallocate(__mach_task_self(),fmha,fmhs);} static void fmh(void) { error_t err;int x;mach_port_t p; vm_address_t a=0x08000000U,max=VM_MAX_ADDRESS; while (!(err=__vm_region(__mach_task_self(),&a,&fmhs,&x,&x,&x,&x,&p,&x))){ __mach_port_deallocate(__mach_task_self(),p); if (a+fmhs>=0x80000000U){ max=a; break;} fmha=a+=fmhs;} if (err) assert(err==KERN_NO_SPACE); if (!fmha) fmhs=0; else while (1) { fmhs=max-fmha; if (fmhs == 0) break; err = __vm_map (__mach_task_self (), &fmha, fmhs, 0, 0, MACH_PORT_NULL, 0, 1, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_COPY); if (!err) break; if (err != KERN_INVALID_ADDRESS && err != KERN_NO_SPACE) assert_perror(err); vm_address_t new_max = (max - 1) & 0xf0000000U; if (new_max >= max) { fmhs = 0; fmha = 0; break; } max = new_max; } } /* XXX loser kludge for vm_map kernel bug */ #endif ElfW(Addr) _dl_sysdep_start (void **start_argptr, void (*dl_main) (const ElfW(Phdr) *phdr, ElfW(Word) phent, ElfW(Addr) *user_entry, ElfW(auxv_t) *auxv)) { void go (intptr_t *argdata) { char **p; /* Cache the information in various global variables. */ _dl_argc = *argdata; _dl_argv = 1 + (char **) argdata; _environ = &_dl_argv[_dl_argc + 1]; for (p = _environ; *p++;); /* Skip environ pointers and terminator. 
*/ if ((void *) p == _dl_argv[0]) { static struct hurd_startup_data nodata; _dl_hurd_data = &nodata; nodata.user_entry = (vm_address_t) ENTRY_POINT; } else _dl_hurd_data = (void *) p; INTUSE(__libc_enable_secure) = _dl_hurd_data->flags & EXEC_SECURE; if (_dl_hurd_data->flags & EXEC_STACK_ARGS && _dl_hurd_data->user_entry == 0) _dl_hurd_data->user_entry = (vm_address_t) ENTRY_POINT; unfmh(); /* XXX */ #if 0 /* XXX make this work for real someday... */ if (_dl_hurd_data->user_entry == (vm_address_t) ENTRY_POINT) /* We were invoked as a command, not as the program interpreter. The generic ld.so code supports this: it will parse the args as "ld.so PROGRAM [ARGS...]". For booting the Hurd, we support an additional special syntax: ld.so [-LIBS...] PROGRAM [ARGS...] Each LIBS word consists of "FILENAME=MEMOBJ"; for example "-/lib/libc.so=123" says that the contents of /lib/libc.so are found in a memory object whose port name in our task is 123. */ while (_dl_argc > 2 && _dl_argv[1][0] == '-' && _dl_argv[1][1] != '-') { char *lastslash, *memobjname, *p; struct link_map *l; mach_port_t memobj; error_t err; ++_dl_skip_args; --_dl_argc; p = _dl_argv++[1] + 1; memobjname = strchr (p, '='); if (! memobjname) _dl_sysdep_fatal ("Bogus library spec: ", p, "\n", NULL); *memobjname++ = '\0'; memobj = 0; while (*memobjname != '\0') memobj = (memobj * 10) + (*memobjname++ - '0'); /* Add a user reference on the memory object port, so we will still have one after _dl_map_object_from_fd calls our `close'. */ err = __mach_port_mod_refs (__mach_task_self (), memobj, MACH_PORT_RIGHT_SEND, +1); assert_perror (err); lastslash = strrchr (p, '/'); l = _dl_map_object_from_fd (lastslash ? lastslash + 1 : p, memobj, strdup (p), 0); /* Squirrel away the memory object port where it can be retrieved by the program later. */ l->l_info[DT_NULL] = (void *) memobj; } #endif /* Call elf/rtld.c's main program. It will set everything up and leave us to transfer control to USER_ENTRY. */ (*dl_main) ((const ElfW(Phdr) *) _dl_hurd_data->phdr, _dl_hurd_data->phdrsz / sizeof (ElfW(Phdr)), &_dl_hurd_data->user_entry, NULL); /* The call above might screw a few things up. First of all, if _dl_skip_args is nonzero, we are ignoring the first few arguments. However, if we have no Hurd startup data, it is the magical convention that ARGV[0] == P. The startup code in init-first.c will get confused if this is not the case, so we must rearrange things to make it so. We'll overwrite the original ARGV[0] at P with ARGV[_dl_skip_args]. Secondly, if we need to be secure, it removes some dangerous environment variables. If we have no Hurd startup data this changes P (since that's the location after the terminating NULL in the list of environment variables). We do the same thing as in the first case but make sure we recalculate P. If we do have Hurd startup data, we have to move the data such that it starts just after the terminating NULL in the environment list. We use memmove, since the locations might overlap.
*/ if (INTUSE(__libc_enable_secure) || _dl_skip_args) { char **newp; for (newp = _environ; *newp++;); if (_dl_argv[-_dl_skip_args] == (char *) p) { if ((char *) newp != _dl_argv[0]) { assert ((char *) newp < _dl_argv[0]); _dl_argv[0] = memmove ((char *) newp, _dl_argv[0], strlen (_dl_argv[0]) + 1); } } else { if ((void *) newp != _dl_hurd_data) memmove (newp, _dl_hurd_data, sizeof (*_dl_hurd_data)); } } { extern void _dl_start_user (void); /* Unwind the stack to ARGDATA and simulate a return from _dl_start to the RTLD_START code which will run the user's entry point. */ RETURN_TO (argdata, &_dl_start_user, _dl_hurd_data->user_entry); } } /* Set up so we can do RPCs. */ __mach_init (); /* Initialize frequently used global variable. */ GLRO(dl_pagesize) = __getpagesize (); #if HP_TIMING_AVAIL HP_TIMING_NOW (_dl_cpuclock_offset); #endif fmh(); /* XXX */ /* See hurd/hurdstartup.c; this deals with getting information from the exec server and slicing up the arguments. Then it will call `go', above. */ _hurd_startup (start_argptr, &go); LOSE; abort (); } void internal_function _dl_sysdep_start_cleanup (void) { /* Deallocate the reply port and task port rights acquired by __mach_init. We are done with them now, and the user will reacquire them for himself when he wants them. */ __mig_dealloc_reply_port (MACH_PORT_NULL); __mach_port_deallocate (__mach_task_self (), __mach_task_self_); } /* Minimal open/close/mmap implementation sufficient for initial loading of shared libraries. These are weak definitions so that when the dynamic linker re-relocates itself to be user-visible (for -ldl), it will get the user's definition (i.e. usually libc's). */ /* Open FILE_NAME and return a Hurd I/O for it in *PORT, or return an error. If STAT is non-zero, stat the file into that stat buffer. */ static error_t open_file (const char *file_name, int flags, mach_port_t *port, struct stat64 *stat) { enum retry_type doretry; char retryname[1024]; /* XXX string_t LOSES! */ file_t startdir; error_t err; error_t use_init_port (int which, error_t (*operate) (file_t)) { return (which < _dl_hurd_data->portarraysize ? ((*operate) (_dl_hurd_data->portarray[which])) : EGRATUITOUS); } file_t get_dtable_port (int fd) { if ((unsigned int) fd < _dl_hurd_data->dtablesize && _dl_hurd_data->dtable[fd] != MACH_PORT_NULL) { __mach_port_mod_refs (__mach_task_self (), _dl_hurd_data->dtable[fd], MACH_PORT_RIGHT_SEND, +1); return _dl_hurd_data->dtable[fd]; } errno = EBADF; return MACH_PORT_NULL; } assert (!(flags & ~(O_READ | O_CLOEXEC))); startdir = _dl_hurd_data->portarray[file_name[0] == '/' ? INIT_PORT_CRDIR : INIT_PORT_CWDIR]; while (file_name[0] == '/') file_name++; err = __dir_lookup (startdir, (char *)file_name, O_RDONLY, 0, &doretry, retryname, port); if (!err) err = __hurd_file_name_lookup_retry (use_init_port, get_dtable_port, __dir_lookup, doretry, retryname, O_RDONLY, 0, port); if (!err && stat) { err = __io_stat (*port, stat); if (err) __mach_port_deallocate (__mach_task_self (), *port); } return err; } int weak_function __open (const char *file_name, int mode, ...) 
{ mach_port_t port; error_t err = open_file (file_name, mode, &port, 0); if (err) return __hurd_fail (err); else return (int)port; } int weak_function __close (int fd) { if (fd != (int) MACH_PORT_NULL) __mach_port_deallocate (__mach_task_self (), (mach_port_t) fd); return 0; } __ssize_t weak_function __libc_read (int fd, void *buf, size_t nbytes) { error_t err; char *data; mach_msg_type_number_t nread; data = buf; nread = nbytes; err = __io_read ((mach_port_t) fd, &data, &nread, -1, nbytes); if (err) return __hurd_fail (err); if (data != buf) { memcpy (buf, data, nread); __vm_deallocate (__mach_task_self (), (vm_address_t) data, nread); } return nread; } libc_hidden_weak (__libc_read) __ssize_t weak_function __libc_write (int fd, const void *buf, size_t nbytes) { error_t err; mach_msg_type_number_t nwrote; assert (fd < _hurd_init_dtablesize); err = __io_write (_hurd_init_dtable[fd], buf, nbytes, -1, &nwrote); if (err) return __hurd_fail (err); return nwrote; } libc_hidden_weak (__libc_write) /* This is only used for printing messages (see dl-misc.c). */ __ssize_t weak_function __writev (int fd, const struct iovec *iov, int niov) { if (fd >= _hurd_init_dtablesize) { errno = EBADF; return -1; } int i; size_t total = 0; for (i = 0; i < niov; ++i) total += iov[i].iov_len; if (total != 0) { char buf[total], *bufp = buf; error_t err; mach_msg_type_number_t nwrote; for (i = 0; i < niov; ++i) bufp = (memcpy (bufp, iov[i].iov_base, iov[i].iov_len) + iov[i].iov_len); err = __io_write (_hurd_init_dtable[fd], buf, total, -1, &nwrote); if (err) return __hurd_fail (err); return nwrote; } return 0; } off64_t weak_function __libc_lseek64 (int fd, off64_t offset, int whence) { error_t err; err = __io_seek ((mach_port_t) fd, offset, whence, &offset); if (err) return __hurd_fail (err); return offset; } __ptr_t weak_function __mmap (__ptr_t addr, size_t len, int prot, int flags, int fd, off_t offset) { error_t err; vm_prot_t vmprot; vm_address_t mapaddr; mach_port_t memobj_rd, memobj_wr; vmprot = VM_PROT_NONE; if (prot & PROT_READ) vmprot |= VM_PROT_READ; if (prot & PROT_WRITE) vmprot |= VM_PROT_WRITE; if (prot & PROT_EXEC) vmprot |= VM_PROT_EXECUTE; if (flags & MAP_ANON) memobj_rd = MACH_PORT_NULL; else { assert (!(flags & MAP_SHARED)); err = __io_map ((mach_port_t) fd, &memobj_rd, &memobj_wr); if (err) return __hurd_fail (err), MAP_FAILED; __mach_port_deallocate (__mach_task_self (), memobj_wr); } mapaddr = (vm_address_t) addr; err = __vm_map (__mach_task_self (), &mapaddr, (vm_size_t) len, ELF_MACHINE_USER_ADDRESS_MASK, !(flags & MAP_FIXED), memobj_rd, (vm_offset_t) offset, flags & (MAP_COPY|MAP_PRIVATE), vmprot, VM_PROT_ALL, (flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY); if (err == KERN_NO_SPACE && (flags & MAP_FIXED)) { /* XXX this is not atomic as it is in unix! */ /* The region is already allocated; deallocate it first. */ err = __vm_deallocate (__mach_task_self (), mapaddr, len); if (! err) err = __vm_map (__mach_task_self (), &mapaddr, (vm_size_t) len, ELF_MACHINE_USER_ADDRESS_MASK, !(flags & MAP_FIXED), memobj_rd, (vm_offset_t) offset, flags & (MAP_COPY|MAP_PRIVATE), vmprot, VM_PROT_ALL, (flags & MAP_SHARED) ? 
VM_INHERIT_SHARE : VM_INHERIT_COPY); } if ((flags & MAP_ANON) == 0) __mach_port_deallocate (__mach_task_self (), memobj_rd); if (err) return __hurd_fail (err), MAP_FAILED; return (__ptr_t) mapaddr; } int weak_function __fxstat64 (int vers, int fd, struct stat64 *buf) { error_t err; assert (vers == _STAT_VER); err = __io_stat ((mach_port_t) fd, buf); if (err) return __hurd_fail (err); return 0; } libc_hidden_def (__fxstat64) int weak_function __xstat64 (int vers, const char *file, struct stat64 *buf) { error_t err; mach_port_t port; assert (vers == _STAT_VER); err = open_file (file, 0, &port, buf); if (err) return __hurd_fail (err); __mach_port_deallocate (__mach_task_self (), port); return 0; } libc_hidden_def (__xstat64) /* This function is called by the dynamic linker (rtld.c) to check whether debugging malloc is allowed even for SUID binaries. This stub will always fail, which means that malloc-debugging is always disabled for SUID binaries. */ int weak_function __access (const char *file, int type) { errno = ENOSYS; return -1; } pid_t weak_function __getpid (void) { pid_t pid, ppid; int orphaned; if (__proc_getpids (_dl_hurd_data->portarray[INIT_PORT_PROC], &pid, &ppid, &orphaned)) return -1; return pid; } /* This is called only in some strange cases trying to guess a value for $ORIGIN for the executable. The dynamic linker copes with getcwd failing (dl-object.c), and it's too much hassle to include the functionality here. (We could, it just requires duplicating or reusing getcwd.c's code but using our special lookup function as in `open', above.) */ char * weak_function __getcwd (char *buf, size_t size) { errno = ENOSYS; return NULL; } void weak_function attribute_hidden _exit (int status) { __proc_mark_exit (_dl_hurd_data->portarray[INIT_PORT_PROC], W_EXITCODE (status, 0), 0); while (__task_terminate (__mach_task_self ())) __mach_task_self_ = (__mach_task_self) (); } /* We need this alias to satisfy references from libc_pic.a objects that were affected by the libc_hidden_proto declaration for _exit. */ strong_alias (_exit, __GI__exit) /* Try to get a machine dependent instruction which will make the program crash. This is used in case everything else fails. */ #include <abort-instr.h> #ifndef ABORT_INSTRUCTION /* No such instruction is available. */ # define ABORT_INSTRUCTION #endif void weak_function abort (void) { /* Try to abort using the system specific command. */ ABORT_INSTRUCTION; /* If the abort instruction failed, exit. */ _exit (127); /* If even this fails, make sure we never return. */ while (1) /* Try for ever and ever. */ ABORT_INSTRUCTION; } /* We need this alias to satisfy references from libc_pic.a objects that were affected by the libc_hidden_proto declaration for abort. */ strong_alias (abort, __GI_abort) /* This function is called by interruptible RPC stubs. For initial dynamic linking, just use the normal mach_msg. Since this defn is weak, the real defn in libc.so will override it if we are linked into the user program (-ldl). */ error_t weak_function _hurd_intr_rpc_mach_msg (mach_msg_header_t *msg, mach_msg_option_t option, mach_msg_size_t send_size, mach_msg_size_t rcv_size, mach_port_t rcv_name, mach_msg_timeout_t timeout, mach_port_t notify) { return __mach_msg (msg, option, send_size, rcv_size, rcv_name, timeout, notify); } void internal_function _dl_show_auxv (void) { /* There is nothing to print. Hurd has no auxiliary vector. */ } void weak_function _dl_init_first (int argc, ...) { /* This no-op definition only gets used if libc is not linked in. 
*/ } #endif /* SHARED */
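/* A standalone sketch of the gather-write strategy used by __writev above:
   sum the iovec lengths, coalesce every segment into one stack buffer (a
   VLA, as in the original), then issue a single write. This hypothetical
   version targets plain POSIX write(2) instead of the Hurd __io_write RPC,
   so it is illustration only, not part of this file.  */
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

static ssize_t
gather_write (int fd, const struct iovec *iov, int niov)
{
  size_t total = 0;
  for (int i = 0; i < niov; ++i)
    total += iov[i].iov_len;
  if (total == 0)
    return 0;

  char buf[total];		/* one contiguous copy of all segments */
  char *bufp = buf;
  for (int i = 0; i < niov; ++i)
    bufp = (char *) memcpy (bufp, iov[i].iov_base, iov[i].iov_len)
	   + iov[i].iov_len;

  return write (fd, buf, total);
}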
Phant0mas/glibc-hurd
sysdeps/mach/hurd/dl-sysdep.c
C
gpl-2.0
17,842
/* * Block driver for media (i.e., flash cards) * * Copyright 2002 Hewlett-Packard Company * Copyright 2005-2008 Pierre Ossman * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. * * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS * FITNESS FOR ANY PARTICULAR PURPOSE. * * Many thanks to Alessandro Rubini and Jonathan Corbet! * * Author: Andrew Christian * 28 May 2002 */ #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/hdreg.h> #include <linux/kdev_t.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> #include <linux/delay.h> #include <linux/capability.h> #include <linux/compat.h> #include <linux/sysfs.h> #include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> #include <asm/uaccess.h> #include "queue.h" MODULE_ALIAS("mmc:block"); #if defined(CONFIG_MMC_CPRM) #include "cprmdrv_samsung.h" #include <linux/ioctl.h> #define MMC_IOCTL_BASE 0xB3 /* Same as MMC block device major number */ #define MMC_IOCTL_GET_SECTOR_COUNT _IOR(MMC_IOCTL_BASE, 100, int) #define MMC_IOCTL_GET_SECTOR_SIZE _IOR(MMC_IOCTL_BASE, 101, int) #define MMC_IOCTL_GET_BLOCK_SIZE _IOR(MMC_IOCTL_BASE, 102, int) #define MMC_IOCTL_SET_RETRY_AKE_PROCESS _IOR(MMC_IOCTL_BASE, 104, int) static int cprm_ake_retry_flag; #endif #ifdef MODULE_PARAM_PREFIX #undef MODULE_PARAM_PREFIX #endif #define MODULE_PARAM_PREFIX "mmcblk." #define INAND_CMD38_ARG_EXT_CSD 113 #define INAND_CMD38_ARG_ERASE 0x00 #define INAND_CMD38_ARG_TRIM 0x01 #define INAND_CMD38_ARG_SECERASE 0x80 #define INAND_CMD38_ARG_SECTRIM1 0x81 #define INAND_CMD38_ARG_SECTRIM2 0x88 #define MMC_BLK_TIMEOUT_MS (30 * 1000) /* 30 sec timeout */ #define MMC_SANITIZE_REQ_TIMEOUT 240000 /* msec */ #define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ (req->cmd_flags & REQ_META)) && \ (rq_data_dir(req) == WRITE)) #define PACKED_CMD_VER 0x01 #define PACKED_CMD_WR 0x02 #define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \ do { \ if (stats->enabled) \ stats->pack_stop_reason[reason]++; \ } while (0) static DEFINE_MUTEX(block_mutex); /* * The defaults come from config options but can be overridden by module * or bootarg options. */ static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; /* * We've only got one major, so number of mmcblk devices is * limited to 256 / number of minors per device. */ static int max_devices; /* 256 minors, so at most 256 separate devices */ static DECLARE_BITMAP(dev_use, 256); static DECLARE_BITMAP(name_use, 256); /* * There is one mmc_blk_data per slot. */ struct mmc_blk_data { spinlock_t lock; struct gendisk *disk; struct mmc_queue queue; struct list_head part; unsigned int flags; #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ unsigned int usage; unsigned int read_only; unsigned int part_type; unsigned int name_idx; unsigned int reset_done; #define MMC_BLK_READ BIT(0) #define MMC_BLK_WRITE BIT(1) #define MMC_BLK_DISCARD BIT(2) #define MMC_BLK_SECDISCARD BIT(3) /* * Only set in main mmc_blk_data associated * with mmc_card with mmc_set_drvdata, and keeps * track of the currently selected device partition.
*/ unsigned int part_curr; struct device_attribute force_ro; struct device_attribute power_ro_lock; struct device_attribute num_wr_reqs_to_start_packing; struct device_attribute bkops_check_threshold; int area_type; }; static DEFINE_MUTEX(open_lock); enum { MMC_PACKED_N_IDX = -1, MMC_PACKED_N_ZERO, MMC_PACKED_N_SINGLE, }; module_param(perdev_minors, int, 0444); MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device"); static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq) { mqrq->packed_cmd = MMC_PACKED_NONE; mqrq->packed_num = MMC_PACKED_N_ZERO; } static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) { struct mmc_blk_data *md; mutex_lock(&open_lock); md = disk->private_data; if (md && md->usage == 0) md = NULL; if (md) md->usage++; mutex_unlock(&open_lock); return md; } static inline int mmc_get_devidx(struct gendisk *disk) { int devidx = disk->first_minor / perdev_minors; return devidx; } static void mmc_blk_put(struct mmc_blk_data *md) { mutex_lock(&open_lock); md->usage--; if (md->usage == 0) { int devidx = mmc_get_devidx(md->disk); blk_cleanup_queue(md->queue.queue); __clear_bit(devidx, dev_use); put_disk(md->disk); kfree(md); } mutex_unlock(&open_lock); } static ssize_t power_ro_lock_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); struct mmc_card *card = md->queue.card; int locked = 0; if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) locked = 2; else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) locked = 1; ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); mmc_blk_put(md); /* drop the reference taken by mmc_blk_get() */ return ret; } static ssize_t power_ro_lock_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; struct mmc_blk_data *md, *part_md; struct mmc_card *card; unsigned long set; if (kstrtoul(buf, 0, &set)) return -EINVAL; if (set != 1) return count; md = mmc_blk_get(dev_to_disk(dev)); card = md->queue.card; mmc_claim_host(card->host); ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, card->ext_csd.boot_ro_lock | EXT_CSD_BOOT_WP_B_PWR_WP_EN, card->ext_csd.part_time); if (ret) pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret); else card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; mmc_release_host(card->host); if (!ret) { pr_info("%s: Locking boot partition ro until next power on\n", md->disk->disk_name); set_disk_ro(md->disk, 1); list_for_each_entry(part_md, &md->part, part) if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); set_disk_ro(part_md->disk, 1); } } mmc_blk_put(md); return count; } static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); ret = snprintf(buf, PAGE_SIZE, "%d", get_disk_ro(dev_to_disk(dev)) ^ md->read_only); mmc_blk_put(md); return ret; } static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; char *end; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); unsigned long set = simple_strtoul(buf, &end, 0); if (end == buf) { ret = -EINVAL; goto out; } set_disk_ro(dev_to_disk(dev), set || md->read_only); ret = count; out: mmc_blk_put(md); return ret; } static ssize_t num_wr_reqs_to_start_packing_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_blk_data *md =
mmc_blk_get(dev_to_disk(dev)); int num_wr_reqs_to_start_packing; int ret; num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing; ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing); mmc_blk_put(md); return ret; } static ssize_t num_wr_reqs_to_start_packing_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); sscanf(buf, "%d", &value); if (value >= 0) md->queue.num_wr_reqs_to_start_packing = value; mmc_blk_put(md); return count; } static ssize_t bkops_check_threshold_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); struct mmc_card *card = md->queue.card; int ret; if (!card) ret = -EINVAL; else ret = snprintf(buf, PAGE_SIZE, "%d\n", card->bkops_info.size_percentage_to_queue_delayed_work); mmc_blk_put(md); return ret; } static ssize_t bkops_check_threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); struct mmc_card *card = md->queue.card; unsigned int card_size; int ret = count; if (!card) { ret = -EINVAL; goto exit; } sscanf(buf, "%d", &value); if ((value <= 0) || (value >= 100)) { ret = -EINVAL; goto exit; } card_size = (unsigned int)get_capacity(md->disk); if (card_size == 0) { ret = -EINVAL; goto exit; } card->bkops_info.size_percentage_to_queue_delayed_work = value; card->bkops_info.min_sectors_to_queue_delayed_work = (card_size * value) / 100; pr_debug("%s: size_percentage = %d, min_sectors = %d", mmc_hostname(card->host), card->bkops_info.size_percentage_to_queue_delayed_work, card->bkops_info.min_sectors_to_queue_delayed_work); exit: mmc_blk_put(md); return ret; /* propagate -EINVAL instead of always returning count */ } static int mmc_blk_open(struct block_device *bdev, fmode_t mode) { struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); int ret = -ENXIO; mutex_lock(&block_mutex); if (md) { if (md->usage == 2) check_disk_change(bdev); ret = 0; if ((mode & FMODE_WRITE) && md->read_only) { mmc_blk_put(md); ret = -EROFS; } } mutex_unlock(&block_mutex); return ret; } static int mmc_blk_release(struct gendisk *disk, fmode_t mode) { struct mmc_blk_data *md = disk->private_data; mutex_lock(&block_mutex); mmc_blk_put(md); mutex_unlock(&block_mutex); return 0; } static int mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); geo->heads = 4; geo->sectors = 16; return 0; } struct mmc_blk_ioc_data { struct mmc_ioc_cmd ic; unsigned char *buf; u64 buf_bytes; }; static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( struct mmc_ioc_cmd __user *user) { struct mmc_blk_ioc_data *idata; int err; idata = kzalloc(sizeof(*idata), GFP_KERNEL); if (!idata) { err = -ENOMEM; goto out; } if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { err = -EFAULT; goto idata_err; } idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { err = -EOVERFLOW; goto idata_err; } if (!idata->buf_bytes) return idata; idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); if (!idata->buf) { err = -ENOMEM; goto idata_err; } if (copy_from_user(idata->buf, (void __user *)(unsigned long) idata->ic.data_ptr, idata->buf_bytes)) { err = -EFAULT; goto copy_err; } return idata; copy_err: kfree(idata->buf); idata_err: kfree(idata); out: return ERR_PTR(err); } struct scatterlist *mmc_blk_get_sg(struct mmc_card *card, unsigned char *buf, int
*sg_len, int size) { struct scatterlist *sg; struct scatterlist *sl; int total_sec_cnt, sec_cnt; int max_seg_size, len; total_sec_cnt = size; max_seg_size = card->host->max_seg_size; len = (size - 1 + max_seg_size) / max_seg_size; sl = kmalloc(sizeof(struct scatterlist) * len, GFP_KERNEL); if (!sl) { return NULL; } sg = (struct scatterlist *)sl; sg_init_table(sg, len); while (total_sec_cnt) { if (total_sec_cnt < max_seg_size) sec_cnt = total_sec_cnt; else sec_cnt = max_seg_size; sg_set_page(sg, virt_to_page(buf), sec_cnt, offset_in_page(buf)); buf = buf + sec_cnt; total_sec_cnt = total_sec_cnt - sec_cnt; if (total_sec_cnt == 0) break; sg = sg_next(sg); } if (sg) sg_mark_end(sg); *sg_len = len; return sl; } static int mmc_blk_ioctl_cmd(struct block_device *bdev, struct mmc_ioc_cmd __user *ic_ptr) { struct mmc_blk_ioc_data *idata; struct mmc_blk_data *md; struct mmc_card *card; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct mmc_request mrq = {NULL}; struct scatterlist *sg = 0; int err=0; /* * The caller must have CAP_SYS_RAWIO, and must be calling this on the * whole block device, not on a partition. This prevents overspray * between sibling partitions. */ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) return -EPERM; idata = mmc_blk_ioctl_copy_from_user(ic_ptr); if (IS_ERR(idata)) return PTR_ERR(idata); md = mmc_blk_get(bdev->bd_disk); if (!md) { err = -EINVAL; goto cmd_done; } card = md->queue.card; if (IS_ERR(card)) { err = PTR_ERR(card); goto cmd_done; } cmd.opcode = idata->ic.opcode; cmd.arg = idata->ic.arg; cmd.flags = idata->ic.flags; if (idata->buf_bytes) { int len; data.blksz = idata->ic.blksz; data.blocks = idata->ic.blocks; sg = mmc_blk_get_sg(card, idata->buf, &len, idata->buf_bytes); data.sg = sg; data.sg_len = len; if (idata->ic.write_flag) data.flags = MMC_DATA_WRITE; else data.flags = MMC_DATA_READ; /* data.flags must already be set before doing this. */ mmc_set_data_timeout(&data, card); /* Allow overriding the timeout_ns for empirical tuning. */ if (idata->ic.data_timeout_ns) data.timeout_ns = idata->ic.data_timeout_ns; if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { /* * Pretend this is a data transfer and rely on the * host driver to compute timeout. When all host * drivers support cmd.cmd_timeout for R1B, this * can be changed to: * * mrq.data = NULL; * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; */ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; } mrq.data = &data; } mrq.cmd = &cmd; mmc_claim_host(card->host); if (idata->ic.is_acmd) { err = mmc_app_cmd(card->host, card); if (err) goto cmd_rel_host; } mmc_wait_for_req(card->host, &mrq); if (cmd.error) { dev_err(mmc_dev(card->host), "%s: cmd error %d\n", __func__, cmd.error); err = cmd.error; goto cmd_rel_host; } if (data.error) { dev_err(mmc_dev(card->host), "%s: data error %d\n", __func__, data.error); err = data.error; goto cmd_rel_host; } /* * According to the SD specs, some commands require a delay after * issuing the command. 
*/ if (idata->ic.postsleep_min_us) usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { err = -EFAULT; goto cmd_rel_host; } if (!idata->ic.write_flag) { if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr, idata->buf, idata->buf_bytes)) { err = -EFAULT; goto cmd_rel_host; } } cmd_rel_host: mmc_release_host(card->host); cmd_done: mmc_blk_put(md); if (sg) kfree(sg); kfree(idata->buf); kfree(idata); return err; } static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { #if defined(CONFIG_MMC_CPRM) struct mmc_blk_data *md = bdev->bd_disk->private_data; struct mmc_card *card = md->queue.card; static int i; static unsigned long temp_arg[16] = {0}; #endif int ret = -EINVAL; if (cmd == MMC_IOC_CMD) ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); #if defined(CONFIG_MMC_CPRM) printk(KERN_DEBUG " %s ], %x ", __func__, cmd); switch (cmd) { case MMC_IOCTL_SET_RETRY_AKE_PROCESS: cprm_ake_retry_flag = 1; ret = 0; break; case MMC_IOCTL_GET_SECTOR_COUNT: { int size = 0; size = (int)get_capacity(md->disk) << 9; printk(KERN_DEBUG "[%s]:MMC_IOCTL_GET_SECTOR_COUNT size = %d\n", __func__, size); return copy_to_user((void *)arg, &size, sizeof(size)); /* size is an int, not a u64 */ } break; case ACMD13: case ACMD18: case ACMD25: case ACMD43: case ACMD44: case ACMD45: case ACMD46: case ACMD47: case ACMD48: { struct cprm_request *req = (struct cprm_request *)arg; printk(KERN_DEBUG "%s:cmd [%x]\n", __func__, cmd); if (cmd == ACMD43) { printk(KERN_DEBUG"storing acmd43 arg[%d] = %u\n", i, (unsigned int)req->arg); temp_arg[i] = req->arg; i++; if (i >= 16) { printk(KERN_DEBUG"reset acmd43 i = %d\n", i); i = 0; } } if (cmd == ACMD45 && cprm_ake_retry_flag == 1) { cprm_ake_retry_flag = 0; printk(KERN_DEBUG"ACMD45..
I'll call ACMD43 and ACMD44 first\n"); for (i = 0; i < 16; i++) { printk(KERN_DEBUG"calling ACMD43 with arg[%d] = %u\n", i, (unsigned int)temp_arg[i]); if (stub_sendcmd(card, ACMD43, temp_arg[i], 512, NULL) < 0) { printk(KERN_DEBUG"error ACMD43 %d\n", i); return -EINVAL; } } printk(KERN_DEBUG"calling ACMD44\n"); if (stub_sendcmd(card, ACMD44, 0, 8, NULL) < 0) { printk(KERN_DEBUG"error in ACMD44 %d\n", i); return -EINVAL; } } return stub_sendcmd(card, req->cmd, req->arg, req->len, req->buff); } break; default: printk(KERN_DEBUG"%s: Invalid ioctl command\n", __func__); break; } #endif return ret; } #ifdef CONFIG_COMPAT static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); } #endif static const struct block_device_operations mmc_bdops = { .open = mmc_blk_open, .release = mmc_blk_release, .getgeo = mmc_blk_getgeo, .owner = THIS_MODULE, .ioctl = mmc_blk_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mmc_blk_compat_ioctl, #endif }; static inline int mmc_blk_part_switch(struct mmc_card *card, struct mmc_blk_data *md) { int ret; struct mmc_blk_data *main_md = mmc_get_drvdata(card); if (main_md->part_curr == md->part_type) return 0; if (mmc_card_mmc(card)) { u8 part_config = card->ext_csd.part_config; part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; part_config |= md->part_type; ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, part_config, card->ext_csd.part_time); if (ret) return ret; card->ext_csd.part_config = part_config; } main_md->part_curr = md->part_type; return 0; } static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) { int err; u32 result; __be32 *blocks; struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct scatterlist sg; cmd.opcode = MMC_APP_CMD; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) return (u32)-1; if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) return (u32)-1; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = 4; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; mmc_set_data_timeout(&data, card); mrq.cmd = &cmd; mrq.data = &data; blocks = kmalloc(4, GFP_KERNEL); if (!blocks) return (u32)-1; sg_init_one(&sg, blocks, 4); mmc_wait_for_req(card->host, &mrq); result = ntohl(*blocks); kfree(blocks); if (cmd.error || data.error) result = (u32)-1; return result; } static int send_stop(struct mmc_card *card, u32 *status) { struct mmc_command cmd = {0}; int err; cmd.opcode = MMC_STOP_TRANSMISSION; cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 5); if (err == 0) *status = cmd.resp[0]; return err; } static int get_card_status(struct mmc_card *card, u32 *status, int retries) { struct mmc_command cmd = {0}; int err; cmd.opcode = MMC_SEND_STATUS; if (!mmc_host_is_spi(card->host)) cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, retries); if (err == 0) *status = cmd.resp[0]; return err; } #define ERR_NOMEDIUM 3 #define ERR_RETRY 2 #define ERR_ABORT 1 #define ERR_CONTINUE 0 static int mmc_blk_cmd_error(struct request *req, const char *name, int error, bool status_valid, u32 status) { switch (error) { case -EILSEQ: /* response crc error, retry
the r/w cmd */ pr_err("%s: %s sending %s command, card status %#x\n", req->rq_disk->disk_name, "response CRC error", name, status); return ERR_RETRY; case -ETIMEDOUT: pr_err("%s: %s sending %s command, card status %#x\n", req->rq_disk->disk_name, "timed out", name, status); /* If the status cmd initially failed, retry the r/w cmd */ if (!status_valid) { pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name); return ERR_RETRY; } /* * If it was a r/w cmd crc error, or illegal command * (eg, issued in wrong state) then retry - we should * have corrected the state problem above. */ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) { pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name); return ERR_RETRY; } /* Otherwise abort the command */ pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name); return ERR_ABORT; default: /* We don't understand the error code the driver gave us */ pr_err("%s: unknown error %d sending read/write command, card status %#x\n", req->rq_disk->disk_name, error, status); return ERR_ABORT; } } /* * Initial r/w and stop cmd error recovery. * We don't know whether the card received the r/w cmd or not, so try to * restore things back to a sane state. Essentially, we do this as follows: * - Obtain card status. If the first attempt to obtain card status fails, * the status word will reflect the failed status cmd, not the failed * r/w cmd. If we fail to obtain card status, it suggests we can no * longer communicate with the card. * - Check the card state. If the card received the cmd but there was a * transient problem with the response, it might still be in a data transfer * mode. Try to send it a stop command. If this fails, we can't recover. * - If the r/w cmd failed due to a response CRC error, it was probably * transient, so retry the cmd. * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry. * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or * illegal cmd, retry. * Otherwise we don't understand what happened, so abort. */ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, struct mmc_blk_request *brq, int *ecc_err, int *gen_err) { bool prev_cmd_status_valid = true; u32 status, stop_status = 0; int err, retry; if (mmc_card_removed(card)) return ERR_NOMEDIUM; /* * Try to get card status which indicates both the card state * and why there was no response. If the first attempt fails, * we can't be sure the returned status is for the r/w command. */ for (retry = 2; retry >= 0; retry--) { err = get_card_status(card, &status, 0); if (!err) break; prev_cmd_status_valid = false; pr_err("%s: error %d sending status command, %sing\n", req->rq_disk->disk_name, err, retry ? "retry" : "abort"); } /* We couldn't get a response from the card. Give up. */ if (err) { /* Check if the card is removed */ if (mmc_detect_card_removed(card->host)) return ERR_NOMEDIUM; return ERR_ABORT; } /* Flag ECC errors */ if ((status & R1_CARD_ECC_FAILED) || (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) *ecc_err = 1; /* Flag General errors */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) if ((status & R1_ERROR) || (brq->stop.resp[0] & R1_ERROR)) { pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", req->rq_disk->disk_name, __func__, brq->stop.resp[0], status); *gen_err = 1; } /* * Check the current card state. 
If it is in some data transfer * mode, tell it to stop (and hopefully transition back to TRAN.) */ if (R1_CURRENT_STATE(status) == R1_STATE_DATA || R1_CURRENT_STATE(status) == R1_STATE_RCV) { err = send_stop(card, &stop_status); if (err) pr_err("%s: error %d sending stop command\n", req->rq_disk->disk_name, err); /* * If the stop cmd also timed out, the card is probably * not present, so abort. Other errors are bad news too. */ if (err) return ERR_ABORT; if (stop_status & R1_CARD_ECC_FAILED) *ecc_err = 1; if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) if (stop_status & R1_ERROR) { pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", req->rq_disk->disk_name, __func__, stop_status); *gen_err = 1; } } /* Check for set block count errors */ if (brq->sbc.error) return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, prev_cmd_status_valid, status); /* Check for r/w command errors */ if (brq->cmd.error) return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, prev_cmd_status_valid, status); /* Data errors */ if (!brq->stop.error) return ERR_CONTINUE; /* Now for stop errors. These aren't fatal to the transfer. */ pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", req->rq_disk->disk_name, brq->stop.error, brq->cmd.resp[0], status); /* * Substitute in our own stop status as this will give the error * state which happened during the execution of the r/w command. */ if (stop_status) { brq->stop.resp[0] = stop_status; brq->stop.error = 0; } return ERR_CONTINUE; } static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, int type) { int err; if (md->reset_done & type) return -EEXIST; md->reset_done |= type; err = mmc_hw_reset(host); /* Ensure we switch back to the correct partition */ if (err != -EOPNOTSUPP) { struct mmc_blk_data *main_md = mmc_get_drvdata(host->card); int part_err; main_md->part_curr = main_md->part_type; part_err = mmc_blk_part_switch(host->card, md); if (part_err) { /* * We have failed to get back into the correct * partition, so we need to abort the whole request. */ return -ENODEV; } } return err; } static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) { md->reset_done &= ~type; } static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; unsigned int from, nr, arg; int err = 0, type = MMC_BLK_DISCARD; if (!mmc_can_erase(card)) { err = -EOPNOTSUPP; goto out; } from = blk_rq_pos(req); nr = blk_rq_sectors(req); if (card->ext_csd.bkops_en) card->bkops_info.sectors_changed += blk_rq_sectors(req); if (mmc_can_discard(card)) arg = MMC_DISCARD_ARG; else if (mmc_can_trim(card)) arg = MMC_TRIM_ARG; else arg = MMC_ERASE_ARG; retry: if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, arg == MMC_TRIM_ARG ? INAND_CMD38_ARG_TRIM : INAND_CMD38_ARG_ERASE, 0); if (err) goto out; } err = mmc_erase(card, from, nr, arg); out: if (err == -EIO && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) mmc_blk_reset_success(md, type); blk_end_request(req, err, blk_rq_bytes(req)); return err ?
0 : 1; } static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; unsigned int from, nr, arg; int err = 0, type = MMC_BLK_SECDISCARD; if (!(mmc_can_secure_erase_trim(card))) { err = -EOPNOTSUPP; goto out; } from = blk_rq_pos(req); nr = blk_rq_sectors(req); if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) arg = MMC_SECURE_TRIM1_ARG; else arg = MMC_SECURE_ERASE_ARG; retry: if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, arg == MMC_SECURE_TRIM1_ARG ? INAND_CMD38_ARG_SECTRIM1 : INAND_CMD38_ARG_SECERASE, 0); if (err) goto out_retry; } err = mmc_erase(card, from, nr, arg); if (err == -EIO) goto out_retry; if (err) goto out; if (arg == MMC_SECURE_TRIM1_ARG) { if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, INAND_CMD38_ARG_SECTRIM2, 0); if (err) goto out_retry; } err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); if (err == -EIO) goto out_retry; if (err) goto out; } if (mmc_can_sanitize(card) && (card->host->caps2 & MMC_CAP2_SANITIZE)) err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START, 1, 0); out_retry: if (err && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) mmc_blk_reset_success(md, type); out: blk_end_request(req, err, blk_rq_bytes(req)); return err ? 0 : 1; } static int mmc_blk_issue_sanitize_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; int err = 0; BUG_ON(!card); BUG_ON(!card->host); if (!(mmc_can_sanitize(card) && (card->host->caps2 & MMC_CAP2_SANITIZE))) { pr_warning("%s: %s - SANITIZE is not supported\n", mmc_hostname(card->host), __func__); err = -EOPNOTSUPP; goto out; } pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", mmc_hostname(card->host), __func__); err = mmc_switch_ignore_timeout(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START, 1, MMC_SANITIZE_REQ_TIMEOUT); if (err) pr_err("%s: %s - mmc_switch() with " "EXT_CSD_SANITIZE_START failed. err=%d\n", mmc_hostname(card->host), __func__, err); pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), __func__); out: blk_end_request(req, err, blk_rq_bytes(req)); return err ? 0 : 1; } static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; int ret = 0; ret = mmc_flush_cache(card); if (ret) ret = -EIO; blk_end_request_all(req, ret); return ret ? 0 : 1; } /* * Reformat current write as a reliable write, supporting * both legacy and the enhanced reliable write MMC cards. * In each transfer we'll handle only as much as a single * reliable write can handle, thus finish the request in * partial completions. */ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, struct mmc_card *card, struct request *req) { if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { /* Legacy mode imposes restrictions on transfers. 
*/ if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) brq->data.blocks = 1; if (brq->data.blocks > card->ext_csd.rel_sectors) brq->data.blocks = card->ext_csd.rel_sectors; else if (brq->data.blocks < card->ext_csd.rel_sectors) brq->data.blocks = 1; } } #define CMD_ERRORS \ (R1_OUT_OF_RANGE | /* Command argument out of range */ \ R1_ADDRESS_ERROR | /* Misaligned address */ \ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ R1_WP_VIOLATION | /* Tried to write to protected block */ \ R1_CC_ERROR | /* Card controller error */ \ R1_ERROR) /* General/unknown error */ static int mmc_blk_err_check(struct mmc_card *card, struct mmc_async_req *areq) { struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, mmc_active); struct mmc_blk_request *brq = &mq_mrq->brq; struct request *req = mq_mrq->req; int ecc_err = 0, gen_err = 0; /* * sbc.error indicates a problem with the set block count * command. No data will have been transferred. * * cmd.error indicates a problem with the r/w command. No * data will have been transferred. * * stop.error indicates a problem with the stop command. Data * may have been transferred, or may still be transferring. */ if (brq->sbc.error || brq->cmd.error || brq->stop.error || brq->data.error) { switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { case ERR_RETRY: return MMC_BLK_RETRY; case ERR_ABORT: return MMC_BLK_ABORT; case ERR_NOMEDIUM: return MMC_BLK_NOMEDIUM; case ERR_CONTINUE: break; } } /* * Check for errors relating to the execution of the * initial command - such as address errors. No data * has been transferred. */ if (brq->cmd.resp[0] & CMD_ERRORS) { pr_err("%s: r/w command failed, status = %#x\n", req->rq_disk->disk_name, brq->cmd.resp[0]); return MMC_BLK_ABORT; } /* * Everything else is either success, or a data error of some * kind. If it was a write, we may have transitioned to * program mode, and we have to wait for it to complete. */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { u32 status; unsigned long timeout; /* Check stop command response */ if (brq->stop.resp[0] & R1_ERROR) { pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", req->rq_disk->disk_name, __func__, brq->stop.resp[0]); gen_err = 1; } timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS); do { int err = get_card_status(card, &status, 5); if (err) { pr_err("%s: error %d requesting status\n", req->rq_disk->disk_name, err); return MMC_BLK_CMD_ERR; } /* Timeout if the device never becomes ready for data * and never leaves the program state. */ if (time_after(jiffies, timeout)) { pr_err("%s: Card stuck in programming state!"\ " %s %s\n", mmc_hostname(card->host), req->rq_disk->disk_name, __func__); return MMC_BLK_CMD_ERR; } if (status & R1_ERROR) { pr_err("%s: %s: general error sending status command, card status %#x\n", req->rq_disk->disk_name, __func__, status); gen_err = 1; } /* * Some cards mishandle the status bits, * so make sure to check both the busy * indication and the card state. */ } while (!(status & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(status) == R1_STATE_PRG)); } /* If a general error occurs, retry the write operation.
*/ if (gen_err) { pr_warning("%s: retrying write for general error\n", req->rq_disk->disk_name); return MMC_BLK_RETRY; } if (brq->data.error) { pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", req->rq_disk->disk_name, brq->data.error, (unsigned)blk_rq_pos(req), (unsigned)blk_rq_sectors(req), brq->cmd.resp[0], brq->stop.resp[0]); if (rq_data_dir(req) == READ) { if (ecc_err) return MMC_BLK_ECC_ERR; return MMC_BLK_DATA_ERR; } else { return MMC_BLK_CMD_ERR; } } if (!brq->data.bytes_xfered) return MMC_BLK_RETRY; if (mq_mrq->packed_cmd != MMC_PACKED_NONE) { if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) return MMC_BLK_PARTIAL; else return MMC_BLK_SUCCESS; } if (blk_rq_bytes(req) != brq->data.bytes_xfered) return MMC_BLK_PARTIAL; return MMC_BLK_SUCCESS; } static int mmc_blk_packed_err_check(struct mmc_card *card, struct mmc_async_req *areq) { struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); struct request *req = mq_rq->req; int err, check, status; u8 ext_csd[512]; mq_rq->packed_retries--; check = mmc_blk_err_check(card, areq); err = get_card_status(card, &status, 0); if (err) { pr_err("%s: error %d sending status command\n", req->rq_disk->disk_name, err); return MMC_BLK_ABORT; } if (status & R1_EXCEPTION_EVENT) { err = mmc_send_ext_csd(card, ext_csd); if (err) { pr_err("%s: error %d sending ext_csd\n", req->rq_disk->disk_name, err); return MMC_BLK_ABORT; } if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & EXT_CSD_PACKED_FAILURE) && (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & EXT_CSD_PACKED_GENERIC_ERROR)) { if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & EXT_CSD_PACKED_INDEXED_ERROR) { mq_rq->packed_fail_idx = ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1; return MMC_BLK_PARTIAL; } } } return check; } static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, int disable_multi, struct mmc_queue *mq) { u32 readcmd, writecmd; struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mqrq->req; struct mmc_blk_data *md = mq->data; bool do_data_tag; /* * Reliable writes are used to implement Forced Unit Access and * REQ_META accesses, and are supported only on MMCs. * * XXX: this really needs a good explanation of why REQ_META * is treated special. */ bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || (req->cmd_flags & REQ_META)) && (rq_data_dir(req) == WRITE) && (md->flags & MMC_BLK_REL_WR); memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.cmd = &brq->cmd; brq->mrq.data = &brq->data; brq->cmd.arg = blk_rq_pos(req); if (!mmc_card_blockaddr(card)) brq->cmd.arg <<= 9; brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; brq->data.blksz = 512; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; if (rq_data_dir(req) == WRITE) brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; else brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1B | MMC_CMD_AC; brq->data.blocks = blk_rq_sectors(req); brq->data.fault_injected = false; /* * The block layer doesn't support all sector count * restrictions, so we need to be prepared for too big * requests. */ if (brq->data.blocks > card->host->max_blk_count) brq->data.blocks = card->host->max_blk_count; if (brq->data.blocks > 1) { /* * After a read error, we redo the request one sector * at a time in order to accurately determine which * sectors can be read successfully. 
*/ if (disable_multi) brq->data.blocks = 1; /* Some controllers can't do multiblock reads due to hw bugs */ if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ && rq_data_dir(req) == READ) brq->data.blocks = 1; } if (brq->data.blocks > 1 || do_rel_wr) { /* SPI multiblock writes terminate using a special * token, not a STOP_TRANSMISSION request. */ if (!mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) brq->mrq.stop = &brq->stop; readcmd = MMC_READ_MULTIPLE_BLOCK; writecmd = MMC_WRITE_MULTIPLE_BLOCK; } else { brq->mrq.stop = NULL; readcmd = MMC_READ_SINGLE_BLOCK; writecmd = MMC_WRITE_BLOCK; } if (rq_data_dir(req) == READ) { brq->cmd.opcode = readcmd; brq->data.flags |= MMC_DATA_READ; } else { brq->cmd.opcode = writecmd; brq->data.flags |= MMC_DATA_WRITE; } if (do_rel_wr) mmc_apply_rel_rw(brq, card, req); /* * Data tag is used only during writing meta data to speed * up write and any subsequent read of this meta data */ do_data_tag = (card->ext_csd.data_tag_unit_size) && (req->cmd_flags & REQ_META) && (rq_data_dir(req) == WRITE) && ((brq->data.blocks * brq->data.blksz) >= card->ext_csd.data_tag_unit_size); /* * Pre-defined multi-block transfers are preferable to * open ended-ones (and necessary for reliable writes). * However, it is not sufficient to just send CMD23, * and avoid the final CMD12, as on an error condition * CMD12 (stop) needs to be sent anyway. This, coupled * with Auto-CMD23 enhancements provided by some * hosts, means that the complexity of dealing * with this is best left to the host. If CMD23 is * supported by card and host, we'll fill sbc in and let * the host deal with handling it correctly. This means * that for hosts that don't expose MMC_CAP_CMD23, no * change of behavior will be observed. * * N.B: Some MMC cards experience perf degradation. * We'll avoid using CMD23-bounded multiblock writes for * these, while retaining features like reliable writes. */ if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || do_data_tag)) { brq->sbc.opcode = MMC_SET_BLOCK_COUNT; brq->sbc.arg = brq->data.blocks | (do_rel_wr ? (1 << 31) : 0) | (do_data_tag ? (1 << 29) : 0); brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->mrq.sbc = &brq->sbc; } mmc_set_data_timeout(&brq->data, card); brq->data.sg = mqrq->sg; brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); /* * Adjust the sg list so it is the same size as the * request. */ if (brq->data.blocks != blk_rq_sectors(req)) { int i, data_size = brq->data.blocks << 9; struct scatterlist *sg; for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { data_size -= sg->length; if (data_size <= 0) { sg->length += data_size; i++; break; } } brq->data.sg_len = i; } mqrq->mmc_active.mrq = &brq->mrq; mqrq->mmc_active.err_check = mmc_blk_err_check; mmc_queue_bounce_pre(mqrq); } static void mmc_blk_write_packing_control(struct mmc_queue *mq, struct request *req) { struct mmc_host *host = mq->card->host; int data_dir; if (!(host->caps2 & MMC_CAP2_PACKED_WR)) return; /* * In case the packing control is not supported by the host, it should * not have an effect on the write packing. 
Therefore we have to enable * the write packing. */ if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) { mq->wr_packing_enabled = true; return; } if (!req || (req && (req->cmd_flags & REQ_FLUSH))) { if (mq->num_of_potential_packed_wr_reqs > mq->num_wr_reqs_to_start_packing) mq->wr_packing_enabled = true; mq->num_of_potential_packed_wr_reqs = 0; return; } data_dir = rq_data_dir(req); if (data_dir == READ) { mq->num_of_potential_packed_wr_reqs = 0; mq->wr_packing_enabled = false; return; } else if (data_dir == WRITE) { mq->num_of_potential_packed_wr_reqs++; } if (mq->num_of_potential_packed_wr_reqs > mq->num_wr_reqs_to_start_packing) mq->wr_packing_enabled = true; } struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card) { if (!card) return NULL; return &card->wr_pack_stats; } EXPORT_SYMBOL(mmc_blk_get_packed_statistics); void mmc_blk_init_packed_statistics(struct mmc_card *card) { int max_num_of_packed_reqs = 0; if (!card || !card->wr_pack_stats.packing_events) return; max_num_of_packed_reqs = card->ext_csd.max_packed_writes; spin_lock(&card->wr_pack_stats.lock); memset(card->wr_pack_stats.packing_events, 0, (max_num_of_packed_reqs + 1) * sizeof(*card->wr_pack_stats.packing_events)); memset(&card->wr_pack_stats.pack_stop_reason, 0, sizeof(card->wr_pack_stats.pack_stop_reason)); card->wr_pack_stats.enabled = true; spin_unlock(&card->wr_pack_stats.lock); } EXPORT_SYMBOL(mmc_blk_init_packed_statistics); void print_mmc_packing_stats(struct mmc_card *card) { int i; int max_num_of_packed_reqs = 0; if ((!card) || (!card->wr_pack_stats.packing_events)) return; max_num_of_packed_reqs = card->ext_csd.max_packed_writes; spin_lock(&card->wr_pack_stats.lock); pr_info("%s: write packing statistics:\n", mmc_hostname(card->host)); for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) { if (card->wr_pack_stats.packing_events[i] != 0) pr_info("%s: Packed %d reqs - %d times\n", mmc_hostname(card->host), i, card->wr_pack_stats.packing_events[i]); } pr_info("%s: stopped packing due to the following reasons:\n", mmc_hostname(card->host)); if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) pr_info("%s: %d times: exceed max num of segments\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]); if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]) pr_info("%s: %d times: exceeding the max num of sectors\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]); if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]) pr_info("%s: %d times: wrong data direction\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]); if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]) pr_info("%s: %d times: flush or discard\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]); if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]) pr_info("%s: %d times: empty queue\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]); if (card->wr_pack_stats.pack_stop_reason[REL_WRITE]) pr_info("%s: %d times: rel write\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[REL_WRITE]); if (card->wr_pack_stats.pack_stop_reason[THRESHOLD]) pr_info("%s: %d times: Threshold\n", mmc_hostname(card->host), card->wr_pack_stats.pack_stop_reason[THRESHOLD]); spin_unlock(&card->wr_pack_stats.lock); } EXPORT_SYMBOL(print_mmc_packing_stats); static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) { struct request_queue *q = mq->queue; struct
mmc_card *card = mq->card; struct request *cur = req, *next = NULL; struct mmc_blk_data *md = mq->data; bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN; unsigned int req_sectors = 0, phys_segments = 0; unsigned int max_blk_count, max_phys_segs; u8 put_back = 0; u8 max_packed_rw = 0; u8 reqs = 0; struct mmc_wr_pack_stats *stats = &card->wr_pack_stats; mmc_blk_clear_packed(mq->mqrq_cur); if (!(md->flags & MMC_BLK_CMD23) || !card->ext_csd.packed_event_en) goto no_packed; if (!mq->wr_packing_enabled) goto no_packed; if ((rq_data_dir(cur) == WRITE) && (card->host->caps2 & MMC_CAP2_PACKED_WR)) max_packed_rw = card->ext_csd.max_packed_writes; if (max_packed_rw == 0) goto no_packed; if (mmc_req_rel_wr(cur) && (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) goto no_packed; if (mmc_large_sec(card) && !IS_ALIGNED(blk_rq_sectors(cur), 8)) goto no_packed; max_blk_count = min(card->host->max_blk_count, card->host->max_req_size >> 9); if (unlikely(max_blk_count > 0xffff)) max_blk_count = 0xffff; max_phys_segs = queue_max_segments(q); req_sectors += blk_rq_sectors(cur); phys_segments += cur->nr_phys_segments; if (rq_data_dir(cur) == WRITE) { req_sectors++; phys_segments++; } spin_lock(&stats->lock); while (reqs < max_packed_rw - 1) { /* Stop packing while still inside the adaptive no-packing period */ if ((card->host->caps2 & MMC_CAP2_ADAPT_PACKED) && time_is_after_jiffies(mq->nopacked_period)) break; spin_lock_irq(q->queue_lock); next = blk_fetch_request(q); spin_unlock_irq(q->queue_lock); if (!next) { MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE); break; } if (mmc_large_sec(card) && !IS_ALIGNED(blk_rq_sectors(next), 8)) { MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN); put_back = 1; break; } if (next->cmd_flags & REQ_DISCARD || next->cmd_flags & REQ_FLUSH) { MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD); put_back = 1; break; } if (rq_data_dir(cur) != rq_data_dir(next)) { MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR); put_back = 1; break; } if (mmc_req_rel_wr(next) && (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) { MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE); put_back = 1; break; } req_sectors += blk_rq_sectors(next); if (req_sectors > max_blk_count) { if (stats->enabled) stats->pack_stop_reason[EXCEEDS_SECTORS]++; put_back = 1; break; } phys_segments += next->nr_phys_segments; if (phys_segments > max_phys_segs) { MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS); put_back = 1; break; } if (rq_data_dir(next) == WRITE) { mq->num_of_potential_packed_wr_reqs++; if (card->ext_csd.bkops_en) card->bkops_info.sectors_changed += blk_rq_sectors(next); } list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list); cur = next; reqs++; } if (put_back) { spin_lock_irq(q->queue_lock); blk_requeue_request(q, next); spin_unlock_irq(q->queue_lock); } if (stats->enabled) { if (reqs + 1 <= card->ext_csd.max_packed_writes) stats->packing_events[reqs + 1]++; if (reqs + 1 == max_packed_rw) MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD); } spin_unlock(&stats->lock); if (reqs > 0) { list_add(&req->queuelist, &mq->mqrq_cur->packed_list); mq->mqrq_cur->packed_num = ++reqs; mq->mqrq_cur->packed_retries = reqs; return reqs; } no_packed: mmc_blk_clear_packed(mq->mqrq_cur); return 0; } static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, struct mmc_queue *mq) {
struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mqrq->req; struct request *prq; struct mmc_blk_data *md = mq->data; bool do_rel_wr, do_data_tag; u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr; u8 i = 1; mqrq->packed_cmd = MMC_PACKED_WRITE; mqrq->packed_blocks = 0; mqrq->packed_fail_idx = MMC_PACKED_N_IDX; memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr)); packed_cmd_hdr[0] = (mqrq->packed_num << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER; /* * Argument for each entry of packed group */ list_for_each_entry(prq, &mqrq->packed_list, queuelist) { do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); do_data_tag = (card->ext_csd.data_tag_unit_size) && (prq->cmd_flags & REQ_META) && (rq_data_dir(prq) == WRITE) && ((brq->data.blocks * brq->data.blksz) >= card->ext_csd.data_tag_unit_size); /* Argument of CMD23 */ packed_cmd_hdr[(i * 2)] = (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | blk_rq_sectors(prq); /* Argument of CMD18 or CMD25 */ packed_cmd_hdr[(i * 2) + 1] = mmc_card_blockaddr(card) ? blk_rq_pos(prq) : blk_rq_pos(prq) << 9; mqrq->packed_blocks += blk_rq_sectors(prq); i++; } memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.cmd = &brq->cmd; brq->mrq.data = &brq->data; brq->mrq.sbc = &brq->sbc; brq->mrq.stop = &brq->stop; brq->sbc.opcode = MMC_SET_BLOCK_COUNT; brq->sbc.arg = MMC_CMD23_ARG_PACKED | (mqrq->packed_blocks + 1); brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; brq->cmd.arg = blk_rq_pos(req); if (!mmc_card_blockaddr(card)) brq->cmd.arg <<= 9; brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; brq->data.blksz = 512; brq->data.blocks = mqrq->packed_blocks + 1; brq->data.flags |= MMC_DATA_WRITE; brq->data.fault_injected = false; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; mmc_set_data_timeout(&brq->data, card); brq->data.sg = mqrq->sg; brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); mqrq->mmc_active.mrq = &brq->mrq; /* * This is intended for packed command test usage - in case these * functions are not in use, the respective pointers are NULL */ if (mq->err_check_fn) mqrq->mmc_active.err_check = mq->err_check_fn; else mqrq->mmc_active.err_check = mmc_blk_packed_err_check; if (mq->packed_test_fn) mq->packed_test_fn(mq->queue, mqrq); mmc_queue_bounce_pre(mqrq); } static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, struct mmc_blk_request *brq, struct request *req, int ret) { struct mmc_queue_req *mq_rq; mq_rq = container_of(brq, struct mmc_queue_req, brq); /* * If this is an SD card and we're writing, we can first * mark the known good sectors as ok. * * If the card is not SD, we can still ok written sectors * as reported by the controller (which might be less than * the real number of written sectors, but never more).
*/ if (mmc_card_sd(card)) { u32 blocks; if (!brq->data.fault_injected) { blocks = mmc_sd_num_wr_blocks(card); if (blocks != (u32)-1) ret = blk_end_request(req, 0, blocks << 9); } else ret = blk_end_request(req, 0, brq->data.bytes_xfered); } else { if (mq_rq->packed_cmd == MMC_PACKED_NONE) ret = blk_end_request(req, 0, brq->data.bytes_xfered); } return ret; } static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq) { struct request *prq; int idx = mq_rq->packed_fail_idx, i = 0; int ret = 0; while (!list_empty(&mq_rq->packed_list)) { prq = list_entry_rq(mq_rq->packed_list.next); if (idx == i) { /* retry from error index */ mq_rq->packed_num -= idx; mq_rq->req = prq; ret = 1; if (mq_rq->packed_num == MMC_PACKED_N_SINGLE) { list_del_init(&prq->queuelist); mmc_blk_clear_packed(mq_rq); } return ret; } list_del_init(&prq->queuelist); blk_end_request(prq, 0, blk_rq_bytes(prq)); i++; } mmc_blk_clear_packed(mq_rq); return ret; } static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq) { struct request *prq; while (!list_empty(&mq_rq->packed_list)) { prq = list_entry_rq(mq_rq->packed_list.next); list_del_init(&prq->queuelist); blk_end_request(prq, -EIO, blk_rq_bytes(prq)); } mmc_blk_clear_packed(mq_rq); } static void mmc_blk_revert_packed_req(struct mmc_queue *mq, struct mmc_queue_req *mq_rq) { struct request *prq; struct request_queue *q = mq->queue; while (!list_empty(&mq_rq->packed_list)) { prq = list_entry_rq(mq_rq->packed_list.prev); if (prq->queuelist.prev != &mq_rq->packed_list) { list_del_init(&prq->queuelist); spin_lock_irq(q->queue_lock); blk_requeue_request(mq->queue, prq); spin_unlock_irq(q->queue_lock); } else { list_del_init(&prq->queuelist); } } mmc_blk_clear_packed(mq_rq); } static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; struct mmc_blk_request *brq = &mq->mqrq_cur->brq; int ret = 1, disable_multi = 0, retry = 0, type; enum mmc_blk_status status; struct mmc_queue_req *mq_rq; struct request *req; struct mmc_async_req *areq; const u8 packed_num = 2; u8 reqs = 0; if (!rqc && !mq->mqrq_prev->req) return 0; if (rqc) { if ((card->ext_csd.bkops_en) && (rq_data_dir(rqc) == WRITE)) card->bkops_info.sectors_changed += blk_rq_sectors(rqc); reqs = mmc_blk_prep_packed_list(mq, rqc); } do { if (rqc) { if (reqs >= packed_num) mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, card, mq); else mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); areq = &mq->mqrq_cur->mmc_active; } else areq = NULL; areq = mmc_start_req(card->host, areq, (int *) &status); if (!areq) { if (status == MMC_BLK_NEW_REQUEST) mq->flags |= MMC_QUEUE_NEW_REQUEST; return 0; } mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); brq = &mq_rq->brq; req = mq_rq->req; type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; mmc_queue_bounce_post(mq_rq); switch (status) { case MMC_BLK_SUCCESS: case MMC_BLK_PARTIAL: /* * A block was successfully transferred. */ mmc_blk_reset_success(md, type); if (mq_rq->packed_cmd != MMC_PACKED_NONE) { ret = mmc_blk_end_packed_req(mq_rq); break; } else { ret = blk_end_request(req, 0, brq->data.bytes_xfered); } /* * If the blk_end_request function returns non-zero even * though all data has been transferred and no errors * were returned by the host controller, it's a bug. 
*/ if (status == MMC_BLK_SUCCESS && ret) { pr_err("%s BUG rq_tot %d d_xfer %d\n", __func__, blk_rq_bytes(req), brq->data.bytes_xfered); rqc = NULL; goto cmd_abort; } break; case MMC_BLK_CMD_ERR: ret = mmc_blk_cmd_err(md, card, brq, req, ret); if (!mmc_blk_reset(md, card->host, type)) break; goto cmd_abort; case MMC_BLK_RETRY: if (retry++ < 5) break; /* Fall through */ case MMC_BLK_ABORT: if (!mmc_blk_reset(md, card->host, type)) break; goto cmd_abort; case MMC_BLK_DATA_ERR: { int err; err = mmc_blk_reset(md, card->host, type); if (!err) break; if (err == -ENODEV || mq_rq->packed_cmd != MMC_PACKED_NONE) goto cmd_abort; /* Fall through */ } case MMC_BLK_ECC_ERR: if (brq->data.blocks > 1) { /* Redo read one sector at a time */ pr_warning("%s: retrying using single block read\n", req->rq_disk->disk_name); disable_multi = 1; break; } /* * After an error, we redo I/O one sector at a * time, so we only reach here after trying to * read a single sector. */ ret = blk_end_request(req, -EIO, brq->data.blksz); if (!ret) goto start_new_req; break; case MMC_BLK_NOMEDIUM: goto cmd_abort; default: pr_err("%s: Unhandled return value (%d)", req->rq_disk->disk_name, status); goto cmd_abort; } if (ret) { if (mq_rq->packed_cmd == MMC_PACKED_NONE) { /* * In case of an incomplete request * prepare it again and resend. */ mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); mmc_start_req(card->host, &mq_rq->mmc_active, NULL); } else { if (!mq_rq->packed_retries) goto cmd_abort; mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq); mmc_start_req(card->host, &mq_rq->mmc_active, NULL); } } } while (ret); return 1; cmd_abort: if (mq_rq->packed_cmd == MMC_PACKED_NONE) { if (mmc_card_removed(card)) req->cmd_flags |= REQ_QUIET; while (ret) ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); } else { mmc_blk_abort_packed_req(mq_rq); } start_new_req: if (rqc) { /* * If the current request is packed, it needs to be put back.
*/ if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) mmc_blk_revert_packed_req(mq, mq->mqrq_cur); mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); } return 0; } static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) { int ret; struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; struct mmc_host *host = card->host; unsigned long flags; #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME if (mmc_bus_needs_resume(card->host)) { mmc_resume_bus(card->host); mmc_blk_set_blksize(md, card); } #endif if (req && !mq->mqrq_prev->req) { /* claim host only for the first request */ mmc_claim_host(card->host); if (card->ext_csd.bkops_en) mmc_stop_bkops(card); } ret = mmc_blk_part_switch(card, md); if (ret) { if (req) { blk_end_request_all(req, -EIO); } ret = 0; goto out; } mmc_blk_write_packing_control(mq, req); mq->flags &= ~MMC_QUEUE_NEW_REQUEST; if (req && req->cmd_flags & REQ_SANITIZE) { /* complete ongoing async transfer before issuing sanitize */ if (card->host && card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); ret = mmc_blk_issue_sanitize_rq(mq, req); } else if (req && req->cmd_flags & REQ_DISCARD) { /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); if (req->cmd_flags & REQ_SECURE && !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); } else if (req && req->cmd_flags & REQ_FLUSH) { /* complete ongoing async transfer before issuing flush */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); ret = mmc_blk_issue_flush(mq, req); } else { if (!req && host->areq) { spin_lock_irqsave(&host->context_info.lock, flags); host->context_info.is_waiting_last_req = true; spin_unlock_irqrestore(&host->context_info.lock, flags); } ret = mmc_blk_issue_rw_rq(mq, req); } out: if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) /* release host only when there are no more requests */ mmc_release_host(card->host); return ret; } static inline int mmc_blk_readonly(struct mmc_card *card) { return mmc_card_readonly(card) || !(card->csd.cmdclass & CCC_BLOCK_WRITE); } static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, struct device *parent, sector_t size, bool default_ro, const char *subname, int area_type) { struct mmc_blk_data *md; int devidx, ret; unsigned int percentage = BKOPS_SIZE_PERCENTAGE_TO_QUEUE_DELAYED_WORK; devidx = find_first_zero_bit(dev_use, max_devices); if (devidx >= max_devices) return ERR_PTR(-ENOSPC); __set_bit(devidx, dev_use); md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); if (!md) { ret = -ENOMEM; goto out; } /* * !subname implies we are creating main mmc_blk_data that will be * associated with mmc_card with mmc_set_drvdata. Due to device * partitions, devidx will not coincide with a per-physical card * index anymore so we keep track of a name index. */ if (!subname) { md->name_idx = find_first_zero_bit(name_use, max_devices); __set_bit(md->name_idx, name_use); } else md->name_idx = ((struct mmc_blk_data *) dev_to_disk(parent)->private_data)->name_idx; md->area_type = area_type; /* * Set the read-only status based on the supported commands * and the write protect switch. 
*/ md->read_only = mmc_blk_readonly(card); md->disk = alloc_disk(perdev_minors); if (md->disk == NULL) { ret = -ENOMEM; goto err_kfree; } spin_lock_init(&md->lock); INIT_LIST_HEAD(&md->part); md->usage = 1; ret = mmc_init_queue(&md->queue, card, &md->lock, subname); if (ret) goto err_putdisk; md->queue.issue_fn = mmc_blk_issue_rq; md->queue.data = md; md->disk->major = MMC_BLOCK_MAJOR; md->disk->first_minor = devidx * perdev_minors; md->disk->fops = &mmc_bdops; md->disk->private_data = md; md->disk->queue = md->queue.queue; md->disk->driverfs_dev = parent; set_disk_ro(md->disk, md->read_only || default_ro); md->disk->flags = GENHD_FL_EXT_DEVT; /* * As discussed on lkml, GENHD_FL_REMOVABLE should: * * - be set for removable media with permanent block devices * - be unset for removable block devices with permanent media * * Since MMC block devices clearly fall under the second * case, we do not set GENHD_FL_REMOVABLE. Userspace * should use the block device creation/destruction hotplug * messages to tell when the card is present. */ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), "mmcblk%d%s", md->name_idx, subname ? subname : ""); blk_queue_logical_block_size(md->queue.queue, 512); set_capacity(md->disk, size); card->bkops_info.size_percentage_to_queue_delayed_work = percentage; card->bkops_info.min_sectors_to_queue_delayed_work = ((unsigned int)size * percentage) / 100; if (mmc_host_cmd23(card->host)) { if (mmc_card_mmc(card) || (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT && mmc_sd_card_uhs(card))) md->flags |= MMC_BLK_CMD23; } if (mmc_card_mmc(card) && md->flags & MMC_BLK_CMD23 && ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || card->ext_csd.rel_sectors)) { md->flags |= MMC_BLK_REL_WR; blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); } return md; err_putdisk: put_disk(md->disk); err_kfree: kfree(md); out: return ERR_PTR(ret); } static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) { sector_t size; struct mmc_blk_data *md; if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { /* * The EXT_CSD sector count is in number of 512 byte * sectors. */ size = card->ext_csd.sectors; } else { /* * The CSD capacity field is in units of read_blkbits. * set_capacity takes units of 512 bytes. */ size = card->csd.capacity << (card->csd.read_blkbits - 9); } md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL, MMC_BLK_DATA_AREA_MAIN); return md; } static int mmc_blk_alloc_part(struct mmc_card *card, struct mmc_blk_data *md, unsigned int part_type, sector_t size, bool default_ro, const char *subname, int area_type) { char cap_str[10]; struct mmc_blk_data *part_md; part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, subname, area_type); if (IS_ERR(part_md)) return PTR_ERR(part_md); part_md->part_type = part_type; list_add(&part_md->part, &md->part); string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s partition %u %s\n", part_md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), part_md->part_type, cap_str); return 0; } /* MMC Physical partitions consist of two boot partitions and * up to four general purpose partitions. * For each partition enabled in EXT_CSD a block device will be allocated * to provide access to the partition.
*/ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) { int idx, ret = 0; if (!mmc_card_mmc(card)) return 0; for (idx = 0; idx < card->nr_parts; idx++) { if (card->part[idx].size) { ret = mmc_blk_alloc_part(card, md, card->part[idx].part_cfg, card->part[idx].size >> 9, card->part[idx].force_ro, card->part[idx].name, card->part[idx].area_type); if (ret) return ret; } } return ret; } static void mmc_blk_remove_req(struct mmc_blk_data *md) { struct mmc_card *card; if (md) { card = md->queue.card; device_remove_file(disk_to_dev(md->disk), &md->num_wr_reqs_to_start_packing); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && card->ext_csd.boot_ro_lockable) device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); /* Stop new requests from getting into the queue */ del_gendisk(md->disk); } /* Then flush out any already in there */ mmc_cleanup_queue(&md->queue); mmc_blk_put(md); } } static void mmc_blk_remove_parts(struct mmc_card *card, struct mmc_blk_data *md) { struct list_head *pos, *q; struct mmc_blk_data *part_md; __clear_bit(md->name_idx, name_use); list_for_each_safe(pos, q, &md->part) { part_md = list_entry(pos, struct mmc_blk_data, part); list_del(pos); mmc_blk_remove_req(part_md); } } static int mmc_add_disk(struct mmc_blk_data *md) { int ret; struct mmc_card *card = md->queue.card; add_disk(md->disk); md->force_ro.show = force_ro_show; md->force_ro.store = force_ro_store; sysfs_attr_init(&md->force_ro.attr); md->force_ro.attr.name = "force_ro"; md->force_ro.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); if (ret) goto force_ro_fail; if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && card->ext_csd.boot_ro_lockable) { umode_t mode; if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) mode = S_IRUGO; else mode = S_IRUGO | S_IWUSR; md->power_ro_lock.show = power_ro_lock_show; md->power_ro_lock.store = power_ro_lock_store; sysfs_attr_init(&md->power_ro_lock.attr); md->power_ro_lock.attr.mode = mode; md->power_ro_lock.attr.name = "ro_lock_until_next_power_on"; ret = device_create_file(disk_to_dev(md->disk), &md->power_ro_lock); if (ret) goto power_ro_lock_fail; } md->num_wr_reqs_to_start_packing.show = num_wr_reqs_to_start_packing_show; md->num_wr_reqs_to_start_packing.store = num_wr_reqs_to_start_packing_store; sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr); md->num_wr_reqs_to_start_packing.attr.name = "num_wr_reqs_to_start_packing"; md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(disk_to_dev(md->disk), &md->num_wr_reqs_to_start_packing); if (ret) goto num_wr_reqs_to_start_packing_fail; md->bkops_check_threshold.show = bkops_check_threshold_show; md->bkops_check_threshold.store = bkops_check_threshold_store; sysfs_attr_init(&md->bkops_check_threshold.attr); md->bkops_check_threshold.attr.name = "bkops_check_threshold"; md->bkops_check_threshold.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(disk_to_dev(md->disk), &md->bkops_check_threshold); if (ret) goto bkops_check_threshold_fails; return ret; bkops_check_threshold_fails: device_remove_file(disk_to_dev(md->disk), &md->num_wr_reqs_to_start_packing); num_wr_reqs_to_start_packing_fail: device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); power_ro_lock_fail: device_remove_file(disk_to_dev(md->disk), &md->force_ro); force_ro_fail: del_gendisk(md->disk); return ret; } #define CID_MANFID_SANDISK 0x2 
#define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 #define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), /* * Some MMC cards experience performance degradation with CMD23 * instead of CMD12-bounded multiblock transfers. For now we'll * blacklist what's bad... * - Certain Toshiba cards. * * N.B. This doesn't affect SD cards. */ MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), /* * Some Micron MMC cards need longer data read timeout than * indicated in CSD. */ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), /* Some INAND MCP devices advertise incorrect timeout values */ MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_INAND_DATA_TIMEOUT), /* * On these Samsung MoviNAND parts, performing secure erase or * secure trim can result in unrecoverable corruption due to a * firmware bug. */ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), END_FIXUP }; #ifdef CONFIG_MMC_SUPPORT_BKOPS_MODE static ssize_t bkops_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk; struct mmc_blk_data *md; struct mmc_card *card; disk = dev_to_disk(dev); if (disk) md = disk->private_data; else goto show_out; if (md) card = md->queue.card; else goto show_out; return snprintf(buf, PAGE_SIZE, "%u\n", card->bkops_enable); show_out: return snprintf(buf, PAGE_SIZE, "\n"); } static ssize_t bkops_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct gendisk *disk; struct mmc_blk_data *md; struct mmc_card *card; u8 value; int err = 0; disk = dev_to_disk(dev); if (disk) md = disk->private_data; else goto store_out; if (md) card = md->queue.card; else goto store_out; if (kstrtou8(buf, 0, &value)) goto store_out; err = mmc_bkops_enable(card->host, value); if (err) return err; return count; store_out: return -EINVAL; } static inline void mmc_blk_bkops_sysfs_init(struct mmc_card *card) { struct mmc_blk_data *md = mmc_get_drvdata(card); card->bkops_attr.show = bkops_mode_show; card->bkops_attr.store = bkops_mode_store;
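/* With the show/store handlers wired up, the attribute is initialized and registered below so userspace can toggle BKOPS through the bkops_en sysfs node. */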
sysfs_attr_init(&card->bkops_attr.attr); card->bkops_attr.attr.name = "bkops_en"; card->bkops_attr.attr.mode = S_IRUGO | S_IWUSR | S_IWGRP; if (device_create_file((disk_to_dev(md->disk)), &card->bkops_attr)) { pr_err("%s: Failed to create bkops_en sysfs entry\n", mmc_hostname(card->host)); #if defined(CONFIG_MMC_BKOPS_NODE_UID) || defined(CONFIG_MMC_BKOPS_NODE_GID) } else { int rc; struct device * dev; dev = disk_to_dev(md->disk); rc = sysfs_chown_file(&dev->kobj, &card->bkops_attr.attr, CONFIG_MMC_BKOPS_NODE_UID, CONFIG_MMC_BKOPS_NODE_GID); if (rc) pr_err("%s: Failed to change mode of sysfs entry\n", mmc_hostname(card->host)); #endif } } #else static inline void mmc_blk_bkops_sysfs_init(struct mmc_card *card) { } #endif static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md, *part_md; char cap_str[10]; /* * Check that the card supports the command class(es) we need. */ if (!(card->csd.cmdclass & CCC_BLOCK_READ)) return -ENODEV; md = mmc_blk_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s %s %s\n", md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), cap_str, md->read_only ? "(ro)" : ""); if (mmc_blk_alloc_parts(card, md)) goto out; mmc_set_drvdata(card, md); mmc_fixup_device(card, blk_fixups); #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME mmc_set_bus_resume_policy(card->host, 1); #endif if (mmc_add_disk(md)) goto out; list_for_each_entry(part_md, &md->part, part) { if (mmc_add_disk(part_md)) goto out; } /* init sysfs for bkops mode */ if (card && mmc_card_mmc(card)) { mmc_blk_bkops_sysfs_init(card); spin_lock_init(&card->bkops_lock); } return 0; out: mmc_blk_remove_parts(card, md); mmc_blk_remove_req(md); return 0; } static void mmc_blk_remove(struct mmc_card *card) { struct mmc_blk_data *md = mmc_get_drvdata(card); mmc_blk_remove_parts(card, md); mmc_claim_host(card->host); mmc_blk_part_switch(card, md); mmc_release_host(card->host); mmc_blk_remove_req(md); mmc_set_drvdata(card, NULL); #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME mmc_set_bus_resume_policy(card->host, 0); #endif } #ifdef CONFIG_PM static int mmc_blk_suspend(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); int rc = 0; if (md) { rc = mmc_queue_suspend(&md->queue); if (rc) goto out; list_for_each_entry(part_md, &md->part, part) { rc = mmc_queue_suspend(&part_md->queue); if (rc) goto out_resume; } } goto out; out_resume: mmc_queue_resume(&md->queue); list_for_each_entry(part_md, &md->part, part) { mmc_queue_resume(&part_md->queue); } out: return rc; } static int mmc_blk_resume(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { /* * Resume involves the card going into idle state, * so current partition is always the main one. 
*/ md->part_curr = md->part_type; mmc_queue_resume(&md->queue); list_for_each_entry(part_md, &md->part, part) { mmc_queue_resume(&part_md->queue); } } return 0; } #else #define mmc_blk_suspend NULL #define mmc_blk_resume NULL #endif static struct mmc_driver mmc_driver = { .drv = { .name = "mmcblk", }, .probe = mmc_blk_probe, .remove = mmc_blk_remove, .suspend = mmc_blk_suspend, .resume = mmc_blk_resume, }; static int __init mmc_blk_init(void) { int res; if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) pr_info("mmcblk: using %d minors per device\n", perdev_minors); max_devices = 256 / perdev_minors; res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); if (res) goto out; res = mmc_register_driver(&mmc_driver); if (res) goto out2; return 0; out2: unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); out: return res; } static void __exit mmc_blk_exit(void) { mmc_unregister_driver(&mmc_driver); unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); } module_init(mmc_blk_init); module_exit(mmc_blk_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
iJo09/Hybridmax_Kernel_I9505_Lollipop-1
drivers/mmc/card/block.c
C
gpl-2.0
76,834
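A note on the packed WRITE header built in mmc_blk_packed_hdr_wrq_prep() above: word 0 carries the entry count, direction and version, and each queued request then contributes an even/odd word pair holding its CMD23 and CMD25 arguments. The standalone sketch below mirrors that layout outside the kernel. The constant values are assumptions based on the eMMC 4.5 packed-command definitions (the driver gets them from its own headers), and build_packed_hdr()/struct packed_entry are hypothetical names introduced here for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values mirroring the eMMC 4.5 packed-command definitions. */
#define PACKED_CMD_VER        0x01
#define PACKED_CMD_WR         0x02
#define MMC_CMD23_ARG_REL_WR  (1u << 31)
#define MMC_CMD23_ARG_TAG_REQ (1u << 29)

/* Hypothetical stand-in for one queued write request. */
struct packed_entry {
	uint32_t sectors;   /* what blk_rq_sectors(prq) would return */
	uint32_t start;     /* what blk_rq_pos(prq) would return, in sector units */
	int rel_wr;         /* reliable write requested */
	int data_tag;       /* data tag requested */
};

static void build_packed_hdr(uint32_t *hdr, const struct packed_entry *e, int n)
{
	int i;

	/* Word 0: entry count, direction (write) and header version. */
	hdr[0] = ((uint32_t)n << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
	for (i = 0; i < n; i++) {
		/* Even word: the CMD23 argument (flags | block count). */
		hdr[(i + 1) * 2] = (e[i].rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
				   (e[i].data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
				   e[i].sectors;
		/* Odd word: the CMD25 argument (start address). */
		hdr[(i + 1) * 2 + 1] = e[i].start;
	}
}

int main(void)
{
	struct packed_entry reqs[2] = { { 8, 2048, 0, 0 }, { 16, 4096, 1, 0 } };
	uint32_t hdr[8] = { 0 };

	build_packed_hdr(hdr, reqs, 2);
	printf("hdr[0]=0x%08" PRIx32 " hdr[2]=0x%08" PRIx32 " hdr[4]=0x%08" PRIx32 "\n",
	       hdr[0], hdr[2], hdr[4]);
	return 0;
}

In the driver the header itself occupies the first block of the transfer, which is why brq->data.blocks and the CMD23 argument are set to packed_blocks + 1.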
/* * This file is part of the coreboot project. * * Copyright (C) 2007-2009 coresystems GmbH * Copyright (C) 2013 Google Inc. * Copyright (C) 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <arch/acpi.h> #include <console/console.h> #include <device/device.h> #include <gpio.h> #include <stdlib.h> #include <string.h> #include <soc/nhlt.h> #include "ec.h" #include "gpio.h" static const char *oem_id_maxim = "INTEL"; static const char *oem_table_id_maxim = "SCRDMAX"; static void mainboard_init(device_t dev) { mainboard_ec_init(); } static uint8_t select_audio_codec(void) { int audio_db_sel = gpio_get(AUDIO_DB_ID); return audio_db_sel; } static unsigned long mainboard_write_acpi_tables( device_t device, unsigned long current, acpi_rsdp_t *rsdp) { uintptr_t start_addr; uintptr_t end_addr; struct nhlt *nhlt; const char *oem_id = NULL; const char *oem_table_id = NULL; start_addr = current; nhlt = nhlt_init(); if (nhlt == NULL) return start_addr; /* 2 Channel DMIC array. */ if (nhlt_soc_add_dmic_array(nhlt, 2)) printk(BIOS_ERR, "Couldn't add 2CH DMIC array.\n"); /* 4 Channel DMIC array. */ if (nhlt_soc_add_dmic_array(nhlt, 4)) printk(BIOS_ERR, "Couldn't add 4CH DMIC array.\n"); if (select_audio_codec()) { /* ADI Smart Amps for left and right. */ if (nhlt_soc_add_ssm4567(nhlt, AUDIO_LINK_SSP0)) printk(BIOS_ERR, "Couldn't add ssm4567.\n"); } else { /* MAXIM Smart Amps for left and right. */ if (nhlt_soc_add_max98357(nhlt, AUDIO_LINK_SSP0)) printk(BIOS_ERR, "Couldn't add max98357.\n"); oem_id = oem_id_maxim; oem_table_id = oem_table_id_maxim; } /* NAU88l25 Headset codec. */ if (nhlt_soc_add_nau88l25(nhlt, AUDIO_LINK_SSP1)) printk(BIOS_ERR, "Couldn't add headset codec.\n"); end_addr = nhlt_soc_serialize_oem_overrides(nhlt, start_addr, oem_id, oem_table_id); if (end_addr != start_addr) acpi_add_table(rsdp, (void *)start_addr); return end_addr; } /* * mainboard_enable is executed as the first thing after * enumerate_buses(). */ static void mainboard_enable(device_t dev) { dev->ops->init = mainboard_init; dev->ops->write_acpi_tables = mainboard_write_acpi_tables; } struct chip_operations mainboard_ops = { .enable_dev = mainboard_enable, };
latelee/coreboot
src/mainboard/intel/kunimitsu/mainboard.c
C
gpl-2.0
2,726
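The kunimitsu mainboard above picks which smart-amp NHLT endpoints to serialize off a single daughterboard-ID GPIO. Below is a minimal host-side sketch of that selection pattern; gpio_get() is stubbed out and the GPIO number is invented for illustration (the real value comes from the board's gpio.h).

#include <stdio.h>

#define AUDIO_DB_ID 17 /* hypothetical pin; the board defines the real one in gpio.h */

/* Stub standing in for coreboot's gpio_get(): returns the sampled strap level. */
static int gpio_get(int gpio)
{
	(void)gpio;
	return 1; /* pretend the SSM4567 daughterboard is fitted */
}

int main(void)
{
	/* Same decision select_audio_codec() feeds into mainboard_write_acpi_tables(). */
	if (gpio_get(AUDIO_DB_ID))
		printf("strap high: serialize SSM4567 amp endpoints\n");
	else
		printf("strap low: serialize MAX98357 endpoints with the SCRDMAX OEM table\n");
	return 0;
}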
/* * Driver O/S-independent utility routines * * Copyright (C) 1999-2010, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * $Id: bcmutils.c,v 1.210.4.5.2.4.6.19 2010/04/26 06:05:25 Exp $ */ #include <typedefs.h> #include <bcmdefs.h> #include <stdarg.h> #include <bcmutils.h> #ifdef BCMDRIVER #include <osl.h> #include <siutils.h> #else #include <stdio.h> #include <string.h> /* This case for external supplicant use */ #if defined(BCMEXTSUP) #include <bcm_osl.h> #endif #endif /* BCMDRIVER */ #include <bcmendian.h> #include <bcmdevs.h> #include <proto/ethernet.h> #include <proto/vlan.h> #include <proto/bcmip.h> #include <proto/802.1d.h> #include <proto/802.11.h> #ifdef BCMDRIVER /* copy a pkt buffer chain into a buffer */ uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) { uint n, ret = 0; if (len < 0) len = 4096; /* "infinite" */ /* skip 'offset' bytes */ for (; p && offset; p = PKTNEXT(osh, p)) { if (offset < (uint)PKTLEN(osh, p)) break; offset -= PKTLEN(osh, p); } if (!p) return 0; /* copy the data */ for (; p && len; p = PKTNEXT(osh, p)) { n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); bcopy(PKTDATA(osh, p) + offset, buf, n); buf += n; len -= n; ret += n; offset = 0; } return ret; } /* copy a buffer into a pkt buffer chain */ uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf) { uint n, ret = 0; /* skip 'offset' bytes */ for (; p && offset; p = PKTNEXT(osh, p)) { if (offset < (uint)PKTLEN(osh, p)) break; offset -= PKTLEN(osh, p); } if (!p) return 0; /* copy the data */ for (; p && len; p = PKTNEXT(osh, p)) { n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); bcopy(buf, PKTDATA(osh, p) + offset, n); buf += n; len -= n; ret += n; offset = 0; } return ret; } /* return total length of buffer chain */ uint pkttotlen(osl_t *osh, void *p) { uint total; total = 0; for (; p; p = PKTNEXT(osh, p)) total += PKTLEN(osh, p); return (total); } /* return the last buffer of chained pkt */ void * pktlast(osl_t *osh, void *p) { for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p)) ; return (p); } /* count segments of a chained packet */ uint pktsegcnt(osl_t *osh, void *p) { uint cnt; for (cnt = 0; p; p = PKTNEXT(osh, p)) cnt++; return cnt; } /* * osl multiple-precedence packet queue * hi_prec is always >= the number of the highest non-empty precedence */ void * pktq_penq(struct pktq *pq, int prec, void *p) { struct pktq_prec *q; ASSERT(prec >= 0 && prec < pq->num_prec); ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ ASSERT(!pktq_full(pq)); 
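/* Overflow is treated as a caller bug: the total and per-precedence limits are only ASSERTed here, never handled at runtime. */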
ASSERT(!pktq_pfull(pq, prec)); q = &pq->q[prec]; if (q->head) PKTSETLINK(q->tail, p); else q->head = p; q->tail = p; q->len++; pq->len++; if (pq->hi_prec < prec) pq->hi_prec = (uint8)prec; return p; } void * pktq_penq_head(struct pktq *pq, int prec, void *p) { struct pktq_prec *q; ASSERT(prec >= 0 && prec < pq->num_prec); ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ ASSERT(!pktq_full(pq)); ASSERT(!pktq_pfull(pq, prec)); q = &pq->q[prec]; if (q->head == NULL) q->tail = p; PKTSETLINK(p, q->head); q->head = p; q->len++; pq->len++; if (pq->hi_prec < prec) pq->hi_prec = (uint8)prec; return p; } void * pktq_pdeq(struct pktq *pq, int prec) { struct pktq_prec *q; void *p; ASSERT(prec >= 0 && prec < pq->num_prec); q = &pq->q[prec]; if ((p = q->head) == NULL) return NULL; if ((q->head = PKTLINK(p)) == NULL) q->tail = NULL; q->len--; pq->len--; PKTSETLINK(p, NULL); return p; } void * pktq_pdeq_tail(struct pktq *pq, int prec) { struct pktq_prec *q; void *p, *prev; ASSERT(prec >= 0 && prec < pq->num_prec); q = &pq->q[prec]; if ((p = q->head) == NULL) return NULL; for (prev = NULL; p != q->tail; p = PKTLINK(p)) prev = p; if (prev) PKTSETLINK(prev, NULL); else q->head = NULL; q->tail = prev; q->len--; pq->len--; return p; } void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir) { struct pktq_prec *q; void *p; q = &pq->q[prec]; p = q->head; while (p) { q->head = PKTLINK(p); PKTSETLINK(p, NULL); PKTFREE(osh, p, dir); q->len--; pq->len--; p = q->head; } ASSERT(q->len == 0); q->tail = NULL; } bool pktq_pdel(struct pktq *pq, void *pktbuf, int prec) { struct pktq_prec *q; void *p; ASSERT(prec >= 0 && prec < pq->num_prec); if (!pktbuf) return FALSE; q = &pq->q[prec]; if (q->head == pktbuf) { if ((q->head = PKTLINK(pktbuf)) == NULL) q->tail = NULL; } else { for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) ; if (p == NULL) return FALSE; PKTSETLINK(p, PKTLINK(pktbuf)); if (q->tail == pktbuf) q->tail = p; } q->len--; pq->len--; PKTSETLINK(pktbuf, NULL); return TRUE; } void pktq_init(struct pktq *pq, int num_prec, int max_len) { int prec; ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC); /* pq is variable size; only zero out what's requested */ bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); pq->num_prec = (uint16)num_prec; pq->max = (uint16)max_len; for (prec = 0; prec < num_prec; prec++) pq->q[prec].max = pq->max; } void * pktq_deq(struct pktq *pq, int *prec_out) { struct pktq_prec *q; void *p; int prec; if (pq->len == 0) return NULL; while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; q = &pq->q[prec]; if ((p = q->head) == NULL) return NULL; if ((q->head = PKTLINK(p)) == NULL) q->tail = NULL; q->len--; pq->len--; if (prec_out) *prec_out = prec; PKTSETLINK(p, NULL); return p; } void * pktq_deq_tail(struct pktq *pq, int *prec_out) { struct pktq_prec *q; void *p, *prev; int prec; if (pq->len == 0) return NULL; for (prec = 0; prec < pq->hi_prec; prec++) if (pq->q[prec].head) break; q = &pq->q[prec]; if ((p = q->head) == NULL) return NULL; for (prev = NULL; p != q->tail; p = PKTLINK(p)) prev = p; if (prev) PKTSETLINK(prev, NULL); else q->head = NULL; q->tail = prev; q->len--; pq->len--; if (prec_out) *prec_out = prec; PKTSETLINK(p, NULL); return p; } void * pktq_peek(struct pktq *pq, int *prec_out) { int prec; if (pq->len == 0) return NULL; while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; if (prec_out) *prec_out = prec; return (pq->q[prec].head); } void * pktq_peek_tail(struct pktq *pq, int 
*prec_out) { int prec; if (pq->len == 0) return NULL; for (prec = 0; prec < pq->hi_prec; prec++) if (pq->q[prec].head) break; if (prec_out) *prec_out = prec; return (pq->q[prec].tail); } void pktq_flush(osl_t *osh, struct pktq *pq, bool dir) { int prec; for (prec = 0; prec < pq->num_prec; prec++) pktq_pflush(osh, pq, prec, dir); ASSERT(pq->len == 0); } /* Return sum of lengths of a specific set of precedences */ int pktq_mlen(struct pktq *pq, uint prec_bmp) { int prec, len; len = 0; for (prec = 0; prec <= pq->hi_prec; prec++) if (prec_bmp & (1 << prec)) len += pq->q[prec].len; return len; } /* Priority dequeue from a specific set of precedences */ void * pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) { struct pktq_prec *q; void *p; int prec; if (pq->len == 0) return NULL; while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) if (prec-- == 0) return NULL; q = &pq->q[prec]; if ((p = q->head) == NULL) return NULL; if ((q->head = PKTLINK(p)) == NULL) q->tail = NULL; q->len--; if (prec_out) *prec_out = prec; pq->len--; PKTSETLINK(p, NULL); return p; } #endif /* BCMDRIVER */ const unsigned char bcm_ctype[] = { _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */ _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C, _BCM_C, /* 8-15 */ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */ _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */ _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */ _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */ _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */ _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U, /* 64-71 */ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */ _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */ _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L, /* 96-103 */ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */ _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */ _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */ }; ulong bcm_strtoul(char *cp, char **endp, uint base) { ulong result, last_result = 0, value; 
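/* last_result tracks the previously accumulated value so the digit loop below can detect unsigned wrap-around (overflow) and bail out with (ulong)-1. */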
bool minus; minus = FALSE; while (bcm_isspace(*cp)) cp++; if (cp[0] == '+') cp++; else if (cp[0] == '-') { minus = TRUE; cp++; } if (base == 0) { if (cp[0] == '0') { if ((cp[1] == 'x') || (cp[1] == 'X')) { base = 16; cp = &cp[2]; } else { base = 8; cp = &cp[1]; } } else base = 10; } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) { cp = &cp[2]; } result = 0; while (bcm_isxdigit(*cp) && (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) { result = result*base + value; /* Detected overflow */ if (result < last_result && !minus) return (ulong)-1; last_result = result; cp++; } if (minus) result = (ulong)(-(long)result); if (endp) *endp = (char *)cp; return (result); } int bcm_atoi(char *s) { return (int)bcm_strtoul(s, NULL, 10); } /* return pointer to location of substring 'needle' in 'haystack' */ char* bcmstrstr(char *haystack, char *needle) { int len, nlen; int i; if ((haystack == NULL) || (needle == NULL)) return (haystack); nlen = strlen(needle); len = strlen(haystack) - nlen + 1; for (i = 0; i < len; i++) if (memcmp(needle, &haystack[i], nlen) == 0) return (&haystack[i]); return (NULL); } char* bcmstrcat(char *dest, const char *src) { char *p; p = dest + strlen(dest); while ((*p++ = *src++) != '\0') ; return (dest); } char* bcmstrncat(char *dest, const char *src, uint size) { char *endp; char *p; p = dest + strlen(dest); endp = p + size; while (p != endp && (*p++ = *src++) != '\0') ; return (dest); } /**************************************************************************** * Function: bcmstrtok * * Purpose: * Tokenizes a string. This function is conceptually similar to ANSI C strtok(), * but allows it to be used by different strings or callers at the same * time. Each call modifies '*string' by substituting a NULL character for the * first delimiter that is encountered, and updates 'string' to point to the char * after the delimiter. Leading delimiters are skipped. * * Parameters: * string (mod) Ptr to string ptr, updated by token. * delimiters (in) Set of delimiter characters. * tokdelim (out) Character that delimits the returned token. (May * be set to NULL if token delimiter is not required). * * Returns: Pointer to the next token found. NULL when no more tokens are found. ***************************************************************************** */ char * bcmstrtok(char **string, const char *delimiters, char *tokdelim) { unsigned char *str; unsigned long map[8]; int count; char *nextoken; if (tokdelim != NULL) { /* Prime the token delimiter */ *tokdelim = '\0'; } /* Clear control map */ for (count = 0; count < 8; count++) { map[count] = 0; } /* Set bits in delimiter table */ do { map[*delimiters >> 5] |= (1 << (*delimiters & 31)); } while (*delimiters++); str = (unsigned char*)*string; /* Find beginning of token (skip over leading delimiters). Note that * there is no token iff this loop sets str to point to the terminal * null (*str == '\0') */ while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) { str++; } nextoken = (char*)str; /* Find the end of the token. If it is not the end of the string, * put a null there. */ for (; *str; str++) { if (map[*str >> 5] & (1 << (*str & 31))) { if (tokdelim != NULL) { *tokdelim = *str; } *str++ = '\0'; break; } } *string = (char*)str; /* Determine if a token has been found. */ if (nextoken == (char *) str) { return NULL; } else { return nextoken; } } #define xToLower(C) \ ((C >= 'A' && C <= 'Z') ?
(char)((int)C - (int)'A' + (int)'a') : C) /**************************************************************************** * Function: bcmstricmp * * Purpose: Compare two strings case insensitively. * * Parameters: s1 (in) First string to compare. * s2 (in) Second string to compare. * * Returns: Return 0 if the two strings are equal, -1 if s1 < s2 and 1 if * s1 > s2, when ignoring case sensitivity. ***************************************************************************** */ int bcmstricmp(const char *s1, const char *s2) { char dc, sc; while (*s2 && *s1) { dc = xToLower(*s1); sc = xToLower(*s2); if (dc < sc) return -1; if (dc > sc) return 1; s1++; s2++; } if (*s1 && !*s2) return 1; if (!*s1 && *s2) return -1; return 0; } /**************************************************************************** * Function: bcmstrnicmp * * Purpose: Compare two strings case insensitively, up to a max of 'cnt' * characters. * * Parameters: s1 (in) First string to compare. * s2 (in) Second string to compare. * cnt (in) Max characters to compare. * * Returns: Return 0 if the two strings are equal, -1 if s1 < s2 and 1 if * s1 > s2, when ignoring case sensitivity. ***************************************************************************** */ int bcmstrnicmp(const char* s1, const char* s2, int cnt) { char dc, sc; while (*s2 && *s1 && cnt) { dc = xToLower(*s1); sc = xToLower(*s2); if (dc < sc) return -1; if (dc > sc) return 1; s1++; s2++; cnt--; } if (!cnt) return 0; if (*s1 && !*s2) return 1; if (!*s1 && *s2) return -1; return 0; } /* parse a xx:xx:xx:xx:xx:xx format ethernet address */ int bcm_ether_atoe(char *p, struct ether_addr *ea) { int i = 0; for (;;) { ea->octet[i++] = (char) bcm_strtoul(p, &p, 16); if (!*p++ || i == 6) break; } return (i == 6); } #if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER) /* registry routine buffer preparation utility functions: * parameter order is like strncpy, but returns count * of bytes copied. Minimum bytes copied is null char(1)/wchar(2) */ ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen) { ulong copyct = 1; ushort i; if (abuflen == 0) return 0; /* wbuflen is in bytes */ wbuflen /= sizeof(ushort); for (i = 0; i < wbuflen; ++i) { if (--abuflen == 0) break; *abuf++ = (char) *wbuf++; ++copyct; } *abuf = '\0'; return copyct; } #endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */ char * bcm_ether_ntoa(const struct ether_addr *ea, char *buf) { static const char template[] = "%02x:%02x:%02x:%02x:%02x:%02x"; snprintf(buf, 18, template, ea->octet[0]&0xff, ea->octet[1]&0xff, ea->octet[2]&0xff, ea->octet[3]&0xff, ea->octet[4]&0xff, ea->octet[5]&0xff); return (buf); } char * bcm_ip_ntoa(struct ipv4_addr *ia, char *buf) { snprintf(buf, 16, "%d.%d.%d.%d", ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]); return (buf); } #ifdef BCMDRIVER void bcm_mdelay(uint ms) { uint i; for (i = 0; i < ms; i++) { OSL_DELAY(1000); } } #if defined(DHD_DEBUG) /* pretty hex print a pkt buffer chain */ void prpkt(const char *msg, osl_t *osh, void *p0) { void *p; if (msg && (msg[0] != '\0')) printf("%s:\n", msg); for (p = p0; p; p = PKTNEXT(osh, p)) prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); } #endif /* Takes an Ethernet frame and sets out-of-bound PKTPRIO. * Also updates the vlan tag in place if requested. * For debugging, it returns an indication of what it did.
*/ uint pktsetprio(void *pkt, bool update_vtag) { struct ether_header *eh; struct ethervlan_header *evh; uint8 *pktdata; int priority = 0; int rc = 0; pktdata = (uint8 *) PKTDATA(NULL, pkt); ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16))); eh = (struct ether_header *) pktdata; if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) { uint16 vlan_tag; int vlan_prio, dscp_prio = 0; evh = (struct ethervlan_header *)eh; vlan_tag = ntoh16(evh->vlan_tag); vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) { uint8 *ip_body = pktdata + sizeof(struct ethervlan_header); uint8 tos_tc = IP_TOS(ip_body); /* FMC fix */ uint8 tos = tos_tc >> 2; if (tos == 0x2E) { dscp_prio = 6; } else if (tos == 0x1A) { dscp_prio = 4; } else { dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); } } /* DSCP priority gets precedence over 802.1P (vlan tag) */ if (dscp_prio != 0) { priority = dscp_prio; rc |= PKTPRIO_VDSCP; } else { priority = vlan_prio; rc |= PKTPRIO_VLAN; } /* * If the DSCP priority is not the same as the VLAN priority, * then overwrite the priority field in the vlan tag, with the * DSCP priority value. This is required for Linux APs because * the VLAN driver on Linux, overwrites the skb->priority field * with the priority value in the vlan tag */ if (update_vtag && (priority != vlan_prio)) { vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT; evh->vlan_tag = hton16(vlan_tag); rc |= PKTPRIO_UPD; } } else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { uint8 *ip_body = pktdata + sizeof(struct ether_header); uint8 tos_tc = IP_TOS(ip_body); /* FMC fix */ uint8 tos = tos_tc >> 2; if (tos == 0x2E) { priority = 6; } else if (tos == 0x1A) { priority = 4; } else { priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); } rc |= PKTPRIO_DSCP; } ASSERT(priority >= 0 && priority <= MAXPRIO); PKTSETPRIO(pkt, priority); return (rc | priority); } static char bcm_undeferrstr[BCME_STRLEN]; static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE; /* Convert the error codes into related error strings */ const char * bcmerrorstr(int bcmerror) { /* check if someone added a bcmerror code but forgot to add errorstring */ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); if (bcmerror > 0 || bcmerror < BCME_LAST) { snprintf(bcm_undeferrstr, BCME_STRLEN, "Undefined error %d", bcmerror); return bcm_undeferrstr; } ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN); return bcmerrorstrtable[-bcmerror]; } /* iovar table lookup */ const bcm_iovar_t* bcm_iovar_lookup(const bcm_iovar_t *table, const char *name) { const bcm_iovar_t *vi; const char *lookup_name; /* skip any ':' delimited option prefixes */ lookup_name = strrchr(name, ':'); if (lookup_name != NULL) lookup_name++; else lookup_name = name; ASSERT(table != NULL); for (vi = table; vi->name; vi++) { if (!strcmp(vi->name, lookup_name)) return vi; } /* ran to end of table */ return NULL; /* var name not found */ } int bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) { int bcmerror = 0; /* length check on io buf */ switch (vi->type) { case IOVT_BOOL: case IOVT_INT8: case IOVT_INT16: case IOVT_INT32: case IOVT_UINT8: case IOVT_UINT16: case IOVT_UINT32: /* all integers are int32 sized args at the ioctl interface */ if (len < (int)sizeof(int)) { bcmerror = BCME_BUFTOOSHORT; } break; case IOVT_BUFFER: /* buffer must meet minimum length requirement */ if (len < vi->minlen) { bcmerror = BCME_BUFTOOSHORT; } break; case IOVT_VOID: if (!set) { /* Cannot return 
nil... */ bcmerror = BCME_UNSUPPORTED; } else if (len) { /* Set is an action w/o parameters */ bcmerror = BCME_BUFTOOLONG; } break; default: /* unknown type for length check in iovar info */ ASSERT(0); bcmerror = BCME_UNSUPPORTED; } return bcmerror; } #endif /* BCMDRIVER */ /******************************************************************************* * crc8 * * Computes a crc8 over the input data using the polynomial: * * x^8 + x^7 +x^6 + x^4 + x^2 + 1 * * The caller provides the initial value (either CRC8_INIT_VALUE * or the previous returned value) to allow for processing of * discontiguous blocks of data. When generating the CRC the * caller is responsible for complementing the final return value * and inserting it into the byte stream. When checking, a final * return value of CRC8_GOOD_VALUE indicates a valid CRC. * * Reference: Dallas Semiconductor Application Note 27 * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt * * **************************************************************************** */ STATIC const uint8 crc8_table[256] = { 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F }; #define CRC_INNER_LOOP(n, c, x) \ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff] uint8 hndcrc8( uint8 *pdata, /* pointer to array of data to process */ uint nbytes, /* number of input data bytes to process */ uint8 crc /* either CRC8_INIT_VALUE or previous return value */ ) { /* hard code the crc loop instead of using CRC_INNER_LOOP macro * to avoid the undefined and unnecessary (uint8 >> 8) operation. 
*/ while (nbytes-- > 0) crc = crc8_table[(crc ^ *pdata++) & 0xff]; return crc; } /******************************************************************************* * crc16 * * Computes a crc16 over the input data using the polynomial: * * x^16 + x^12 +x^5 + 1 * * The caller provides the initial value (either CRC16_INIT_VALUE * or the previous returned value) to allow for processing of * discontiguous blocks of data. When generating the CRC the * caller is responsible for complementing the final return value * and inserting it into the byte stream. When checking, a final * return value of CRC16_GOOD_VALUE indicates a valid CRC. * * Reference: Dallas Semiconductor Application Note 27 * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt * * **************************************************************************** */ static const uint16 crc16_table[256] = { 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7, 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876, 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5, 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974, 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3, 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72, 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1, 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70, 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF, 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E, 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD, 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134, 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C, 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB, 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A, 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9, 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78 }; uint16 hndcrc16( uint8 *pdata, /* pointer to array of data to process */ uint nbytes, /* number of input data bytes to process */ uint16 crc /* either CRC16_INIT_VALUE or previous return value */ ) { while (nbytes-- > 0) CRC_INNER_LOOP(16, crc, *pdata++); return crc; } STATIC const uint32 crc32_table[256] = { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 
0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; uint32 hndcrc32( uint8 *pdata, /* pointer to array of data to process */ uint nbytes, /* number of input data bytes to process */ uint32 crc /* either CRC32_INIT_VALUE or previous return value */ ) { uint8 *pend; #ifdef __mips__ uint8 tmp[4]; ulong *tptr = (ulong *)tmp; /* in case the beginning of the buffer isn't aligned */ pend = (uint8 *)((uint)(pdata + 3) & 0xfffffffc); nbytes -= (pend - pdata); while (pdata < pend) CRC_INNER_LOOP(32, crc, *pdata++); /* handle bulk of data as 32-bit words */ pend = pdata + (nbytes & 0xfffffffc); while (pdata < pend) { *tptr = *(ulong *)pdata; pdata += sizeof(ulong *); CRC_INNER_LOOP(32, crc, tmp[0]); 
CRC_INNER_LOOP(32, crc, tmp[1]); CRC_INNER_LOOP(32, crc, tmp[2]); CRC_INNER_LOOP(32, crc, tmp[3]); } /* 1-3 bytes at end of buffer */ pend = pdata + (nbytes & 0x03); while (pdata < pend) CRC_INNER_LOOP(32, crc, *pdata++); #else pend = pdata + nbytes; while (pdata < pend) CRC_INNER_LOOP(32, crc, *pdata++); #endif /* __mips__ */ return crc; } #ifdef notdef #define CLEN 1499 /* CRC Length */ #define CBUFSIZ (CLEN+4) #define CNBUFS 5 /* # of bufs */ void testcrc32(void) { uint j, k, l; uint8 *buf; uint len[CNBUFS]; uint32 crcr; uint32 crc32tv[CNBUFS] = {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110}; ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL); /* step through all possible alignments */ for (l = 0; l <= 4; l++) { for (j = 0; j < CNBUFS; j++) { len[j] = CLEN; for (k = 0; k < len[j]; k++) *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff; } for (j = 0; j < CNBUFS; j++) { crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE); ASSERT(crcr == crc32tv[j]); } } MFREE(buf, CBUFSIZ*CNBUFS); return; } #endif /* notdef */ /* * Advance from the current 1-byte tag/1-byte length/variable-length value * triple, to the next, returning a pointer to the next. * If the current or next TLV is invalid (does not fit in given buffer length), * NULL is returned. * *buflen is not modified if the TLV elt parameter is invalid, or is decremented * by the TLV parameter's length if it is valid. */ bcm_tlv_t * bcm_next_tlv(bcm_tlv_t *elt, int *buflen) { int len; /* validate current elt */ if (!bcm_valid_tlv(elt, *buflen)) return NULL; /* advance to next elt */ len = elt->len; elt = (bcm_tlv_t*)(elt->data + len); *buflen -= (2 + len); /* validate next elt */ if (!bcm_valid_tlv(elt, *buflen)) return NULL; return elt; } /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element * matches tag */ bcm_tlv_t * bcm_parse_tlvs(void *buf, int buflen, uint key) { bcm_tlv_t *elt; int totlen; elt = (bcm_tlv_t*)buf; totlen = buflen; /* find tagged parameter */ while (totlen >= 2) { int len = elt->len; /* validate remaining totlen */ if ((elt->id == key) && (totlen >= (len + 2))) return (elt); elt = (bcm_tlv_t*)((uint8*)elt + (len + 2)); totlen -= (len + 2); } return NULL; } /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element * matches tag. Stop parsing when we see an element whose ID is greater * than the target key. 
*/ bcm_tlv_t * bcm_parse_ordered_tlvs(void *buf, int buflen, uint key) { bcm_tlv_t *elt; int totlen; elt = (bcm_tlv_t*)buf; totlen = buflen; /* find tagged parameter */ while (totlen >= 2) { uint id = elt->id; int len = elt->len; /* Punt if we start seeing IDs > than target key */ if (id > key) return (NULL); /* validate remaining totlen */ if ((id == key) && (totlen >= (len + 2))) return (elt); elt = (bcm_tlv_t*)((uint8*)elt + (len + 2)); totlen -= (len + 2); } return NULL; } #if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ defined(DHD_DEBUG) int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) { int i; char* p = buf; char hexstr[16]; int slen = 0; uint32 bit; const char* name; if (len < 2 || !buf) return 0; buf[0] = '\0'; len -= 1; for (i = 0; flags != 0; i++) { bit = bd[i].bit; name = bd[i].name; if (bit == 0 && flags) { /* print any unnamed bits */ sprintf(hexstr, "0x%X", flags); name = hexstr; flags = 0; /* exit loop */ } else if ((flags & bit) == 0) continue; slen += strlen(name); if (len < slen) break; if (p != buf) p += sprintf(p, " "); /* btwn flag space */ strcat(p, name); p += strlen(name); flags &= ~bit; len -= slen; slen = 1; /* account for btwn flag space */ } /* indicate the str was too short */ if (flags != 0) { if (len == 0) p--; /* overwrite last char */ p += sprintf(p, ">"); } return (int)(p - buf); } /* print bytes formatted as hex to a string. return the resulting string length */ int bcm_format_hex(char *str, const void *bytes, int len) { int i; char *p = str; const uint8 *src = (const uint8*)bytes; for (i = 0; i < len; i++) { p += sprintf(p, "%02X", *src); src++; } return (int)(p - str); } /* pretty hex print a contiguous buffer */ void prhex(const char *msg, uchar *buf, uint nbytes) { char line[128], *p; uint i; if (msg && (msg[0] != '\0')) printf("%s:\n", msg); p = line; for (i = 0; i < nbytes; i++) { if (i % 16 == 0) { p += sprintf(p, " %04d: ", i); /* line prefix */ } p += sprintf(p, "%02x ", buf[i]); if (i % 16 == 15) { printf("%s\n", line); /* flush line */ p = line; } } /* flush last partial line */ if (p != line) printf("%s\n", line); } #endif /* Produce a human-readable string for boardrev */ char * bcm_brev_str(uint32 brev, char *buf) { if (brev < 0x100) snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); else snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 
	         'P' : 'A', brev & 0xfff);
	return (buf);
}

#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */

/* dump large strings to console */
void
printbig(char *buf)
{
	uint len, max_len;
	char c;

	len = strlen(buf);

	max_len = BUFSIZE_TODUMP_ATONCE;

	while (len > max_len) {
		c = buf[max_len];
		buf[max_len] = '\0';
		printf("%s", buf);
		buf[max_len] = c;

		buf += max_len;
		len -= max_len;
	}
	/* print the remaining string */
	printf("%s\n", buf);
	return;
}

/* routine to dump fields in a fielddesc structure */
uint
bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1,
              struct fielddesc *fielddesc_array, char *buf, uint32 bufsize)
{
	uint filled_len;
	int len;
	struct fielddesc *cur_ptr;

	filled_len = 0;
	cur_ptr = fielddesc_array;

	while (bufsize > 1) {
		if (cur_ptr->nameandfmt == NULL)
			break;
		len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
		               read_rtn(arg0, arg1, cur_ptr->offset));
		/* check for snprintf overflow or error */
		if (len < 0 || (uint32)len >= bufsize)
			len = bufsize - 1;
		buf += len;
		bufsize -= len;
		filled_len += len;
		cur_ptr++;
	}
	return filled_len;
}

uint
bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
{
	uint len;

	len = strlen(name) + 1;

	if ((len + datalen) > buflen)
		return 0;

	strncpy(buf, name, buflen);

	/* append data onto the end of the name string */
	memcpy(&buf[len], data, datalen);
	len += datalen;

	return len;
}

/* Quarter dBm units to mW
 * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
 * Table is offset so the last entry is largest mW value that fits in
 * a uint16.
 */

#define QDBM_OFFSET 153		/* Offset for first entry */
#define QDBM_TABLE_LEN 40	/* Table size */

/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
 * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
 */
#define QDBM_TABLE_LOW_BOUND 6493	/* Low bound */

/* Largest mW value that will round down to the last table entry,
 * QDBM_OFFSET + QDBM_TABLE_LEN-1.
 * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
 */
#define QDBM_TABLE_HIGH_BOUND 64938	/* High bound */

static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
/* qdBm:	+0	+1	+2	+3	+4	+5	+6	+7 */
/* 153: */	6683,	7079,	7499,	7943,	8414,	8913,	9441,	10000,
/* 161: */	10593,	11220,	11885,	12589,	13335,	14125,	14962,	15849,
/* 169: */	16788,	17783,	18836,	19953,	21135,	22387,	23714,	25119,
/* 177: */	26607,	28184,	29854,	31623,	33497,	35481,	37584,	39811,
/* 185: */	42170,	44668,	47315,	50119,	53088,	56234,	59566,	63096
};

uint16
bcm_qdbm_to_mw(uint8 qdbm)
{
	uint factor = 1;
	int idx = qdbm - QDBM_OFFSET;

	if (idx >= QDBM_TABLE_LEN) {
		/* clamp to max uint16 mW value */
		return 0xFFFF;
	}

	/* scale the qdBm index up to the range of the table 0-40
	 * where an offset of 40 qdBm equals a factor of 10 mW.
	 */
	while (idx < 0) {
		idx += 40;
		factor *= 10;
	}

	/* return the mW value scaled down to the correct factor of 10,
	 * adding in factor/2 to get proper rounding.
*/ return ((nqdBm_to_mW_map[idx] + factor/2) / factor); } uint8 bcm_mw_to_qdbm(uint16 mw) { uint8 qdbm; int offset; uint mw_uint = mw; uint boundary; /* handle boundary case */ if (mw_uint <= 1) return 0; offset = QDBM_OFFSET; /* move mw into the range of the table */ while (mw_uint < QDBM_TABLE_LOW_BOUND) { mw_uint *= 10; offset -= 40; } for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) { boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - nqdBm_to_mW_map[qdbm])/2; if (mw_uint < boundary) break; } qdbm += (uint8)offset; return (qdbm); } uint bcm_bitcount(uint8 *bitmap, uint length) { uint bitcount = 0, i; uint8 tmp; for (i = 0; i < length; i++) { tmp = bitmap[i]; while (tmp) { bitcount++; tmp &= (tmp - 1); } } return bitcount; } #ifdef BCMDRIVER /* Initialization of bcmstrbuf structure */ void bcm_binit(struct bcmstrbuf *b, char *buf, uint size) { b->origsize = b->size = size; b->origbuf = b->buf = buf; } /* Buffer sprintf wrapper to guard against buffer overflow */ int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) { va_list ap; int r; va_start(ap, fmt); r = vsnprintf(b->buf, b->size, fmt, ap); /* Non Ansi C99 compliant returns -1, * Ansi compliant return r >= b->size, * bcmstdlib returns 0, handle all */ if ((r == -1) || (r >= (int)b->size) || (r == 0)) { b->size = 0; } else { b->size -= r; b->buf += r; } va_end(ap); return r; } void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount) { int i; for (i = 0; i < num_bytes; i++) { num[i] += amount; if (num[i] >= amount) break; amount = 1; } } int bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes) { int i; for (i = nbytes - 1; i >= 0; i--) { if (arg1[i] != arg2[i]) return (arg1[i] - arg2[i]); } return 0; } void bcm_print_bytes(char *name, const uchar *data, int len) { int i; int per_line = 0; printf("%s: %d \n", name ? name : "", len); for (i = 0; i < len; i++) { printf("%02x ", *data++); per_line++; if (per_line == 16) { per_line = 0; printf("\n"); } } printf("\n"); } /* * buffer length needed for wlc_format_ssid * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL. */ #if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len) { uint i, c; char *p = buf; char *endp = buf + SSID_FMT_BUF_LEN; if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN; for (i = 0; i < ssid_len; i++) { c = (uint)ssid[i]; if (c == '\\') { *p++ = '\\'; *p++ = '\\'; } else if (bcm_isprint((uchar)c)) { *p++ = (char)c; } else { p += snprintf(p, (endp - p), "\\x%02X", c); } } *p = '\0'; ASSERT(p < endp); return (int)(p - buf); } #endif #endif /* BCMDRIVER */
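/* Editor's illustration, not part of the original file: a minimal sketch of
 * how the quarter-dBm helpers above round-trip. It follows the file's own
 * testcrc32() convention of being guarded by 'notdef' so it never compiles;
 * the function name testqdbm() is hypothetical. 68 qdBm is 17 dBm, i.e.
 * 10^(17/10) ~= 50.1 mW, so the table lookup should report 50 mW and the
 * inverse should recover 68 qdBm.
 */
#ifdef notdef
void
testqdbm(void)
{
	uint16 mw = bcm_qdbm_to_mw(68);		/* expect 50 mW */
	uint8 qdbm = bcm_mw_to_qdbm(mw);	/* expect 68 qdBm again */

	printf("68 qdBm -> %d mW -> %d qdBm\n", mw, qdbm);
}
#endif /* notdef */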
sktjdgns1189/android_kernel_samsung_SHW-M130L
drivers/net/wireless/bcm4329/src/shared/bcmutils.c
C
gpl-2.0
44,532
/* * Interface to libmp3lame for mp3 encoding * Copyright (c) 2002 Lennert Buytenhek <buytenh@gnu.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file mp3lameaudio.c * Interface to libmp3lame for mp3 encoding. */ #include "avcodec.h" #include "mpegaudio.h" #include <lame/lame.h> #define BUFFER_SIZE (7200 + MPA_FRAME_SIZE + MPA_FRAME_SIZE/4) typedef struct Mp3AudioContext { lame_global_flags *gfp; int stereo; uint8_t buffer[BUFFER_SIZE]; int buffer_index; } Mp3AudioContext; static av_cold int MP3lame_encode_init(AVCodecContext *avctx) { Mp3AudioContext *s = avctx->priv_data; if (avctx->channels > 2) return -1; s->stereo = avctx->channels > 1 ? 1 : 0; if ((s->gfp = lame_init()) == NULL) goto err; lame_set_in_samplerate(s->gfp, avctx->sample_rate); lame_set_out_samplerate(s->gfp, avctx->sample_rate); lame_set_num_channels(s->gfp, avctx->channels); /* lame 3.91 dies on quality != 5 */ lame_set_quality(s->gfp, 5); /* lame 3.91 doesn't work in mono */ lame_set_mode(s->gfp, JOINT_STEREO); lame_set_brate(s->gfp, avctx->bit_rate/1000); if(avctx->flags & CODEC_FLAG_QSCALE) { lame_set_brate(s->gfp, 0); lame_set_VBR(s->gfp, vbr_default); lame_set_VBR_q(s->gfp, avctx->global_quality / (float)FF_QP2LAMBDA); } lame_set_bWriteVbrTag(s->gfp,0); lame_set_disable_reservoir(s->gfp, avctx->flags2 & CODEC_FLAG2_BIT_RESERVOIR ? 
0 : 1); if (lame_init_params(s->gfp) < 0) goto err_close; avctx->frame_size = lame_get_framesize(s->gfp); avctx->coded_frame= avcodec_alloc_frame(); avctx->coded_frame->key_frame= 1; return 0; err_close: lame_close(s->gfp); err: return -1; } static const int sSampleRates[3] = { 44100, 48000, 32000 }; static const int sBitRates[2][3][15] = { { { 0, 32, 64, 96,128,160,192,224,256,288,320,352,384,416,448}, { 0, 32, 48, 56, 64, 80, 96,112,128,160,192,224,256,320,384}, { 0, 32, 40, 48, 56, 64, 80, 96,112,128,160,192,224,256,320} }, { { 0, 32, 48, 56, 64, 80, 96,112,128,144,160,176,192,224,256}, { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160}, { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160} }, }; static const int sSamplesPerFrame[2][3] = { { 384, 1152, 1152 }, { 384, 1152, 576 } }; static const int sBitsPerSlot[3] = { 32, 8, 8 }; static int mp3len(void *data, int *samplesPerFrame, int *sampleRate) { uint32_t header = AV_RB32(data); int layerID = 3 - ((header >> 17) & 0x03); int bitRateID = ((header >> 12) & 0x0f); int sampleRateID = ((header >> 10) & 0x03); int bitsPerSlot = sBitsPerSlot[layerID]; int isPadded = ((header >> 9) & 0x01); static int const mode_tab[4]= {2,3,1,0}; int mode= mode_tab[(header >> 19) & 0x03]; int mpeg_id= mode>0; int temp0, temp1, bitRate; if ( (( header >> 21 ) & 0x7ff) != 0x7ff || mode == 3 || layerID==3 || sampleRateID==3) { return -1; } if(!samplesPerFrame) samplesPerFrame= &temp0; if(!sampleRate ) sampleRate = &temp1; // *isMono = ((header >> 6) & 0x03) == 0x03; *sampleRate = sSampleRates[sampleRateID]>>mode; bitRate = sBitRates[mpeg_id][layerID][bitRateID] * 1000; *samplesPerFrame = sSamplesPerFrame[mpeg_id][layerID]; //av_log(NULL, AV_LOG_DEBUG, "sr:%d br:%d spf:%d l:%d m:%d\n", *sampleRate, bitRate, *samplesPerFrame, layerID, mode); return *samplesPerFrame * bitRate / (bitsPerSlot * *sampleRate) + isPadded; } static int MP3lame_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { Mp3AudioContext *s = avctx->priv_data; int len; int lame_result; /* lame 3.91 dies on '1-channel interleaved' data */ if(data){ if (s->stereo) { lame_result = lame_encode_buffer_interleaved( s->gfp, data, avctx->frame_size, s->buffer + s->buffer_index, BUFFER_SIZE - s->buffer_index ); } else { lame_result = lame_encode_buffer( s->gfp, data, data, avctx->frame_size, s->buffer + s->buffer_index, BUFFER_SIZE - s->buffer_index ); } }else{ lame_result= lame_encode_flush( s->gfp, s->buffer + s->buffer_index, BUFFER_SIZE - s->buffer_index ); } if(lame_result==-1) { /* output buffer too small */ av_log(avctx, AV_LOG_ERROR, "lame: output buffer too small (buffer index: %d, free bytes: %d)\n", s->buffer_index, BUFFER_SIZE - s->buffer_index); return 0; } s->buffer_index += lame_result; if(s->buffer_index<4) return 0; len= mp3len(s->buffer, NULL, NULL); //av_log(avctx, AV_LOG_DEBUG, "in:%d packet-len:%d index:%d\n", avctx->frame_size, len, s->buffer_index); if(len <= s->buffer_index){ memcpy(frame, s->buffer, len); s->buffer_index -= len; memmove(s->buffer, s->buffer+len, s->buffer_index); //FIXME fix the audio codec API, so we do not need the memcpy() /*for(i=0; i<len; i++){ av_log(avctx, AV_LOG_DEBUG, "%2X ", frame[i]); }*/ return len; }else return 0; } static av_cold int MP3lame_encode_close(AVCodecContext *avctx) { Mp3AudioContext *s = avctx->priv_data; av_freep(&avctx->coded_frame); lame_close(s->gfp); return 0; } AVCodec libmp3lame_encoder = { "libmp3lame", CODEC_TYPE_AUDIO, CODEC_ID_MP3, sizeof(Mp3AudioContext), MP3lame_encode_init, 
MP3lame_encode_frame, MP3lame_encode_close, .capabilities= CODEC_CAP_DELAY, };
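/* Editor's illustration, not part of the original file: because
 * MP3lame_encode_frame() buffers lame output internally and only returns
 * whole MP3 frames, the codec advertises CODEC_CAP_DELAY, and a caller is
 * expected to keep flushing with NULL input at end of stream until no more
 * packets come out (reaching the lame_encode_flush() path above). A minimal
 * sketch against the avcodec_encode_audio() call of this FFmpeg era; the
 * helper name is hypothetical and FILE/fwrite are assumed from <stdio.h>.
 */
#if 0
static void flush_mp3_encoder(AVCodecContext *avctx, FILE *out)
{
    uint8_t pkt[BUFFER_SIZE];
    int n;

    /* data == NULL selects the flush path in MP3lame_encode_frame();
     * repeat until the encoder stops producing complete frames */
    while ((n = avcodec_encode_audio(avctx, pkt, sizeof(pkt), NULL)) > 0)
        fwrite(pkt, 1, n, out);
}
#endif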
uwehermann/easybox-904-lte-firmware
package/ffmpeg/src/libavcodec/libmp3lame.c
C
gpl-2.0
6,658
/* $Id: capi.c,v 1.1.1.1 2007-05-25 06:50:09 bruce Exp $ * * ISDN lowlevel-module for the IBM ISDN-S0 Active 2000. * CAPI encoder/decoder * * Author Fritz Elfert * Copyright by Fritz Elfert <fritz@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * Thanks to Friedemann Baitinger and IBM Germany * */ #include "act2000.h" #include "capi.h" static actcapi_msgdsc valid_msg[] = { {{ 0x86, 0x02}, "DATA_B3_IND"}, /* DATA_B3_IND/CONF must be first because of speed!!! */ {{ 0x86, 0x01}, "DATA_B3_CONF"}, {{ 0x02, 0x01}, "CONNECT_CONF"}, {{ 0x02, 0x02}, "CONNECT_IND"}, {{ 0x09, 0x01}, "CONNECT_INFO_CONF"}, {{ 0x03, 0x02}, "CONNECT_ACTIVE_IND"}, {{ 0x04, 0x01}, "DISCONNECT_CONF"}, {{ 0x04, 0x02}, "DISCONNECT_IND"}, {{ 0x05, 0x01}, "LISTEN_CONF"}, {{ 0x06, 0x01}, "GET_PARAMS_CONF"}, {{ 0x07, 0x01}, "INFO_CONF"}, {{ 0x07, 0x02}, "INFO_IND"}, {{ 0x08, 0x01}, "DATA_CONF"}, {{ 0x08, 0x02}, "DATA_IND"}, {{ 0x40, 0x01}, "SELECT_B2_PROTOCOL_CONF"}, {{ 0x80, 0x01}, "SELECT_B3_PROTOCOL_CONF"}, {{ 0x81, 0x01}, "LISTEN_B3_CONF"}, {{ 0x82, 0x01}, "CONNECT_B3_CONF"}, {{ 0x82, 0x02}, "CONNECT_B3_IND"}, {{ 0x83, 0x02}, "CONNECT_B3_ACTIVE_IND"}, {{ 0x84, 0x01}, "DISCONNECT_B3_CONF"}, {{ 0x84, 0x02}, "DISCONNECT_B3_IND"}, {{ 0x85, 0x01}, "GET_B3_PARAMS_CONF"}, {{ 0x01, 0x01}, "RESET_B3_CONF"}, {{ 0x01, 0x02}, "RESET_B3_IND"}, /* {{ 0x87, 0x02, "HANDSET_IND"}, not implemented */ {{ 0xff, 0x01}, "MANUFACTURER_CONF"}, {{ 0xff, 0x02}, "MANUFACTURER_IND"}, #ifdef DEBUG_MSG /* Requests */ {{ 0x01, 0x00}, "RESET_B3_REQ"}, {{ 0x02, 0x00}, "CONNECT_REQ"}, {{ 0x04, 0x00}, "DISCONNECT_REQ"}, {{ 0x05, 0x00}, "LISTEN_REQ"}, {{ 0x06, 0x00}, "GET_PARAMS_REQ"}, {{ 0x07, 0x00}, "INFO_REQ"}, {{ 0x08, 0x00}, "DATA_REQ"}, {{ 0x09, 0x00}, "CONNECT_INFO_REQ"}, {{ 0x40, 0x00}, "SELECT_B2_PROTOCOL_REQ"}, {{ 0x80, 0x00}, "SELECT_B3_PROTOCOL_REQ"}, {{ 0x81, 0x00}, "LISTEN_B3_REQ"}, {{ 0x82, 0x00}, "CONNECT_B3_REQ"}, {{ 0x84, 0x00}, "DISCONNECT_B3_REQ"}, {{ 0x85, 0x00}, "GET_B3_PARAMS_REQ"}, {{ 0x86, 0x00}, "DATA_B3_REQ"}, {{ 0xff, 0x00}, "MANUFACTURER_REQ"}, /* Responses */ {{ 0x01, 0x03}, "RESET_B3_RESP"}, {{ 0x02, 0x03}, "CONNECT_RESP"}, {{ 0x03, 0x03}, "CONNECT_ACTIVE_RESP"}, {{ 0x04, 0x03}, "DISCONNECT_RESP"}, {{ 0x07, 0x03}, "INFO_RESP"}, {{ 0x08, 0x03}, "DATA_RESP"}, {{ 0x82, 0x03}, "CONNECT_B3_RESP"}, {{ 0x83, 0x03}, "CONNECT_B3_ACTIVE_RESP"}, {{ 0x84, 0x03}, "DISCONNECT_B3_RESP"}, {{ 0x86, 0x03}, "DATA_B3_RESP"}, {{ 0xff, 0x03}, "MANUFACTURER_RESP"}, #endif {{ 0x00, 0x00}, NULL}, }; #define num_valid_msg (sizeof(valid_msg)/sizeof(actcapi_msgdsc)) #define num_valid_imsg 27 /* MANUFACTURER_IND */ /* * Check for a valid incoming CAPI message. 
* Return: * 0 = Invalid message * 1 = Valid message, no B-Channel-data * 2 = Valid message, B-Channel-data */ int actcapi_chkhdr(act2000_card * card, actcapi_msghdr *hdr) { int i; if (hdr->applicationID != 1) return 0; if (hdr->len < 9) return 0; for (i = 0; i < num_valid_imsg; i++) if ((hdr->cmd.cmd == valid_msg[i].cmd.cmd) && (hdr->cmd.subcmd == valid_msg[i].cmd.subcmd)) { return (i?1:2); } return 0; } #define ACTCAPI_MKHDR(l, c, s) { \ skb = alloc_skb(l + 8, GFP_ATOMIC); \ if (skb) { \ m = (actcapi_msg *)skb_put(skb, l + 8); \ m->hdr.len = l + 8; \ m->hdr.applicationID = 1; \ m->hdr.cmd.cmd = c; \ m->hdr.cmd.subcmd = s; \ m->hdr.msgnum = actcapi_nextsmsg(card); \ } else m = NULL;\ } #define ACTCAPI_CHKSKB if (!skb) { \ printk(KERN_WARNING "actcapi: alloc_skb failed\n"); \ return; \ } #define ACTCAPI_QUEUE_TX { \ actcapi_debug_msg(skb, 1); \ skb_queue_tail(&card->sndq, skb); \ act2000_schedule_tx(card); \ } int actcapi_listen_req(act2000_card *card) { __u16 eazmask = 0; int i; actcapi_msg *m; struct sk_buff *skb; for (i = 0; i < ACT2000_BCH; i++) eazmask |= card->bch[i].eazmask; ACTCAPI_MKHDR(9, 0x05, 0x00); if (!skb) { printk(KERN_WARNING "actcapi: alloc_skb failed\n"); return -ENOMEM; } m->msg.listen_req.controller = 0; m->msg.listen_req.infomask = 0x3f; /* All information */ m->msg.listen_req.eazmask = eazmask; m->msg.listen_req.simask = (eazmask)?0x86:0; /* All SI's */ ACTCAPI_QUEUE_TX; return 0; } int actcapi_connect_req(act2000_card *card, act2000_chan *chan, char *phone, char eaz, int si1, int si2) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR((11 + strlen(phone)), 0x02, 0x00); if (!skb) { printk(KERN_WARNING "actcapi: alloc_skb failed\n"); chan->fsm_state = ACT2000_STATE_NULL; return -ENOMEM; } m->msg.connect_req.controller = 0; m->msg.connect_req.bchan = 0x83; m->msg.connect_req.infomask = 0x3f; m->msg.connect_req.si1 = si1; m->msg.connect_req.si2 = si2; m->msg.connect_req.eaz = eaz?eaz:'0'; m->msg.connect_req.addr.len = strlen(phone) + 1; m->msg.connect_req.addr.tnp = 0x81; memcpy(m->msg.connect_req.addr.num, phone, strlen(phone)); chan->callref = m->hdr.msgnum; ACTCAPI_QUEUE_TX; return 0; } static void actcapi_connect_b3_req(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(17, 0x82, 0x00); ACTCAPI_CHKSKB; m->msg.connect_b3_req.plci = chan->plci; memset(&m->msg.connect_b3_req.ncpi, 0, sizeof(m->msg.connect_b3_req.ncpi)); m->msg.connect_b3_req.ncpi.len = 13; m->msg.connect_b3_req.ncpi.modulo = 8; ACTCAPI_QUEUE_TX; } /* * Set net type (1TR6) or (EDSS1) */ int actcapi_manufacturer_req_net(act2000_card *card) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(5, 0xff, 0x00); if (!skb) { printk(KERN_WARNING "actcapi: alloc_skb failed\n"); return -ENOMEM; } m->msg.manufacturer_req_net.manuf_msg = 0x11; m->msg.manufacturer_req_net.controller = 1; m->msg.manufacturer_req_net.nettype = (card->ptype == ISDN_PTYPE_EURO)?1:0; ACTCAPI_QUEUE_TX; printk(KERN_INFO "act2000 %s: D-channel protocol now %s\n", card->interface.id, (card->ptype == ISDN_PTYPE_EURO)?"euro":"1tr6"); card->interface.features &= ~(ISDN_FEATURE_P_UNKNOWN | ISDN_FEATURE_P_EURO | ISDN_FEATURE_P_1TR6); card->interface.features |= ((card->ptype == ISDN_PTYPE_EURO)?ISDN_FEATURE_P_EURO:ISDN_FEATURE_P_1TR6); return 0; } /* * Switch V.42 on or off */ #if 0 int actcapi_manufacturer_req_v42(act2000_card *card, ulong arg) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(8, 0xff, 0x00); if (!skb) { printk(KERN_WARNING "actcapi: alloc_skb failed\n"); return -ENOMEM; } 
m->msg.manufacturer_req_v42.manuf_msg = 0x10; m->msg.manufacturer_req_v42.controller = 0; m->msg.manufacturer_req_v42.v42control = (arg?1:0); ACTCAPI_QUEUE_TX; return 0; } #endif /* 0 */ /* * Set error-handler */ int actcapi_manufacturer_req_errh(act2000_card *card) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(4, 0xff, 0x00); if (!skb) { printk(KERN_WARNING "actcapi: alloc_skb failed\n"); return -ENOMEM; } m->msg.manufacturer_req_err.manuf_msg = 0x03; m->msg.manufacturer_req_err.controller = 0; ACTCAPI_QUEUE_TX; return 0; } /* * Set MSN-Mapping. */ int actcapi_manufacturer_req_msn(act2000_card *card) { msn_entry *p = card->msn_list; actcapi_msg *m; struct sk_buff *skb; int len; while (p) { int i; len = strlen(p->msn); for (i = 0; i < 2; i++) { ACTCAPI_MKHDR(6 + len, 0xff, 0x00); if (!skb) { printk(KERN_WARNING "actcapi: alloc_skb failed\n"); return -ENOMEM; } m->msg.manufacturer_req_msn.manuf_msg = 0x13 + i; m->msg.manufacturer_req_msn.controller = 0; m->msg.manufacturer_req_msn.msnmap.eaz = p->eaz; m->msg.manufacturer_req_msn.msnmap.len = len; memcpy(m->msg.manufacturer_req_msn.msnmap.msn, p->msn, len); ACTCAPI_QUEUE_TX; } p = p->next; } return 0; } void actcapi_select_b2_protocol_req(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(10, 0x40, 0x00); ACTCAPI_CHKSKB; m->msg.select_b2_protocol_req.plci = chan->plci; memset(&m->msg.select_b2_protocol_req.dlpd, 0, sizeof(m->msg.select_b2_protocol_req.dlpd)); m->msg.select_b2_protocol_req.dlpd.len = 6; switch (chan->l2prot) { case ISDN_PROTO_L2_TRANS: m->msg.select_b2_protocol_req.protocol = 0x03; m->msg.select_b2_protocol_req.dlpd.dlen = 4000; break; case ISDN_PROTO_L2_HDLC: m->msg.select_b2_protocol_req.protocol = 0x02; m->msg.select_b2_protocol_req.dlpd.dlen = 4000; break; case ISDN_PROTO_L2_X75I: case ISDN_PROTO_L2_X75UI: case ISDN_PROTO_L2_X75BUI: m->msg.select_b2_protocol_req.protocol = 0x01; m->msg.select_b2_protocol_req.dlpd.dlen = 4000; m->msg.select_b2_protocol_req.dlpd.laa = 3; m->msg.select_b2_protocol_req.dlpd.lab = 1; m->msg.select_b2_protocol_req.dlpd.win = 7; m->msg.select_b2_protocol_req.dlpd.modulo = 8; break; } ACTCAPI_QUEUE_TX; } static void actcapi_select_b3_protocol_req(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(17, 0x80, 0x00); ACTCAPI_CHKSKB; m->msg.select_b3_protocol_req.plci = chan->plci; memset(&m->msg.select_b3_protocol_req.ncpd, 0, sizeof(m->msg.select_b3_protocol_req.ncpd)); switch (chan->l3prot) { case ISDN_PROTO_L3_TRANS: m->msg.select_b3_protocol_req.protocol = 0x04; m->msg.select_b3_protocol_req.ncpd.len = 13; m->msg.select_b3_protocol_req.ncpd.modulo = 8; break; } ACTCAPI_QUEUE_TX; } static void actcapi_listen_b3_req(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(2, 0x81, 0x00); ACTCAPI_CHKSKB; m->msg.listen_b3_req.plci = chan->plci; ACTCAPI_QUEUE_TX; } static void actcapi_disconnect_req(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(3, 0x04, 0x00); ACTCAPI_CHKSKB; m->msg.disconnect_req.plci = chan->plci; m->msg.disconnect_req.cause = 0; ACTCAPI_QUEUE_TX; } void actcapi_disconnect_b3_req(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(17, 0x84, 0x00); ACTCAPI_CHKSKB; m->msg.disconnect_b3_req.ncci = chan->ncci; memset(&m->msg.disconnect_b3_req.ncpi, 0, sizeof(m->msg.disconnect_b3_req.ncpi)); m->msg.disconnect_b3_req.ncpi.len = 13; m->msg.disconnect_b3_req.ncpi.modulo = 8; 
chan->fsm_state = ACT2000_STATE_BHWAIT; ACTCAPI_QUEUE_TX; } void actcapi_connect_resp(act2000_card *card, act2000_chan *chan, __u8 cause) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(3, 0x02, 0x03); ACTCAPI_CHKSKB; m->msg.connect_resp.plci = chan->plci; m->msg.connect_resp.rejectcause = cause; if (cause) { chan->fsm_state = ACT2000_STATE_NULL; chan->plci = 0x8000; } else chan->fsm_state = ACT2000_STATE_IWAIT; ACTCAPI_QUEUE_TX; } static void actcapi_connect_active_resp(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(2, 0x03, 0x03); ACTCAPI_CHKSKB; m->msg.connect_resp.plci = chan->plci; if (chan->fsm_state == ACT2000_STATE_IWAIT) chan->fsm_state = ACT2000_STATE_IBWAIT; ACTCAPI_QUEUE_TX; } static void actcapi_connect_b3_resp(act2000_card *card, act2000_chan *chan, __u8 rejectcause) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR((rejectcause?3:17), 0x82, 0x03); ACTCAPI_CHKSKB; m->msg.connect_b3_resp.ncci = chan->ncci; m->msg.connect_b3_resp.rejectcause = rejectcause; if (!rejectcause) { memset(&m->msg.connect_b3_resp.ncpi, 0, sizeof(m->msg.connect_b3_resp.ncpi)); m->msg.connect_b3_resp.ncpi.len = 13; m->msg.connect_b3_resp.ncpi.modulo = 8; chan->fsm_state = ACT2000_STATE_BWAIT; } ACTCAPI_QUEUE_TX; } static void actcapi_connect_b3_active_resp(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(2, 0x83, 0x03); ACTCAPI_CHKSKB; m->msg.connect_b3_active_resp.ncci = chan->ncci; chan->fsm_state = ACT2000_STATE_ACTIVE; ACTCAPI_QUEUE_TX; } static void actcapi_info_resp(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(2, 0x07, 0x03); ACTCAPI_CHKSKB; m->msg.info_resp.plci = chan->plci; ACTCAPI_QUEUE_TX; } static void actcapi_disconnect_b3_resp(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(2, 0x84, 0x03); ACTCAPI_CHKSKB; m->msg.disconnect_b3_resp.ncci = chan->ncci; chan->ncci = 0x8000; chan->queued = 0; ACTCAPI_QUEUE_TX; } static void actcapi_disconnect_resp(act2000_card *card, act2000_chan *chan) { actcapi_msg *m; struct sk_buff *skb; ACTCAPI_MKHDR(2, 0x04, 0x03); ACTCAPI_CHKSKB; m->msg.disconnect_resp.plci = chan->plci; chan->plci = 0x8000; ACTCAPI_QUEUE_TX; } static int new_plci(act2000_card *card, __u16 plci) { int i; for (i = 0; i < ACT2000_BCH; i++) if (card->bch[i].plci == 0x8000) { card->bch[i].plci = plci; return i; } return -1; } static int find_plci(act2000_card *card, __u16 plci) { int i; for (i = 0; i < ACT2000_BCH; i++) if (card->bch[i].plci == plci) return i; return -1; } static int find_ncci(act2000_card *card, __u16 ncci) { int i; for (i = 0; i < ACT2000_BCH; i++) if (card->bch[i].ncci == ncci) return i; return -1; } static int find_dialing(act2000_card *card, __u16 callref) { int i; for (i = 0; i < ACT2000_BCH; i++) if ((card->bch[i].callref == callref) && (card->bch[i].fsm_state == ACT2000_STATE_OCALL)) return i; return -1; } static int actcapi_data_b3_ind(act2000_card *card, struct sk_buff *skb) { __u16 plci; __u16 ncci; __u16 controller; __u8 blocknr; int chan; actcapi_msg *msg = (actcapi_msg *)skb->data; EVAL_NCCI(msg->msg.data_b3_ind.fakencci, plci, controller, ncci); chan = find_ncci(card, ncci); if (chan < 0) return 0; if (card->bch[chan].fsm_state != ACT2000_STATE_ACTIVE) return 0; if (card->bch[chan].plci != plci) return 0; blocknr = msg->msg.data_b3_ind.blocknr; skb_pull(skb, 19); card->interface.rcvcallb_skb(card->myid, chan, skb); if (!(skb = alloc_skb(11, GFP_ATOMIC))) { 
printk(KERN_WARNING "actcapi: alloc_skb failed\n"); return 1; } msg = (actcapi_msg *)skb_put(skb, 11); msg->hdr.len = 11; msg->hdr.applicationID = 1; msg->hdr.cmd.cmd = 0x86; msg->hdr.cmd.subcmd = 0x03; msg->hdr.msgnum = actcapi_nextsmsg(card); msg->msg.data_b3_resp.ncci = ncci; msg->msg.data_b3_resp.blocknr = blocknr; ACTCAPI_QUEUE_TX; return 1; } /* * Walk over ackq, unlink DATA_B3_REQ from it, if * ncci and blocknr are matching. * Decrement queued-bytes counter. */ static int handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) { unsigned long flags; struct sk_buff *skb; struct sk_buff *tmp; struct actcapi_msg *m; int ret = 0; spin_lock_irqsave(&card->lock, flags); skb = skb_peek(&card->ackq); spin_unlock_irqrestore(&card->lock, flags); if (!skb) { printk(KERN_WARNING "act2000: handle_ack nothing found!\n"); return 0; } tmp = skb; while (1) { m = (actcapi_msg *)tmp->data; if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) && (m->msg.data_b3_req.blocknr == blocknr)) { /* found corresponding DATA_B3_REQ */ skb_unlink(tmp, &card->ackq); chan->queued -= m->msg.data_b3_req.datalen; if (m->msg.data_b3_req.flags) ret = m->msg.data_b3_req.datalen; dev_kfree_skb(tmp); if (chan->queued < 0) chan->queued = 0; return ret; } spin_lock_irqsave(&card->lock, flags); tmp = skb_peek((struct sk_buff_head *)tmp); spin_unlock_irqrestore(&card->lock, flags); if ((tmp == skb) || (tmp == NULL)) { /* reached end of queue */ printk(KERN_WARNING "act2000: handle_ack nothing found!\n"); return 0; } } } void actcapi_dispatch(struct work_struct *work) { struct act2000_card *card = container_of(work, struct act2000_card, rcv_tq); struct sk_buff *skb; actcapi_msg *msg; __u16 ccmd; int chan; int len; act2000_chan *ctmp; isdn_ctrl cmd; char tmp[170]; while ((skb = skb_dequeue(&card->rcvq))) { actcapi_debug_msg(skb, 0); msg = (actcapi_msg *)skb->data; ccmd = ((msg->hdr.cmd.cmd << 8) | msg->hdr.cmd.subcmd); switch (ccmd) { case 0x8602: /* DATA_B3_IND */ if (actcapi_data_b3_ind(card, skb)) return; break; case 0x8601: /* DATA_B3_CONF */ chan = find_ncci(card, msg->msg.data_b3_conf.ncci); if ((chan >= 0) && (card->bch[chan].fsm_state == ACT2000_STATE_ACTIVE)) { if (msg->msg.data_b3_conf.info != 0) printk(KERN_WARNING "act2000: DATA_B3_CONF: %04x\n", msg->msg.data_b3_conf.info); len = handle_ack(card, &card->bch[chan], msg->msg.data_b3_conf.blocknr); if (len) { cmd.driver = card->myid; cmd.command = ISDN_STAT_BSENT; cmd.arg = chan; cmd.parm.length = len; card->interface.statcallb(&cmd); } } break; case 0x0201: /* CONNECT_CONF */ chan = find_dialing(card, msg->hdr.msgnum); if (chan >= 0) { if (msg->msg.connect_conf.info) { card->bch[chan].fsm_state = ACT2000_STATE_NULL; cmd.driver = card->myid; cmd.command = ISDN_STAT_DHUP; cmd.arg = chan; card->interface.statcallb(&cmd); } else { card->bch[chan].fsm_state = ACT2000_STATE_OWAIT; card->bch[chan].plci = msg->msg.connect_conf.plci; } } break; case 0x0202: /* CONNECT_IND */ chan = new_plci(card, msg->msg.connect_ind.plci); if (chan < 0) { ctmp = (act2000_chan *)tmp; ctmp->plci = msg->msg.connect_ind.plci; actcapi_connect_resp(card, ctmp, 0x11); /* All Card-Cannels busy */ } else { card->bch[chan].fsm_state = ACT2000_STATE_ICALL; cmd.driver = card->myid; cmd.command = ISDN_STAT_ICALL; cmd.arg = chan; cmd.parm.setup.si1 = msg->msg.connect_ind.si1; cmd.parm.setup.si2 = msg->msg.connect_ind.si2; if (card->ptype == ISDN_PTYPE_EURO) strcpy(cmd.parm.setup.eazmsn, act2000_find_eaz(card, msg->msg.connect_ind.eaz)); else { cmd.parm.setup.eazmsn[0] = 
msg->msg.connect_ind.eaz; cmd.parm.setup.eazmsn[1] = 0; } memset(cmd.parm.setup.phone, 0, sizeof(cmd.parm.setup.phone)); memcpy(cmd.parm.setup.phone, msg->msg.connect_ind.addr.num, msg->msg.connect_ind.addr.len - 1); cmd.parm.setup.plan = msg->msg.connect_ind.addr.tnp; cmd.parm.setup.screen = 0; if (card->interface.statcallb(&cmd) == 2) actcapi_connect_resp(card, &card->bch[chan], 0x15); /* Reject Call */ } break; case 0x0302: /* CONNECT_ACTIVE_IND */ chan = find_plci(card, msg->msg.connect_active_ind.plci); if (chan >= 0) switch (card->bch[chan].fsm_state) { case ACT2000_STATE_IWAIT: actcapi_connect_active_resp(card, &card->bch[chan]); break; case ACT2000_STATE_OWAIT: actcapi_connect_active_resp(card, &card->bch[chan]); actcapi_select_b2_protocol_req(card, &card->bch[chan]); break; } break; case 0x8202: /* CONNECT_B3_IND */ chan = find_plci(card, msg->msg.connect_b3_ind.plci); if ((chan >= 0) && (card->bch[chan].fsm_state == ACT2000_STATE_IBWAIT)) { card->bch[chan].ncci = msg->msg.connect_b3_ind.ncci; actcapi_connect_b3_resp(card, &card->bch[chan], 0); } else { ctmp = (act2000_chan *)tmp; ctmp->ncci = msg->msg.connect_b3_ind.ncci; actcapi_connect_b3_resp(card, ctmp, 0x11); /* All Card-Cannels busy */ } break; case 0x8302: /* CONNECT_B3_ACTIVE_IND */ chan = find_ncci(card, msg->msg.connect_b3_active_ind.ncci); if ((chan >= 0) && (card->bch[chan].fsm_state == ACT2000_STATE_BWAIT)) { actcapi_connect_b3_active_resp(card, &card->bch[chan]); cmd.driver = card->myid; cmd.command = ISDN_STAT_BCONN; cmd.arg = chan; card->interface.statcallb(&cmd); } break; case 0x8402: /* DISCONNECT_B3_IND */ chan = find_ncci(card, msg->msg.disconnect_b3_ind.ncci); if (chan >= 0) { ctmp = &card->bch[chan]; actcapi_disconnect_b3_resp(card, ctmp); switch (ctmp->fsm_state) { case ACT2000_STATE_ACTIVE: ctmp->fsm_state = ACT2000_STATE_DHWAIT2; cmd.driver = card->myid; cmd.command = ISDN_STAT_BHUP; cmd.arg = chan; card->interface.statcallb(&cmd); break; case ACT2000_STATE_BHWAIT2: actcapi_disconnect_req(card, ctmp); ctmp->fsm_state = ACT2000_STATE_DHWAIT; cmd.driver = card->myid; cmd.command = ISDN_STAT_BHUP; cmd.arg = chan; card->interface.statcallb(&cmd); break; } } break; case 0x0402: /* DISCONNECT_IND */ chan = find_plci(card, msg->msg.disconnect_ind.plci); if (chan >= 0) { ctmp = &card->bch[chan]; actcapi_disconnect_resp(card, ctmp); ctmp->fsm_state = ACT2000_STATE_NULL; cmd.driver = card->myid; cmd.command = ISDN_STAT_DHUP; cmd.arg = chan; card->interface.statcallb(&cmd); } else { ctmp = (act2000_chan *)tmp; ctmp->plci = msg->msg.disconnect_ind.plci; actcapi_disconnect_resp(card, ctmp); } break; case 0x4001: /* SELECT_B2_PROTOCOL_CONF */ chan = find_plci(card, msg->msg.select_b2_protocol_conf.plci); if (chan >= 0) switch (card->bch[chan].fsm_state) { case ACT2000_STATE_ICALL: case ACT2000_STATE_OWAIT: ctmp = &card->bch[chan]; if (msg->msg.select_b2_protocol_conf.info == 0) actcapi_select_b3_protocol_req(card, ctmp); else { ctmp->fsm_state = ACT2000_STATE_NULL; cmd.driver = card->myid; cmd.command = ISDN_STAT_DHUP; cmd.arg = chan; card->interface.statcallb(&cmd); } break; } break; case 0x8001: /* SELECT_B3_PROTOCOL_CONF */ chan = find_plci(card, msg->msg.select_b3_protocol_conf.plci); if (chan >= 0) switch (card->bch[chan].fsm_state) { case ACT2000_STATE_ICALL: case ACT2000_STATE_OWAIT: ctmp = &card->bch[chan]; if (msg->msg.select_b3_protocol_conf.info == 0) actcapi_listen_b3_req(card, ctmp); else { ctmp->fsm_state = ACT2000_STATE_NULL; cmd.driver = card->myid; cmd.command = ISDN_STAT_DHUP; cmd.arg = chan; 
card->interface.statcallb(&cmd); } } break; case 0x8101: /* LISTEN_B3_CONF */ chan = find_plci(card, msg->msg.listen_b3_conf.plci); if (chan >= 0) switch (card->bch[chan].fsm_state) { case ACT2000_STATE_ICALL: ctmp = &card->bch[chan]; if (msg->msg.listen_b3_conf.info == 0) actcapi_connect_resp(card, ctmp, 0); else { ctmp->fsm_state = ACT2000_STATE_NULL; cmd.driver = card->myid; cmd.command = ISDN_STAT_DHUP; cmd.arg = chan; card->interface.statcallb(&cmd); } break; case ACT2000_STATE_OWAIT: ctmp = &card->bch[chan]; if (msg->msg.listen_b3_conf.info == 0) { actcapi_connect_b3_req(card, ctmp); ctmp->fsm_state = ACT2000_STATE_OBWAIT; cmd.driver = card->myid; cmd.command = ISDN_STAT_DCONN; cmd.arg = chan; card->interface.statcallb(&cmd); } else { ctmp->fsm_state = ACT2000_STATE_NULL; cmd.driver = card->myid; cmd.command = ISDN_STAT_DHUP; cmd.arg = chan; card->interface.statcallb(&cmd); } break; } break; case 0x8201: /* CONNECT_B3_CONF */ chan = find_plci(card, msg->msg.connect_b3_conf.plci); if ((chan >= 0) && (card->bch[chan].fsm_state == ACT2000_STATE_OBWAIT)) { ctmp = &card->bch[chan]; if (msg->msg.connect_b3_conf.info) { ctmp->fsm_state = ACT2000_STATE_NULL; cmd.driver = card->myid; cmd.command = ISDN_STAT_DHUP; cmd.arg = chan; card->interface.statcallb(&cmd); } else { ctmp->ncci = msg->msg.connect_b3_conf.ncci; ctmp->fsm_state = ACT2000_STATE_BWAIT; } } break; case 0x8401: /* DISCONNECT_B3_CONF */ chan = find_ncci(card, msg->msg.disconnect_b3_conf.ncci); if ((chan >= 0) && (card->bch[chan].fsm_state == ACT2000_STATE_BHWAIT)) card->bch[chan].fsm_state = ACT2000_STATE_BHWAIT2; break; case 0x0702: /* INFO_IND */ chan = find_plci(card, msg->msg.info_ind.plci); if (chan >= 0) /* TODO: Eval Charging info / cause */ actcapi_info_resp(card, &card->bch[chan]); break; case 0x0401: /* LISTEN_CONF */ case 0x0501: /* LISTEN_CONF */ case 0xff01: /* MANUFACTURER_CONF */ break; case 0xff02: /* MANUFACTURER_IND */ if (msg->msg.manuf_msg == 3) { memset(tmp, 0, sizeof(tmp)); strncpy(tmp, &msg->msg.manufacturer_ind_err.errstring, msg->hdr.len - 16); if (msg->msg.manufacturer_ind_err.errcode) printk(KERN_WARNING "act2000: %s\n", tmp); else { printk(KERN_DEBUG "act2000: %s\n", tmp); if ((!strncmp(tmp, "INFO: Trace buffer con", 22)) || (!strncmp(tmp, "INFO: Compile Date/Tim", 22))) { card->flags |= ACT2000_FLAGS_RUNNING; cmd.command = ISDN_STAT_RUN; cmd.driver = card->myid; cmd.arg = 0; actcapi_manufacturer_req_net(card); actcapi_manufacturer_req_msn(card); actcapi_listen_req(card); card->interface.statcallb(&cmd); } } } break; default: printk(KERN_WARNING "act2000: UNHANDLED Message %04x\n", ccmd); break; } dev_kfree_skb(skb); } } #ifdef DEBUG_MSG static void actcapi_debug_caddr(actcapi_addr *addr) { char tmp[30]; printk(KERN_DEBUG " Alen = %d\n", addr->len); if (addr->len > 0) printk(KERN_DEBUG " Atnp = 0x%02x\n", addr->tnp); if (addr->len > 1) { memset(tmp, 0, 30); memcpy(tmp, addr->num, addr->len - 1); printk(KERN_DEBUG " Anum = '%s'\n", tmp); } } static void actcapi_debug_ncpi(actcapi_ncpi *ncpi) { printk(KERN_DEBUG " ncpi.len = %d\n", ncpi->len); if (ncpi->len >= 2) printk(KERN_DEBUG " ncpi.lic = 0x%04x\n", ncpi->lic); if (ncpi->len >= 4) printk(KERN_DEBUG " ncpi.hic = 0x%04x\n", ncpi->hic); if (ncpi->len >= 6) printk(KERN_DEBUG " ncpi.ltc = 0x%04x\n", ncpi->ltc); if (ncpi->len >= 8) printk(KERN_DEBUG " ncpi.htc = 0x%04x\n", ncpi->htc); if (ncpi->len >= 10) printk(KERN_DEBUG " ncpi.loc = 0x%04x\n", ncpi->loc); if (ncpi->len >= 12) printk(KERN_DEBUG " ncpi.hoc = 0x%04x\n", ncpi->hoc); if (ncpi->len >= 13) 
printk(KERN_DEBUG " ncpi.mod = %d\n", ncpi->modulo); } static void actcapi_debug_dlpd(actcapi_dlpd *dlpd) { printk(KERN_DEBUG " dlpd.len = %d\n", dlpd->len); if (dlpd->len >= 2) printk(KERN_DEBUG " dlpd.dlen = 0x%04x\n", dlpd->dlen); if (dlpd->len >= 3) printk(KERN_DEBUG " dlpd.laa = 0x%02x\n", dlpd->laa); if (dlpd->len >= 4) printk(KERN_DEBUG " dlpd.lab = 0x%02x\n", dlpd->lab); if (dlpd->len >= 5) printk(KERN_DEBUG " dlpd.modulo = %d\n", dlpd->modulo); if (dlpd->len >= 6) printk(KERN_DEBUG " dlpd.win = %d\n", dlpd->win); } #ifdef DEBUG_DUMP_SKB static void dump_skb(struct sk_buff *skb) { char tmp[80]; char *p = skb->data; char *t = tmp; int i; for (i = 0; i < skb->len; i++) { t += sprintf(t, "%02x ", *p++ & 0xff); if ((i & 0x0f) == 8) { printk(KERN_DEBUG "dump: %s\n", tmp); t = tmp; } } if (i & 0x07) printk(KERN_DEBUG "dump: %s\n", tmp); } #endif void actcapi_debug_msg(struct sk_buff *skb, int direction) { actcapi_msg *msg = (actcapi_msg *)skb->data; char *descr; int i; char tmp[170]; #ifndef DEBUG_DATA_MSG if (msg->hdr.cmd.cmd == 0x86) return; #endif descr = "INVALID"; #ifdef DEBUG_DUMP_SKB dump_skb(skb); #endif for (i = 0; i < num_valid_msg; i++) if ((msg->hdr.cmd.cmd == valid_msg[i].cmd.cmd) && (msg->hdr.cmd.subcmd == valid_msg[i].cmd.subcmd)) { descr = valid_msg[i].description; break; } printk(KERN_DEBUG "%s %s msg\n", direction?"Outgoing":"Incoming", descr); printk(KERN_DEBUG " ApplID = %d\n", msg->hdr.applicationID); printk(KERN_DEBUG " Len = %d\n", msg->hdr.len); printk(KERN_DEBUG " MsgNum = 0x%04x\n", msg->hdr.msgnum); printk(KERN_DEBUG " Cmd = 0x%02x\n", msg->hdr.cmd.cmd); printk(KERN_DEBUG " SubCmd = 0x%02x\n", msg->hdr.cmd.subcmd); switch (i) { case 0: /* DATA B3 IND */ printk(KERN_DEBUG " BLOCK = 0x%02x\n", msg->msg.data_b3_ind.blocknr); break; case 2: /* CONNECT CONF */ printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.connect_conf.plci); printk(KERN_DEBUG " Info = 0x%04x\n", msg->msg.connect_conf.info); break; case 3: /* CONNECT IND */ printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.connect_ind.plci); printk(KERN_DEBUG " Contr = %d\n", msg->msg.connect_ind.controller); printk(KERN_DEBUG " SI1 = %d\n", msg->msg.connect_ind.si1); printk(KERN_DEBUG " SI2 = %d\n", msg->msg.connect_ind.si2); printk(KERN_DEBUG " EAZ = '%c'\n", msg->msg.connect_ind.eaz); actcapi_debug_caddr(&msg->msg.connect_ind.addr); break; case 5: /* CONNECT ACTIVE IND */ printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.connect_active_ind.plci); actcapi_debug_caddr(&msg->msg.connect_active_ind.addr); break; case 8: /* LISTEN CONF */ printk(KERN_DEBUG " Contr = %d\n", msg->msg.listen_conf.controller); printk(KERN_DEBUG " Info = 0x%04x\n", msg->msg.listen_conf.info); break; case 11: /* INFO IND */ printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.info_ind.plci); printk(KERN_DEBUG " Imsk = 0x%04x\n", msg->msg.info_ind.nr.mask); if (msg->hdr.len > 12) { int l = msg->hdr.len - 12; int j; char *p = tmp; for (j = 0; j < l ; j++) p += sprintf(p, "%02x ", msg->msg.info_ind.el.display[j]); printk(KERN_DEBUG " D = '%s'\n", tmp); } break; case 14: /* SELECT B2 PROTOCOL CONF */ printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.select_b2_protocol_conf.plci); printk(KERN_DEBUG " Info = 0x%04x\n", msg->msg.select_b2_protocol_conf.info); break; case 15: /* SELECT B3 PROTOCOL CONF */ printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.select_b3_protocol_conf.plci); printk(KERN_DEBUG " Info = 0x%04x\n", msg->msg.select_b3_protocol_conf.info); break; case 16: /* LISTEN B3 CONF */ printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.listen_b3_conf.plci); 
printk(KERN_DEBUG " Info = 0x%04x\n", msg->msg.listen_b3_conf.info); break; case 18: /* CONNECT B3 IND */ printk(KERN_DEBUG " NCCI = 0x%04x\n", msg->msg.connect_b3_ind.ncci); printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.connect_b3_ind.plci); actcapi_debug_ncpi(&msg->msg.connect_b3_ind.ncpi); break; case 19: /* CONNECT B3 ACTIVE IND */ printk(KERN_DEBUG " NCCI = 0x%04x\n", msg->msg.connect_b3_active_ind.ncci); actcapi_debug_ncpi(&msg->msg.connect_b3_active_ind.ncpi); break; case 26: /* MANUFACTURER IND */ printk(KERN_DEBUG " Mmsg = 0x%02x\n", msg->msg.manufacturer_ind_err.manuf_msg); switch (msg->msg.manufacturer_ind_err.manuf_msg) { case 3: printk(KERN_DEBUG " Contr = %d\n", msg->msg.manufacturer_ind_err.controller); printk(KERN_DEBUG " Code = 0x%08x\n", msg->msg.manufacturer_ind_err.errcode); memset(tmp, 0, sizeof(tmp)); strncpy(tmp, &msg->msg.manufacturer_ind_err.errstring, msg->hdr.len - 16); printk(KERN_DEBUG " Emsg = '%s'\n", tmp); break; } break; case 30: /* LISTEN REQ */ printk(KERN_DEBUG " Imsk = 0x%08x\n", msg->msg.listen_req.infomask); printk(KERN_DEBUG " Emsk = 0x%04x\n", msg->msg.listen_req.eazmask); printk(KERN_DEBUG " Smsk = 0x%04x\n", msg->msg.listen_req.simask); break; case 35: /* SELECT_B2_PROTOCOL_REQ */ printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.select_b2_protocol_req.plci); printk(KERN_DEBUG " prot = 0x%02x\n", msg->msg.select_b2_protocol_req.protocol); if (msg->hdr.len >= 11) printk(KERN_DEBUG "No dlpd\n"); else actcapi_debug_dlpd(&msg->msg.select_b2_protocol_req.dlpd); break; case 44: /* CONNECT RESP */ printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.connect_resp.plci); printk(KERN_DEBUG " CAUSE = 0x%02x\n", msg->msg.connect_resp.rejectcause); break; case 45: /* CONNECT ACTIVE RESP */ printk(KERN_DEBUG " PLCI = 0x%04x\n", msg->msg.connect_active_resp.plci); break; } } #endif
shaowei-wang/520board-v1-linux-2.6.21.x
drivers/isdn/act2000/capi.c
C
gpl-2.0
33,040
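The dump_skb() helper in the capi.c listing above shows the standard debug-dump pattern: accumulate hex bytes into a line buffer, flush at a row boundary, and flush once more for any remainder. Below is a minimal, self-contained user-space sketch of the same 16-bytes-per-line hex dump, using stdio instead of printk and flushing on every full row (the driver's own flush thresholds differ slightly); illustrative only, not part of the driver:

#include <stdio.h>

/* Print `len` bytes at `buf`, 16 per line, flushing any partial last row. */
static void hexdump(const unsigned char *buf, int len)
{
    char line[80];              /* 16 bytes * 3 chars + NUL fits easily */
    char *t = line;
    int i;

    for (i = 0; i < len; i++) {
        t += sprintf(t, "%02x ", buf[i]);
        if ((i & 0x0f) == 0x0f) {   /* 16th byte of the row: flush it */
            printf("dump: %s\n", line);
            t = line;
        }
    }
    if (i & 0x0f)                   /* leftover partial row */
        printf("dump: %s\n", line);
}

For a 20-byte buffer this prints one full 16-byte row followed by a 4-byte remainder row.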
/* arch/arm/mach-msm/cpufreq.c * * MSM architecture cpufreq driver * * Copyright (C) 2007 Google, Inc. * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved. * Author: Mike A. Chan <mikechan@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/earlysuspend.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/workqueue.h> #include <linux/completion.h> #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/sched.h> #include <linux/suspend.h> #include <mach/socinfo.h> #include "acpuclock.h" #ifdef CONFIG_SMP struct cpufreq_work_struct { struct work_struct work; struct cpufreq_policy *policy; struct completion complete; int frequency; int status; }; static DEFINE_PER_CPU(struct cpufreq_work_struct, cpufreq_work); static struct workqueue_struct *msm_cpufreq_wq; #endif struct cpufreq_suspend_t { struct mutex suspend_mutex; int device_suspended; }; static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend); static int override_cpu; static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq) { int ret = 0; struct cpufreq_freqs freqs; freqs.old = policy->cur; if (override_cpu) { if (policy->cur == policy->max) return 0; else freqs.new = policy->max; } else freqs.new = new_freq; freqs.cpu = policy->cpu; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); ret = acpuclk_set_rate(policy->cpu, new_freq, SETRATE_CPUFREQ); if (!ret) cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); return ret; } #ifdef CONFIG_SMP static void set_cpu_work(struct work_struct *work) { struct cpufreq_work_struct *cpu_work = container_of(work, struct cpufreq_work_struct, work); cpu_work->status = set_cpu_freq(cpu_work->policy, cpu_work->frequency); complete(&cpu_work->complete); } #endif static int msm_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { int ret = -EFAULT; int index; struct cpufreq_frequency_table *table; #ifdef CONFIG_SMP struct cpufreq_work_struct *cpu_work = NULL; cpumask_var_t mask; if (!cpu_active(policy->cpu)) { pr_info("cpufreq: cpu %d is not active.\n", policy->cpu); return -ENODEV; } if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; #endif mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex); if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) { pr_debug("cpufreq: cpu%d scheduling frequency change " "in suspend.\n", policy->cpu); ret = -EFAULT; goto done; } table = cpufreq_frequency_get_table(policy->cpu); if (cpufreq_frequency_table_target(policy, table, target_freq, relation, &index)) { pr_err("cpufreq: invalid target_freq: %d\n", target_freq); ret = -EINVAL; goto done; } #ifdef CONFIG_CPU_FREQ_DEBUG pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n", policy->cpu, target_freq, relation, policy->min, policy->max, table[index].frequency); #endif #ifdef CONFIG_SMP cpu_work = &per_cpu(cpufreq_work, policy->cpu); cpu_work->policy = policy; cpu_work->frequency = table[index].frequency; cpu_work->status = -ENODEV; cpumask_clear(mask); cpumask_set_cpu(policy->cpu, mask); if (cpumask_equal(mask, &current->cpus_allowed)) { ret = 
set_cpu_freq(cpu_work->policy, cpu_work->frequency); goto done; } else { cancel_work_sync(&cpu_work->work); INIT_COMPLETION(cpu_work->complete); queue_work_on(policy->cpu, msm_cpufreq_wq, &cpu_work->work); wait_for_completion(&cpu_work->complete); } ret = cpu_work->status; #else ret = set_cpu_freq(policy, table[index].frequency); #endif done: #ifdef CONFIG_SMP free_cpumask_var(mask); #endif mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex); return ret; } static int msm_cpufreq_verify(struct cpufreq_policy *policy) { cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); return 0; } static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy) { int cur_freq; int index; struct cpufreq_frequency_table *table; #ifdef CONFIG_SMP struct cpufreq_work_struct *cpu_work = NULL; #endif if (cpu_is_apq8064()) return -ENODEV; table = cpufreq_frequency_get_table(policy->cpu); if (cpufreq_frequency_table_cpuinfo(policy, table)) { #ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN; policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX; #endif } #ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX policy->min = CONFIG_MSM_CPU_FREQ_MIN; policy->max = CONFIG_MSM_CPU_FREQ_MAX; #endif cur_freq = acpuclk_get_rate(policy->cpu); if (cpufreq_frequency_table_target(policy, table, cur_freq, CPUFREQ_RELATION_H, &index) && cpufreq_frequency_table_target(policy, table, cur_freq, CPUFREQ_RELATION_L, &index)) { pr_info("cpufreq: cpu%d at invalid freq: %d\n", policy->cpu, cur_freq); return -EINVAL; } if (cur_freq != table[index].frequency) { int ret = 0; ret = acpuclk_set_rate(policy->cpu, table[index].frequency, SETRATE_CPUFREQ); if (ret) return ret; pr_info("cpufreq: cpu%d init at %d switching to %d\n", policy->cpu, cur_freq, table[index].frequency); cur_freq = table[index].frequency; } policy->cur = cur_freq; policy->cpuinfo.transition_latency = acpuclk_get_switch_time() * NSEC_PER_USEC; #ifdef CONFIG_SMP cpu_work = &per_cpu(cpufreq_work, policy->cpu); INIT_WORK(&cpu_work->work, set_cpu_work); init_completion(&cpu_work->complete); #endif return 0; } static int msm_cpufreq_suspend(struct cpufreq_policy *policy) { int cpu; for_each_possible_cpu(cpu) { per_cpu(cpufreq_suspend, cpu).device_suspended = 1; } return 0; } static int msm_cpufreq_resume(struct cpufreq_policy *policy) { int cpu; for_each_possible_cpu(cpu) { per_cpu(cpufreq_suspend, cpu).device_suspended = 0; } return 0; } static ssize_t store_mfreq(struct sysdev_class *class, struct sysdev_class_attribute *attr, const char *buf, size_t count) { u64 val; if (strict_strtoull(buf, 0, &val) < 0) { pr_err("Invalid parameter to mfreq\n"); return 0; } if (val) override_cpu = 1; else override_cpu = 0; return count; } static SYSDEV_CLASS_ATTR(mfreq, 0200, NULL, store_mfreq); static struct freq_attr *msm_freq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver msm_cpufreq_driver = { /* lps calculations are handled here. 
*/ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS, .init = msm_cpufreq_init, .verify = msm_cpufreq_verify, .target = msm_cpufreq_target, .suspend = msm_cpufreq_suspend, .resume = msm_cpufreq_resume, .name = "msm", .attr = msm_freq_attr, }; static int __init msm_cpufreq_register(void) { int cpu; int err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, &attr_mfreq.attr); if (err) pr_err("Failed to create sysfs mfreq\n"); for_each_possible_cpu(cpu) { mutex_init(&(per_cpu(cpufreq_suspend, cpu).suspend_mutex)); per_cpu(cpufreq_suspend, cpu).device_suspended = 0; } #ifdef CONFIG_SMP msm_cpufreq_wq = create_workqueue("msm-cpufreq"); #endif return cpufreq_register_driver(&msm_cpufreq_driver); } late_initcall(msm_cpufreq_register);
BytecodeMe/vanquish
arch/arm/mach-msm/cpufreq.c
C
gpl-2.0
7,616
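msm_cpufreq_target() above only calls set_cpu_freq() directly when the caller is already bound to the target CPU; otherwise it queues a work item on that CPU via queue_work_on() and blocks on a completion to collect the result. A stripped-down sketch of that cross-CPU dispatch pattern follows; do_set_freq() is a hypothetical stand-in for the driver's set_cpu_freq(), and the sketch uses an on-stack work item rather than the driver's reusable per-CPU one with cancel_work_sync()/INIT_COMPLETION:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/errno.h>

struct freq_work {
    struct work_struct work;
    int frequency;
    int status;
    struct completion done;
};

static int do_set_freq(int freq);      /* hypothetical stand-in */

static void freq_work_fn(struct work_struct *work)
{
    struct freq_work *fw = container_of(work, struct freq_work, work);

    /* Runs on the target CPU, so the rate change is CPU-local. */
    fw->status = do_set_freq(fw->frequency);
    complete(&fw->done);
}

/* Ask `cpu` to switch itself to `freq` and wait for the verdict. */
static int set_freq_on_cpu(struct workqueue_struct *wq, int cpu, int freq)
{
    struct freq_work fw = { .frequency = freq, .status = -ENODEV };

    INIT_WORK(&fw.work, freq_work_fn);
    init_completion(&fw.done);
    queue_work_on(cpu, wq, &fw.work);   /* pin the work to `cpu` */
    wait_for_completion(&fw.done);
    return fw.status;
}

The completion is what turns the asynchronous work item into a synchronous call: the caller sleeps until the target CPU has actually attempted the rate change and reported status.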
/** * \file drawMath.c * \brief outputs the math of a model as a dot graph * \author Sarah Keating * * This file is part of libSBML. Please visit http://sbml.org for more * information about SBML, and the latest version of libSBML. */ #include <stdio.h> #include <stdlib.h> #include <sbml/util/util.h> #include <sbml/SBMLTypes.h> #include "FormulaGraphvizFormatter.h" static int noClusters = 0; FILE * fout; /** * @return the given formula AST as a directed graph. The caller * owns the returned string and is responsible for freeing it. */ char * SBML_formulaToDot (const ASTNode_t *tree) { StringBuffer_t *sb = StringBuffer_create(128); char *name; char *s; if (FormulaGraphvizFormatter_isFunction(tree) || ASTNode_isOperator(tree)) { FormulaGraphvizFormatter_visit(NULL, tree, sb); } else { name = FormulaGraphvizFormatter_format(tree); StringBuffer_append(sb, name); } StringBuffer_append(sb, "}\n"); s = StringBuffer_getBuffer(sb); free(sb); return s; } /** * @return true (non-zero) if the given ASTNode is to formatted as a * function. */ int FormulaGraphvizFormatter_isFunction (const ASTNode_t *node) { return ASTNode_isFunction (node) || ASTNode_isLambda (node) || ASTNode_isLogical (node) || ASTNode_isRelational(node); } /** * Formats the given ASTNode as a directed graph token and returns the result as * a string. */ char * FormulaGraphvizFormatter_format (const ASTNode_t *node) { StringBuffer_t *p = StringBuffer_create(128); char *s = NULL; if (ASTNode_isOperator(node)) { s = FormulaGraphvizFormatter_formatOperator(node); } else if (ASTNode_isFunction(node)) { s = FormulaGraphvizFormatter_formatFunction(node); } else if (ASTNode_isInteger(node)) { StringBuffer_appendInt(p, ASTNode_getInteger(node)); s = StringBuffer_toString(p); } else if (ASTNode_isRational(node)) { s = FormulaGraphvizFormatter_formatRational(node); } else if (ASTNode_isReal(node)) { s = FormulaGraphvizFormatter_formatReal(node); } else if ( !ASTNode_isUnknown(node) ) { if (ASTNode_getName(node) == NULL) { StringBuffer_append(p, "unknown"); } else { StringBuffer_append(p, ASTNode_getName(node)); } s = StringBuffer_toString(p); } free(p); return s; } /** * Since graphviz will interpret identical names as referring to * the same node presentation-wise it is better if each function node * has a unique name. * * Returns the name with the name of the first child * prepended * * THIS COULD BE DONE BETTER */ char * FormulaGraphvizFormatter_getUniqueName (const ASTNode_t *node) { StringBuffer_t *p = StringBuffer_create(128); char *s = NULL; if (ASTNode_isOperator(node)) { s = FormulaGraphvizFormatter_OperatorGetUniqueName(node); } else if (ASTNode_isFunction(node)) { s = FormulaGraphvizFormatter_FunctionGetUniqueName(node); } else if (ASTNode_isInteger(node)) { StringBuffer_appendInt(p, ASTNode_getInteger(node)); s = StringBuffer_toString(p); } else if (ASTNode_isRational(node)) { s = FormulaGraphvizFormatter_formatRational(node); } else if (ASTNode_isReal(node)) { s = FormulaGraphvizFormatter_formatReal(node); } else if ( !ASTNode_isUnknown(node) ) { StringBuffer_append(p, ASTNode_getName(node)); s = StringBuffer_toString(p); } free(p); return s; } /** * Formats the given ASTNode as a directed graph function name and returns the * result as a string. 
*/ char * FormulaGraphvizFormatter_formatFunction (const ASTNode_t *node) { char *s; StringBuffer_t *p = StringBuffer_create(128); ASTNodeType_t type = ASTNode_getType(node); switch (type) { case AST_FUNCTION_ARCCOS: s = "acos"; break; case AST_FUNCTION_ARCSIN: s = "asin"; break; case AST_FUNCTION_ARCTAN: s = "atan"; break; case AST_FUNCTION_CEILING: s = "ceil"; break; case AST_FUNCTION_LN: s = "log"; break; case AST_FUNCTION_POWER: s = "pow"; break; default: if (ASTNode_getName(node) == NULL) { StringBuffer_append(p, "unknown"); } else { StringBuffer_append(p, ASTNode_getName(node)); } s = StringBuffer_toString(p); break; } free(p); return s; } /** * Since graphviz will interpret identical names as referring to * the same node presentation-wise it is better if each function node * has a unique name. * * Returns the name of the function with the name of the first child * prepended * * THIS COULD BE DONE BETTER */ char * FormulaGraphvizFormatter_FunctionGetUniqueName (const ASTNode_t *node) { char *s; StringBuffer_t *p = StringBuffer_create(128); ASTNodeType_t type = ASTNode_getType(node); if (ASTNode_getNumChildren(node) != 0) { const char* name = ASTNode_getName(ASTNode_getChild(node,0)); if (name != NULL) StringBuffer_append(p, name); } else { StringBuffer_append(p, "unknown"); } switch (type) { case AST_FUNCTION_ARCCOS: StringBuffer_append(p, "acos"); break; case AST_FUNCTION_ARCSIN: StringBuffer_append(p, "asin"); break; case AST_FUNCTION_ARCTAN: StringBuffer_append(p, "atan"); break; case AST_FUNCTION_CEILING: StringBuffer_append(p, "ceil"); break; case AST_FUNCTION_LN: StringBuffer_append(p, "log"); break; case AST_FUNCTION_POWER: StringBuffer_append(p, "pow"); break; default: if (ASTNode_getName(node) != NULL) { StringBuffer_append(p, ASTNode_getName(node)); } break; } s = StringBuffer_toString(p); free(p); return s; } /** * Formats the given ASTNode as a directed graph operator and returns the result * as a string. */ char * FormulaGraphvizFormatter_formatOperator (const ASTNode_t *node) { char *s; ASTNodeType_t type = ASTNode_getType(node); StringBuffer_t *p = StringBuffer_create(128); switch (type) { case AST_TIMES: s = "times"; break; case AST_DIVIDE: s = "divide"; break; case AST_PLUS: s = "plus"; break; case AST_MINUS: s = "minus"; break; case AST_POWER: s = "power"; break; default: StringBuffer_appendChar(p, ASTNode_getCharacter(node)); s = StringBuffer_toString(p); break; } free(p); return s; } /** * Since graphviz will interpret identical names as referring to * the same node presentation-wise it is better if each function node * has a unique name. 
* * Returns the name of the operator with the name of the first child * prepended * * THIS COULD BE DONE BETTER */ char * FormulaGraphvizFormatter_OperatorGetUniqueName (const ASTNode_t *node) { char *s; char number[10]; StringBuffer_t *p = StringBuffer_create(128); ASTNodeType_t type = ASTNode_getType(node); if (FormulaGraphvizFormatter_isFunction(ASTNode_getChild(node,0)) || ASTNode_isOperator(ASTNode_getChild(node,0))) { StringBuffer_append(p, "func"); } else { if (ASTNode_isInteger(ASTNode_getChild(node, 0))) { sprintf(number, "%d", (int)ASTNode_getInteger(ASTNode_getChild(node, 0))); StringBuffer_append(p, number); } else if (ASTNode_isReal(ASTNode_getChild(node, 0))) { sprintf(number, "%ld", ASTNode_getNumerator(ASTNode_getChild(node, 0))); StringBuffer_append(p, number); } else { StringBuffer_append(p, ASTNode_getName(ASTNode_getChild(node,0))); } } switch (type) { case AST_TIMES: StringBuffer_append(p, "times"); break; case AST_DIVIDE: StringBuffer_append(p, "divide"); break; case AST_PLUS: StringBuffer_append(p, "plus"); break; case AST_MINUS: StringBuffer_append(p, "minus"); break; case AST_POWER: StringBuffer_append(p, "power"); break; default: StringBuffer_appendChar(p, ASTNode_getCharacter(node)); break; } s = StringBuffer_toString(p); free(p); return s; } /** * Formats the given ASTNode as a rational number and returns the result as * a string. This amounts to: * * "(numerator/denominator)" */ char * FormulaGraphvizFormatter_formatRational (const ASTNode_t *node) { char *s; StringBuffer_t *p = StringBuffer_create(128); StringBuffer_appendChar( p, '('); StringBuffer_appendInt ( p, ASTNode_getNumerator(node) ); StringBuffer_appendChar( p, '/'); StringBuffer_appendInt ( p, ASTNode_getDenominator(node) ); StringBuffer_appendChar( p, ')'); s = StringBuffer_toString(p); free(p); return s; } /** * Formats the given ASTNode as a real number and returns the result as * a string. */ char * FormulaGraphvizFormatter_formatReal (const ASTNode_t *node) { StringBuffer_t *p = StringBuffer_create(128); double value = ASTNode_getReal(node); int sign; char *s; if (util_isNaN(value)) { s = "NaN"; } else if ((sign = util_isInf(value)) != 0) { if (sign == -1) { s = "-INF"; } else { s = "INF"; } } else if (util_isNegZero(value)) { s = "-0"; } else { StringBuffer_appendReal(p, value); s = StringBuffer_toString(p); } free(p); return s; } /** * Visits the given ASTNode node. This function is really just a * dispatcher to either FormulaGraphvizFormatter_visitFunction() or * FormulaGraphvizFormatter_visitOther(). */ void FormulaGraphvizFormatter_visit (const ASTNode_t *parent, const ASTNode_t *node, StringBuffer_t *sb ) { if (ASTNode_isLog10(node)) { FormulaGraphvizFormatter_visitLog10(parent, node, sb); } else if (ASTNode_isSqrt(node)) { FormulaGraphvizFormatter_visitSqrt(parent, node, sb); } else if (FormulaGraphvizFormatter_isFunction(node)) { FormulaGraphvizFormatter_visitFunction(parent, node, sb); } else if (ASTNode_isUMinus(node)) { FormulaGraphvizFormatter_visitUMinus(parent, node, sb); } else { FormulaGraphvizFormatter_visitOther(parent, node, sb); } } /** * Visits the given ASTNode as a function. For this node only the * traversal is preorder. * Writes the function as a directed graph and appends the result * to the StringBuffer. 
 */
void
FormulaGraphvizFormatter_visitFunction (const ASTNode_t *parent,
                                        const ASTNode_t *node,
                                        StringBuffer_t  *sb )
{
  unsigned int numChildren = ASTNode_getNumChildren(node);
  unsigned int n;
  char *name;
  char *uniqueName;

  uniqueName = FormulaGraphvizFormatter_getUniqueName(node);
  name       = FormulaGraphvizFormatter_format(node);

  StringBuffer_append(sb, uniqueName);
  StringBuffer_append(sb, " [shape=box, label=");
  StringBuffer_append(sb, name);
  StringBuffer_append(sb, "];\n");

  if (parent != NULL)
  {
    name       = FormulaGraphvizFormatter_getUniqueName(node);
    uniqueName = FormulaGraphvizFormatter_getUniqueName(parent);

    if (strcmp(name, uniqueName))
    {
      StringBuffer_append(sb, uniqueName);
      StringBuffer_append(sb, " -> ");
      StringBuffer_append(sb, name);
      StringBuffer_append(sb, ";\n");
    }
  }

  if (numChildren > 0)
  {
    FormulaGraphvizFormatter_visit( node, ASTNode_getChild(node, 0), sb );
  }

  for (n = 1; n < numChildren; n++)
  {
    FormulaGraphvizFormatter_visit( node, ASTNode_getChild(node, n), sb );
  }
}

/**
 * Visits the given ASTNode as the function "log(10, x)" and in doing so,
 * formats it as "log10(x)" (where x is any subexpression).
 * Writes the function as a directed graph and appends the result
 * to the StringBuffer.
 *
 * A separate function may not be strictly speaking necessary for graphs
 */
void
FormulaGraphvizFormatter_visitLog10 (const ASTNode_t *parent,
                                     const ASTNode_t *node,
                                     StringBuffer_t  *sb )
{
  char *uniqueName = FormulaGraphvizFormatter_getUniqueName(node);
  char *name       = FormulaGraphvizFormatter_format(node);

  StringBuffer_append(sb, uniqueName);
  StringBuffer_append(sb, " [shape=box, label=");
  StringBuffer_append(sb, name);
  StringBuffer_append(sb, "];\n");

  FormulaGraphvizFormatter_visit(node, ASTNode_getChild(node, 1), sb);
}

/**
 * Visits the given ASTNode as the function "root(2, x)" and in doing so,
 * formats it as "sqrt(x)" (where x is any subexpression).
 * Writes the function as a directed graph and appends the result
 * to the StringBuffer.
 *
 * A separate function may not be strictly speaking necessary for graphs
 */
void
FormulaGraphvizFormatter_visitSqrt (const ASTNode_t *parent,
                                    const ASTNode_t *node,
                                    StringBuffer_t  *sb )
{
  char *uniqueName = FormulaGraphvizFormatter_getUniqueName(node);
  char *name       = FormulaGraphvizFormatter_format(node);

  StringBuffer_append(sb, uniqueName);
  StringBuffer_append(sb, " [shape=box, label=");
  StringBuffer_append(sb, name);
  StringBuffer_append(sb, "];\n");

  FormulaGraphvizFormatter_visit(node, ASTNode_getChild(node, 1), sb);
}

/**
 * Visits the given ASTNode as a unary minus. For this node only the
 * traversal is preorder.
 * Writes the function as a directed graph and appends the result
 * to the StringBuffer.
 */
void
FormulaGraphvizFormatter_visitUMinus (const ASTNode_t *parent,
                                      const ASTNode_t *node,
                                      StringBuffer_t  *sb )
{
  char *uniqueName = FormulaGraphvizFormatter_getUniqueName(node);
  char *name       = FormulaGraphvizFormatter_format(node);

  StringBuffer_append(sb, uniqueName);
  StringBuffer_append(sb, " [shape=box, label=");
  StringBuffer_append(sb, name);
  StringBuffer_append(sb, "];\n");

  if (parent != NULL)
  {
    uniqueName = FormulaGraphvizFormatter_getUniqueName(parent);
    name       = FormulaGraphvizFormatter_getUniqueName(node);

    if (strcmp(name, uniqueName))
    {
      StringBuffer_append(sb, uniqueName);
      StringBuffer_append(sb, " -> ");
      StringBuffer_append(sb, name);
      StringBuffer_append(sb, ";\n");
    }
  }

  FormulaGraphvizFormatter_visit ( node, ASTNode_getLeftChild(node), sb );
}

/**
 * Visits the given ASTNode and continues the inorder traversal.
* Writes the function as a directed graph and appends the result * to the StringBuffer. */ void FormulaGraphvizFormatter_visitOther (const ASTNode_t *parent, const ASTNode_t *node, StringBuffer_t *sb ) { unsigned int numChildren = ASTNode_getNumChildren(node); char *name; char *uniqueName; if (numChildren > 0) { uniqueName = FormulaGraphvizFormatter_getUniqueName(node); name = FormulaGraphvizFormatter_format(node); StringBuffer_append(sb, uniqueName); StringBuffer_append(sb, " [shape=box, label="); StringBuffer_append(sb, name); StringBuffer_append(sb, "];\n"); FormulaGraphvizFormatter_visit( node, ASTNode_getLeftChild(node), sb ); } if (parent != NULL) { name = FormulaGraphvizFormatter_getUniqueName(node); uniqueName = FormulaGraphvizFormatter_getUniqueName(parent); if(strcmp(name, uniqueName)) { StringBuffer_append(sb, uniqueName); StringBuffer_append(sb, " -> "); StringBuffer_append(sb, name); StringBuffer_append(sb, ";\n"); } } if (numChildren > 1) { FormulaGraphvizFormatter_visit( node, ASTNode_getRightChild(node), sb ); } } void printFunctionDefinition (unsigned int n, FunctionDefinition_t *fd) { const ASTNode_t *math; char *formula; if ( FunctionDefinition_isSetMath(fd) ) { math = FunctionDefinition_getMath(fd); /* Print function body. */ if (ASTNode_getNumChildren(math) == 0) { printf("(no body defined)"); } else { math = ASTNode_getChild(math, ASTNode_getNumChildren(math) - 1); formula = SBML_formulaToDot(math); fprintf(fout, "subgraph cluster%u {\n", noClusters); fprintf(fout, "label=\"FunctionDefinition: %s\";\n%s\n", FunctionDefinition_getId(fd), formula); free(formula); noClusters++; } } } void printRuleMath (unsigned int n, Rule_t *r) { char *formula; if ( Rule_isSetMath(r) ) { formula = SBML_formulaToDot( Rule_getMath(r)); fprintf(fout, "subgraph cluster%u {\n", noClusters); fprintf(fout, "label=\"Rule: %u\";\n%s\n", n, formula); free(formula); noClusters++; } } void printReactionMath (unsigned int n, Reaction_t *r) { char *formula; KineticLaw_t *kl; if (Reaction_isSetKineticLaw(r)) { kl = Reaction_getKineticLaw(r); if ( KineticLaw_isSetMath(kl) ) { formula = SBML_formulaToDot( KineticLaw_getMath(kl) ); fprintf(fout, "subgraph cluster%u {\n", noClusters); fprintf(fout, "label=\"Reaction: %s\";\n%s\n", Reaction_getId(r), formula); free(formula); noClusters++; } } } void printEventAssignmentMath (unsigned int n, EventAssignment_t *ea) { const char *variable; char *formula; if ( EventAssignment_isSetMath(ea) ) { variable = EventAssignment_getVariable(ea); formula = SBML_formulaToDot( EventAssignment_getMath(ea) ); fprintf(fout, "subgraph cluster%u {\n", noClusters); fprintf(fout, "label=\"EventAssignment: %u\";\n", n); fprintf(fout, "%s [shape=box];\n%s -> %s\n", variable, variable, formula); noClusters++; free(formula); } } void printEventMath (unsigned int n, Event_t *e) { char *formula; unsigned int i; if ( Event_isSetDelay(e) ) { formula = SBML_formulaToDot( Delay_getMath(Event_getDelay(e)) ); fprintf(fout, "subgraph cluster%u {\n", noClusters); fprintf(fout, "label=\"Event %s delay:\";\n%s\n", Event_getId(e), formula); free(formula); noClusters++; } if ( Event_isSetTrigger(e) ) { formula = SBML_formulaToDot( Trigger_getMath(Event_getTrigger(e)) ); fprintf(fout, "subgraph cluster%u {\n", noClusters); fprintf(fout, "label=\"Event %s trigger:\";\n%s\n", Event_getId(e), formula); noClusters++; free(formula); } for (i = 0; i < Event_getNumEventAssignments(e); ++i) { printEventAssignmentMath(i + 1, Event_getEventAssignment(e, i)); } } void printMath (Model_t *m) { unsigned int 
n; /* a digraph must have a name thus * need to check that Model_getId does not return NULL * and provide a name if it does */ if (Model_getId(m) != NULL) { fprintf(fout, "digraph %s {\n", Model_getId(m)); } else { fprintf(fout, "digraph example {\n"); } fprintf(fout, "compound=true;\n"); for (n = 0; n < Model_getNumFunctionDefinitions(m); ++n) { printFunctionDefinition(n + 1, Model_getFunctionDefinition(m, n)); } for (n = 0; n < Model_getNumRules(m); ++n) { printRuleMath(n + 1, Model_getRule(m, n)); } printf("\n"); for (n = 0; n < Model_getNumReactions(m); ++n) { printReactionMath(n + 1, Model_getReaction(m, n)); } printf("\n"); for (n = 0; n < Model_getNumEvents(m); ++n) { printEventMath(n + 1, Model_getEvent(m, n)); } fprintf(fout, "}\n"); } int main (int argc, char *argv[]) { SBMLDocument_t *d; Model_t *m; if (argc != 3) { printf("\n usage: drawMath <sbml filename> <output dot filename>\n\n"); return 1; } d = readSBML(argv[1]); m = SBMLDocument_getModel(d); SBMLDocument_printErrors(d, stdout); if ((fout = fopen( argv[2], "w" )) == NULL ) { printf( "The output file was not opened\n" ); } else { printMath(m); fclose(fout); } SBMLDocument_free(d); return 0; }
dilawar/moose-full
dependencies/libsbml-5.9.0/examples/c/drawMath.c
C
gpl-2.0
20,197
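The drawMath example above hands SBML_formulaToDot() a formula AST and wraps the returned fragment in digraph/subgraph syntax itself; note that the returned string already carries the closing brace appended inside SBML_formulaToDot(). A minimal standalone use of the converter, assuming the same libSBML setup (SBML_parseFormula is libSBML's stock infix parser, declared via sbml/SBMLTypes.h):

#include <stdio.h>
#include <stdlib.h>
#include <sbml/SBMLTypes.h>

char *SBML_formulaToDot(const ASTNode_t *tree);  /* from drawMath.c above */

int main(void)
{
    /* Parse a simple infix formula into an AST, then render it as dot. */
    ASTNode_t *math = SBML_parseFormula("k1 * S1");
    char *dot = SBML_formulaToDot(math);

    /* SBML_formulaToDot() already appends the closing brace. */
    printf("digraph example {\n%s", dot);

    free(dot);
    ASTNode_free(math);
    return 0;
}

The unique-name scheme in the converter (prepending the first child's name to the operator label) is what keeps graphviz from merging, say, two distinct "times" nodes into one.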
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/slab.h> #include <linux/kthread.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <linux/mutex.h> #include <linux/msm_audio_ion.h> #include <asm/mach-types.h> #include <mach/qdsp6v2/rtac.h> #include <mach/socinfo.h> #include <mach/qdsp6v2/apr_tal.h> #include "sound/apr_audio-v2.h" #include "sound/q6afe-v2.h" #include "audio_acdb.h" #include "q6voice.h" #define TIMEOUT_MS 500 #define CMD_STATUS_SUCCESS 0 #define CMD_STATUS_FAIL 1 /* CVP CAL Size: 245760 = 240 * 1024 */ #define CVP_CAL_SIZE 245760 /* CVS CAL Size: 49152 = 48 * 1024 */ #define CVS_CAL_SIZE 49152 enum { VOC_TOKEN_NONE, VOIP_MEM_MAP_TOKEN, VOC_CAL_MEM_MAP_TOKEN, }; static struct common_data common; static int voice_send_enable_vocproc_cmd(struct voice_data *v); static int voice_send_netid_timing_cmd(struct voice_data *v); static int voice_send_attach_vocproc_cmd(struct voice_data *v); static int voice_send_set_device_cmd(struct voice_data *v); static int voice_send_disable_vocproc_cmd(struct voice_data *v); static int voice_send_vol_index_cmd(struct voice_data *v); static int voice_send_mvm_unmap_memory_physical_cmd(struct voice_data *v, uint32_t mem_handle); static int voice_send_mvm_cal_network_cmd(struct voice_data *v); static int voice_send_mvm_media_type_cmd(struct voice_data *v); static int voice_send_cvs_data_exchange_mode_cmd(struct voice_data *v); static int voice_send_cvs_packet_exchange_config_cmd(struct voice_data *v); static int voice_set_packet_exchange_mode_and_config(uint32_t session_id, uint32_t mode); static int voice_send_cvs_register_cal_cmd(struct voice_data *v); static int voice_send_cvs_deregister_cal_cmd(struct voice_data *v); static int voice_send_cvp_register_dev_cfg_cmd(struct voice_data *v); static int voice_send_cvp_deregister_dev_cfg_cmd(struct voice_data *v); static int voice_send_cvp_register_cal_cmd(struct voice_data *v); static int voice_send_cvp_deregister_cal_cmd(struct voice_data *v); static int voice_send_cvp_register_vol_cal_cmd(struct voice_data *v); static int voice_send_cvp_deregister_vol_cal_cmd(struct voice_data *v); static int voice_cvs_stop_playback(struct voice_data *v); static int voice_cvs_start_playback(struct voice_data *v); static int voice_cvs_start_record(struct voice_data *v, uint32_t rec_mode); static int voice_cvs_stop_record(struct voice_data *v); static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv); static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv); static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv); static int voice_send_set_pp_enable_cmd(struct voice_data *v, uint32_t module_id, int enable); static struct voice_data *voice_get_session_by_idx(int idx); static u16 voice_get_mvm_handle(struct voice_data *v) { if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return 0; } pr_debug("%s: mvm_handle %d\n", __func__, v->mvm_handle); return v->mvm_handle; } static void voice_set_mvm_handle(struct voice_data *v, 
u16 mvm_handle) { pr_debug("%s: mvm_handle %d\n", __func__, mvm_handle); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return; } v->mvm_handle = mvm_handle; } static u16 voice_get_cvs_handle(struct voice_data *v) { if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return 0; } pr_debug("%s: cvs_handle %d\n", __func__, v->cvs_handle); return v->cvs_handle; } static void voice_set_cvs_handle(struct voice_data *v, u16 cvs_handle) { pr_debug("%s: cvs_handle %d\n", __func__, cvs_handle); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return; } v->cvs_handle = cvs_handle; } static u16 voice_get_cvp_handle(struct voice_data *v) { if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return 0; } pr_debug("%s: cvp_handle %d\n", __func__, v->cvp_handle); return v->cvp_handle; } static void voice_set_cvp_handle(struct voice_data *v, u16 cvp_handle) { pr_debug("%s: cvp_handle %d\n", __func__, cvp_handle); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return; } v->cvp_handle = cvp_handle; } char *voc_get_session_name(u32 session_id) { char *session_name = NULL; if (session_id == common.voice[VOC_PATH_PASSIVE].session_id) { session_name = VOICE_SESSION_NAME; } else if (session_id == common.voice[VOC_PATH_VOLTE_PASSIVE].session_id) { session_name = VOLTE_SESSION_NAME; } else if (session_id == common.voice[VOC_PATH_FULL].session_id) { session_name = VOIP_SESSION_NAME; } return session_name; } uint32_t voc_get_session_id(char *name) { u32 session_id = 0; if (name != NULL) { if (!strncmp(name, "Voice session", 13)) session_id = common.voice[VOC_PATH_PASSIVE].session_id; else if (!strncmp(name, "Voice2 session", 14)) session_id = common.voice[VOC_PATH_VOICE2_PASSIVE].session_id; else if (!strncmp(name, "VoLTE session", 13)) session_id = common.voice[VOC_PATH_VOLTE_PASSIVE].session_id; else session_id = common.voice[VOC_PATH_FULL].session_id; pr_debug("%s: %s has session id 0x%x\n", __func__, name, session_id); } return session_id; } static struct voice_data *voice_get_session(u32 session_id) { struct voice_data *v = NULL; switch (session_id) { case VOICE_SESSION_VSID: v = &common.voice[VOC_PATH_PASSIVE]; break; case VOICE2_SESSION_VSID: v = &common.voice[VOC_PATH_VOICE2_PASSIVE]; break; case VOLTE_SESSION_VSID: v = &common.voice[VOC_PATH_VOLTE_PASSIVE]; break; case VOIP_SESSION_VSID: v = &common.voice[VOC_PATH_FULL]; break; case ALL_SESSION_VSID: break; default: pr_err("%s: Invalid session_id : %x\n", __func__, session_id); break; } pr_debug("%s:session_id 0x%x session handle 0x%x\n", __func__, session_id, (unsigned int)v); return v; } int voice_get_idx_for_session(u32 session_id) { int idx = 0; switch (session_id) { case VOICE_SESSION_VSID: idx = VOC_PATH_PASSIVE; break; case VOICE2_SESSION_VSID: idx = VOC_PATH_VOICE2_PASSIVE; break; case VOLTE_SESSION_VSID: idx = VOC_PATH_VOLTE_PASSIVE; break; case VOIP_SESSION_VSID: idx = VOC_PATH_FULL; break; case ALL_SESSION_VSID: idx = MAX_VOC_SESSIONS - 1; break; default: pr_err("%s: Invalid session_id : %x\n", __func__, session_id); break; } return idx; } static struct voice_data *voice_get_session_by_idx(int idx) { return ((idx < 0 || idx >= MAX_VOC_SESSIONS) ? 
NULL : &common.voice[idx]); } static bool is_voice_session(u32 session_id) { return (session_id == common.voice[VOC_PATH_PASSIVE].session_id); } static bool is_voip_session(u32 session_id) { return (session_id == common.voice[VOC_PATH_FULL].session_id); } static bool is_volte_session(u32 session_id) { return (session_id == common.voice[VOC_PATH_VOLTE_PASSIVE].session_id); } static bool is_voice2_session(u32 session_id) { return (session_id == common.voice[VOC_PATH_VOICE2_PASSIVE].session_id); } static bool is_voc_state_active(int voc_state) { if ((voc_state == VOC_RUN) || (voc_state == VOC_CHANGE) || (voc_state == VOC_STANDBY)) return true; return false; } static void voc_set_error_state(uint16_t reset_proc) { struct voice_data *v = NULL; int i; for (i = 0; i < MAX_VOC_SESSIONS; i++) { if (reset_proc == APR_DEST_MODEM && i == VOC_PATH_FULL) continue; v = &common.voice[i]; if (v != NULL) v->voc_state = VOC_ERROR; } } static bool is_other_session_active(u32 session_id) { int i; bool ret = false; /* Check if there is other active session except the input one */ for (i = 0; i < MAX_VOC_SESSIONS; i++) { if (common.voice[i].session_id == session_id) continue; if ((common.voice[i].voc_state == VOC_RUN) || (common.voice[i].voc_state == VOC_CHANGE) || (common.voice[i].voc_state == VOC_STANDBY)) { ret = true; break; } } pr_debug("%s: ret %d\n", __func__, ret); return ret; } static void init_session_id(void) { common.voice[VOC_PATH_PASSIVE].session_id = VOICE_SESSION_VSID; common.voice[VOC_PATH_VOLTE_PASSIVE].session_id = VOLTE_SESSION_VSID; common.voice[VOC_PATH_VOICE2_PASSIVE].session_id = VOICE2_SESSION_VSID; common.voice[VOC_PATH_FULL].session_id = VOIP_SESSION_VSID; } static int voice_apr_register(void) { void *modem_mvm, *modem_cvs, *modem_cvp; pr_debug("%s\n", __func__); mutex_lock(&common.common_lock); /* register callback to APR */ if (common.apr_q6_mvm == NULL) { pr_debug("%s: Start to register MVM callback\n", __func__); common.apr_q6_mvm = apr_register("ADSP", "MVM", qdsp_mvm_callback, 0xFFFFFFFF, &common); if (common.apr_q6_mvm == NULL) { pr_err("%s: Unable to register MVM\n", __func__); goto err; } /* * Register with modem for SSR callback. The APR handle * is not stored since it is used only to receive notifications * and not for communication */ modem_mvm = apr_register("MODEM", "MVM", qdsp_mvm_callback, 0xFFFFFFFF, &common); if (modem_mvm == NULL) pr_err("%s: Unable to register MVM for MODEM\n", __func__); } if (common.apr_q6_cvs == NULL) { pr_debug("%s: Start to register CVS callback\n", __func__); common.apr_q6_cvs = apr_register("ADSP", "CVS", qdsp_cvs_callback, 0xFFFFFFFF, &common); if (common.apr_q6_cvs == NULL) { pr_err("%s: Unable to register CVS\n", __func__); goto err; } rtac_set_voice_handle(RTAC_CVS, common.apr_q6_cvs); /* * Register with modem for SSR callback. The APR handle * is not stored since it is used only to receive notifications * and not for communication */ modem_cvs = apr_register("MODEM", "CVS", qdsp_cvs_callback, 0xFFFFFFFF, &common); if (modem_cvs == NULL) pr_err("%s: Unable to register CVS for MODEM\n", __func__); } if (common.apr_q6_cvp == NULL) { pr_debug("%s: Start to register CVP callback\n", __func__); common.apr_q6_cvp = apr_register("ADSP", "CVP", qdsp_cvp_callback, 0xFFFFFFFF, &common); if (common.apr_q6_cvp == NULL) { pr_err("%s: Unable to register CVP\n", __func__); goto err; } rtac_set_voice_handle(RTAC_CVP, common.apr_q6_cvp); /* * Register with modem for SSR callback. 
The APR handle * is not stored since it is used only to receive notifications * and not for communication */ modem_cvp = apr_register("MODEM", "CVP", qdsp_cvp_callback, 0xFFFFFFFF, &common); if (modem_cvp == NULL) pr_err("%s: Unable to register CVP for MODEM\n", __func__); } mutex_unlock(&common.common_lock); return 0; err: if (common.apr_q6_cvs != NULL) { apr_deregister(common.apr_q6_cvs); common.apr_q6_cvs = NULL; rtac_set_voice_handle(RTAC_CVS, NULL); } if (common.apr_q6_mvm != NULL) { apr_deregister(common.apr_q6_mvm); common.apr_q6_mvm = NULL; } mutex_unlock(&common.common_lock); return -ENODEV; } static int voice_send_dual_control_cmd(struct voice_data *v) { int ret = 0; struct mvm_modem_dual_control_session_cmd mvm_voice_ctl_cmd; void *apr_mvm; u16 mvm_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; if (!apr_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); return -EINVAL; } pr_debug("%s: VoLTE command to MVM\n", __func__); if (is_volte_session(v->session_id) || is_voice_session(v->session_id) || is_voice2_session(v->session_id)) { mvm_handle = voice_get_mvm_handle(v); mvm_voice_ctl_cmd.hdr.hdr_field = APR_HDR_FIELD( APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_voice_ctl_cmd.hdr.pkt_size = APR_PKT_SIZE( APR_HDR_SIZE, sizeof(mvm_voice_ctl_cmd) - APR_HDR_SIZE); pr_debug("%s: send mvm Voice Ctl pkt size = %d\n", __func__, mvm_voice_ctl_cmd.hdr.pkt_size); mvm_voice_ctl_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); mvm_voice_ctl_cmd.hdr.dest_port = mvm_handle; mvm_voice_ctl_cmd.hdr.token = 0; mvm_voice_ctl_cmd.hdr.opcode = VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL; mvm_voice_ctl_cmd.voice_ctl.enable_flag = true; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_voice_ctl_cmd); if (ret < 0) { pr_err("%s: Error sending MVM Voice CTL CMD\n", __func__); ret = -EINVAL; goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); ret = -EINVAL; goto fail; } } ret = 0; fail: return ret; } static int voice_create_mvm_cvs_session(struct voice_data *v) { int ret = 0; struct mvm_create_ctl_session_cmd mvm_session_cmd; struct cvs_create_passive_ctl_session_cmd cvs_session_cmd; struct cvs_create_full_ctl_session_cmd cvs_full_ctl_cmd; struct mvm_attach_stream_cmd attach_stream_cmd; void *apr_mvm, *apr_cvs, *apr_cvp; u16 mvm_handle, cvs_handle, cvp_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; apr_cvs = common.apr_q6_cvs; apr_cvp = common.apr_q6_cvp; if (!apr_mvm || !apr_cvs || !apr_cvp) { pr_err("%s: apr_mvm or apr_cvs or apr_cvp is NULL\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); cvs_handle = voice_get_cvs_handle(v); cvp_handle = voice_get_cvp_handle(v); pr_debug("%s: mvm_hdl=%d, cvs_hdl=%d\n", __func__, mvm_handle, cvs_handle); /* send cmd to create mvm session and wait for response */ if (!mvm_handle) { if (is_voice_session(v->session_id) || is_volte_session(v->session_id) || is_voice2_session(v->session_id)) { mvm_session_cmd.hdr.hdr_field = APR_HDR_FIELD( APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_session_cmd.hdr.pkt_size = APR_PKT_SIZE( APR_HDR_SIZE, sizeof(mvm_session_cmd) - APR_HDR_SIZE); pr_debug("%s: send mvm create session pkt size = %d\n", __func__, mvm_session_cmd.hdr.pkt_size); mvm_session_cmd.hdr.src_port = 
voice_get_idx_for_session(v->session_id); mvm_session_cmd.hdr.dest_port = 0; mvm_session_cmd.hdr.token = 0; mvm_session_cmd.hdr.opcode = VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION; if (is_volte_session(v->session_id)) { strlcpy(mvm_session_cmd.mvm_session.name, "default volte voice", sizeof(mvm_session_cmd.mvm_session.name)); } else if (is_voice2_session(v->session_id)) { strlcpy(mvm_session_cmd.mvm_session.name, VOICE2_SESSION_VSID_STR, sizeof(mvm_session_cmd.mvm_session.name)); } else { strlcpy(mvm_session_cmd.mvm_session.name, "default modem voice", sizeof(mvm_session_cmd.mvm_session.name)); } v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_session_cmd); if (ret < 0) { pr_err("%s: Error sending MVM_CONTROL_SESSION\n", __func__); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } } else { pr_debug("%s: creating MVM full ctrl\n", __func__); mvm_session_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_session_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_session_cmd) - APR_HDR_SIZE); mvm_session_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); mvm_session_cmd.hdr.dest_port = 0; mvm_session_cmd.hdr.token = 0; mvm_session_cmd.hdr.opcode = VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION; strlcpy(mvm_session_cmd.mvm_session.name, "default voip", sizeof(mvm_session_cmd.mvm_session.name)); v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_session_cmd); if (ret < 0) { pr_err("Fail in sending MVM_CONTROL_SESSION\n"); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } } /* Get the created MVM handle. */ mvm_handle = voice_get_mvm_handle(v); } /* send cmd to create cvs session */ if (!cvs_handle) { if (is_voice_session(v->session_id) || is_volte_session(v->session_id) || is_voice2_session(v->session_id)) { pr_debug("%s: creating CVS passive session\n", __func__); cvs_session_cmd.hdr.hdr_field = APR_HDR_FIELD( APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_session_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_session_cmd) - APR_HDR_SIZE); cvs_session_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_session_cmd.hdr.dest_port = 0; cvs_session_cmd.hdr.token = 0; cvs_session_cmd.hdr.opcode = VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION; if (is_volte_session(v->session_id)) { strlcpy(cvs_session_cmd.cvs_session.name, "default volte voice", sizeof(cvs_session_cmd.cvs_session.name)); } else if (is_voice2_session(v->session_id)) { strlcpy(cvs_session_cmd.cvs_session.name, VOICE2_SESSION_VSID_STR, sizeof(cvs_session_cmd.cvs_session.name)); } else { strlcpy(cvs_session_cmd.cvs_session.name, "default modem voice", sizeof(cvs_session_cmd.cvs_session.name)); } v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_session_cmd); if (ret < 0) { pr_err("Fail in sending STREAM_CONTROL_SESSION\n"); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } /* Get the created CVS handle. 
*/ cvs_handle = voice_get_cvs_handle(v); } else { pr_debug("%s: creating CVS full session\n", __func__); cvs_full_ctl_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_full_ctl_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_full_ctl_cmd) - APR_HDR_SIZE); cvs_full_ctl_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_full_ctl_cmd.hdr.dest_port = 0; cvs_full_ctl_cmd.hdr.token = 0; cvs_full_ctl_cmd.hdr.opcode = VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION; cvs_full_ctl_cmd.cvs_session.direction = 2; cvs_full_ctl_cmd.cvs_session.enc_media_type = common.mvs_info.media_type; cvs_full_ctl_cmd.cvs_session.dec_media_type = common.mvs_info.media_type; cvs_full_ctl_cmd.cvs_session.network_id = common.mvs_info.network_type; strlcpy(cvs_full_ctl_cmd.cvs_session.name, "default q6 voice", sizeof(cvs_full_ctl_cmd.cvs_session.name)); v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_full_ctl_cmd); if (ret < 0) { pr_err("%s: Err %d sending CREATE_FULL_CTRL\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } /* Get the created CVS handle. */ cvs_handle = voice_get_cvs_handle(v); /* Attach MVM to CVS. */ pr_debug("%s: Attach MVM to stream\n", __func__); attach_stream_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); attach_stream_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(attach_stream_cmd) - APR_HDR_SIZE); attach_stream_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); attach_stream_cmd.hdr.dest_port = mvm_handle; attach_stream_cmd.hdr.token = 0; attach_stream_cmd.hdr.opcode = VSS_IMVM_CMD_ATTACH_STREAM; attach_stream_cmd.attach_stream.handle = cvs_handle; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &attach_stream_cmd); if (ret < 0) { pr_err("%s: Error %d sending ATTACH_STREAM\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } } } return 0; fail: return -EINVAL; } static int voice_destroy_mvm_cvs_session(struct voice_data *v) { int ret = 0; struct mvm_detach_stream_cmd detach_stream; struct apr_hdr mvm_destroy; struct apr_hdr cvs_destroy; void *apr_mvm, *apr_cvs; u16 mvm_handle, cvs_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; apr_cvs = common.apr_q6_cvs; if (!apr_mvm || !apr_cvs) { pr_err("%s: apr_mvm or apr_cvs is NULL\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); cvs_handle = voice_get_cvs_handle(v); /* MVM, CVS sessions are destroyed only for Full control sessions. */ if (is_voip_session(v->session_id)) { pr_debug("%s: MVM detach stream, VOC_STATE: %d\n", __func__, v->voc_state); /* Detach voice stream. 
*/ detach_stream.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); detach_stream.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(detach_stream) - APR_HDR_SIZE); detach_stream.hdr.src_port = voice_get_idx_for_session(v->session_id); detach_stream.hdr.dest_port = mvm_handle; detach_stream.hdr.token = 0; detach_stream.hdr.opcode = VSS_IMVM_CMD_DETACH_STREAM; detach_stream.detach_stream.handle = cvs_handle; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &detach_stream); if (ret < 0) { pr_err("%s: Error %d sending DETACH_STREAM\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait event timeout\n", __func__); goto fail; } /* Unmap memory */ if (v->shmem_info.mem_handle != 0) { ret = voice_send_mvm_unmap_memory_physical_cmd(v, v->shmem_info.mem_handle); if (ret < 0) { pr_err("%s Memory_unmap for voip failed %d\n", __func__, ret); goto fail; } v->shmem_info.mem_handle = 0; } } if (is_voip_session(v->session_id) || v->voc_state == VOC_ERROR) { /* Destroy CVS. */ pr_debug("%s: CVS destroy session\n", __func__); cvs_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_destroy) - APR_HDR_SIZE); cvs_destroy.src_port = voice_get_idx_for_session(v->session_id); cvs_destroy.dest_port = cvs_handle; cvs_destroy.token = 0; cvs_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_destroy); if (ret < 0) { pr_err("%s: Error %d sending CVS DESTROY\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait event timeout\n", __func__); goto fail; } cvs_handle = 0; voice_set_cvs_handle(v, cvs_handle); /* Unmap physical memory for calibration */ pr_debug("%s: cal_mem_handle %d\n", __func__, common.cal_mem_handle); if (!is_other_session_active(v->session_id) && (common.cal_mem_handle != 0)) { ret = voice_send_mvm_unmap_memory_physical_cmd(v, common.cal_mem_handle); if (ret < 0) { pr_err("%s Fail at cal mem unmap %d\n", __func__, ret); goto fail; } common.cal_mem_handle = 0; } /* Destroy MVM. 
*/ pr_debug("MVM destroy session\n"); mvm_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_destroy) - APR_HDR_SIZE); mvm_destroy.src_port = voice_get_idx_for_session(v->session_id); mvm_destroy.dest_port = mvm_handle; mvm_destroy.token = 0; mvm_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_destroy); if (ret < 0) { pr_err("%s: Error %d sending MVM DESTROY\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait event timeout\n", __func__); goto fail; } mvm_handle = 0; voice_set_mvm_handle(v, mvm_handle); } return 0; fail: return -EINVAL; } static int voice_send_tty_mode_cmd(struct voice_data *v) { int ret = 0; struct mvm_set_tty_mode_cmd mvm_tty_mode_cmd; void *apr_mvm; u16 mvm_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; if (!apr_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); if (v->tty_mode) { /* send tty mode cmd to mvm */ mvm_tty_mode_cmd.hdr.hdr_field = APR_HDR_FIELD( APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_tty_mode_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_tty_mode_cmd) - APR_HDR_SIZE); pr_debug("%s: pkt size = %d\n", __func__, mvm_tty_mode_cmd.hdr.pkt_size); mvm_tty_mode_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); mvm_tty_mode_cmd.hdr.dest_port = mvm_handle; mvm_tty_mode_cmd.hdr.token = 0; mvm_tty_mode_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_TTY_MODE; mvm_tty_mode_cmd.tty_mode.mode = v->tty_mode; pr_debug("tty mode =%d\n", mvm_tty_mode_cmd.tty_mode.mode); v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_tty_mode_cmd); if (ret < 0) { pr_err("%s: Error %d sending SET_TTY_MODE\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } } return 0; fail: return -EINVAL; } static int voice_send_set_pp_enable_cmd(struct voice_data *v, uint32_t module_id, int enable) { struct cvs_set_pp_enable_cmd cvs_set_pp_cmd; int ret = 0; void *apr_cvs; u16 cvs_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); cvs_set_pp_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_set_pp_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_set_pp_cmd) - APR_HDR_SIZE); cvs_set_pp_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_set_pp_cmd.hdr.dest_port = cvs_handle; cvs_set_pp_cmd.hdr.token = 0; cvs_set_pp_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_UI_PROPERTY; cvs_set_pp_cmd.vss_set_pp.module_id = module_id; cvs_set_pp_cmd.vss_set_pp.param_id = VOICE_PARAM_MOD_ENABLE; cvs_set_pp_cmd.vss_set_pp.param_size = MOD_ENABLE_PARAM_LEN; cvs_set_pp_cmd.vss_set_pp.reserved = 0; cvs_set_pp_cmd.vss_set_pp.enable = enable; cvs_set_pp_cmd.vss_set_pp.reserved_field = 0; pr_debug("voice_send_set_pp_enable_cmd, module_id=%d, enable=%d\n", module_id, enable); v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) 
&cvs_set_pp_cmd); if (ret < 0) { pr_err("Fail: sending cvs set pp enable,\n"); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; } static int voice_set_dtx(struct voice_data *v) { int ret = 0; void *apr_cvs; u16 cvs_handle; struct cvs_set_enc_dtx_mode_cmd cvs_set_dtx; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); /* Set DTX */ cvs_set_dtx.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_set_dtx.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_set_dtx) - APR_HDR_SIZE); cvs_set_dtx.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_set_dtx.hdr.dest_port = cvs_handle; cvs_set_dtx.hdr.token = 0; cvs_set_dtx.hdr.opcode = VSS_ISTREAM_CMD_SET_ENC_DTX_MODE; cvs_set_dtx.dtx_mode.enable = common.mvs_info.dtx_mode; pr_debug("%s: Setting DTX %d\n", __func__, common.mvs_info.dtx_mode); v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_dtx); if (ret < 0) { pr_err("%s: Error %d sending SET_DTX\n", __func__, ret); return -EINVAL; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); return -EINVAL; } return 0; } static int voice_send_mvm_media_type_cmd(struct voice_data *v) { struct vss_imvm_cmd_set_cal_media_type_t mvm_set_cal_media_type; int ret = 0; void *apr_mvm; u16 mvm_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; if (!apr_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); mvm_set_cal_media_type.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_set_cal_media_type.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_set_cal_media_type) - APR_HDR_SIZE); mvm_set_cal_media_type.hdr.src_port = voice_get_idx_for_session(v->session_id); mvm_set_cal_media_type.hdr.dest_port = mvm_handle; mvm_set_cal_media_type.hdr.token = 0; mvm_set_cal_media_type.hdr.opcode = VSS_IMVM_CMD_SET_CAL_MEDIA_TYPE; mvm_set_cal_media_type.media_id = common.mvs_info.media_type; pr_debug("%s: setting media_id as %x\n", __func__ , mvm_set_cal_media_type.media_id); v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_cal_media_type); if (ret < 0) { pr_err("%s: Error %d sending media type\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout %d\n", __func__, ret); goto fail; } return 0; fail: return -EINVAL; } static int voice_send_dtmf_rx_detection_cmd(struct voice_data *v, uint32_t enable) { int ret = 0; void *apr_cvs; u16 cvs_handle; struct cvs_set_rx_dtmf_detection_cmd cvs_dtmf_rx_detection; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); /* Set SET_DTMF_RX_DETECTION */ cvs_dtmf_rx_detection.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); 
cvs_dtmf_rx_detection.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_dtmf_rx_detection) - APR_HDR_SIZE); cvs_dtmf_rx_detection.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_dtmf_rx_detection.hdr.dest_port = cvs_handle; cvs_dtmf_rx_detection.hdr.token = 0; cvs_dtmf_rx_detection.hdr.opcode = VSS_ISTREAM_CMD_SET_RX_DTMF_DETECTION; cvs_dtmf_rx_detection.cvs_dtmf_det.enable = enable; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_dtmf_rx_detection); if (ret < 0) { pr_err("%s: Error %d sending SET_DTMF_RX_DETECTION\n", __func__, ret); return -EINVAL; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); return -EINVAL; } return ret; } void voc_disable_dtmf_det_on_active_sessions(void) { struct voice_data *v = NULL; int i; for (i = 0; i < MAX_VOC_SESSIONS; i++) { v = &common.voice[i]; if ((v->dtmf_rx_detect_en) && ((v->voc_state == VOC_RUN) || (v->voc_state == VOC_CHANGE) || (v->voc_state == VOC_STANDBY))) { pr_debug("disable dtmf det on ses_id=%d\n", v->session_id); voice_send_dtmf_rx_detection_cmd(v, 0); } } } int voc_enable_dtmf_rx_detection(uint32_t session_id, uint32_t enable) { struct voice_data *v = voice_get_session(session_id); int ret = 0; if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return -EINVAL; } mutex_lock(&v->lock); v->dtmf_rx_detect_en = enable; if ((v->voc_state == VOC_RUN) || (v->voc_state == VOC_CHANGE) || (v->voc_state == VOC_STANDBY)) ret = voice_send_dtmf_rx_detection_cmd(v, v->dtmf_rx_detect_en); mutex_unlock(&v->lock); return ret; } static int voice_config_cvs_vocoder(struct voice_data *v) { int ret = 0; void *apr_cvs; u16 cvs_handle; /* Set media type. */ struct cvs_set_media_type_cmd cvs_set_media_cmd; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); cvs_set_media_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_set_media_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_set_media_cmd) - APR_HDR_SIZE); cvs_set_media_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_set_media_cmd.hdr.dest_port = cvs_handle; cvs_set_media_cmd.hdr.token = 0; cvs_set_media_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_MEDIA_TYPE; cvs_set_media_cmd.media_type.tx_media_id = common.mvs_info.media_type; cvs_set_media_cmd.media_type.rx_media_id = common.mvs_info.media_type; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_media_cmd); if (ret < 0) { pr_err("%s: Error %d sending SET_MEDIA_TYPE\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } /* Set encoder properties. 
*/ switch (common.mvs_info.media_type) { case VSS_MEDIA_ID_EVRC_MODEM: case VSS_MEDIA_ID_4GV_NB_MODEM: case VSS_MEDIA_ID_4GV_WB_MODEM: { struct cvs_set_cdma_enc_minmax_rate_cmd cvs_set_cdma_rate; pr_debug("Setting EVRC min-max rate\n"); cvs_set_cdma_rate.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_set_cdma_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_set_cdma_rate) - APR_HDR_SIZE); cvs_set_cdma_rate.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_set_cdma_rate.hdr.dest_port = cvs_handle; cvs_set_cdma_rate.hdr.token = 0; cvs_set_cdma_rate.hdr.opcode = VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE; cvs_set_cdma_rate.cdma_rate.min_rate = common.mvs_info.rate; cvs_set_cdma_rate.cdma_rate.max_rate = common.mvs_info.rate; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_cdma_rate); if (ret < 0) { pr_err("%s: Error %d sending SET_EVRC_MINMAX_RATE\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } break; } case VSS_MEDIA_ID_AMR_NB_MODEM: { struct cvs_set_amr_enc_rate_cmd cvs_set_amr_rate; pr_debug("Setting AMR rate\n"); cvs_set_amr_rate.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_set_amr_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_set_amr_rate) - APR_HDR_SIZE); cvs_set_amr_rate.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_set_amr_rate.hdr.dest_port = cvs_handle; cvs_set_amr_rate.hdr.token = 0; cvs_set_amr_rate.hdr.opcode = VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE; cvs_set_amr_rate.amr_rate.mode = common.mvs_info.rate; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amr_rate); if (ret < 0) { pr_err("%s: Error %d sending SET_AMR_RATE\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } ret = voice_set_dtx(v); if (ret < 0) goto fail; break; } case VSS_MEDIA_ID_AMR_WB_MODEM: { struct cvs_set_amrwb_enc_rate_cmd cvs_set_amrwb_rate; pr_debug("Setting AMR WB rate\n"); cvs_set_amrwb_rate.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_set_amrwb_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_set_amrwb_rate) - APR_HDR_SIZE); cvs_set_amrwb_rate.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_set_amrwb_rate.hdr.dest_port = cvs_handle; cvs_set_amrwb_rate.hdr.token = 0; cvs_set_amrwb_rate.hdr.opcode = VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE; cvs_set_amrwb_rate.amrwb_rate.mode = common.mvs_info.rate; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amrwb_rate); if (ret < 0) { pr_err("%s: Error %d sending SET_AMRWB_RATE\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } ret = voice_set_dtx(v); if (ret < 0) goto fail; break; } case VSS_MEDIA_ID_G729: case VSS_MEDIA_ID_G711_ALAW: case VSS_MEDIA_ID_G711_MULAW: { ret = voice_set_dtx(v); break; } default: /* Do nothing. 
*/ break; } return 0; fail: return -EINVAL; }
static int voice_send_start_voice_cmd(struct voice_data *v) { struct apr_hdr mvm_start_voice_cmd; int ret = 0; void *apr_mvm; u16 mvm_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; if (!apr_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); mvm_start_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_start_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_start_voice_cmd) - APR_HDR_SIZE); pr_debug("send mvm_start_voice_cmd pkt size = %d\n", mvm_start_voice_cmd.pkt_size); mvm_start_voice_cmd.src_port = voice_get_idx_for_session(v->session_id); mvm_start_voice_cmd.dest_port = mvm_handle; mvm_start_voice_cmd.token = 0; mvm_start_voice_cmd.opcode = VSS_IMVM_CMD_START_VOICE; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_start_voice_cmd); if (ret < 0) { pr_err("Fail in sending VSS_IMVM_CMD_START_VOICE\n"); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; }
static int voice_send_disable_vocproc_cmd(struct voice_data *v) { struct apr_hdr cvp_disable_cmd; int ret = 0; void *apr_cvp; u16 cvp_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvp = common.apr_q6_cvp; if (!apr_cvp) { pr_err("%s: apr_cvp is NULL.\n", __func__); return -EINVAL; } cvp_handle = voice_get_cvp_handle(v); /* disable vocproc and wait for response */ cvp_disable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_disable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_disable_cmd) - APR_HDR_SIZE); pr_debug("cvp_disable_cmd pkt size = %d, cvp_handle=%d\n", cvp_disable_cmd.pkt_size, cvp_handle); cvp_disable_cmd.src_port = voice_get_idx_for_session(v->session_id); cvp_disable_cmd.dest_port = cvp_handle; cvp_disable_cmd.token = 0; cvp_disable_cmd.opcode = VSS_IVOCPROC_CMD_DISABLE; v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_disable_cmd); if (ret < 0) { pr_err("Fail in sending VSS_IVOCPROC_CMD_DISABLE\n"); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; }
static void voc_get_tx_rx_topology(struct voice_data *v, uint32_t *tx_topology_id, uint32_t *rx_topology_id) { uint32_t tx_id = 0; uint32_t rx_id = 0; if (v->lch_mode == VOICE_LCH_START) { pr_debug("%s: Setting TX and RX topology to NONE for LCH\n", __func__); tx_id = VSS_IVOCPROC_TOPOLOGY_ID_NONE; rx_id = VSS_IVOCPROC_TOPOLOGY_ID_NONE; } else { /* Use default topology if invalid value in ACDB */ tx_id = get_voice_tx_topology(); if (tx_id == 0) tx_id = VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS; rx_id = get_voice_rx_topology(); if (rx_id == 0) rx_id = VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT; } *tx_topology_id = tx_id; *rx_topology_id = rx_id; }
static int voice_send_set_device_cmd(struct voice_data *v) { struct cvp_set_device_cmd cvp_setdev_cmd; int ret = 0; void *apr_cvp; u16 cvp_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvp = common.apr_q6_cvp; if (!apr_cvp) { pr_err("%s: apr_cvp is NULL.\n", __func__); return -EINVAL; } cvp_handle = voice_get_cvp_handle(v); /* set device and wait for response */ cvp_setdev_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_setdev_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_setdev_cmd) - APR_HDR_SIZE); pr_debug(" send create cvp setdev, pkt size = %d\n", cvp_setdev_cmd.hdr.pkt_size); cvp_setdev_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvp_setdev_cmd.hdr.dest_port = cvp_handle; cvp_setdev_cmd.hdr.token = 0; cvp_setdev_cmd.hdr.opcode = VSS_IVOCPROC_CMD_SET_DEVICE_V2; voc_get_tx_rx_topology(v, &cvp_setdev_cmd.cvp_set_device_v2.tx_topology_id, &cvp_setdev_cmd.cvp_set_device_v2.rx_topology_id); cvp_setdev_cmd.cvp_set_device_v2.tx_port_id = v->dev_tx.port_id; cvp_setdev_cmd.cvp_set_device_v2.rx_port_id = v->dev_rx.port_id; cvp_setdev_cmd.cvp_set_device_v2.vocproc_mode = VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING; cvp_setdev_cmd.cvp_set_device_v2.ec_ref_port_id = VSS_IVOCPROC_PORT_ID_NONE; pr_debug("topology=%d, tx_port_id=%d, rx_port_id=%d\n", cvp_setdev_cmd.cvp_set_device_v2.tx_topology_id, cvp_setdev_cmd.cvp_set_device_v2.tx_port_id, cvp_setdev_cmd.cvp_set_device_v2.rx_port_id); v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_setdev_cmd); if (ret < 0) { pr_err("Fail in sending VSS_IVOCPROC_CMD_SET_DEVICE_V2\n"); goto fail; } pr_debug("wait for cvp set device event\n"); ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; }
static int voice_send_stop_voice_cmd(struct voice_data *v) { struct apr_hdr mvm_stop_voice_cmd; int ret = 0; void *apr_mvm; u16 mvm_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; if (!apr_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); mvm_stop_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_stop_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_stop_voice_cmd) - APR_HDR_SIZE); pr_debug("send mvm_stop_voice_cmd pkt size = %d\n", mvm_stop_voice_cmd.pkt_size); mvm_stop_voice_cmd.src_port = voice_get_idx_for_session(v->session_id); mvm_stop_voice_cmd.dest_port = mvm_handle; mvm_stop_voice_cmd.token = 0; mvm_stop_voice_cmd.opcode = VSS_IMVM_CMD_STOP_VOICE; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_stop_voice_cmd); if (ret < 0) { pr_err("Fail in sending VSS_IMVM_CMD_STOP_VOICE\n"); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; }
static int voice_send_cvs_register_cal_cmd(struct voice_data *v) { struct cvs_register_cal_data_cmd cvs_reg_cal_cmd; struct acdb_cal_block cal_block; int ret = 0; memset(&cvs_reg_cal_cmd, 0, sizeof(cvs_reg_cal_cmd)); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_cvs) { pr_err("%s: apr_cvs is NULL\n", __func__); goto fail; } if (!common.cal_mem_handle) { pr_err("%s: Cal mem handle is NULL\n", __func__); goto fail; } get_vocstrm_cal(&cal_block); if (cal_block.cal_size == 0) { pr_err("%s: CVS cal size is 0\n", __func__); goto fail; } cvs_reg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
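/*
 * Calibration registration hands the DSP a handle to shared memory that
 * must already have been mapped (see voice_map_memory_physical_cmd()
 * below), plus the physical address and size of the cal table within
 * that region; the ACDB column info is then copied inline into the
 * command. A hedged sketch of the payload this function fills in, using
 * the same field names:
 *
 *	cmd.cvs_cal_data.cal_mem_handle  = common.cal_mem_handle;
 *	cmd.cvs_cal_data.cal_mem_address = cal_block.cal_paddr;
 *	cmd.cvs_cal_data.cal_mem_size    = cal_block.cal_size;
 *
 * (column_info[] is bounds-checked against the VOCSTRM_CAL size before
 * the memcpy a few lines below.)
 */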
cvs_reg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_reg_cal_cmd) - APR_HDR_SIZE); cvs_reg_cal_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_reg_cal_cmd.hdr.dest_port = voice_get_cvs_handle(v); cvs_reg_cal_cmd.hdr.token = 0; cvs_reg_cal_cmd.hdr.opcode = VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA_V2; cvs_reg_cal_cmd.cvs_cal_data.cal_mem_handle = common.cal_mem_handle; cvs_reg_cal_cmd.cvs_cal_data.cal_mem_address = cal_block.cal_paddr; cvs_reg_cal_cmd.cvs_cal_data.cal_mem_size = cal_block.cal_size; /* Get the column info corresponding to CVS cal from ACDB. */ get_voice_col_data(VOCSTRM_CAL, &cal_block); if (cal_block.cal_size == 0 || cal_block.cal_size > sizeof(cvs_reg_cal_cmd.cvs_cal_data.column_info)) { pr_err("%s: Invalid VOCSTRM_CAL size %d\n", __func__, cal_block.cal_size); goto fail; } memcpy(&cvs_reg_cal_cmd.cvs_cal_data.column_info[0], (void *) cal_block.cal_kvaddr, cal_block.cal_size); v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_cvs, (uint32_t *) &cvs_reg_cal_cmd); if (ret < 0) { pr_err("%s: Error %d registering CVS cal\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; } static int voice_send_cvs_deregister_cal_cmd(struct voice_data *v) { struct cvs_deregister_cal_data_cmd cvs_dereg_cal_cmd; struct acdb_cal_block cal_block; int ret = 0; memset(&cvs_dereg_cal_cmd, 0, sizeof(cvs_dereg_cal_cmd)); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_cvs) { pr_err("%s: apr_cvs is NULL\n", __func__); goto fail; } get_vocstrm_cal(&cal_block); if (cal_block.cal_size == 0) return 0; cvs_dereg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_dereg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_dereg_cal_cmd) - APR_HDR_SIZE); cvs_dereg_cal_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_dereg_cal_cmd.hdr.dest_port = voice_get_cvs_handle(v); cvs_dereg_cal_cmd.hdr.token = 0; cvs_dereg_cal_cmd.hdr.opcode = VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_cvs, (uint32_t *) &cvs_dereg_cal_cmd); if (ret < 0) { pr_err("%s: Error %d de-registering CVS cal\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; } static int voice_send_cvp_register_dev_cfg_cmd(struct voice_data *v) { struct cvp_register_dev_cfg_cmd cvp_reg_dev_cfg_cmd; struct acdb_cal_block cal_block; int ret = 0; memset(&cvp_reg_dev_cfg_cmd, 0, sizeof(cvp_reg_dev_cfg_cmd)); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_cvp) { pr_err("%s: apr_cvp is NULL\n", __func__); goto fail; } if (!common.cal_mem_handle) { pr_err("%s: Cal mem handle is NULL\n", __func__); goto fail; } get_vocproc_dev_cfg_cal(&cal_block); if (cal_block.cal_size == 0) { pr_err("%s: CVP cal size is 0\n", __func__); goto fail; } cvp_reg_dev_cfg_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_reg_dev_cfg_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_reg_dev_cfg_cmd) - APR_HDR_SIZE); cvp_reg_dev_cfg_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); 
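/*
 * The CVS/CVP cal, device-config and volume-cal commands come in
 * register/deregister pairs. The deregister variants treat a zero-size
 * cal block as "nothing to do" and return 0, while the register variants
 * treat it as an error. Bring-up order, as used by voice_setup_vocproc()
 * later in this file:
 *
 *	voice_send_cvs_register_cal_cmd(v);
 *	voice_send_cvp_register_dev_cfg_cmd(v);
 *	voice_send_cvp_register_cal_cmd(v);
 *	voice_send_cvp_register_vol_cal_cmd(v);
 *
 * Teardown (voice_destroy_vocproc()) runs the matching deregister calls
 * in the order: vol cal, cal, dev cfg, then CVS cal.
 */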
cvp_reg_dev_cfg_cmd.hdr.dest_port = voice_get_cvp_handle(v); cvp_reg_dev_cfg_cmd.hdr.token = 0; cvp_reg_dev_cfg_cmd.hdr.opcode = VSS_IVOCPROC_CMD_REGISTER_DEVICE_CONFIG; cvp_reg_dev_cfg_cmd.cvp_dev_cfg_data.mem_handle = common.cal_mem_handle; cvp_reg_dev_cfg_cmd.cvp_dev_cfg_data.mem_address = cal_block.cal_paddr; cvp_reg_dev_cfg_cmd.cvp_dev_cfg_data.mem_size = cal_block.cal_size; v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_reg_dev_cfg_cmd); if (ret < 0) { pr_err("%s: Error %d registering CVP dev cfg cal\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; } static int voice_send_cvp_deregister_dev_cfg_cmd(struct voice_data *v) { struct cvp_deregister_dev_cfg_cmd cvp_dereg_dev_cfg_cmd; struct acdb_cal_block cal_block; int ret = 0; memset(&cvp_dereg_dev_cfg_cmd, 0, sizeof(cvp_dereg_dev_cfg_cmd)); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_cvp) { pr_err("%s: apr_cvp is NULL.\n", __func__); goto fail; } get_vocproc_dev_cfg_cal(&cal_block); if (cal_block.cal_size == 0) return 0; cvp_dereg_dev_cfg_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_dereg_dev_cfg_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_dereg_dev_cfg_cmd) - APR_HDR_SIZE); cvp_dereg_dev_cfg_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvp_dereg_dev_cfg_cmd.hdr.dest_port = voice_get_cvp_handle(v); cvp_dereg_dev_cfg_cmd.hdr.token = 0; cvp_dereg_dev_cfg_cmd.hdr.opcode = VSS_IVOCPROC_CMD_DEREGISTER_DEVICE_CONFIG; v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_dereg_dev_cfg_cmd); if (ret < 0) { pr_err("%s: Error %d de-registering CVP dev cfg cal\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; } static int voice_send_cvp_register_cal_cmd(struct voice_data *v) { struct cvp_register_cal_data_cmd cvp_reg_cal_cmd; struct acdb_cal_block cal_block; int ret = 0; memset(&cvp_reg_cal_cmd, 0, sizeof(cvp_reg_cal_cmd)); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_cvp) { pr_err("%s: apr_cvp is NULL\n", __func__); goto fail; } if (!common.cal_mem_handle) { pr_err("%s: Cal mem handle is NULL\n", __func__); goto fail; } get_vocproc_cal(&cal_block); if (cal_block.cal_size == 0) { pr_err("%s: CVP cal size is 0\n", __func__); goto fail; } cvp_reg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_reg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_reg_cal_cmd) - APR_HDR_SIZE); cvp_reg_cal_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvp_reg_cal_cmd.hdr.dest_port = voice_get_cvp_handle(v); cvp_reg_cal_cmd.hdr.token = 0; cvp_reg_cal_cmd.hdr.opcode = VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA_V2; cvp_reg_cal_cmd.cvp_cal_data.cal_mem_handle = common.cal_mem_handle; cvp_reg_cal_cmd.cvp_cal_data.cal_mem_address = cal_block.cal_paddr; cvp_reg_cal_cmd.cvp_cal_data.cal_mem_size = cal_block.cal_size; /* Get the column info corresponding to CVP cal from ACDB. 
*/ get_voice_col_data(VOCPROC_CAL, &cal_block); if (cal_block.cal_size == 0 || cal_block.cal_size > sizeof(cvp_reg_cal_cmd.cvp_cal_data.column_info)) { pr_err("%s: Invalid VOCPROC_CAL size %d\n", __func__, cal_block.cal_size); goto fail; } memcpy(&cvp_reg_cal_cmd.cvp_cal_data.column_info[0], (void *) cal_block.cal_kvaddr, cal_block.cal_size); v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_reg_cal_cmd); if (ret < 0) { pr_err("%s: Error %d registering CVP cal\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; } static int voice_send_cvp_deregister_cal_cmd(struct voice_data *v) { struct cvp_deregister_cal_data_cmd cvp_dereg_cal_cmd; struct acdb_cal_block cal_block; int ret = 0; memset(&cvp_dereg_cal_cmd, 0, sizeof(cvp_dereg_cal_cmd)); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_cvp) { pr_err("%s: apr_cvp is NULL.\n", __func__); goto fail; } get_vocproc_cal(&cal_block); if (cal_block.cal_size == 0) return 0; cvp_dereg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_dereg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_dereg_cal_cmd) - APR_HDR_SIZE); cvp_dereg_cal_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvp_dereg_cal_cmd.hdr.dest_port = voice_get_cvp_handle(v); cvp_dereg_cal_cmd.hdr.token = 0; cvp_dereg_cal_cmd.hdr.opcode = VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA; v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_dereg_cal_cmd); if (ret < 0) { pr_err("%s: Error %d de-registering CVP cal\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; } static int voice_send_cvp_register_vol_cal_cmd(struct voice_data *v) { struct cvp_register_vol_cal_data_cmd cvp_reg_vol_cal_cmd; struct acdb_cal_block cal_block; int ret = 0; memset(&cvp_reg_vol_cal_cmd, 0, sizeof(cvp_reg_vol_cal_cmd)); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_cvp) { pr_err("%s: apr_cvp is NULL.\n", __func__); goto fail; } if (!common.cal_mem_handle) { pr_err("%s: Cal mem handle is NULL\n", __func__); goto fail; } get_vocvol_cal(&cal_block); if (cal_block.cal_size == 0) { pr_err("%s: CVP vol cal size is 0\n", __func__); goto fail; } cvp_reg_vol_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_reg_vol_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_reg_vol_cal_cmd) - APR_HDR_SIZE); cvp_reg_vol_cal_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvp_reg_vol_cal_cmd.hdr.dest_port = voice_get_cvp_handle(v); cvp_reg_vol_cal_cmd.hdr.token = 0; cvp_reg_vol_cal_cmd.hdr.opcode = VSS_IVOCPROC_CMD_REGISTER_VOL_CALIBRATION_DATA; cvp_reg_vol_cal_cmd.cvp_vol_cal_data.cal_mem_handle = common.cal_mem_handle; cvp_reg_vol_cal_cmd.cvp_vol_cal_data.cal_mem_address = cal_block.cal_paddr; cvp_reg_vol_cal_cmd.cvp_vol_cal_data.cal_mem_size = cal_block.cal_size; /* Get the column info corresponding to CVP volume cal from ACDB. 
*/ get_voice_col_data(VOCVOL_CAL, &cal_block); if (cal_block.cal_size == 0 || cal_block.cal_size > sizeof(cvp_reg_vol_cal_cmd.cvp_vol_cal_data.column_info)) { pr_err("%s: Invalid VOCVOL_CAL size %d\n", __func__, cal_block.cal_size); goto fail; } memcpy(&cvp_reg_vol_cal_cmd.cvp_vol_cal_data.column_info[0], (void *) cal_block.cal_kvaddr, cal_block.cal_size); v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_reg_vol_cal_cmd); if (ret < 0) { pr_err("%s: Error %d registering CVP vol cal\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; }
static int voice_send_cvp_deregister_vol_cal_cmd(struct voice_data *v) { struct cvp_deregister_vol_cal_data_cmd cvp_dereg_vol_cal_cmd; struct acdb_cal_block cal_block; int ret = 0; memset(&cvp_dereg_vol_cal_cmd, 0, sizeof(cvp_dereg_vol_cal_cmd)); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_cvp) { pr_err("%s: apr_cvp is NULL\n", __func__); goto fail; } get_vocvol_cal(&cal_block); if (cal_block.cal_size == 0) return 0; cvp_dereg_vol_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_dereg_vol_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_dereg_vol_cal_cmd) - APR_HDR_SIZE); cvp_dereg_vol_cal_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvp_dereg_vol_cal_cmd.hdr.dest_port = voice_get_cvp_handle(v); cvp_dereg_vol_cal_cmd.hdr.token = 0; cvp_dereg_vol_cal_cmd.hdr.opcode = VSS_IVOCPROC_CMD_DEREGISTER_VOL_CALIBRATION_DATA; v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_dereg_vol_cal_cmd); if (ret < 0) { pr_err("%s: Error %d de-registering CVP vol cal\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; }
static int voice_map_memory_physical_cmd(struct voice_data *v, struct mem_map_table *table_info, dma_addr_t phys, uint32_t size, uint32_t token) { struct vss_imemory_cmd_map_physical_t mvm_map_phys_cmd; uint32_t *memtable; int ret = 0; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); goto fail; } if (!table_info->data) { pr_err("%s: memory table is NULL.\n", __func__); goto fail; } memtable = (uint32_t *) table_info->data; /* Store next table descriptor's address (64 bit) as NULL as there is only one memory block */ memtable[0] = (uint32_t)NULL; memtable[1] = (uint32_t)NULL; /* Store next table descriptor's size */ memtable[2] = 0; /* Store shared mem add */ memtable[3] = phys; memtable[4] = 0; /* Store shared memory size */ memtable[5] = size; mvm_map_phys_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_map_phys_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_map_phys_cmd) - APR_HDR_SIZE); mvm_map_phys_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); mvm_map_phys_cmd.hdr.dest_port = voice_get_mvm_handle(v); mvm_map_phys_cmd.hdr.token = token; mvm_map_phys_cmd.hdr.opcode = VSS_IMEMORY_CMD_MAP_PHYSICAL; mvm_map_phys_cmd.table_descriptor.mem_address = table_info->phys; mvm_map_phys_cmd.table_descriptor.mem_size = sizeof(struct vss_imemory_block_t) + sizeof(struct vss_imemory_table_descriptor_t); mvm_map_phys_cmd.is_cached = true; mvm_map_phys_cmd.cache_line_size = 128; mvm_map_phys_cmd.access_mask = 3; mvm_map_phys_cmd.page_align = 4096; mvm_map_phys_cmd.min_data_width = 8; mvm_map_phys_cmd.max_data_width = 64; pr_debug("%s: next table desc: add: %lld, size: %d\n", __func__, *((uint64_t *) memtable), *(((uint32_t *) memtable) + 2)); pr_debug("%s: phy add of mem being mapped 0x%x, size: %d\n", __func__, *(((uint32_t *) memtable) + 3), *(((uint32_t *) memtable) + 5)); v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_mvm, (uint32_t *) &mvm_map_phys_cmd); if (ret < 0) { pr_err("%s: Error %d sending mvm map phy cmd\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; }
static int voice_mem_map_cal_block(struct voice_data *v) { int ret = 0; struct acdb_cal_block cal_block; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } if (common.cal_mem_handle != 0) { pr_debug("%s: Cal block already mem mapped\n", __func__); return ret; } /* Get the physical address of calibration memory block from ACDB. */ get_voice_cal_allocation(&cal_block); if (!cal_block.cal_paddr) { pr_err("%s: Cal block not allocated\n", __func__); return -EINVAL; } ret = voice_map_memory_physical_cmd(v, &common.cal_mem_map_table, cal_block.cal_paddr, cal_block.cal_size, VOC_CAL_MEM_MAP_TOKEN); return ret; }
static int voice_pause_voice_call(struct voice_data *v) { struct apr_hdr mvm_pause_voice_cmd; void *apr_mvm; int ret = 0; pr_debug("%s\n", __func__); if (v == NULL) { pr_err("%s: Voice data is NULL\n", __func__); ret = -EINVAL; goto done; } apr_mvm = common.apr_q6_mvm; if (!apr_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); ret = -EINVAL; goto done; } mvm_pause_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_pause_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_pause_voice_cmd) - APR_HDR_SIZE); mvm_pause_voice_cmd.src_port = voice_get_idx_for_session(v->session_id); mvm_pause_voice_cmd.dest_port = voice_get_mvm_handle(v); mvm_pause_voice_cmd.token = 0; mvm_pause_voice_cmd.opcode = VSS_IMVM_CMD_PAUSE_VOICE; v->mvm_state = CMD_STATUS_FAIL; pr_debug("%s: send mvm_pause_voice_cmd pkt size = %d\n", __func__, mvm_pause_voice_cmd.pkt_size); ret = apr_send_pkt(apr_mvm, (uint32_t *)&mvm_pause_voice_cmd); if (ret < 0) { pr_err("Fail in sending VSS_IMVM_CMD_PAUSE_VOICE\n"); ret = -EINVAL; goto done; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); ret = -EINVAL; goto done; } done: return ret; }
int voc_unmap_cal_blocks(void) { int result = 0; int result2 = 0; int i; struct voice_data *v = NULL; pr_debug("%s\n", __func__); mutex_lock(&common.common_lock); if (common.cal_mem_handle == 0) goto done; for (i = 0; i < MAX_VOC_SESSIONS; i++) { v = &common.voice[i]; mutex_lock(&v->lock); if (is_voc_state_active(v->voc_state)) { result2 = voice_pause_voice_call(v); if (result2 < 0) { pr_err("%s: voice_pause_voice_call failed for session 0x%x, err %d!\n", __func__, v->session_id, result2); result = result2; } voice_send_cvp_deregister_vol_cal_cmd(v); voice_send_cvp_deregister_cal_cmd(v); voice_send_cvp_deregister_dev_cfg_cmd(v); voice_send_cvs_deregister_cal_cmd(v); result2 =
voice_send_start_voice_cmd(v); if (result2) { pr_err("%s: voice_send_start_voice_cmd failed for session 0x%x, err %d!\n", __func__, v->session_id, result2); result = result2; } } if ((common.cal_mem_handle != 0) && (!is_other_session_active(v->session_id))) { result2 = voice_send_mvm_unmap_memory_physical_cmd( v, common.cal_mem_handle); if (result2) { pr_err("%s: voice_send_mvm_unmap_memory_physical_cmd failed for session 0x%x, err %d!\n", __func__, v->session_id, result2); result = result2; } else { common.cal_mem_handle = 0; } } mutex_unlock(&v->lock); } done: mutex_unlock(&common.common_lock); return result; } static int voice_setup_vocproc(struct voice_data *v) { struct cvp_create_full_ctl_session_cmd cvp_session_cmd; int ret = 0; void *apr_cvp; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvp = common.apr_q6_cvp; if (!apr_cvp) { pr_err("%s: apr_cvp is NULL.\n", __func__); return -EINVAL; } /* create cvp session and wait for response */ cvp_session_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_session_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_session_cmd) - APR_HDR_SIZE); pr_debug(" send create cvp session, pkt size = %d\n", cvp_session_cmd.hdr.pkt_size); cvp_session_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvp_session_cmd.hdr.dest_port = 0; cvp_session_cmd.hdr.token = 0; cvp_session_cmd.hdr.opcode = VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2; voc_get_tx_rx_topology(v, &cvp_session_cmd.cvp_session.tx_topology_id, &cvp_session_cmd.cvp_session.rx_topology_id); cvp_session_cmd.cvp_session.direction = 2; /*tx and rx*/ cvp_session_cmd.cvp_session.tx_port_id = v->dev_tx.port_id; cvp_session_cmd.cvp_session.rx_port_id = v->dev_rx.port_id; cvp_session_cmd.cvp_session.profile_id = VSS_ICOMMON_CAL_NETWORK_ID_NONE; cvp_session_cmd.cvp_session.vocproc_mode = VSS_IVOCPROC_VOCPROC_MODE_EC_INT_MIXING; cvp_session_cmd.cvp_session.ec_ref_port_id = VSS_IVOCPROC_PORT_ID_NONE; pr_debug("tx_topology: %d tx_port_id=%d, rx_port_id=%d, mode: 0x%x\n", cvp_session_cmd.cvp_session.tx_topology_id, cvp_session_cmd.cvp_session.tx_port_id, cvp_session_cmd.cvp_session.rx_port_id, cvp_session_cmd.cvp_session.vocproc_mode); pr_debug("rx_topology: %d, profile_id: 0x%x, pkt_size: %d\n", cvp_session_cmd.cvp_session.rx_topology_id, cvp_session_cmd.cvp_session.profile_id, cvp_session_cmd.hdr.pkt_size); v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_session_cmd); if (ret < 0) { pr_err("Fail in sending VOCPROC_FULL_CONTROL_SESSION\n"); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } voice_send_cvs_register_cal_cmd(v); voice_send_cvp_register_dev_cfg_cmd(v); voice_send_cvp_register_cal_cmd(v); voice_send_cvp_register_vol_cal_cmd(v); /* enable vocproc */ ret = voice_send_enable_vocproc_cmd(v); if (ret < 0) goto fail; /* attach vocproc */ ret = voice_send_attach_vocproc_cmd(v); if (ret < 0) goto fail; /* send tty mode if tty device is used */ voice_send_tty_mode_cmd(v); if (is_voip_session(v->session_id)) { ret = voice_send_mvm_cal_network_cmd(v); if (ret < 0) pr_err("%s: voice_send_mvm_cal_network_cmd: %d\n", __func__, ret); ret = voice_send_mvm_media_type_cmd(v); if (ret < 0) pr_err("%s: voice_send_mvm_media_type_cmd: %d\n", __func__, ret); voice_send_netid_timing_cmd(v); } /* enable slowtalk if st_enable is set */ if (v->st_enable) 
voice_send_set_pp_enable_cmd(v, MODULE_ID_VOICE_MODULE_ST, v->st_enable); /* Start in-call music delivery if this feature is enabled */ if (v->music_info.play_enable) voice_cvs_start_playback(v); /* Start in-call recording if this feature is enabled */ if (v->rec_info.rec_enable) voice_cvs_start_record(v, v->rec_info.rec_mode); if (v->dtmf_rx_detect_en) voice_send_dtmf_rx_detection_cmd(v, v->dtmf_rx_detect_en); rtac_add_voice(voice_get_cvs_handle(v), voice_get_cvp_handle(v), v->dev_rx.port_id, v->dev_tx.port_id, v->session_id); return 0; fail: return -EINVAL; }
static int voice_send_enable_vocproc_cmd(struct voice_data *v) { int ret = 0; struct apr_hdr cvp_enable_cmd; void *apr_cvp; u16 cvp_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvp = common.apr_q6_cvp; if (!apr_cvp) { pr_err("%s: apr_cvp is NULL.\n", __func__); return -EINVAL; } cvp_handle = voice_get_cvp_handle(v); /* enable vocproc and wait for response */ cvp_enable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_enable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_enable_cmd) - APR_HDR_SIZE); pr_debug("cvp_enable_cmd pkt size = %d, cvp_handle=%d\n", cvp_enable_cmd.pkt_size, cvp_handle); cvp_enable_cmd.src_port = voice_get_idx_for_session(v->session_id); cvp_enable_cmd.dest_port = cvp_handle; cvp_enable_cmd.token = 0; cvp_enable_cmd.opcode = VSS_IVOCPROC_CMD_ENABLE; v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_enable_cmd); if (ret < 0) { pr_err("Fail in sending VSS_IVOCPROC_CMD_ENABLE\n"); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; }
static int voice_send_mvm_cal_network_cmd(struct voice_data *v) { struct vss_imvm_cmd_set_cal_network_t mvm_set_cal_network; int ret = 0; void *apr_mvm; u16 mvm_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; if (!apr_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); mvm_set_cal_network.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_set_cal_network.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_set_cal_network) - APR_HDR_SIZE); mvm_set_cal_network.hdr.src_port = voice_get_idx_for_session(v->session_id); mvm_set_cal_network.hdr.dest_port = mvm_handle; mvm_set_cal_network.hdr.token = 0; mvm_set_cal_network.hdr.opcode = VSS_IMVM_CMD_SET_CAL_NETWORK; mvm_set_cal_network.network_id = VSS_ICOMMON_CAL_NETWORK_ID_NONE; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_cal_network); if (ret < 0) { pr_err("%s: Error %d sending SET_NETWORK\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout %d\n", __func__, ret); goto fail; } return 0; fail: return -EINVAL; }
static int voice_send_netid_timing_cmd(struct voice_data *v) { int ret = 0; void *apr_mvm; u16 mvm_handle; struct mvm_set_network_cmd mvm_set_network; struct mvm_set_voice_timing_cmd mvm_set_voice_timing; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; if (!apr_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); ret = voice_config_cvs_vocoder(v); if (ret < 0) { pr_err("%s: Error %d configuring CVS voc\n", __func__, ret); goto fail; } /* Set network ID. */ pr_debug("Setting network ID\n"); mvm_set_network.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_set_network.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_set_network) - APR_HDR_SIZE); mvm_set_network.hdr.src_port = voice_get_idx_for_session(v->session_id); mvm_set_network.hdr.dest_port = mvm_handle; mvm_set_network.hdr.token = 0; mvm_set_network.hdr.opcode = VSS_ICOMMON_CMD_SET_NETWORK; mvm_set_network.network.network_id = common.mvs_info.network_type; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_network); if (ret < 0) { pr_err("%s: Error %d sending SET_NETWORK\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } /* Set voice timing. */ pr_debug("Setting voice timing\n"); mvm_set_voice_timing.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_set_voice_timing.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_set_voice_timing) - APR_HDR_SIZE); mvm_set_voice_timing.hdr.src_port = voice_get_idx_for_session(v->session_id); mvm_set_voice_timing.hdr.dest_port = mvm_handle; mvm_set_voice_timing.hdr.token = 0; mvm_set_voice_timing.hdr.opcode = VSS_ICOMMON_CMD_SET_VOICE_TIMING; mvm_set_voice_timing.timing.mode = 0; mvm_set_voice_timing.timing.enc_offset = 8000; mvm_set_voice_timing.timing.dec_req_offset = 3300; mvm_set_voice_timing.timing.dec_offset = 8300; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_voice_timing); if (ret < 0) { pr_err("%s: Error %d sending SET_TIMING\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; }
static int voice_send_attach_vocproc_cmd(struct voice_data *v) { int ret = 0; struct mvm_attach_vocproc_cmd mvm_a_vocproc_cmd; void *apr_mvm; u16 mvm_handle, cvp_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; if (!apr_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); cvp_handle = voice_get_cvp_handle(v); /* attach vocproc and wait for response */ mvm_a_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_a_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_a_vocproc_cmd) - APR_HDR_SIZE); pr_debug("send mvm_a_vocproc_cmd pkt size = %d\n", mvm_a_vocproc_cmd.hdr.pkt_size); mvm_a_vocproc_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); mvm_a_vocproc_cmd.hdr.dest_port = mvm_handle; mvm_a_vocproc_cmd.hdr.token = 0; mvm_a_vocproc_cmd.hdr.opcode = VSS_IMVM_CMD_ATTACH_VOCPROC; mvm_a_vocproc_cmd.mvm_attach_cvp_handle.handle = cvp_handle; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_a_vocproc_cmd); if (ret < 0) { pr_err("Fail in sending VSS_IMVM_CMD_ATTACH_VOCPROC\n"); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; }
static int voice_destroy_vocproc(struct voice_data *v) { struct mvm_detach_vocproc_cmd mvm_d_vocproc_cmd; struct apr_hdr cvp_destroy_session_cmd; int ret = 0; void *apr_mvm, *apr_cvp; u16 mvm_handle, cvp_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; apr_cvp = common.apr_q6_cvp; if (!apr_mvm || !apr_cvp) { pr_err("%s: apr_mvm or apr_cvp is NULL.\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); cvp_handle = voice_get_cvp_handle(v); /* stop playback or recording */ v->music_info.force = 1; voice_cvs_stop_playback(v); voice_cvs_stop_record(v); /* send stop voice cmd */ voice_send_stop_voice_cmd(v); /* send stop dtmf detection cmd */ if (v->dtmf_rx_detect_en) voice_send_dtmf_rx_detection_cmd(v, 0); /* reset LCH mode */ v->lch_mode = 0; /* clear mute setting */ v->dev_rx.dev_mute = common.default_mute_val; v->dev_tx.dev_mute = common.default_mute_val; v->stream_rx.stream_mute = common.default_mute_val; v->stream_tx.stream_mute = common.default_mute_val; /* detach VOCPROC and wait for response from mvm */ mvm_d_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mvm_d_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mvm_d_vocproc_cmd) - APR_HDR_SIZE); pr_debug("mvm_d_vocproc_cmd pkt size = %d\n", mvm_d_vocproc_cmd.hdr.pkt_size); mvm_d_vocproc_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); mvm_d_vocproc_cmd.hdr.dest_port = mvm_handle; mvm_d_vocproc_cmd.hdr.token = 0; mvm_d_vocproc_cmd.hdr.opcode = VSS_IMVM_CMD_DETACH_VOCPROC; mvm_d_vocproc_cmd.mvm_detach_cvp_handle.handle = cvp_handle; v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_d_vocproc_cmd); if (ret < 0) { pr_err("Fail in sending VSS_IMVM_CMD_DETACH_VOCPROC\n"); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } voice_send_cvp_deregister_vol_cal_cmd(v); voice_send_cvp_deregister_cal_cmd(v); voice_send_cvp_deregister_dev_cfg_cmd(v); voice_send_cvs_deregister_cal_cmd(v); /* destroy cvp session */ cvp_destroy_session_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_destroy_session_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_destroy_session_cmd) - APR_HDR_SIZE); pr_debug("cvp_destroy_session_cmd pkt size = %d\n", cvp_destroy_session_cmd.pkt_size); cvp_destroy_session_cmd.src_port = voice_get_idx_for_session(v->session_id); cvp_destroy_session_cmd.dest_port = cvp_handle; cvp_destroy_session_cmd.token = 0; cvp_destroy_session_cmd.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION; v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_destroy_session_cmd); if (ret < 0) { pr_err("Fail in sending APRV2_IBASIC_CMD_DESTROY_SESSION\n"); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } rtac_remove_voice(voice_get_cvs_handle(v)); cvp_handle = 0; voice_set_cvp_handle(v, cvp_handle); return 0; fail: return -EINVAL; }
static int voice_send_mvm_unmap_memory_physical_cmd(struct voice_data *v, uint32_t mem_handle) { struct vss_imemory_cmd_unmap_t mem_unmap; int ret = 0; void *apr_mvm; u16 mvm_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_mvm = common.apr_q6_mvm; if (!apr_mvm) { pr_err("%s: apr_mvm is NULL.\n", __func__); return -EINVAL; } mvm_handle = voice_get_mvm_handle(v); mem_unmap.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mem_unmap.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(mem_unmap) - APR_HDR_SIZE); mem_unmap.hdr.src_port = voice_get_idx_for_session(v->session_id); mem_unmap.hdr.dest_port = mvm_handle; mem_unmap.hdr.token = 0; mem_unmap.hdr.opcode = VSS_IMEMORY_CMD_UNMAP; mem_unmap.mem_handle = mem_handle; pr_debug("%s: mem_handle: 0x%x\n", __func__, mem_unmap.mem_handle); v->mvm_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_mvm, (uint32_t *) &mem_unmap); if (ret < 0) { pr_err("mem_unmap op[0x%x]ret[%d]\n", mem_unmap.hdr.opcode, ret); goto fail; } ret = wait_event_timeout(v->mvm_wait, (v->mvm_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout %d\n", __func__, ret); goto fail; } return 0; fail: return -EINVAL; }
static int voice_send_cvs_packet_exchange_config_cmd(struct voice_data *v) { struct vss_istream_cmd_set_oob_packet_exchange_config_t packet_exchange_config_pkt; int ret = 0; uint64_t *dec_buf; uint64_t *enc_buf; void *apr_cvs; u16 cvs_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } dec_buf = (uint64_t *)v->shmem_info.sh_buf.buf[0].phys; enc_buf = (uint64_t *)v->shmem_info.sh_buf.buf[1].phys; apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); packet_exchange_config_pkt.hdr.hdr_field = APR_HDR_FIELD( APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); packet_exchange_config_pkt.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(packet_exchange_config_pkt) - APR_HDR_SIZE); packet_exchange_config_pkt.hdr.src_port = voice_get_idx_for_session(v->session_id); packet_exchange_config_pkt.hdr.dest_port = cvs_handle; packet_exchange_config_pkt.hdr.token = 0; packet_exchange_config_pkt.hdr.opcode = VSS_ISTREAM_CMD_SET_OOB_PACKET_EXCHANGE_CONFIG; packet_exchange_config_pkt.mem_handle = v->shmem_info.mem_handle; packet_exchange_config_pkt.dec_buf_addr = (uint32_t)dec_buf; packet_exchange_config_pkt.dec_buf_size = 4096; packet_exchange_config_pkt.enc_buf_addr = (uint32_t)enc_buf; packet_exchange_config_pkt.enc_buf_size = 4096; pr_debug("%s: dec buf: add %p, size %d, enc buf: add %p, size %d\n", __func__, dec_buf, packet_exchange_config_pkt.dec_buf_size, enc_buf, packet_exchange_config_pkt.enc_buf_size); v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &packet_exchange_config_pkt); if (ret < 0) { pr_err("Failed to send packet exchange config cmd %d\n", ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) pr_err("%s: wait_event timeout %d\n", __func__, ret); return 0; fail: return -EINVAL; }
static int voice_send_cvs_data_exchange_mode_cmd(struct voice_data *v) { struct vss_istream_cmd_set_packet_exchange_mode_t data_exchange_pkt; int ret = 0; void *apr_cvs; u16 cvs_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); data_exchange_pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); data_exchange_pkt.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(data_exchange_pkt) - APR_HDR_SIZE); data_exchange_pkt.hdr.src_port =
voice_get_idx_for_session(v->session_id); data_exchange_pkt.hdr.dest_port = cvs_handle; data_exchange_pkt.hdr.token = 0; data_exchange_pkt.hdr.opcode = VSS_ISTREAM_CMD_SET_PACKET_EXCHANGE_MODE; data_exchange_pkt.mode = VSS_ISTREAM_PACKET_EXCHANGE_MODE_OUT_OF_BAND; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &data_exchange_pkt); if (ret < 0) { pr_err("Failed to send data exchange mode %d\n", ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) pr_err("%s: wait_event timeout %d\n", __func__, ret); return 0; fail: return -EINVAL; } static int voice_send_stream_mute_cmd(struct voice_data *v) { struct cvs_set_mute_cmd cvs_mute_cmd; int ret = 0; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); goto fail; } /* send mute/unmute to cvs */ cvs_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_mute_cmd) - APR_HDR_SIZE); cvs_mute_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_mute_cmd.hdr.dest_port = voice_get_cvs_handle(v); cvs_mute_cmd.hdr.token = 0; cvs_mute_cmd.hdr.opcode = VSS_IVOLUME_CMD_MUTE_V2; cvs_mute_cmd.cvs_set_mute.direction = VSS_IVOLUME_DIRECTION_TX; cvs_mute_cmd.cvs_set_mute.mute_flag = v->stream_tx.stream_mute; cvs_mute_cmd.cvs_set_mute.ramp_duration_ms = DEFAULT_MUTE_RAMP_DURATION; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_cvs, (uint32_t *) &cvs_mute_cmd); if (ret < 0) { pr_err("%s: Error %d sending stream mute\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; } static int voice_send_device_mute_cmd(struct voice_data *v, uint16_t direction, uint16_t mute_flag) { struct cvp_set_mute_cmd cvp_mute_cmd; int ret = 0; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); goto fail; } if (!common.apr_q6_cvp) { pr_err("%s: apr_cvp is NULL.\n", __func__); goto fail; } cvp_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_mute_cmd) - APR_HDR_SIZE); cvp_mute_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvp_mute_cmd.hdr.dest_port = voice_get_cvp_handle(v); cvp_mute_cmd.hdr.token = 0; cvp_mute_cmd.hdr.opcode = VSS_IVOLUME_CMD_MUTE_V2; cvp_mute_cmd.cvp_set_mute.direction = direction; cvp_mute_cmd.cvp_set_mute.mute_flag = mute_flag; cvp_mute_cmd.cvp_set_mute.ramp_duration_ms = DEFAULT_MUTE_RAMP_DURATION; v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_mute_cmd); if (ret < 0) { pr_err("%s: Error %d sending rx device cmd\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: Command timeout\n", __func__); goto fail; } return 0; fail: return -EINVAL; } static int voice_send_vol_index_cmd(struct voice_data *v) { struct cvp_set_rx_volume_index_cmd cvp_vol_cmd; int ret = 0; void *apr_cvp; u16 cvp_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvp = common.apr_q6_cvp; if (!apr_cvp) { pr_err("%s: apr_cvp is NULL.\n", __func__); return -EINVAL; } 
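/*
 * Both mute commands above use VSS_IVOLUME_CMD_MUTE_V2, which applies
 * the mute over a soft ramp of DEFAULT_MUTE_RAMP_DURATION ms rather
 * than a hard cut. A hedged usage sketch via the session-level wrappers
 * defined later in this file (session_id and vol_index are
 * placeholders):
 *
 *	voc_set_tx_mute(session_id, VSS_IVOLUME_DIRECTION_TX,
 *			VSS_IVOLUME_MUTE_ON);
 *	voc_set_rx_vol_index(session_id, RX_PATH, vol_index);
 *
 * The volume index is only sent while the call is in VOC_RUN,
 * VOC_CHANGE or VOC_STANDBY; otherwise only the cached value in
 * v->dev_rx.volume is updated.
 */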
cvp_handle = voice_get_cvp_handle(v); /* send volume index to cvp */ cvp_vol_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvp_vol_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvp_vol_cmd) - APR_HDR_SIZE); cvp_vol_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); cvp_vol_cmd.hdr.dest_port = cvp_handle; cvp_vol_cmd.hdr.token = 0; cvp_vol_cmd.hdr.opcode = VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX; cvp_vol_cmd.cvp_set_vol_idx.vol_index = v->dev_rx.volume; v->cvp_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_vol_cmd); if (ret < 0) { pr_err("Fail in sending RX VOL INDEX\n"); return -EINVAL; } ret = wait_event_timeout(v->cvp_wait, (v->cvp_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); return -EINVAL; } return 0; } static int voice_cvs_start_record(struct voice_data *v, uint32_t rec_mode) { int ret = 0; void *apr_cvs; u16 cvs_handle; struct cvs_start_record_cmd cvs_start_record; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); if (!v->rec_info.recording) { cvs_start_record.hdr.hdr_field = APR_HDR_FIELD( APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_start_record.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_start_record) - APR_HDR_SIZE); cvs_start_record.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_start_record.hdr.dest_port = cvs_handle; cvs_start_record.hdr.token = 0; cvs_start_record.hdr.opcode = VSS_IRECORD_CMD_START; cvs_start_record.rec_mode.port_id = VSS_IRECORD_PORT_ID_DEFAULT; if (rec_mode == VOC_REC_UPLINK) { cvs_start_record.rec_mode.rx_tap_point = VSS_IRECORD_TAP_POINT_NONE; cvs_start_record.rec_mode.tx_tap_point = VSS_IRECORD_TAP_POINT_STREAM_END; } else if (rec_mode == VOC_REC_DOWNLINK) { cvs_start_record.rec_mode.rx_tap_point = VSS_IRECORD_TAP_POINT_STREAM_END; cvs_start_record.rec_mode.tx_tap_point = VSS_IRECORD_TAP_POINT_NONE; } else if (rec_mode == VOC_REC_BOTH) { cvs_start_record.rec_mode.rx_tap_point = VSS_IRECORD_TAP_POINT_STREAM_END; cvs_start_record.rec_mode.tx_tap_point = VSS_IRECORD_TAP_POINT_STREAM_END; } else { pr_err("%s: Invalid in-call rec_mode %d\n", __func__, rec_mode); ret = -EINVAL; goto fail; } v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_record); if (ret < 0) { pr_err("%s: Error %d sending START_RECORD\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } v->rec_info.recording = 1; } else { pr_debug("%s: Start record already sent\n", __func__); } return 0; fail: return ret; } static int voice_cvs_stop_record(struct voice_data *v) { int ret = 0; void *apr_cvs; u16 cvs_handle; struct apr_hdr cvs_stop_record; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); if (v->rec_info.recording) { cvs_stop_record.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_stop_record.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_stop_record) - APR_HDR_SIZE); cvs_stop_record.src_port = 
voice_get_idx_for_session(v->session_id); cvs_stop_record.dest_port = cvs_handle; cvs_stop_record.token = 0; cvs_stop_record.opcode = VSS_IRECORD_CMD_STOP; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_record); if (ret < 0) { pr_err("%s: Error %d sending STOP_RECORD\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } v->rec_info.recording = 0; } else { pr_debug("%s: Stop record already sent\n", __func__); } return 0; fail: return ret; }
int voc_start_record(uint32_t port_id, uint32_t set) { int ret = 0; int rec_mode = 0; u16 cvs_handle; int i, rec_set = 0; for (i = 0; i < MAX_VOC_SESSIONS; i++) { struct voice_data *v = &common.voice[i]; pr_debug("%s: i:%d port_id: %d, set: %d\n", __func__, i, port_id, set); mutex_lock(&v->lock); rec_mode = v->rec_info.rec_mode; rec_set = set; if (set) { if ((v->rec_route_state.ul_flag != 0) && (v->rec_route_state.dl_flag != 0)) { pr_debug("%s: i=%d, rec mode already set.\n", __func__, i); mutex_unlock(&v->lock); continue; } if (port_id == VOICE_RECORD_TX) { if ((v->rec_route_state.ul_flag == 0) && (v->rec_route_state.dl_flag == 0)) { rec_mode = VOC_REC_UPLINK; v->rec_route_state.ul_flag = 1; } else if ((v->rec_route_state.ul_flag == 0) && (v->rec_route_state.dl_flag != 0)) { voice_cvs_stop_record(v); rec_mode = VOC_REC_BOTH; v->rec_route_state.ul_flag = 1; } } else if (port_id == VOICE_RECORD_RX) { if ((v->rec_route_state.ul_flag == 0) && (v->rec_route_state.dl_flag == 0)) { rec_mode = VOC_REC_DOWNLINK; v->rec_route_state.dl_flag = 1; } else if ((v->rec_route_state.ul_flag != 0) && (v->rec_route_state.dl_flag == 0)) { voice_cvs_stop_record(v); rec_mode = VOC_REC_BOTH; v->rec_route_state.dl_flag = 1; } } rec_set = 1; } else { if ((v->rec_route_state.ul_flag == 0) && (v->rec_route_state.dl_flag == 0)) { pr_debug("%s: i=%d, rec already stopped.\n", __func__, i); mutex_unlock(&v->lock); continue; } if (port_id == VOICE_RECORD_TX) { if ((v->rec_route_state.ul_flag != 0) && (v->rec_route_state.dl_flag == 0)) { v->rec_route_state.ul_flag = 0; rec_set = 0; } else if ((v->rec_route_state.ul_flag != 0) && (v->rec_route_state.dl_flag != 0)) { voice_cvs_stop_record(v); v->rec_route_state.ul_flag = 0; rec_mode = VOC_REC_DOWNLINK; rec_set = 1; } } else if (port_id == VOICE_RECORD_RX) { if ((v->rec_route_state.ul_flag == 0) && (v->rec_route_state.dl_flag != 0)) { v->rec_route_state.dl_flag = 0; rec_set = 0; } else if ((v->rec_route_state.ul_flag != 0) && (v->rec_route_state.dl_flag != 0)) { voice_cvs_stop_record(v); v->rec_route_state.dl_flag = 0; rec_mode = VOC_REC_UPLINK; rec_set = 1; } } } pr_debug("%s: i=%d, mode =%d, set =%d\n", __func__, i, rec_mode, rec_set); cvs_handle = voice_get_cvs_handle(v); if (cvs_handle != 0) { if (rec_set) ret = voice_cvs_start_record(v, rec_mode); else ret = voice_cvs_stop_record(v); } /* Cache the value */ v->rec_info.rec_enable = rec_set; v->rec_info.rec_mode = rec_mode; mutex_unlock(&v->lock); } return ret; }
static int voice_cvs_start_playback(struct voice_data *v) { int ret = 0; struct cvs_start_playback_cmd cvs_start_playback; void *apr_cvs; u16 cvs_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); return -EINVAL; } cvs_handle =
voice_get_cvs_handle(v); if (!v->music_info.playing && v->music_info.count) { cvs_start_playback.hdr.hdr_field = APR_HDR_FIELD( APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_start_playback.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_start_playback) - APR_HDR_SIZE); cvs_start_playback.hdr.src_port = voice_get_idx_for_session(v->session_id); cvs_start_playback.hdr.dest_port = cvs_handle; cvs_start_playback.hdr.token = 0; cvs_start_playback.hdr.opcode = VSS_IPLAYBACK_CMD_START; cvs_start_playback.playback_mode.port_id = VSS_IPLAYBACK_PORT_ID_DEFAULT; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_playback); if (ret < 0) { pr_err("%s: Error %d sending START_PLAYBACK\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } v->music_info.playing = 1; } else { pr_debug("%s: Start playback already sent\n", __func__); } return 0; fail: return ret; } static int voice_cvs_stop_playback(struct voice_data *v) { int ret = 0; struct apr_hdr cvs_stop_playback; void *apr_cvs; u16 cvs_handle; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL.\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); if (v->music_info.playing && ((!v->music_info.count) || (v->music_info.force))) { cvs_stop_playback.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cvs_stop_playback.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(cvs_stop_playback) - APR_HDR_SIZE); cvs_stop_playback.src_port = voice_get_idx_for_session(v->session_id); cvs_stop_playback.dest_port = cvs_handle; cvs_stop_playback.token = 0; cvs_stop_playback.opcode = VSS_IPLAYBACK_CMD_STOP; v->cvs_state = CMD_STATUS_FAIL; ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_playback); if (ret < 0) { pr_err("%s: Error %d sending STOP_PLAYBACK\n", __func__, ret); goto fail; } ret = wait_event_timeout(v->cvs_wait, (v->cvs_state == CMD_STATUS_SUCCESS), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); goto fail; } v->music_info.playing = 0; v->music_info.force = 0; } else { pr_debug("%s: Stop playback already sent\n", __func__); } return 0; fail: return ret; } int voc_start_playback(uint32_t set) { int ret = 0; u16 cvs_handle; int i; for (i = 0; i < MAX_VOC_SESSIONS; i++) { struct voice_data *v = &common.voice[i]; mutex_lock(&v->lock); v->music_info.play_enable = set; if (set) v->music_info.count++; else v->music_info.count--; pr_debug("%s: music_info count =%d\n", __func__, v->music_info.count); cvs_handle = voice_get_cvs_handle(v); if (cvs_handle != 0) { if (set) ret = voice_cvs_start_playback(v); else ret = voice_cvs_stop_playback(v); } mutex_unlock(&v->lock); } return ret; } int voc_disable_cvp(uint32_t session_id) { struct voice_data *v = voice_get_session(session_id); int ret = 0; if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return -EINVAL; } mutex_lock(&v->lock); if (v->voc_state == VOC_RUN) { rtac_remove_voice(voice_get_cvs_handle(v)); /* send cmd to dsp to disable vocproc */ ret = voice_send_disable_vocproc_cmd(v); if (ret < 0) { pr_err("%s: disable vocproc failed\n", __func__); goto fail; } voice_send_cvp_deregister_vol_cal_cmd(v); voice_send_cvp_deregister_cal_cmd(v); voice_send_cvp_deregister_dev_cfg_cmd(v); v->voc_state = 
		VOC_CHANGE;
	}

fail:
	mutex_unlock(&v->lock);
	return ret;
}

int voc_enable_cvp(uint32_t session_id)
{
	struct voice_data *v = voice_get_session(session_id);
	int ret = 0;

	if (v == NULL) {
		pr_err("%s: Invalid session_id 0x%x\n", __func__, session_id);
		return -EINVAL;
	}

	mutex_lock(&v->lock);

	if (v->voc_state == VOC_CHANGE) {
		ret = voice_send_set_device_cmd(v);
		if (ret < 0) {
			pr_err("%s: Set device failed\n", __func__);
			goto fail;
		}

		voice_send_cvp_register_dev_cfg_cmd(v);
		voice_send_cvp_register_cal_cmd(v);
		voice_send_cvp_register_vol_cal_cmd(v);

		if (v->lch_mode == VOICE_LCH_START) {
			pr_debug("%s: TX and RX mute ON\n", __func__);

			voice_send_device_mute_cmd(v,
						VSS_IVOLUME_DIRECTION_TX,
						VSS_IVOLUME_MUTE_ON);
			voice_send_device_mute_cmd(v,
						VSS_IVOLUME_DIRECTION_RX,
						VSS_IVOLUME_MUTE_ON);
		} else if (v->lch_mode == VOICE_LCH_STOP) {
			pr_debug("%s: TX and RX mute OFF\n", __func__);

			voice_send_device_mute_cmd(v,
						VSS_IVOLUME_DIRECTION_TX,
						VSS_IVOLUME_MUTE_OFF);
			voice_send_device_mute_cmd(v,
						VSS_IVOLUME_DIRECTION_RX,
						VSS_IVOLUME_MUTE_OFF);
			/* Reset lch mode when VOICE_LCH_STOP is received */
			v->lch_mode = 0;
		} else {
			pr_debug("%s: Mute commands not sent for lch_mode=%d\n",
				 __func__, v->lch_mode);
		}

		ret = voice_send_enable_vocproc_cmd(v);
		if (ret < 0) {
			pr_err("%s: Enable vocproc failed %d\n", __func__, ret);
			goto fail;
		}

		/* Send tty mode if tty device is used */
		voice_send_tty_mode_cmd(v);

		/* enable slowtalk */
		if (v->st_enable)
			voice_send_set_pp_enable_cmd(v,
						MODULE_ID_VOICE_MODULE_ST,
						v->st_enable);

		rtac_add_voice(voice_get_cvs_handle(v),
			       voice_get_cvp_handle(v),
			       v->dev_rx.port_id, v->dev_tx.port_id,
			       v->session_id);
		v->voc_state = VOC_RUN;
	}

fail:
	mutex_unlock(&v->lock);
	return ret;
}

static int voice_set_packet_exchange_mode_and_config(uint32_t session_id,
						     uint32_t mode)
{
	struct voice_data *v = voice_get_session(session_id);
	int ret = 0;

	if (v == NULL) {
		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
		return -EINVAL;
	}

	if (v->voc_state != VOC_RUN)
		ret = voice_send_cvs_data_exchange_mode_cmd(v);
	if (ret) {
		pr_err("%s: Error voice_send_data_exchange_mode_cmd %d\n",
		       __func__, ret);
		goto fail;
	}

	ret = voice_send_cvs_packet_exchange_config_cmd(v);
	if (ret) {
		pr_err("%s: Error: voice_send_packet_exchange_config_cmd %d\n",
		       __func__, ret);
		goto fail;
	}

	return ret;

fail:
	return -EINVAL;
}

int voc_set_tx_mute(uint32_t session_id, uint32_t dir, uint32_t mute)
{
	struct voice_data *v = voice_get_session(session_id);
	int ret = 0;

	if (v == NULL) {
		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
		return -EINVAL;
	}

	mutex_lock(&v->lock);

	v->stream_tx.stream_mute = mute;

	if ((v->voc_state == VOC_RUN) ||
	    (v->voc_state == VOC_CHANGE) ||
	    (v->voc_state == VOC_STANDBY))
		ret = voice_send_stream_mute_cmd(v);

	mutex_unlock(&v->lock);

	return ret;
}

int voc_set_rx_device_mute(uint32_t session_id, uint32_t mute)
{
	struct voice_data *v = voice_get_session(session_id);
	int ret = 0;

	if (v == NULL) {
		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
		return -EINVAL;
	}

	mutex_lock(&v->lock);

	v->dev_rx.dev_mute = mute;

	if (v->voc_state == VOC_RUN)
		ret = voice_send_device_mute_cmd(v,
						 VSS_IVOLUME_DIRECTION_RX,
						 v->dev_rx.dev_mute);

	mutex_unlock(&v->lock);

	return ret;
}

int voc_get_rx_device_mute(uint32_t session_id)
{
	struct voice_data *v = voice_get_session(session_id);
	int ret = 0;

	if (v == NULL) {
		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
		return -EINVAL;
	}

	mutex_lock(&v->lock);

	ret = v->dev_rx.dev_mute;

	mutex_unlock(&v->lock);

	return ret;
}

int voc_set_tty_mode(uint32_t
session_id, uint8_t tty_mode) { struct voice_data *v = voice_get_session(session_id); int ret = 0; if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return -EINVAL; } mutex_lock(&v->lock); v->tty_mode = tty_mode; mutex_unlock(&v->lock); return ret; } uint8_t voc_get_tty_mode(uint32_t session_id) { struct voice_data *v = voice_get_session(session_id); int ret = 0; if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return -EINVAL; } mutex_lock(&v->lock); ret = v->tty_mode; mutex_unlock(&v->lock); return ret; } int voc_set_pp_enable(uint32_t session_id, uint32_t module_id, uint32_t enable) { struct voice_data *v = voice_get_session(session_id); int ret = 0; if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return -EINVAL; } mutex_lock(&v->lock); if (module_id == MODULE_ID_VOICE_MODULE_ST) v->st_enable = enable; if (v->voc_state == VOC_RUN) { if (module_id == MODULE_ID_VOICE_MODULE_ST) ret = voice_send_set_pp_enable_cmd(v, MODULE_ID_VOICE_MODULE_ST, enable); } mutex_unlock(&v->lock); return ret; } int voc_get_pp_enable(uint32_t session_id, uint32_t module_id) { struct voice_data *v = voice_get_session(session_id); int ret = 0; if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return -EINVAL; } mutex_lock(&v->lock); if (module_id == MODULE_ID_VOICE_MODULE_ST) ret = v->st_enable; mutex_unlock(&v->lock); return ret; } int voc_set_rx_vol_index(uint32_t session_id, uint32_t dir, uint32_t vol_idx) { struct voice_data *v = voice_get_session(session_id); int ret = 0; if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return -EINVAL; } mutex_lock(&v->lock); v->dev_rx.volume = vol_idx; if ((v->voc_state == VOC_RUN) || (v->voc_state == VOC_CHANGE) || (v->voc_state == VOC_STANDBY)) ret = voice_send_vol_index_cmd(v); mutex_unlock(&v->lock); return ret; } int voc_set_rxtx_port(uint32_t session_id, uint32_t port_id, uint32_t dev_type) { struct voice_data *v = voice_get_session(session_id); if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return -EINVAL; } pr_debug("%s: port_id=%d, type=%d\n", __func__, port_id, dev_type); mutex_lock(&v->lock); if (dev_type == DEV_RX) v->dev_rx.port_id = q6audio_get_port_id(port_id); else v->dev_tx.port_id = q6audio_get_port_id(port_id); mutex_unlock(&v->lock); return 0; } int voc_set_route_flag(uint32_t session_id, uint8_t path_dir, uint8_t set) { struct voice_data *v = voice_get_session(session_id); if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return -EINVAL; } pr_debug("%s: path_dir=%d, set=%d\n", __func__, path_dir, set); mutex_lock(&v->lock); if (path_dir == RX_PATH) v->voc_route_state.rx_route_flag = set; else v->voc_route_state.tx_route_flag = set; mutex_unlock(&v->lock); return 0; } uint8_t voc_get_route_flag(uint32_t session_id, uint8_t path_dir) { struct voice_data *v = voice_get_session(session_id); int ret = 0; if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return 0; } mutex_lock(&v->lock); if (path_dir == RX_PATH) ret = v->voc_route_state.rx_route_flag; else ret = v->voc_route_state.tx_route_flag; mutex_unlock(&v->lock); return ret; } int voc_end_voice_call(uint32_t session_id) { struct voice_data *v = voice_get_session(session_id); int ret = 0; if (v == NULL) { pr_err("%s: invalid session_id 0x%x\n", __func__, session_id); return -EINVAL; } mutex_lock(&v->lock); if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR 
	    || v->voc_state == VOC_CHANGE || v->voc_state == VOC_STANDBY) {
		pr_debug("%s: VOC_STATE: %d\n", __func__, v->voc_state);

		ret = voice_destroy_vocproc(v);
		if (ret < 0)
			pr_err("%s: destroy voice failed\n", __func__);
		voice_destroy_mvm_cvs_session(v);

		v->voc_state = VOC_RELEASE;
	} else {
		pr_err("%s: Error: End voice called in state %d\n",
		       __func__, v->voc_state);
		ret = -EINVAL;
	}

	mutex_unlock(&v->lock);
	return ret;
}

int voc_standby_voice_call(uint32_t session_id)
{
	struct voice_data *v = voice_get_session(session_id);
	struct apr_hdr mvm_standby_voice_cmd;
	void *apr_mvm;
	u16 mvm_handle;
	int ret = 0;

	if (v == NULL) {
		pr_err("%s: v is NULL\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: voc state=%d\n", __func__, v->voc_state);

	if (v->voc_state == VOC_RUN) {
		apr_mvm = common.apr_q6_mvm;
		if (!apr_mvm) {
			pr_err("%s: apr_mvm is NULL.\n", __func__);
			ret = -EINVAL;
			goto fail;
		}
		mvm_handle = voice_get_mvm_handle(v);
		mvm_standby_voice_cmd.hdr_field =
			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				      APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		mvm_standby_voice_cmd.pkt_size =
			APR_PKT_SIZE(APR_HDR_SIZE,
				     sizeof(mvm_standby_voice_cmd) -
				     APR_HDR_SIZE);
		pr_debug("send mvm_standby_voice_cmd pkt size = %d\n",
			 mvm_standby_voice_cmd.pkt_size);
		mvm_standby_voice_cmd.src_port =
			voice_get_idx_for_session(v->session_id);
		mvm_standby_voice_cmd.dest_port = mvm_handle;
		mvm_standby_voice_cmd.token = 0;
		mvm_standby_voice_cmd.opcode = VSS_IMVM_CMD_STANDBY_VOICE;
		v->mvm_state = CMD_STATUS_FAIL;
		ret = apr_send_pkt(apr_mvm,
				   (uint32_t *)&mvm_standby_voice_cmd);
		if (ret < 0) {
			pr_err("Fail in sending VSS_IMVM_CMD_STANDBY_VOICE\n");
			ret = -EINVAL;
			goto fail;
		}
		v->voc_state = VOC_STANDBY;
	}

fail:
	return ret;
}

int voc_set_lch(uint32_t session_id, enum voice_lch_mode lch_mode)
{
	struct voice_data *v = voice_get_session(session_id);
	int ret = 0;

	if (v == NULL) {
		pr_err("%s: Invalid session_id 0x%x\n", __func__, session_id);
		ret = -EINVAL;
		goto done;
	}

	mutex_lock(&v->lock);
	if (v->lch_mode == lch_mode) {
		pr_debug("%s: Session %d already in LCH mode %d\n",
			 __func__, session_id, lch_mode);
		mutex_unlock(&v->lock);
		goto done;
	}

	v->lch_mode = lch_mode;
	mutex_unlock(&v->lock);

	ret = voc_disable_cvp(session_id);
	if (ret < 0) {
		pr_err("%s: voc_disable_cvp failed ret=%d\n", __func__, ret);
		goto done;
	}

	/* Mute and topology_none will be set as part of voc_enable_cvp() */
	ret = voc_enable_cvp(session_id);
	if (ret < 0) {
		pr_err("%s: voc_enable_cvp failed ret=%d\n", __func__, ret);
		goto done;
	}

done:
	return ret;
}

int voc_resume_voice_call(uint32_t session_id)
{
	struct voice_data *v = voice_get_session(session_id);
	int ret = 0;

	/* Guard against an invalid session, as the other entry points do */
	if (v == NULL) {
		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
		return -EINVAL;
	}

	ret = voice_send_start_voice_cmd(v);
	if (ret < 0) {
		pr_err("Fail in sending START_VOICE\n");
		goto fail;
	}
	v->voc_state = VOC_RUN;
	return 0;

fail:
	return -EINVAL;
}

int voc_start_voice_call(uint32_t session_id)
{
	struct voice_data *v = voice_get_session(session_id);
	int ret = 0;

	if (v == NULL) {
		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
		return -EINVAL;
	}

	mutex_lock(&v->lock);

	if (v->voc_state == VOC_ERROR) {
		pr_debug("%s: VOC in ERR state\n", __func__);
		voice_destroy_mvm_cvs_session(v);
		v->voc_state = VOC_INIT;
	}

	if ((v->voc_state == VOC_INIT) ||
	    (v->voc_state == VOC_RELEASE)) {
		ret = voice_apr_register();
		if (ret < 0) {
			pr_err("%s: apr register failed\n", __func__);
			goto fail;
		}

		ret = voice_create_mvm_cvs_session(v);
		if (ret < 0) {
			pr_err("create mvm and cvs failed\n");
			goto fail;
		}

		/* Memory map the calibration memory block.
*/ ret = voice_mem_map_cal_block(v); if (ret < 0) { pr_err("%s: Memory map of cal block failed %d\n", __func__, ret); /* Allow call to continue, call quality will be bad. */ } if (is_voip_session(session_id)) { ret = voice_map_memory_physical_cmd(v, &v->shmem_info.memtbl, v->shmem_info.sh_buf.buf[0].phys, v->shmem_info.sh_buf.buf[0].size * NUM_OF_BUFFERS, VOIP_MEM_MAP_TOKEN); if (ret) { pr_err("%s: mvm_map_memory_phy failed %d\n", __func__, ret); goto fail; } ret = voice_set_packet_exchange_mode_and_config( session_id, VSS_ISTREAM_PACKET_EXCHANGE_MODE_OUT_OF_BAND); if (ret) { pr_err("%s: Err: exchange_mode_and_config %d\n", __func__, ret); goto fail; } } ret = voice_send_dual_control_cmd(v); if (ret < 0) { pr_err("Err Dual command failed\n"); goto fail; } ret = voice_setup_vocproc(v); if (ret < 0) { pr_err("setup voice failed\n"); goto fail; } ret = voice_send_vol_index_cmd(v); if (ret < 0) pr_err("voice volume failed\n"); ret = voice_send_stream_mute_cmd(v); if (ret < 0) pr_err("voice mute failed\n"); ret = voice_send_start_voice_cmd(v); if (ret < 0) { pr_err("start voice failed\n"); goto fail; } v->voc_state = VOC_RUN; } else { pr_err("%s: Error: Start voice called in state %d\n", __func__, v->voc_state); ret = -EINVAL; goto fail; } fail: mutex_unlock(&v->lock); return ret; } void voc_register_mvs_cb(ul_cb_fn ul_cb, dl_cb_fn dl_cb, void *private_data) { common.mvs_info.ul_cb = ul_cb; common.mvs_info.dl_cb = dl_cb; common.mvs_info.private_data = private_data; } void voc_register_dtmf_rx_detection_cb(dtmf_rx_det_cb_fn dtmf_rx_ul_cb, void *private_data) { common.dtmf_info.dtmf_rx_ul_cb = dtmf_rx_ul_cb; common.dtmf_info.private_data = private_data; } void voc_config_vocoder(uint32_t media_type, uint32_t rate, uint32_t network_type, uint32_t dtx_mode) { common.mvs_info.media_type = media_type; common.mvs_info.rate = rate; common.mvs_info.network_type = network_type; common.mvs_info.dtx_mode = dtx_mode; } static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv) { uint32_t *ptr = NULL; struct common_data *c = NULL; struct voice_data *v = NULL; int i = 0; if ((data == NULL) || (priv == NULL)) { pr_err("%s: data or priv is NULL\n", __func__); return -EINVAL; } c = priv; pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__, data->payload_size, data->opcode); if (data->opcode == RESET_EVENTS) { if (data->reset_proc == APR_DEST_MODEM) { pr_debug("%s: Received MODEM reset event\n", __func__); } else { pr_debug("%s: Reset event received in Voice service\n", __func__); apr_reset(c->apr_q6_mvm); c->apr_q6_mvm = NULL; /* clean up memory handle */ c->cal_mem_handle = 0; /* Sub-system restart is applicable to all sessions. */ for (i = 0; i < MAX_VOC_SESSIONS; i++) { c->voice[i].mvm_handle = 0; c->voice[i].shmem_info.mem_handle = 0; } } voc_set_error_state(data->reset_proc); return 0; } pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port); v = voice_get_session_by_idx(data->dest_port); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } if (data->opcode == APR_BASIC_RSP_RESULT) { if (data->payload_size) { ptr = data->payload; pr_debug("%x %x\n", ptr[0], ptr[1]); /* ping mvm service ACK */ switch (ptr[0]) { case VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION: case VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION: /* Passive session is used for CS call * Full session is used for VoIP call. 
*/ pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); if (!ptr[1]) { pr_debug("%s: MVM handle is %d\n", __func__, data->src_port); voice_set_mvm_handle(v, data->src_port); } else pr_err("got NACK for sending MVM create session\n"); v->mvm_state = CMD_STATUS_SUCCESS; wake_up(&v->mvm_wait); break; case VSS_IMVM_CMD_START_VOICE: case VSS_IMVM_CMD_ATTACH_VOCPROC: case VSS_IMVM_CMD_STOP_VOICE: case VSS_IMVM_CMD_DETACH_VOCPROC: case VSS_ISTREAM_CMD_SET_TTY_MODE: case APRV2_IBASIC_CMD_DESTROY_SESSION: case VSS_IMVM_CMD_ATTACH_STREAM: case VSS_IMVM_CMD_DETACH_STREAM: case VSS_ICOMMON_CMD_SET_NETWORK: case VSS_ICOMMON_CMD_SET_VOICE_TIMING: case VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL: case VSS_IMVM_CMD_SET_CAL_NETWORK: case VSS_IMVM_CMD_SET_CAL_MEDIA_TYPE: case VSS_IMEMORY_CMD_MAP_PHYSICAL: case VSS_IMEMORY_CMD_UNMAP: case VSS_IMVM_CMD_PAUSE_VOICE: case VSS_IMVM_CMD_STANDBY_VOICE: pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); v->mvm_state = CMD_STATUS_SUCCESS; wake_up(&v->mvm_wait); break; default: pr_debug("%s: not match cmd = 0x%x\n", __func__, ptr[0]); break; } } } else if (data->opcode == VSS_IMEMORY_RSP_MAP) { pr_debug("%s, Revd VSS_IMEMORY_RSP_MAP response\n", __func__); if (data->payload_size && data->token == VOIP_MEM_MAP_TOKEN) { ptr = data->payload; if (ptr[0]) { v->shmem_info.mem_handle = ptr[0]; pr_debug("%s: shared mem_handle: 0x[%x]\n", __func__, v->shmem_info.mem_handle); v->mvm_state = CMD_STATUS_SUCCESS; wake_up(&v->mvm_wait); } } else if (data->payload_size && data->token == VOC_CAL_MEM_MAP_TOKEN) { ptr = data->payload; if (ptr[0]) { c->cal_mem_handle = ptr[0]; pr_debug("%s: cal mem handle 0x%x\n", __func__, c->cal_mem_handle); v->mvm_state = CMD_STATUS_SUCCESS; wake_up(&v->mvm_wait); } } else { pr_err("%s: Unknown mem map token %d\n", __func__, data->token); } } return 0; } static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv) { uint32_t *ptr = NULL; struct common_data *c = NULL; struct voice_data *v = NULL; int i = 0; if ((data == NULL) || (priv == NULL)) { pr_err("%s: data or priv is NULL\n", __func__); return -EINVAL; } c = priv; pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port); pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__, data->payload_size, data->opcode); if (data->opcode == RESET_EVENTS) { if (data->reset_proc == APR_DEST_MODEM) { pr_debug("%s: Received Modem reset event\n", __func__); } else { pr_debug("%s: Reset event received in Voice service\n", __func__); apr_reset(c->apr_q6_cvs); c->apr_q6_cvs = NULL; /* Sub-system restart is applicable to all sessions. 
*/ for (i = 0; i < MAX_VOC_SESSIONS; i++) c->voice[i].cvs_handle = 0; } voc_set_error_state(data->reset_proc); return 0; } v = voice_get_session_by_idx(data->dest_port); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } if (data->opcode == APR_BASIC_RSP_RESULT) { if (data->payload_size) { ptr = data->payload; pr_debug("%x %x\n", ptr[0], ptr[1]); if (ptr[1] != 0) { pr_err("%s: cmd = 0x%x returned error = 0x%x\n", __func__, ptr[0], ptr[1]); } /*response from CVS */ switch (ptr[0]) { case VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION: case VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION: if (!ptr[1]) { pr_debug("%s: CVS handle is %d\n", __func__, data->src_port); voice_set_cvs_handle(v, data->src_port); } else pr_err("got NACK for sending CVS create session\n"); v->cvs_state = CMD_STATUS_SUCCESS; wake_up(&v->cvs_wait); break; case VSS_IVOLUME_CMD_MUTE_V2: case VSS_ISTREAM_CMD_SET_MEDIA_TYPE: case VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE: case VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE: case VSS_ISTREAM_CMD_SET_ENC_DTX_MODE: case VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE: case APRV2_IBASIC_CMD_DESTROY_SESSION: case VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA_V2: case VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA: case VSS_ICOMMON_CMD_MAP_MEMORY: case VSS_ICOMMON_CMD_UNMAP_MEMORY: case VSS_ICOMMON_CMD_SET_UI_PROPERTY: case VSS_IPLAYBACK_CMD_START: case VSS_IPLAYBACK_CMD_STOP: case VSS_IRECORD_CMD_START: case VSS_IRECORD_CMD_STOP: case VSS_ISTREAM_CMD_SET_PACKET_EXCHANGE_MODE: case VSS_ISTREAM_CMD_SET_OOB_PACKET_EXCHANGE_CONFIG: case VSS_ISTREAM_CMD_SET_RX_DTMF_DETECTION: pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); v->cvs_state = CMD_STATUS_SUCCESS; wake_up(&v->cvs_wait); break; case VOICE_CMD_SET_PARAM: pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__); rtac_make_voice_callback(RTAC_CVS, ptr, data->payload_size); break; case VOICE_CMD_GET_PARAM: pr_debug("%s: VOICE_CMD_GET_PARAM\n", __func__); /* Should only come here if there is an APR */ /* error or malformed APR packet. 
Otherwise */ /* response will be returned as */ /* VOICE_EVT_GET_PARAM_ACK */ if (ptr[1] != 0) { pr_err("%s: CVP get param error = %d, resuming\n", __func__, ptr[1]); rtac_make_voice_callback(RTAC_CVP, data->payload, data->payload_size); } break; default: pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); break; } } } else if (data->opcode == VSS_ISTREAM_EVT_OOB_NOTIFY_ENC_BUFFER_READY) { int ret = 0; u16 cvs_handle; uint32_t *cvs_voc_pkt; struct cvs_enc_buffer_consumed_cmd send_enc_buf_consumed_cmd; void *apr_cvs; pr_debug("Encoder buffer is ready\n"); apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); send_enc_buf_consumed_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); send_enc_buf_consumed_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(send_enc_buf_consumed_cmd) - APR_HDR_SIZE); send_enc_buf_consumed_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); send_enc_buf_consumed_cmd.hdr.dest_port = cvs_handle; send_enc_buf_consumed_cmd.hdr.token = 0; send_enc_buf_consumed_cmd.hdr.opcode = VSS_ISTREAM_EVT_OOB_NOTIFY_ENC_BUFFER_CONSUMED; cvs_voc_pkt = v->shmem_info.sh_buf.buf[1].data; if (cvs_voc_pkt != NULL && common.mvs_info.ul_cb != NULL) { common.mvs_info.ul_cb((uint8_t *)&cvs_voc_pkt[3], cvs_voc_pkt[2], common.mvs_info.private_data); } else pr_err("%s: cvs_voc_pkt or ul_cb is NULL\n", __func__); ret = apr_send_pkt(apr_cvs, (uint32_t *) &send_enc_buf_consumed_cmd); if (ret < 0) { pr_err("%s: Err send ENC_BUF_CONSUMED_NOTIFY %d\n", __func__, ret); goto fail; } } else if (data->opcode == VSS_ISTREAM_EVT_SEND_ENC_BUFFER) { pr_debug("Recd VSS_ISTREAM_EVT_SEND_ENC_BUFFER\n"); } else if (data->opcode == VSS_ISTREAM_EVT_OOB_NOTIFY_DEC_BUFFER_REQUEST) { int ret = 0; u16 cvs_handle; uint32_t *cvs_voc_pkt; struct cvs_dec_buffer_ready_cmd send_dec_buf; void *apr_cvs; apr_cvs = common.apr_q6_cvs; if (!apr_cvs) { pr_err("%s: apr_cvs is NULL\n", __func__); return -EINVAL; } cvs_handle = voice_get_cvs_handle(v); send_dec_buf.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); send_dec_buf.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(send_dec_buf) - APR_HDR_SIZE); send_dec_buf.hdr.src_port = voice_get_idx_for_session(v->session_id); send_dec_buf.hdr.dest_port = cvs_handle; send_dec_buf.hdr.token = 0; send_dec_buf.hdr.opcode = VSS_ISTREAM_EVT_OOB_NOTIFY_DEC_BUFFER_READY; cvs_voc_pkt = (uint32_t *)(v->shmem_info.sh_buf.buf[0].data); if (cvs_voc_pkt != NULL && common.mvs_info.dl_cb != NULL) { /* Set timestamp to 0 and advance the pointer */ cvs_voc_pkt[0] = 0; /* Set media_type and advance the pointer */ cvs_voc_pkt[1] = common.mvs_info.media_type; common.mvs_info.dl_cb( (uint8_t *)&cvs_voc_pkt[2], common.mvs_info.private_data); ret = apr_send_pkt(apr_cvs, (uint32_t *) &send_dec_buf); if (ret < 0) { pr_err("%s: Err send DEC_BUF_READY_NOTIFI %d\n", __func__, ret); goto fail; } } else { pr_debug("%s: voc_pkt or dl_cb is NULL\n", __func__); goto fail; } } else if (data->opcode == VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER) { pr_debug("Recd VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER\n"); } else if (data->opcode == VSS_ISTREAM_EVT_SEND_DEC_BUFFER) { pr_debug("Send dec buf resp\n"); } else if (data->opcode == APR_RSP_ACCEPTED) { ptr = data->payload; if (ptr[0]) pr_debug("%s: APR_RSP_ACCEPTED for 0x%x:\n", __func__, ptr[0]); } else if (data->opcode == VSS_ISTREAM_EVT_NOT_READY) { pr_debug("Recd VSS_ISTREAM_EVT_NOT_READY\n"); } else if 
(data->opcode == VSS_ISTREAM_EVT_READY) { pr_debug("Recd VSS_ISTREAM_EVT_READY\n"); } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) { pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__); ptr = data->payload; if (ptr[0] != 0) { pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n", __func__, ptr[0]); } rtac_make_voice_callback(RTAC_CVS, data->payload, data->payload_size); } else if (data->opcode == VSS_ISTREAM_EVT_RX_DTMF_DETECTED) { struct vss_istream_evt_rx_dtmf_detected *dtmf_rx_detected; uint32_t *voc_pkt = data->payload; uint32_t pkt_len = data->payload_size; if ((voc_pkt != NULL) && (pkt_len == sizeof(struct vss_istream_evt_rx_dtmf_detected))) { dtmf_rx_detected = (struct vss_istream_evt_rx_dtmf_detected *) voc_pkt; pr_debug("RX_DTMF_DETECTED low_freq=%d high_freq=%d\n", dtmf_rx_detected->low_freq, dtmf_rx_detected->high_freq); if (c->dtmf_info.dtmf_rx_ul_cb) c->dtmf_info.dtmf_rx_ul_cb((uint8_t *)voc_pkt, voc_get_session_name(v->session_id), c->dtmf_info.private_data); } else { pr_err("Invalid packet\n"); } } else pr_debug("Unknown opcode 0x%x\n", data->opcode); fail: return 0; } static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv) { uint32_t *ptr = NULL; struct common_data *c = NULL; struct voice_data *v = NULL; int i = 0; if ((data == NULL) || (priv == NULL)) { pr_err("%s: data or priv is NULL\n", __func__); return -EINVAL; } c = priv; if (data->opcode == RESET_EVENTS) { if (data->reset_proc == APR_DEST_MODEM) { pr_debug("%s: Received Modem reset event\n", __func__); } else { pr_debug("%s: Reset event received in Voice service\n", __func__); apr_reset(c->apr_q6_cvp); c->apr_q6_cvp = NULL; /* Sub-system restart is applicable to all sessions. */ for (i = 0; i < MAX_VOC_SESSIONS; i++) c->voice[i].cvp_handle = 0; } voc_set_error_state(data->reset_proc); return 0; } v = voice_get_session_by_idx(data->dest_port); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } if (data->opcode == APR_BASIC_RSP_RESULT) { if (data->payload_size) { ptr = data->payload; pr_debug("%x %x\n", ptr[0], ptr[1]); if (ptr[1] != 0) { pr_err("%s: cmd = 0x%x returned error = 0x%x\n", __func__, ptr[0], ptr[1]); } switch (ptr[0]) { case VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2: /*response from CVP */ pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); if (!ptr[1]) { voice_set_cvp_handle(v, data->src_port); pr_debug("status: %d, cvphdl=%d\n", ptr[1], data->src_port); } else pr_err("got NACK from CVP create session response\n"); v->cvp_state = CMD_STATUS_SUCCESS; wake_up(&v->cvp_wait); break; case VSS_IVOCPROC_CMD_SET_DEVICE_V2: case VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX: case VSS_IVOCPROC_CMD_ENABLE: case VSS_IVOCPROC_CMD_DISABLE: case APRV2_IBASIC_CMD_DESTROY_SESSION: case VSS_IVOCPROC_CMD_REGISTER_VOL_CALIBRATION_DATA: case VSS_IVOCPROC_CMD_DEREGISTER_VOL_CALIBRATION_DATA: case VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA_V2: case VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA: case VSS_IVOCPROC_CMD_REGISTER_DEVICE_CONFIG: case VSS_IVOCPROC_CMD_DEREGISTER_DEVICE_CONFIG: case VSS_ICOMMON_CMD_MAP_MEMORY: case VSS_ICOMMON_CMD_UNMAP_MEMORY: case VSS_IVOLUME_CMD_MUTE_V2: v->cvp_state = CMD_STATUS_SUCCESS; wake_up(&v->cvp_wait); break; case VOICE_CMD_SET_PARAM: pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__); rtac_make_voice_callback(RTAC_CVP, ptr, data->payload_size); break; case VOICE_CMD_GET_PARAM: pr_debug("%s: VOICE_CMD_GET_PARAM\n", __func__); /* Should only come here if there is an APR */ /* error or malformed APR packet. 
Otherwise */ /* response will be returned as */ /* VOICE_EVT_GET_PARAM_ACK */ if (ptr[1] != 0) { pr_err("%s: CVP get param error = %d, resuming\n", __func__, ptr[1]); rtac_make_voice_callback(RTAC_CVP, data->payload, data->payload_size); } break; default: pr_debug("%s: not match cmd = 0x%x\n", __func__, ptr[0]); break; } } } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) { pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__); ptr = data->payload; if (ptr[0] != 0) { pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n", __func__, ptr[0]); } rtac_make_voice_callback(RTAC_CVP, data->payload, data->payload_size); } return 0; } static int voice_alloc_oob_shared_mem(void) { int cnt = 0; int rc = 0; int len; void *mem_addr; dma_addr_t phys; int bufsz = BUFFER_BLOCK_SIZE; int bufcnt = NUM_OF_BUFFERS; struct voice_data *v = voice_get_session( common.voice[VOC_PATH_FULL].session_id); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } rc = msm_audio_ion_alloc("voip_client", &(v->shmem_info.sh_buf.client), &(v->shmem_info.sh_buf.handle), bufsz*bufcnt, (ion_phys_addr_t *)&phys, (size_t *)&len, &mem_addr); if (rc) { pr_err("%s: audio ION alloc failed, rc = %d\n", __func__, rc); return -EINVAL; } while (cnt < bufcnt) { v->shmem_info.sh_buf.buf[cnt].data = mem_addr + (cnt * bufsz); v->shmem_info.sh_buf.buf[cnt].phys = phys + (cnt * bufsz); v->shmem_info.sh_buf.buf[cnt].size = bufsz; cnt++; } pr_debug("%s buf[0].data:[%p], buf[0].phys:[%p], &buf[0].phys:[%p],\n", __func__, (void *)v->shmem_info.sh_buf.buf[0].data, (void *)v->shmem_info.sh_buf.buf[0].phys, (void *)&v->shmem_info.sh_buf.buf[0].phys); pr_debug("%s: buf[1].data:[%p], buf[1].phys[%p], &buf[1].phys[%p]\n", __func__, (void *)v->shmem_info.sh_buf.buf[1].data, (void *)v->shmem_info.sh_buf.buf[1].phys, (void *)&v->shmem_info.sh_buf.buf[1].phys); memset((void *)v->shmem_info.sh_buf.buf[0].data, 0, (bufsz * bufcnt)); return 0; } static int voice_alloc_oob_mem_table(void) { int rc = 0; int len; struct voice_data *v = voice_get_session( common.voice[VOC_PATH_FULL].session_id); if (v == NULL) { pr_err("%s: v is NULL\n", __func__); return -EINVAL; } rc = msm_audio_ion_alloc("voip_client", &(v->shmem_info.memtbl.client), &(v->shmem_info.memtbl.handle), sizeof(struct vss_imemory_table_t), (ion_phys_addr_t *)&v->shmem_info.memtbl.phys, (size_t *)&len, &(v->shmem_info.memtbl.data)); if (rc) { pr_err("%s: audio ION alloc failed, rc = %d\n", __func__, rc); return -EINVAL; } v->shmem_info.memtbl.size = sizeof(struct vss_imemory_table_t); pr_debug("%s data[%p]phys[%p][%p]\n", __func__, (void *)v->shmem_info.memtbl.data, (void *)v->shmem_info.memtbl.phys, (void *)&v->shmem_info.memtbl.phys); return 0; } static int voice_alloc_cal_mem_map_table(void) { int ret = 0; int len; ret = msm_audio_ion_alloc("voip_client", &(common.cal_mem_map_table.client), &(common.cal_mem_map_table.handle), sizeof(struct vss_imemory_table_t), (ion_phys_addr_t *)&common.cal_mem_map_table.phys, (size_t *) &len, &(common.cal_mem_map_table.data)); if (ret) { pr_err("%s: audio ION alloc failed, rc = %d\n", __func__, ret); return -EINVAL; } common.cal_mem_map_table.size = sizeof(struct vss_imemory_table_t); pr_debug("%s: data 0x%x phys 0x%x\n", __func__, (unsigned int) common.cal_mem_map_table.data, common.cal_mem_map_table.phys); return 0; } static int __init voice_init(void) { int rc = 0, i = 0; memset(&common, 0, sizeof(struct common_data)); /* set default value */ common.default_mute_val = 0; /* default is un-mute */ common.default_vol_val = 0; 
	common.default_sample_val = 8000;

	/* Initialize MVS info. */
	common.mvs_info.network_type = VSS_NETWORK_ID_DEFAULT;

	mutex_init(&common.common_lock);

	/* Initialize session id with vsid */
	init_session_id();

	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
		/* initialize dev_rx and dev_tx */
		common.voice[i].dev_rx.volume = common.default_vol_val;
		common.voice[i].dev_rx.dev_mute = common.default_mute_val;
		common.voice[i].dev_tx.dev_mute = common.default_mute_val;
		common.voice[i].stream_rx.stream_mute = common.default_mute_val;
		common.voice[i].stream_tx.stream_mute = common.default_mute_val;
		common.voice[i].dev_tx.port_id = 0x100B;
		common.voice[i].dev_rx.port_id = 0x100A;
		common.voice[i].sidetone_gain = 0x512;
		common.voice[i].dtmf_rx_detect_en = 0;
		common.voice[i].lch_mode = 0;

		common.voice[i].voc_state = VOC_INIT;

		init_waitqueue_head(&common.voice[i].mvm_wait);
		init_waitqueue_head(&common.voice[i].cvs_wait);
		init_waitqueue_head(&common.voice[i].cvp_wait);

		mutex_init(&common.voice[i].lock);
	}

	/* Allocate shared memory for OOB Voip */
	rc = voice_alloc_oob_shared_mem();
	if (rc < 0)
		pr_err("failed to alloc shared memory for OOB %d\n", rc);
	else {
		/* Allocate mem map table for OOB */
		rc = voice_alloc_oob_mem_table();
		if (rc < 0)
			pr_err("failed to alloc mem map table %d\n", rc);
	}

	/* Allocate memory for calibration memory map table. */
	rc = voice_alloc_cal_mem_map_table();

	return rc;
}

late_initcall(voice_init);
upworkstar/AndroidAmazon
sound/soc/msm/qdsp6v2/q6voice.c
C
gpl-2.0
133,119
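
The q6voice.c record above leans on one control-flow idiom for every DSP command: mark the per-session state CMD_STATUS_FAIL, send an APR packet, then block in wait_event_timeout() until the service callback flips the state to CMD_STATUS_SUCCESS and calls wake_up(), or the timeout expires. The stand-alone sketch below models that handshake in user space, substituting POSIX threads for kernel wait queues and for APR; every identifier in it is illustrative and not part of the driver.

/*
 * Minimal model of the driver's send-command-and-wait-for-ACK pattern.
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

enum cmd_status { CMD_FAIL, CMD_SUCCESS };

struct session {
	pthread_mutex_t lock;
	pthread_cond_t wait;		/* stands in for the wait queue */
	enum cmd_status state;
};

/* Callback path: the "DSP" acknowledges and wakes the waiter,
 * as qdsp_*_callback() does on an APR_BASIC_RSP_RESULT. */
static void *dsp_ack(void *arg)
{
	struct session *s = arg;

	pthread_mutex_lock(&s->lock);
	s->state = CMD_SUCCESS;		/* v->cvs_state = CMD_STATUS_SUCCESS */
	pthread_cond_signal(&s->wait);	/* wake_up(&v->cvs_wait) */
	pthread_mutex_unlock(&s->lock);
	return NULL;
}

/* Command path: send, then block (bounded) until the ACK arrives. */
static int send_and_wait(struct session *s, int timeout_sec)
{
	struct timespec deadline;
	pthread_t ack;
	int rc = 0;

	s->state = CMD_FAIL;			/* assume failure up front */
	pthread_create(&ack, NULL, dsp_ack, s);	/* "apr_send_pkt()" */

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;		/* msecs_to_jiffies(TIMEOUT_MS) */

	pthread_mutex_lock(&s->lock);
	while (s->state != CMD_SUCCESS && rc == 0)
		rc = pthread_cond_timedwait(&s->wait, &s->lock, &deadline);
	pthread_mutex_unlock(&s->lock);

	pthread_join(ack, NULL);
	return s->state == CMD_SUCCESS ? 0 : -1;
}

int main(void)
{
	struct session s = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, CMD_FAIL
	};

	printf("handshake %s\n", send_and_wait(&s, 1) ? "timed out" : "acked");
	return 0;
}

Setting the state to FAIL before sending is what makes the pattern race-free: even if the ACK lands before the sender starts waiting, the state check under the lock still observes SUCCESS.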
/* * Copyright 2005 Eric Anholt * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Eric Anholt <anholt@FreeBSD.org> * */ #include "sis_context.h" #include "sis_state.h" #include "sis_tris.h" #include "sis_lock.h" #include "sis_tex.h" #include "sis_reg.h" #include "context.h" #include "enums.h" #include "colormac.h" #include "swrast/swrast.h" #include "vbo/vbo.h" #include "tnl/tnl.h" #include "swrast_setup/swrast_setup.h" #include "tnl/t_pipeline.h" /* ============================================================= * Alpha blending */ static void sis6326DDAlphaFunc( GLcontext *ctx, GLenum func, GLfloat ref ) { sisContextPtr smesa = SIS_CONTEXT(ctx); GLubyte refbyte; __GLSiSHardware *prev = &smesa->prev; __GLSiSHardware *current = &smesa->current; CLAMPED_FLOAT_TO_UBYTE(refbyte, ref); current->hwAlpha = refbyte << 16; /* Alpha Test function */ switch (func) { case GL_NEVER: current->hwAlpha |= S_ASET_PASS_NEVER; break; case GL_LESS: current->hwAlpha |= S_ASET_PASS_LESS; break; case GL_EQUAL: current->hwAlpha |= S_ASET_PASS_EQUAL; break; case GL_LEQUAL: current->hwAlpha |= S_ASET_PASS_LEQUAL; break; case GL_GREATER: current->hwAlpha |= S_ASET_PASS_GREATER; break; case GL_NOTEQUAL: current->hwAlpha |= S_ASET_PASS_NOTEQUAL; break; case GL_GEQUAL: current->hwAlpha |= S_ASET_PASS_GEQUAL; break; case GL_ALWAYS: current->hwAlpha |= S_ASET_PASS_ALWAYS; break; } prev->hwAlpha = current->hwAlpha; smesa->GlobalFlag |= GFLAG_ALPHASETTING; } static void sis6326DDBlendFuncSeparate( GLcontext *ctx, GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorA, GLenum dfactorA ) { sisContextPtr smesa = SIS_CONTEXT(ctx); __GLSiSHardware *prev = &smesa->prev; __GLSiSHardware *current = &smesa->current; current->hwDstSrcBlend = 0; switch (dfactorRGB) { case GL_ZERO: current->hwDstSrcBlend |= S_DBLEND_ZERO; break; case GL_ONE: current->hwDstSrcBlend |= S_DBLEND_ONE; break; case GL_SRC_COLOR: current->hwDstSrcBlend |= S_DBLEND_SRC_COLOR; break; case GL_ONE_MINUS_SRC_COLOR: current->hwDstSrcBlend |= S_DBLEND_INV_SRC_COLOR; break; case GL_SRC_ALPHA: current->hwDstSrcBlend |= S_DBLEND_SRC_ALPHA; break; case GL_ONE_MINUS_SRC_ALPHA: current->hwDstSrcBlend |= S_DBLEND_INV_SRC_ALPHA; break; case GL_DST_ALPHA: current->hwDstSrcBlend |= S_DBLEND_DST_ALPHA; break; case GL_ONE_MINUS_DST_ALPHA: current->hwDstSrcBlend |= S_DBLEND_INV_DST_ALPHA; break; } switch (sfactorRGB) { case GL_ZERO: current->hwDstSrcBlend |= S_SBLEND_ZERO; break; case GL_ONE: current->hwDstSrcBlend |= 
S_SBLEND_ONE; break; case GL_SRC_ALPHA: current->hwDstSrcBlend |= S_SBLEND_SRC_ALPHA; break; case GL_ONE_MINUS_SRC_ALPHA: current->hwDstSrcBlend |= S_SBLEND_INV_SRC_ALPHA; break; case GL_DST_ALPHA: current->hwDstSrcBlend |= S_SBLEND_DST_ALPHA; break; case GL_ONE_MINUS_DST_ALPHA: current->hwDstSrcBlend |= S_SBLEND_INV_DST_ALPHA; break; case GL_DST_COLOR: current->hwDstSrcBlend |= S_SBLEND_DST_COLOR; break; case GL_ONE_MINUS_DST_COLOR: current->hwDstSrcBlend |= S_SBLEND_INV_DST_COLOR; break; case GL_SRC_ALPHA_SATURATE: current->hwDstSrcBlend |= S_SBLEND_SRC_ALPHA_SAT; break; } if (current->hwDstSrcBlend != prev->hwDstSrcBlend) { prev->hwDstSrcBlend = current->hwDstSrcBlend; smesa->GlobalFlag |= GFLAG_DSTBLEND; } } /* ============================================================= * Depth testing */ static void sis6326DDDepthFunc( GLcontext *ctx, GLenum func ) { sisContextPtr smesa = SIS_CONTEXT(ctx); __GLSiSHardware *prev = &smesa->prev; __GLSiSHardware *current = &smesa->current; current->hwZ &= ~MASK_6326_ZTestMode; switch (func) { case GL_LESS: current->hwZ |= S_ZSET_PASS_LESS; break; case GL_GEQUAL: current->hwZ |= S_ZSET_PASS_GEQUAL; break; case GL_LEQUAL: current->hwZ |= S_ZSET_PASS_LEQUAL; break; case GL_GREATER: current->hwZ |= S_ZSET_PASS_GREATER; break; case GL_NOTEQUAL: current->hwZ |= S_ZSET_PASS_NOTEQUAL; break; case GL_EQUAL: current->hwZ |= S_ZSET_PASS_EQUAL; break; case GL_ALWAYS: current->hwZ |= S_ZSET_PASS_ALWAYS; break; case GL_NEVER: current->hwZ |= S_ZSET_PASS_NEVER; break; } if (current->hwZ != prev->hwZ) { prev->hwZ = current->hwZ; smesa->GlobalFlag |= GFLAG_ZSETTING; } } static void sis6326DDDepthMask( GLcontext *ctx, GLboolean flag ) { sisContextPtr smesa = SIS_CONTEXT(ctx); __GLSiSHardware *current = &smesa->current; if (ctx->Depth.Test) current->hwCapEnable |= S_ENABLE_ZWrite; else current->hwCapEnable &= ~S_ENABLE_ZWrite; } /* ============================================================= * Fog */ static void sis6326DDFogfv( GLcontext *ctx, GLenum pname, const GLfloat *params ) { sisContextPtr smesa = SIS_CONTEXT(ctx); __GLSiSHardware *current = &smesa->current; __GLSiSHardware *prev = &smesa->prev; GLint fogColor; switch(pname) { case GL_FOG_COLOR: fogColor = FLOAT_TO_UBYTE( ctx->Fog.Color[0] ) << 16; fogColor |= FLOAT_TO_UBYTE( ctx->Fog.Color[1] ) << 8; fogColor |= FLOAT_TO_UBYTE( ctx->Fog.Color[2] ); current->hwFog = 0x01000000 | fogColor; if (current->hwFog != prev->hwFog) { prev->hwFog = current->hwFog; smesa->GlobalFlag |= GFLAG_FOGSETTING; } break; } } /* ============================================================= * Clipping */ void sis6326UpdateClipping(GLcontext *ctx) { sisContextPtr smesa = SIS_CONTEXT(ctx); __GLSiSHardware *prev = &smesa->prev; __GLSiSHardware *current = &smesa->current; GLint x1, y1, x2, y2; x1 = 0; y1 = 0; x2 = smesa->width - 1; y2 = smesa->height - 1; if (ctx->Scissor.Enabled) { if (ctx->Scissor.X > x1) x1 = ctx->Scissor.X; if (ctx->Scissor.Y > y1) y1 = ctx->Scissor.Y; if (ctx->Scissor.X + ctx->Scissor.Width - 1 < x2) x2 = ctx->Scissor.X + ctx->Scissor.Width - 1; if (ctx->Scissor.Y + ctx->Scissor.Height - 1 < y2) y2 = ctx->Scissor.Y + ctx->Scissor.Height - 1; } y1 = Y_FLIP(y1); y2 = Y_FLIP(y2); /*current->clipTopBottom = (y2 << 13) | y1; current->clipLeftRight = (x1 << 13) | x2;*/ /* XXX */ current->clipTopBottom = (0 << 13) | smesa->height; current->clipLeftRight = (0 << 13) | smesa->width; if ((current->clipTopBottom != prev->clipTopBottom) || (current->clipLeftRight != prev->clipLeftRight)) { prev->clipTopBottom = 
current->clipTopBottom; prev->clipLeftRight = current->clipLeftRight; smesa->GlobalFlag |= GFLAG_CLIPPING; } } static void sis6326DDScissor( GLcontext *ctx, GLint x, GLint y, GLsizei w, GLsizei h ) { if (ctx->Scissor.Enabled) sis6326UpdateClipping( ctx ); } /* ============================================================= * Culling */ static void sis6326UpdateCull( GLcontext *ctx ) { /* XXX culling */ } static void sis6326DDCullFace( GLcontext *ctx, GLenum mode ) { sis6326UpdateCull( ctx ); } static void sis6326DDFrontFace( GLcontext *ctx, GLenum mode ) { sis6326UpdateCull( ctx ); } /* ============================================================= * Masks */ static void sis6326DDColorMask( GLcontext *ctx, GLboolean r, GLboolean g, GLboolean b, GLboolean a ) { sisContextPtr smesa = SIS_CONTEXT(ctx); if (r && g && b && ((ctx->Visual.alphaBits == 0) || a)) { FALLBACK(smesa, SIS_FALLBACK_WRITEMASK, 0); } else { FALLBACK(smesa, SIS_FALLBACK_WRITEMASK, 1); } } /* ============================================================= * Rendering attributes */ static void sis6326UpdateSpecular(GLcontext *ctx) { sisContextPtr smesa = SIS_CONTEXT(ctx); __GLSiSHardware *current = &smesa->current; if (NEED_SECONDARY_COLOR(ctx)) current->hwCapEnable |= S_ENABLE_Specular; else current->hwCapEnable &= ~S_ENABLE_Specular; } static void sis6326DDLightModelfv(GLcontext *ctx, GLenum pname, const GLfloat *param) { if (pname == GL_LIGHT_MODEL_COLOR_CONTROL) { sis6326UpdateSpecular(ctx); } } static void sis6326DDShadeModel( GLcontext *ctx, GLenum mode ) { sisContextPtr smesa = SIS_CONTEXT(ctx); /* Signal to sisRasterPrimitive to recalculate dwPrimitiveSet */ smesa->hw_primitive = -1; } /* ============================================================= * Window position */ /* ============================================================= * Viewport */ static void sis6326CalcViewport( GLcontext *ctx ) { sisContextPtr smesa = SIS_CONTEXT(ctx); const GLfloat *v = ctx->Viewport._WindowMap.m; GLfloat *m = smesa->hw_viewport; /* See also sis_translate_vertex. 
*/ m[MAT_SX] = v[MAT_SX]; m[MAT_TX] = v[MAT_TX] + SUBPIXEL_X; m[MAT_SY] = - v[MAT_SY]; m[MAT_TY] = - v[MAT_TY] + smesa->driDrawable->h + SUBPIXEL_Y; m[MAT_SZ] = v[MAT_SZ] * smesa->depth_scale; m[MAT_TZ] = v[MAT_TZ] * smesa->depth_scale; } static void sis6326DDViewport( GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height ) { sis6326CalcViewport( ctx ); } static void sis6326DDDepthRange( GLcontext *ctx, GLclampd nearval, GLclampd farval ) { sis6326CalcViewport( ctx ); } /* ============================================================= * Miscellaneous */ static void sis6326DDLogicOpCode( GLcontext *ctx, GLenum opcode ) { sisContextPtr smesa = SIS_CONTEXT(ctx); __GLSiSHardware *prev = &smesa->prev; __GLSiSHardware *current = &smesa->current; if (!ctx->Color.ColorLogicOpEnabled) return; current->hwDstSet &= ~MASK_ROP2; switch (opcode) { case GL_CLEAR: current->hwDstSet |= LOP_CLEAR; break; case GL_SET: current->hwDstSet |= LOP_SET; break; case GL_COPY: current->hwDstSet |= LOP_COPY; break; case GL_COPY_INVERTED: current->hwDstSet |= LOP_COPY_INVERTED; break; case GL_NOOP: current->hwDstSet |= LOP_NOOP; break; case GL_INVERT: current->hwDstSet |= LOP_INVERT; break; case GL_AND: current->hwDstSet |= LOP_AND; break; case GL_NAND: current->hwDstSet |= LOP_NAND; break; case GL_OR: current->hwDstSet |= LOP_OR; break; case GL_NOR: current->hwDstSet |= LOP_NOR; break; case GL_XOR: current->hwDstSet |= LOP_XOR; break; case GL_EQUIV: current->hwDstSet |= LOP_EQUIV; break; case GL_AND_REVERSE: current->hwDstSet |= LOP_AND_REVERSE; break; case GL_AND_INVERTED: current->hwDstSet |= LOP_AND_INVERTED; break; case GL_OR_REVERSE: current->hwDstSet |= LOP_OR_REVERSE; break; case GL_OR_INVERTED: current->hwDstSet |= LOP_OR_INVERTED; break; } if (current->hwDstSet != prev->hwDstSet) { prev->hwDstSet = current->hwDstSet; smesa->GlobalFlag |= GFLAG_DESTSETTING; } } void sis6326DDDrawBuffer( GLcontext *ctx, GLenum mode ) { sisContextPtr smesa = SIS_CONTEXT(ctx); __GLSiSHardware *prev = &smesa->prev; __GLSiSHardware *current = &smesa->current; if(getenv("SIS_DRAW_FRONT")) ctx->DrawBuffer->_ColorDrawBufferMask[0] = GL_FRONT_LEFT; /* * _DrawDestMask is easier to cope with than <mode>. 
*/ current->hwDstSet &= ~MASK_DstBufferPitch; switch ( ctx->DrawBuffer->_ColorDrawBufferMask[0] ) { case BUFFER_BIT_FRONT_LEFT: current->hwOffsetDest = smesa->front.offset; current->hwDstSet |= smesa->front.pitch; FALLBACK( smesa, SIS_FALLBACK_DRAW_BUFFER, GL_FALSE ); break; case BUFFER_BIT_BACK_LEFT: current->hwOffsetDest = smesa->back.offset; current->hwDstSet |= smesa->back.pitch; FALLBACK( smesa, SIS_FALLBACK_DRAW_BUFFER, GL_FALSE ); break; default: /* GL_NONE or GL_FRONT_AND_BACK or stereo left&right, etc */ FALLBACK( smesa, SIS_FALLBACK_DRAW_BUFFER, GL_TRUE ); return; } if (current->hwDstSet != prev->hwDstSet) { prev->hwDstSet = current->hwDstSet; smesa->GlobalFlag |= GFLAG_DESTSETTING; } if (current->hwOffsetDest != prev->hwOffsetDest) { prev->hwOffsetDest = current->hwOffsetDest; smesa->GlobalFlag |= GFLAG_DESTSETTING; } } /* ============================================================= * Polygon stipple */ /* ============================================================= * Render mode */ /* ============================================================= * State enable/disable */ static void sis6326DDEnable( GLcontext *ctx, GLenum cap, GLboolean state ) { sisContextPtr smesa = SIS_CONTEXT(ctx); __GLSiSHardware *current = &smesa->current; switch (cap) { case GL_ALPHA_TEST: if (state) current->hwCapEnable |= S_ENABLE_AlphaTest; else current->hwCapEnable &= ~S_ENABLE_AlphaTest; break; case GL_BLEND: /* TODO: */ if (state) /* if (state & !ctx->Color.ColorLogicOpEnabled) */ current->hwCapEnable |= S_ENABLE_Blend; else current->hwCapEnable &= ~S_ENABLE_Blend; break; case GL_CULL_FACE: /* XXX culling */ break; case GL_DEPTH_TEST: if (state && smesa->depth.offset != 0) current->hwCapEnable |= S_ENABLE_ZTest; else current->hwCapEnable &= ~S_ENABLE_ZTest; sis6326DDDepthMask( ctx, ctx->Depth.Mask ); break; case GL_DITHER: if (state) current->hwCapEnable |= S_ENABLE_Dither; else current->hwCapEnable &= ~S_ENABLE_Dither; break; case GL_FOG: if (state) current->hwCapEnable |= S_ENABLE_Fog; else current->hwCapEnable &= ~S_ENABLE_Fog; break; case GL_COLOR_LOGIC_OP: if (state) sis6326DDLogicOpCode( ctx, ctx->Color.LogicOp ); else sis6326DDLogicOpCode( ctx, GL_COPY ); break; case GL_SCISSOR_TEST: sis6326UpdateClipping( ctx ); break; case GL_STENCIL_TEST: if (state) { FALLBACK(smesa, SIS_FALLBACK_STENCIL, 1); } else { FALLBACK(smesa, SIS_FALLBACK_STENCIL, 0); } break; case GL_LIGHTING: case GL_COLOR_SUM_EXT: sis6326UpdateSpecular(ctx); break; } } /* ============================================================= * State initialization, management */ /* Called before beginning of rendering. */ void sis6326UpdateHWState( GLcontext *ctx ) { sisContextPtr smesa = SIS_CONTEXT(ctx); __GLSiSHardware *prev = &smesa->prev; __GLSiSHardware *current = &smesa->current; if (smesa->NewGLState & _NEW_TEXTURE) sisUpdateTextureState( ctx ); if (current->hwCapEnable ^ prev->hwCapEnable) { prev->hwCapEnable = current->hwCapEnable; smesa->GlobalFlag |= GFLAG_ENABLESETTING; } if (smesa->GlobalFlag & GFLAG_RENDER_STATES) sis_update_render_state( smesa ); if (smesa->GlobalFlag & GFLAG_TEXTURE_STATES) sis_update_texture_state( smesa ); } static void sis6326DDInvalidateState( GLcontext *ctx, GLuint new_state ) { sisContextPtr smesa = SIS_CONTEXT(ctx); _swrast_InvalidateState( ctx, new_state ); _swsetup_InvalidateState( ctx, new_state ); _vbo_InvalidateState( ctx, new_state ); _tnl_InvalidateState( ctx, new_state ); smesa->NewGLState |= new_state; } /* Initialize the context's hardware state. 
*/ void sis6326DDInitState( sisContextPtr smesa ) { __GLSiSHardware *prev = &smesa->prev; __GLSiSHardware *current = &smesa->current; GLcontext *ctx = smesa->glCtx; /* add Texture Perspective Enable */ current->hwCapEnable = S_ENABLE_TextureCache | S_ENABLE_TexturePerspective | S_ENABLE_Dither; /* Z test mode is LESS */ current->hwZ = S_ZSET_PASS_LESS | S_ZSET_FORMAT_16; if (ctx->Visual.depthBits > 0) current->hwCapEnable |= S_ENABLE_ZWrite; /* Alpha test mode is ALWAYS, alpha ref value is 0 */ current->hwAlpha = S_ASET_PASS_ALWAYS; /* ROP2 is COPYPEN */ current->hwDstSet = LOP_COPY; /* LinePattern is 0, Repeat Factor is 0 */ current->hwLinePattern = 0x00008000; /* Src blend is BLEND_ONE, Dst blend is D3DBLEND_ZERO */ current->hwDstSrcBlend = S_SBLEND_ONE | S_DBLEND_ZERO; switch (smesa->bytesPerPixel) { case 2: current->hwDstSet |= DST_FORMAT_RGB_565; break; case 4: current->hwDstSet |= DST_FORMAT_ARGB_8888; break; } smesa->depth_scale = 1.0 / (GLfloat)0xffff; smesa->clearTexCache = GL_TRUE; smesa->clearColorPattern = 0; sis6326UpdateZPattern(smesa, 1.0); sis6326UpdateCull(ctx); /* Set initial fog settings. Start and end are the same case. */ sis6326DDFogfv( ctx, GL_FOG_DENSITY, &ctx->Fog.Density ); sis6326DDFogfv( ctx, GL_FOG_END, &ctx->Fog.End ); sis6326DDFogfv( ctx, GL_FOG_MODE, NULL ); memcpy(prev, current, sizeof(__GLSiSHardware)); } /* Initialize the driver's state functions. */ void sis6326DDInitStateFuncs( GLcontext *ctx ) { ctx->Driver.UpdateState = sis6326DDInvalidateState; ctx->Driver.Clear = sis6326DDClear; ctx->Driver.ClearColor = sis6326DDClearColor; ctx->Driver.ClearDepth = sis6326DDClearDepth; ctx->Driver.AlphaFunc = sis6326DDAlphaFunc; ctx->Driver.BlendFuncSeparate = sis6326DDBlendFuncSeparate; ctx->Driver.ColorMask = sis6326DDColorMask; ctx->Driver.CullFace = sis6326DDCullFace; ctx->Driver.DepthMask = sis6326DDDepthMask; ctx->Driver.DepthFunc = sis6326DDDepthFunc; ctx->Driver.DepthRange = sis6326DDDepthRange; ctx->Driver.DrawBuffer = sis6326DDDrawBuffer; ctx->Driver.Enable = sis6326DDEnable; ctx->Driver.FrontFace = sis6326DDFrontFace; ctx->Driver.Fogfv = sis6326DDFogfv; ctx->Driver.LogicOpcode = sis6326DDLogicOpCode; ctx->Driver.Scissor = sis6326DDScissor; ctx->Driver.ShadeModel = sis6326DDShadeModel; ctx->Driver.LightModelfv = sis6326DDLightModelfv; ctx->Driver.Viewport = sis6326DDViewport; }
ZHAW-INES/rioxo-uClinux-dist
lib/mesa/src/mesa/drivers/dri/sis/sis6326_state.c
C
gpl-2.0
19,463
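
The sis6326_state.c record above manages hardware state with a shadow-register pattern: each state setter writes the `current` copy, compares it against `prev`, and sets a bit in `GlobalFlag` only when a register group actually changed, so the eventual flush re-emits just the dirty groups. A minimal stand-alone sketch of that dirty-flag technique follows; all names in it are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define DIRTY_BLEND (1u << 0)	/* plays the role of GFLAG_DSTBLEND */
#define DIRTY_DEPTH (1u << 1)	/* plays the role of GFLAG_ZSETTING */

struct hw_state {
	uint32_t blend;
	uint32_t depth;
};

struct context {
	struct hw_state current, prev;	/* mirrors smesa->current/prev */
	uint32_t dirty;			/* mirrors smesa->GlobalFlag */
};

/* Setters compare against the shadow copy before marking dirty. */
static void set_blend(struct context *ctx, uint32_t value)
{
	ctx->current.blend = value;
	if (ctx->current.blend != ctx->prev.blend) {
		ctx->prev.blend = ctx->current.blend;
		ctx->dirty |= DIRTY_BLEND;
	}
}

static void set_depth(struct context *ctx, uint32_t value)
{
	ctx->current.depth = value;
	if (ctx->current.depth != ctx->prev.depth) {
		ctx->prev.depth = ctx->current.depth;
		ctx->dirty |= DIRTY_DEPTH;
	}
}

/* Flush touches only the register groups whose dirty bit is set. */
static void flush(struct context *ctx)
{
	if (ctx->dirty & DIRTY_BLEND)
		printf("write BLEND reg = 0x%x\n", ctx->current.blend);
	if (ctx->dirty & DIRTY_DEPTH)
		printf("write DEPTH reg = 0x%x\n", ctx->current.depth);
	ctx->dirty = 0;
}

int main(void)
{
	struct context ctx = { 0 };

	set_blend(&ctx, 0x11);	/* changed: dirty bit set */
	set_depth(&ctx, 0x00);	/* unchanged: no dirty bit */
	flush(&ctx);		/* emits only the blend write */
	return 0;
}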
/***************************************************************************** Copyright (c) 1996, 2010, Innobase Oy. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *****************************************************************************/ /**************************************************//** @file dict/dict0load.c Loads to the memory cache database object definitions from dictionary tables Created 4/24/1996 Heikki Tuuri *******************************************************/ #include "dict0load.h" #include "mysql_version.h" #ifdef UNIV_NONINL #include "dict0load.ic" #endif #include "btr0pcur.h" #include "btr0btr.h" #include "page0page.h" #include "mach0data.h" #include "dict0dict.h" #include "dict0boot.h" #include "rem0cmp.h" #include "srv0start.h" #include "srv0srv.h" #include "ha_prototypes.h" /* innobase_casedn_str() */ /** Following are six InnoDB system tables */ static const char* SYSTEM_TABLE_NAME[] = { "SYS_TABLES", "SYS_INDEXES", "SYS_COLUMNS", "SYS_FIELDS", "SYS_FOREIGN", "SYS_FOREIGN_COLS" }; /****************************************************************//** Compare the name of an index column. @return TRUE if the i'th column of index is 'name'. */ static ibool name_of_col_is( /*===========*/ const dict_table_t* table, /*!< in: table */ const dict_index_t* index, /*!< in: index */ ulint i, /*!< in: index field offset */ const char* name) /*!< in: name to compare to */ { ulint tmp = dict_col_get_no(dict_field_get_col( dict_index_get_nth_field( index, i))); return(strcmp(name, dict_table_get_col_name(table, tmp)) == 0); } /********************************************************************//** Finds the first table name in the given database. @return own: table name, NULL if does not exist; the caller must free the memory in the string! 
*/ UNIV_INTERN char* dict_get_first_table_name_in_db( /*============================*/ const char* name) /*!< in: database name which ends in '/' */ { dict_table_t* sys_tables; btr_pcur_t pcur; dict_index_t* sys_index; dtuple_t* tuple; mem_heap_t* heap; dfield_t* dfield; const rec_t* rec; const byte* field; ulint len; mtr_t mtr; ut_ad(mutex_own(&(dict_sys->mutex))); heap = mem_heap_create(1000); mtr_start(&mtr); sys_tables = dict_table_get_low("SYS_TABLES"); sys_index = UT_LIST_GET_FIRST(sys_tables->indexes); ut_a(!dict_table_is_comp(sys_tables)); tuple = dtuple_create(heap, 1); dfield = dtuple_get_nth_field(tuple, 0); dfield_set_data(dfield, name, ut_strlen(name)); dict_index_copy_types(tuple, sys_index, 1); btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); loop: rec = btr_pcur_get_rec(&pcur); if (!btr_pcur_is_on_user_rec(&pcur)) { /* Not found */ btr_pcur_close(&pcur); mtr_commit(&mtr); mem_heap_free(heap); return(NULL); } field = rec_get_nth_field_old(rec, 0, &len); if (len < strlen(name) || ut_memcmp(name, field, strlen(name)) != 0) { /* Not found */ btr_pcur_close(&pcur); mtr_commit(&mtr); mem_heap_free(heap); return(NULL); } if (!rec_get_deleted_flag(rec, 0)) { /* We found one */ char* table_name = mem_strdupl((char*) field, len); btr_pcur_close(&pcur); mtr_commit(&mtr); mem_heap_free(heap); return(table_name); } btr_pcur_move_to_next_user_rec(&pcur, &mtr); goto loop; } /********************************************************************//** Prints to the standard output information on all tables found in the data dictionary system table. */ UNIV_INTERN void dict_print(void) /*============*/ { dict_table_t* table; btr_pcur_t pcur; const rec_t* rec; mem_heap_t* heap; mtr_t mtr; /* Enlarge the fatal semaphore wait timeout during the InnoDB table monitor printout */ mutex_enter(&kernel_mutex); srv_fatal_semaphore_wait_threshold += 7200; /* 2 hours */ mutex_exit(&kernel_mutex); heap = mem_heap_create(1000); mutex_enter(&(dict_sys->mutex)); mtr_start(&mtr); rec = dict_startscan_system(&pcur, &mtr, SYS_TABLES); while (rec) { const char* err_msg; err_msg = dict_process_sys_tables_rec( heap, rec, &table, DICT_TABLE_LOAD_FROM_CACHE | DICT_TABLE_UPDATE_STATS); mtr_commit(&mtr); if (!err_msg) { dict_table_print_low(table); } else { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: %s\n", err_msg); } mem_heap_empty(heap); mtr_start(&mtr); rec = dict_getnext_system(&pcur, &mtr); } mtr_commit(&mtr); mutex_exit(&(dict_sys->mutex)); mem_heap_free(heap); /* Restore the fatal semaphore wait timeout */ mutex_enter(&kernel_mutex); srv_fatal_semaphore_wait_threshold -= 7200; /* 2 hours */ mutex_exit(&kernel_mutex); } /********************************************************************//** This function gets the next system table record as it scans the table. @return the next record if found, NULL if end of scan */ static const rec_t* dict_getnext_system_low( /*====================*/ btr_pcur_t* pcur, /*!< in/out: persistent cursor to the record*/ mtr_t* mtr) /*!< in: the mini-transaction */ { rec_t* rec = NULL; while (!rec || rec_get_deleted_flag(rec, 0)) { btr_pcur_move_to_next_user_rec(pcur, mtr); rec = btr_pcur_get_rec(pcur); if (!btr_pcur_is_on_user_rec(pcur)) { /* end of index */ btr_pcur_close(pcur); return(NULL); } } /* Get a record, let's save the position */ btr_pcur_store_position(pcur, mtr); return(rec); } /********************************************************************//** This function opens a system table, and return the first record. 
@return first record of the system table */ UNIV_INTERN const rec_t* dict_startscan_system( /*==================*/ btr_pcur_t* pcur, /*!< out: persistent cursor to the record */ mtr_t* mtr, /*!< in: the mini-transaction */ dict_system_id_t system_id) /*!< in: which system table to open */ { dict_table_t* system_table; dict_index_t* clust_index; const rec_t* rec; ut_a(system_id < SYS_NUM_SYSTEM_TABLES); system_table = dict_table_get_low(SYSTEM_TABLE_NAME[system_id]); clust_index = UT_LIST_GET_FIRST(system_table->indexes); btr_pcur_open_at_index_side(TRUE, clust_index, BTR_SEARCH_LEAF, pcur, TRUE, mtr); rec = dict_getnext_system_low(pcur, mtr); return(rec); } /********************************************************************//** This function gets the next system table record as it scans the table. @return the next record if found, NULL if end of scan */ UNIV_INTERN const rec_t* dict_getnext_system( /*================*/ btr_pcur_t* pcur, /*!< in/out: persistent cursor to the record */ mtr_t* mtr) /*!< in: the mini-transaction */ { const rec_t* rec; /* Restore the position */ btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr); /* Get the next record */ rec = dict_getnext_system_low(pcur, mtr); return(rec); } /********************************************************************//** This function processes one SYS_TABLES record and populate the dict_table_t struct for the table. Extracted out of dict_print() to be used by both monitor table output and information schema innodb_sys_tables output. @return error message, or NULL on success */ UNIV_INTERN const char* dict_process_sys_tables_rec( /*========================*/ mem_heap_t* heap, /*!< in/out: temporary memory heap */ const rec_t* rec, /*!< in: SYS_TABLES record */ dict_table_t** table, /*!< out: dict_table_t to fill */ dict_table_info_t status) /*!< in: status bit controls options such as whether we shall look for dict_table_t from cache first */ { ulint len; const char* field; const char* err_msg = NULL; char* table_name; field = (const char*) rec_get_nth_field_old(rec, 0, &len); ut_a(!rec_get_deleted_flag(rec, 0)); /* Get the table name */ table_name = mem_heap_strdupl(heap, field, len); /* If DICT_TABLE_LOAD_FROM_CACHE is set, first check whether there is cached dict_table_t struct first */ if (status & DICT_TABLE_LOAD_FROM_CACHE) { *table = dict_table_get_low(table_name); if (!(*table)) { err_msg = "Table not found in cache"; } } else { err_msg = dict_load_table_low(table_name, rec, table); } if (err_msg) { return(err_msg); } if ((status & DICT_TABLE_UPDATE_STATS) && dict_table_get_first_index(*table)) { /* Update statistics if DICT_TABLE_UPDATE_STATS is set */ dict_update_statistics(*table, FALSE /* update even if initialized */); } return(NULL); } /********************************************************************//** This function parses a SYS_INDEXES record and populate a dict_index_t structure with the information from the record. For detail information about SYS_INDEXES fields, please refer to dict_boot() function. 
/********************************************************************//**
This function parses a SYS_INDEXES record and populates a dict_index_t
structure with the information from the record. For detailed information
about SYS_INDEXES fields, please refer to the dict_boot() function.
@return error message, or NULL on success */
UNIV_INTERN
const char*
dict_process_sys_indexes_rec(
/*=========================*/
	mem_heap_t*	heap,		/*!< in/out: heap memory */
	const rec_t*	rec,		/*!< in: current SYS_INDEXES rec */
	dict_index_t*	index,		/*!< out: index to be filled */
	table_id_t*	table_id)	/*!< out: index table id */
{
	const char*	err_msg;
	byte*		buf;

	buf = mem_heap_alloc(heap, 8);

	/* Parse the record, and get "dict_index_t" struct filled */
	err_msg = dict_load_index_low(buf, NULL, heap, rec, FALSE, &index);

	*table_id = mach_read_from_8(buf);

	return(err_msg);
}

/********************************************************************//**
This function parses a SYS_COLUMNS record and populates a dict_column_t
structure with the information from the record.
@return error message, or NULL on success */
UNIV_INTERN
const char*
dict_process_sys_columns_rec(
/*=========================*/
	mem_heap_t*	heap,		/*!< in/out: heap memory */
	const rec_t*	rec,		/*!< in: current SYS_COLUMNS rec */
	dict_col_t*	column,		/*!< out: dict_col_t to be filled */
	table_id_t*	table_id,	/*!< out: table id */
	const char**	col_name)	/*!< out: column name */
{
	const char*	err_msg;

	/* Parse the record, and get "dict_col_t" struct filled */
	err_msg = dict_load_column_low(NULL, heap, column, table_id,
				       col_name, rec);

	return(err_msg);
}

/********************************************************************//**
This function parses a SYS_FIELDS record and populates a dict_field_t
structure with the information from the record.
@return error message, or NULL on success */
UNIV_INTERN
const char*
dict_process_sys_fields_rec(
/*========================*/
	mem_heap_t*	heap,		/*!< in/out: heap memory */
	const rec_t*	rec,		/*!< in: current SYS_FIELDS rec */
	dict_field_t*	sys_field,	/*!< out: dict_field_t to be
					filled */
	ulint*		pos,		/*!< out: Field position */
	index_id_t*	index_id,	/*!< out: current index id */
	index_id_t	last_id)	/*!< in: previous index id */
{
	byte*		buf;
	byte*		last_index_id;
	const char*	err_msg;

	buf = mem_heap_alloc(heap, 8);

	last_index_id = mem_heap_alloc(heap, 8);
	mach_write_to_8(last_index_id, last_id);

	err_msg = dict_load_field_low(buf, NULL, sys_field,
				      pos, last_index_id, heap, rec, NULL, 0);

	*index_id = mach_read_from_8(buf);

	return(err_msg);
}

#ifdef FOREIGN_NOT_USED
/********************************************************************//**
This function parses a SYS_FOREIGN record and populates a dict_foreign_t
structure with the information from the record. For detailed information
about SYS_FOREIGN fields, please refer to the dict_load_foreign() function.
@return error message, or NULL on success */
UNIV_INTERN
const char*
dict_process_sys_foreign_rec(
/*=========================*/
	mem_heap_t*	heap,		/*!< in/out: heap memory */
	const rec_t*	rec,		/*!< in: current SYS_FOREIGN rec */
	dict_foreign_t*	foreign)	/*!< out: dict_foreign_t struct
					to be filled */
{
	ulint		len;
	const byte*	field;
	ulint		n_fields_and_type;

	if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) {
		return("delete-marked record in SYS_FOREIGN");
	}

	if (UNIV_UNLIKELY(rec_get_n_fields_old(rec) != 6)) {
		return("wrong number of columns in SYS_FOREIGN record");
	}

	field = rec_get_nth_field_old(rec, 0/*ID*/, &len);
	if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) {
err_len:
		return("incorrect column length in SYS_FOREIGN");
	}

	/* This receives a dict_foreign_t* that points to a stack variable.
	So mem_heap_free(foreign->heap) is not used as elsewhere.
	Since the heap used here is freed elsewhere, foreign->heap
	is not assigned.
*/ foreign->id = mem_heap_strdupl(heap, (const char*) field, len); rec_get_nth_field_offs_old(rec, 1/*DB_TRX_ID*/, &len); if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) { goto err_len; } rec_get_nth_field_offs_old(rec, 2/*DB_ROLL_PTR*/, &len); if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) { goto err_len; } /* The _lookup versions of the referenced and foreign table names are not assigned since they are not used in this dict_foreign_t */ field = rec_get_nth_field_old(rec, 3/*FOR_NAME*/, &len); if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) { goto err_len; } foreign->foreign_table_name = mem_heap_strdupl( heap, (const char*) field, len); field = rec_get_nth_field_old(rec, 4/*REF_NAME*/, &len); if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) { goto err_len; } foreign->referenced_table_name = mem_heap_strdupl( heap, (const char*) field, len); field = rec_get_nth_field_old(rec, 5/*N_COLS*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } n_fields_and_type = mach_read_from_4(field); foreign->type = (unsigned int) (n_fields_and_type >> 24); foreign->n_fields = (unsigned int) (n_fields_and_type & 0x3FFUL); return(NULL); } #endif /* FOREIGN_NOT_USED */ #ifdef FOREIGN_NOT_USED /********************************************************************//** This function parses a SYS_FOREIGN_COLS record and extract necessary information from the record and return to caller. @return error message, or NULL on success */ UNIV_INTERN const char* dict_process_sys_foreign_col_rec( /*=============================*/ mem_heap_t* heap, /*!< in/out: heap memory */ const rec_t* rec, /*!< in: current SYS_FOREIGN_COLS rec */ const char** name, /*!< out: foreign key constraint name */ const char** for_col_name, /*!< out: referencing column name */ const char** ref_col_name, /*!< out: referenced column name in referenced table */ ulint* pos) /*!< out: column position */ { ulint len; const byte* field; if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) { return("delete-marked record in SYS_FOREIGN_COLS"); } if (UNIV_UNLIKELY(rec_get_n_fields_old(rec) != 6)) { return("wrong number of columns in SYS_FOREIGN_COLS record"); } field = rec_get_nth_field_old(rec, 0/*ID*/, &len); if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) { err_len: return("incorrect column length in SYS_FOREIGN_COLS"); } *name = mem_heap_strdupl(heap, (char*) field, len); field = rec_get_nth_field_old(rec, 1/*POS*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } *pos = mach_read_from_4(field); rec_get_nth_field_offs_old(rec, 2/*DB_TRX_ID*/, &len); if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) { goto err_len; } rec_get_nth_field_offs_old(rec, 3/*DB_ROLL_PTR*/, &len); if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) { goto err_len; } field = rec_get_nth_field_old(rec, 4/*FOR_COL_NAME*/, &len); if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) { goto err_len; } *for_col_name = mem_heap_strdupl(heap, (char*) field, len); field = rec_get_nth_field_old(rec, 5/*REF_COL_NAME*/, &len); if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) { goto err_len; } *ref_col_name = mem_heap_strdupl(heap, (char*) field, len); return(NULL); } #endif /* FOREIGN_NOT_USED */ /********************************************************************//** Determine the flags of a table described in SYS_TABLES. 
@return compressed page size in kilobytes; or 0 if the tablespace is uncompressed, ULINT_UNDEFINED on error */ static ulint dict_sys_tables_get_flags( /*======================*/ const rec_t* rec) /*!< in: a record of SYS_TABLES */ { const byte* field; ulint len; ulint n_cols; ulint flags; field = rec_get_nth_field_old(rec, 5, &len); ut_a(len == 4); flags = mach_read_from_4(field); if (UNIV_LIKELY(flags == DICT_TABLE_ORDINARY)) { return(0); } field = rec_get_nth_field_old(rec, 4/*N_COLS*/, &len); n_cols = mach_read_from_4(field); if (UNIV_UNLIKELY(!(n_cols & 0x80000000UL))) { /* New file formats require ROW_FORMAT=COMPACT. */ return(ULINT_UNDEFINED); } switch (flags & (DICT_TF_FORMAT_MASK | DICT_TF_COMPACT)) { default: case DICT_TF_FORMAT_51 << DICT_TF_FORMAT_SHIFT: case DICT_TF_FORMAT_51 << DICT_TF_FORMAT_SHIFT | DICT_TF_COMPACT: /* flags should be DICT_TABLE_ORDINARY, or DICT_TF_FORMAT_MASK should be nonzero. */ return(ULINT_UNDEFINED); case DICT_TF_FORMAT_ZIP << DICT_TF_FORMAT_SHIFT | DICT_TF_COMPACT: #if DICT_TF_FORMAT_MAX > DICT_TF_FORMAT_ZIP # error "missing case labels for DICT_TF_FORMAT_ZIP .. DICT_TF_FORMAT_MAX" #endif /* We support this format. */ break; } if (UNIV_UNLIKELY((flags & DICT_TF_ZSSIZE_MASK) > (DICT_TF_ZSSIZE_MAX << DICT_TF_ZSSIZE_SHIFT))) { /* Unsupported compressed page size. */ return(ULINT_UNDEFINED); } if (UNIV_UNLIKELY(flags & (~0 << DICT_TF_BITS))) { /* Some unused bits are set. */ return(ULINT_UNDEFINED); } return(flags); } /********************************************************************//** In a crash recovery we already have all the tablespace objects created. This function compares the space id information in the InnoDB data dictionary to what we already read with fil_load_single_table_tablespaces(). In a normal startup, we create the tablespace objects for every table in InnoDB's data dictionary, if the corresponding .ibd file exists. We also scan the biggest space id, and store it to fil_system. 
*/ UNIV_INTERN void dict_check_tablespaces_and_store_max_id( /*====================================*/ ibool in_crash_recovery) /*!< in: are we doing a crash recovery */ { dict_table_t* sys_tables; dict_index_t* sys_index; btr_pcur_t pcur; const rec_t* rec; ulint max_space_id; mtr_t mtr; mutex_enter(&(dict_sys->mutex)); mtr_start(&mtr); sys_tables = dict_table_get_low("SYS_TABLES"); sys_index = UT_LIST_GET_FIRST(sys_tables->indexes); ut_a(!dict_table_is_comp(sys_tables)); max_space_id = mtr_read_ulint(dict_hdr_get(&mtr) + DICT_HDR_MAX_SPACE_ID, MLOG_4BYTES, &mtr); fil_set_max_space_id_if_bigger(max_space_id); btr_pcur_open_at_index_side(TRUE, sys_index, BTR_SEARCH_LEAF, &pcur, TRUE, &mtr); loop: btr_pcur_move_to_next_user_rec(&pcur, &mtr); rec = btr_pcur_get_rec(&pcur); if (!btr_pcur_is_on_user_rec(&pcur)) { /* end of index */ btr_pcur_close(&pcur); mtr_commit(&mtr); /* We must make the tablespace cache aware of the biggest known space id */ /* printf("Biggest space id in data dictionary %lu\n", max_space_id); */ fil_set_max_space_id_if_bigger(max_space_id); mutex_exit(&(dict_sys->mutex)); return; } if (!rec_get_deleted_flag(rec, 0)) { /* We found one */ const byte* field; ulint len; ulint space_id; ulint flags; char* name; field = rec_get_nth_field_old(rec, 0, &len); name = mem_strdupl((char*) field, len); flags = dict_sys_tables_get_flags(rec); if (UNIV_UNLIKELY(flags == ULINT_UNDEFINED)) { field = rec_get_nth_field_old(rec, 5, &len); flags = mach_read_from_4(field); ut_print_timestamp(stderr); fputs(" InnoDB: Error: table ", stderr); ut_print_filename(stderr, name); fprintf(stderr, "\n" "InnoDB: in InnoDB data dictionary" " has unknown type %lx.\n", (ulong) flags); goto loop; } field = rec_get_nth_field_old(rec, 9, &len); ut_a(len == 4); space_id = mach_read_from_4(field); btr_pcur_store_position(&pcur, &mtr); mtr_commit(&mtr); if (space_id == 0) { /* The system tablespace always exists. */ } else if (in_crash_recovery) { /* Check that the tablespace (the .ibd file) really exists; print a warning to the .err log if not. Do not print warnings for temporary tables. */ ibool is_temp; field = rec_get_nth_field_old(rec, 4, &len); if (0x80000000UL & mach_read_from_4(field)) { /* ROW_FORMAT=COMPACT: read the is_temp flag from SYS_TABLES.MIX_LEN. */ field = rec_get_nth_field_old(rec, 7, &len); is_temp = mach_read_from_4(field) & DICT_TF2_TEMPORARY; } else { /* For tables created with old versions of InnoDB, SYS_TABLES.MIX_LEN may contain garbage. Such tables would always be in ROW_FORMAT=REDUNDANT. Pretend that all such tables are non-temporary. That is, do not suppress error printouts about temporary tables not being found. */ is_temp = FALSE; } fil_space_for_table_exists_in_mem( space_id, name, is_temp, TRUE, !is_temp); } else { /* It is a normal database startup: create the space object and check that the .ibd file exists. */ fil_open_single_table_tablespace(FALSE, space_id, flags, name); } mem_free(name); if (space_id > max_space_id) { max_space_id = space_id; } mtr_start(&mtr); btr_pcur_restore_position(BTR_SEARCH_LEAF, &pcur, &mtr); } goto loop; } /********************************************************************//** Loads a table column definition from a SYS_COLUMNS record to dict_table_t. 
@return error message, or NULL on success */ UNIV_INTERN const char* dict_load_column_low( /*=================*/ dict_table_t* table, /*!< in/out: table, could be NULL if we just populate a dict_column_t struct with information from a SYS_COLUMNS record */ mem_heap_t* heap, /*!< in/out: memory heap for temporary storage */ dict_col_t* column, /*!< out: dict_column_t to fill, or NULL if table != NULL */ table_id_t* table_id, /*!< out: table id */ const char** col_name, /*!< out: column name */ const rec_t* rec) /*!< in: SYS_COLUMNS record */ { char* name; const byte* field; ulint len; ulint mtype; ulint prtype; ulint col_len; ulint pos; ut_ad(table || column); if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) { return("delete-marked record in SYS_COLUMNS"); } if (UNIV_UNLIKELY(rec_get_n_fields_old(rec) != 9)) { return("wrong number of columns in SYS_COLUMNS record"); } field = rec_get_nth_field_old(rec, 0/*TABLE_ID*/, &len); if (UNIV_UNLIKELY(len != 8)) { err_len: return("incorrect column length in SYS_COLUMNS"); } if (table_id) { *table_id = mach_read_from_8(field); } else if (UNIV_UNLIKELY(table->id != mach_read_from_8(field))) { return("SYS_COLUMNS.TABLE_ID mismatch"); } field = rec_get_nth_field_old(rec, 1/*POS*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } pos = mach_read_from_4(field); if (UNIV_UNLIKELY(table && table->n_def != pos)) { return("SYS_COLUMNS.POS mismatch"); } rec_get_nth_field_offs_old(rec, 2/*DB_TRX_ID*/, &len); if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) { goto err_len; } rec_get_nth_field_offs_old(rec, 3/*DB_ROLL_PTR*/, &len); if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) { goto err_len; } field = rec_get_nth_field_old(rec, 4/*NAME*/, &len); if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) { goto err_len; } name = mem_heap_strdupl(heap, (const char*) field, len); if (col_name) { *col_name = name; } field = rec_get_nth_field_old(rec, 5/*MTYPE*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } mtype = mach_read_from_4(field); field = rec_get_nth_field_old(rec, 6/*PRTYPE*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } prtype = mach_read_from_4(field); if (dtype_get_charset_coll(prtype) == 0 && dtype_is_string_type(mtype)) { /* The table was created with < 4.1.2. */ if (dtype_is_binary_string_type(mtype, prtype)) { /* Use the binary collation for string columns of binary type. */ prtype = dtype_form_prtype( prtype, DATA_MYSQL_BINARY_CHARSET_COLL); } else { /* Use the default charset for other than binary columns. */ prtype = dtype_form_prtype( prtype, data_mysql_default_charset_coll); } } field = rec_get_nth_field_old(rec, 7/*LEN*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } col_len = mach_read_from_4(field); field = rec_get_nth_field_old(rec, 8/*PREC*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } if (!column) { dict_mem_table_add_col(table, heap, name, mtype, prtype, col_len); } else { dict_mem_fill_column_struct(column, pos, mtype, prtype, col_len); } return(NULL); } /********************************************************************//** Loads definitions for table columns. 
*/
static
void
dict_load_columns(
/*==============*/
	dict_table_t*	table,	/*!< in/out: table */
	mem_heap_t*	heap)	/*!< in/out: memory heap
				for temporary storage */
{
	dict_table_t*	sys_columns;
	dict_index_t*	sys_index;
	btr_pcur_t	pcur;
	dtuple_t*	tuple;
	dfield_t*	dfield;
	const rec_t*	rec;
	byte*		buf;
	ulint		i;
	mtr_t		mtr;

	ut_ad(mutex_own(&(dict_sys->mutex)));

	mtr_start(&mtr);

	sys_columns = dict_table_get_low("SYS_COLUMNS");
	sys_index = UT_LIST_GET_FIRST(sys_columns->indexes);
	ut_a(!dict_table_is_comp(sys_columns));

	ut_a(name_of_col_is(sys_columns, sys_index, 4, "NAME"));
	ut_a(name_of_col_is(sys_columns, sys_index, 8, "PREC"));

	tuple = dtuple_create(heap, 1);
	dfield = dtuple_get_nth_field(tuple, 0);

	buf = mem_heap_alloc(heap, 8);
	mach_write_to_8(buf, table->id);

	dfield_set_data(dfield, buf, 8);
	dict_index_copy_types(tuple, sys_index, 1);

	btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
				  BTR_SEARCH_LEAF, &pcur, &mtr);
	for (i = 0; i + DATA_N_SYS_COLS < (ulint) table->n_cols; i++) {
		const char* err_msg;

		rec = btr_pcur_get_rec(&pcur);

		ut_a(btr_pcur_is_on_user_rec(&pcur));

		err_msg = dict_load_column_low(table, heap, NULL, NULL,
					       NULL, rec);

		if (err_msg) {
			fprintf(stderr, "InnoDB: %s\n", err_msg);
			ut_error;
		}

		btr_pcur_move_to_next_user_rec(&pcur, &mtr);
	}

	btr_pcur_close(&pcur);
	mtr_commit(&mtr);
}

/** Error message for a delete-marked record in dict_load_field_low() */
static const char* dict_load_field_del = "delete-marked record in SYS_FIELDS";

static const char* dict_load_field_too_big = "column prefix exceeds maximum"
					     " limit";

/********************************************************************//**
Loads an index field definition from a SYS_FIELDS record to dict_index_t.
@return error message, or NULL on success */
UNIV_INTERN
const char*
dict_load_field_low(
/*================*/
	byte*		index_id,	/*!< in/out: index id (8 bytes)
					an "in" value if index != NULL
					and "out" if index == NULL */
	dict_index_t*	index,		/*!< in/out: index, could be NULL
					if we just populate a dict_field_t
					struct with information from
					a SYS_FIELDS record */
	dict_field_t*	sys_field,	/*!< out: dict_field_t to be
					filled */
	ulint*		pos,		/*!< out: Field position */
	byte*		last_index_id,	/*!< in: last index id */
	mem_heap_t*	heap,		/*!< in/out: memory heap
					for temporary storage */
	const rec_t*	rec,		/*!< in: SYS_FIELDS record */
	char*		addition_err_str,/*!< out: additional error message
					that requires information to be
					filled, or NULL */
	ulint		err_str_len)	/*!< in: length of addition_err_str
					in bytes */
{
	const byte*	field;
	ulint		len;
	ulint		pos_and_prefix_len;
	ulint		prefix_len;
	ibool		first_field;
	ulint		position;

	/* Either index or sys_field is supplied, not both */
	ut_a((!index) || (!sys_field));

	if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) {
		return(dict_load_field_del);
	}

	if (UNIV_UNLIKELY(rec_get_n_fields_old(rec) != 5)) {
		return("wrong number of columns in SYS_FIELDS record");
	}

	field = rec_get_nth_field_old(rec, 0/*INDEX_ID*/, &len);
	if (UNIV_UNLIKELY(len != 8)) {
err_len:
		return("incorrect column length in SYS_FIELDS");
	}

	if (!index) {
		ut_a(last_index_id);
		memcpy(index_id, (const char*) field, 8);
		first_field = memcmp(index_id, last_index_id, 8);
	} else {
		first_field = (index->n_def == 0);
		if (memcmp(field, index_id, 8)) {
			return("SYS_FIELDS.INDEX_ID mismatch");
		}
	}

	field = rec_get_nth_field_old(rec, 1/*POS*/, &len);
	if (UNIV_UNLIKELY(len != 4)) {
		goto err_len;
	}

	rec_get_nth_field_offs_old(rec, 2/*DB_TRX_ID*/, &len);
	if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) {
		goto err_len;
	}
rec_get_nth_field_offs_old(rec, 3/*DB_ROLL_PTR*/, &len); if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) { goto err_len; } /* The next field stores the field position in the index and a possible column prefix length if the index field does not contain the whole column. The storage format is like this: if there is at least one prefix field in the index, then the HIGH 2 bytes contain the field number (index->n_def) and the low 2 bytes the prefix length for the field. Otherwise the field number (index->n_def) is contained in the 2 LOW bytes. */ pos_and_prefix_len = mach_read_from_4(field); if (index && UNIV_UNLIKELY ((pos_and_prefix_len & 0xFFFFUL) != index->n_def && (pos_and_prefix_len >> 16 & 0xFFFF) != index->n_def)) { return("SYS_FIELDS.POS mismatch"); } if (first_field || pos_and_prefix_len > 0xFFFFUL) { prefix_len = pos_and_prefix_len & 0xFFFFUL; position = (pos_and_prefix_len & 0xFFFF0000UL) >> 16; } else { prefix_len = 0; position = pos_and_prefix_len & 0xFFFFUL; } field = rec_get_nth_field_old(rec, 4, &len); if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) { goto err_len; } if (prefix_len > REC_VERSION_56_MAX_INDEX_COL_LEN) { if (addition_err_str) { ut_snprintf(addition_err_str, err_str_len, "index field '%s' has a prefix length" " of %lu bytes", mem_heap_strdupl( heap, (const char*) field, len), (ulong) prefix_len); } return(dict_load_field_too_big); } if (index) { dict_mem_index_add_field( index, mem_heap_strdupl(heap, (const char*) field, len), prefix_len); } else { ut_a(sys_field); ut_a(pos); sys_field->name = mem_heap_strdupl( heap, (const char*) field, len); sys_field->prefix_len = prefix_len; *pos = position; } return(NULL); } /********************************************************************//** Loads definitions for index fields. @return DB_SUCCESS if ok, DB_CORRUPTION if corruption */ static ulint dict_load_fields( /*=============*/ dict_index_t* index, /*!< in/out: index whose fields to load */ mem_heap_t* heap) /*!< in: memory heap for temporary storage */ { dict_table_t* sys_fields; dict_index_t* sys_index; btr_pcur_t pcur; dtuple_t* tuple; dfield_t* dfield; const rec_t* rec; byte* buf; ulint i; mtr_t mtr; ulint error; ut_ad(mutex_own(&(dict_sys->mutex))); mtr_start(&mtr); sys_fields = dict_table_get_low("SYS_FIELDS"); sys_index = UT_LIST_GET_FIRST(sys_fields->indexes); ut_a(!dict_table_is_comp(sys_fields)); ut_a(name_of_col_is(sys_fields, sys_index, 4, "COL_NAME")); tuple = dtuple_create(heap, 1); dfield = dtuple_get_nth_field(tuple, 0); buf = mem_heap_alloc(heap, 8); mach_write_to_8(buf, index->id); dfield_set_data(dfield, buf, 8); dict_index_copy_types(tuple, sys_index, 1); btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); for (i = 0; i < index->n_fields; i++) { const char* err_msg; char addition_err_str[1024]; rec = btr_pcur_get_rec(&pcur); ut_a(btr_pcur_is_on_user_rec(&pcur)); err_msg = dict_load_field_low(buf, index, NULL, NULL, NULL, heap, rec, addition_err_str, sizeof(addition_err_str)); if (err_msg == dict_load_field_del) { /* There could be delete marked records in SYS_FIELDS because SYS_FIELDS.INDEX_ID can be updated by ALTER TABLE ADD INDEX. 
*/
			goto next_rec;
		} else if (err_msg) {
			if (err_msg == dict_load_field_too_big) {
				fprintf(stderr,
					"InnoDB: Error: load index"
					" '%s' failed.\n"
					"InnoDB: %s,\n"
					"InnoDB: which exceeds the"
					" maximum limit of %lu bytes.\n"
					"InnoDB: Please use server that"
					" supports long index prefix\n"
					"InnoDB: or turn on"
					" innodb_force_recovery to load"
					" the table\n",
					index->name, addition_err_str,
					(ulong) (REC_VERSION_56_MAX_INDEX_COL_LEN));
			} else {
				fprintf(stderr, "InnoDB: %s\n", err_msg);
			}

			error = DB_CORRUPTION;
			goto func_exit;
		}
next_rec:
		btr_pcur_move_to_next_user_rec(&pcur, &mtr);
	}

	error = DB_SUCCESS;
func_exit:
	btr_pcur_close(&pcur);
	mtr_commit(&mtr);
	return(error);
}

/** Error message for a delete-marked record in dict_load_index_low() */
static const char* dict_load_index_del = "delete-marked record in SYS_INDEXES";
/** Error message for table->id mismatch in dict_load_index_low() */
static const char* dict_load_index_id_err = "SYS_INDEXES.TABLE_ID mismatch";

/********************************************************************//**
Loads an index definition from a SYS_INDEXES record to dict_index_t.
If allocate=TRUE, we will create a dict_index_t structure and fill it
accordingly. If allocate=FALSE, the dict_index_t will be supplied by
the caller and filled with information read from the record.
@return error message, or NULL on success */
UNIV_INTERN
const char*
dict_load_index_low(
/*================*/
	byte*		table_id,	/*!< in/out: table id (8 bytes),
					an "in" value if allocate=TRUE
					and "out" when allocate=FALSE */
	const char*	table_name,	/*!< in: table name */
	mem_heap_t*	heap,		/*!< in/out: temporary memory heap */
	const rec_t*	rec,		/*!< in: SYS_INDEXES record */
	ibool		allocate,	/*!< in: TRUE=allocate *index,
					FALSE=fill in a pre-allocated
					*index */
	dict_index_t**	index)		/*!< out,own: index, or NULL */
{
	const byte*	field;
	ulint		len;
	ulint		name_len;
	char*		name_buf;
	index_id_t	id;
	ulint		n_fields;
	ulint		type;
	ulint		space;

	if (allocate) {
		/* If allocate=TRUE, no dict_index_t will
		be supplied. Initialize "*index" to NULL */
		*index = NULL;
	}

	if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) {
		return(dict_load_index_del);
	}

	if (UNIV_UNLIKELY(rec_get_n_fields_old(rec) != 9)) {
		return("wrong number of columns in SYS_INDEXES record");
	}

	field = rec_get_nth_field_old(rec, 0/*TABLE_ID*/, &len);
	if (UNIV_UNLIKELY(len != 8)) {
err_len:
		return("incorrect column length in SYS_INDEXES");
	}

	if (!allocate) {
		/* We are reading a SYS_INDEXES record.
Copy the table_id */ memcpy(table_id, (const char*)field, 8); } else if (memcmp(field, table_id, 8)) { /* Caller supplied table_id, verify it is the same id as on the index record */ return(dict_load_index_id_err); } field = rec_get_nth_field_old(rec, 1/*ID*/, &len); if (UNIV_UNLIKELY(len != 8)) { goto err_len; } id = mach_read_from_8(field); rec_get_nth_field_offs_old(rec, 2/*DB_TRX_ID*/, &len); if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) { goto err_len; } rec_get_nth_field_offs_old(rec, 3/*DB_ROLL_PTR*/, &len); if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) { goto err_len; } field = rec_get_nth_field_old(rec, 4/*NAME*/, &name_len); if (UNIV_UNLIKELY(name_len == UNIV_SQL_NULL)) { goto err_len; } name_buf = mem_heap_strdupl(heap, (const char*) field, name_len); field = rec_get_nth_field_old(rec, 5/*N_FIELDS*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } n_fields = mach_read_from_4(field); field = rec_get_nth_field_old(rec, 6/*TYPE*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } type = mach_read_from_4(field); field = rec_get_nth_field_old(rec, 7/*SPACE*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } space = mach_read_from_4(field); field = rec_get_nth_field_old(rec, 8/*PAGE_NO*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } if (allocate) { *index = dict_mem_index_create(table_name, name_buf, space, type, n_fields); } else { ut_a(*index); dict_mem_fill_index_struct(*index, NULL, NULL, name_buf, space, type, n_fields); } (*index)->id = id; (*index)->page = mach_read_from_4(field); ut_ad((*index)->page); return(NULL); } /********************************************************************//** Loads definitions for table indexes. Adds them to the data dictionary cache. @return DB_SUCCESS if ok, DB_CORRUPTION if corruption of dictionary table or DB_UNSUPPORTED if table has unknown index type */ static ulint dict_load_indexes( /*==============*/ dict_table_t* table, /*!< in/out: table */ mem_heap_t* heap, /*!< in: memory heap for temporary storage */ dict_err_ignore_t ignore_err) /*!< in: error to be ignored when loading the index definition */ { dict_table_t* sys_indexes; dict_index_t* sys_index; btr_pcur_t pcur; dtuple_t* tuple; dfield_t* dfield; const rec_t* rec; byte* buf; mtr_t mtr; ulint error = DB_SUCCESS; ut_ad(mutex_own(&(dict_sys->mutex))); mtr_start(&mtr); sys_indexes = dict_table_get_low("SYS_INDEXES"); sys_index = UT_LIST_GET_FIRST(sys_indexes->indexes); ut_a(!dict_table_is_comp(sys_indexes)); ut_a(name_of_col_is(sys_indexes, sys_index, 4, "NAME")); ut_a(name_of_col_is(sys_indexes, sys_index, 8, "PAGE_NO")); tuple = dtuple_create(heap, 1); dfield = dtuple_get_nth_field(tuple, 0); buf = mem_heap_alloc(heap, 8); mach_write_to_8(buf, table->id); dfield_set_data(dfield, buf, 8); dict_index_copy_types(tuple, sys_index, 1); btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); for (;;) { dict_index_t* index = NULL; const char* err_msg; if (!btr_pcur_is_on_user_rec(&pcur)) { break; } rec = btr_pcur_get_rec(&pcur); err_msg = dict_load_index_low(buf, table->name, heap, rec, TRUE, &index); ut_ad((index == NULL) == (err_msg != NULL)); if (err_msg == dict_load_index_id_err) { /* TABLE_ID mismatch means that we have run out of index definitions for the table. */ break; } else if (err_msg == dict_load_index_del) { /* Skip delete-marked records. 
*/ goto next_rec; } else if (err_msg) { fprintf(stderr, "InnoDB: %s\n", err_msg); error = DB_CORRUPTION; goto func_exit; } ut_ad(index); /* We check for unsupported types first, so that the subsequent checks are relevant for the supported types. */ if (index->type & ~(DICT_CLUSTERED | DICT_UNIQUE)) { fprintf(stderr, "InnoDB: Error: unknown type %lu" " of index %s of table %s\n", (ulong) index->type, index->name, table->name); error = DB_UNSUPPORTED; dict_mem_index_free(index); goto func_exit; } else if (index->page == FIL_NULL) { fprintf(stderr, "InnoDB: Error: trying to load index %s" " for table %s\n" "InnoDB: but the index tree has been freed!\n", index->name, table->name); if (ignore_err & DICT_ERR_IGNORE_INDEX_ROOT) { /* If caller can tolerate this error, we will continue to load the index and let caller deal with this error. However mark the index and table corrupted */ index->corrupted = TRUE; table->corrupted = TRUE; fprintf(stderr, "InnoDB: Index is corrupt but forcing" " load into data dictionary\n"); } else { corrupted: dict_mem_index_free(index); error = DB_CORRUPTION; goto func_exit; } } else if (!dict_index_is_clust(index) && NULL == dict_table_get_first_index(table)) { fputs("InnoDB: Error: trying to load index ", stderr); ut_print_name(stderr, NULL, FALSE, index->name); fputs(" for table ", stderr); ut_print_name(stderr, NULL, TRUE, table->name); fputs("\nInnoDB: but the first index" " is not clustered!\n", stderr); goto corrupted; } else if (table->id < DICT_HDR_FIRST_ID && (dict_index_is_clust(index) || ((table == dict_sys->sys_tables) && !strcmp("ID_IND", index->name)))) { /* The index was created in memory already at booting of the database server */ dict_mem_index_free(index); } else { error = dict_load_fields(index, heap); if (error != DB_SUCCESS) { fprintf(stderr, "InnoDB: Error: load index '%s'" " for table '%s' failed\n", index->name, table->name); /* If the force recovery flag is set, and if the failed index is not the primary index, we will continue and open other indexes */ if (srv_force_recovery && !dict_index_is_clust(index)) { error = DB_SUCCESS; goto next_rec; } else { goto func_exit; } } error = dict_index_add_to_cache(table, index, index->page, FALSE); /* The data dictionary tables should never contain invalid index definitions. If we ignored this error and simply did not load this index definition, the .frm file would disagree with the index definitions inside InnoDB. */ if (UNIV_UNLIKELY(error != DB_SUCCESS)) { goto func_exit; } } next_rec: btr_pcur_move_to_next_user_rec(&pcur, &mtr); } func_exit: btr_pcur_close(&pcur); mtr_commit(&mtr); return(error); } /********************************************************************//** Loads a table definition from a SYS_TABLES record to dict_table_t. Does not load any columns or indexes. 
@return error message, or NULL on success */ UNIV_INTERN const char* dict_load_table_low( /*================*/ const char* name, /*!< in: table name */ const rec_t* rec, /*!< in: SYS_TABLES record */ dict_table_t** table) /*!< out,own: table, or NULL */ { const byte* field; ulint len; ulint space; ulint n_cols; ulint flags; if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) { return("delete-marked record in SYS_TABLES"); } if (UNIV_UNLIKELY(rec_get_n_fields_old(rec) != 10)) { return("wrong number of columns in SYS_TABLES record"); } rec_get_nth_field_offs_old(rec, 0/*NAME*/, &len); if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) { err_len: return("incorrect column length in SYS_TABLES"); } rec_get_nth_field_offs_old(rec, 1/*DB_TRX_ID*/, &len); if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) { goto err_len; } rec_get_nth_field_offs_old(rec, 2/*DB_ROLL_PTR*/, &len); if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) { goto err_len; } rec_get_nth_field_offs_old(rec, 3/*ID*/, &len); if (UNIV_UNLIKELY(len != 8)) { goto err_len; } field = rec_get_nth_field_old(rec, 4/*N_COLS*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } n_cols = mach_read_from_4(field); rec_get_nth_field_offs_old(rec, 5/*TYPE*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } rec_get_nth_field_offs_old(rec, 6/*MIX_ID*/, &len); if (UNIV_UNLIKELY(len != 8)) { goto err_len; } rec_get_nth_field_offs_old(rec, 7/*MIX_LEN*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } rec_get_nth_field_offs_old(rec, 8/*CLUSTER_ID*/, &len); if (UNIV_UNLIKELY(len != UNIV_SQL_NULL)) { goto err_len; } field = rec_get_nth_field_old(rec, 9/*SPACE*/, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } space = mach_read_from_4(field); /* Check if the tablespace exists and has the right name */ if (space != 0) { flags = dict_sys_tables_get_flags(rec); if (UNIV_UNLIKELY(flags == ULINT_UNDEFINED)) { field = rec_get_nth_field_old(rec, 5/*TYPE*/, &len); ut_ad(len == 4); /* this was checked earlier */ flags = mach_read_from_4(field); ut_print_timestamp(stderr); fputs(" InnoDB: Error: table ", stderr); ut_print_filename(stderr, name); fprintf(stderr, "\n" "InnoDB: in InnoDB data dictionary" " has unknown type %lx.\n", (ulong) flags); return("incorrect flags in SYS_TABLES"); } } else { flags = 0; } /* The high-order bit of N_COLS is the "compact format" flag. For tables in that format, MIX_LEN may hold additional flags. */ if (n_cols & 0x80000000UL) { ulint flags2; flags |= DICT_TF_COMPACT; field = rec_get_nth_field_old(rec, 7, &len); if (UNIV_UNLIKELY(len != 4)) { goto err_len; } flags2 = mach_read_from_4(field); if (flags2 & (~0 << (DICT_TF2_BITS - DICT_TF2_SHIFT))) { ut_print_timestamp(stderr); fputs(" InnoDB: Warning: table ", stderr); ut_print_filename(stderr, name); fprintf(stderr, "\n" "InnoDB: in InnoDB data dictionary" " has unknown flags %lx.\n", (ulong) flags2); flags2 &= ~(~0 << (DICT_TF2_BITS - DICT_TF2_SHIFT)); } flags |= flags2 << DICT_TF2_SHIFT; } /* See if the tablespace is available. */ *table = dict_mem_table_create(name, space, n_cols & ~0x80000000UL, flags); field = rec_get_nth_field_old(rec, 3/*ID*/, &len); ut_ad(len == 8); /* this was checked earlier */ (*table)->id = mach_read_from_8(field); (*table)->ibd_file_missing = FALSE; return(NULL); } /********************************************************************//** Loads a table definition and also all its index definitions, and also the cluster definition if the table is a member in a cluster. 
Also loads all foreign key constraints where the foreign key is in the table or where a foreign key references columns in this table. Adds all these to the data dictionary cache. @return table, NULL if does not exist; if the table is stored in an .ibd file, but the file does not exist, then we set the ibd_file_missing flag TRUE in the table object we return */ UNIV_INTERN dict_table_t* dict_load_table( /*============*/ const char* name, /*!< in: table name in the databasename/tablename format */ ibool cached, /*!< in: TRUE=add to cache, FALSE=do not */ dict_err_ignore_t ignore_err) /*!< in: error to be ignored when loading table and its indexes' definition */ { dict_table_t* table; dict_table_t* sys_tables; btr_pcur_t pcur; dict_index_t* sys_index; dtuple_t* tuple; mem_heap_t* heap; dfield_t* dfield; const rec_t* rec; const byte* field; ulint len; ulint err; const char* err_msg; mtr_t mtr; ut_ad(mutex_own(&(dict_sys->mutex))); heap = mem_heap_create(32000); mtr_start(&mtr); sys_tables = dict_table_get_low("SYS_TABLES"); sys_index = UT_LIST_GET_FIRST(sys_tables->indexes); ut_a(!dict_table_is_comp(sys_tables)); ut_a(name_of_col_is(sys_tables, sys_index, 3, "ID")); ut_a(name_of_col_is(sys_tables, sys_index, 4, "N_COLS")); ut_a(name_of_col_is(sys_tables, sys_index, 5, "TYPE")); ut_a(name_of_col_is(sys_tables, sys_index, 7, "MIX_LEN")); ut_a(name_of_col_is(sys_tables, sys_index, 9, "SPACE")); tuple = dtuple_create(heap, 1); dfield = dtuple_get_nth_field(tuple, 0); dfield_set_data(dfield, name, ut_strlen(name)); dict_index_copy_types(tuple, sys_index, 1); btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); rec = btr_pcur_get_rec(&pcur); if (!btr_pcur_is_on_user_rec(&pcur) || rec_get_deleted_flag(rec, 0)) { /* Not found */ err_exit: btr_pcur_close(&pcur); mtr_commit(&mtr); mem_heap_free(heap); return(NULL); } field = rec_get_nth_field_old(rec, 0, &len); /* Check if the table name in record is the searched one */ if (len != ut_strlen(name) || ut_memcmp(name, field, len) != 0) { goto err_exit; } err_msg = dict_load_table_low(name, rec, &table); if (err_msg) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: %s\n", err_msg); goto err_exit; } if (table->space == 0) { /* The system tablespace is always available. */ } else if (!fil_space_for_table_exists_in_mem( table->space, name, (table->flags >> DICT_TF2_SHIFT) & DICT_TF2_TEMPORARY, FALSE, FALSE)) { if (table->flags & (DICT_TF2_TEMPORARY << DICT_TF2_SHIFT)) { /* Do not bother to retry opening temporary tables. */ table->ibd_file_missing = TRUE; } else { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: error: space object of table "); ut_print_filename(stderr, name); fprintf(stderr, ",\n" "InnoDB: space id %lu did not exist in memory." " Retrying an open.\n", (ulong) table->space); /* Try to open the tablespace */ if (!fil_open_single_table_tablespace( TRUE, table->space, table->flags == DICT_TF_COMPACT ? 0 : table->flags & ~(~0 << DICT_TF_BITS), name)) { /* We failed to find a sensible tablespace file */ table->ibd_file_missing = TRUE; } } } btr_pcur_close(&pcur); mtr_commit(&mtr); dict_load_columns(table, heap); if (cached) { dict_table_add_to_cache(table, heap); } else { dict_table_add_system_columns(table, heap); } mem_heap_empty(heap); err = dict_load_indexes(table, heap, ignore_err); /* Initialize table foreign_child value. 
Its value could be changed when dict_load_foreigns() is called below */ table->fk_max_recusive_level = 0; /* If the force recovery flag is set, we open the table irrespective of the error condition, since the user may want to dump data from the clustered index. However we load the foreign key information only if all indexes were loaded. */ if (!cached) { } else if (err == DB_SUCCESS) { err = dict_load_foreigns(table->name, TRUE, TRUE); if (err != DB_SUCCESS) { dict_table_remove_from_cache(table); table = NULL; } else { table->fk_max_recusive_level = 0; } } else { dict_index_t* index; /* Make sure that at least the clustered index was loaded. Otherwise refuse to load the table */ index = dict_table_get_first_index(table); if (!srv_force_recovery || !index || !dict_index_is_clust(index)) { dict_table_remove_from_cache(table); table = NULL; } } #if 0 if (err != DB_SUCCESS && table != NULL) { mutex_enter(&dict_foreign_err_mutex); ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: could not make a foreign key" " definition to match\n" "InnoDB: the foreign key table" " or the referenced table!\n" "InnoDB: The data dictionary of InnoDB is corrupt." " You may need to drop\n" "InnoDB: and recreate the foreign key table" " or the referenced table.\n" "InnoDB: Submit a detailed bug report" " to http://bugs.mysql.com\n" "InnoDB: Latest foreign key error printout:\n%s\n", dict_foreign_err_buf); mutex_exit(&dict_foreign_err_mutex); } #endif /* 0 */ mem_heap_free(heap); return(table); } /***********************************************************************//** Loads a table object based on the table id. @return table; NULL if table does not exist */ UNIV_INTERN dict_table_t* dict_load_table_on_id( /*==================*/ table_id_t table_id) /*!< in: table id */ { byte id_buf[8]; btr_pcur_t pcur; mem_heap_t* heap; dtuple_t* tuple; dfield_t* dfield; dict_index_t* sys_table_ids; dict_table_t* sys_tables; const rec_t* rec; const byte* field; ulint len; dict_table_t* table; mtr_t mtr; ut_ad(mutex_own(&(dict_sys->mutex))); table = NULL; /* NOTE that the operation of this function is protected by the dictionary mutex, and therefore no deadlocks can occur with other dictionary operations. 
*/ mtr_start(&mtr); /*---------------------------------------------------*/ /* Get the secondary index based on ID for table SYS_TABLES */ sys_tables = dict_sys->sys_tables; sys_table_ids = dict_table_get_next_index( dict_table_get_first_index(sys_tables)); ut_a(!dict_table_is_comp(sys_tables)); heap = mem_heap_create(256); tuple = dtuple_create(heap, 1); dfield = dtuple_get_nth_field(tuple, 0); /* Write the table id in byte format to id_buf */ mach_write_to_8(id_buf, table_id); dfield_set_data(dfield, id_buf, 8); dict_index_copy_types(tuple, sys_table_ids, 1); btr_pcur_open_on_user_rec(sys_table_ids, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); rec = btr_pcur_get_rec(&pcur); if (!btr_pcur_is_on_user_rec(&pcur)) { /* Not found */ goto func_exit; } /* Find the first record that is not delete marked */ while (rec_get_deleted_flag(rec, 0)) { if (!btr_pcur_move_to_next_user_rec(&pcur, &mtr)) { goto func_exit; } rec = btr_pcur_get_rec(&pcur); } /*---------------------------------------------------*/ /* Now we have the record in the secondary index containing the table ID and NAME */ rec = btr_pcur_get_rec(&pcur); field = rec_get_nth_field_old(rec, 0, &len); ut_ad(len == 8); /* Check if the table id in record is the one searched for */ if (table_id != mach_read_from_8(field)) { goto func_exit; } /* Now we get the table name from the record */ field = rec_get_nth_field_old(rec, 1, &len); /* Load the table definition to memory */ table = dict_load_table(mem_heap_strdupl(heap, (char*) field, len), TRUE, DICT_ERR_IGNORE_NONE); func_exit: btr_pcur_close(&pcur); mtr_commit(&mtr); mem_heap_free(heap); return(table); } /********************************************************************//** This function is called when the database is booted. Loads system table index definitions except for the clustered index which is added to the dictionary cache at booting before calling this function. */ UNIV_INTERN void dict_load_sys_table( /*================*/ dict_table_t* table) /*!< in: system table */ { mem_heap_t* heap; ut_ad(mutex_own(&(dict_sys->mutex))); heap = mem_heap_create(1000); dict_load_indexes(table, heap, DICT_ERR_IGNORE_NONE); mem_heap_free(heap); } /********************************************************************//** Loads foreign key constraint col names (also for the referenced table). 
*/ static void dict_load_foreign_cols( /*===================*/ const char* id, /*!< in: foreign constraint id as a null-terminated string */ dict_foreign_t* foreign)/*!< in: foreign constraint object */ { dict_table_t* sys_foreign_cols; dict_index_t* sys_index; btr_pcur_t pcur; dtuple_t* tuple; dfield_t* dfield; const rec_t* rec; const byte* field; ulint len; ulint i; mtr_t mtr; ut_ad(mutex_own(&(dict_sys->mutex))); foreign->foreign_col_names = mem_heap_alloc( foreign->heap, foreign->n_fields * sizeof(void*)); foreign->referenced_col_names = mem_heap_alloc( foreign->heap, foreign->n_fields * sizeof(void*)); mtr_start(&mtr); sys_foreign_cols = dict_table_get_low("SYS_FOREIGN_COLS"); sys_index = UT_LIST_GET_FIRST(sys_foreign_cols->indexes); ut_a(!dict_table_is_comp(sys_foreign_cols)); tuple = dtuple_create(foreign->heap, 1); dfield = dtuple_get_nth_field(tuple, 0); dfield_set_data(dfield, id, ut_strlen(id)); dict_index_copy_types(tuple, sys_index, 1); btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); for (i = 0; i < foreign->n_fields; i++) { rec = btr_pcur_get_rec(&pcur); ut_a(btr_pcur_is_on_user_rec(&pcur)); ut_a(!rec_get_deleted_flag(rec, 0)); field = rec_get_nth_field_old(rec, 0, &len); ut_a(len == ut_strlen(id)); ut_a(ut_memcmp(id, field, len) == 0); field = rec_get_nth_field_old(rec, 1, &len); ut_a(len == 4); ut_a(i == mach_read_from_4(field)); field = rec_get_nth_field_old(rec, 4, &len); foreign->foreign_col_names[i] = mem_heap_strdupl( foreign->heap, (char*) field, len); field = rec_get_nth_field_old(rec, 5, &len); foreign->referenced_col_names[i] = mem_heap_strdupl( foreign->heap, (char*) field, len); btr_pcur_move_to_next_user_rec(&pcur, &mtr); } btr_pcur_close(&pcur); mtr_commit(&mtr); } /***********************************************************************//** Loads a foreign key constraint to the dictionary cache. 
@return DB_SUCCESS or error code */
static
ulint
dict_load_foreign(
/*==============*/
	const char*	id,	/*!< in: foreign constraint id as a
				null-terminated string */
	ibool		check_charsets,
				/*!< in: TRUE=check charset compatibility */
	ibool		check_recursive)
				/*!< in: Whether to record the foreign table
				parent count to avoid unlimited recursive
				load of chained foreign tables */
{
	dict_foreign_t*	foreign;
	dict_table_t*	sys_foreign;
	btr_pcur_t	pcur;
	dict_index_t*	sys_index;
	dtuple_t*	tuple;
	mem_heap_t*	heap2;
	dfield_t*	dfield;
	const rec_t*	rec;
	const byte*	field;
	ulint		len;
	ulint		n_fields_and_type;
	mtr_t		mtr;
	dict_table_t*	for_table;
	dict_table_t*	ref_table;

	ut_ad(mutex_own(&(dict_sys->mutex)));

	heap2 = mem_heap_create(1000);

	mtr_start(&mtr);

	sys_foreign = dict_table_get_low("SYS_FOREIGN");
	sys_index = UT_LIST_GET_FIRST(sys_foreign->indexes);
	ut_a(!dict_table_is_comp(sys_foreign));

	tuple = dtuple_create(heap2, 1);
	dfield = dtuple_get_nth_field(tuple, 0);

	dfield_set_data(dfield, id, ut_strlen(id));
	dict_index_copy_types(tuple, sys_index, 1);

	btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
				  BTR_SEARCH_LEAF, &pcur, &mtr);
	rec = btr_pcur_get_rec(&pcur);

	if (!btr_pcur_is_on_user_rec(&pcur)
	    || rec_get_deleted_flag(rec, 0)) {
		/* Not found */

		fprintf(stderr,
			"InnoDB: Error A: cannot load foreign constraint %s\n",
			id);

		btr_pcur_close(&pcur);
		mtr_commit(&mtr);
		mem_heap_free(heap2);

		return(DB_ERROR);
	}

	field = rec_get_nth_field_old(rec, 0, &len);

	/* Check if the id in record is the searched one */
	if (len != ut_strlen(id) || ut_memcmp(id, field, len) != 0) {

		fprintf(stderr,
			"InnoDB: Error B: cannot load foreign constraint %s\n",
			id);

		btr_pcur_close(&pcur);
		mtr_commit(&mtr);
		mem_heap_free(heap2);

		return(DB_ERROR);
	}

	/* Read the table names and the number of columns associated
	with the constraint */

	mem_heap_free(heap2);

	foreign = dict_mem_foreign_create();

	n_fields_and_type = mach_read_from_4(
		rec_get_nth_field_old(rec, 5, &len));

	ut_a(len == 4);

	/* We store the type in the bits 24..29 of n_fields_and_type. */

	foreign->type = (unsigned int) (n_fields_and_type >> 24);
	foreign->n_fields = (unsigned int) (n_fields_and_type & 0x3FFUL);

	foreign->id = mem_heap_strdup(foreign->heap, id);

	field = rec_get_nth_field_old(rec, 3, &len);

	foreign->foreign_table_name = mem_heap_strdupl(
		foreign->heap, (char*) field, len);
	dict_mem_foreign_table_name_lookup_set(foreign, TRUE);

	field = rec_get_nth_field_old(rec, 4, &len);
	foreign->referenced_table_name = mem_heap_strdupl(
		foreign->heap, (char*) field, len);
	dict_mem_referenced_table_name_lookup_set(foreign, TRUE);

	btr_pcur_close(&pcur);
	mtr_commit(&mtr);

	dict_load_foreign_cols(id, foreign);

	ref_table = dict_table_check_if_in_cache_low(
		foreign->referenced_table_name_lookup);

	/* We could possibly wind up in deep recursive calls if we call
	dict_table_get_low() again here if there is a chain of tables
	concatenated together with foreign constraints. In such a case,
	each table is both a parent and child of the other tables, and
	acts as a "link" in such table chains.

	To avoid such a scenario, we would need to check the number of
	ancestors the current table has. If that exceeds
	DICT_FK_MAX_CHAIN_LEN, we will stop loading the child table.
	Foreign constraints are loaded in a breadth-first fashion, that is,
	the index on FOR_NAME is scanned first, and then the index on
	REF_NAME. So foreign constraints in which the current table is a
	child (foreign table) are loaded first, and then those constraints
	where the current table is a parent (referenced) table.

	Thus we could check the parent (ref_table) table's reference count
	(fk_max_recusive_level) to know how deep the recursive call is.
	If the parent table (ref_table) is already loaded, and its
	fk_max_recusive_level is larger than DICT_FK_MAX_CHAIN_LEN, we
	will stop the recursive loading by skipping loading the child
	table. This will not affect the foreign constraint check for DMLs,
	since the child table will be loaded at that time for the
	constraint check. */
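	/* Editorial note, not part of the original source: a worked
	example of the cap described above. With a chain
	t1 -> t2 -> t3 -> ... linked by foreign keys, loading t1 pulls in
	t2, which pulls in t3, and so on; fk_max_recusive_level grows by
	one per link (t1: 0, t2: 1, t3: 2, ...). Once a referenced table
	already carries a level of DICT_FK_MAX_RECURSIVE_LOAD or more,
	the child is not loaded here; it is still loaded lazily when a
	DML needs it for constraint checking. */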
	if (!ref_table
	    || ref_table->fk_max_recusive_level
	       < DICT_FK_MAX_RECURSIVE_LOAD) {

		/* If the foreign table is not yet in the dictionary
		cache, we have to load it so that we are able to make
		type comparisons in the next function call. */

		for_table = dict_table_get_low(
			foreign->foreign_table_name_lookup);

		if (for_table && ref_table && check_recursive) {
			/* This records the longest chain of ancestors
			this table has: if the parent has more ancestors
			than this table, record the parent's count plus 1
			(for this parent). */
			if (ref_table->fk_max_recusive_level
			    >= for_table->fk_max_recusive_level) {
				for_table->fk_max_recusive_level =
					ref_table->fk_max_recusive_level + 1;
			}
		}
	}

	/* Note that there may already be a foreign constraint object in
	the dictionary cache for this constraint: then the following
	call only sets the pointers in it to point to the appropriate table
	and index objects and frees the newly created object foreign.
	Adding to the cache should always succeed since we are not creating
	a new foreign key constraint but loading one from the data
	dictionary. */

	return(dict_foreign_add_to_cache(foreign, check_charsets));
}

/***********************************************************************//**
Loads foreign key constraints where the table is either the foreign key
holder or where the table is referenced by a foreign key. Adds these
constraints to the data dictionary. Note that we know that the dictionary
cache already contains all constraints where the other relevant table is
already in the dictionary cache.
@return DB_SUCCESS or error code */ UNIV_INTERN ulint dict_load_foreigns( /*===============*/ const char* table_name, /*!< in: table name */ ibool check_recursive,/*!< in: Whether to check recursive load of tables chained by FK */ ibool check_charsets) /*!< in: TRUE=check charset compatibility */ { btr_pcur_t pcur; mem_heap_t* heap; dtuple_t* tuple; dfield_t* dfield; dict_index_t* sec_index; dict_table_t* sys_foreign; const rec_t* rec; const byte* field; ulint len; char* id ; ulint err; mtr_t mtr; ut_ad(mutex_own(&(dict_sys->mutex))); sys_foreign = dict_table_get_low("SYS_FOREIGN"); if (sys_foreign == NULL) { /* No foreign keys defined yet in this database */ fprintf(stderr, "InnoDB: Error: no foreign key system tables" " in the database\n"); return(DB_ERROR); } ut_a(!dict_table_is_comp(sys_foreign)); mtr_start(&mtr); /* Get the secondary index based on FOR_NAME from table SYS_FOREIGN */ sec_index = dict_table_get_next_index( dict_table_get_first_index(sys_foreign)); start_load: heap = mem_heap_create(256); tuple = dtuple_create(heap, 1); dfield = dtuple_get_nth_field(tuple, 0); dfield_set_data(dfield, table_name, ut_strlen(table_name)); dict_index_copy_types(tuple, sec_index, 1); btr_pcur_open_on_user_rec(sec_index, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); loop: rec = btr_pcur_get_rec(&pcur); if (!btr_pcur_is_on_user_rec(&pcur)) { /* End of index */ goto load_next_index; } /* Now we have the record in the secondary index containing a table name and a foreign constraint ID */ rec = btr_pcur_get_rec(&pcur); field = rec_get_nth_field_old(rec, 0, &len); /* Check if the table name in the record is the one searched for; the following call does the comparison in the latin1_swedish_ci charset-collation, in a case-insensitive way. */ if (0 != cmp_data_data(dfield_get_type(dfield)->mtype, dfield_get_type(dfield)->prtype, dfield_get_data(dfield), dfield_get_len(dfield), field, len)) { goto load_next_index; } /* Since table names in SYS_FOREIGN are stored in a case-insensitive order, we have to check that the table name matches also in a binary string comparison. On Unix, MySQL allows table names that only differ in character case. If lower_case_table_names=2 then what is stored may not be the same case, but the previous comparison showed that they match with no-case. */ if ((innobase_get_lower_case_table_names() != 2) && (0 != ut_memcmp(field, table_name, len))) { goto next_rec; } if (rec_get_deleted_flag(rec, 0)) { goto next_rec; } /* Now we get a foreign key constraint id */ field = rec_get_nth_field_old(rec, 1, &len); id = mem_heap_strdupl(heap, (char*) field, len); btr_pcur_store_position(&pcur, &mtr); mtr_commit(&mtr); /* Load the foreign constraint definition to the dictionary cache */ err = dict_load_foreign(id, check_charsets, check_recursive); if (err != DB_SUCCESS) { btr_pcur_close(&pcur); mem_heap_free(heap); return(err); } mtr_start(&mtr); btr_pcur_restore_position(BTR_SEARCH_LEAF, &pcur, &mtr); next_rec: btr_pcur_move_to_next_user_rec(&pcur, &mtr); goto loop; load_next_index: btr_pcur_close(&pcur); mtr_commit(&mtr); mem_heap_free(heap); sec_index = dict_table_get_next_index(sec_index); if (sec_index != NULL) { mtr_start(&mtr); /* Switch to scan index on REF_NAME, fk_max_recusive_level already been updated when scanning FOR_NAME index, no need to update again */ check_recursive = FALSE; goto start_load; } return(DB_SUCCESS); }
cryptdb-org/mysql-5-5-14
storage/innobase/dict/dict0load.c
C
gpl-2.0
65,630
/* arch/arm/mach-rk29/vpu.c * * Copyright (C) 2010 ROCKCHIP, Inc. * author: chenhengming chm@rock-chips.com * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/ioport.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/poll.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/wakelock.h> #include <linux/cdev.h> #include <linux/of.h> #include <linux/rockchip/cpu.h> #include <linux/rockchip/cru.h> #include <asm/cacheflush.h> #include <asm/uaccess.h> #if defined(CONFIG_ION_ROCKCHIP) #include <linux/rockchip_ion.h> #endif //#define CONFIG_VCODEC_MMU #ifdef CONFIG_VCODEC_MMU #include <linux/rockchip/iovmm.h> #include <linux/rockchip/sysmmu.h> #include <linux/dma-buf.h> #endif #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> #endif #if defined(CONFIG_ARCH_RK319X) #include <mach/grf.h> #endif #include "vcodec_service.h" #define HEVC_TEST_ENABLE 0 #define HEVC_SIM_ENABLE 0 #define VCODEC_CLOCK_ENABLE 1 typedef enum { VPU_DEC_ID_9190 = 0x6731, VPU_ID_8270 = 0x8270, VPU_ID_4831 = 0x4831, HEVC_ID = 0x6867, } VPU_HW_ID; typedef enum { VPU_DEC_TYPE_9190 = 0, VPU_ENC_TYPE_8270 = 0x100, VPU_ENC_TYPE_4831 , } VPU_HW_TYPE_E; typedef enum VPU_FREQ { VPU_FREQ_200M, VPU_FREQ_266M, VPU_FREQ_300M, VPU_FREQ_400M, VPU_FREQ_500M, VPU_FREQ_600M, VPU_FREQ_DEFAULT, VPU_FREQ_BUT, } VPU_FREQ; typedef struct { VPU_HW_ID hw_id; unsigned long hw_addr; unsigned long enc_offset; unsigned long enc_reg_num; unsigned long enc_io_size; unsigned long dec_offset; unsigned long dec_reg_num; unsigned long dec_io_size; } VPU_HW_INFO_E; #define VPU_SERVICE_SHOW_TIME 0 #if VPU_SERVICE_SHOW_TIME static struct timeval enc_start, enc_end; static struct timeval dec_start, dec_end; static struct timeval pp_start, pp_end; #endif #define MHZ (1000*1000) #define REG_NUM_9190_DEC (60) #define REG_NUM_9190_PP (41) #define REG_NUM_9190_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP) #define REG_NUM_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP) #define REG_NUM_ENC_8270 (96) #define REG_SIZE_ENC_8270 (0x200) #define REG_NUM_ENC_4831 (164) #define REG_SIZE_ENC_4831 (0x400) #define REG_NUM_HEVC_DEC (68) #define SIZE_REG(reg) ((reg)*4) static VPU_HW_INFO_E vpu_hw_set[] = { [0] = { .hw_id = VPU_ID_8270, .hw_addr = 0, .enc_offset = 0x0, .enc_reg_num = REG_NUM_ENC_8270, .enc_io_size = REG_NUM_ENC_8270 * 4, .dec_offset = REG_SIZE_ENC_8270, .dec_reg_num = REG_NUM_9190_DEC_PP, .dec_io_size = REG_NUM_9190_DEC_PP * 4, }, [1] = { .hw_id = VPU_ID_4831, .hw_addr = 0, .enc_offset = 0x0, .enc_reg_num = REG_NUM_ENC_4831, .enc_io_size = REG_NUM_ENC_4831 * 4, .dec_offset = REG_SIZE_ENC_4831, .dec_reg_num = REG_NUM_9190_DEC_PP, .dec_io_size = REG_NUM_9190_DEC_PP * 4, }, [2] = { .hw_id = HEVC_ID, .hw_addr = 0, .dec_offset = 0x0, .dec_reg_num = REG_NUM_HEVC_DEC, .dec_io_size = REG_NUM_HEVC_DEC * 4, }, }; #define DEC_INTERRUPT_REGISTER 1 #define PP_INTERRUPT_REGISTER 60 #define ENC_INTERRUPT_REGISTER 1 
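/* Editorial note, not part of the original source: the *_INTERRUPT_REGISTER
 * values above are word offsets into the mapped register file (hwregs in
 * struct vpu_device), while the *_BIT masks below select status bits within
 * that register. A plausible usage pattern, assumed rather than verified
 * against this driver's interrupt handlers:
 *
 *	u32 irq_status = dev->hwregs[DEC_INTERRUPT_REGISTER];
 *	if (irq_status & DEC_INTERRUPT_BIT) {
 *		// decoder interrupt raised; acknowledge and wake waiters
 *	}
 */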
#define DEC_INTERRUPT_BIT 0x100 #define DEC_BUFFER_EMPTY_BIT 0x4000 #define PP_INTERRUPT_BIT 0x100 #define ENC_INTERRUPT_BIT 0x1 #define HEVC_DEC_INT_RAW_BIT 0x200 #define HEVC_DEC_STR_ERROR_BIT 0x4000 #define HEVC_DEC_BUS_ERROR_BIT 0x2000 #define HEVC_DEC_BUFFER_EMPTY_BIT 0x10000 #define VPU_REG_EN_ENC 14 #define VPU_REG_ENC_GATE 2 #define VPU_REG_ENC_GATE_BIT (1<<4) #define VPU_REG_EN_DEC 1 #define VPU_REG_DEC_GATE 2 #define VPU_REG_DEC_GATE_BIT (1<<10) #define VPU_REG_EN_PP 0 #define VPU_REG_PP_GATE 1 #define VPU_REG_PP_GATE_BIT (1<<8) #define VPU_REG_EN_DEC_PP 1 #define VPU_REG_DEC_PP_GATE 61 #define VPU_REG_DEC_PP_GATE_BIT (1<<8) static u8 addr_tbl_vpu_dec[] = { 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 40, 41 }; static u8 addr_tbl_vpu_enc[] = { 5, 6, 7, 8, 9, 10, 11, 12, 13, 51 }; static u8 addr_tbl_hevc_dec[] = { 4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 42, 43 }; /** * struct for process session which connect to vpu * * @author ChenHengming (2011-5-3) */ typedef struct vpu_session { VPU_CLIENT_TYPE type; /* a linked list of data so we can access them for debugging */ struct list_head list_session; /* a linked list of register data waiting for process */ struct list_head waiting; /* a linked list of register data in processing */ struct list_head running; /* a linked list of register data processed */ struct list_head done; wait_queue_head_t wait; pid_t pid; atomic_t task_running; } vpu_session; /** * struct for process register set * * @author ChenHengming (2011-5-4) */ typedef struct vpu_reg { VPU_CLIENT_TYPE type; VPU_FREQ freq; vpu_session *session; struct list_head session_link; /* link to vpu service session */ struct list_head status_link; /* link to register set list */ unsigned long size; #if defined(CONFIG_VCODEC_MMU) struct list_head mem_region_list; #endif unsigned long *reg; } vpu_reg; typedef struct vpu_device { atomic_t irq_count_codec; atomic_t irq_count_pp; unsigned long iobaseaddr; unsigned int iosize; volatile u32 *hwregs; } vpu_device; enum vcodec_device_id { VCODEC_DEVICE_ID_VPU, VCODEC_DEVICE_ID_HEVC }; struct vcodec_mem_region { struct list_head srv_lnk; struct list_head reg_lnk; struct list_head session_lnk; dma_addr_t iova; /* virtual address for iommu */ struct dma_buf *buf; struct dma_buf_attachment *attachment; struct sg_table *sg_table; struct ion_handle *hdl; }; typedef struct vpu_service_info { struct wake_lock wake_lock; struct delayed_work power_off_work; struct mutex lock; struct list_head waiting; /* link to link_reg in struct vpu_reg */ struct list_head running; /* link to link_reg in struct vpu_reg */ struct list_head done; /* link to link_reg in struct vpu_reg */ struct list_head session; /* link to list_session in struct vpu_session */ atomic_t total_running; bool enabled; vpu_reg *reg_codec; vpu_reg *reg_pproc; vpu_reg *reg_resev; VPUHwDecConfig_t dec_config; VPUHwEncConfig_t enc_config; VPU_HW_INFO_E *hw_info; unsigned long reg_size; bool auto_freq; bool bug_dec_addr; atomic_t freq_status; struct clk *aclk_vcodec; struct clk *hclk_vcodec; struct clk *clk_core; struct clk *clk_cabac; int irq_dec; int irq_enc; vpu_device enc_dev; vpu_device dec_dev; struct device *dev; struct cdev cdev; dev_t dev_t; struct class *cls; struct device *child_dev; struct dentry *debugfs_dir; struct dentry *debugfs_file_regs; u32 irq_status; #if defined(CONFIG_ION_ROCKCHIP) struct ion_client * ion_client; #endif #if defined(CONFIG_VCODEC_MMU) struct list_head mem_region_list; #endif enum vcodec_device_id 
dev_id; struct delayed_work simulate_work; } vpu_service_info; typedef struct vpu_request { unsigned long *req; unsigned long size; } vpu_request; /// global variable //static struct clk *pd_video; static struct dentry *parent; // debugfs root directory for all device (vpu, hevc). #ifdef CONFIG_DEBUG_FS static int vcodec_debugfs_init(void); static void vcodec_debugfs_exit(void); static struct dentry* vcodec_debugfs_create_device_dir(char *dirname, struct dentry *parent); static int debug_vcodec_open(struct inode *inode, struct file *file); static const struct file_operations debug_vcodec_fops = { .open = debug_vcodec_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif #define VPU_POWER_OFF_DELAY 4*HZ /* 4s */ #define VPU_TIMEOUT_DELAY 2*HZ /* 2s */ #define VPU_SIMULATE_DELAY msecs_to_jiffies(15) static void vpu_get_clk(struct vpu_service_info *pservice) { #if VCODEC_CLOCK_ENABLE /*pd_video = clk_get(NULL, "pd_video"); if (IS_ERR(pd_video)) { pr_err("failed on clk_get pd_video\n"); }*/ pservice->aclk_vcodec = devm_clk_get(pservice->dev, "aclk_vcodec"); if (IS_ERR(pservice->aclk_vcodec)) { dev_err(pservice->dev, "failed on clk_get aclk_vcodec\n"); } pservice->hclk_vcodec = devm_clk_get(pservice->dev, "hclk_vcodec"); if (IS_ERR(pservice->hclk_vcodec)) { dev_err(pservice->dev, "failed on clk_get hclk_vcodec\n"); } if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC) { pservice->clk_core = devm_clk_get(pservice->dev, "clk_core"); if (IS_ERR(pservice->clk_core)) { dev_err(pservice->dev, "failed on clk_get clk_core\n"); } pservice->clk_cabac = devm_clk_get(pservice->dev, "clk_cabac"); if (IS_ERR(pservice->clk_cabac)) { dev_err(pservice->dev, "failed on clk_get clk_cabac\n"); } } #endif } static void vpu_put_clk(struct vpu_service_info *pservice) { #if VCODEC_CLOCK_ENABLE //clk_put(pd_video); if (pservice->aclk_vcodec) { devm_clk_put(pservice->dev, pservice->aclk_vcodec); } if (pservice->hclk_vcodec) { devm_clk_put(pservice->dev, pservice->hclk_vcodec); } if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC) { if (pservice->clk_core) { devm_clk_put(pservice->dev, pservice->clk_core); } if (pservice->clk_cabac) { devm_clk_put(pservice->dev, pservice->clk_cabac); } } #endif } static void vpu_reset(struct vpu_service_info *pservice) { #if defined(CONFIG_ARCH_RK29) clk_disable(aclk_ddr_vepu); cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true); cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true); cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true); cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true); mdelay(10); cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false); cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false); cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false); cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false); clk_enable(aclk_ddr_vepu); #elif defined(CONFIG_ARCH_RK30) pmu_set_idle_request(IDLE_REQ_VIDEO, true); cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true); cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true); cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true); cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true); mdelay(1); cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false); cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false); cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false); cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false); pmu_set_idle_request(IDLE_REQ_VIDEO, false); #endif pservice->reg_codec = NULL; pservice->reg_pproc = NULL; pservice->reg_resev = NULL; } static void reg_deinit(struct vpu_service_info *pservice, vpu_reg *reg); static void vpu_service_session_clear(struct vpu_service_info 
*pservice, vpu_session *session) { vpu_reg *reg, *n; list_for_each_entry_safe(reg, n, &session->waiting, session_link) { reg_deinit(pservice, reg); } list_for_each_entry_safe(reg, n, &session->running, session_link) { reg_deinit(pservice, reg); } list_for_each_entry_safe(reg, n, &session->done, session_link) { reg_deinit(pservice, reg); } } static void vpu_service_dump(struct vpu_service_info *pservice) { int running; vpu_reg *reg, *reg_tmp; vpu_session *session, *session_tmp; running = atomic_read(&pservice->total_running); printk("total_running %d\n", running); printk("reg_codec 0x%.8x\n", (unsigned int)pservice->reg_codec); printk("reg_pproc 0x%.8x\n", (unsigned int)pservice->reg_pproc); printk("reg_resev 0x%.8x\n", (unsigned int)pservice->reg_resev); list_for_each_entry_safe(session, session_tmp, &pservice->session, list_session) { printk("session pid %d type %d:\n", session->pid, session->type); running = atomic_read(&session->task_running); printk("task_running %d\n", running); list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) { printk("waiting register set 0x%.8x\n", (unsigned int)reg); } list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) { printk("running register set 0x%.8x\n", (unsigned int)reg); } list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) { printk("done register set 0x%.8x\n", (unsigned int)reg); } } } static void vpu_service_power_off(struct vpu_service_info *pservice) { int total_running; if (!pservice->enabled) { return; } pservice->enabled = false; total_running = atomic_read(&pservice->total_running); if (total_running) { pr_alert("alert: power off when %d task running!!\n", total_running); mdelay(50); pr_alert("alert: delay 50 ms for running task\n"); vpu_service_dump(pservice); } printk("%s: power off...", dev_name(pservice->dev)); #ifdef CONFIG_ARCH_RK29 pmu_set_power_domain(PD_VCODEC, false); #else //clk_disable(pd_video); #endif udelay(10); #if VCODEC_CLOCK_ENABLE clk_disable_unprepare(pservice->hclk_vcodec); clk_disable_unprepare(pservice->aclk_vcodec); if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC) { clk_disable_unprepare(pservice->clk_core); clk_disable_unprepare(pservice->clk_cabac); } #endif wake_unlock(&pservice->wake_lock); printk("done\n"); } static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice) { queue_delayed_work(system_nrt_wq, &pservice->power_off_work, VPU_POWER_OFF_DELAY); } static void vpu_power_off_work(struct work_struct *work_s) { struct delayed_work *dlwork = container_of(work_s, struct delayed_work, work); struct vpu_service_info *pservice = container_of(dlwork, struct vpu_service_info, power_off_work); if (mutex_trylock(&pservice->lock)) { vpu_service_power_off(pservice); mutex_unlock(&pservice->lock); } else { /* Come back later if the device is busy... 
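the mutex is still held by a task that is running on the hardware, so
re-arm the delayed work and retry the power-off after another
VPU_POWER_OFF_DELAY.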
*/ vpu_queue_power_off_work(pservice); } } static void vpu_service_power_on(struct vpu_service_info *pservice) { static ktime_t last; ktime_t now = ktime_get(); if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) { cancel_delayed_work_sync(&pservice->power_off_work); vpu_queue_power_off_work(pservice); last = now; } if (pservice->enabled) return ; pservice->enabled = true; printk("%s: power on\n", dev_name(pservice->dev)); #if VCODEC_CLOCK_ENABLE clk_prepare_enable(pservice->aclk_vcodec); clk_prepare_enable(pservice->hclk_vcodec); if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC) { clk_prepare_enable(pservice->clk_core); clk_prepare_enable(pservice->clk_cabac); } #endif #if defined(CONFIG_ARCH_RK319X) /// select aclk_vepu as vcodec clock source. #define BIT_VCODEC_SEL (1<<7) writel_relaxed(readl_relaxed(RK319X_GRF_BASE + GRF_SOC_CON1) | (BIT_VCODEC_SEL) | (BIT_VCODEC_SEL << 16), RK319X_GRF_BASE + GRF_SOC_CON1); #endif udelay(10); #ifdef CONFIG_ARCH_RK29 pmu_set_power_domain(PD_VCODEC, true); #else //clk_enable(pd_video); #endif udelay(10); wake_lock(&pservice->wake_lock); } static inline bool reg_check_rmvb_wmv(vpu_reg *reg) { unsigned long type = (reg->reg[3] & 0xF0000000) >> 28; return ((type == 8) || (type == 4)); } static inline bool reg_check_interlace(vpu_reg *reg) { unsigned long type = (reg->reg[3] & (1 << 23)); return (type > 0); } static inline bool reg_check_avc(vpu_reg *reg) { unsigned long type = (reg->reg[3] & 0xF0000000) >> 28; return (type == 0); } static inline int reg_probe_width(vpu_reg *reg) { int width_in_mb = reg->reg[4] >> 23; return width_in_mb * 16; } #if defined(CONFIG_VCODEC_MMU) static unsigned int vcodec_map_ion_handle(vpu_service_info *pservice, vpu_reg *reg, struct ion_handle *ion_handle, struct dma_buf *buf, int offset) { struct vcodec_mem_region *mem_region = kzalloc(sizeof(struct vcodec_mem_region), GFP_KERNEL); if (mem_region == NULL) { dev_err(pservice->dev, "allocate memory for iommu memory region failed\n"); return -1; } mem_region->buf = buf; mem_region->hdl = ion_handle; mem_region->attachment = dma_buf_attach(buf, pservice->dev); if (IS_ERR_OR_NULL(mem_region->attachment)) { dev_err(pservice->dev, "dma_buf_attach() failed: %ld\n", PTR_ERR(mem_region->attachment)); goto err_buf_map_attach; } mem_region->sg_table = dma_buf_map_attachment(mem_region->attachment, DMA_BIDIRECTIONAL); if (IS_ERR_OR_NULL(mem_region->sg_table)) { dev_err(pservice->dev, "dma_buf_map_attachment() failed: %ld\n", PTR_ERR(mem_region->sg_table)); goto err_buf_map_attachment; } mem_region->iova = iovmm_map(pservice->dev, mem_region->sg_table->sgl, offset, buf->size); if (mem_region->iova == 0 || IS_ERR_VALUE(mem_region->iova)) { dev_err(pservice->dev, "iovmm_map() failed: %d\n", mem_region->iova); goto err_iovmm_map; } INIT_LIST_HEAD(&mem_region->reg_lnk); list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list); return mem_region->iova; err_iovmm_map: dma_buf_unmap_attachment(mem_region->attachment, mem_region->sg_table, DMA_BIDIRECTIONAL); err_buf_map_attachment: dma_buf_detach(buf, mem_region->attachment); err_buf_map_attach: kfree(mem_region); return 0; } static int vcodec_reg_address_translate(struct vpu_service_info *pservice, vpu_reg *reg) { VPU_HW_ID hw_id; int i; hw_id = pservice->hw_info->hw_id; if (hw_id == HEVC_ID) { } else { if (reg->type == VPU_DEC) { for (i=0; i<sizeof(addr_tbl_vpu_dec); i++) { int usr_fd; struct ion_handle *hdl; //ion_phys_addr_t phy_addr; struct dma_buf *buf; //size_t len; int offset; #if 0 if (copy_from_user(&usr_fd, 
&reg->reg[addr_tbl_vpu_dec[i]], sizeof(usr_fd))) return -EFAULT; #else usr_fd = reg->reg[addr_tbl_vpu_dec[i]] & 0xFF; offset = reg->reg[addr_tbl_vpu_dec[i]] >> 8; #endif if (usr_fd != 0) { hdl = ion_import_dma_buf(pservice->ion_client, usr_fd); if (IS_ERR(hdl)) { pr_err("import dma-buf from fd %d failed\n", usr_fd); return PTR_ERR(hdl); } #if 0 ion_phys(pservice->ion_client, hdl, &phy_addr, &len); reg->reg[addr_tbl_vpu_dec[i]] = phy_addr + offset; ion_free(pservice->ion_client, hdl); #else buf = ion_share_dma_buf(pservice->ion_client, hdl); if (IS_ERR_OR_NULL(buf)) { dev_err(pservice->dev, "ion_share_dma_buf() failed\n"); ion_free(pservice->ion_client, hdl); return PTR_ERR(buf); } reg->reg[addr_tbl_vpu_dec[i]] = vcodec_map_ion_handle(pservice, reg, hdl, buf, offset); #endif } } } else if (reg->type == VPU_ENC) { } } return 0; } #endif static vpu_reg *reg_init(struct vpu_service_info *pservice, vpu_session *session, void __user *src, unsigned long size) { vpu_reg *reg = kmalloc(sizeof(vpu_reg)+pservice->reg_size, GFP_KERNEL); if (NULL == reg) { pr_err("error: kmalloc fail in reg_init\n"); return NULL; } if (size > pservice->reg_size) { printk("warning: vpu reg size %lu is larger than hw reg size %lu\n", size, pservice->reg_size); size = pservice->reg_size; } reg->session = session; reg->type = session->type; reg->size = size; reg->freq = VPU_FREQ_DEFAULT; reg->reg = (unsigned long *)&reg[1]; INIT_LIST_HEAD(&reg->session_link); INIT_LIST_HEAD(&reg->status_link); #if defined(CONFIG_VCODEC_MMU) INIT_LIST_HEAD(&reg->mem_region_list); #endif if (copy_from_user(&reg->reg[0], (void __user *)src, size)) { pr_err("error: copy_from_user failed in reg_init\n"); kfree(reg); return NULL; } #if defined(CONFIG_VCODEC_MMU) if (0 > vcodec_reg_address_translate(pservice, reg)) { pr_err("error: translate reg address failed\n"); kfree(reg); return NULL; } #endif mutex_lock(&pservice->lock); list_add_tail(&reg->status_link, &pservice->waiting); list_add_tail(&reg->session_link, &session->waiting); mutex_unlock(&pservice->lock); if (pservice->auto_freq) { if (!soc_is_rk2928g()) { if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) { if (reg_check_rmvb_wmv(reg)) { reg->freq = VPU_FREQ_200M; } else if (reg_check_avc(reg)) { if (reg_probe_width(reg) > 3200) { // raise frequency for 4k avc. reg->freq = VPU_FREQ_500M; } } else { if (reg_check_interlace(reg)) { reg->freq = VPU_FREQ_400M; } } } if (reg->type == VPU_PP) { reg->freq = VPU_FREQ_400M; } } } return reg; } static void reg_deinit(struct vpu_service_info *pservice, vpu_reg *reg) { #if defined(CONFIG_VCODEC_MMU) struct vcodec_mem_region *mem_region = NULL, *n; #endif list_del_init(&reg->session_link); list_del_init(&reg->status_link); if (reg == pservice->reg_codec) pservice->reg_codec = NULL; if (reg == pservice->reg_pproc) pservice->reg_pproc = NULL; #if defined(CONFIG_VCODEC_MMU) // release memory region attach to this registers table. 
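    // Teardown mirrors vcodec_map_ion_handle() in reverse: unmap the iova,
    // unmap and detach the dma-buf attachment, drop the dma-buf reference,
    // then free the ion handle before releasing the bookkeeping struct.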
list_for_each_entry_safe(mem_region, n, &reg->mem_region_list, reg_lnk) { iovmm_unmap(pservice->dev, mem_region->iova); dma_buf_unmap_attachment(mem_region->attachment, mem_region->sg_table, DMA_BIDIRECTIONAL); dma_buf_detach(mem_region->buf, mem_region->attachment); dma_buf_put(mem_region->buf); ion_free(pservice->ion_client, mem_region->hdl); list_del_init(&mem_region->reg_lnk); kfree(mem_region); } #endif kfree(reg); } static void reg_from_wait_to_run(struct vpu_service_info *pservice, vpu_reg *reg) { list_del_init(&reg->status_link); list_add_tail(&reg->status_link, &pservice->running); list_del_init(&reg->session_link); list_add_tail(&reg->session_link, &reg->session->running); } static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count) { int i; u32 *dst = (u32 *)&reg->reg[0]; for (i = 0; i < count; i++) *dst++ = *src++; } static void reg_from_run_to_done(struct vpu_service_info *pservice, vpu_reg *reg) { int irq_reg = -1; list_del_init(&reg->status_link); list_add_tail(&reg->status_link, &pservice->done); list_del_init(&reg->session_link); list_add_tail(&reg->session_link, &reg->session->done); switch (reg->type) { case VPU_ENC : { pservice->reg_codec = NULL; reg_copy_from_hw(reg, pservice->enc_dev.hwregs, pservice->hw_info->enc_reg_num); irq_reg = ENC_INTERRUPT_REGISTER; break; } case VPU_DEC : { int reg_len = pservice->hw_info->hw_id == HEVC_ID ? REG_NUM_HEVC_DEC : REG_NUM_9190_DEC; pservice->reg_codec = NULL; reg_copy_from_hw(reg, pservice->dec_dev.hwregs, reg_len); irq_reg = DEC_INTERRUPT_REGISTER; break; } case VPU_PP : { pservice->reg_pproc = NULL; reg_copy_from_hw(reg, pservice->dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_9190_PP); pservice->dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0; break; } case VPU_DEC_PP : { pservice->reg_codec = NULL; pservice->reg_pproc = NULL; reg_copy_from_hw(reg, pservice->dec_dev.hwregs, REG_NUM_9190_DEC_PP); pservice->dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0; break; } default : { pr_err("error: copy reg from hw with unknown type %d\n", reg->type); break; } } if (irq_reg != -1) { reg->reg[irq_reg] = pservice->irq_status; } atomic_sub(1, &reg->session->task_running); atomic_sub(1, &pservice->total_running); wake_up(&reg->session->wait); } static void vpu_service_set_freq(struct vpu_service_info *pservice, vpu_reg *reg) { VPU_FREQ curr = atomic_read(&pservice->freq_status); if (curr == reg->freq) { return ; } atomic_set(&pservice->freq_status, reg->freq); switch (reg->freq) { case VPU_FREQ_200M : { clk_set_rate(pservice->aclk_vcodec, 200*MHZ); //printk("default: 200M\n"); } break; case VPU_FREQ_266M : { clk_set_rate(pservice->aclk_vcodec, 266*MHZ); //printk("default: 266M\n"); } break; case VPU_FREQ_300M : { clk_set_rate(pservice->aclk_vcodec, 300*MHZ); //printk("default: 300M\n"); } break; case VPU_FREQ_400M : { clk_set_rate(pservice->aclk_vcodec, 400*MHZ); //printk("default: 400M\n"); } break; case VPU_FREQ_500M : { clk_set_rate(pservice->aclk_vcodec, 500*MHZ); } break; case VPU_FREQ_600M : { clk_set_rate(pservice->aclk_vcodec, 600*MHZ); } break; default : { if (soc_is_rk2928g()) { clk_set_rate(pservice->aclk_vcodec, 400*MHZ); } else { clk_set_rate(pservice->aclk_vcodec, 300*MHZ); } //printk("default: 300M\n"); } break; } } #if HEVC_SIM_ENABLE static void simulate_start(struct vpu_service_info *pservice); #endif static void reg_copy_to_hw(struct vpu_service_info *pservice, vpu_reg *reg) { int i; u32 *src = (u32 *)&reg->reg[0]; atomic_add(1, &pservice->total_running); atomic_add(1, &reg->session->task_running); if 
(pservice->auto_freq) {
        vpu_service_set_freq(pservice, reg);
    }
    switch (reg->type) {
    case VPU_ENC : {
        int enc_count = pservice->hw_info->enc_reg_num;
        u32 *dst = (u32 *)pservice->enc_dev.hwregs;
#if 0
        if (pservice->bug_dec_addr) {
#if !defined(CONFIG_ARCH_RK319X)
            cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
#endif
            cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
            cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
#if !defined(CONFIG_ARCH_RK319X)
            cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
#endif
        }
#endif
        pservice->reg_codec = reg;
        dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
        for (i = 0; i < VPU_REG_EN_ENC; i++)
            dst[i] = src[i];
        for (i = VPU_REG_EN_ENC + 1; i < enc_count; i++)
            dst[i] = src[i];
        dsb();
        dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
        dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC];
#if VPU_SERVICE_SHOW_TIME
        do_gettimeofday(&enc_start);
#endif
    } break;
    case VPU_DEC : {
        u32 *dst = (u32 *)pservice->dec_dev.hwregs;
        pservice->reg_codec = reg;
        if (pservice->hw_info->hw_id != HEVC_ID) {
            for (i = REG_NUM_9190_DEC - 1; i > VPU_REG_DEC_GATE; i--)
                dst[i] = src[i];
        } else {
            for (i = REG_NUM_HEVC_DEC - 1; i > VPU_REG_EN_DEC; i--) {
                dst[i] = src[i];
            }
        }
        dsb();
        if (pservice->hw_info->hw_id != HEVC_ID) {
            dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
            dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
        } else {
            dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
        }
        dsb();
        dmb();
#if VPU_SERVICE_SHOW_TIME
        do_gettimeofday(&dec_start);
#endif
    } break;
    case VPU_PP : {
        u32 *dst = (u32 *)pservice->dec_dev.hwregs + PP_INTERRUPT_REGISTER;
        pservice->reg_pproc = reg;
        dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
        for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_9190_PP; i++)
            dst[i] = src[i];
        dsb();
        dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
#if VPU_SERVICE_SHOW_TIME
        do_gettimeofday(&pp_start);
#endif
    } break;
    case VPU_DEC_PP : {
        u32 *dst = (u32 *)pservice->dec_dev.hwregs;
        pservice->reg_codec = reg;
        pservice->reg_pproc = reg;
        for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_9190_DEC_PP; i++)
            dst[i] = src[i];
        dst[VPU_REG_EN_DEC_PP] = src[VPU_REG_EN_DEC_PP] | 0x2;
        dsb();
        dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
        dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
        dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
#if VPU_SERVICE_SHOW_TIME
        do_gettimeofday(&dec_start);
#endif
    } break;
    default : {
        pr_err("error: unsupported session type %d", reg->type);
        atomic_sub(1, &pservice->total_running);
        atomic_sub(1, &reg->session->task_running);
        break;
    }
    }
#if HEVC_SIM_ENABLE
    if (pservice->hw_info->hw_id == HEVC_ID) {
        simulate_start(pservice);
    }
#endif
}

static void try_set_reg(struct vpu_service_info *pservice)
{
    // first get reg from reg list
    if (!list_empty(&pservice->waiting)) {
        int can_set = 0;
        vpu_reg *reg = list_entry(pservice->waiting.next, vpu_reg, status_link);
        vpu_service_power_on(pservice);
        switch (reg->type) {
        case VPU_ENC : {
            if ((NULL == pservice->reg_codec) && (NULL == pservice->reg_pproc))
                can_set = 1;
        } break;
        case VPU_DEC : {
            if (NULL == pservice->reg_codec)
                can_set = 1;
            if (pservice->auto_freq && (NULL != pservice->reg_pproc)) {
                can_set = 0;
            }
        } break;
        case VPU_PP : {
            if (NULL == pservice->reg_codec) {
                if (NULL == pservice->reg_pproc)
                    can_set = 1;
            } else {
                if ((VPU_DEC == pservice->reg_codec->type) && (NULL == pservice->reg_pproc))
                    can_set = 1;
                // can not change frequency when vpu is working
                if (pservice->auto_freq) {
                    can_set = 0;
                }
            }
        } break;
        case VPU_DEC_PP : {
            if ((NULL == pservice->reg_codec) && (NULL == pservice->reg_pproc))
                can_set = 1;
        } break;
        default : {
            printk("undefined reg type %d\n", reg->type);
        } break;
        }
        if (can_set) {
            reg_from_wait_to_run(pservice, reg);
            reg_copy_to_hw(pservice, reg);
        }
    }
}

static int return_reg(struct vpu_service_info *pservice, vpu_reg *reg, u32 __user *dst)
{
    int ret = 0;
    switch (reg->type) {
    case VPU_ENC : {
        if (copy_to_user(dst, &reg->reg[0], pservice->hw_info->enc_io_size))
            ret = -EFAULT;
        break;
    }
    case VPU_DEC : {
        int reg_len = pservice->hw_info->hw_id == HEVC_ID ? REG_NUM_HEVC_DEC : REG_NUM_9190_DEC;
        if (copy_to_user(dst, &reg->reg[0], SIZE_REG(reg_len)))
            ret = -EFAULT;
        break;
    }
    case VPU_PP : {
        if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_9190_PP)))
            ret = -EFAULT;
        break;
    }
    case VPU_DEC_PP : {
        if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_9190_DEC_PP)))
            ret = -EFAULT;
        break;
    }
    default : {
        ret = -EFAULT;
        pr_err("error: copy reg to user with unknown type %d\n", reg->type);
        break;
    }
    }
    reg_deinit(pservice, reg);
    return ret;
}

static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct vpu_service_info *pservice = container_of(filp->f_dentry->d_inode->i_cdev, struct vpu_service_info, cdev);
    vpu_session *session = (vpu_session *)filp->private_data;
    if (NULL == session) {
        return -EINVAL;
    }
    switch (cmd) {
    case VPU_IOC_SET_CLIENT_TYPE : {
        session->type = (VPU_CLIENT_TYPE)arg;
        break;
    }
    case VPU_IOC_GET_HW_FUSE_STATUS : {
        vpu_request req;
        if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
            pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
            return -EFAULT;
        } else {
            if (VPU_ENC != session->type) {
                if (copy_to_user((void __user *)req.req, &pservice->dec_config, sizeof(VPUHwDecConfig_t))) {
                    pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
                    return -EFAULT;
                }
            } else {
                if (copy_to_user((void __user *)req.req, &pservice->enc_config, sizeof(VPUHwEncConfig_t))) {
                    pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
                    return -EFAULT;
                }
            }
        }
        break;
    }
    case VPU_IOC_SET_REG : {
        vpu_request req;
        vpu_reg *reg;
        if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
            pr_err("error: VPU_IOC_SET_REG copy_from_user failed\n");
            return -EFAULT;
        }
        reg = reg_init(pservice, session, (void __user *)req.req, req.size);
        if (NULL == reg) {
            return -EFAULT;
        } else {
            mutex_lock(&pservice->lock);
            try_set_reg(pservice);
            mutex_unlock(&pservice->lock);
        }
        break;
    }
    case VPU_IOC_GET_REG : {
        vpu_request req;
        vpu_reg *reg;
        if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
            pr_err("error: VPU_IOC_GET_REG copy_from_user failed\n");
            return -EFAULT;
        } else {
            int ret = wait_event_timeout(session->wait, !list_empty(&session->done), VPU_TIMEOUT_DELAY);
            if (!list_empty(&session->done)) {
                if (ret < 0) {
                    pr_err("warning: pid %d wait task success but wait_event ret %d\n", session->pid, ret);
                }
                ret = 0;
            } else {
                if (unlikely(ret < 0)) {
                    pr_err("error: pid %d wait task ret %d\n", session->pid, ret);
                } else if (0 == ret) {
                    pr_err("error: pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
                    ret = -ETIMEDOUT;
                }
            }
            if (ret < 0) {
                int task_running = atomic_read(&session->task_running);
                mutex_lock(&pservice->lock);
                vpu_service_dump(pservice);
                if (task_running) {
                    atomic_set(&session->task_running, 0);
                    atomic_sub(task_running, &pservice->total_running);
                    printk("%d task is running but not return, reset hardware...", task_running);
                    vpu_reset(pservice);
                    printk("done\n");
                }
                vpu_service_session_clear(pservice, session);
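                // On timeout/error every register set this session queued is
                // dropped (never handed back), so user space must resubmit.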
                mutex_unlock(&pservice->lock);
                return ret;
            }
        }
        mutex_lock(&pservice->lock);
        reg = list_entry(session->done.next, vpu_reg, session_link);
        return_reg(pservice, reg, (u32 __user *)req.req);
        mutex_unlock(&pservice->lock);
        break;
    }
    default : {
        pr_err("error: unknown vpu service ioctl cmd %x\n", cmd);
        break;
    }
    }
    return 0;
}

static int vpu_service_check_hw(vpu_service_info *p, unsigned long hw_addr)
{
    int ret = -EINVAL, i = 0;
    volatile u32 *tmp = (volatile u32 *)ioremap_nocache(hw_addr, 0x4);
    u32 enc_id = *tmp;
#if HEVC_SIM_ENABLE
    /// temporary, hevc driver test.
    if (strncmp(dev_name(p->dev), "hevc_service", strlen("hevc_service")) == 0) {
        p->hw_info = &vpu_hw_set[2];
        return 0;
    }
#endif
    enc_id = (enc_id >> 16) & 0xFFFF;
    pr_info("checking hw id %x\n", enc_id);
    p->hw_info = NULL;
    for (i = 0; i < ARRAY_SIZE(vpu_hw_set); i++) {
        if (enc_id == vpu_hw_set[i].hw_id) {
            p->hw_info = &vpu_hw_set[i];
            ret = 0;
            break;
        }
    }
    iounmap((void *)tmp);
    return ret;
}

static int vpu_service_open(struct inode *inode, struct file *filp)
{
    struct vpu_service_info *pservice = container_of(inode->i_cdev, struct vpu_service_info, cdev);
    vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
    if (NULL == session) {
        pr_err("error: unable to allocate memory for vpu_session.");
        return -ENOMEM;
    }

    session->type = VPU_TYPE_BUTT;
    session->pid = current->pid;
    INIT_LIST_HEAD(&session->waiting);
    INIT_LIST_HEAD(&session->running);
    INIT_LIST_HEAD(&session->done);
    INIT_LIST_HEAD(&session->list_session);
    init_waitqueue_head(&session->wait);
    atomic_set(&session->task_running, 0);
    mutex_lock(&pservice->lock);
    list_add_tail(&session->list_session, &pservice->session);
    filp->private_data = (void *)session;
    mutex_unlock(&pservice->lock);

    pr_debug("dev opened\n");
    return nonseekable_open(inode, filp);
}

static int vpu_service_release(struct inode *inode, struct file *filp)
{
    struct vpu_service_info *pservice = container_of(inode->i_cdev, struct vpu_service_info, cdev);
    int task_running;
    vpu_session *session = (vpu_session *)filp->private_data;
    if (NULL == session)
        return -EINVAL;

    task_running = atomic_read(&session->task_running);
    if (task_running) {
        pr_err("error: vpu_service session %d still has %d task running when closing\n", session->pid, task_running);
        msleep(50);
    }
    wake_up(&session->wait);

    mutex_lock(&pservice->lock);
    /* remove this filp from the asynchronously notified filp's */
    list_del_init(&session->list_session);
    vpu_service_session_clear(pservice, session);
    kfree(session);
    filp->private_data = NULL;
    mutex_unlock(&pservice->lock);

    pr_debug("dev closed\n");
    return 0;
}

static const struct file_operations vpu_service_fops = {
    .unlocked_ioctl = vpu_service_ioctl,
    .open = vpu_service_open,
    .release = vpu_service_release,
    //.fasync = vpu_service_fasync,
};

static irqreturn_t vdpu_irq(int irq, void *dev_id);
static irqreturn_t vdpu_isr(int irq, void *dev_id);
static irqreturn_t vepu_irq(int irq, void *dev_id);
static irqreturn_t vepu_isr(int irq, void *dev_id);
static void get_hw_info(struct vpu_service_info *pservice);

#if HEVC_SIM_ENABLE
static void simulate_work(struct work_struct *work_s)
{
    struct delayed_work *dlwork = container_of(work_s, struct delayed_work, work);
    struct vpu_service_info *pservice = container_of(dlwork, struct vpu_service_info, simulate_work);
    vpu_device *dev = &pservice->dec_dev;
    if (!list_empty(&pservice->running)) {
        atomic_add(1, &dev->irq_count_codec);
        vdpu_isr(0, (void*)pservice);
    } else {
        //simulate_start(pservice);
        pr_err("empty running queue\n");
    }
}

static void simulate_init(struct
vpu_service_info *pservice)
{
    INIT_DELAYED_WORK(&pservice->simulate_work, simulate_work);
}

static void simulate_start(struct vpu_service_info *pservice)
{
    cancel_delayed_work_sync(&pservice->power_off_work);
    queue_delayed_work(system_nrt_wq, &pservice->simulate_work, VPU_SIMULATE_DELAY);
}
#endif

#if HEVC_TEST_ENABLE
static int hevc_test_case0(vpu_service_info *pservice);
#endif

#if defined(CONFIG_VCODEC_MMU) & defined(CONFIG_ION_ROCKCHIP)
extern struct ion_client *rockchip_ion_client_create(const char * name);
#endif

static int vcodec_probe(struct platform_device *pdev)
{
    int ret = 0;
    struct resource *res = NULL;
    struct device *dev = &pdev->dev;
    void __iomem *regs = NULL;
    struct device_node *np = pdev->dev.of_node;
    struct vpu_service_info *pservice = devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
    char *prop = (char*)dev_name(dev);
#if defined(CONFIG_VCODEC_MMU)
    struct device *mmu_dev = NULL;
    char mmu_dev_dts_name[40];
#endif

    pr_info("probe device %s\n", dev_name(dev));

    of_property_read_string(np, "name", (const char**)&prop);
    dev_set_name(dev, prop);

    if (strcmp(dev_name(dev), "hevc_service") == 0) {
        pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
    } else if (strcmp(dev_name(dev), "vpu_service") == 0) {
        pservice->dev_id = VCODEC_DEVICE_ID_VPU;
    } else {
        dev_err(dev, "Unknown device %s to probe\n", dev_name(dev));
        return -1;
    }

    wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
    INIT_LIST_HEAD(&pservice->waiting);
    INIT_LIST_HEAD(&pservice->running);
    INIT_LIST_HEAD(&pservice->done);
    INIT_LIST_HEAD(&pservice->session);
    mutex_init(&pservice->lock);
    pservice->reg_codec = NULL;
    pservice->reg_pproc = NULL;
    atomic_set(&pservice->total_running, 0);
    pservice->enabled = false;
    pservice->dev = dev;

    vpu_get_clk(pservice);

    INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);

    vpu_service_power_on(pservice);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    regs = devm_ioremap_resource(pservice->dev, res);
    if (IS_ERR(regs)) {
        ret = PTR_ERR(regs);
        goto err;
    }

    ret = vpu_service_check_hw(pservice, res->start);
    if (ret < 0) {
        pr_err("error: hw info check failed\n");
        goto err;
    }

    /// define regs address.
    pservice->dec_dev.iobaseaddr = res->start + pservice->hw_info->dec_offset;
    pservice->dec_dev.iosize = pservice->hw_info->dec_io_size;
    pservice->dec_dev.hwregs = (volatile u32 *)((u8 *)regs + pservice->hw_info->dec_offset);
    pservice->reg_size = pservice->dec_dev.iosize;

    if (pservice->hw_info->hw_id != HEVC_ID) {
        pservice->enc_dev.iobaseaddr = res->start + pservice->hw_info->enc_offset;
        pservice->enc_dev.iosize = pservice->hw_info->enc_io_size;
        pservice->reg_size = pservice->reg_size > pservice->enc_dev.iosize ?
pservice->reg_size : pservice->enc_dev.iosize; pservice->enc_dev.hwregs = (volatile u32 *)((u8 *)regs + pservice->hw_info->enc_offset); pservice->irq_enc = platform_get_irq_byname(pdev, "irq_enc"); if (pservice->irq_enc < 0) { dev_err(pservice->dev, "cannot find IRQ encoder\n"); ret = -ENXIO; goto err; } ret = devm_request_threaded_irq(pservice->dev, pservice->irq_enc, vepu_irq, vepu_isr, 0, dev_name(pservice->dev), (void *)pservice); if (ret) { dev_err(pservice->dev, "error: can't request vepu irq %d\n", pservice->irq_enc); goto err; } } pservice->irq_dec = platform_get_irq_byname(pdev, "irq_dec"); if (pservice->irq_dec < 0) { dev_err(pservice->dev, "cannot find IRQ decoder\n"); ret = -ENXIO; goto err; } /* get the IRQ line */ ret = devm_request_threaded_irq(pservice->dev, pservice->irq_dec, vdpu_irq, vdpu_isr, 0, dev_name(pservice->dev), (void *)pservice); if (ret) { dev_err(pservice->dev, "error: can't request vdpu irq %d\n", pservice->irq_dec); goto err; } atomic_set(&pservice->dec_dev.irq_count_codec, 0); atomic_set(&pservice->dec_dev.irq_count_pp, 0); atomic_set(&pservice->enc_dev.irq_count_codec, 0); atomic_set(&pservice->enc_dev.irq_count_pp, 0); /// create device ret = alloc_chrdev_region(&pservice->dev_t, 0, 1, dev_name(dev)); if (ret) { dev_err(dev, "alloc dev_t failed\n"); goto err; } cdev_init(&pservice->cdev, &vpu_service_fops); pservice->cdev.owner = THIS_MODULE; pservice->cdev.ops = &vpu_service_fops; ret = cdev_add(&pservice->cdev, pservice->dev_t, 1); if (ret) { dev_err(dev, "add dev_t failed\n"); goto err; } pservice->cls = class_create(THIS_MODULE, dev_name(dev)); if (IS_ERR(pservice->cls)) { ret = PTR_ERR(pservice->cls); dev_err(dev, "class_create err:%d\n", ret); goto err; } pservice->child_dev = device_create(pservice->cls, dev, pservice->dev_t, NULL, dev_name(dev)); platform_set_drvdata(pdev, pservice); get_hw_info(pservice); #ifdef CONFIG_DEBUG_FS pservice->debugfs_dir = vcodec_debugfs_create_device_dir((char*)dev_name(dev), parent); if (pservice->debugfs_dir == NULL) { pr_err("create debugfs dir %s failed\n", dev_name(dev)); } pservice->debugfs_file_regs = debugfs_create_file("regs", 0664, pservice->debugfs_dir, pservice, &debug_vcodec_fops); #endif vpu_service_power_off(pservice); pr_info("init success\n"); #if defined(CONFIG_VCODEC_MMU) & defined(CONFIG_ION_ROCKCHIP) pservice->ion_client = rockchip_ion_client_create("vpu"); if (IS_ERR(pservice->ion_client)) { dev_err(&pdev->dev, "failed to create ion client for vcodec"); return PTR_ERR(pservice->ion_client); } else { dev_info(&pdev->dev, "vcodec ion client create success!\n"); } sprintf(mmu_dev_dts_name, "iommu,%s", dev_name(dev)); mmu_dev = rockchip_get_sysmmu_device_by_compatible(mmu_dev_dts_name); platform_set_sysmmu(mmu_dev, pservice->dev); iovmm_activate(pservice->dev); #endif #if HEVC_SIM_ENABLE if (pservice->hw_info->hw_id == HEVC_ID) { simulate_init(pservice); } #endif #if HEVC_TEST_ENABLE hevc_test_case0(pservice); #endif return 0; err: pr_info("init failed\n"); vpu_service_power_off(pservice); vpu_put_clk(pservice); wake_lock_destroy(&pservice->wake_lock); if (res) { if (regs) { devm_ioremap_release(&pdev->dev, res); } devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); } if (pservice->irq_enc > 0) { free_irq(pservice->irq_enc, (void *)pservice); } if (pservice->irq_dec > 0) { free_irq(pservice->irq_dec, (void *)pservice); } if (pservice->child_dev) { device_destroy(pservice->cls, pservice->dev_t); cdev_del(&pservice->cdev); unregister_chrdev_region(pservice->dev_t, 1); } if 
(pservice->cls) {
        class_destroy(pservice->cls);
    }
    return ret;
}

static int vcodec_remove(struct platform_device *pdev)
{
    struct vpu_service_info *pservice = platform_get_drvdata(pdev);
    struct resource *res;

    device_destroy(pservice->cls, pservice->dev_t);
    class_destroy(pservice->cls);
    cdev_del(&pservice->cdev);
    unregister_chrdev_region(pservice->dev_t, 1);

    free_irq(pservice->irq_enc, (void *)&pservice->enc_dev);
    free_irq(pservice->irq_dec, (void *)&pservice->dec_dev);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    devm_ioremap_release(&pdev->dev, res);
    devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
    vpu_put_clk(pservice);
    wake_lock_destroy(&pservice->wake_lock);

#ifdef CONFIG_DEBUG_FS
    if (pservice->debugfs_file_regs) {
        debugfs_remove(pservice->debugfs_file_regs);
    }
    if (pservice->debugfs_dir) {
        debugfs_remove(pservice->debugfs_dir);
    }
#endif

    return 0;
}

#if defined(CONFIG_OF)
static const struct of_device_id vcodec_service_dt_ids[] = {
    {.compatible = "vpu_service",},
    {.compatible = "rockchip,hevc_service",},
    {},
};
#endif

static struct platform_driver vcodec_driver = {
    .probe = vcodec_probe,
    .remove = vcodec_remove,
    .driver = {
        .name = "vcodec",
        .owner = THIS_MODULE,
#if defined(CONFIG_OF)
        .of_match_table = of_match_ptr(vcodec_service_dt_ids),
#endif
    },
};

static void get_hw_info(struct vpu_service_info *pservice)
{
    VPUHwDecConfig_t *dec = &pservice->dec_config;
    VPUHwEncConfig_t *enc = &pservice->enc_config;

    if (pservice->dev_id == VCODEC_DEVICE_ID_VPU) {
        u32 configReg = pservice->dec_dev.hwregs[VPU_DEC_HWCFG0];
        u32 asicID = pservice->dec_dev.hwregs[0];

        dec->h264Support = (configReg >> DWL_H264_E) & 0x3U;
        dec->jpegSupport = (configReg >> DWL_JPEG_E) & 0x01U;
        if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
            dec->jpegSupport = JPEG_PROGRESSIVE;
        dec->mpeg4Support = (configReg >> DWL_MPEG4_E) & 0x3U;
        dec->vc1Support = (configReg >> DWL_VC1_E) & 0x3U;
        dec->mpeg2Support = (configReg >> DWL_MPEG2_E) & 0x01U;
        dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
        dec->refBufSupport = (configReg >> DWL_REF_BUFF_E) & 0x01U;
        dec->vp6Support = (configReg >> DWL_VP6_E) & 0x01U;
        if (!soc_is_rk3190() && !soc_is_rk3288()) {
            dec->maxDecPicWidth = configReg & 0x07FFU;
        } else {
            dec->maxDecPicWidth = 4096;
        }

        /* 2nd Config register */
        configReg = pservice->dec_dev.hwregs[VPU_DEC_HWCFG1];
        if (dec->refBufSupport) {
            if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
                dec->refBufSupport |= 2;
            if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
                dec->refBufSupport |= 4;
        }
        dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
        dec->vp7Support = (configReg >> DWL_VP7_E) & 0x01U;
        dec->vp8Support = (configReg >> DWL_VP8_E) & 0x01U;
        dec->avsSupport = (configReg >> DWL_AVS_E) & 0x01U;

        /* JPEG extensions */
        if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
            dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
        } else {
            dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
        }

        if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
            dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
        } else {
            dec->rvSupport = RV_NOT_SUPPORTED;
        }
        dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;

        if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
            dec->refBufSupport |= 8; /* enable HW support for offset */
        }

        /// fuse register values are not valid in rk319x vpu and following, so only read them on older SoCs.
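        // The fuse words (VPU_DEC_HW_FUSE_CFG / VPU_PP_HW_FUSE_CFG) can only
        // restrict what the synthesis-config registers advertise, so the block
        // below reads them and clamps each dec->*Support capability and the
        // maximum picture widths accordingly.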
        if (!soc_is_rk3190() && !soc_is_rk3288()) {
            VPUHwFuseStatus_t hwFuseSts;
            /* Decoder fuse configuration */
            u32 fuseReg = pservice->dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];

            hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
            hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
            hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
            hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
            hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
            hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
            hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
            hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
            hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
            hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
            hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
            hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
            hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
            hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;

            /* check max. decoder output width */
            if (fuseReg & 0x8000U)
                hwFuseSts.maxDecPicWidthFuse = 1920;
            else if (fuseReg & 0x4000U)
                hwFuseSts.maxDecPicWidthFuse = 1280;
            else if (fuseReg & 0x2000U)
                hwFuseSts.maxDecPicWidthFuse = 720;
            else if (fuseReg & 0x1000U)
                hwFuseSts.maxDecPicWidthFuse = 352;
            else /* remove warning */
                hwFuseSts.maxDecPicWidthFuse = 352;

            hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;

            /* Pp configuration */
            configReg = pservice->dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];
            if ((configReg >> DWL_PP_E) & 0x01U) {
                dec->ppSupport = 1;
                dec->maxPpOutPicWidth = configReg & 0x07FFU;
                /*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
                dec->ppConfig = configReg;
            } else {
                dec->ppSupport = 0;
                dec->maxPpOutPicWidth = 0;
                dec->ppConfig = 0;
            }

            /* check the HW version */
            if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
                /* Pp configuration */
                configReg = pservice->dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
                if ((configReg >> DWL_PP_E) & 0x01U) {
                    /* Pp fuse configuration */
                    u32 fuseRegPp = pservice->dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];
                    if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
                        hwFuseSts.ppSupportFuse = 1;
                        /* check max.
pp output width */ if (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920; else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280; else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720; else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352; else hwFuseSts.maxPpOutPicWidthFuse = 352; hwFuseSts.ppConfigFuse = fuseRegPp; } else { hwFuseSts.ppSupportFuse = 0; hwFuseSts.maxPpOutPicWidthFuse = 0; hwFuseSts.ppConfigFuse = 0; } } else { hwFuseSts.ppSupportFuse = 0; hwFuseSts.maxPpOutPicWidthFuse = 0; hwFuseSts.ppConfigFuse = 0; } if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse) dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse; if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse) dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse; if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED; if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED; if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED; if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED; if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse) dec->jpegSupport = JPEG_BASELINE; if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED; if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED; if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED; if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED; if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED; if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED; /* check the pp config vs fuse status */ if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) { u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25); u32 alphaBlend = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24); u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25); u32 alphaBlendFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24); if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000; if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000; } if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED; if (!hwFuseSts.refBufSupportFuse) dec->refBufSupport = REF_BUF_NOT_SUPPORTED; if (!hwFuseSts.rvSupportFuse) dec->rvSupport = RV_NOT_SUPPORTED; if (!hwFuseSts.avsSupportFuse) dec->avsSupport = AVS_NOT_SUPPORTED; if (!hwFuseSts.mvcSupportFuse) dec->mvcSupport = MVC_NOT_SUPPORTED; } } configReg = pservice->enc_dev.hwregs[63]; enc->maxEncodedWidth = configReg & ((1 << 11) - 1); enc->h264Enabled = (configReg >> 27) & 1; enc->mpeg4Enabled = (configReg >> 26) & 1; enc->jpegEnabled = (configReg >> 25) & 1; enc->vsEnabled = (configReg >> 24) & 1; enc->rgbEnabled = (configReg >> 28) & 1; //enc->busType = (configReg >> 20) & 15; //enc->synthesisLanguage = (configReg >> 16) & 15; //enc->busWidth = (configReg >> 12) & 15; enc->reg_size = pservice->reg_size; enc->reserv[0] = enc->reserv[1] = 0; pservice->auto_freq = soc_is_rk2928g() || soc_is_rk2928l() || soc_is_rk2926() || soc_is_rk3288(); if (pservice->auto_freq) { pr_info("vpu_service set to auto frequency mode\n"); atomic_set(&pservice->freq_status, VPU_FREQ_BUT); } pservice->bug_dec_addr = cpu_is_rk30xx(); //printk("cpu 3066b bug %d\n", service.bug_dec_addr); } else { // disable frequency switch in hevc. 
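        // (Presumably because auto_freq only scales aclk_vcodec, while the
        // HEVC block also has its own clk_core/clk_cabac clocks; see
        // vpu_get_clk().)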
pservice->auto_freq = false; } } static irqreturn_t vdpu_irq(int irq, void *dev_id) { struct vpu_service_info *pservice = (struct vpu_service_info*)dev_id; vpu_device *dev = &pservice->dec_dev; u32 raw_status; u32 irq_status = raw_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER); pr_debug("dec_irq\n"); if (irq_status & DEC_INTERRUPT_BIT) { pr_debug("dec_isr dec %x\n", irq_status); if ((irq_status & 0x40001) == 0x40001) { do { irq_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER); } while ((irq_status & 0x40001) == 0x40001); } /* clear dec IRQ */ if (pservice->hw_info->hw_id != HEVC_ID) { writel(irq_status & (~DEC_INTERRUPT_BIT|DEC_BUFFER_EMPTY_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER); } else { /*writel(irq_status & (~(DEC_INTERRUPT_BIT|HEVC_DEC_INT_RAW_BIT|HEVC_DEC_STR_ERROR_BIT|HEVC_DEC_BUS_ERROR_BIT|HEVC_DEC_BUFFER_EMPTY_BIT)), dev->hwregs + DEC_INTERRUPT_REGISTER);*/ writel(0, dev->hwregs + DEC_INTERRUPT_REGISTER); } atomic_add(1, &dev->irq_count_codec); } if (pservice->hw_info->hw_id != HEVC_ID) { irq_status = readl(dev->hwregs + PP_INTERRUPT_REGISTER); if (irq_status & PP_INTERRUPT_BIT) { pr_debug("vdpu_isr pp %x\n", irq_status); /* clear pp IRQ */ writel(irq_status & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER); atomic_add(1, &dev->irq_count_pp); } } pservice->irq_status = raw_status; return IRQ_WAKE_THREAD; } static irqreturn_t vdpu_isr(int irq, void *dev_id) { struct vpu_service_info *pservice = (struct vpu_service_info*)dev_id; vpu_device *dev = &pservice->dec_dev; mutex_lock(&pservice->lock); if (atomic_read(&dev->irq_count_codec)) { #if VPU_SERVICE_SHOW_TIME do_gettimeofday(&dec_end); pr_info("dec task: %ld ms\n", (dec_end.tv_sec - dec_start.tv_sec) * 1000 + (dec_end.tv_usec - dec_start.tv_usec) / 1000); #endif atomic_sub(1, &dev->irq_count_codec); if (NULL == pservice->reg_codec) { pr_err("error: dec isr with no task waiting\n"); } else { reg_from_run_to_done(pservice, pservice->reg_codec); } } if (atomic_read(&dev->irq_count_pp)) { #if VPU_SERVICE_SHOW_TIME do_gettimeofday(&pp_end); printk("pp task: %ld ms\n", (pp_end.tv_sec - pp_start.tv_sec) * 1000 + (pp_end.tv_usec - pp_start.tv_usec) / 1000); #endif atomic_sub(1, &dev->irq_count_pp); if (NULL == pservice->reg_pproc) { pr_err("error: pp isr with no task waiting\n"); } else { reg_from_run_to_done(pservice, pservice->reg_pproc); } } try_set_reg(pservice); mutex_unlock(&pservice->lock); return IRQ_HANDLED; } static irqreturn_t vepu_irq(int irq, void *dev_id) { //struct vpu_device *dev = (struct vpu_device *) dev_id; struct vpu_service_info *pservice = (struct vpu_service_info*)dev_id; vpu_device *dev = &pservice->enc_dev; u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER); pr_debug("vepu_irq irq status %x\n", irq_status); #if VPU_SERVICE_SHOW_TIME do_gettimeofday(&enc_end); pr_info("enc task: %ld ms\n", (enc_end.tv_sec - enc_start.tv_sec) * 1000 + (enc_end.tv_usec - enc_start.tv_usec) / 1000); #endif if (likely(irq_status & ENC_INTERRUPT_BIT)) { /* clear enc IRQ */ writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER); atomic_add(1, &dev->irq_count_codec); } pservice->irq_status = irq_status; return IRQ_WAKE_THREAD; } static irqreturn_t vepu_isr(int irq, void *dev_id) { //struct vpu_device *dev = (struct vpu_device *) dev_id; struct vpu_service_info *pservice = (struct vpu_service_info*)dev_id; vpu_device *dev = &pservice->enc_dev; mutex_lock(&pservice->lock); if (atomic_read(&dev->irq_count_codec)) { atomic_sub(1, &dev->irq_count_codec); if (NULL == 
pservice->reg_codec) { pr_err("error: enc isr with no task waiting\n"); } else { reg_from_run_to_done(pservice, pservice->reg_codec); } } try_set_reg(pservice); mutex_unlock(&pservice->lock); return IRQ_HANDLED; } static int __init vcodec_service_init(void) { int ret; if ((ret = platform_driver_register(&vcodec_driver)) != 0) { pr_err("Platform device register failed (%d).\n", ret); return ret; } #ifdef CONFIG_DEBUG_FS vcodec_debugfs_init(); #endif return ret; } static void __exit vcodec_service_exit(void) { #ifdef CONFIG_DEBUG_FS vcodec_debugfs_exit(); #endif platform_driver_unregister(&vcodec_driver); } module_init(vcodec_service_init); module_exit(vcodec_service_exit); #ifdef CONFIG_DEBUG_FS #include <linux/seq_file.h> static int vcodec_debugfs_init() { parent = debugfs_create_dir("vcodec", NULL); if (!parent) return -1; return 0; } static void vcodec_debugfs_exit() { debugfs_remove(parent); } static struct dentry* vcodec_debugfs_create_device_dir(char *dirname, struct dentry *parent) { return debugfs_create_dir(dirname, parent); } static int debug_vcodec_show(struct seq_file *s, void *unused) { struct vpu_service_info *pservice = s->private; unsigned int i, n; vpu_reg *reg, *reg_tmp; vpu_session *session, *session_tmp; mutex_lock(&pservice->lock); vpu_service_power_on(pservice); if (pservice->hw_info->hw_id != HEVC_ID) { seq_printf(s, "\nENC Registers:\n"); n = pservice->enc_dev.iosize >> 2; for (i = 0; i < n; i++) { seq_printf(s, "\tswreg%d = %08X\n", i, readl(pservice->enc_dev.hwregs + i)); } } seq_printf(s, "\nDEC Registers:\n"); n = pservice->dec_dev.iosize >> 2; for (i = 0; i < n; i++) { seq_printf(s, "\tswreg%d = %08X\n", i, readl(pservice->dec_dev.hwregs + i)); } seq_printf(s, "\nvpu service status:\n"); list_for_each_entry_safe(session, session_tmp, &pservice->session, list_session) { seq_printf(s, "session pid %d type %d:\n", session->pid, session->type); //seq_printf(s, "waiting reg set %d\n"); list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) { seq_printf(s, "waiting register set\n"); } list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) { seq_printf(s, "running register set\n"); } list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) { seq_printf(s, "done register set\n"); } } mutex_unlock(&pservice->lock); return 0; } static int debug_vcodec_open(struct inode *inode, struct file *file) { return single_open(file, debug_vcodec_show, inode->i_private); } #endif #if HEVC_TEST_ENABLE & defined(CONFIG_ION_ROCKCHIP) #include "hevc_test_inc/pps_00.h" #include "hevc_test_inc/register_00.h" #include "hevc_test_inc/rps_00.h" #include "hevc_test_inc/scaling_list_00.h" #include "hevc_test_inc/stream_00.h" #include "hevc_test_inc/pps_01.h" #include "hevc_test_inc/register_01.h" #include "hevc_test_inc/rps_01.h" #include "hevc_test_inc/scaling_list_01.h" #include "hevc_test_inc/stream_01.h" #include "hevc_test_inc/cabac.h" extern struct ion_client *rockchip_ion_client_create(const char * name); static struct ion_client *ion_client = NULL; u8* get_align_ptr(u8* tbl, int len, u32 *phy) { int size = (len+15) & (~15); struct ion_handle *handle; u8 *ptr;// = (u8*)kzalloc(size, GFP_KERNEL); if (ion_client == NULL) { ion_client = rockchip_ion_client_create("vcodec"); } handle = ion_alloc(ion_client, (size_t)len, 16, ION_HEAP(ION_CMA_HEAP_ID), 0); ptr = ion_map_kernel(ion_client, handle); ion_phys(ion_client, handle, phy, &size); memcpy(ptr, tbl, len); return ptr; } u8* get_align_ptr_no_copy(int len, u32 *phy) { int size = (len+15) & 
(~15);
    struct ion_handle *handle;
    u8 *ptr;// = (u8*)kzalloc(size, GFP_KERNEL);
    if (ion_client == NULL) {
        ion_client = rockchip_ion_client_create("vcodec");
    }
    handle = ion_alloc(ion_client, (size_t)len, 16, ION_HEAP(ION_CMA_HEAP_ID), 0);
    ptr = ion_map_kernel(ion_client, handle);
    ion_phys(ion_client, handle, phy, &size);
    return ptr;
}

#define TEST_CNT 2
static int hevc_test_case0(vpu_service_info *pservice)
{
    vpu_session session;
    vpu_reg *reg;
    unsigned long size = 272;//sizeof(register_00); // registers array length
    int testidx = 0;
    int ret = 0;

    u8 *pps_tbl[TEST_CNT];
    u8 *register_tbl[TEST_CNT];
    u8 *rps_tbl[TEST_CNT];
    u8 *scaling_list_tbl[TEST_CNT];
    u8 *stream_tbl[TEST_CNT];

    int stream_size[2];
    int pps_size[2];
    int rps_size[2];
    int scl_size[2];
    int cabac_size[2];

    u32 phy_pps;
    u32 phy_rps;
    u32 phy_scl;
    u32 phy_str;
    u32 phy_yuv;
    u32 phy_ref;
    u32 phy_cabac;

    volatile u8 *stream_buf;
    volatile u8 *pps_buf;
    volatile u8 *rps_buf;
    volatile u8 *scl_buf;
    volatile u8 *yuv_buf;
    volatile u8 *cabac_buf;
    volatile u8 *ref_buf;
    u8 *pps;
    u8 *yuv[2];
    int i;

    pps_tbl[0] = pps_00;
    pps_tbl[1] = pps_01;
    register_tbl[0] = register_00;
    register_tbl[1] = register_01;
    rps_tbl[0] = rps_00;
    rps_tbl[1] = rps_01;
    scaling_list_tbl[0] = scaling_list_00;
    scaling_list_tbl[1] = scaling_list_01;
    stream_tbl[0] = stream_00;
    stream_tbl[1] = stream_01;

    stream_size[0] = sizeof(stream_00);
    stream_size[1] = sizeof(stream_01);
    pps_size[0] = sizeof(pps_00);
    pps_size[1] = sizeof(pps_01);
    rps_size[0] = sizeof(rps_00);
    rps_size[1] = sizeof(rps_01);
    scl_size[0] = sizeof(scaling_list_00);
    scl_size[1] = sizeof(scaling_list_01);
    cabac_size[0] = sizeof(Cabac_table);
    cabac_size[1] = sizeof(Cabac_table);

    // create session
    session.pid = current->pid;
    session.type = VPU_DEC;
    INIT_LIST_HEAD(&session.waiting);
    INIT_LIST_HEAD(&session.running);
    INIT_LIST_HEAD(&session.done);
    INIT_LIST_HEAD(&session.list_session);
    init_waitqueue_head(&session.wait);
    atomic_set(&session.task_running, 0);
    list_add_tail(&session.list_session, &pservice->session);

    yuv[0] = get_align_ptr_no_copy(256*256*2, &phy_yuv);
    yuv[1] = get_align_ptr_no_copy(256*256*2, &phy_ref);

    while (testidx < TEST_CNT) {
        // create registers
        reg = kmalloc(sizeof(vpu_reg)+pservice->reg_size, GFP_KERNEL);
        if (NULL == reg) {
            pr_err("error: kmalloc fail in reg_init\n");
            return -1;
        }
        if (size > pservice->reg_size) {
            printk("warning: vpu reg size %lu is larger than hw reg size %lu\n", size, pservice->reg_size);
            size = pservice->reg_size;
        }
        reg->session = &session;
        reg->type = session.type;
        reg->size = size;
        reg->freq = VPU_FREQ_DEFAULT;
        reg->reg = (unsigned long *)&reg[1];
        INIT_LIST_HEAD(&reg->session_link);
        INIT_LIST_HEAD(&reg->status_link);

        // TODO: stuff registers
        memcpy(&reg->reg[0], register_tbl[testidx], /*sizeof(register_00)*/ 176);

        stream_buf = get_align_ptr(stream_tbl[testidx], stream_size[testidx], &phy_str);
        pps_buf = get_align_ptr(pps_tbl[0], pps_size[0], &phy_pps);
        rps_buf = get_align_ptr(rps_tbl[testidx], rps_size[testidx], &phy_rps);
        scl_buf = get_align_ptr(scaling_list_tbl[testidx], scl_size[testidx], &phy_scl);
        cabac_buf = get_align_ptr(Cabac_table, cabac_size[testidx], &phy_cabac);

        pps = pps_buf;
        // TODO: replace register addresses
        for (i=0; i<64; i++) {
            u32 scaling_offset;
            u32 tmp;

            scaling_offset = (u32)pps[i*80+74];
            scaling_offset += (u32)pps[i*80+75] << 8;
            scaling_offset += (u32)pps[i*80+76] << 16;
            scaling_offset += (u32)pps[i*80+77] << 24;

            tmp = phy_scl + scaling_offset;

            pps[i*80+74] = tmp & 0xff;
            pps[i*80+75] = (tmp >> 8) & 0xff;
            pps[i*80+76] = (tmp >> 16) & 0xff;
            pps[i*80+77] = (tmp
>> 24) & 0xff;
        }

        printk("%s %d, phy stream %08x, phy pps %08x, phy rps %08x\n", __func__, __LINE__, phy_str, phy_pps, phy_rps);

        reg->reg[1] = 0x21;
        reg->reg[4] = phy_str;
        reg->reg[5] = ((stream_size[testidx]+15)&(~15))+64;
        reg->reg[6] = phy_cabac;
        reg->reg[7] = testidx?phy_ref:phy_yuv;
        reg->reg[42] = phy_pps;
        reg->reg[43] = phy_rps;
        for (i = 10; i <= 24; i++) {
            reg->reg[i] = phy_yuv;
        }

        mutex_lock(&pservice->lock);
        list_add_tail(&reg->status_link, &pservice->waiting);
        list_add_tail(&reg->session_link, &session.waiting);
        mutex_unlock(&pservice->lock);

        printk("%s %d %p\n", __func__, __LINE__, pservice);

        // stuff hardware
        try_set_reg(pservice);

        // wait for result
        ret = wait_event_timeout(session.wait, !list_empty(&session.done), VPU_TIMEOUT_DELAY);
        if (!list_empty(&session.done)) {
            if (ret < 0) {
                pr_err("warning: pid %d wait task success but wait_event ret %d\n", session.pid, ret);
            }
            ret = 0;
        } else {
            if (unlikely(ret < 0)) {
                pr_err("error: pid %d wait task ret %d\n", session.pid, ret);
            } else if (0 == ret) {
                pr_err("error: pid %d wait %d task done timeout\n", session.pid, atomic_read(&session.task_running));
                ret = -ETIMEDOUT;
            }
        }
        if (ret < 0) {
            int task_running = atomic_read(&session.task_running);
            int n;
            mutex_lock(&pservice->lock);
            vpu_service_dump(pservice);
            if (task_running) {
                atomic_set(&session.task_running, 0);
                atomic_sub(task_running, &pservice->total_running);
                printk("%d task is running but not return, reset hardware...", task_running);
                vpu_reset(pservice);
                printk("done\n");
            }
            vpu_service_session_clear(pservice, &session);
            mutex_unlock(&pservice->lock);

            printk("\nDEC Registers:\n");
            n = pservice->dec_dev.iosize >> 2;
            for (i=0; i<n; i++) {
                printk("\tswreg%d = %08X\n", i, readl(pservice->dec_dev.hwregs + i));
            }
            pr_err("test index %d failed\n", testidx);
            break;
        } else {
            pr_info("test index %d success\n", testidx);

            vpu_reg *reg = list_entry(session.done.next, vpu_reg, session_link);
            for (i=0; i<68; i++) {
                if (i % 4 == 0) {
                    printk("%02d: ", i);
                }
                printk("%08x ", reg->reg[i]);
                if ((i+1) % 4 == 0) {
                    printk("\n");
                }
            }
            testidx++;
        }

        reg_deinit(pservice, reg);
    }

    return 0;
}
#endif
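/*
 * Illustrative user-space sketch (not part of this driver): the minimal
 * sequence a client is expected to follow against the char device created
 * in vcodec_probe(). Assumptions are called out inline: the node name
 * "/dev/vpu_service" follows dev_name() above, the 60-word register image
 * matches REG_NUM_9190_DEC, and vcodec_service.h is visible to user space;
 * any of these may differ on a given build.
 *
 *    #include <fcntl.h>
 *    #include <sys/ioctl.h>
 *    #include "vcodec_service.h"
 *
 *    int fd = open("/dev/vpu_service", O_RDWR);    // assumed node name
 *    ioctl(fd, VPU_IOC_SET_CLIENT_TYPE, VPU_DEC);  // sets session->type
 *
 *    unsigned long regs[60] = {0};                 // a real hw register image
 *    vpu_request req = { .req = regs, .size = sizeof(regs) };
 *    ioctl(fd, VPU_IOC_SET_REG, &req);   // queue: reg_init() + try_set_reg()
 *    ioctl(fd, VPU_IOC_GET_REG, &req);   // block on session->wait, read back
 */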
crewrktablets/rk3x_kernel_3.10
arch/arm/mach-rockchip/vcodec_service.c
C
gpl-2.0
73376
/* cast5.c */
/*
    This file is part of the AVR-Crypto-Lib.
    Copyright (C) 2006-2015 Daniel Otte (bg@nerilex.org)

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * \file cast5.c
 * \author Daniel Otte
 * \email bg@nerilex.org
 * \date 2006-07-26
 * \par License:
 * GPLv3 or later
 * \brief Implementation of the CAST5 (aka CAST-128) cipher algorithm as described in RFC 2144
 *
 */

#include <stdint.h>
#include <string.h>
#include "cast5.h"
#include <avr/pgmspace.h>

#undef DEBUG

#ifdef DEBUG
#include "cli.h"
#endif

#include "cast5-sbox.h"

#define S5(x) pgm_read_dword(&s5[(x)])
#define S6(x) pgm_read_dword(&s6[(x)])
#define S7(x) pgm_read_dword(&s7[(x)])
#define S8(x) pgm_read_dword(&s8[(x)])

static void cast5_init_A(uint8_t *dest, uint8_t *src, bool bmode)
{
    uint8_t mask = bmode ? 0x8 : 0;

    *((uint32_t*) (&dest[0x0])) = *((uint32_t*) (&src[0x0 ^ mask]))
            ^ S5(src[0xD ^ mask]) ^ S6(src[0xF ^ mask]) ^ S7(src[0xC ^ mask])
            ^ S8(src[0xE ^ mask]) ^ S7(src[0x8 ^ mask]);
    *((uint32_t*) (&dest[0x4])) = *((uint32_t*) (&src[0x8 ^ mask]))
            ^ S5(dest[0x0]) ^ S6(dest[0x2]) ^ S7(dest[0x1]) ^ S8(dest[0x3])
            ^ S8(src[0xA ^ mask]);
    *((uint32_t*) (&dest[0x8])) = *((uint32_t*) (&src[0xC ^ mask]))
            ^ S5(dest[0x7]) ^ S6(dest[0x6]) ^ S7(dest[0x5]) ^ S8(dest[0x4])
            ^ S5(src[0x9 ^ mask]);
    *((uint32_t*) (&dest[0xC])) = *((uint32_t*) (&src[0x4 ^ mask]))
            ^ S5(dest[0xA]) ^ S6(dest[0x9]) ^ S7(dest[0xB]) ^ S8(dest[0x8])
            ^ S6(src[0xB ^ mask]);
}

static void cast5_init_M(uint8_t *dest, uint8_t *src, bool nmode, bool xmode)
{
    uint8_t nmt[] = { 0xB, 0xA, 0x9, 0x8, 0xF, 0xE, 0xD, 0xC,
                      0x3, 0x2, 0x1, 0x0, 0x7, 0x6, 0x5, 0x4 }; /* nmode table */
    uint8_t xmt[4][4] = { { 0x2, 0x6, 0x9, 0xC }, { 0x8, 0xD, 0x3, 0x7 },
                          { 0x3, 0x7, 0x8, 0xD }, { 0x9, 0xC, 0x2, 0x6 } };

#define NMT(x) (src[nmode?nmt[(x)]:(x)])
#define XMT(x) (src[xmt[(xmode<<1) + nmode][(x)]])

    *((uint32_t*) (&dest[0x0])) = S5(NMT(0x8)) ^ S6(NMT(0x9)) ^ S7(NMT(0x7))
            ^ S8(NMT(0x6)) ^ S5(XMT(0));
    *((uint32_t*) (&dest[0x4])) = S5(NMT(0xA)) ^ S6(NMT(0xB)) ^ S7(NMT(0x5))
            ^ S8(NMT(0x4)) ^ S6(XMT(1));
    *((uint32_t*) (&dest[0x8])) = S5(NMT(0xC)) ^ S6(NMT(0xD)) ^ S7(NMT(0x3))
            ^ S8(NMT(0x2)) ^ S7(XMT(2));
    *((uint32_t*) (&dest[0xC])) = S5(NMT(0xE)) ^ S6(NMT(0xF)) ^ S7(NMT(0x1))
            ^ S8(NMT(0x0)) ^ S8(XMT(3));
}

#define S5B(x) pgm_read_byte(3+(uint8_t*)(&s5[(x)]))
#define S6B(x) pgm_read_byte(3+(uint8_t*)(&s6[(x)]))
#define S7B(x) pgm_read_byte(3+(uint8_t*)(&s7[(x)]))
#define S8B(x) pgm_read_byte(3+(uint8_t*)(&s8[(x)]))

static void cast5_init_rM(uint8_t *klo, uint8_t *khi, uint8_t offset, uint8_t *src, bool nmode, bool xmode)
{
    uint8_t nmt[] = { 0xB, 0xA, 0x9, 0x8, 0xF, 0xE, 0xD, 0xC,
                      0x3, 0x2, 0x1, 0x0, 0x7, 0x6, 0x5, 0x4 }; /* nmode table */
    uint8_t xmt[4][4] = { { 0x2, 0x6, 0x9, 0xC }, { 0x8, 0xD, 0x3, 0x7 },
                          { 0x3, 0x7, 0x8, 0xD }, { 0x9, 0xC, 0x2, 0x6 } };
    uint8_t t, h = 0;

    t = S5B(NMT(0x8)) ^ S6B(NMT(0x9)) ^ S7B(NMT(0x7)) ^ S8B(NMT(0x6)) ^ S5B(XMT(0));
    klo[offset * 2] |= (t & 0x0f);
    h |= (t & 0x10);
    h >>= 1;

    t = S5B(NMT(0xA)) ^ S6B(NMT(0xB)) ^
S7B(NMT(0x5)) ^ S8B(NMT(0x4)) ^ S6B(XMT(1)); klo[offset * 2] |= (t << 4) & 0xf0; h |= t & 0x10; h >>= 1; t = S5B(NMT(0xC)) ^ S6B(NMT(0xD)) ^ S7B(NMT(0x3)) ^ S8B(NMT(0x2)) ^ S7B(XMT(2)); klo[offset * 2 + 1] |= t & 0xf; h |= t & 0x10; h >>= 1; t = S5B(NMT(0xE)) ^ S6B(NMT(0xF)) ^ S7B(NMT(0x1)) ^ S8B(NMT(0x0)) ^ S8B(XMT(3)); klo[offset * 2 + 1] |= t << 4; h |= t & 0x10; h >>= 1; #ifdef DEBUG cli_putstr("\r\n\t h="); cli_hexdump(&h,1); #endif khi[offset >> 1] |= h << ((offset & 0x1) ? 4 : 0); } #define S_5X(s) pgm_read_dword(&s5[BPX[(s)]]) #define S_6X(s) pgm_read_dword(&s6[BPX[(s)]]) #define S_7X(s) pgm_read_dword(&s7[BPX[(s)]]) #define S_8X(s) pgm_read_dword(&s8[BPX[(s)]]) #define S_5Z(s) pgm_read_dword(&s5[BPZ[(s)]]) #define S_6Z(s) pgm_read_dword(&s6[BPZ[(s)]]) #define S_7Z(s) pgm_read_dword(&s7[BPZ[(s)]]) #define S_8Z(s) pgm_read_dword(&s8[BPZ[(s)]]) void cast5_init(const void *key, uint16_t keylength_b, cast5_ctx_t *s) { /* we migth return if the key is valid and if setup was successful */ uint32_t x[4], z[4]; #define BPX ((uint8_t*)&(x[0])) #define BPZ ((uint8_t*)&(z[0])) s->shortkey = (keylength_b <= 80); /* littel endian only! */ memset(&(x[0]), 0, 16); /* set x to zero */ if (keylength_b > 128) keylength_b = 128; memcpy(&(x[0]), key, (keylength_b + 7) / 8); /* todo: merge a and b and compress the whole stuff */ /***** A *****/ cast5_init_A((uint8_t*) (&z[0]), (uint8_t*) (&x[0]), false); /***** M *****/ cast5_init_M((uint8_t*) (&(s->mask[0])), (uint8_t*) (&z[0]), false, false); /***** B *****/ cast5_init_A((uint8_t*) (&x[0]), (uint8_t*) (&z[0]), true); /***** N *****/ cast5_init_M((uint8_t*) (&(s->mask[4])), (uint8_t*) (&x[0]), true, false); /***** A *****/ cast5_init_A((uint8_t*) (&z[0]), (uint8_t*) (&x[0]), false); /***** N' *****/ cast5_init_M((uint8_t*) (&(s->mask[8])), (uint8_t*) (&z[0]), true, true); /***** B *****/ cast5_init_A((uint8_t*) (&x[0]), (uint8_t*) (&z[0]), true); /***** M' *****/ cast5_init_M((uint8_t*) (&(s->mask[12])), (uint8_t*) (&x[0]), false, true); /* that were the masking keys, now the rotation keys */ /* set the keys to zero */ memset(&(s->rotl[0]), 0, 8); s->roth[0] = s->roth[1] = 0; /***** A *****/ cast5_init_A((uint8_t*) (&z[0]), (uint8_t*) (&x[0]), false); /***** M *****/ cast5_init_rM(&(s->rotl[0]), &(s->roth[0]), 0, (uint8_t*) (&z[0]), false, false); /***** B *****/ cast5_init_A((uint8_t*) (&x[0]), (uint8_t*) (&z[0]), true); /***** N *****/ cast5_init_rM(&(s->rotl[0]), &(s->roth[0]), 1, (uint8_t*) (&x[0]), true, false); /***** A *****/ cast5_init_A((uint8_t*) (&z[0]), (uint8_t*) (&x[0]), false); /***** N' *****/ cast5_init_rM(&(s->rotl[0]), &(s->roth[0]), 2, (uint8_t*) (&z[0]), true, true); /***** B *****/ cast5_init_A((uint8_t*) (&x[0]), (uint8_t*) (&z[0]), true); /***** M' *****/ cast5_init_rM(&(s->rotl[0]), &(s->roth[0]), 3, (uint8_t*) (&x[0]), false, true); /* done ;-) */ } /********************************************************************************************************/ #define ROTL32(a,n) ((a)<<(n) | (a)>>(32-(n))) #define CHANGE_ENDIAN32(x) ((x)<<24 | (x)>>24 | ((x)&0xff00)<<8 | ((x)&0xff0000)>>8 ) typedef uint32_t cast5_f_t(uint32_t, uint32_t, uint8_t); #define IA 3 #define IB 2 #define IC 1 #define ID 0 static uint32_t cast5_f1(uint32_t d, uint32_t m, uint8_t r) { uint32_t t; t = ROTL32((d + m), r); #ifdef DEBUG uint32_t ia,ib,ic,id; cli_putstr("\r\n f1("); cli_hexdump(&d, 4); cli_putc(','); cli_hexdump(&m , 4); cli_putc(','); cli_hexdump(&r, 1);cli_putstr("): I="); cli_hexdump(&t, 4); ia = pgm_read_dword(&s1[((uint8_t*)&t)[IA]] ); ib = 
pgm_read_dword(&s2[((uint8_t*)&t)[IB]] ); ic = pgm_read_dword(&s3[((uint8_t*)&t)[IC]] ); id = pgm_read_dword(&s4[((uint8_t*)&t)[ID]] ); cli_putstr("\r\n\tIA="); cli_hexdump(&ia, 4); cli_putstr("\r\n\tIB="); cli_hexdump(&ib, 4); cli_putstr("\r\n\tIC="); cli_hexdump(&ic, 4); cli_putstr("\r\n\tID="); cli_hexdump(&id, 4); return (((ia ^ ib) - ic) + id); #else return ((( pgm_read_dword(&s1[((uint8_t*)&t)[IA]]) ^ pgm_read_dword(&s2[((uint8_t*)&t)[IB]])) - pgm_read_dword(&s3[((uint8_t*)&t)[IC]])) + pgm_read_dword(&s4[((uint8_t*)&t)[ID]])); #endif } static uint32_t cast5_f2(uint32_t d, uint32_t m, uint8_t r) { uint32_t t; t = ROTL32((d ^ m), r); #ifdef DEBUG uint32_t ia,ib,ic,id; cli_putstr("\r\n f2("); cli_hexdump(&d, 4); cli_putc(','); cli_hexdump(&m , 4); cli_putc(','); cli_hexdump(&r, 1);cli_putstr("): I="); cli_hexdump(&t, 4); ia = pgm_read_dword(&s1[((uint8_t*)&t)[IA]] ); ib = pgm_read_dword(&s2[((uint8_t*)&t)[IB]] ); ic = pgm_read_dword(&s3[((uint8_t*)&t)[IC]] ); id = pgm_read_dword(&s4[((uint8_t*)&t)[ID]] ); cli_putstr("\r\n\tIA="); cli_hexdump(&ia, 4); cli_putstr("\r\n\tIB="); cli_hexdump(&ib, 4); cli_putstr("\r\n\tIC="); cli_hexdump(&ic, 4); cli_putstr("\r\n\tID="); cli_hexdump(&id, 4); return (((ia - ib) + ic) ^ id); #else return ((( pgm_read_dword(&s1[((uint8_t*)&t)[IA]]) - pgm_read_dword(&s2[((uint8_t*)&t)[IB]])) + pgm_read_dword(&s3[((uint8_t*)&t)[IC]])) ^ pgm_read_dword(&s4[((uint8_t*)&t)[ID]])); #endif } static uint32_t cast5_f3(uint32_t d, uint32_t m, uint8_t r) { uint32_t t; t = ROTL32((m - d), r); #ifdef DEBUG uint32_t ia,ib,ic,id; cli_putstr("\r\n f3("); cli_hexdump(&d, 4); cli_putc(','); cli_hexdump(&m , 4); cli_putc(','); cli_hexdump(&r, 1);cli_putstr("): I="); cli_hexdump(&t, 4); ia = pgm_read_dword(&s1[((uint8_t*)&t)[IA]] ); ib = pgm_read_dword(&s2[((uint8_t*)&t)[IB]] ); ic = pgm_read_dword(&s3[((uint8_t*)&t)[IC]] ); id = pgm_read_dword(&s4[((uint8_t*)&t)[ID]] ); cli_putstr("\r\n\tIA="); cli_hexdump(&ia, 4); cli_putstr("\r\n\tIB="); cli_hexdump(&ib, 4); cli_putstr("\r\n\tIC="); cli_hexdump(&ic, 4); cli_putstr("\r\n\tID="); cli_hexdump(&id, 4); return (((ia + ib) ^ ic) - id); #else return (( pgm_read_dword(&s1[((uint8_t*)&t)[IA]] ) + pgm_read_dword(&s2[((uint8_t*)&t)[IB]])) ^ pgm_read_dword(&s3[((uint8_t*)&t)[IC]])) - pgm_read_dword(&s4[((uint8_t*)&t)[ID]]); #endif } /******************************************************************************/ void cast5_enc(void *block, const cast5_ctx_t *s) { uint32_t l, r, x, y; uint8_t i; cast5_f_t *f[] = { cast5_f1, cast5_f2, cast5_f3 }; l = ((uint32_t*) block)[0]; r = ((uint32_t*) block)[1]; // cli_putstr("\r\n round[-1] = "); // cli_hexdump(&r, 4); for (i = 0; i < (s->shortkey ? 12 : 16); ++i) { x = r; y = (f[i % 3])(CHANGE_ENDIAN32(r), CHANGE_ENDIAN32(s->mask[i]), (((s->roth[i >> 3]) & (1 << (i & 0x7))) ? 0x10 : 0x00) + (((s->rotl[i >> 1]) >> ((i & 1) ? 4 : 0)) & 0x0f)); r = l ^ CHANGE_ENDIAN32(y); // cli_putstr("\r\n round["); DEBUG_B(i); cli_putstr("] = "); // cli_hexdump(&r, 4); l = x; } ((uint32_t*) block)[0] = r; ((uint32_t*) block)[1] = l; } /******************************************************************************/ void cast5_dec(void *block, const cast5_ctx_t *s) { uint32_t l, r, x, y; int8_t i, rounds; cast5_f_t *f[] = { cast5_f1, cast5_f2, cast5_f3 }; l = ((uint32_t*) block)[0]; r = ((uint32_t*) block)[1]; rounds = (s->shortkey ? 12 : 16); for (i = rounds - 1; i >= 0; --i) { x = r; y = (f[i % 3])(CHANGE_ENDIAN32(r), CHANGE_ENDIAN32(s->mask[i]), (((s->roth[i >> 3]) & (1 << (i & 0x7))) ? 
0x10 : 0x00) + (((s->rotl[i >> 1]) >> ((i & 1) ? 4 : 0)) & 0x0f)); r = l ^ CHANGE_ENDIAN32(y); l = x; } ((uint32_t*) block)[0] = r; ((uint32_t*) block)[1] = l; } /******************************************************************************/
nerilex/avr-crypto-lib
cast5/cast5.c
C
gpl-2.0
12,161
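The cast5.c record above exposes a three-call API: cast5_init() expands the key (length given in bits) into a cast5_ctx_t, and cast5_enc()/cast5_dec() cipher one 64-bit block in place. Below is a minimal known-answer sketch, untested, assuming the library headers are on the include path and an AVR toolchain (the S-boxes live in program memory); cast5_selftest is a hypothetical name, and the key/plaintext/ciphertext triple is the 128-bit test vector from RFC 2144 Appendix B.

#include <stdint.h>
#include <string.h>
#include "cast5.h"

/* Known-answer test: encrypt the RFC 2144 vector, then round-trip. */
int cast5_selftest(void)
{
    const uint8_t key[16]   = { 0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78,
                                0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A };
    const uint8_t plain[8]  = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF };
    const uint8_t expect[8] = { 0x23, 0x8B, 0x4F, 0xE5, 0x84, 0x7E, 0x44, 0xB2 };
    uint8_t block[8];
    cast5_ctx_t ctx;

    memcpy(block, plain, 8);
    cast5_init(key, 128, &ctx);           /* key length is passed in bits */
    cast5_enc(block, &ctx);               /* enciphers the block in place */
    if (memcmp(block, expect, 8) != 0)
        return 0;                         /* ciphertext mismatch          */
    cast5_dec(block, &ctx);               /* decryption must round-trip   */
    return memcmp(block, plain, 8) == 0;  /* 1 on success                 */
}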
/********************************************************************************************* * File: Bmp.c * Author: * Descrip: LCD control and display functions * Version: *********************************************************************************************/ /*--- Header files ---*/ #include "bmp.h" #include "def.h" #include "lcd.h" /*--- Global variables ---*/ /* bitmap for the mouse cursor */ const INT8U ucMouseMap[] = { BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, BLACK, BLACK, BLACK, BLACK, BLACK, BLACK, WHITE, WHITE, BLACK, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, BLACK, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, BLACK, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, WHITE, WHITE, WHITE, BLACK, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, TRANSPARENCY, BLACK, BLACK, BLACK, TRANSPARENCY, TRANSPARENCY }; STRU_BITMAP Stru_Bitmap_gbMouse = {0x10, 4, 12, 20, TRANSPARENCY, (INT8U *)ucMouseMap}; INT16U ulMouseX; INT16U ulMouseY; INT8U ucCursorBackUp[20][12/2]; /*--- Function code ---*/ /********************************************************************************************* * name: BitmapView() * func: display bitmap * para: x,y -- pot's X-Y coordinate * Stru_Bitmap -- bitmap struct * ret: none * modify: * comment: *********************************************************************************************/ void BitmapView (INT16U x, INT16U y, STRU_BITMAP Stru_Bitmap) { INT32U i, j; INT8U
ucColor; for (i = 0; i < Stru_Bitmap.usHeight; i++) { for (j = 0; j <Stru_Bitmap.usWidth; j++) { if ((ucColor = *(INT8U*)(Stru_Bitmap.pucStart + i * Stru_Bitmap.usWidth + j)) != TRANSPARENCY) { LCD_PutPixel(x + j, y + i, ucColor); } } } } /********************************************************************************************* * name: BitmapPush() * func: push bitmap data into LCD active buffer * para: x,y -- pot's X-Y coordinate * Stru_Bitmap -- bitmap struct * ret: none * modify: * comment: *********************************************************************************************/ void BitmapPush (INT16U x, INT16U y, STRU_BITMAP Stru_Bitmap) { INT32U i, j; ulMouseX = x; ulMouseY = y; for (i = 0; i < Stru_Bitmap.usHeight; i++) { for (j = 0; j < Stru_Bitmap.usWidth; j+=2) { if ((x + j)%2) { ucCursorBackUp[i][j/2] = (((*(INT8U*)(LCD_ACTIVE_BUFFER + (y + i) * SCR_XSIZE / 2 + (x + j) / 8 * 4 + 3 - ((x + j)%8) / 2)) << 4) & 0xf0) + (((*(INT8U*)(LCD_ACTIVE_BUFFER + (y + i) * SCR_XSIZE / 2 + (x + j+1) / 8 * 4 + 3 - ((x + j+1)%8) / 2)) >> 4) & 0x0f); } else { ucCursorBackUp[i][j/2] = (*(INT8U*)(LCD_ACTIVE_BUFFER + (y + i) * SCR_XSIZE / 2 + (x + j) / 8 * 4 + 3 - ((x + j)%8) / 2)); } } } } /********************************************************************************************* * name: BitmapPop() * func: pop bitmap data into LCD active buffer * para: x,y -- pot's X-Y coordinate * Stru_Bitmap -- bitmap struct * ret: none * modify: * comment: *********************************************************************************************/ void BitmapPop(INT16U x, INT16U y, STRU_BITMAP Stru_Bitmap) { INT32U i, j; INT32U ulAddr, ulAddr1; for (i = 0; i < Stru_Bitmap.usHeight; i++) { for (j = 0; j <Stru_Bitmap.usWidth; j+=2) { ulAddr = LCD_ACTIVE_BUFFER + (y + i) * SCR_XSIZE / 2 + (x + j) / 8 * 4 + 3 - ((x + j)%8) / 2; ulAddr1 =LCD_ACTIVE_BUFFER + (y + i) * SCR_XSIZE / 2 + (x + j + 1) / 8 * 4 + 3 - ((x + j + 1)%8) / 2; if ((x + j)%2) { (*(INT8U*)ulAddr) &= 0xf0; (*(INT8U*)ulAddr) |= ((ucCursorBackUp[i][j/2] >> 4) & 0x0f); (*(INT8U*)ulAddr1) &= 0x0f; (*(INT8U*)ulAddr1) |= ((ucCursorBackUp[i][j/2] << 4) & 0xf0); } else { (*(INT8U*)ulAddr) = ucCursorBackUp[i][j/2]; } } } } /********************************************************************************************* * name: CursorInit() * func: cursor init * para: none * ret: none * modify: * comment: *********************************************************************************************/ void CursorInit(void) { ulMouseX = 0; ulMouseY = 0; CursorView(ulMouseX, ulMouseY); } /********************************************************************************************* * name: CursorPush() * func: cursor push * para: none * ret: none * modify: * comment: *********************************************************************************************/ void CursorPush(INT16U x, INT16U y) { BitmapPush(x, y, Stru_Bitmap_gbMouse); } /********************************************************************************************* * name: CursorPop() * func: cursor pop * para: none * ret: none * modify: * comment: *********************************************************************************************/ void CursorPop() { BitmapPop(ulMouseX, ulMouseY, Stru_Bitmap_gbMouse); } /********************************************************************************************* * name: CursorView() * func: cursor display * para: none * ret: none * modify: * comment: *********************************************************************************************/ 
void CursorView(INT16U x, INT16U y) { CursorPush(x, y); BitmapView(x, y, Stru_Bitmap_gbMouse); }
UnizarCurryMicroSystems/ProyectoHardware
PH_3/Bmp.c
C
gpl-2.0
8,483
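The Bmp.c record above splits cursor redraw into three steps: CursorPush()/BitmapPush() back up the framebuffer pixels under the cursor, BitmapView() draws the bitmap, and CursorPop() restores the saved pixels at the previous position. A hypothetical CursorMove() helper, sketched only from those routines, shows the intended call order when the pointer moves:

#include "bmp.h"   /* assumed to pull in the INT16U typedefs from def.h */

/* Move the mouse cursor: undo the old overlay, then save and redraw. */
void CursorMove(INT16U x, INT16U y)
{
    CursorPop();       /* restore pixels backed up at (ulMouseX, ulMouseY) */
    CursorView(x, y);  /* CursorPush() the new area, then blit the cursor  */
}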
/* * USI wm-bn-bm-01-5(bcm4329) sdio wifi power management API * evb gpio define * A10 gpio define: * usi_bm01a_wl_pwr = port:PH12<1><default><default><0> * usi_bm01a_wlbt_regon = port:PI11<1><default><default><0> * usi_bm01a_wl_rst = port:PI10<1><default><default><0> * usi_bm01a_wl_wake = port:PI12<1><default><default><0> * usi_bm01a_bt_rst = port:PB05<1><default><default><0> * usi_bm01a_bt_wake = port:PI20<1><default><default><0> * usi_bm01a_bt_hostwake = port:PI21<0><default><default><0> * ----------------------------------------------------------- * A12 gpio define: * usi_bm01a_wl_pwr = LDO3 * usi_bm01a_wl_wake = port:PA01<1><default><default><0> * usi_bm01a_wlbt_regon = port:PA02<1><default><default><0> * usi_bm01a_wl_rst = port:PA03<1><default><default><0> * usi_bm01a_bt_rst = port:PA04<1><default><default><0> * usi_bm01a_bt_wake = port:PA05<1><default><default><0> * usi_bm01a_bt_hostwake = */ #include <linux/kernel.h> #include <linux/module.h> #include <mach/sys_config.h> #include "mmc_pm.h" #define usi_msg(...) do {printk("[usi_bm01a]: "__VA_ARGS__);} while(0) static int usi_bm01a_wl_on = 0; static int usi_bm01a_bt_on = 0; #if CONFIG_CHIP_ID==1125 #include <linux/regulator/consumer.h> static int usi_bm01a_power_onoff(int onoff) { struct regulator* wifi_ldo = NULL; static int first = 1; #ifndef CONFIG_AW_AXP usi_msg("AXP driver is disabled, pls check !!\n"); return 0; #endif usi_msg("usi_bm01a_power_onoff\n"); wifi_ldo = regulator_get(NULL, "axp20_pll"); if (!wifi_ldo) usi_msg("Get power regulator failed\n"); if (first) { usi_msg("first time\n"); regulator_force_disable(wifi_ldo); first = 0; } if (onoff) { usi_msg("regulator on\n"); regulator_set_voltage(wifi_ldo, 3300000, 3300000); regulator_enable(wifi_ldo); } else { usi_msg("regulator off\n"); regulator_disable(wifi_ldo); } return 0; } #endif static int usi_bm01a_gpio_ctrl(char* name, int level) { struct mmc_pm_ops *ops = &mmc_card_pm_ops; char* gpio_cmd[6] = {"usi_bm01a_wl_regon", "usi_bm01a_bt_regon", "usi_bm01a_wl_rst", "usi_bm01a_wl_wake", "usi_bm01a_bt_rst", "usi_bm01a_bt_wake"}; int i = 0; int ret = 0; for (i=0; i<6; i++) { if (strcmp(name, gpio_cmd[i])==0) break; } if (i==6) { usi_msg("No gpio %s for USI-BM01A module\n", name); return -1; } // usi_msg("Set GPIO %s to %d !\n", name, level); if (strcmp(name, "usi_bm01a_wl_regon") == 0) { if (level) { if (usi_bm01a_bt_on) { usi_msg("USI-BM01A is already powered up by bluetooth\n"); goto change_state; } else { usi_msg("USI-BM01A is powered up by wifi\n"); goto power_change; } } else { if (usi_bm01a_bt_on) { usi_msg("USI-BM01A should stay on because of bluetooth\n"); goto change_state; } else { usi_msg("USI-BM01A is powered off by wifi\n"); goto power_change; } } } if (strcmp(name, "usi_bm01a_bt_regon") == 0) { if (level) { if (usi_bm01a_wl_on) { usi_msg("USI-BM01A is already powered up by wifi\n"); goto change_state; } else { usi_msg("USI-BM01A is powered up by bt\n"); goto power_change; } } else { if (usi_bm01a_wl_on) { usi_msg("USI-BM01A should stay on because of wifi\n"); goto change_state; } else { usi_msg("USI-BM01A is powered off by bt\n"); goto power_change; } } } ret = gpio_write_one_pin_value(ops->pio_hdle, level, name); if (ret) { usi_msg("Failed to set gpio %s to %d !\n", name, level); return -1; } return 0; power_change: #if CONFIG_CHIP_ID==1123 ret = gpio_write_one_pin_value(ops->pio_hdle, level, "usi_bm01a_wl_pwr"); #elif CONFIG_CHIP_ID==1125 ret = usi_bm01a_power_onoff(level); #else #error "Found wrong chip id in wifi onoff\n" #endif if (ret) { usi_msg("Failed 
to power off USI-BM01A module!\n"); return -1; } ret = gpio_write_one_pin_value(ops->pio_hdle, level, "usi_bm01a_wlbt_regon"); if (ret) { usi_msg("Failed to regon off for USI-BM01A module!\n"); return -1; } change_state: if (strcmp(name, "usi_bm01a_wl_regon")==0) usi_bm01a_wl_on = level; if (strcmp(name, "usi_bm01a_bt_regon")==0) usi_bm01a_bt_on = level; usi_msg("USI-BM01A power state change: wifi %d, bt %d !!\n", usi_bm01a_wl_on, usi_bm01a_bt_on); return 0; } static int usi_bm01a_get_gpio_value(char* name) { struct mmc_pm_ops *ops = &mmc_card_pm_ops; char* bt_hostwake = "usi_bm01a_bt_hostwake"; if (strcmp(name, bt_hostwake)) { usi_msg("No gpio %s for USI-BM01A\n", name); return -1; } return gpio_read_one_pin_value(ops->pio_hdle, name); } void usi_bm01a_gpio_init(void) { struct mmc_pm_ops *ops = &mmc_card_pm_ops; usi_bm01a_wl_on = 0; usi_bm01a_bt_on = 0; ops->gpio_ctrl = usi_bm01a_gpio_ctrl; ops->get_io_val = usi_bm01a_get_gpio_value; }
sdugit/linux-3.0_7025
drivers/mmc/mmc-pm/mmc_pm_usi_bm01a.c
C
gpl-2.0
5,469
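usi_bm01a_gpio_ctrl() above reference-counts the shared WiFi/Bluetooth power rail through the usi_bm01a_wl_on/usi_bm01a_bt_on flags, so the module only powers down when the last user releases it. An illustrative call sequence, a sketch assuming the mmc_pm.h header and the mmc_card_pm_ops instance the file itself uses:

#include "mmc_pm.h"

/* Walk the shared-power state machine of usi_bm01a_gpio_ctrl(). */
static void usi_bm01a_power_demo(void)
{
    struct mmc_pm_ops *ops = &mmc_card_pm_ops;

    ops->gpio_ctrl("usi_bm01a_wl_regon", 1); /* powers the module up for wifi   */
    ops->gpio_ctrl("usi_bm01a_bt_regon", 1); /* already up: only the flag flips */
    ops->gpio_ctrl("usi_bm01a_wl_regon", 0); /* stays on, bluetooth still holds */
    ops->gpio_ctrl("usi_bm01a_bt_regon", 0); /* last user gone: module off      */
}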
/* * board/omap3621_boxer/max17042.c * * Copyright (C) 2010 Barnes & Noble, Inc. * Intrinsyc Software International, Inc. on behalf of Barnes & Noble, Inc. * * Max17042 Gas Gauge initialization for u-boot * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <common.h> #include <asm/arch/cpu.h> #include <asm/io.h> #include <asm/arch/bits.h> #include <asm/arch/mux.h> #include <asm/arch/sys_proto.h> #include <asm/arch/sys_info.h> #include <asm/arch/clocks.h> #include <asm/arch/mem.h> #include <i2c.h> #include <asm/mach-types.h> #define TEMP_RESOLUTION 3900 /*3900uC*/ #define COMPLEMENT_VAL(x, y, z) (( ((((~x) & 0x7FFF) + 1) * y) / z ) * (-1)) #define MAX17042_ADDR 0x36 #define MAX_WRITE(reg, val) i2c_multidata_write(MAX17042_ADDR, (reg), 1, (val), 2) #define MAX_READ(reg, val) i2c_read_2_byte(MAX17042_ADDR,(reg), (val)) struct max17042_saved_data { uint16_t tag; uint16_t val_FullCAP; uint16_t val_Cycles; uint16_t val_FullCAPNom; uint16_t val_SOCempty; uint16_t val_Iavg_empty; uint16_t val_RCOMP0; uint16_t val_TempCo; uint16_t val_k_empty0; uint16_t val_dQacc; uint16_t val_dPacc; uint16_t val_SOCmix; uint16_t val_Empty_TempCo; uint16_t val_ICHGTerm; uint16_t val_Vempty; uint16_t val_FilterCFG; uint16_t val_TempNom; uint16_t val_DesignCap; uint16_t val_Capacity; uint16_t val_SOCREP; }; static struct max17042_saved_data save_store; static int is_power_on = 1; //battery flat, battery removed, static int is_history_exist = 1; //max17042.bin is in ROM partition static uint16_t VFSOC = 0; #define DEBUG(x...) 
printf(x) #define TEMPLIM 0x2305 // hard POR value #define MISCCFG 0x0810 // hard POR value -- and with 0xCC1F #define MISCCFG_MASK 0xCC1F #define TGAIN 0xE3E1 // hard POR value #define TOFF 0x290E // hard POR value #define CGAIN 0x4000 // hard POR value #define COFF 0x0000 // hard POR value #define FCTC 0x05E0 // hard POR value #define MAX17042_STATUS 0x00 #define MAX17042_RemCapREP 0x05 #define MAX17042_SOCREP 0x06 #define MAX17042_Vcell 0x09 #define MAX17042_SOCmix 0x0D #define MAX17042_RemCapmix 0x0F #define MAX17042_FullCap 0x10 #define MAX17042_Vempty 0x12 #define MAX17042_Cycles 0x17 #define MAX17042_DesignCap 0x18 #define MAX17042_CONFIG 0x1D #define MAX17042_ICHGTerm 0x1E #define MAX17042_Version 0x21 #define MAX17042_FullCAPNom 0x23 #define MAX17042_TempNom 0x24 #define MAX17042_TempLim 0x25 #define MAX17042_LearnCFG 0x28 #define MAX17042_RelaxCFG 0x2A #define MAX17042_FilterCFG 0x29 #define MAX17042_MiscCFG 0x2B #define MAX17042_TGAIN 0x2C #define MAX17042_TOFF 0x2D #define MAX17042_CGAIN 0x2E #define MAX17042_COFF 0x2F #define MAX17042_SOCempty 0x33 #define MAX17042_FullCap0 0x35 #define MAX17042_Iavg_empty 0x36 #define MAX17042_FCTC 0x37 #define MAX17042_RCOMP0 0x38 #define MAX17042_TempCo 0x39 #define MAX17042_Empty_TempCo 0x3A #define MAX17042_k_empty0 0x3B #define MAX17042_dQacc 0x45 #define MAX17042_dPacc 0x46 #define MAX17042_VFSOC_Unlock 0x60 #define MAX17042_OCV 0xFB #define MAX17042_FSTAT 0xFD #define MAX17042_SOCvf 0xFF #define MAX17042_STATUS_bit_POR (1<<1) typedef enum { BATT_LG = 0, BATT_MCNAIR, /* add new battery type here */ BATT_MAX } batt_type; // This struct contains registers for initial configuration typedef struct { uint16_t RelaxCFG; uint16_t Config; uint16_t FilterCFG; uint16_t LearnCFG; uint16_t Vempty; uint16_t RCOMP0; uint16_t TempCo; uint16_t ETC; uint16_t Kempty0; uint16_t ICHGTerm; uint16_t Capacity; } max17042_init_params; // 48-word custom model params typedef struct { uint16_t buf_80h[16]; uint16_t buf_90h[16]; uint16_t buf_A0h[16]; } max17042_custom_model; // All the registers for initializing max17042 typedef struct { const max17042_init_params* init_params; const max17042_custom_model* model; } batt_type_params; // This table contains default initialization values provided by Maxim for different battery types. static const max17042_init_params const init_param_table[BATT_MAX] = { //RelaxCFG Config FilterCFG LearnCFG Vempty RCOMP0 TempCo ETC Kempty0 ICHGTerm Capacity /*LG */{ 0x083B, 0x2210, 0x87A4, 0x2406, 0x7D5A, 0x0080, 0x3670, 0x2F2C, 0x078F, 0x0140, 0x205C}, /*MCNAIR*/{ 0x083B, 0x2210, 0x87A4, 0x2406, 0x7D5A, 0x0081, 0x1921, 0x0635, 0x0679, 0x04F3, 0x1EB9}, // add an entry here for new battery type }; // This table contains 48-word custom model params provided by Maxim for different battery types.
static const max17042_custom_model const model_table[BATT_MAX] = { // LG { /*80h*/{0xA0D0, 0xB3F0, 0xB820, 0xB940, 0xBB80, 0xBBF0, 0xBC90, 0xBD00, 0xBDA0, 0xBE80, 0xBF70, 0xC280, 0xC5B0, 0xC7F0, 0xCAB0, 0xD030}, /*90h*/{0x0100, 0x0700, 0x1400, 0x0B00, 0x2640, 0x3210, 0x1D40, 0x2C00, 0x1760, 0x15D0, 0x09C0, 0x0CE0, 0x0BD0, 0x09F0, 0x08F0, 0x08F0}, /*A0h*/{0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100}, }, // MCNAIR { /*80h*/{0x9EE0, 0xB660, 0xB7D0, 0xB980, 0xBB00, 0xBBD0, 0xBCA0, 0xBD70, 0xBE60, 0xBF70, 0xC0A0, 0xC410, 0xC710, 0xCA50, 0xCC80, 0xD100}, /*90h*/{0x0060, 0x13F0, 0x0AF0, 0x10B0, 0x1920, 0x2720, 0x1E30, 0x1A20, 0x1600, 0x14F0, 0x0BF0, 0x0CF0, 0x0610, 0x0A00, 0x0A80, 0x0A80}, /*A0h*/{0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100}, }, // add an entry here for new battery type }; static const batt_type_params param_table[BATT_MAX] = { /*LG */ {&init_param_table[BATT_LG], &model_table[BATT_LG] }, /*MCNAIR*/ {&init_param_table[BATT_MCNAIR], &model_table[BATT_MCNAIR]}, // add an entry here for new battery type }; static const batt_type_params* type_params = &param_table[BATT_LG]; static inline int max17042_dumpreg( char *pRegName, int iReg ) { uint16_t val; MAX_READ( iReg, (uchar*)&val); DEBUG("%s (%02xh) is 0x%04x\n", pRegName, iReg, val ); return val; } #define MAX17042_DUMPREG( reg ) max17042_dumpreg( #reg, reg ) static int max17042_check_init_config(void) { uint16_t buf=0; const max17042_init_params* params = type_params->init_params; MAX_READ(MAX17042_CONFIG, (uchar*)&buf); DEBUG("uboot verify: %02x CONFIG is %04x ; should be %04x & 0xFDFB\n", MAX17042_CONFIG, buf, params->Config); // in case of warm boot, kernel might have changed bits Ten and Aen if ( (buf & ~0x0204) != ( params->Config & ~0x0204) ) return 1; buf=0; MAX_READ(MAX17042_RelaxCFG, (uchar*)&buf); DEBUG("uboot verify: %02x RELAXCFG is %04x ; should be %04x\n", MAX17042_RelaxCFG, buf, params->RelaxCFG); if ( buf != params->RelaxCFG ) return 1; buf=0; MAX_READ(MAX17042_FilterCFG, (uchar*)&buf); DEBUG("uboot verify: %02x FILTERCFG is %04x ; should be %04x\n", MAX17042_FilterCFG, buf, params->FilterCFG); if ( buf != params->FilterCFG ) return 1; buf=0; MAX_READ(MAX17042_LearnCFG, (uchar*)&buf); DEBUG("uboot verify: %02x LEARNCFG is %04x ; should be %04x & 0xFF0F\n", MAX17042_LearnCFG, buf, params->LearnCFG); if ( (buf & 0xFF0F) != ( params->LearnCFG & 0xFF0F) ) return 1; MAX_READ( MAX17042_DesignCap, (u8*)&buf); DEBUG("uboot verify: %02x DesignCap is %04x ; should be %04x\n", MAX17042_DesignCap, buf, params->Capacity); if ( buf != params->Capacity ) return 1; MAX_READ( MAX17042_Vempty, (u8*)&buf); DEBUG("uboot verify: %02x Vempty is %04x ; should be %04x\n", MAX17042_Vempty, buf, params->Vempty); if ( buf != params->Vempty ) return 1; buf=0; MAX_READ(MAX17042_TempLim, (uchar*)&buf); DEBUG("uboot verify: %02x TEMPLIM is %04x ; should be %04x\n", MAX17042_TempLim, buf, TEMPLIM); if ( buf != TEMPLIM ) return 1; buf=0; MAX_READ(MAX17042_MiscCFG, (uchar*)&buf); DEBUG("uboot verify: %02x MiscCFG is %04x ; should be %04x & %04x\n", MAX17042_MiscCFG, buf, MISCCFG, MISCCFG_MASK ); if ( (buf & MISCCFG_MASK) != (MISCCFG & MISCCFG_MASK) ) return 1; buf=0; MAX_READ(MAX17042_TGAIN, (uchar*)&buf); DEBUG("uboot verify: %02x TGAIN is %04x ; should be %04x\n", MAX17042_TGAIN, buf, TGAIN); if ( buf != TGAIN ) return 1; buf=0; MAX_READ(MAX17042_TOFF, (uchar*)&buf); DEBUG("uboot 
verify: %02x TOFF is %04x ; should be %04x\n", MAX17042_TOFF, buf, TOFF); if ( buf != TOFF ) return 1; buf=0; MAX_READ(MAX17042_CGAIN, (uchar*)&buf); DEBUG("uboot verify: %02x CGAIN is %04x ; should be %04x\n", MAX17042_CGAIN, buf, CGAIN); if ( buf != CGAIN ) return 1; buf=0; MAX_READ(MAX17042_COFF, (uchar*)&buf); DEBUG("uboot verify: %02x COFF is %04x ; should be %04x\n", MAX17042_COFF, buf, COFF); if ( buf != COFF ) return 1; buf=0; MAX_READ(MAX17042_FCTC, (uchar*)&buf); DEBUG("uboot verify: %02x FCTC is %04x ; should be %04x\n", MAX17042_FCTC, buf, FCTC); if ( buf != FCTC ) return 1; return 0; } static int max17042_init_config(void) { int err; const max17042_init_params* params = type_params->init_params; err =MAX_WRITE(MAX17042_CONFIG, (uchar*)&(params->Config)); if ( err != 0 ) { DEBUG("uboot: write err CONFIG \n"); return err; } // DEBUG("config = 0x%04x\n", params->Config); err =MAX_WRITE(MAX17042_RelaxCFG, (uchar*)&params->RelaxCFG); if ( err != 0 ) { DEBUG( "uboot: write err RelaxCFG \n"); return err; } // DEBUG("relaxcfg = 0x%04x\n", params->RelaxCFG); err =MAX_WRITE(MAX17042_FilterCFG, (uchar*)&params->FilterCFG); if ( err != 0 ) { DEBUG( "write err FilterCFG \n"); return err; } // DEBUG("filtercfg = 0x%04x\n", params->FilterCFG); err =MAX_WRITE(MAX17042_LearnCFG, (uchar*)& params->LearnCFG); if ( err != 0 ) { DEBUG( "write err LearnCFG\n"); return err; } // DEBUG("LearnCFG = 0x%04x\n", params->learncfg); err =MAX_WRITE(MAX17042_Vempty, (uchar*)& params->Vempty); if ( err != 0 ) { DEBUG( "write err Vempty\n"); return err; } // DEBUG("Vempty = 0x%04x\n", params->Vempty); return max17042_check_init_config(); } int is_max17042_por(void) { uint16_t stat = 0; int ret; stat = MAX17042_DUMPREG(MAX17042_STATUS); ret = (stat & MAX17042_STATUS_bit_POR) ? 1: 0 ; return ret; } //return 1: memory lost on power_on //return 0; memory is in place static int is_power_on_rst(void) { int ret = 0; uint16_t stat = 0; stat = MAX17042_DUMPREG(MAX17042_STATUS); /*POR bit check*/ /* previous code has the operator precedence problem where != is evaluated before bitand ! 
*/ if ( (stat & MAX17042_STATUS_bit_POR) != 0 ) { ret = 1; } if( (stat & (1 <<15))!=0) DEBUG("MAX17042+UBOOT: POWER SUPPLY Detected!\n"); else DEBUG("MAX17042+UBOOT: BATTERY Detected!\n"); is_power_on = ret; return ret; } static void max17042_clear_POR(void) { uint16_t stat = 0; stat = MAX17042_DUMPREG(MAX17042_STATUS); if ( stat & MAX17042_STATUS_bit_POR ) { DEBUG("STATUS = 0x%04x -- clearing POR\n", stat ); stat &= ~MAX17042_STATUS_bit_POR; MAX_WRITE(MAX17042_STATUS,(uchar*)&stat); MAX17042_DUMPREG(MAX17042_STATUS); } } static int max17042_save_start_para(void) { int err; uint16_t buf; err =MAX_READ(MAX17042_SOCmix, (uchar*)&buf); if ( err != 0 ) { DEBUG("read err MixedSOC \n"); return err; } else save_store.val_SOCmix = buf; err =MAX_READ(MAX17042_dQacc, (uchar*)&buf); if ( err != 0 ) { DEBUG( "read err dQ_acc \n"); return err; } else save_store.val_dQacc = buf; err =MAX_READ(MAX17042_dPacc, (uchar*)&buf); if ( err != 0 ) { DEBUG( "read err dP_acc \n"); return err; } else save_store.val_dPacc = buf; return 0; } static void max17042_unlock_model(void) { static uint16_t val1 = 0x0059; static uint16_t val2 = 0x00C4; MAX_WRITE(0x62, (uchar*)&val1); udelay(10); MAX_WRITE(0x63, (uchar*)&val2); } static int max17042_write_model(void) { int i; int err=1; const max17042_custom_model* model = type_params->model; for ( i = 0; i < 16; i++) { err = MAX_WRITE((0x80+i), (uchar*)(&model->buf_80h[i])); if ( err != 0 ) { DEBUG( "write err model 0x80 \n"); return err; } //DEBUG(" %x write %04x\n", (0x80+i), model->buf_80h[i]); udelay(10); err = MAX_WRITE((0x90+i), (uchar*)(&model->buf_90h[i])); if ( err != 0 ) { DEBUG( "write err model 0x90 \n"); return err; } //DEBUG(" %x write %04x\n", (0x90+i), model->buf_90h[i]); udelay(10); MAX_WRITE((0xA0+i), (uchar*)(&model->buf_A0h[i])); if ( err != 0 ) { DEBUG( "write err model 0xA0 \n"); return err; } //DEBUG(" %x write %04x\n", (0xA0+i), model->buf_A0h[i]); udelay(10); } return 0; } static int max17042_read_verify_model(void) { int i; uint16_t buf; int err = 1; const max17042_custom_model* model = type_params->model; for ( i = 0; i < 16; i++) { err = MAX_READ((0x80+i), (uchar*)&buf); if ( err != 0 ) { DEBUG( "read err model 0x80 \n"); return err; } else if ( buf != model->buf_80h[i] ) { DEBUG(" err 80h item %d not matched\n", i); return 1; } //DEBUG(" %x model: %04x\n", (0x80+i), buf); udelay(10); err = MAX_READ((0x90+i), (uchar*)&buf); if ( err != 0 ) { DEBUG( "read err model 0x90 \n"); return err; }else if ( buf != model->buf_90h[i] ) { DEBUG(" err 90h item %d not matched\n", i); return 1; } //DEBUG(" %x model: %04x\n", (0x90+i), buf); udelay(10); err = MAX_READ((0xA0+i), (uchar*)&buf); if ( err != 0 ) { DEBUG( "read err model 0xA0 \n"); return err; }else if ( buf != model->buf_A0h[i] ) { DEBUG(" err A0h item %d not matched\n", i); return 1; } //DEBUG(" %x model: %04x\n", (0xA0+i), buf); udelay(10); } return 0; } static void max17042_lock_model(void) { static const uint16_t lock = 0x0000; MAX_WRITE(0x62, (uchar*)&lock); udelay(10); MAX_WRITE(0x63, (uchar*)&lock); udelay(100); return; } static int max17042_verify_lock_model(void) { int i; uint16_t buf; int err = 1; for ( i = 0; i < 16; i++) { err = MAX_READ((0x80+i), (uchar*)&buf); //DEBUG(" %x model: %04x\n", (0x80+i), buf); if ( err != 0 ) { DEBUG( "read err model 0x80 \n"); return err; } else if ( buf != 0x0000 ) { DEBUG(" err model not locked!\n", i); return 1; } udelay(10); err = MAX_READ((0x90+i), (uchar*)&buf); //DEBUG(" %x model: %04x\n", (0x90+i), buf); if ( err != 0 ) { DEBUG( "read err model 0x90 
\n"); return err; }else if ( buf != 0x0000 ) { DEBUG(" err model not locked\n", i); return 1; } udelay(10); err = MAX_READ((0xA0+i), (uchar*)&buf); //DEBUG(" %x model: %04x\n", (0xA0+i), buf); if ( err != 0 ) { DEBUG( "read err model 0xA0 \n"); return err; }else if ( buf != 0x0000 ) { DEBUG(" err model not locked\n", i); return 1; } udelay(10); } return 0; } int max17042_soft_por(void) { uint16_t buf = 0; int iReps; iReps = 0; while ( 1 ) { if ( iReps++ > 10 ) { DEBUG("Soft POR : unlock failure\n"); return 1; } // DEBUG("Soft POR : attempting unlock\n"); max17042_lock_model(); buf = 0; MAX_WRITE(MAX17042_STATUS, (uchar*)&buf); // clear all Status // note: clear POR bit is not enough here udelay(1000); MAX_READ(MAX17042_STATUS, (uchar*)&buf); if ( buf != 0 ) continue; MAX_READ(0x62, (uchar*)&buf); if ( buf != 0 ) continue; MAX_READ(0x63, (uchar*)&buf); if ( buf != 0 ) continue; break; } // DEBUG("Soft POR: unlocked\n"); for ( iReps = 0; iReps < 10; iReps++ ) { buf = 0x000F; MAX_WRITE(0x60, (uchar*)&buf); udelay(2*1000); if ( is_max17042_por() ) { DEBUG("Soft POR: Success!\n"); return 0; } } DEBUG("Soft POR: failed\n"); return 1; } static int max_write_verify( u8 reg, const u8* val) { int err; uint16_t buf1, buf2; int iTmp; buf1 = *(uint16_t*)val; // DEBUG("%s: write 0x%04x to reg 0x%x\n", __FUNCTION__, buf1, reg ); for ( iTmp=0; iTmp < 3; iTmp++ ) { err = MAX_WRITE( reg, (uchar*)val ); udelay(50); err = MAX_READ( reg, (u8*)&buf2 ); if ( buf1 == buf2 ) return 0; DEBUG("Retry write 0x%04x to reg 0x%x\n", buf1, reg ); } DEBUG ("Failed to write 0x%04x to reg 0x%x (contains 0x%04x)\n", buf1, reg, buf2 ); return 1; } static int max17042_set_cycles( uint16_t cycles ) { return max_write_verify(MAX17042_Cycles, (u8*)&cycles); } static int max17042_restore_fullcap(void) { int err; uint16_t fullcap0, remcap, SOCmix, dPacc, dQacc; if ( !is_history_exist ) { printf("%s: no history file exists!\n", __FUNCTION__); return 1; } DEBUG("Restoring learned full capacity\n"); err = MAX_READ(MAX17042_FullCap0, (u8*)&fullcap0); if ( err != 0 ) { DEBUG( "read err reg 0x%x\n", MAX17042_FullCap0); return err; } err =MAX_READ(MAX17042_SOCmix, (uchar*)&SOCmix); if ( err != 0 ) { DEBUG( "read err reg 0x%x\n", MAX17042_SOCmix); return err; } remcap = (uint16_t)( ((int)SOCmix * (int)fullcap0) / 25600 ); DEBUG("FullCap0=0x%04x SOCmix=0x%04x, remcap=0x%04x\n", fullcap0, SOCmix, remcap); err = max_write_verify(MAX17042_RemCapmix, (u8*)&remcap); if ( err != 0 ) { return err; } err = max_write_verify(MAX17042_FullCap,(u8*)&save_store.val_FullCAP); if ( err != 0 ) { return err; } DEBUG("FullCAP = 0x%04x\n", save_store.val_FullCAP); dQacc = (save_store.val_FullCAPNom / 4); err = max_write_verify(MAX17042_dQacc, (u8*)&dQacc); if ( err != 0 ) { return err; } dPacc = 0x1900; err = max_write_verify(MAX17042_dPacc, (u8*)&dPacc); if ( err != 0 ) { return err; } return 0; } static int max17042_restore_learned_para(void) { int err; if ( !is_history_exist ) { printf("%s: error: no history file exists!\n", __FUNCTION__); return 1; } DEBUG("Restoring learned parameters\n"); //21. 
Restore Learned Parameters err = max_write_verify( MAX17042_RCOMP0, (u8*)&save_store.val_RCOMP0 ); if ( err != 0 ) { return err; } err = max_write_verify ( MAX17042_TempCo, (u8*)&save_store.val_TempCo ); if ( err != 0 ) { return err; } err = max_write_verify( MAX17042_Iavg_empty, (u8*)&save_store.val_Iavg_empty); if ( err != 0 ) { return err; } err = max_write_verify( MAX17042_k_empty0, (u8*)&save_store.val_k_empty0 ); if ( err != 0 ) { return err; } err = max_write_verify(MAX17042_FullCAPNom, (u8*)&save_store.val_FullCAPNom); if ( err != 0 ) { return err; } //22. delay 350ms; udelay ( 350 *1000 ); //23. RestoreFullCap err = max17042_restore_fullcap(); if ( err != 0 ) { return err; } //24. delay 350ms; udelay ( 350 *1000 ); //25. restore Cycles err = max17042_set_cycles( save_store.val_Cycles ); if (err != 0 ) { DEBUG("restoring cycles failed\n"); return err; } return 0; } static int max17042_write_custom_para(void) { uint16_t buf; int err; /* * Note: This hardcoded values are specific to Encore as supplied * by Maxim via email, 16/07/2010 */ DEBUG("%s: use hardcoded values\n", __FUNCTION__); buf = 0x0080; err = max_write_verify( MAX17042_RCOMP0, (u8*)&buf ); if ( err != 0 ) { return err; } buf = 0x3670; err = max_write_verify ( MAX17042_TempCo, (u8*)&buf ); if ( err != 0 ) { DEBUG( "write verify err 0x39 \n"); return err; } buf = 0x2F2C; err = MAX_WRITE( MAX17042_Empty_TempCo, (u8*)&buf); if ( err != 0 ) { DEBUG( "write err 0x3A \n"); return err; } buf = 0x078F; err = max_write_verify( MAX17042_k_empty0, (u8*)&buf ); if ( err != 0 ) { DEBUG( "write verify err 0x3B \n"); return err; } // IchgTerm should map to 50 mA buf = (uint16_t)(0.050 * 0.01 / 0.0000015625); err = MAX_WRITE( MAX17042_ICHGTerm, (u8*)&buf ); if ( err != 0 ) { DEBUG( "write verify err reg 0x%x\n", MAX17042_ICHGTerm); return err; } DEBUG("ICHGTerm = 0x%04x\n", buf); return 0; } static int max17042_update_cap_para(void) { int err; uint16_t buf; buf = type_params->init_params->Capacity; DEBUG(" use hardcoded Capacity 0x%04x\n", buf); err = max_write_verify( MAX17042_FullCap, (u8*)&buf); if ( err != 0 ) { return err; } err = MAX_WRITE( MAX17042_DesignCap, (u8*)&buf); if ( err != 0 ) { DEBUG( "write verify err reg 0x%x\n", MAX17042_DesignCap); return err; } err = max_write_verify( MAX17042_FullCAPNom, (u8*)&buf); if ( err != 0 ) { DEBUG( "write verify err reg 0x%x\n", MAX17042_FullCAPNom); return err; } return 0; } static int max17042_write_vfsoc(void) { int err = 0; uint16_t buf; err = MAX_READ(0xFF, (u8*)&buf); if ( err != 0 ) { DEBUG( "read err 0xFF\n"); return err; } VFSOC = buf; // used in step 16 DEBUG("VFSOC = 0x%04x\n", VFSOC); buf = 0x0080; // unlock code MAX_WRITE(MAX17042_VFSOC_Unlock, (u8*)&buf); err = max_write_verify(0x48, (u8*)&VFSOC); buf = 0x0000; // lock code MAX_WRITE(MAX17042_VFSOC_Unlock, (u8*)&buf); return err; } static int max17042_load_cap_para( void ) { uint16_t buf, remcap, repcap, dq_acc, fullcap0; //16. 
MAX_READ(MAX17042_FullCap0, (u8*)&fullcap0); remcap = (uint16_t)( ((int)VFSOC * (int)fullcap0) / 25600 ); DEBUG("fullcap0=0x%04x VFSOC=0x%04x remcap=0x%04x\n", fullcap0, VFSOC, remcap); MAX_WRITE(MAX17042_RemCapmix, (u8*)&remcap); repcap = remcap; max_write_verify(MAX17042_RemCapREP, (u8*)&repcap); //write dQ_acc and dP_acc to 200% of capacity dq_acc = save_store.val_DesignCap / 4; max_write_verify(MAX17042_dQacc, (u8*)&dq_acc); buf = 0x3200; max_write_verify(MAX17042_dPacc, (u8*)&buf); max_write_verify(MAX17042_FullCap, (u8*)&save_store.val_DesignCap); MAX_WRITE(MAX17042_DesignCap, (u8*)&save_store.val_DesignCap); max_write_verify(MAX17042_FullCAPNom, (u8*)&save_store.val_DesignCap); return 0; } static batt_type get_battery_type(int load) { batt_type ret = BATT_LG; char *token = (char*) 0x81000000; token[0] = 'L'; token[1] = 'G'; if (load) { // Ignore the result of this command, if it fails it's LG battery... run_command("mmcinit 1; fatload mmc 1:4 0x81000000 devconf/BatteryType 0x40", 0); } if(('L' == token[0] || 'l' == token[0]) && ('G' == token[1] || 'g' == token[1]) ) { } else if (('M' == token[0] || 'm' == token[0]) && ('C' == token[1] || 'c' == token[1]) && ('N' == token[2] || 'n' == token[2]) && ('A' == token[3] || 'a' == token[3]) && ('I' == token[4] || 'i' == token[4]) && ('R' == token[5] || 'r' == token[5]) ) { ret = BATT_MCNAIR; } DEBUG("MAX17042+UBOOT: battery type=%s\n", (BATT_LG == ret)? "LG" : "MCNAIR"); return ret; } int max17042_init(int load) { uint16_t data; int i; static const uint16_t* bufp = (uint16_t*) 0x81000000; uint16_t* savestorep; int err, retries=2, force_por=0; uint16_t designcap; type_params = &param_table[get_battery_type(load)]; designcap = type_params->init_params->Capacity; i2c_init(100, MAX17042_ADDR); if ( MAX_READ(MAX17042_STATUS, (uchar*)&data) != 0) { DEBUG("MAX17042+UBOOT: No battery or 0V battery!\n"); return 1; } DEBUG("MAX17042+UBOOT: gas gauge detected (0x%04x)\n",data); //check if we need restore registers inside is_power_on_rst(); if ( is_power_on ) { DEBUG("MAX17042+UBOOT:POR detected!\n"); } else { DEBUG("MAX17042+UBOOT:WARM BOOT \n"); } if (load) { run_command("mmcinit 1; fatload mmc 1:5 0x81000000 max17042.bin 0x1000", 0); } if (*bufp != 0x1234 || !load) { DEBUG(" No valid max17042 init data found, assume no battery history \n"); is_history_exist = 0; } else { DEBUG(" Valid max17042 init data is loaded into memory \n"); } if ( is_history_exist == 1 ) { savestorep = (uint16_t*)&save_store; for ( i = 0; i <(sizeof(save_store) / sizeof(uint16_t)); i++) { DEBUG (" 0x%04x\n", *bufp); *savestorep++ = *bufp++; } #define MIN_CAP_AGING 25/100 // allow no less than 25% of design capacity before rejecting #define MAX_CAP_AGING 13/10 // reject history capacity if it seems overly big #define MIN_CAPNOM_AGING 25/100 // allow no less than 25% of nominal design capacity before rejecting #define MAX_CAPNOM_AGING 15/10 // reject history capacity if it seems overly big if ( (save_store.val_FullCAP < (uint16_t)(((uint32_t)designcap)*MIN_CAP_AGING)) || (save_store.val_FullCAP > (uint16_t)(((uint32_t)designcap)*MAX_CAP_AGING)) || (save_store.val_FullCAPNom < (uint16_t)(((uint32_t)designcap)*MIN_CAPNOM_AGING)) || (save_store.val_FullCAPNom > (uint16_t)(((uint32_t)designcap)*MAX_CAPNOM_AGING)) ) { printf("Resetting battery defaults due to faulty CAPACITY (0x%x, 0x%x)\n", save_store.val_FullCAP, save_store.val_FullCAPNom); force_por = 1; is_history_exist = 0; } else { DEBUG(" verify if mem loaded: FullcapNom was saved as %04x\n", save_store.val_FullCAPNom ); } // 
In case val_DesignCap in history data does not match battery's design capacity, // we should throw away the history data. if(save_store.val_DesignCap != designcap) { printf("Resetting battery defaults because Design Capacity(0x%04X) in history data" " does not match battery's Design Capacity(0x%04X)\n", save_store.val_DesignCap, designcap); force_por = 1; is_history_exist = 0; } } save_store.val_DesignCap = designcap; i2c_init(100, 0x36); //no need if ( !is_power_on ) { // when there is no history file, assume it is a POR //if ( is_history_exist && max17042_check_init_config() == 0 ) // UPDATE: if history file doesn't exist don't do a POR, if (!force_por && max17042_check_init_config() == 0 ) { DEBUG("MAX17042+UBOOT: warm config is okay\n"); return 0; } else { /* when the config is bad but it's not a POR, then something * is quite wrong. */ DEBUG("MAX17042+UBOOT: warm config bad. soft POR\n"); is_power_on = 1; max17042_soft_por(); } } //1. Delay 500ms udelay( 500 * 1000 ); MAX17042_DUMPREG( MAX17042_Version ); MAX17042_DUMPREG( MAX17042_DesignCap ); MAX17042_DUMPREG( MAX17042_OCV ); MAX17042_DUMPREG( MAX17042_FSTAT ); MAX17042_DUMPREG( MAX17042_SOCvf ); //2. Init Configuration max17042_init_config(); //3. Save starting para max17042_save_start_para(); //4. unlock model access max17042_unlock_model(); do { //5. write custom model max17042_write_model(); //6. read model //7. verify model err = max17042_read_verify_model(); } while ( err != 0 && --retries > 0 ); if ( retries == 0 ) { DEBUG( " writing model failed\n"); return err; } retries = 2; do { //8. lock model access max17042_lock_model(); //9. verify model access is locked err = max17042_verify_lock_model(); } while ( err != 0 && --retries > 0 ); if ( retries == 0 ) { DEBUG( " locking model failed\n"); return err; } //10. write custom parameters err = max17042_write_custom_para( ); if ( err != 0 ) { DEBUG("write custom parameters failed\n"); return err; } //11 update full capacity parameters err = max17042_update_cap_para( ); if ( err != 0 ) { DEBUG("update capacity parameters failed\n"); return err; } //13. delay 350ms; udelay ( 350 *1000 ); //14. write VFSOC to VFSOC0 err = max17042_write_vfsoc(); if ( err != 0 ) { DEBUG("write vfsoc failed\n"); return err; } /* 15.5 Advance to Coulomb-Counter Mode * We do this all the time. In the factory the battery is fresh (close to * design capacity), and when there is a history file we restore a known good * capacity after this, so in that case it's safe to assume we have a good estimate * as well. */
err = max17042_set_cycles( 0x00A0 ); if ( err != 0 ) { DEBUG("set cycles 0x00A0 failed\n"); return err; } err = max17042_load_cap_para( ); if ( err != 0 ) { DEBUG("load capacity parameters failed\n"); return err; } max17042_clear_POR(); if ( is_history_exist ) { err = max17042_restore_learned_para(); if ( err != 0 ) { DEBUG("restore learned parameters failed\n"); return err; } } is_power_on = 0; DEBUG("Max17042 init is done\n"); return 0; } /*get the voltage reading*/ int max17042_voltage( uint16_t* val) { int err; /*reading Vcell*/ err = MAX_READ(0x09, (u8*)val); if ( err != 0 ) { printf( "read 0x09 Vcell err\n"); return err; } else { (*val)>>=3; return 0; } } /*get the open-circuit voltage reading*/ int max17042_vfocv( uint16_t* val) { int err; /*reading VFOCV (open-circuit voltage)*/ err = MAX_READ(0xFB, (u8*)val); if ( err != 0 ) { printf( "read 0xFB open circuit v\n"); return err; } else { (*val)>>=3; return 0; } } int max17042_soc( uint16_t* val) { int err; err = MAX_READ(0x06, (u8*)val); if ( err != 0 ) { printf( "read 0x06 SOCREP err\n"); return err; } (*val) >>= 8; //upper byte is good enough return 0; } //resolution 0.0039-degree, or 3900uC int max17042_temp( uint32_t* temp) { int err; uint16_t val; err = MAX_READ(0x08, (u8*)&val); if ( err != 0 ) { printf( "read 0x08 reg(temperature) err!\n"); return err; } else { if ( val & (1<<15) ) { *temp = COMPLEMENT_VAL(val, TEMP_RESOLUTION, 1); } else { *temp = (val & 0x7FFF) * TEMP_RESOLUTION; } return err; } }
astarasikov/uboot-bn-nook-hd-fastboot
drivers/max17042.c
C
gpl-2.0
30,417
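The max17042.c record above ends with four small accessors. A consumer sketch follows; battery_report is a hypothetical name, and the unit handling is an assumption read out of the code: the VCELL register is right-shifted by 3 (0.625 mV per remaining LSB is assumed from the MAX17042 datasheet), SOC keeps only the whole-percent byte, and temperature comes back scaled by 3900 micro-degrees C per raw LSB.

#include <common.h>

extern int max17042_voltage(uint16_t *val);
extern int max17042_soc(uint16_t *val);
extern int max17042_temp(uint32_t *temp);

/* Print gauge state at boot; all three calls return 0 on success. */
void battery_report(void)
{
    uint16_t vraw, soc;
    uint32_t temp;

    if (max17042_voltage(&vraw) == 0)
        printf("vcell: %u mV\n", (unsigned)((vraw * 625) / 1000)); /* 0.625 mV/LSB */
    if (max17042_soc(&soc) == 0)
        printf("soc:   %u%%\n", (unsigned)soc);                    /* whole percent */
    if (max17042_temp(&temp) == 0)                                 /* micro-deg C   */
        printf("temp:  %lu mC\n", (unsigned long)(temp / 1000));
}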
/* * * This source code is released for free distribution under the terms of the * GNU General Public License. * * This module contains functions for generating tags for Rust files. */ /* * INCLUDE FILES */ #include "general.h" /* must always come first */ #include "main.h" #include <string.h> #include "keyword.h" #include "parse.h" #include "entry.h" #include "options.h" #include "read.h" #include "vstring.h" /* * MACROS */ #define MAX_STRING_LENGTH 64 /* * DATA DECLARATIONS */ typedef enum { K_MOD, K_STRUCT, K_TRAIT, K_IMPL, K_FN, K_ENUM, K_TYPE, K_STATIC, K_MACRO, K_FIELD, K_VARIANT, K_METHOD, K_NONE } RustKind; static kindOption rustKinds[] = { {TRUE, 'n', "namespace", "module"}, {TRUE, 's', "struct", "structural type"}, {TRUE, 'i', "interface", "trait interface"}, {TRUE, 'c', "class", "implementation"}, {TRUE, 'f', "function", "Function"}, {TRUE, 'g', "enum", "Enum"}, {TRUE, 't', "typedef", "Type Alias"}, {TRUE, 'v', "variable", "Global variable"}, {TRUE, 'M', "macro", "Macro Definition"}, {TRUE, 'm', "field", "A struct field"}, {TRUE, 'e', "enumerator", "An enum variant"}, {TRUE, 'F', "method", "A method"}, }; typedef enum { TOKEN_WHITESPACE, TOKEN_STRING, TOKEN_IDENT, TOKEN_LSHIFT, TOKEN_RSHIFT, TOKEN_RARROW, TOKEN_EOF } tokenType; typedef struct { /* Characters */ int cur_c; int next_c; /* Tokens */ int cur_token; vString* token_str; unsigned long line; MIOPos pos; } lexerState; /* * FUNCTION PROTOTYPES */ static void parseBlock (lexerState *lexer, boolean delim, int kind, vString *scope); /* * FUNCTION DEFINITIONS */ /* Resets the scope string to the old length */ static void resetScope (vString *scope, size_t old_len) { scope->length = old_len; scope->buffer[old_len] = '\0'; } /* Adds a name to the end of the scope string */ static void addToScope (vString *scope, vString *name) { if (vStringLength(scope) > 0) vStringCatS(scope, "::"); vStringCat(scope, name); } /* Write the lexer's current token to string, taking care of special tokens */ static void writeCurTokenToStr (lexerState *lexer, vString *out_str) { switch (lexer->cur_token) { case TOKEN_IDENT: vStringCat(out_str, lexer->token_str); break; case TOKEN_STRING: vStringPut(out_str, '"'); vStringCat(out_str, lexer->token_str); vStringPut(out_str, '"'); break; case TOKEN_WHITESPACE: vStringPut(out_str, ' '); break; case TOKEN_LSHIFT: vStringCatS(out_str, "<<"); break; case TOKEN_RSHIFT: vStringCatS(out_str, ">>"); break; case TOKEN_RARROW: vStringCatS(out_str, "->"); break; default: vStringPut(out_str, (char) lexer->cur_token); } } /* Reads a character from the file */ static void advanceChar (lexerState *lexer) { lexer->cur_c = lexer->next_c; lexer->next_c = fileGetc(); } /* Reads N characters from the file */ static void advanceNChar (lexerState *lexer, int n) { while (n--) advanceChar(lexer); } static boolean isWhitespace (int c) { return c == ' ' || c == '\t' || c == '\r' || c == '\n'; } static boolean isAscii (int c) { return (c >= 0) && (c < 0x80); } /* This isn't quite right for Unicode identifiers */ static boolean isIdentifierStart (int c) { return (isAscii(c) && (isalpha(c) || c == '_')) || !isAscii(c); } /* This isn't quite right for Unicode identifiers */ static boolean isIdentifierContinue (int c) { return (isAscii(c) && (isalnum(c) || c == '_')) || !isAscii(c); } static void scanWhitespace (lexerState *lexer) { while (isWhitespace(lexer->cur_c)) advanceChar(lexer); } /* Normal line comments start with two /'s and continue until the next \n * (NOT any other newline character!). 
Additionally, a shebang in the beginning * of the file also counts as a line comment. * Block comments start with / followed by a * and end with a * followed by a /. * Unlike in C/C++ they nest. */ static void scanComments (lexerState *lexer) { /* // or #! */ if (lexer->next_c == '/' || lexer->next_c == '!') { advanceNChar(lexer, 2); while (lexer->cur_c != EOF && lexer->cur_c != '\n') advanceChar(lexer); } else if (lexer->next_c == '*') { int level = 1; advanceNChar(lexer, 2); while (lexer->cur_c != EOF && level > 0) { if (lexer->cur_c == '*' && lexer->next_c == '/') { level--; advanceNChar(lexer, 2); } else if (lexer->cur_c == '/' && lexer->next_c == '*') { level++; advanceNChar(lexer, 2); } else { advanceChar(lexer); } } } } static void scanIdentifier (lexerState *lexer) { vStringClear(lexer->token_str); do { vStringPut(lexer->token_str, (char) lexer->cur_c); advanceChar(lexer); } while(lexer->cur_c != EOF && isIdentifierContinue(lexer->cur_c)); } /* Double-quoted strings, we only care about the \" escape. These * last past the end of the line, so be careful not too store too much * of them (see MAX_STRING_LENGTH). The only place we look at their * contents is in the function definitions, and there the valid strings are * things like "C" and "Rust" */ static void scanString (lexerState *lexer) { vStringClear(lexer->token_str); advanceChar(lexer); while (lexer->cur_c != EOF && lexer->cur_c != '"') { if (lexer->cur_c == '\\' && lexer->next_c == '"') advanceChar(lexer); if (vStringLength(lexer->token_str) < MAX_STRING_LENGTH) vStringPut(lexer->token_str, (char) lexer->cur_c); advanceChar(lexer); } advanceChar(lexer); } /* Raw strings look like this: r"" or r##""## where the number of * hashes must match */ static void scanRawString (lexerState *lexer) { size_t num_initial_hashes = 0; vStringClear(lexer->token_str); advanceChar(lexer); /* Count how many leading hashes there are */ while (lexer->cur_c == '#') { num_initial_hashes++; advanceChar(lexer); } if (lexer->cur_c != '"') return; advanceChar(lexer); while (lexer->cur_c != EOF) { if (vStringLength(lexer->token_str) < MAX_STRING_LENGTH) vStringPut(lexer->token_str, (char) lexer->cur_c); /* Count how many trailing hashes there are. If the number is equal or more * than the number of leading hashes, break. */ if (lexer->cur_c == '"') { size_t num_trailing_hashes = 0; advanceChar(lexer); while (lexer->cur_c == '#' && num_trailing_hashes < num_initial_hashes) { num_trailing_hashes++; if (vStringLength(lexer->token_str) < MAX_STRING_LENGTH) vStringPut(lexer->token_str, (char) lexer->cur_c); advanceChar(lexer); } if (num_trailing_hashes == num_initial_hashes) { /* Strip the trailing hashes and quotes */ if (vStringLength(lexer->token_str) < MAX_STRING_LENGTH && vStringLength(lexer->token_str) > num_trailing_hashes + 1) { lexer->token_str->length = vStringLength(lexer->token_str) - num_trailing_hashes - 1; lexer->token_str->buffer[lexer->token_str->length] = '\0'; } break; } } else { advanceChar(lexer); } } } /* Advances the parser one token, optionally skipping whitespace * (otherwise it is concatenated and returned as a single whitespace token). * Whitespace is needed to properly render function signatures. Unrecognized * token starts are stored literally, e.g. token may equal to a character '#'. 
*/ static int advanceToken (lexerState *lexer, boolean skip_whitspace) { boolean have_whitespace = FALSE; lexer->line = getSourceLineNumber(); lexer->pos = getInputFilePosition(); while (lexer->cur_c != EOF) { if (isWhitespace(lexer->cur_c)) { scanWhitespace(lexer); have_whitespace = TRUE; } else if (lexer->cur_c == '/' && (lexer->next_c == '/' || lexer->next_c == '*')) { scanComments(lexer); have_whitespace = TRUE; } else { if (have_whitespace && !skip_whitspace) return lexer->cur_token = TOKEN_WHITESPACE; break; } } lexer->line = getSourceLineNumber(); lexer->pos = getInputFilePosition(); while (lexer->cur_c != EOF) { if (lexer->cur_c == '"') { scanString(lexer); return lexer->cur_token = TOKEN_STRING; } else if (lexer->cur_c == 'r' && (lexer->next_c == '#' || lexer->next_c == '"')) { scanRawString(lexer); return lexer->cur_token = TOKEN_STRING; } else if (isIdentifierStart(lexer->cur_c)) { scanIdentifier(lexer); return lexer->cur_token = TOKEN_IDENT; } /* These shift tokens aren't too important for tag-generation per se, * but they confuse the skipUntil code which tracks the <> pairs. */ else if (lexer->cur_c == '>' && lexer->next_c == '>') { advanceNChar(lexer, 2); return lexer->cur_token = TOKEN_RSHIFT; } else if (lexer->cur_c == '<' && lexer->next_c == '<') { advanceNChar(lexer, 2); return lexer->cur_token = TOKEN_LSHIFT; } else if (lexer->cur_c == '-' && lexer->next_c == '>') { advanceNChar(lexer, 2); return lexer->cur_token = TOKEN_RARROW; } else { int c = lexer->cur_c; advanceChar(lexer); return lexer->cur_token = c; } } return lexer->cur_token = TOKEN_EOF; } static void initLexer (lexerState *lexer) { advanceNChar(lexer, 2); lexer->token_str = vStringNew(); if (lexer->cur_c == '#' && lexer->next_c == '!') scanComments(lexer); advanceToken(lexer, TRUE); } static void deInitLexer (lexerState *lexer) { vStringDelete(lexer->token_str); lexer->token_str = NULL; } static void addTag (vString* ident, const char* type, const char* arg_list, int kind, unsigned long line, MIOPos pos, vString *scope, int parent_kind) { if (kind == K_NONE) return; tagEntryInfo tag; initTagEntry(&tag, ident->buffer); tag.lineNumber = line; tag.filePosition = pos; tag.sourceFileName = getSourceFileName(); tag.kindName = rustKinds[kind].name; tag.kind = rustKinds[kind].letter; tag.extensionFields.arglist = arg_list; tag.extensionFields.varType = type; if (parent_kind != K_NONE) { tag.extensionFields.scope[0] = rustKinds[parent_kind].name; tag.extensionFields.scope[1] = scope->buffer; } makeTagEntry(&tag); } /* Skip tokens until one of the goal tokens is hit. Escapes when level = 0 if there are no goal tokens. 
* Keeps track of balanced <>'s, ()'s, []'s, and {}'s and ignores the goal tokens within those pairings */ static void skipUntil (lexerState *lexer, int goal_tokens[], int num_goal_tokens) { int angle_level = 0; int paren_level = 0; int brace_level = 0; int bracket_level = 0; while (lexer->cur_token != TOKEN_EOF) { if (angle_level == 0 && paren_level == 0 && brace_level == 0 && bracket_level == 0) { int ii = 0; for(ii = 0; ii < num_goal_tokens; ii++) { if (lexer->cur_token == goal_tokens[ii]) { break; } } if (ii < num_goal_tokens) break; } switch (lexer->cur_token) { case '<': angle_level++; break; case '(': paren_level++; break; case '{': brace_level++; break; case '[': bracket_level++; break; case '>': angle_level--; break; case ')': paren_level--; break; case '}': brace_level--; break; case ']': bracket_level--; break; case TOKEN_RSHIFT: if (angle_level >= 2) angle_level -= 2; break; /* TOKEN_LSHIFT is never interpreted as two <'s in valid Rust code */ default: break; } /* Has to be after the token switch to catch the case when we start with the initial level token */ if (num_goal_tokens == 0 && angle_level == 0 && paren_level == 0 && brace_level == 0 && bracket_level == 0) break; advanceToken(lexer, TRUE); } } /* Function format: * "fn" <ident>[<type_bounds>] "(" [<args>] ")" ["->" <ret_type>] "{" [<body>] "}"*/ static void parseFn (lexerState *lexer, vString *scope, int parent_kind) { int kind = (parent_kind == K_TRAIT || parent_kind == K_IMPL) ? K_METHOD : K_FN; vString *name; vString *arg_list; unsigned long line; MIOPos pos; int paren_level = 0; boolean found_paren = FALSE; boolean valid_signature = TRUE; advanceToken(lexer, TRUE); if (lexer->cur_token != TOKEN_IDENT) return; name = vStringNewCopy(lexer->token_str); arg_list = vStringNew(); line = lexer->line; pos = lexer->pos; advanceToken(lexer, TRUE); /* HACK: This is a bit coarse as far as what tag entry means by * 'arglist'... 
*/ while (lexer->cur_token != '{' && lexer->cur_token != ';') { if (lexer->cur_token == '}') { valid_signature = FALSE; break; } else if (lexer->cur_token == '(') { found_paren = TRUE; paren_level++; } else if (lexer->cur_token == ')') { paren_level--; if (paren_level < 0) { valid_signature = FALSE; break; } } else if (lexer->cur_token == TOKEN_EOF) { valid_signature = FALSE; break; } writeCurTokenToStr(lexer, arg_list); advanceToken(lexer, FALSE); } if (!found_paren || paren_level != 0) valid_signature = FALSE; if (valid_signature) { vStringStripTrailing(arg_list); addTag(name, NULL, arg_list->buffer, kind, line, pos, scope, parent_kind); addToScope(scope, name); parseBlock(lexer, TRUE, kind, scope); } vStringDelete(name); vStringDelete(arg_list); } /* Mod format: * "mod" <ident> "{" [<body>] "}" * "mod" <ident> ";"*/ static void parseMod (lexerState *lexer, vString *scope, int parent_kind) { advanceToken(lexer, TRUE); if (lexer->cur_token != TOKEN_IDENT) return; addTag(lexer->token_str, NULL, NULL, K_MOD, lexer->line, lexer->pos, scope, parent_kind); addToScope(scope, lexer->token_str); advanceToken(lexer, TRUE); parseBlock(lexer, TRUE, K_MOD, scope); } /* Trait format: * "trait" <ident> [<type_bounds>] "{" [<body>] "}" */ static void parseTrait (lexerState *lexer, vString *scope, int parent_kind) { int goal_tokens[] = {'{'}; advanceToken(lexer, TRUE); if (lexer->cur_token != TOKEN_IDENT) return; addTag(lexer->token_str, NULL, NULL, K_TRAIT, lexer->line, lexer->pos, scope, parent_kind); addToScope(scope, lexer->token_str); advanceToken(lexer, TRUE); skipUntil(lexer, goal_tokens, 1); parseBlock(lexer, TRUE, K_TRAIT, scope); } /* Skips type blocks of the form <T:T<T>, ...> */ static void skipTypeBlock (lexerState *lexer) { if (lexer->cur_token == '<') { skipUntil(lexer, NULL, 0); advanceToken(lexer, TRUE); } } /* Essentially grabs the last ident before 'for', '<' and '{', which * tends to correspond to what we want as the impl tag entry name */ static void parseQualifiedType (lexerState *lexer, vString* name) { while (lexer->cur_token != TOKEN_EOF) { if (lexer->cur_token == TOKEN_IDENT) { if (strcmp(lexer->token_str->buffer, "for") == 0) break; vStringClear(name); vStringCat(name, lexer->token_str); } else if (lexer->cur_token == '<' || lexer->cur_token == '{') { break; } advanceToken(lexer, TRUE); } skipTypeBlock(lexer); } /* Impl format: * "impl" [<type_bounds>] <qualified_ident>[<type_bounds>] ["for" <qualified_ident>[<type_bounds>]] "{" [<body>] "}" */ static void parseImpl (lexerState *lexer, vString *scope, int parent_kind) { unsigned long line; MIOPos pos; vString *name; advanceToken(lexer, TRUE); line = lexer->line; pos = lexer->pos; skipTypeBlock(lexer); name = vStringNew(); parseQualifiedType(lexer, name); if (lexer->cur_token == TOKEN_IDENT && strcmp(lexer->token_str->buffer, "for") == 0) { advanceToken(lexer, TRUE); parseQualifiedType(lexer, name); } addTag(name, NULL, NULL, K_IMPL, line, pos, scope, parent_kind); addToScope(scope, name); parseBlock(lexer, TRUE, K_IMPL, scope); vStringDelete(name); } /* Static format: * "static" ["mut"] <ident> */ static void parseStatic (lexerState *lexer, vString *scope, int parent_kind) { advanceToken(lexer, TRUE); if (lexer->cur_token != TOKEN_IDENT) return; if (strcmp(lexer->token_str->buffer, "mut") == 0) { advanceToken(lexer, TRUE); } if (lexer->cur_token != TOKEN_IDENT) return; addTag(lexer->token_str, NULL, NULL, K_STATIC, lexer->line, lexer->pos, scope, parent_kind); } /* Type format: * "type" <ident> */ static void parseType 
(lexerState *lexer, vString *scope, int parent_kind) { advanceToken(lexer, TRUE); if (lexer->cur_token != TOKEN_IDENT) return; addTag(lexer->token_str, NULL, NULL, K_TYPE, lexer->line, lexer->pos, scope, parent_kind); } /* Structs and enums are very similar syntax-wise. * It is possible to parse variants a bit more cleverly (e.g. make tuple variants functions and * struct variants structs) but it'd be too clever and the signature wouldn't make too much sense without * the enum's definition (e.g. for the type bounds) * * Struct/Enum format: * "struct/enum" <ident>[<type_bounds>] "{" [<ident>,]+ "}" * "struct/enum" <ident>[<type_bounds>] ";" * */ static void parseStructOrEnum (lexerState *lexer, vString *scope, int parent_kind, boolean is_struct) { int kind = is_struct ? K_STRUCT : K_ENUM; int field_kind = is_struct ? K_FIELD : K_VARIANT; int goal_tokens1[] = {';', '{'}; advanceToken(lexer, TRUE); if (lexer->cur_token != TOKEN_IDENT) return; addTag(lexer->token_str, NULL, NULL, kind, lexer->line, lexer->pos, scope, parent_kind); addToScope(scope, lexer->token_str); skipUntil(lexer, goal_tokens1, 2); if (lexer->cur_token == '{') { vString *field_name = vStringNew(); while (lexer->cur_token != TOKEN_EOF) { int goal_tokens2[] = {'}', ','}; /* Skip attributes. Format: * #[..] or #![..] * */ if (lexer->cur_token == '#') { advanceToken(lexer, TRUE); if (lexer->cur_token == '!') advanceToken(lexer, TRUE); if (lexer->cur_token == '[') { /* It's an attribute, skip it. */ skipUntil(lexer, NULL, 0); } else { /* Something's up with this field, skip to the next one */ skipUntil(lexer, goal_tokens2, 2); continue; } } if (lexer->cur_token == TOKEN_IDENT) { if (strcmp(lexer->token_str->buffer, "priv") == 0 || strcmp(lexer->token_str->buffer, "pub") == 0) { advanceToken(lexer, TRUE); if (lexer->cur_token != TOKEN_IDENT) { /* Something's up with this field, skip to the next one */ skipUntil(lexer, goal_tokens2, 2); continue; } } vStringClear(field_name); vStringCat(field_name, lexer->token_str); addTag(field_name, NULL, NULL, field_kind, lexer->line, lexer->pos, scope, kind); skipUntil(lexer, goal_tokens2, 2); } if (lexer->cur_token == '}') { advanceToken(lexer, TRUE); break; } advanceToken(lexer, TRUE); } vStringDelete(field_name); } } /* Skip the body of the macro. Can't use skipUntil here as * the body of the macro may have arbitrary code which confuses it (e.g. * bitshift operators/function return arrows) */ static void skipMacro (lexerState *lexer) { int level = 0; int plus_token = 0; int minus_token = 0; advanceToken(lexer, TRUE); switch (lexer->cur_token) { case '(': plus_token = '('; minus_token = ')'; break; case '{': plus_token = '{'; minus_token = '}'; break; case '[': plus_token = '['; minus_token = ']'; break; default: return; } while (lexer->cur_token != TOKEN_EOF) { if (lexer->cur_token == plus_token) level++; else if (lexer->cur_token == minus_token) level--; if (level == 0) break; advanceToken(lexer, TRUE); } advanceToken(lexer, TRUE); } /* * Macro rules format: * "macro_rules" "!" 
<ident> <macro_body> */ static void parseMacroRules (lexerState *lexer, vString *scope, int parent_kind) { advanceToken(lexer, TRUE); if (lexer->cur_token != '!') return; advanceToken(lexer, TRUE); if (lexer->cur_token != TOKEN_IDENT) return; addTag(lexer->token_str, NULL, NULL, K_MACRO, lexer->line, lexer->pos, scope, parent_kind); skipMacro(lexer); } /* * Rust is very liberal with nesting, so this function is used pretty much for any block */ static void parseBlock (lexerState *lexer, boolean delim, int kind, vString *scope) { int level = 1; if (delim) { if (lexer->cur_token != '{') return; advanceToken(lexer, TRUE); } while (lexer->cur_token != TOKEN_EOF) { if (lexer->cur_token == TOKEN_IDENT) { size_t old_scope_len = vStringLength(scope); if (strcmp(lexer->token_str->buffer, "fn") == 0) { parseFn(lexer, scope, kind); } else if(strcmp(lexer->token_str->buffer, "mod") == 0) { parseMod(lexer, scope, kind); } else if(strcmp(lexer->token_str->buffer, "static") == 0) { parseStatic(lexer, scope, kind); } else if(strcmp(lexer->token_str->buffer, "trait") == 0) { parseTrait(lexer, scope, kind); } else if(strcmp(lexer->token_str->buffer, "type") == 0) { parseType(lexer, scope, kind); } else if(strcmp(lexer->token_str->buffer, "impl") == 0) { parseImpl(lexer, scope, kind); } else if(strcmp(lexer->token_str->buffer, "struct") == 0) { parseStructOrEnum(lexer, scope, kind, TRUE); } else if(strcmp(lexer->token_str->buffer, "enum") == 0) { parseStructOrEnum(lexer, scope, kind, FALSE); } else if(strcmp(lexer->token_str->buffer, "macro_rules") == 0) { parseMacroRules(lexer, scope, kind); } else { advanceToken(lexer, TRUE); if (lexer->cur_token == '!') { skipMacro(lexer); } } resetScope(scope, old_scope_len); } else if (lexer->cur_token == '{') { level++; advanceToken(lexer, TRUE); } else if (lexer->cur_token == '}') { level--; advanceToken(lexer, TRUE); } else if (lexer->cur_token == '\'') { /* Skip over the 'static lifetime, as it confuses the static parser above */ advanceToken(lexer, TRUE); if (lexer->cur_token == TOKEN_IDENT && strcmp(lexer->token_str->buffer, "static") == 0) advanceToken(lexer, TRUE); } else { advanceToken(lexer, TRUE); } if (delim && level <= 0) break; } } static void findRustTags (void) { lexerState lexer; vString* scope = vStringNew(); initLexer(&lexer); parseBlock(&lexer, FALSE, K_NONE, scope); vStringDelete(scope); deInitLexer(&lexer); } extern parserDefinition *RustParser (void) { static const char *const extensions[] = { "rs", NULL }; parserDefinition *def = parserNew ("Rust"); def->kinds = rustKinds; def->kindCount = KIND_COUNT (rustKinds); def->extensions = extensions; def->parser = findRustTags; return def; }
TheGameCreators/AGKIDE
tagmanager/ctags/rust.c
C
gpl-2.0
22,209
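Editor's note: the skipUntil() routine in the Rust parser above leans on one trick worth isolating: it counts all four bracket kinds in parallel and treats a '>>' shift token as closing two angle-bracket levels at once, so generic types like Vec<Vec<T>> stay balanced. Below is a minimal standalone sketch of that idea, not code from rust.c itself; the TOK_RSHIFT value, the function name, and the toy driver are hypothetical stand-ins for the parser's lexer.

#include <stdio.h>

#define TOK_RSHIFT 256 /* stands in for the parser's TOKEN_RSHIFT */

/* Returns the index just past the point where all pairings are balanced,
 * mirroring the "no goal tokens, level back to 0" exit in skipUntil().
 * The balance check runs after each token, so an opening token on entry
 * is counted before the first test. */
static size_t skip_balanced(const int *tokens, size_t n)
{
    int angle = 0, paren = 0, brace = 0, bracket = 0;
    for (size_t i = 0; i < n; i++)
    {
        switch (tokens[i])
        {
            case '<': angle++;   break;
            case '>': angle--;   break;
            case '(': paren++;   break;
            case ')': paren--;   break;
            case '{': brace++;   break;
            case '}': brace--;   break;
            case '[': bracket++; break;
            case ']': bracket--; break;
            case TOK_RSHIFT: if (angle >= 2) angle -= 2; break; /* ">>" closes two <'s */
            default: break;
        }
        if (angle == 0 && paren == 0 && brace == 0 && bracket == 0)
            return i + 1;
    }
    return n;
}

int main(void)
{
    /* Tokens for "<<T>>", i.e. the tail of Vec<Vec<T>> with ">>" lexed as one token */
    int toks[] = { '<', '<', 'T', TOK_RSHIFT };
    printf("balanced after %zu tokens\n", skip_balanced(toks, 4)); /* prints 4 */
    return 0;
}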
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <string.h> #include <math.h> #include "vmi.h" #include "cbigint.h" #include "harmonyglob.h" #include "exceptions.h" #if defined(LINUX) || defined(FREEBSD) || defined(ZOS) || defined(MACOSX) || defined(AIX) #define USE_LL #endif #define LOW_I32_FROM_VAR(u64) LOW_I32_FROM_LONG64(u64) #define LOW_I32_FROM_PTR(u64ptr) LOW_I32_FROM_LONG64_PTR(u64ptr) #define HIGH_I32_FROM_VAR(u64) HIGH_I32_FROM_LONG64(u64) #define HIGH_I32_FROM_PTR(u64ptr) HIGH_I32_FROM_LONG64_PTR(u64ptr) #define MAX_ACCURACY_WIDTH 17 #define DEFAULT_WIDTH MAX_ACCURACY_WIDTH JNIEXPORT jdouble JNICALL Java_org_apache_harmony_luni_util_FloatingPointParser_parseDblImpl (JNIEnv * env, jclass clazz, jstring s, jint e); JNIEXPORT void JNICALL Java_org_apache_harmony_luni_util_NumberConverter_bigIntDigitGeneratorInstImpl (JNIEnv * env, jobject inst, jlong f, jint e, jboolean isDenormalized, jboolean mantissaIsZero, jint p); jdouble createDouble (JNIEnv * env, const char *s, jint e); jdouble createDouble1 (JNIEnv * env, U_64 * f, IDATA length, jint e); jdouble doubleAlgorithm (JNIEnv * env, U_64 * f, IDATA length, jint e, jdouble z); U_64 dblparse_shiftRight64 (U_64 * lp, volatile int mbe); static const jdouble tens[] = { 1.0, 1.0e1, 1.0e2, 1.0e3, 1.0e4, 1.0e5, 1.0e6, 1.0e7, 1.0e8, 1.0e9, 1.0e10, 1.0e11, 1.0e12, 1.0e13, 1.0e14, 1.0e15, 1.0e16, 1.0e17, 1.0e18, 1.0e19, 1.0e20, 1.0e21, 1.0e22 }; #define tenToTheE(e) (*(tens + (e))) #define LOG5_OF_TWO_TO_THE_N 23 #define INV_LOG_OF_TEN_BASE_2 (0.30102999566398114) #define DOUBLE_MIN_VALUE 5.0e-324 #define sizeOfTenToTheE(e) (((e) / 19) + 1) #if defined(USE_LL) #define INFINITE_LONGBITS (0x7FF0000000000000LL) #else #if defined(USE_L) #define INFINITE_LONGBITS (0x7FF0000000000000L) #else #define INFINITE_LONGBITS (0x7FF0000000000000) #endif /* USE_L */ #endif /* USE_LL */ #define MINIMUM_LONGBITS (0x1) #if defined(USE_LL) #define MANTISSA_MASK (0x000FFFFFFFFFFFFFLL) #define EXPONENT_MASK (0x7FF0000000000000LL) #define NORMAL_MASK (0x0010000000000000LL) #else #if defined(USE_L) #define MANTISSA_MASK (0x000FFFFFFFFFFFFFL) #define EXPONENT_MASK (0x7FF0000000000000L) #define NORMAL_MASK (0x0010000000000000L) #else #define MANTISSA_MASK (0x000FFFFFFFFFFFFF) #define EXPONENT_MASK (0x7FF0000000000000) #define NORMAL_MASK (0x0010000000000000) #endif /* USE_L */ #endif /* USE_LL */ #define DOUBLE_TO_LONGBITS(dbl) (*((U_64 *)(&dbl))) /* Keep a count of the number of times we decrement and increment to * approximate the double, and attempt to detect the case where we * could potentially toggle back and forth between decrementing and * incrementing. It is possible for us to be stuck in the loop when * incrementing by one or decrementing by one may exceed or stay below * the value that we are looking for. 
In this case, just break out of * the loop if we toggle between incrementing and decrementing for more * than twice. */ #define INCREMENT_DOUBLE(_x, _decCount, _incCount) \ { \ ++DOUBLE_TO_LONGBITS(_x); \ _incCount++; \ if( (_incCount > 2) && (_decCount > 2) ) { \ if( _decCount > _incCount ) { \ DOUBLE_TO_LONGBITS(_x) += _decCount - _incCount; \ } else if( _incCount > _decCount ) { \ DOUBLE_TO_LONGBITS(_x) -= _incCount - _decCount; \ } \ break; \ } \ } #define DECREMENT_DOUBLE(_x, _decCount, _incCount) \ { \ --DOUBLE_TO_LONGBITS(_x); \ _decCount++; \ if( (_incCount > 2) && (_decCount > 2) ) { \ if( _decCount > _incCount ) { \ DOUBLE_TO_LONGBITS(_x) += _decCount - _incCount; \ } else if( _incCount > _decCount ) { \ DOUBLE_TO_LONGBITS(_x) -= _incCount - _decCount; \ } \ break; \ } \ } #define ERROR_OCCURED(x) (HIGH_I32_FROM_VAR(x) < 0) #define allocateU64(x, n) if (!((x) = (U_64*) hymem_allocate_memory((n) * sizeof(U_64)))) goto OutOfMemory; #define release(r) if ((r)) hymem_free_memory((r)); /*NB the Number converter methods are synchronized so it is possible to *have global data for use by bigIntDigitGenerator */ #define RM_SIZE 21 #define STemp_SIZE 22 jdouble createDouble (JNIEnv * env, const char *s, jint e) { /* assumes s is a null terminated string with at least one * character in it */ U_64 def[DEFAULT_WIDTH]; U_64 defBackup[DEFAULT_WIDTH]; U_64 *f, *fNoOverflow, *g, *tempBackup; U_32 overflow; jdouble result; IDATA index = 1; int unprocessedDigits = 0; f = def; fNoOverflow = defBackup; *f = 0; tempBackup = g = 0; do { if (*s >= '0' && *s <= '9') { /* Make a back up of f before appending, so that we can * back out of it if there is no more room, i.e. index > * MAX_ACCURACY_WIDTH. */ memcpy (fNoOverflow, f, sizeof (U_64) * index); overflow = simpleAppendDecimalDigitHighPrecision (f, index, *s - '0'); if (overflow) { f[index++] = overflow; /* There is an overflow, but there is no more room * to store the result. We really only need the top 52 * bits anyway, so we must back out of the overflow, * and ignore the rest of the string. */ if (index >= MAX_ACCURACY_WIDTH) { index--; memcpy (f, fNoOverflow, sizeof (U_64) * index); break; } if (tempBackup) { fNoOverflow = tempBackup; } } } else index = -1; } while (index > 0 && *(++s) != '\0'); /* We've broken out of the parse loop either because we've reached * the end of the string or we've overflowed the maximum accuracy * limit of a double. If we still have unprocessed digits in the * given string, then there are three possible results: * 1. (unprocessed digits + e) == 0, in which case we simply * convert the existing bits that are already parsed * 2. (unprocessed digits + e) < 0, in which case we simply * convert the existing bits that are already parsed along * with the given e * 3. 
(unprocessed digits + e) > 0 indicates that the value is * simply too big to be stored as a double, so return Infinity */ if ((unprocessedDigits = strlen (s)) > 0) { e += unprocessedDigits; if (index > -1) { if (e == 0) result = toDoubleHighPrecision (f, index); else if (e < 0) result = createDouble1 (env, f, index, e); else { DOUBLE_TO_LONGBITS (result) = INFINITE_LONGBITS; } } else { LOW_I32_FROM_VAR (result) = -1; HIGH_I32_FROM_VAR (result) = -1; } } else { if (index > -1) { if (e == 0) result = toDoubleHighPrecision (f, index); else result = createDouble1 (env, f, index, e); } else { LOW_I32_FROM_VAR (result) = -1; HIGH_I32_FROM_VAR (result) = -1; } } return result; } jdouble createDouble1 (JNIEnv * env, U_64 * f, IDATA length, jint e) { IDATA numBits; jdouble result; #define APPROX_MIN_MAGNITUDE -309 #define APPROX_MAX_MAGNITUDE 309 numBits = highestSetBitHighPrecision (f, length) + 1; numBits -= lowestSetBitHighPrecision (f, length); if (numBits < 54 && e >= 0 && e < LOG5_OF_TWO_TO_THE_N) { return toDoubleHighPrecision (f, length) * tenToTheE (e); } else if (numBits < 54 && e < 0 && (-e) < LOG5_OF_TWO_TO_THE_N) { return toDoubleHighPrecision (f, length) / tenToTheE (-e); } else if (e >= 0 && e < APPROX_MAX_MAGNITUDE) { result = toDoubleHighPrecision (f, length) * pow (10.0, (double) e); } else if (e >= APPROX_MAX_MAGNITUDE) { /* Convert the partial result to make sure that the * non-exponential part is not zero. This check fixes the case * where the user enters 0.0e309! */ result = toDoubleHighPrecision (f, length); /* Don't go straight to zero as the fact that x*0 = 0 independent of x might cause the algorithm to produce an incorrect result. Instead try the min value first and let it fall to zero if need be. */ if (result == 0.0) DOUBLE_TO_LONGBITS (result) = MINIMUM_LONGBITS; else DOUBLE_TO_LONGBITS (result) = INFINITE_LONGBITS; } else if (e > APPROX_MIN_MAGNITUDE) { result = toDoubleHighPrecision (f, length) / pow (10.0, (double) -e); } if (e <= APPROX_MIN_MAGNITUDE) { result = toDoubleHighPrecision (f, length) * pow (10.0, (double) (e + 52)); result = result * pow (10.0, (double) -52); } /* Don't go straight to zero as the fact that x*0 = 0 independent of x might cause the algorithm to produce an incorrect result. Instead try the min value first and let it fall to zero if need be. */ if (result == 0.0) DOUBLE_TO_LONGBITS (result) = MINIMUM_LONGBITS; return doubleAlgorithm (env, f, length, e, result); } U_64 dblparse_shiftRight64 (U_64 * lp, volatile int mbe) { U_64 b1Value = 0; U_32 hi = HIGH_U32_FROM_LONG64_PTR (lp); U_32 lo = LOW_U32_FROM_LONG64_PTR (lp); int srAmt; if (mbe == 0) return 0; if (mbe >= 128) { HIGH_U32_FROM_LONG64_PTR (lp) = 0; LOW_U32_FROM_LONG64_PTR (lp) = 0; return 0; } /* Certain platforms do not handle de-referencing a 64-bit value * from a pointer on the stack correctly (e.g. MVL-hh/XScale) * because the pointer may not be properly aligned, so we'll have * to handle two 32-bit chunks. 
*/ if (mbe < 32) { LOW_U32_FROM_LONG64 (b1Value) = 0; HIGH_U32_FROM_LONG64 (b1Value) = lo << (32 - mbe); LOW_U32_FROM_LONG64_PTR (lp) = (hi << (32 - mbe)) | (lo >> mbe); HIGH_U32_FROM_LONG64_PTR (lp) = hi >> mbe; } else if (mbe == 32) { LOW_U32_FROM_LONG64 (b1Value) = 0; HIGH_U32_FROM_LONG64 (b1Value) = lo; LOW_U32_FROM_LONG64_PTR (lp) = hi; HIGH_U32_FROM_LONG64_PTR (lp) = 0; } else if (mbe < 64) { srAmt = mbe - 32; LOW_U32_FROM_LONG64 (b1Value) = lo << (32 - srAmt); HIGH_U32_FROM_LONG64 (b1Value) = (hi << (32 - srAmt)) | (lo >> srAmt); LOW_U32_FROM_LONG64_PTR (lp) = hi >> srAmt; HIGH_U32_FROM_LONG64_PTR (lp) = 0; } else if (mbe == 64) { LOW_U32_FROM_LONG64 (b1Value) = lo; HIGH_U32_FROM_LONG64 (b1Value) = hi; LOW_U32_FROM_LONG64_PTR (lp) = 0; HIGH_U32_FROM_LONG64_PTR (lp) = 0; } else if (mbe < 96) { srAmt = mbe - 64; b1Value = *lp; HIGH_U32_FROM_LONG64_PTR (lp) = 0; LOW_U32_FROM_LONG64_PTR (lp) = 0; LOW_U32_FROM_LONG64 (b1Value) >>= srAmt; LOW_U32_FROM_LONG64 (b1Value) |= (hi << (32 - srAmt)); HIGH_U32_FROM_LONG64 (b1Value) >>= srAmt; } else if (mbe == 96) { LOW_U32_FROM_LONG64 (b1Value) = hi; HIGH_U32_FROM_LONG64 (b1Value) = 0; HIGH_U32_FROM_LONG64_PTR (lp) = 0; LOW_U32_FROM_LONG64_PTR (lp) = 0; } else { LOW_U32_FROM_LONG64 (b1Value) = hi >> (mbe - 96); HIGH_U32_FROM_LONG64 (b1Value) = 0; HIGH_U32_FROM_LONG64_PTR (lp) = 0; LOW_U32_FROM_LONG64_PTR (lp) = 0; } return b1Value; } #if defined(WIN32) /* disable global optimizations on the microsoft compiler for the * doubleAlgorithm function otherwise it won't compile */ #pragma optimize("g",off) #endif /* The algorithm for the function doubleAlgorithm() below can be found * in: * * "How to Read Floating-Point Numbers Accurately", William D. * Clinger, Proceedings of the ACM SIGPLAN '90 Conference on * Programming Language Design and Implementation, June 20-22, * 1990, pp. 92-101. * * There is a possibility that the function will end up in an endless * loop if the given approximating floating-point number (a very small * floating-point whose value is very close to zero) straddles between * two approximating integer values. We modified the algorithm slightly * to detect the case where it oscillates back and forth between * incrementing and decrementing the floating-point approximation. It * is currently set such that if the oscillation occurs more than twice * then return the original approximation. 
*/ jdouble doubleAlgorithm (JNIEnv * env, U_64 * f, IDATA length, jint e, jdouble z) { U_64 m; IDATA k, comparison, comparison2; U_64 *x, *y, *D, *D2; IDATA xLength, yLength, DLength, D2Length, decApproxCount, incApproxCount; PORT_ACCESS_FROM_ENV (env); x = y = D = D2 = 0; xLength = yLength = DLength = D2Length = 0; decApproxCount = incApproxCount = 0; do { m = doubleMantissa (z); k = doubleExponent (z); if (x && x != f) jclmem_free_memory (env, x); release (y); release (D); release (D2); if (e >= 0 && k >= 0) { xLength = sizeOfTenToTheE (e) + length; allocateU64 (x, xLength); memset (x + length, 0, sizeof (U_64) * (xLength - length)); memcpy (x, f, sizeof (U_64) * length); timesTenToTheEHighPrecision (x, xLength, e); yLength = (k >> 6) + 2; allocateU64 (y, yLength); memset (y + 1, 0, sizeof (U_64) * (yLength - 1)); *y = m; simpleShiftLeftHighPrecision (y, yLength, k); } else if (e >= 0) { xLength = sizeOfTenToTheE (e) + length + ((-k) >> 6) + 1; allocateU64 (x, xLength); memset (x + length, 0, sizeof (U_64) * (xLength - length)); memcpy (x, f, sizeof (U_64) * length); timesTenToTheEHighPrecision (x, xLength, e); simpleShiftLeftHighPrecision (x, xLength, -k); yLength = 1; allocateU64 (y, 1); *y = m; } else if (k >= 0) { xLength = length; x = f; yLength = sizeOfTenToTheE (-e) + 2 + (k >> 6); allocateU64 (y, yLength); memset (y + 1, 0, sizeof (U_64) * (yLength - 1)); *y = m; timesTenToTheEHighPrecision (y, yLength, -e); simpleShiftLeftHighPrecision (y, yLength, k); } else { xLength = length + ((-k) >> 6) + 1; allocateU64 (x, xLength); memset (x + length, 0, sizeof (U_64) * (xLength - length)); memcpy (x, f, sizeof (U_64) * length); simpleShiftLeftHighPrecision (x, xLength, -k); yLength = sizeOfTenToTheE (-e) + 1; allocateU64 (y, yLength); memset (y + 1, 0, sizeof (U_64) * (yLength - 1)); *y = m; timesTenToTheEHighPrecision (y, yLength, -e); } comparison = compareHighPrecision (x, xLength, y, yLength); if (comparison > 0) { /* x > y */ DLength = xLength; allocateU64 (D, DLength); memcpy (D, x, DLength * sizeof (U_64)); subtractHighPrecision (D, DLength, y, yLength); } else if (comparison) { /* y > x */ DLength = yLength; allocateU64 (D, DLength); memcpy (D, y, DLength * sizeof (U_64)); subtractHighPrecision (D, DLength, x, xLength); } else { /* y == x */ DLength = 1; allocateU64 (D, 1); *D = 0; } D2Length = DLength + 1; allocateU64 (D2, D2Length); m <<= 1; multiplyHighPrecision (D, DLength, &m, 1, D2, D2Length); m >>= 1; comparison2 = compareHighPrecision (D2, D2Length, y, yLength); if (comparison2 < 0) { if (comparison < 0 && m == NORMAL_MASK) { simpleShiftLeftHighPrecision (D2, D2Length, 1); if (compareHighPrecision (D2, D2Length, y, yLength) > 0) { DECREMENT_DOUBLE (z, decApproxCount, incApproxCount); } else { break; } } else { break; } } else if (comparison2 == 0) { if ((LOW_U32_FROM_VAR (m) & 1) == 0) { if (comparison < 0 && m == NORMAL_MASK) { DECREMENT_DOUBLE (z, decApproxCount, incApproxCount); } else { break; } } else if (comparison < 0) { DECREMENT_DOUBLE (z, decApproxCount, incApproxCount); break; } else { INCREMENT_DOUBLE (z, decApproxCount, incApproxCount); break; } } else if (comparison < 0) { DECREMENT_DOUBLE (z, decApproxCount, incApproxCount); } else { if (DOUBLE_TO_LONGBITS (z) == INFINITE_LONGBITS) break; INCREMENT_DOUBLE (z, decApproxCount, incApproxCount); } } while (1); if (x && x != f) jclmem_free_memory (env, x); release (y); release (D); release (D2); return z; OutOfMemory: if (x && x != f) jclmem_free_memory (env, x); release (y); release (D); release (D2); 
DOUBLE_TO_LONGBITS (z) = -2; return z; } #if defined(WIN32) #pragma optimize("",on) /*restore optimizations */ #endif JNIEXPORT jdouble JNICALL Java_org_apache_harmony_luni_util_FloatingPointParser_parseDblImpl (JNIEnv * env, jclass clazz, jstring s, jint e) { jdouble dbl; const char *str = (*env)->GetStringUTFChars (env, s, 0); dbl = createDouble (env, str, e); (*env)->ReleaseStringUTFChars (env, s, str); if (!ERROR_OCCURED (dbl)) { return dbl; } else if (LOW_I32_FROM_VAR (dbl) == (I_32) - 1) { /* NumberFormatException */ throwNewExceptionByName(env, "java/lang/NumberFormatException", ""); } else { /* OutOfMemoryError */ throwNewOutOfMemoryError(env, ""); } return 0.0; } /* The algorithm for this particular function can be found in: * * Printing Floating-Point Numbers Quickly and Accurately, Robert * G. Burger, and R. Kent Dybvig, Programming Language Design and * Implementation (PLDI) 1996, pp.108-116. * * The previous implementation of this function combined m+ and m- into * one single M which caused some inaccuracy of the last digit. The * particular case below shows this inaccuracy: * * System.out.println(new Double((1.234123412431233E107)).toString()); * System.out.println(new Double((1.2341234124312331E107)).toString()); * System.out.println(new Double((1.2341234124312332E107)).toString()); * * outputs the following: * * 1.234123412431233E107 * 1.234123412431233E107 * 1.234123412431233E107 * * instead of: * * 1.234123412431233E107 * 1.2341234124312331E107 * 1.2341234124312331E107 * */ JNIEXPORT void JNICALL Java_org_apache_harmony_luni_util_NumberConverter_bigIntDigitGeneratorInstImpl (JNIEnv * env, jobject inst, jlong f, jint e, jboolean isDenormalized, jboolean mantissaIsZero, jint p) { int RLength, SLength, TempLength, mplus_Length, mminus_Length; int high, low, i; jint k, firstK, U; jint getCount, setCount; jint *uArray; jclass clazz; jfieldID fid; jintArray uArrayObject; U_64 R[RM_SIZE], S[STemp_SIZE], mplus[RM_SIZE], mminus[RM_SIZE], Temp[STemp_SIZE]; memset (R, 0, RM_SIZE * sizeof (U_64)); memset (S, 0, STemp_SIZE * sizeof (U_64)); memset (mplus, 0, RM_SIZE * sizeof (U_64)); memset (mminus, 0, RM_SIZE * sizeof (U_64)); memset (Temp, 0, STemp_SIZE * sizeof (U_64)); if (e >= 0) { *R = f; *mplus = *mminus = 1; simpleShiftLeftHighPrecision (mminus, RM_SIZE, e); if (f != (2 << (p - 1))) { simpleShiftLeftHighPrecision (R, RM_SIZE, e + 1); *S = 2; /* * m+ = m+ << e results in 1.0e23 to be printed as * 0.9999999999999999E23 * m+ = m+ << e+1 results in 1.0e23 to be printed as * 1.0e23 (caused too much rounding) * 470fffffffffffff = 2.0769187434139308E34 * 4710000000000000 = 2.076918743413931E34 */ simpleShiftLeftHighPrecision (mplus, RM_SIZE, e); } else { simpleShiftLeftHighPrecision (R, RM_SIZE, e + 2); *S = 4; simpleShiftLeftHighPrecision (mplus, RM_SIZE, e + 1); } } else { if (isDenormalized || (f != (2 << (p - 1)))) { *R = f << 1; *S = 1; simpleShiftLeftHighPrecision (S, STemp_SIZE, 1 - e); *mplus = *mminus = 1; } else { *R = f << 2; *S = 1; simpleShiftLeftHighPrecision (S, STemp_SIZE, 2 - e); *mplus = 2; *mminus = 1; } } k = (int) ceil ((e + p - 1) * INV_LOG_OF_TEN_BASE_2 - 1e-10); if (k > 0) { timesTenToTheEHighPrecision (S, STemp_SIZE, k); } else { timesTenToTheEHighPrecision (R, RM_SIZE, -k); timesTenToTheEHighPrecision (mplus, RM_SIZE, -k); timesTenToTheEHighPrecision (mminus, RM_SIZE, -k); } RLength = mplus_Length = mminus_Length = RM_SIZE; SLength = TempLength = STemp_SIZE; memset (Temp + RM_SIZE, 0, (STemp_SIZE - RM_SIZE) * sizeof (U_64)); memcpy (Temp, R, RM_SIZE * sizeof 
(U_64)); while (RLength > 1 && R[RLength - 1] == 0) --RLength; while (mplus_Length > 1 && mplus[mplus_Length - 1] == 0) --mplus_Length; while (mminus_Length > 1 && mminus[mminus_Length - 1] == 0) --mminus_Length; while (SLength > 1 && S[SLength - 1] == 0) --SLength; TempLength = (RLength > mplus_Length ? RLength : mplus_Length) + 1; addHighPrecision (Temp, TempLength, mplus, mplus_Length); if (compareHighPrecision (Temp, TempLength, S, SLength) >= 0) { firstK = k; } else { firstK = k - 1; simpleAppendDecimalDigitHighPrecision (R, ++RLength, 0); simpleAppendDecimalDigitHighPrecision (mplus, ++mplus_Length, 0); simpleAppendDecimalDigitHighPrecision (mminus, ++mminus_Length, 0); while (RLength > 1 && R[RLength - 1] == 0) --RLength; while (mplus_Length > 1 && mplus[mplus_Length - 1] == 0) --mplus_Length; while (mminus_Length > 1 && mminus[mminus_Length - 1] == 0) --mminus_Length; } clazz = (*env)->GetObjectClass (env, inst); fid = (*env)->GetFieldID (env, clazz, "uArray", "[I"); uArrayObject = (jintArray) (*env)->GetObjectField (env, inst, fid); uArray = (*env)->GetIntArrayElements (env, uArrayObject, 0); getCount = setCount = 0; do { U = 0; for (i = 3; i >= 0; --i) { TempLength = SLength + 1; Temp[SLength] = 0; memcpy (Temp, S, SLength * sizeof (U_64)); simpleShiftLeftHighPrecision (Temp, TempLength, i); if (compareHighPrecision (R, RLength, Temp, TempLength) >= 0) { subtractHighPrecision (R, RLength, Temp, TempLength); U += 1 << i; } } low = compareHighPrecision (R, RLength, mminus, mminus_Length) <= 0; memset (Temp + RLength, 0, (STemp_SIZE - RLength) * sizeof (U_64)); memcpy (Temp, R, RLength * sizeof (U_64)); TempLength = (RLength > mplus_Length ? RLength : mplus_Length) + 1; addHighPrecision (Temp, TempLength, mplus, mplus_Length); high = compareHighPrecision (Temp, TempLength, S, SLength) >= 0; if (low || high) break; simpleAppendDecimalDigitHighPrecision (R, ++RLength, 0); simpleAppendDecimalDigitHighPrecision (mplus, ++mplus_Length, 0); simpleAppendDecimalDigitHighPrecision (mminus, ++mminus_Length, 0); while (RLength > 1 && R[RLength - 1] == 0) --RLength; while (mplus_Length > 1 && mplus[mplus_Length - 1] == 0) --mplus_Length; while (mminus_Length > 1 && mminus[mminus_Length - 1] == 0) --mminus_Length; uArray[setCount++] = U; } while (1); simpleShiftLeftHighPrecision (R, ++RLength, 1); if (low && !high) uArray[setCount++] = U; else if (high && !low) uArray[setCount++] = U + 1; else if (compareHighPrecision (R, RLength, S, SLength) < 0) uArray[setCount++] = U; else uArray[setCount++] = U + 1; (*env)->ReleaseIntArrayElements (env, uArrayObject, uArray, 0); fid = (*env)->GetFieldID (env, clazz, "setCount", "I"); (*env)->SetIntField (env, inst, fid, setCount); fid = (*env)->GetFieldID (env, clazz, "getCount", "I"); (*env)->SetIntField (env, inst, fid, getCount); fid = (*env)->GetFieldID (env, clazz, "firstK", "I"); (*env)->SetIntField (env, inst, fid, firstK); }
skyHALud/codenameone
Ports/iOSPort/xmlvm/apache-harmony-6.0-src-r991881/classlib/modules/luni/src/main/native/luni/shared/dblparse.c
C
gpl-2.0
28,372
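Editor's note: the Harmony parser above refines its floating-point approximation by stepping the raw bit pattern of the double: DOUBLE_TO_LONGBITS casts &dbl to a U_64 pointer, and the INCREMENT_DOUBLE/DECREMENT_DOUBLE macros move to the adjacent representable values. The following self-contained sketch shows the same technique; it is not part of dblparse.c, assumes C99 and IEEE-754 doubles, and uses memcpy instead of the original pointer cast to avoid strict-aliasing trouble. For positive finite inputs this matches what nextafter() toward plus or minus infinity would produce.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Step a double to its neighboring representable value by incrementing or
 * decrementing its 64-bit image (valid for positive finite, nonzero inputs). */
static double step_double(double d, int up)
{
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);      /* reinterpret without aliasing issues */
    bits = up ? bits + 1 : bits - 1;     /* adjacent bit patterns are adjacent doubles */
    memcpy(&d, &bits, sizeof d);
    return d;
}

int main(void)
{
    double z = 1.0;
    printf("%.17g -> up %.17g, down %.17g\n",
           z, step_double(z, 1), step_double(z, 0));
    return 0;
}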
/** ****************************************************************************** * @file fatfs.c * @brief Code for fatfs applications ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2020 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under Ultimate Liberty license * SLA0044, the "License"; You may not use this file except in compliance with * the License. You may obtain a copy of the License at: * www.st.com/SLA0044 * ****************************************************************************** */ #include "fatfs.h" uint8_t retSD; /* Return value for SD */ char SDPath[4]; /* SD logical drive path */ FATFS SDFatFS; /* File system object for SD logical drive */ FIL SDFile; /* File object for SD */ /* USER CODE BEGIN Variables */ /* USER CODE END Variables */ void MX_FATFS_Init(void) { /*## FatFS: Link the SD driver ###########################*/ retSD = FATFS_LinkDriver(&SD_Driver, SDPath); /* USER CODE BEGIN Init */ /* additional user code for init */ /* USER CODE END Init */ } /** * @brief Gets Time from RTC * @param None * @retval Time in DWORD */ DWORD get_fattime(void) { /* USER CODE BEGIN get_fattime */ return 0; /* USER CODE END get_fattime */ } /* USER CODE BEGIN Application */ /* USER CODE END Application */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
cafe-alpha/wascafe
v13/stm32_bup_test/r07c_firm_F446RE/Src/fatfs.c
C
gpl-2.0
1,568
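Editor's note: in the fatfs.c stub above, get_fattime() returns 0, so FatFs stamps every file with a zero timestamp. As a hedged illustration, the sketch below packs a fixed, hypothetical date into the DWORD layout the FatFs documentation specifies for this callback; a real port would instead read the STM32 RTC (e.g. via HAL_RTC_GetTime/HAL_RTC_GetDate) before packing. The sketch is standalone and uses uint32_t where FatFs typedefs DWORD.

#include <stdint.h>

/* Pack 2020-06-15 12:30:40 into the FAT timestamp layout:
 * bits 31:25 year-1980, 24:21 month, 20:16 day,
 * bits 15:11 hour, 10:5 minute, 4:0 second/2.
 * Function name and date are hypothetical examples. */
static uint32_t example_fattime(void)
{
    return ((uint32_t)(2020 - 1980) << 25)   /* year since 1980 */
         | ((uint32_t)6  << 21)              /* month, 1..12    */
         | ((uint32_t)15 << 16)              /* day, 1..31      */
         | ((uint32_t)12 << 11)              /* hour, 0..23     */
         | ((uint32_t)30 << 5)               /* minute, 0..59   */
         | ((uint32_t)(40 / 2));             /* seconds / 2     */
}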
/* MMIX-specific support for 64-bit ELF. Copyright 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. Contributed by Hans-Peter Nilsson <hp@bitrange.com> This file is part of BFD, the Binary File Descriptor library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* No specific ABI or "processor-specific supplement" defined. */ /* TODO: - "Traditional" linker relaxation (shrinking whole sections). - Merge reloc stubs jumping to same location. - GETA stub relaxation (call a stub for out of range new R_MMIX_GETA_STUBBABLE). */ #include "bfd.h" #include "sysdep.h" #include "libbfd.h" #include "elf-bfd.h" #include "elf/mmix.h" #include "opcode/mmix.h" #define MINUS_ONE (((bfd_vma) 0) - 1) #define MAX_PUSHJ_STUB_SIZE (5 * 4) /* Put these everywhere in new code. */ #define FATAL_DEBUG \ _bfd_abort (__FILE__, __LINE__, \ "Internal: Non-debugged code (test-case missing)") #define BAD_CASE(x) \ _bfd_abort (__FILE__, __LINE__, \ "bad case for " #x) struct _mmix_elf_section_data { struct bfd_elf_section_data elf; union { struct bpo_reloc_section_info *reloc; struct bpo_greg_section_info *greg; } bpo; struct pushj_stub_info { /* Maximum number of stubs needed for this section. */ bfd_size_type n_pushj_relocs; /* Size of stubs after a mmix_elf_relax_section round. */ bfd_size_type stubs_size_sum; /* Per-reloc stubs_size_sum information. The stubs_size_sum member is the sum of these. Allocated in mmix_elf_check_common_relocs. */ bfd_size_type *stub_size; /* Offset of next stub during relocation. Somewhat redundant with the above: error coverage is easier and we don't have to reset the stubs_size_sum for relocation. */ bfd_size_type stub_offset; } pjs; }; #define mmix_elf_section_data(sec) \ ((struct _mmix_elf_section_data *) elf_section_data (sec)) /* For each section containing a base-plus-offset (BPO) reloc, we attach this struct as mmix_elf_section_data (section)->bpo, which is otherwise NULL. */ struct bpo_reloc_section_info { /* The base is 1; this is the first number in this section. */ size_t first_base_plus_offset_reloc; /* Number of BPO-relocs in this section. */ size_t n_bpo_relocs_this_section; /* Running index, used at relocation time. */ size_t bpo_index; /* We don't have access to the bfd_link_info struct in mmix_final_link_relocate. What we really want to get at is the global single struct greg_relocation, so we stash it here. */ asection *bpo_greg_section; }; /* Helper struct (in global context) for the one below. There's one of these created for every BPO reloc. */ struct bpo_reloc_request { bfd_vma value; /* Valid after relaxation. The base is 0; the first register number must be added. The offset is in range 0..255. */ size_t regindex; size_t offset; /* The order number for this BPO reloc, corresponding to the order in which BPO relocs were found. Used to create an index after reloc requests are sorted. 
*/ size_t bpo_reloc_no; /* Set when the value is computed. Better than coding "guard values" into the other members. Is FALSE only for BPO relocs in a GC:ed section. */ bfd_boolean valid; }; /* We attach this as mmix_elf_section_data (sec)->bpo in the linker-allocated greg contents section (MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME), which is linked into the register contents section (MMIX_REG_CONTENTS_SECTION_NAME). This section is created by the linker; using the same hook as for usual with BPO relocs does not collide. */ struct bpo_greg_section_info { /* After GC, this reflects the number of remaining, non-excluded BPO-relocs. */ size_t n_bpo_relocs; /* This is the number of allocated bpo_reloc_requests; the size of sorted_indexes. Valid after the check.*relocs functions are called for all incoming sections. It includes the number of BPO relocs in sections that were GC:ed. */ size_t n_max_bpo_relocs; /* A counter used to find out when to fold the BPO gregs, since we don't have a single "after-relaxation" hook. */ size_t n_remaining_bpo_relocs_this_relaxation_round; /* The number of linker-allocated GREGs resulting from BPO relocs. This is an approximation after _bfd_mmix_before_linker_allocation and supposedly accurate after mmix_elf_relax_section is called for all incoming non-collected sections. */ size_t n_allocated_bpo_gregs; /* Index into reloc_request[], sorted on increasing "value", secondary by increasing index for strict sorting order. */ size_t *bpo_reloc_indexes; /* An array of all relocations, with the "value" member filled in by the relaxation function. */ struct bpo_reloc_request *reloc_request; }; static bfd_boolean mmix_elf_link_output_symbol_hook PARAMS ((struct bfd_link_info *, const char *, Elf_Internal_Sym *, asection *, struct elf_link_hash_entry *)); static bfd_reloc_status_type mmix_elf_reloc PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **)); static reloc_howto_type *bfd_elf64_bfd_reloc_type_lookup PARAMS ((bfd *, bfd_reloc_code_real_type)); static void mmix_info_to_howto_rela PARAMS ((bfd *, arelent *, Elf_Internal_Rela *)); static int mmix_elf_sort_relocs PARAMS ((const PTR, const PTR)); static bfd_boolean mmix_elf_new_section_hook PARAMS ((bfd *, asection *)); static bfd_boolean mmix_elf_check_relocs PARAMS ((bfd *, struct bfd_link_info *, asection *, const Elf_Internal_Rela *)); static bfd_boolean mmix_elf_check_common_relocs PARAMS ((bfd *, struct bfd_link_info *, asection *, const Elf_Internal_Rela *)); static bfd_boolean mmix_elf_relocate_section PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *, Elf_Internal_Rela *, Elf_Internal_Sym *, asection **)); static asection * mmix_elf_gc_mark_hook PARAMS ((asection *, struct bfd_link_info *, Elf_Internal_Rela *, struct elf_link_hash_entry *, Elf_Internal_Sym *)); static bfd_boolean mmix_elf_gc_sweep_hook PARAMS ((bfd *, struct bfd_link_info *, asection *, const Elf_Internal_Rela *)); static bfd_reloc_status_type mmix_final_link_relocate PARAMS ((reloc_howto_type *, asection *, bfd_byte *, bfd_vma, bfd_signed_vma, bfd_vma, const char *, asection *)); static bfd_reloc_status_type mmix_elf_perform_relocation PARAMS ((asection *, reloc_howto_type *, PTR, bfd_vma, bfd_vma)); static bfd_boolean mmix_elf_section_from_bfd_section PARAMS ((bfd *, asection *, int *)); static bfd_boolean mmix_elf_add_symbol_hook PARAMS ((bfd *, struct bfd_link_info *, Elf_Internal_Sym *, const char **, flagword *, asection **, bfd_vma *)); static bfd_boolean mmix_elf_is_local_label_name PARAMS 
((bfd *, const char *)); static int bpo_reloc_request_sort_fn PARAMS ((const PTR, const PTR)); static bfd_boolean mmix_elf_relax_section PARAMS ((bfd *abfd, asection *sec, struct bfd_link_info *link_info, bfd_boolean *again)); extern bfd_boolean mmix_elf_final_link PARAMS ((bfd *, struct bfd_link_info *)); extern void mmix_elf_symbol_processing PARAMS ((bfd *, asymbol *)); /* Only intended to be called from a debugger. */ extern void mmix_dump_bpo_gregs PARAMS ((struct bfd_link_info *, bfd_error_handler_type)); static void mmix_set_relaxable_size PARAMS ((bfd *, asection *, void *)); /* Watch out: this currently needs to have elements with the same index as their R_MMIX_ number. */ static reloc_howto_type elf_mmix_howto_table[] = { /* This reloc does nothing. */ HOWTO (R_MMIX_NONE, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 32, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_NONE", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0, /* dst_mask */ FALSE), /* pcrel_offset */ /* An 8 bit absolute relocation. */ HOWTO (R_MMIX_8, /* type */ 0, /* rightshift */ 0, /* size (0 = byte, 1 = short, 2 = long) */ 8, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_8", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0xff, /* dst_mask */ FALSE), /* pcrel_offset */ /* An 16 bit absolute relocation. */ HOWTO (R_MMIX_16, /* type */ 0, /* rightshift */ 1, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_16", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0xffff, /* dst_mask */ FALSE), /* pcrel_offset */ /* An 24 bit absolute relocation. */ HOWTO (R_MMIX_24, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 24, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_24", /* name */ FALSE, /* partial_inplace */ ~0xffffff, /* src_mask */ 0xffffff, /* dst_mask */ FALSE), /* pcrel_offset */ /* A 32 bit absolute relocation. */ HOWTO (R_MMIX_32, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 32, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_32", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0xffffffff, /* dst_mask */ FALSE), /* pcrel_offset */ /* 64 bit relocation. */ HOWTO (R_MMIX_64, /* type */ 0, /* rightshift */ 4, /* size (0 = byte, 1 = short, 2 = long) */ 64, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_64", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ MINUS_ONE, /* dst_mask */ FALSE), /* pcrel_offset */ /* An 8 bit PC-relative relocation. 
*/ HOWTO (R_MMIX_PC_8, /* type */ 0, /* rightshift */ 0, /* size (0 = byte, 1 = short, 2 = long) */ 8, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_PC_8", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0xff, /* dst_mask */ TRUE), /* pcrel_offset */ /* An 16 bit PC-relative relocation. */ HOWTO (R_MMIX_PC_16, /* type */ 0, /* rightshift */ 1, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_PC_16", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0xffff, /* dst_mask */ TRUE), /* pcrel_offset */ /* An 24 bit PC-relative relocation. */ HOWTO (R_MMIX_PC_24, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 24, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_PC_24", /* name */ FALSE, /* partial_inplace */ ~0xffffff, /* src_mask */ 0xffffff, /* dst_mask */ TRUE), /* pcrel_offset */ /* A 32 bit absolute PC-relative relocation. */ HOWTO (R_MMIX_PC_32, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 32, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_PC_32", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0xffffffff, /* dst_mask */ TRUE), /* pcrel_offset */ /* 64 bit PC-relative relocation. */ HOWTO (R_MMIX_PC_64, /* type */ 0, /* rightshift */ 4, /* size (0 = byte, 1 = short, 2 = long) */ 64, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_MMIX_PC_64", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ MINUS_ONE, /* dst_mask */ TRUE), /* pcrel_offset */ /* GNU extension to record C++ vtable hierarchy. */ HOWTO (R_MMIX_GNU_VTINHERIT, /* type */ 0, /* rightshift */ 0, /* size (0 = byte, 1 = short, 2 = long) */ 0, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont, /* complain_on_overflow */ NULL, /* special_function */ "R_MMIX_GNU_VTINHERIT", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0, /* dst_mask */ TRUE), /* pcrel_offset */ /* GNU extension to record C++ vtable member usage. */ HOWTO (R_MMIX_GNU_VTENTRY, /* type */ 0, /* rightshift */ 0, /* size (0 = byte, 1 = short, 2 = long) */ 0, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont, /* complain_on_overflow */ _bfd_elf_rel_vtable_reloc_fn, /* special_function */ "R_MMIX_GNU_VTENTRY", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0, /* dst_mask */ FALSE), /* pcrel_offset */ /* The GETA relocation is supposed to get any address that could possibly be reached by the GETA instruction. It can silently expand to get a 64-bit operand, but will complain if any of the two least significant bits are set. The howto members reflect a simple GETA. 
*/ HOWTO (R_MMIX_GETA, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_GETA", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_GETA_1, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_GETA_1", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_GETA_2, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_GETA_2", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_GETA_3, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_GETA_3", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ /* The conditional branches are supposed to reach any (code) address. It can silently expand to a 64-bit operand, but will emit an error if any of the two least significant bits are set. The howto members reflect a simple branch. */ HOWTO (R_MMIX_CBRANCH, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_CBRANCH", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_CBRANCH_J, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_CBRANCH_J", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_CBRANCH_1, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_CBRANCH_1", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_CBRANCH_2, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_CBRANCH_2", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_CBRANCH_3, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* 
complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_CBRANCH_3", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ /* The PUSHJ instruction can reach any (code) address, as long as it's the beginning of a function (no usable restriction). It can silently expand to a 64-bit operand, but will emit an error if any of the two least significant bits are set. It can also expand into a call to a stub; see R_MMIX_PUSHJ_STUBBABLE. The howto members reflect a simple PUSHJ. */ HOWTO (R_MMIX_PUSHJ, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_PUSHJ", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_PUSHJ_1, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_PUSHJ_1", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_PUSHJ_2, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_PUSHJ_2", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_PUSHJ_3, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_PUSHJ_3", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ /* A JMP is supposed to reach any (code) address. By itself, it can reach +-64M; the expansion can reach all 64 bits. Note that the 64M limit is soon reached if you link the program in wildly different memory segments. The howto members reflect a trivial JMP. 
*/ HOWTO (R_MMIX_JMP, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 27, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_JMP", /* name */ FALSE, /* partial_inplace */ ~0x1ffffff, /* src_mask */ 0x1ffffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_JMP_1, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 27, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_JMP_1", /* name */ FALSE, /* partial_inplace */ ~0x1ffffff, /* src_mask */ 0x1ffffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_JMP_2, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 27, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_JMP_2", /* name */ FALSE, /* partial_inplace */ ~0x1ffffff, /* src_mask */ 0x1ffffff, /* dst_mask */ TRUE), /* pcrel_offset */ HOWTO (R_MMIX_JMP_3, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 27, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_JMP_3", /* name */ FALSE, /* partial_inplace */ ~0x1ffffff, /* src_mask */ 0x1ffffff, /* dst_mask */ TRUE), /* pcrel_offset */ /* When we don't emit link-time-relaxable code from the assembler, or when relaxation has done all it can do, these relocs are used. For GETA/PUSHJ/branches. */ HOWTO (R_MMIX_ADDR19, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_ADDR19", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE), /* pcrel_offset */ /* For JMP. */ HOWTO (R_MMIX_ADDR27, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 27, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_ADDR27", /* name */ FALSE, /* partial_inplace */ ~0x1ffffff, /* src_mask */ 0x1ffffff, /* dst_mask */ TRUE), /* pcrel_offset */ /* A general register or the value 0..255. If a value, then the instruction (offset -3) needs adjusting. */ HOWTO (R_MMIX_REG_OR_BYTE, /* type */ 0, /* rightshift */ 1, /* size (0 = byte, 1 = short, 2 = long) */ 8, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_REG_OR_BYTE", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0xff, /* dst_mask */ FALSE), /* pcrel_offset */ /* A general register. */ HOWTO (R_MMIX_REG, /* type */ 0, /* rightshift */ 1, /* size (0 = byte, 1 = short, 2 = long) */ 8, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_REG", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0xff, /* dst_mask */ FALSE), /* pcrel_offset */ /* A register plus an index, corresponding to the relocation expression. 
The sizes must correspond to the valid range of the expression, while the bitmasks correspond to what we store in the image. */ HOWTO (R_MMIX_BASE_PLUS_OFFSET, /* type */ 0, /* rightshift */ 4, /* size (0 = byte, 1 = short, 2 = long) */ 64, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_BASE_PLUS_OFFSET", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0xffff, /* dst_mask */ FALSE), /* pcrel_offset */ /* A "magic" relocation for a LOCAL expression, asserting that the expression is less than the number of global registers. No actual modification of the contents is done. Implementing this as a relocation was less intrusive than e.g. putting such expressions in a section to discard *after* relocation. */ HOWTO (R_MMIX_LOCAL, /* type */ 0, /* rightshift */ 0, /* size (0 = byte, 1 = short, 2 = long) */ 0, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_LOCAL", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0, /* dst_mask */ FALSE), /* pcrel_offset */ HOWTO (R_MMIX_PUSHJ_STUBBABLE, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ mmix_elf_reloc, /* special_function */ "R_MMIX_PUSHJ_STUBBABLE", /* name */ FALSE, /* partial_inplace */ ~0x0100ffff, /* src_mask */ 0x0100ffff, /* dst_mask */ TRUE) /* pcrel_offset */ }; /* Map BFD reloc types to MMIX ELF reloc types. */ struct mmix_reloc_map { bfd_reloc_code_real_type bfd_reloc_val; enum elf_mmix_reloc_type elf_reloc_val; }; static const struct mmix_reloc_map mmix_reloc_map[] = { {BFD_RELOC_NONE, R_MMIX_NONE}, {BFD_RELOC_8, R_MMIX_8}, {BFD_RELOC_16, R_MMIX_16}, {BFD_RELOC_24, R_MMIX_24}, {BFD_RELOC_32, R_MMIX_32}, {BFD_RELOC_64, R_MMIX_64}, {BFD_RELOC_8_PCREL, R_MMIX_PC_8}, {BFD_RELOC_16_PCREL, R_MMIX_PC_16}, {BFD_RELOC_24_PCREL, R_MMIX_PC_24}, {BFD_RELOC_32_PCREL, R_MMIX_PC_32}, {BFD_RELOC_64_PCREL, R_MMIX_PC_64}, {BFD_RELOC_VTABLE_INHERIT, R_MMIX_GNU_VTINHERIT}, {BFD_RELOC_VTABLE_ENTRY, R_MMIX_GNU_VTENTRY}, {BFD_RELOC_MMIX_GETA, R_MMIX_GETA}, {BFD_RELOC_MMIX_CBRANCH, R_MMIX_CBRANCH}, {BFD_RELOC_MMIX_PUSHJ, R_MMIX_PUSHJ}, {BFD_RELOC_MMIX_JMP, R_MMIX_JMP}, {BFD_RELOC_MMIX_ADDR19, R_MMIX_ADDR19}, {BFD_RELOC_MMIX_ADDR27, R_MMIX_ADDR27}, {BFD_RELOC_MMIX_REG_OR_BYTE, R_MMIX_REG_OR_BYTE}, {BFD_RELOC_MMIX_REG, R_MMIX_REG}, {BFD_RELOC_MMIX_BASE_PLUS_OFFSET, R_MMIX_BASE_PLUS_OFFSET}, {BFD_RELOC_MMIX_LOCAL, R_MMIX_LOCAL}, {BFD_RELOC_MMIX_PUSHJ_STUBBABLE, R_MMIX_PUSHJ_STUBBABLE} }; static reloc_howto_type * bfd_elf64_bfd_reloc_type_lookup (abfd, code) bfd *abfd ATTRIBUTE_UNUSED; bfd_reloc_code_real_type code; { unsigned int i; for (i = 0; i < sizeof (mmix_reloc_map) / sizeof (mmix_reloc_map[0]); i++) { if (mmix_reloc_map[i].bfd_reloc_val == code) return &elf_mmix_howto_table[mmix_reloc_map[i].elf_reloc_val]; } return NULL; } static bfd_boolean mmix_elf_new_section_hook (abfd, sec) bfd *abfd; asection *sec; { struct _mmix_elf_section_data *sdata; bfd_size_type amt = sizeof (*sdata); sdata = (struct _mmix_elf_section_data *) bfd_zalloc (abfd, amt); if (sdata == NULL) return FALSE; sec->used_by_bfd = (PTR) sdata; return _bfd_elf_new_section_hook (abfd, sec); } /* This function performs the actual bitfiddling and sanity check for a final relocation. 
Each relocation gets its *worst*-case expansion in size when it arrives here; any reduction in size should have been caught in linker relaxation earlier. When we get here, the relocation looks like the smallest instruction with SWYM:s (nop:s) appended to the max size. We fill in those nop:s. R_MMIX_GETA: (FIXME: Relaxation should break this up in 1, 2, 3 tetra) GETA $N,foo -> SETL $N,foo & 0xffff INCML $N,(foo >> 16) & 0xffff INCMH $N,(foo >> 32) & 0xffff INCH $N,(foo >> 48) & 0xffff R_MMIX_CBRANCH: (FIXME: Relaxation should break this up, but condbranches needing relaxation might be rare enough to not be worthwhile.) [P]Bcc $N,foo -> [~P]B~cc $N,.+20 SETL $255,foo & ... INCML ... INCMH ... INCH ... GO $255,$255,0 R_MMIX_PUSHJ: (FIXME: Relaxation...) PUSHJ $N,foo -> SETL $255,foo & ... INCML ... INCMH ... INCH ... PUSHGO $N,$255,0 R_MMIX_JMP: (FIXME: Relaxation...) JMP foo -> SETL $255,foo & ... INCML ... INCMH ... INCH ... GO $255,$255,0 R_MMIX_ADDR19 and R_MMIX_ADDR27 are just filled in. */ static bfd_reloc_status_type mmix_elf_perform_relocation (isec, howto, datap, addr, value) asection *isec; reloc_howto_type *howto; PTR datap; bfd_vma addr; bfd_vma value; { bfd *abfd = isec->owner; bfd_reloc_status_type flag = bfd_reloc_ok; bfd_reloc_status_type r; int offs = 0; int reg = 255; /* The worst case bits are all similar SETL/INCML/INCMH/INCH sequences. We handle the differences here and the common sequence later. */ switch (howto->type) { case R_MMIX_GETA: offs = 0; reg = bfd_get_8 (abfd, (bfd_byte *) datap + 1); /* We change to an absolute value. */ value += addr; break; case R_MMIX_CBRANCH: { int in1 = bfd_get_16 (abfd, (bfd_byte *) datap) << 16; /* Invert the condition and prediction bit, and set the offset to five instructions ahead. We *can* do better if we want to. If the branch is found to be within limits, we could leave the branch as is; there'll just be a bunch of NOP:s after it. But we shouldn't see this sequence often enough that it's worth doing it. */ bfd_put_32 (abfd, (((in1 ^ ((PRED_INV_BIT | COND_INV_BIT) << 24)) & ~0xffff) | (24/4)), (bfd_byte *) datap); /* Put a "GO $255,$255,0" after the common sequence. */ bfd_put_32 (abfd, ((GO_INSN_BYTE | IMM_OFFSET_BIT) << 24) | 0xffff00, (bfd_byte *) datap + 20); /* Common sequence starts at offset 4. */ offs = 4; /* We change to an absolute value. */ value += addr; } break; case R_MMIX_PUSHJ_STUBBABLE: /* If the address fits, we're fine. */ if ((value & 3) == 0 /* Note rightshift 0; see R_MMIX_JMP case below. */ && (r = bfd_check_overflow (complain_overflow_signed, howto->bitsize, 0, bfd_arch_bits_per_address (abfd), value)) == bfd_reloc_ok) goto pcrel_mmix_reloc_fits; else { bfd_size_type size = isec->rawsize ? isec->rawsize : isec->size; /* We have the bytes at the PUSHJ insn and need to get the position for the stub. There's supposed to be room allocated for the stub. */ bfd_byte *stubcontents = ((bfd_byte *) datap - (addr - (isec->output_section->vma + isec->output_offset)) + size + mmix_elf_section_data (isec)->pjs.stub_offset); bfd_vma stubaddr; /* The address doesn't fit, so redirect the PUSHJ to the location of the stub. 
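     (Concretely: the stub emitted below is either a single JMP, when the
     target is within the 27-bit JMP range, or the common
     SETL/INCML/INCMH/INCH sequence with a GO at byte offset 16,
     MAX_PUSHJ_STUB_SIZE bytes in all.)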
*/
	  r = mmix_elf_perform_relocation (isec,
					   &elf_mmix_howto_table[R_MMIX_ADDR19],
					   datap, addr,
					   isec->output_section->vma
					   + isec->output_offset
					   + size
					   + (mmix_elf_section_data (isec)
					      ->pjs.stub_offset)
					   - addr);
	  if (r != bfd_reloc_ok)
	    return r;

	  stubaddr = (isec->output_section->vma
		      + isec->output_offset
		      + size
		      + mmix_elf_section_data (isec)->pjs.stub_offset);

	  /* We generate a simple JMP if that suffices, else the whole 5
	     insn stub.  */
	  if (bfd_check_overflow (complain_overflow_signed,
				  elf_mmix_howto_table[R_MMIX_ADDR27].bitsize,
				  0,
				  bfd_arch_bits_per_address (abfd),
				  addr + value - stubaddr) == bfd_reloc_ok)
	    {
	      bfd_put_32 (abfd, JMP_INSN_BYTE << 24, stubcontents);
	      r = mmix_elf_perform_relocation (isec,
					       &elf_mmix_howto_table[R_MMIX_ADDR27],
					       stubcontents,
					       stubaddr,
					       value + addr - stubaddr);
	      mmix_elf_section_data (isec)->pjs.stub_offset += 4;

	      if (size + mmix_elf_section_data (isec)->pjs.stub_offset
		  > isec->size)
		abort ();

	      return r;
	    }
	  else
	    {
	      /* Put a "GO $255,0" after the common sequence.  */
	      bfd_put_32 (abfd,
			  ((GO_INSN_BYTE | IMM_OFFSET_BIT) << 24)
			  | 0xff00, (bfd_byte *) stubcontents + 16);

	      /* Prepare for the general code to set the first part of the
		 linker stub, and account for the stub size.  */
	      value += addr;
	      datap = stubcontents;
	      mmix_elf_section_data (isec)->pjs.stub_offset
		+= MAX_PUSHJ_STUB_SIZE;
	    }
	}
      break;

    case R_MMIX_PUSHJ:
      {
	int inreg = bfd_get_8 (abfd, (bfd_byte *) datap + 1);

	/* Put a "PUSHGO $N,$255,0" after the common sequence.  */
	bfd_put_32 (abfd,
		    ((PUSHGO_INSN_BYTE | IMM_OFFSET_BIT) << 24)
		    | (inreg << 16)
		    | 0xff00, (bfd_byte *) datap + 16);

	/* We change to an absolute value.  */
	value += addr;
      }
      break;

    case R_MMIX_JMP:
      /* This one is a little special.  If we get here on a non-relaxing
	 link, and the destination is actually in range, we don't need to
	 execute the nops.
	 If so, we fall through to the bit-fiddling relocs.

	 FIXME: bfd_check_overflow seems broken; the relocation is
	 rightshifted before testing, so supply a zero rightshift.  */

      if (! ((value & 3) == 0
	     && (r = bfd_check_overflow (complain_overflow_signed,
					 howto->bitsize, 0,
					 bfd_arch_bits_per_address (abfd),
					 value)) == bfd_reloc_ok))
	{
	  /* If the relocation doesn't fit in a JMP, we let the NOP:s be
	     modified below, and put a "GO $255,$255,0" after the
	     address-loading sequence.  */
	  bfd_put_32 (abfd,
		      ((GO_INSN_BYTE | IMM_OFFSET_BIT) << 24)
		      | 0xffff00,
		      (bfd_byte *) datap + 16);

	  /* We change to an absolute value.  */
	  value += addr;
	  break;
	}
      /* FALLTHROUGH.  */
    case R_MMIX_ADDR19:
    case R_MMIX_ADDR27:
    pcrel_mmix_reloc_fits:

      /* These must be in range, or else we emit an error.  */
      if ((value & 3) == 0
	  /* Note rightshift 0; see above.  */
	  && (r = bfd_check_overflow (complain_overflow_signed,
				      howto->bitsize, 0,
				      bfd_arch_bits_per_address (abfd),
				      value)) == bfd_reloc_ok)
	{
	  bfd_vma in1 = bfd_get_32 (abfd, (bfd_byte *) datap);
	  bfd_vma highbit;

	  if ((bfd_signed_vma) value < 0)
	    {
	      highbit = 1 << 24;
	      value += (1 << (howto->bitsize - 1));
	    }
	  else
	    highbit = 0;

	  value >>= 2;

	  bfd_put_32 (abfd,
		      (in1 & howto->src_mask)
		      | highbit
		      | (value & howto->dst_mask),
		      (bfd_byte *) datap);

	  return bfd_reloc_ok;
	}
      else
	return bfd_reloc_overflow;

    case R_MMIX_BASE_PLUS_OFFSET:
      {
	struct bpo_reloc_section_info *bpodata
	  = mmix_elf_section_data (isec)->bpo.reloc;
	asection *bpo_greg_section = bpodata->bpo_greg_section;
	struct bpo_greg_section_info *gregdata
	  = mmix_elf_section_data (bpo_greg_section)->bpo.greg;
	size_t bpo_index
	  = gregdata->bpo_reloc_indexes[bpodata->bpo_index++];

	/* A consistency check: The value we now have in "relocation" must
	   be the same as the value we stored for that relocation.
It doesn't cost much, so can be left in at all times. */ if (value != gregdata->reloc_request[bpo_index].value) { (*_bfd_error_handler) (_("%s: Internal inconsistency error for value for\n\ linker-allocated global register: linked: 0x%lx%08lx != relaxed: 0x%lx%08lx\n"), bfd_get_filename (isec->owner), (unsigned long) (value >> 32), (unsigned long) value, (unsigned long) (gregdata->reloc_request[bpo_index].value >> 32), (unsigned long) gregdata->reloc_request[bpo_index].value); bfd_set_error (bfd_error_bad_value); return bfd_reloc_overflow; } /* Then store the register number and offset for that register into datap and datap + 1 respectively. */ bfd_put_8 (abfd, gregdata->reloc_request[bpo_index].regindex + bpo_greg_section->output_section->vma / 8, datap); bfd_put_8 (abfd, gregdata->reloc_request[bpo_index].offset, ((unsigned char *) datap) + 1); return bfd_reloc_ok; } case R_MMIX_REG_OR_BYTE: case R_MMIX_REG: if (value > 255) return bfd_reloc_overflow; bfd_put_8 (abfd, value, datap); return bfd_reloc_ok; default: BAD_CASE (howto->type); } /* This code adds the common SETL/INCML/INCMH/INCH worst-case sequence. */ /* Lowest two bits must be 0. We return bfd_reloc_overflow for everything that looks strange. */ if (value & 3) flag = bfd_reloc_overflow; bfd_put_32 (abfd, (SETL_INSN_BYTE << 24) | (value & 0xffff) | (reg << 16), (bfd_byte *) datap + offs); bfd_put_32 (abfd, (INCML_INSN_BYTE << 24) | ((value >> 16) & 0xffff) | (reg << 16), (bfd_byte *) datap + offs + 4); bfd_put_32 (abfd, (INCMH_INSN_BYTE << 24) | ((value >> 32) & 0xffff) | (reg << 16), (bfd_byte *) datap + offs + 8); bfd_put_32 (abfd, (INCH_INSN_BYTE << 24) | ((value >> 48) & 0xffff) | (reg << 16), (bfd_byte *) datap + offs + 12); return flag; } /* Set the howto pointer for an MMIX ELF reloc (type RELA). */ static void mmix_info_to_howto_rela (abfd, cache_ptr, dst) bfd *abfd ATTRIBUTE_UNUSED; arelent *cache_ptr; Elf_Internal_Rela *dst; { unsigned int r_type; r_type = ELF64_R_TYPE (dst->r_info); BFD_ASSERT (r_type < (unsigned int) R_MMIX_max); cache_ptr->howto = &elf_mmix_howto_table[r_type]; } /* Any MMIX-specific relocation gets here at assembly time or when linking to other formats (such as mmo); this is the relocation function from the reloc_table. We don't get here for final pure ELF linking. */ static bfd_reloc_status_type mmix_elf_reloc (abfd, reloc_entry, symbol, data, input_section, output_bfd, error_message) bfd *abfd; arelent *reloc_entry; asymbol *symbol; PTR data; asection *input_section; bfd *output_bfd; char **error_message ATTRIBUTE_UNUSED; { bfd_vma relocation; bfd_reloc_status_type r; asection *reloc_target_output_section; bfd_reloc_status_type flag = bfd_reloc_ok; bfd_vma output_base = 0; bfd_vma addr; r = bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data, input_section, output_bfd, error_message); /* If that was all that was needed (i.e. this isn't a final link, only some segment adjustments), we're done. */ if (r != bfd_reloc_continue) return r; if (bfd_is_und_section (symbol->section) && (symbol->flags & BSF_WEAK) == 0 && output_bfd == (bfd *) NULL) return bfd_reloc_undefined; /* Is the address of the relocation really within the section? */ if (reloc_entry->address > bfd_get_section_limit (abfd, input_section)) return bfd_reloc_outofrange; /* Work out which section the relocation is targeted at and the initial relocation command value. */ /* Get symbol value. (Common symbols are special.) 
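   (For a common symbol, symbol->value holds the size of the common block
   rather than an address, hence the zero below.)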
*/ if (bfd_is_com_section (symbol->section)) relocation = 0; else relocation = symbol->value; reloc_target_output_section = bfd_get_output_section (symbol); /* Here the variable relocation holds the final address of the symbol we are relocating against, plus any addend. */ if (output_bfd) output_base = 0; else output_base = reloc_target_output_section->vma; relocation += output_base + symbol->section->output_offset; /* Get position of relocation. */ addr = (reloc_entry->address + input_section->output_section->vma + input_section->output_offset); if (output_bfd != (bfd *) NULL) { /* Add in supplied addend. */ relocation += reloc_entry->addend; /* This is a partial relocation, and we want to apply the relocation to the reloc entry rather than the raw data. Modify the reloc inplace to reflect what we now know. */ reloc_entry->addend = relocation; reloc_entry->address += input_section->output_offset; return flag; } return mmix_final_link_relocate (reloc_entry->howto, input_section, data, reloc_entry->address, reloc_entry->addend, relocation, bfd_asymbol_name (symbol), reloc_target_output_section); } /* Relocate an MMIX ELF section. Modified from elf32-fr30.c; look to it for guidance if you're thinking of copying this. */ static bfd_boolean mmix_elf_relocate_section (output_bfd, info, input_bfd, input_section, contents, relocs, local_syms, local_sections) bfd *output_bfd ATTRIBUTE_UNUSED; struct bfd_link_info *info; bfd *input_bfd; asection *input_section; bfd_byte *contents; Elf_Internal_Rela *relocs; Elf_Internal_Sym *local_syms; asection **local_sections; { Elf_Internal_Shdr *symtab_hdr; struct elf_link_hash_entry **sym_hashes; Elf_Internal_Rela *rel; Elf_Internal_Rela *relend; bfd_size_type size; size_t pjsno = 0; size = input_section->rawsize ? input_section->rawsize : input_section->size; symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; sym_hashes = elf_sym_hashes (input_bfd); relend = relocs + input_section->reloc_count; /* Zero the stub area before we start. */ if (input_section->rawsize != 0 && input_section->size > input_section->rawsize) memset (contents + input_section->rawsize, 0, input_section->size - input_section->rawsize); for (rel = relocs; rel < relend; rel ++) { reloc_howto_type *howto; unsigned long r_symndx; Elf_Internal_Sym *sym; asection *sec; struct elf_link_hash_entry *h; bfd_vma relocation; bfd_reloc_status_type r; const char *name = NULL; int r_type; bfd_boolean undefined_signalled = FALSE; r_type = ELF64_R_TYPE (rel->r_info); if (r_type == R_MMIX_GNU_VTINHERIT || r_type == R_MMIX_GNU_VTENTRY) continue; r_symndx = ELF64_R_SYM (rel->r_info); if (info->relocatable) { /* This is a relocatable link. For most relocs we don't have to change anything, unless the reloc is against a section symbol, in which case we have to adjust according to where the section symbol winds up in the output section. */ if (r_symndx < symtab_hdr->sh_info) { sym = local_syms + r_symndx; if (ELF_ST_TYPE (sym->st_info) == STT_SECTION) { sec = local_sections [r_symndx]; rel->r_addend += sec->output_offset + sym->st_value; } } /* For PUSHJ stub relocs however, we may need to change the reloc and the section contents, if the reloc doesn't reach beyond the end of the output section and previous stubs. Then we change the section contents to be a PUSHJ to the end of the input section plus stubs (we can do that without using a reloc), and then we change the reloc to be a R_MMIX_PUSHJ at the stub location. 
*/ if (r_type == R_MMIX_PUSHJ_STUBBABLE) { /* We've already checked whether we need a stub; use that knowledge. */ if (mmix_elf_section_data (input_section)->pjs.stub_size[pjsno] != 0) { Elf_Internal_Rela relcpy; if (mmix_elf_section_data (input_section) ->pjs.stub_size[pjsno] != MAX_PUSHJ_STUB_SIZE) abort (); /* There's already a PUSHJ insn there, so just fill in the offset bits to the stub. */ if (mmix_final_link_relocate (elf_mmix_howto_table + R_MMIX_ADDR19, input_section, contents, rel->r_offset, 0, input_section ->output_section->vma + input_section->output_offset + size + mmix_elf_section_data (input_section) ->pjs.stub_offset, NULL, NULL) != bfd_reloc_ok) return FALSE; /* Put a JMP insn at the stub; it goes with the R_MMIX_JMP reloc. */ bfd_put_32 (output_bfd, JMP_INSN_BYTE << 24, contents + size + mmix_elf_section_data (input_section) ->pjs.stub_offset); /* Change the reloc to be at the stub, and to a full R_MMIX_JMP reloc. */ rel->r_info = ELF64_R_INFO (r_symndx, R_MMIX_JMP); rel->r_offset = (size + mmix_elf_section_data (input_section) ->pjs.stub_offset); mmix_elf_section_data (input_section)->pjs.stub_offset += MAX_PUSHJ_STUB_SIZE; /* Shift this reloc to the end of the relocs to maintain the r_offset sorted reloc order. */ relcpy = *rel; memmove (rel, rel + 1, (char *) relend - (char *) rel); relend[-1] = relcpy; /* Back up one reloc, or else we'd skip the next reloc in turn. */ rel--; } pjsno++; } continue; } /* This is a final link. */ howto = elf_mmix_howto_table + ELF64_R_TYPE (rel->r_info); h = NULL; sym = NULL; sec = NULL; if (r_symndx < symtab_hdr->sh_info) { sym = local_syms + r_symndx; sec = local_sections [r_symndx]; relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel); name = bfd_elf_string_from_elf_section (input_bfd, symtab_hdr->sh_link, sym->st_name); if (name == NULL) name = bfd_section_name (input_bfd, sec); } else { bfd_boolean unresolved_reloc; RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, r_symndx, symtab_hdr, sym_hashes, h, sec, relocation, unresolved_reloc, undefined_signalled); name = h->root.root.string; } r = mmix_final_link_relocate (howto, input_section, contents, rel->r_offset, rel->r_addend, relocation, name, sec); if (r != bfd_reloc_ok) { bfd_boolean check_ok = TRUE; const char * msg = (const char *) NULL; switch (r) { case bfd_reloc_overflow: check_ok = info->callbacks->reloc_overflow (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0, input_bfd, input_section, rel->r_offset); break; case bfd_reloc_undefined: /* We may have sent this message above. */ if (! undefined_signalled) check_ok = info->callbacks->undefined_symbol (info, name, input_bfd, input_section, rel->r_offset, TRUE); undefined_signalled = TRUE; break; case bfd_reloc_outofrange: msg = _("internal error: out of range error"); break; case bfd_reloc_notsupported: msg = _("internal error: unsupported relocation error"); break; case bfd_reloc_dangerous: msg = _("internal error: dangerous relocation"); break; default: msg = _("internal error: unknown error"); break; } if (msg) check_ok = info->callbacks->warning (info, msg, name, input_bfd, input_section, rel->r_offset); if (! check_ok) return FALSE; } } return TRUE; } /* Perform a single relocation. By default we use the standard BFD routines. A few relocs we have to do ourselves. 
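   The MMIX-specific types are the cases of the switch below; anything
   else is handed to _bfd_final_link_relocate in the default case.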
*/

static bfd_reloc_status_type
mmix_final_link_relocate (howto, input_section, contents,
			  r_offset, r_addend, relocation, symname, symsec)
     reloc_howto_type *howto;
     asection *input_section;
     bfd_byte *contents;
     bfd_vma r_offset;
     bfd_signed_vma r_addend;
     bfd_vma relocation;
     const char *symname;
     asection *symsec;
{
  bfd_reloc_status_type r = bfd_reloc_ok;
  bfd_vma addr
    = (input_section->output_section->vma
       + input_section->output_offset
       + r_offset);
  bfd_signed_vma srel
    = (bfd_signed_vma) relocation + r_addend;

  switch (howto->type)
    {
      /* All these are PC-relative.  */
    case R_MMIX_PUSHJ_STUBBABLE:
    case R_MMIX_PUSHJ:
    case R_MMIX_CBRANCH:
    case R_MMIX_ADDR19:
    case R_MMIX_GETA:
    case R_MMIX_ADDR27:
    case R_MMIX_JMP:
      contents += r_offset;

      srel -= (input_section->output_section->vma
	       + input_section->output_offset
	       + r_offset);

      r = mmix_elf_perform_relocation (input_section, howto, contents,
				       addr, srel);
      break;

    case R_MMIX_BASE_PLUS_OFFSET:
      if (symsec == NULL)
	return bfd_reloc_undefined;

      /* Check that we're not relocating against a register symbol.  */
      if (strcmp (bfd_get_section_name (symsec->owner, symsec),
		  MMIX_REG_CONTENTS_SECTION_NAME) == 0
	  || strcmp (bfd_get_section_name (symsec->owner, symsec),
		     MMIX_REG_SECTION_NAME) == 0)
	{
	  /* Note: This is separated out into two messages in order
	     to ease the translation into other languages.  */
	  if (symname == NULL || *symname == 0)
	    (*_bfd_error_handler)
	      (_("%s: base-plus-offset relocation against register symbol: (unknown) in %s"),
	       bfd_get_filename (input_section->owner),
	       bfd_get_section_name (symsec->owner, symsec));
	  else
	    (*_bfd_error_handler)
	      (_("%s: base-plus-offset relocation against register symbol: %s in %s"),
	       bfd_get_filename (input_section->owner), symname,
	       bfd_get_section_name (symsec->owner, symsec));
	  return bfd_reloc_overflow;
	}
      goto do_mmix_reloc;

    case R_MMIX_REG_OR_BYTE:
    case R_MMIX_REG:
      /* For now, we handle these alike.  They must refer to a register
	 symbol, which is either relative to the register section and in
	 the range 0..255, or is in the register contents section with vma
	 regno * 8.  */

      /* FIXME: A better way to check for reg contents section?
	 FIXME: Postpone section->scaling to mmix_elf_perform_relocation?  */
      if (symsec == NULL)
	return bfd_reloc_undefined;

      if (strcmp (bfd_get_section_name (symsec->owner, symsec),
		  MMIX_REG_CONTENTS_SECTION_NAME) == 0)
	{
	  if ((srel & 7) != 0 || srel < 32*8 || srel > 255*8)
	    {
	      /* The bfd_reloc_outofrange return value, though intuitively
		 a better value, will not get us an error.  */
	      return bfd_reloc_overflow;
	    }
	  srel /= 8;
	}
      else if (strcmp (bfd_get_section_name (symsec->owner, symsec),
		       MMIX_REG_SECTION_NAME) == 0)
	{
	  if (srel < 0 || srel > 255)
	    /* The bfd_reloc_outofrange return value, though intuitively
	       a better value, will not get us an error.  */
	    return bfd_reloc_overflow;
	}
      else
	{
	  /* Note: This is separated out into two messages in order
	     to ease the translation into other languages.  */
	  if (symname == NULL || *symname == 0)
	    (*_bfd_error_handler)
	      (_("%s: register relocation against non-register symbol: (unknown) in %s"),
	       bfd_get_filename (input_section->owner),
	       bfd_get_section_name (symsec->owner, symsec));
	  else
	    (*_bfd_error_handler)
	      (_("%s: register relocation against non-register symbol: %s in %s"),
	       bfd_get_filename (input_section->owner), symname,
	       bfd_get_section_name (symsec->owner, symsec));

	  /* The bfd_reloc_outofrange return value, though intuitively a
	     better value, will not get us an error.
*/ return bfd_reloc_overflow; } do_mmix_reloc: contents += r_offset; r = mmix_elf_perform_relocation (input_section, howto, contents, addr, srel); break; case R_MMIX_LOCAL: /* This isn't a real relocation, it's just an assertion that the final relocation value corresponds to a local register. We ignore the actual relocation; nothing is changed. */ { asection *regsec = bfd_get_section_by_name (input_section->output_section->owner, MMIX_REG_CONTENTS_SECTION_NAME); bfd_vma first_global; /* Check that this is an absolute value, or a reference to the register contents section or the register (symbol) section. Absolute numbers can get here as undefined section. Undefined symbols are signalled elsewhere, so there's no conflict in us accidentally handling it. */ if (!bfd_is_abs_section (symsec) && !bfd_is_und_section (symsec) && strcmp (bfd_get_section_name (symsec->owner, symsec), MMIX_REG_CONTENTS_SECTION_NAME) != 0 && strcmp (bfd_get_section_name (symsec->owner, symsec), MMIX_REG_SECTION_NAME) != 0) { (*_bfd_error_handler) (_("%s: directive LOCAL valid only with a register or absolute value"), bfd_get_filename (input_section->owner)); return bfd_reloc_overflow; } /* If we don't have a register contents section, then $255 is the first global register. */ if (regsec == NULL) first_global = 255; else { first_global = bfd_get_section_vma (abfd, regsec) / 8; if (strcmp (bfd_get_section_name (symsec->owner, symsec), MMIX_REG_CONTENTS_SECTION_NAME) == 0) { if ((srel & 7) != 0 || srel < 32*8 || srel > 255*8) /* The bfd_reloc_outofrange return value, though intuitively a better value, will not get us an error. */ return bfd_reloc_overflow; srel /= 8; } } if ((bfd_vma) srel >= first_global) { /* FIXME: Better error message. */ (*_bfd_error_handler) (_("%s: LOCAL directive: Register $%ld is not a local register. First global register is $%ld."), bfd_get_filename (input_section->owner), (long) srel, (long) first_global); return bfd_reloc_overflow; } } r = bfd_reloc_ok; break; default: r = _bfd_final_link_relocate (howto, input_section->owner, input_section, contents, r_offset, relocation, r_addend); } return r; } /* Return the section that should be marked against GC for a given relocation. */ static asection * mmix_elf_gc_mark_hook (sec, info, rel, h, sym) asection *sec; struct bfd_link_info *info ATTRIBUTE_UNUSED; Elf_Internal_Rela *rel; struct elf_link_hash_entry *h; Elf_Internal_Sym *sym; { if (h != NULL) { switch (ELF64_R_TYPE (rel->r_info)) { case R_MMIX_GNU_VTINHERIT: case R_MMIX_GNU_VTENTRY: break; default: switch (h->root.type) { case bfd_link_hash_defined: case bfd_link_hash_defweak: return h->root.u.def.section; case bfd_link_hash_common: return h->root.u.c.p->section; default: break; } } } else return bfd_section_from_elf_index (sec->owner, sym->st_shndx); return NULL; } /* Update relocation info for a GC-excluded section. We could supposedly perform the allocation after GC, but there's no suitable hook between GC (or section merge) and the point when all input sections must be present. Better to waste some memory and (perhaps) a little time. */ static bfd_boolean mmix_elf_gc_sweep_hook (abfd, info, sec, relocs) bfd *abfd ATTRIBUTE_UNUSED; struct bfd_link_info *info ATTRIBUTE_UNUSED; asection *sec ATTRIBUTE_UNUSED; const Elf_Internal_Rela *relocs ATTRIBUTE_UNUSED; { struct bpo_reloc_section_info *bpodata = mmix_elf_section_data (sec)->bpo.reloc; asection *allocated_gregs_section; /* If no bpodata here, we have nothing to do. 
*/ if (bpodata == NULL) return TRUE; allocated_gregs_section = bpodata->bpo_greg_section; mmix_elf_section_data (allocated_gregs_section)->bpo.greg->n_bpo_relocs -= bpodata->n_bpo_relocs_this_section; return TRUE; } /* Sort register relocs to come before expanding relocs. */ static int mmix_elf_sort_relocs (p1, p2) const PTR p1; const PTR p2; { const Elf_Internal_Rela *r1 = (const Elf_Internal_Rela *) p1; const Elf_Internal_Rela *r2 = (const Elf_Internal_Rela *) p2; int r1_is_reg, r2_is_reg; /* Sort primarily on r_offset & ~3, so relocs are done to consecutive insns. */ if ((r1->r_offset & ~(bfd_vma) 3) > (r2->r_offset & ~(bfd_vma) 3)) return 1; else if ((r1->r_offset & ~(bfd_vma) 3) < (r2->r_offset & ~(bfd_vma) 3)) return -1; r1_is_reg = (ELF64_R_TYPE (r1->r_info) == R_MMIX_REG_OR_BYTE || ELF64_R_TYPE (r1->r_info) == R_MMIX_REG); r2_is_reg = (ELF64_R_TYPE (r2->r_info) == R_MMIX_REG_OR_BYTE || ELF64_R_TYPE (r2->r_info) == R_MMIX_REG); if (r1_is_reg != r2_is_reg) return r2_is_reg - r1_is_reg; /* Neither or both are register relocs. Then sort on full offset. */ if (r1->r_offset > r2->r_offset) return 1; else if (r1->r_offset < r2->r_offset) return -1; return 0; } /* Subset of mmix_elf_check_relocs, common to ELF and mmo linking. */ static bfd_boolean mmix_elf_check_common_relocs (abfd, info, sec, relocs) bfd *abfd; struct bfd_link_info *info; asection *sec; const Elf_Internal_Rela *relocs; { bfd *bpo_greg_owner = NULL; asection *allocated_gregs_section = NULL; struct bpo_greg_section_info *gregdata = NULL; struct bpo_reloc_section_info *bpodata = NULL; const Elf_Internal_Rela *rel; const Elf_Internal_Rela *rel_end; /* We currently have to abuse this COFF-specific member, since there's no target-machine-dedicated member. There's no alternative outside the bfd_link_info struct; we can't specialize a hash-table since they're different between ELF and mmo. */ bpo_greg_owner = (bfd *) info->base_file; rel_end = relocs + sec->reloc_count; for (rel = relocs; rel < rel_end; rel++) { switch (ELF64_R_TYPE (rel->r_info)) { /* This relocation causes a GREG allocation. We need to count them, and we need to create a section for them, so we need an object to fake as the owner of that section. We can't use the ELF dynobj for this, since the ELF bits assume lots of DSO-related stuff if that member is non-NULL. */ case R_MMIX_BASE_PLUS_OFFSET: /* We don't do anything with this reloc for a relocatable link. */ if (info->relocatable) break; if (bpo_greg_owner == NULL) { bpo_greg_owner = abfd; info->base_file = (PTR) bpo_greg_owner; } if (allocated_gregs_section == NULL) allocated_gregs_section = bfd_get_section_by_name (bpo_greg_owner, MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME); if (allocated_gregs_section == NULL) { allocated_gregs_section = bfd_make_section_with_flags (bpo_greg_owner, MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME, (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)); /* Setting both SEC_ALLOC and SEC_LOAD means the section is treated like any other section, and we'd get errors for address overlap with the text section. Let's set none of those flags, as that is what currently happens for usual GREG allocations, and that works. 
*/ if (allocated_gregs_section == NULL || !bfd_set_section_alignment (bpo_greg_owner, allocated_gregs_section, 3)) return FALSE; gregdata = (struct bpo_greg_section_info *) bfd_zalloc (bpo_greg_owner, sizeof (struct bpo_greg_section_info)); if (gregdata == NULL) return FALSE; mmix_elf_section_data (allocated_gregs_section)->bpo.greg = gregdata; } else if (gregdata == NULL) gregdata = mmix_elf_section_data (allocated_gregs_section)->bpo.greg; /* Get ourselves some auxiliary info for the BPO-relocs. */ if (bpodata == NULL) { /* No use doing a separate iteration pass to find the upper limit - just use the number of relocs. */ bpodata = (struct bpo_reloc_section_info *) bfd_alloc (bpo_greg_owner, sizeof (struct bpo_reloc_section_info) * (sec->reloc_count + 1)); if (bpodata == NULL) return FALSE; mmix_elf_section_data (sec)->bpo.reloc = bpodata; bpodata->first_base_plus_offset_reloc = bpodata->bpo_index = gregdata->n_max_bpo_relocs; bpodata->bpo_greg_section = allocated_gregs_section; bpodata->n_bpo_relocs_this_section = 0; } bpodata->n_bpo_relocs_this_section++; gregdata->n_max_bpo_relocs++; /* We don't get another chance to set this before GC; we've not set up any hook that runs before GC. */ gregdata->n_bpo_relocs = gregdata->n_max_bpo_relocs; break; case R_MMIX_PUSHJ_STUBBABLE: mmix_elf_section_data (sec)->pjs.n_pushj_relocs++; break; } } /* Allocate per-reloc stub storage and initialize it to the max stub size. */ if (mmix_elf_section_data (sec)->pjs.n_pushj_relocs != 0) { size_t i; mmix_elf_section_data (sec)->pjs.stub_size = bfd_alloc (abfd, mmix_elf_section_data (sec)->pjs.n_pushj_relocs * sizeof (mmix_elf_section_data (sec) ->pjs.stub_size[0])); if (mmix_elf_section_data (sec)->pjs.stub_size == NULL) return FALSE; for (i = 0; i < mmix_elf_section_data (sec)->pjs.n_pushj_relocs; i++) mmix_elf_section_data (sec)->pjs.stub_size[i] = MAX_PUSHJ_STUB_SIZE; } return TRUE; } /* Look through the relocs for a section during the first phase. */ static bfd_boolean mmix_elf_check_relocs (abfd, info, sec, relocs) bfd *abfd; struct bfd_link_info *info; asection *sec; const Elf_Internal_Rela *relocs; { Elf_Internal_Shdr *symtab_hdr; struct elf_link_hash_entry **sym_hashes, **sym_hashes_end; const Elf_Internal_Rela *rel; const Elf_Internal_Rela *rel_end; symtab_hdr = &elf_tdata (abfd)->symtab_hdr; sym_hashes = elf_sym_hashes (abfd); sym_hashes_end = sym_hashes + symtab_hdr->sh_size/sizeof(Elf64_External_Sym); if (!elf_bad_symtab (abfd)) sym_hashes_end -= symtab_hdr->sh_info; /* First we sort the relocs so that any register relocs come before expansion-relocs to the same insn. FIXME: Not done for mmo. */ qsort ((PTR) relocs, sec->reloc_count, sizeof (Elf_Internal_Rela), mmix_elf_sort_relocs); /* Do the common part. */ if (!mmix_elf_check_common_relocs (abfd, info, sec, relocs)) return FALSE; if (info->relocatable) return TRUE; rel_end = relocs + sec->reloc_count; for (rel = relocs; rel < rel_end; rel++) { struct elf_link_hash_entry *h; unsigned long r_symndx; r_symndx = ELF64_R_SYM (rel->r_info); if (r_symndx < symtab_hdr->sh_info) h = NULL; else { h = sym_hashes[r_symndx - symtab_hdr->sh_info]; while (h->root.type == bfd_link_hash_indirect || h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; } switch (ELF64_R_TYPE (rel->r_info)) { /* This relocation describes the C++ object vtable hierarchy. Reconstruct it for later use during GC. 
*/ case R_MMIX_GNU_VTINHERIT: if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset)) return FALSE; break; /* This relocation describes which C++ vtable entries are actually used. Record for later use during GC. */ case R_MMIX_GNU_VTENTRY: if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend)) return FALSE; break; } } return TRUE; } /* Wrapper for mmix_elf_check_common_relocs, called when linking to mmo. Copied from elf_link_add_object_symbols. */ bfd_boolean _bfd_mmix_check_all_relocs (abfd, info) bfd *abfd; struct bfd_link_info *info; { asection *o; for (o = abfd->sections; o != NULL; o = o->next) { Elf_Internal_Rela *internal_relocs; bfd_boolean ok; if ((o->flags & SEC_RELOC) == 0 || o->reloc_count == 0 || ((info->strip == strip_all || info->strip == strip_debugger) && (o->flags & SEC_DEBUGGING) != 0) || bfd_is_abs_section (o->output_section)) continue; internal_relocs = _bfd_elf_link_read_relocs (abfd, o, (PTR) NULL, (Elf_Internal_Rela *) NULL, info->keep_memory); if (internal_relocs == NULL) return FALSE; ok = mmix_elf_check_common_relocs (abfd, info, o, internal_relocs); if (! info->keep_memory) free (internal_relocs); if (! ok) return FALSE; } return TRUE; } /* Change symbols relative to the reg contents section to instead be to the register section, and scale them down to correspond to the register number. */ static bfd_boolean mmix_elf_link_output_symbol_hook (info, name, sym, input_sec, h) struct bfd_link_info *info ATTRIBUTE_UNUSED; const char *name ATTRIBUTE_UNUSED; Elf_Internal_Sym *sym; asection *input_sec; struct elf_link_hash_entry *h ATTRIBUTE_UNUSED; { if (input_sec != NULL && input_sec->name != NULL && ELF_ST_TYPE (sym->st_info) != STT_SECTION && strcmp (input_sec->name, MMIX_REG_CONTENTS_SECTION_NAME) == 0) { sym->st_value /= 8; sym->st_shndx = SHN_REGISTER; } return TRUE; } /* We fake a register section that holds values that are register numbers. Having a SHN_REGISTER and register section translates better to other formats (e.g. mmo) than for example a STT_REGISTER attribute. This section faking is based on a construct in elf32-mips.c. */ static asection mmix_elf_reg_section; static asymbol mmix_elf_reg_section_symbol; static asymbol *mmix_elf_reg_section_symbol_ptr; /* Handle the special section numbers that a symbol may use. */ void mmix_elf_symbol_processing (abfd, asym) bfd *abfd ATTRIBUTE_UNUSED; asymbol *asym; { elf_symbol_type *elfsym; elfsym = (elf_symbol_type *) asym; switch (elfsym->internal_elf_sym.st_shndx) { case SHN_REGISTER: if (mmix_elf_reg_section.name == NULL) { /* Initialize the register section. */ mmix_elf_reg_section.name = MMIX_REG_SECTION_NAME; mmix_elf_reg_section.flags = SEC_NO_FLAGS; mmix_elf_reg_section.output_section = &mmix_elf_reg_section; mmix_elf_reg_section.symbol = &mmix_elf_reg_section_symbol; mmix_elf_reg_section.symbol_ptr_ptr = &mmix_elf_reg_section_symbol_ptr; mmix_elf_reg_section_symbol.name = MMIX_REG_SECTION_NAME; mmix_elf_reg_section_symbol.flags = BSF_SECTION_SYM; mmix_elf_reg_section_symbol.section = &mmix_elf_reg_section; mmix_elf_reg_section_symbol_ptr = &mmix_elf_reg_section_symbol; } asym->section = &mmix_elf_reg_section; break; default: break; } } /* Given a BFD section, try to locate the corresponding ELF section index. 
*/

static bfd_boolean
mmix_elf_section_from_bfd_section (abfd, sec, retval)
     bfd *abfd ATTRIBUTE_UNUSED;
     asection *sec;
     int *retval;
{
  if (strcmp (bfd_get_section_name (abfd, sec), MMIX_REG_SECTION_NAME) == 0)
    *retval = SHN_REGISTER;
  else
    return FALSE;

  return TRUE;
}

/* Hook called by the linker routine which adds symbols from an object
   file.  We must handle the special SHN_REGISTER section number here.

   We also check that we only have *one* each of the section-start
   symbols, since otherwise having two with the same value would cause
   them to be "merged", but with the contents serialized.  */

bfd_boolean
mmix_elf_add_symbol_hook (abfd, info, sym, namep, flagsp, secp, valp)
     bfd *abfd;
     struct bfd_link_info *info ATTRIBUTE_UNUSED;
     Elf_Internal_Sym *sym;
     const char **namep ATTRIBUTE_UNUSED;
     flagword *flagsp ATTRIBUTE_UNUSED;
     asection **secp;
     bfd_vma *valp ATTRIBUTE_UNUSED;
{
  if (sym->st_shndx == SHN_REGISTER)
    {
      *secp = bfd_make_section_old_way (abfd, MMIX_REG_SECTION_NAME);
      (*secp)->flags |= SEC_LINKER_CREATED;
    }
  else if ((*namep)[0] == '_' && (*namep)[1] == '_' && (*namep)[2] == '.'
	   && strncmp (*namep, MMIX_LOC_SECTION_START_SYMBOL_PREFIX,
		       strlen (MMIX_LOC_SECTION_START_SYMBOL_PREFIX)) == 0)
    {
      /* See if we have another one.  */
      struct bfd_link_hash_entry *h
	= bfd_link_hash_lookup (info->hash, *namep, FALSE, FALSE, FALSE);

      if (h != NULL && h->type != bfd_link_hash_undefined)
	{
	  /* How do we get the asymbol (or really: the filename) from h?
	     h->u.def.section->owner is NULL.  */
	  ((*_bfd_error_handler)
	   (_("%s: Error: multiple definition of `%s'; start of %s is set in an earlier linked file\n"),
	    bfd_get_filename (abfd), *namep,
	    *namep + strlen (MMIX_LOC_SECTION_START_SYMBOL_PREFIX)));
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}
    }

  return TRUE;
}

/* We consider symbols matching "L.*:[0-9]+" to be local symbols.  */

bfd_boolean
mmix_elf_is_local_label_name (abfd, name)
     bfd *abfd;
     const char *name;
{
  const char *colpos;
  int digits;

  /* Also include the default local-label definition.  */
  if (_bfd_elf_is_local_label_name (abfd, name))
    return TRUE;

  if (*name != 'L')
    return FALSE;

  /* If there's no ":", or more than one, it's not a local symbol.  */
  colpos = strchr (name, ':');
  if (colpos == NULL || strchr (colpos + 1, ':') != NULL)
    return FALSE;

  /* Check that there are remaining characters and that they are digits.  */
  if (colpos[1] == 0)
    return FALSE;

  digits = strspn (colpos + 1, "0123456789");
  return digits != 0 && colpos[1 + digits] == 0;
}

/* We get rid of the register section here.  */

bfd_boolean
mmix_elf_final_link (abfd, info)
     bfd *abfd;
     struct bfd_link_info *info;
{
  /* We never output a register section, though we create one for
     temporary measures.  Check that nobody entered contents into it.  */
  asection *reg_section;

  reg_section = bfd_get_section_by_name (abfd, MMIX_REG_SECTION_NAME);

  if (reg_section != NULL)
    {
      /* FIXME: Pass error state gracefully.  */
      if (bfd_get_section_flags (abfd, reg_section) & SEC_HAS_CONTENTS)
	_bfd_abort (__FILE__, __LINE__, _("Register section has contents\n"));

      /* Really remove the section, if it hasn't already been done.  */
      if (!bfd_section_removed_from_list (abfd, reg_section))
	{
	  bfd_section_list_remove (abfd, reg_section);
	  --abfd->section_count;
	}
    }

  if (! bfd_elf_final_link (abfd, info))
    return FALSE;

  /* Since this section is marked SEC_LINKER_CREATED, it isn't output by
     the regular linker machinery.  We do it here, like other targets with
     special sections.
*/
  if (info->base_file != NULL)
    {
      asection *greg_section
	= bfd_get_section_by_name ((bfd *) info->base_file,
				   MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME);
      if (!bfd_set_section_contents (abfd,
				     greg_section->output_section,
				     greg_section->contents,
				     (file_ptr) greg_section->output_offset,
				     greg_section->size))
	return FALSE;
    }
  return TRUE;
}

/* We need to include the maximum size of PUSHJ-stubs in the initial
   section size.  This is expected to shrink during linker relaxation.  */

static void
mmix_set_relaxable_size (abfd, sec, ptr)
     bfd *abfd ATTRIBUTE_UNUSED;
     asection *sec;
     void *ptr;
{
  struct bfd_link_info *info = ptr;

  /* Make sure we only do this for sections where we know we want this,
     otherwise we might end up resetting the size of COMMONs.  */
  if (mmix_elf_section_data (sec)->pjs.n_pushj_relocs == 0)
    return;

  sec->rawsize = sec->size;
  sec->size += (mmix_elf_section_data (sec)->pjs.n_pushj_relocs
		* MAX_PUSHJ_STUB_SIZE);

  /* For use in relocatable link, we start with a max stubs size.  See
     mmix_elf_relax_section.  */
  if (info->relocatable && sec->output_section)
    mmix_elf_section_data (sec->output_section)->pjs.stubs_size_sum
      += (mmix_elf_section_data (sec)->pjs.n_pushj_relocs
	  * MAX_PUSHJ_STUB_SIZE);
}

/* Initialize stuff for the linker-generated GREGs to match
   R_MMIX_BASE_PLUS_OFFSET relocs seen by the linker.  */

bfd_boolean
_bfd_mmix_before_linker_allocation (abfd, info)
     bfd *abfd ATTRIBUTE_UNUSED;
     struct bfd_link_info *info;
{
  asection *bpo_gregs_section;
  bfd *bpo_greg_owner;
  struct bpo_greg_section_info *gregdata;
  size_t n_gregs;
  bfd_vma gregs_size;
  size_t i;
  size_t *bpo_reloc_indexes;
  bfd *ibfd;

  /* Set the initial size of sections.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    bfd_map_over_sections (ibfd, mmix_set_relaxable_size, info);

  /* The bpo_greg_owner bfd is supposed to have been set by
     mmix_elf_check_relocs when the first R_MMIX_BASE_PLUS_OFFSET is seen.
     If there is no such object, there was no R_MMIX_BASE_PLUS_OFFSET.  */
  bpo_greg_owner = (bfd *) info->base_file;
  if (bpo_greg_owner == NULL)
    return TRUE;

  bpo_gregs_section
    = bfd_get_section_by_name (bpo_greg_owner,
			       MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME);

  if (bpo_gregs_section == NULL)
    return TRUE;

  /* We use the target-data handle in the ELF section data.  */
  gregdata = mmix_elf_section_data (bpo_gregs_section)->bpo.greg;
  if (gregdata == NULL)
    return FALSE;

  n_gregs = gregdata->n_bpo_relocs;
  gregdata->n_allocated_bpo_gregs = n_gregs;

  /* When this reaches zero during relaxation, all entries have been
     filled in and the size of the linker gregs can be calculated.  */
  gregdata->n_remaining_bpo_relocs_this_relaxation_round = n_gregs;

  /* Set the zeroth-order estimate for the GREGs size.  */
  gregs_size = n_gregs * 8;

  if (!bfd_set_section_size (bpo_greg_owner, bpo_gregs_section, gregs_size))
    return FALSE;

  /* Allocate and set up the GREG arrays.  They're filled in at relaxation
     time.  Note that we must use the max number ever noted for the array,
     since the index numbers were created before GC.  */
  gregdata->reloc_request
    = bfd_zalloc (bpo_greg_owner,
		  sizeof (struct bpo_reloc_request)
		  * gregdata->n_max_bpo_relocs);

  gregdata->bpo_reloc_indexes
    = bpo_reloc_indexes
    = bfd_alloc (bpo_greg_owner,
		 gregdata->n_max_bpo_relocs * sizeof (size_t));
  if (bpo_reloc_indexes == NULL)
    return FALSE;

  /* The default order is an identity mapping.  */
  for (i = 0; i < gregdata->n_max_bpo_relocs; i++)
    {
      bpo_reloc_indexes[i] = i;
      gregdata->reloc_request[i].bpo_reloc_no = i;
    }

  return TRUE;
}

/* Fill in contents in the linker allocated gregs.
Everything is calculated at this point; we just move the contents into place here. */ bfd_boolean _bfd_mmix_after_linker_allocation (abfd, link_info) bfd *abfd ATTRIBUTE_UNUSED; struct bfd_link_info *link_info; { asection *bpo_gregs_section; bfd *bpo_greg_owner; struct bpo_greg_section_info *gregdata; size_t n_gregs; size_t i, j; size_t lastreg; bfd_byte *contents; /* The bpo_greg_owner bfd is supposed to have been set by mmix_elf_check_relocs when the first R_MMIX_BASE_PLUS_OFFSET is seen. If there is no such object, there was no R_MMIX_BASE_PLUS_OFFSET. */ bpo_greg_owner = (bfd *) link_info->base_file; if (bpo_greg_owner == NULL) return TRUE; bpo_gregs_section = bfd_get_section_by_name (bpo_greg_owner, MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME); /* This can't happen without DSO handling. When DSOs are handled without any R_MMIX_BASE_PLUS_OFFSET seen, there will be no such section. */ if (bpo_gregs_section == NULL) return TRUE; /* We use the target-data handle in the ELF section data. */ gregdata = mmix_elf_section_data (bpo_gregs_section)->bpo.greg; if (gregdata == NULL) return FALSE; n_gregs = gregdata->n_allocated_bpo_gregs; bpo_gregs_section->contents = contents = bfd_alloc (bpo_greg_owner, bpo_gregs_section->size); if (contents == NULL) return FALSE; /* Sanity check: If these numbers mismatch, some relocation has not been accounted for and the rest of gregdata is probably inconsistent. It's a bug, but it's more helpful to identify it than segfaulting below. */ if (gregdata->n_remaining_bpo_relocs_this_relaxation_round != gregdata->n_bpo_relocs) { (*_bfd_error_handler) (_("Internal inconsistency: remaining %u != max %u.\n\ Please report this bug."), gregdata->n_remaining_bpo_relocs_this_relaxation_round, gregdata->n_bpo_relocs); return FALSE; } for (lastreg = 255, i = 0, j = 0; j < n_gregs; i++) if (gregdata->reloc_request[i].regindex != lastreg) { bfd_put_64 (bpo_greg_owner, gregdata->reloc_request[i].value, contents + j * 8); lastreg = gregdata->reloc_request[i].regindex; j++; } return TRUE; } /* Sort valid relocs to come before non-valid relocs, then on increasing value. */ static int bpo_reloc_request_sort_fn (p1, p2) const PTR p1; const PTR p2; { const struct bpo_reloc_request *r1 = (const struct bpo_reloc_request *) p1; const struct bpo_reloc_request *r2 = (const struct bpo_reloc_request *) p2; /* Primary function is validity; non-valid relocs sorted after valid ones. */ if (r1->valid != r2->valid) return r2->valid - r1->valid; /* Then sort on value. Don't simplify and return just the difference of the values: the upper bits of the 64-bit value would be truncated on a host with 32-bit ints. */ if (r1->value != r2->value) return r1->value > r2->value ? 1 : -1; /* As a last re-sort, use the relocation number, so we get a stable sort. The *addresses* aren't stable since items are swapped during sorting. It depends on the qsort implementation if this actually happens. */ return r1->bpo_reloc_no > r2->bpo_reloc_no ? 1 : (r1->bpo_reloc_no < r2->bpo_reloc_no ? -1 : 0); } /* For debug use only. Dumps the global register allocations resulting from base-plus-offset relocs. 
*/ void mmix_dump_bpo_gregs (link_info, pf) struct bfd_link_info *link_info; bfd_error_handler_type pf; { bfd *bpo_greg_owner; asection *bpo_gregs_section; struct bpo_greg_section_info *gregdata; unsigned int i; if (link_info == NULL || link_info->base_file == NULL) return; bpo_greg_owner = (bfd *) link_info->base_file; bpo_gregs_section = bfd_get_section_by_name (bpo_greg_owner, MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME); if (bpo_gregs_section == NULL) return; gregdata = mmix_elf_section_data (bpo_gregs_section)->bpo.greg; if (gregdata == NULL) return; if (pf == NULL) pf = _bfd_error_handler; /* These format strings are not translated. They are for debug purposes only and never displayed to an end user. Should they escape, we surely want them in original. */ (*pf) (" n_bpo_relocs: %u\n n_max_bpo_relocs: %u\n n_remain...round: %u\n\ n_allocated_bpo_gregs: %u\n", gregdata->n_bpo_relocs, gregdata->n_max_bpo_relocs, gregdata->n_remaining_bpo_relocs_this_relaxation_round, gregdata->n_allocated_bpo_gregs); if (gregdata->reloc_request) for (i = 0; i < gregdata->n_max_bpo_relocs; i++) (*pf) ("%4u (%4u)/%4u#%u: 0x%08lx%08lx r: %3u o: %3u\n", i, (gregdata->bpo_reloc_indexes != NULL ? gregdata->bpo_reloc_indexes[i] : (size_t) -1), gregdata->reloc_request[i].bpo_reloc_no, gregdata->reloc_request[i].valid, (unsigned long) (gregdata->reloc_request[i].value >> 32), (unsigned long) gregdata->reloc_request[i].value, gregdata->reloc_request[i].regindex, gregdata->reloc_request[i].offset); } /* This links all R_MMIX_BASE_PLUS_OFFSET relocs into a special array, and when the last such reloc is done, an index-array is sorted according to the values and iterated over to produce register numbers (indexed by 0 from the first allocated register number) and offsets for use in real relocation. PUSHJ stub accounting is also done here. Symbol- and reloc-reading infrastructure copied from elf-m10200.c. */ static bfd_boolean mmix_elf_relax_section (abfd, sec, link_info, again) bfd *abfd; asection *sec; struct bfd_link_info *link_info; bfd_boolean *again; { Elf_Internal_Shdr *symtab_hdr; Elf_Internal_Rela *internal_relocs; Elf_Internal_Rela *irel, *irelend; asection *bpo_gregs_section = NULL; struct bpo_greg_section_info *gregdata; struct bpo_reloc_section_info *bpodata = mmix_elf_section_data (sec)->bpo.reloc; /* The initialization is to quiet compiler warnings. The value is to spot a missing actual initialization. */ size_t bpono = (size_t) -1; size_t pjsno = 0; bfd *bpo_greg_owner; Elf_Internal_Sym *isymbuf = NULL; bfd_size_type size = sec->rawsize ? sec->rawsize : sec->size; mmix_elf_section_data (sec)->pjs.stubs_size_sum = 0; /* Assume nothing changes. */ *again = FALSE; /* We don't have to do anything if this section does not have relocs, or if this is not a code section. */ if ((sec->flags & SEC_RELOC) == 0 || sec->reloc_count == 0 || (sec->flags & SEC_CODE) == 0 || (sec->flags & SEC_LINKER_CREATED) != 0 /* If no R_MMIX_BASE_PLUS_OFFSET relocs and no PUSHJ-stub relocs, then nothing to do. */ || (bpodata == NULL && mmix_elf_section_data (sec)->pjs.n_pushj_relocs == 0)) return TRUE; symtab_hdr = &elf_tdata (abfd)->symtab_hdr; bpo_greg_owner = (bfd *) link_info->base_file; if (bpodata != NULL) { bpo_gregs_section = bpodata->bpo_greg_section; gregdata = mmix_elf_section_data (bpo_gregs_section)->bpo.greg; bpono = bpodata->first_base_plus_offset_reloc; } else gregdata = NULL; /* Get a copy of the native relocations. 
*/ internal_relocs = _bfd_elf_link_read_relocs (abfd, sec, (PTR) NULL, (Elf_Internal_Rela *) NULL, link_info->keep_memory); if (internal_relocs == NULL) goto error_return; /* Walk through them looking for relaxing opportunities. */ irelend = internal_relocs + sec->reloc_count; for (irel = internal_relocs; irel < irelend; irel++) { bfd_vma symval; struct elf_link_hash_entry *h = NULL; /* We only process two relocs. */ if (ELF64_R_TYPE (irel->r_info) != (int) R_MMIX_BASE_PLUS_OFFSET && ELF64_R_TYPE (irel->r_info) != (int) R_MMIX_PUSHJ_STUBBABLE) continue; /* We process relocs in a distinctly different way when this is a relocatable link (for one, we don't look at symbols), so we avoid mixing its code with that for the "normal" relaxation. */ if (link_info->relocatable) { /* The only transformation in a relocatable link is to generate a full stub at the location of the stub calculated for the input section, if the relocated stub location, the end of the output section plus earlier stubs, cannot be reached. Thus relocatable linking can only lead to worse code, but it still works. */ if (ELF64_R_TYPE (irel->r_info) == R_MMIX_PUSHJ_STUBBABLE) { /* If we can reach the end of the output-section and beyond any current stubs, then we don't need a stub for this reloc. The relaxed order of output stub allocation may not exactly match the straightforward order, so we always assume presence of output stubs, which will allow relaxation only on relocations indifferent to the presence of output stub allocations for other relocations and thus the order of output stub allocation. */ if (bfd_check_overflow (complain_overflow_signed, 19, 0, bfd_arch_bits_per_address (abfd), /* Output-stub location. */ sec->output_section->rawsize + (mmix_elf_section_data (sec ->output_section) ->pjs.stubs_size_sum) /* Location of this PUSHJ reloc. */ - (sec->output_offset + irel->r_offset) /* Don't count *this* stub twice. */ - (mmix_elf_section_data (sec) ->pjs.stub_size[pjsno] + MAX_PUSHJ_STUB_SIZE)) == bfd_reloc_ok) mmix_elf_section_data (sec)->pjs.stub_size[pjsno] = 0; mmix_elf_section_data (sec)->pjs.stubs_size_sum += mmix_elf_section_data (sec)->pjs.stub_size[pjsno]; pjsno++; } continue; } /* Get the value of the symbol referred to by the reloc. */ if (ELF64_R_SYM (irel->r_info) < symtab_hdr->sh_info) { /* A local symbol. */ Elf_Internal_Sym *isym; asection *sym_sec; /* Read this BFD's local symbols if we haven't already. */ if (isymbuf == NULL) { isymbuf = (Elf_Internal_Sym *) symtab_hdr->contents; if (isymbuf == NULL) isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr, symtab_hdr->sh_info, 0, NULL, NULL, NULL); if (isymbuf == 0) goto error_return; } isym = isymbuf + ELF64_R_SYM (irel->r_info); if (isym->st_shndx == SHN_UNDEF) sym_sec = bfd_und_section_ptr; else if (isym->st_shndx == SHN_ABS) sym_sec = bfd_abs_section_ptr; else if (isym->st_shndx == SHN_COMMON) sym_sec = bfd_com_section_ptr; else sym_sec = bfd_section_from_elf_index (abfd, isym->st_shndx); symval = (isym->st_value + sym_sec->output_section->vma + sym_sec->output_offset); } else { unsigned long indx; /* An external symbol. */ indx = ELF64_R_SYM (irel->r_info) - symtab_hdr->sh_info; h = elf_sym_hashes (abfd)[indx]; BFD_ASSERT (h != NULL); if (h->root.type != bfd_link_hash_defined && h->root.type != bfd_link_hash_defweak) { /* This appears to be a reference to an undefined symbol. Just ignore it--it will be caught by the regular reloc processing. We need to keep BPO reloc accounting consistent, though else we'll abort instead of emitting an error message. 
*/ if (ELF64_R_TYPE (irel->r_info) == R_MMIX_BASE_PLUS_OFFSET && gregdata != NULL) { gregdata->n_remaining_bpo_relocs_this_relaxation_round--; bpono++; } continue; } symval = (h->root.u.def.value + h->root.u.def.section->output_section->vma + h->root.u.def.section->output_offset); } if (ELF64_R_TYPE (irel->r_info) == (int) R_MMIX_PUSHJ_STUBBABLE) { bfd_vma value = symval + irel->r_addend; bfd_vma dot = (sec->output_section->vma + sec->output_offset + irel->r_offset); bfd_vma stubaddr = (sec->output_section->vma + sec->output_offset + size + mmix_elf_section_data (sec)->pjs.stubs_size_sum); if ((value & 3) == 0 && bfd_check_overflow (complain_overflow_signed, 19, 0, bfd_arch_bits_per_address (abfd), value - dot - (value > dot ? mmix_elf_section_data (sec) ->pjs.stub_size[pjsno] : 0)) == bfd_reloc_ok) /* If the reloc fits, no stub is needed. */ mmix_elf_section_data (sec)->pjs.stub_size[pjsno] = 0; else /* Maybe we can get away with just a JMP insn? */ if ((value & 3) == 0 && bfd_check_overflow (complain_overflow_signed, 27, 0, bfd_arch_bits_per_address (abfd), value - stubaddr - (value > dot ? mmix_elf_section_data (sec) ->pjs.stub_size[pjsno] - 4 : 0)) == bfd_reloc_ok) /* Yep, account for a stub consisting of a single JMP insn. */ mmix_elf_section_data (sec)->pjs.stub_size[pjsno] = 4; else /* Nope, go for the full insn stub. It doesn't seem useful to emit the intermediate sizes; those will only be useful for a >64M program assuming contiguous code. */ mmix_elf_section_data (sec)->pjs.stub_size[pjsno] = MAX_PUSHJ_STUB_SIZE; mmix_elf_section_data (sec)->pjs.stubs_size_sum += mmix_elf_section_data (sec)->pjs.stub_size[pjsno]; pjsno++; continue; } /* We're looking at a R_MMIX_BASE_PLUS_OFFSET reloc. */ gregdata->reloc_request[gregdata->bpo_reloc_indexes[bpono]].value = symval + irel->r_addend; gregdata->reloc_request[gregdata->bpo_reloc_indexes[bpono++]].valid = TRUE; gregdata->n_remaining_bpo_relocs_this_relaxation_round--; } /* Check if that was the last BPO-reloc. If so, sort the values and calculate how many registers we need to cover them. Set the size of the linker gregs, and if the number of registers changed, indicate that we need to relax some more because we have more work to do. */ if (gregdata != NULL && gregdata->n_remaining_bpo_relocs_this_relaxation_round == 0) { size_t i; bfd_vma prev_base; size_t regindex; /* First, reset the remaining relocs for the next round. */ gregdata->n_remaining_bpo_relocs_this_relaxation_round = gregdata->n_bpo_relocs; qsort ((PTR) gregdata->reloc_request, gregdata->n_max_bpo_relocs, sizeof (struct bpo_reloc_request), bpo_reloc_request_sort_fn); /* Recalculate indexes. When we find a change (however unlikely after the initial iteration), we know we need to relax again, since items in the GREG-array are sorted by increasing value and stored in the relaxation phase. */ for (i = 0; i < gregdata->n_max_bpo_relocs; i++) if (gregdata->bpo_reloc_indexes[gregdata->reloc_request[i].bpo_reloc_no] != i) { gregdata->bpo_reloc_indexes[gregdata->reloc_request[i].bpo_reloc_no] = i; *again = TRUE; } /* Allocate register numbers (indexing from 0). Stop at the first non-valid reloc. 
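	 As a worked example: sorted values 0x2000, 0x2040 and 0x2140 give
	 regindex 0 with offsets 0 and 0x40 for the first two, then
	 regindex 1 with offset 0 for the third, since 0x2140 exceeds
	 0x2000 + 255.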
*/
      for (i = 0, regindex = 0, prev_base = gregdata->reloc_request[0].value;
	   i < gregdata->n_bpo_relocs;
	   i++)
	{
	  if (gregdata->reloc_request[i].value > prev_base + 255)
	    {
	      regindex++;
	      prev_base = gregdata->reloc_request[i].value;
	    }
	  gregdata->reloc_request[i].regindex = regindex;
	  gregdata->reloc_request[i].offset
	    = gregdata->reloc_request[i].value - prev_base;
	}

      /* If it's not the same as the last time, we need to relax again,
	 because the size of the section has changed.  I'm not sure we
	 actually need to do any adjustments since the shrinking happens
	 at the start of this section, but better safe than sorry.  */
      if (gregdata->n_allocated_bpo_gregs != regindex + 1)
	{
	  gregdata->n_allocated_bpo_gregs = regindex + 1;
	  *again = TRUE;
	}

      bpo_gregs_section->size = (regindex + 1) * 8;
    }

  if (isymbuf != NULL && (unsigned char *) isymbuf != symtab_hdr->contents)
    {
      if (! link_info->keep_memory)
	free (isymbuf);
      else
	{
	  /* Cache the symbols for elf_link_input_bfd.  */
	  symtab_hdr->contents = (unsigned char *) isymbuf;
	}
    }

  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  if (sec->size < size + mmix_elf_section_data (sec)->pjs.stubs_size_sum)
    abort ();

  if (sec->size > size + mmix_elf_section_data (sec)->pjs.stubs_size_sum)
    {
      sec->size = size + mmix_elf_section_data (sec)->pjs.stubs_size_sum;
      *again = TRUE;
    }

  return TRUE;

 error_return:
  if (isymbuf != NULL && (unsigned char *) isymbuf != symtab_hdr->contents)
    free (isymbuf);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);
  return FALSE;
}

#define ELF_ARCH		bfd_arch_mmix
#define ELF_MACHINE_CODE	EM_MMIX

/* According to mmix-doc page 36 (paragraph 45), this should be
   (1LL << 48LL).  However, that's too much for something somewhere in
   the linker part of BFD; perhaps the start-address has to be a non-zero
   multiple of this number, or larger than this number.  The symptom is
   that the linker complains: "warning: allocated section `.text' not in
   segment".  We settle for 64k; the page-size used in examples is 8k.
   #define ELF_MAXPAGESIZE 0x10000

   Unfortunately, this causes excessive padding in the supposedly small
   for-education programs that are the expected usage (where people would
   inspect output).  We stick to 256 bytes just to have *some* default
   alignment.  */
#define ELF_MAXPAGESIZE 0x100

#define TARGET_BIG_SYM		bfd_elf64_mmix_vec
#define TARGET_BIG_NAME		"elf64-mmix"

#define elf_info_to_howto_rel		NULL
#define elf_info_to_howto		mmix_info_to_howto_rela
#define elf_backend_relocate_section	mmix_elf_relocate_section
#define elf_backend_gc_mark_hook	mmix_elf_gc_mark_hook
#define elf_backend_gc_sweep_hook	mmix_elf_gc_sweep_hook

#define elf_backend_link_output_symbol_hook \
	mmix_elf_link_output_symbol_hook
#define elf_backend_add_symbol_hook	mmix_elf_add_symbol_hook

#define elf_backend_check_relocs	mmix_elf_check_relocs
#define elf_backend_symbol_processing	mmix_elf_symbol_processing

#define bfd_elf64_bfd_is_local_label_name \
	mmix_elf_is_local_label_name

#define elf_backend_may_use_rel_p	0
#define elf_backend_may_use_rela_p	1
#define elf_backend_default_use_rela_p	1

#define elf_backend_can_gc_sections	1
#define elf_backend_section_from_bfd_section \
	mmix_elf_section_from_bfd_section

#define bfd_elf64_new_section_hook	mmix_elf_new_section_hook
#define bfd_elf64_bfd_final_link	mmix_elf_final_link
#define bfd_elf64_bfd_relax_section	mmix_elf_relax_section

#include "elf64-target.h"
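/* Illustrative sketch (not part of the original elf64-mmix.c): how a BFD
   client might resolve one of the BFD reloc codes mapped above to this
   target's howto entry through the public API, which for an elf64-mmix
   BFD dispatches to bfd_elf64_bfd_reloc_type_lookup.  The function name
   is hypothetical and for illustration only.  */
#include "bfd.h"

static const char *
mmix_reloc_name (bfd *abfd, bfd_reloc_code_real_type code)
{
  /* E.g. BFD_RELOC_MMIX_GETA resolves to the "R_MMIX_GETA" howto.  */
  reloc_howto_type *howto = bfd_reloc_type_lookup (abfd, code);

  return howto != NULL ? howto->name : "(unsupported)";
}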
guoqingzhang/binutils-coffee
bfd/elf64-mmix.c
C
gpl-2.0
94,117
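The three-way PUSHJ decision in the elf64-mmix.c relax loop above reduces to two signed-range checks: PUSHJ carries a 16-bit tetra offset, which the code checks as a 19-bit signed byte displacement (the two low bits are implied zero), and a lone JMP in the stub area carries a 24-bit tetra offset, checked as 27 signed bits of bytes. A hedged sketch of that decision follows; it omits the in-flight correction for not-yet-removed stub bytes that the real code applies while sizes are still settling, and MAX_PUSHJ_STUB_SIZE here is a placeholder for the constant defined earlier in elf64-mmix.c.

/* Placeholder value; the real constant lives in elf64-mmix.c.  */
#define MAX_PUSHJ_STUB_SIZE (5 * 4)

static int
fits_signed_bits (long long value, int bits)
{
  long long bound = 1LL << (bits - 1);
  return value >= -bound && value < bound;
}

/* Decide the stub size for one R_MMIX_PUSHJ_STUBBABLE reloc: 0 when a
   4-byte-aligned target is within direct PUSHJ reach (19-bit signed
   byte displacement), 4 when a single JMP placed at the stub address
   reaches it (27 bits), else the full absolute-address stub.  */
static int
pushj_stub_size (long long value, long long dot, long long stubaddr)
{
  if ((value & 3) == 0 && fits_signed_bits (value - dot, 19))
    return 0;
  if ((value & 3) == 0 && fits_signed_bits (value - stubaddr, 27))
    return 4;
  return MAX_PUSHJ_STUB_SIZE;
}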
/* ************************************************************************************* * Linux * USB Host Controller Driver * * (c) Copyright 2006-2012, SoftWinners Co., Ltd. * All Rights Reserved * * File Name : sw_hcd_virt_hub.c * * Author : javen * * Description : virtual hub * * History : * <author> <time> <version > <desc> * javen 2010-12-20 1.0 create this file * ************************************************************************************* */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/time.h> #include <linux/timer.h> #include <asm/unaligned.h> #include "../include/sw_hcd_config.h" #include "../include/sw_hcd_core.h" #include "../include/sw_hcd_virt_hub.h" /* ******************************************************************************* * sw_hcd_port_suspend_ex * * Description: * only suspend USB port * * Parameters: * sw_hcd : input. USB controller * * Return value: * void * * note: * void * ******************************************************************************* */ void sw_hcd_port_suspend_ex(struct sw_hcd *sw_hcd) { /* if a peripheral is connected, suspend the device */ if (sw_hcd->is_active) { /* suspend usb port */ USBC_Host_SuspendPort(sw_hcd->sw_hcd_io->usb_bsp_hdle); /* delay for 1000ms */ mdelay(1000); } return; } /* ******************************************************************************* * sw_hcd_port_resume_ex * * Description: * only resume USB port * * Parameters: * sw_hcd : input. USB controller * * Return value: * void * * note: * void * ******************************************************************************* */ void sw_hcd_port_resume_ex(struct sw_hcd *sw_hcd) { /* resume port */ USBC_Host_RusumePort(sw_hcd->sw_hcd_io->usb_bsp_hdle); mdelay(500); USBC_Host_ClearRusumePortFlag(sw_hcd->sw_hcd_io->usb_bsp_hdle); return; } /* ******************************************************************************* * sw_hcd_port_reset_ex * * Description: * only reset USB port * * Parameters: * sw_hcd : input. USB controller * * Return value: * void * * note: * void * ******************************************************************************* */ void sw_hcd_port_reset_ex(struct sw_hcd *sw_hcd) { /* resume port */ sw_hcd_port_resume_ex(sw_hcd); /* reset port */ USBC_Host_ResetPort(sw_hcd->sw_hcd_io->usb_bsp_hdle); mdelay(50); USBC_Host_ClearResetPortFlag(sw_hcd->sw_hcd_io->usb_bsp_hdle); mdelay(500); return; } /* ******************************************************************************* * sw_hcd_port_suspend * * Description: * suspend USB port * * Parameters: * sw_hcd : input. USB controller * do_suspend : input. flag. whether to suspend the USB port * * Return value: * void * * note: * void * ******************************************************************************* */ static void sw_hcd_port_suspend(struct sw_hcd *sw_hcd, bool do_suspend) { u8 power = 0; void __iomem *usbc_base = sw_hcd->mregs; if (!is_host_active(sw_hcd)){ DMSG_PANIC("ERR: usb host is not active\n"); return; } /* NOTE: this doesn't necessarily put PHY into low power mode, * turning off its clock; that's a function of PHY integration and * sw_hcd_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect * SE0 changing to connect (J) or wakeup (K) states.
*/ power = USBC_Readb(USBC_REG_PCTL(usbc_base)); if (do_suspend) { int retries = 10000; DMSG_INFO("[sw_hcd]: suspend port.\n"); power &= ~(1 << USBC_BP_POWER_H_RESUME); power |= (1 << USBC_BP_POWER_H_SUSPEND); USBC_Writeb(power, USBC_REG_PCTL(usbc_base)); /* Needed for OPT A tests */ power = USBC_Readb(USBC_REG_PCTL(usbc_base)); while (power & (1 << USBC_BP_POWER_H_SUSPEND)) { power = USBC_Readb(USBC_REG_PCTL(usbc_base)); if (retries-- < 1) break; } DMSG_DBG_HCD("DBG: Root port suspended, power %02x\n", power); sw_hcd->port1_status |= USB_PORT_STAT_SUSPEND; }else if (power & (1 << USBC_BP_POWER_H_SUSPEND)){ DMSG_INFO("[sw_hcd]: port suspend ended, resume port.\n"); power &= ~(1 << USBC_BP_POWER_H_SUSPEND); power |= (1 << USBC_BP_POWER_H_RESUME); USBC_Writeb(power, USBC_REG_PCTL(usbc_base)); DMSG_DBG_HCD("DBG: Root port resuming, power %02x\n", power); /* later, GetPortStatus will stop RESUME signaling */ sw_hcd->port1_status |= SW_HCD_PORT_STAT_RESUME; sw_hcd->rh_timer = jiffies + msecs_to_jiffies(20); }else{ DMSG_PANIC("WRN: sw_hcd_port_suspend nothing to do\n"); } return ; } /* ******************************************************************************* * sw_hcd_port_reset * * Description: * reset USB port * * Parameters: * sw_hcd : input. USB controller * do_reset : input. flag. whether to reset the USB port * * Return value: * void * * note: * void * ******************************************************************************* */ static void sw_hcd_port_reset(struct sw_hcd *sw_hcd, bool do_reset) { u8 power = 0; void __iomem *usbc_base = sw_hcd->mregs; if (!is_host_active(sw_hcd)){ DMSG_PANIC("ERR: usb host is not active\n"); return; } /* NOTE: caller guarantees it will turn off the reset when * the appropriate amount of time has passed */ power = USBC_Readb(USBC_REG_PCTL(usbc_base)); if (do_reset) { DMSG_INFO("[sw_hcd]: reset port. \n"); /* * If RESUME is set, we must make sure it stays set for a minimum of 20 ms. * Then we must clear RESUME and wait a bit to let sw_hcd start * generating SOFs. If we don't do this, OPT HS A 6.8 tests * fail with "Error! Did not receive an SOF before suspend * detected".
*/ if (power & (1 << USBC_BP_POWER_H_RESUME)) { while (time_before(jiffies, sw_hcd->rh_timer)){ msleep(1); } power &= ~(1 << USBC_BP_POWER_H_RESUME); USBC_Writeb(power, USBC_REG_PCTL(usbc_base)); msleep(1); } sw_hcd->ignore_disconnect = true; power &= 0xf0; power |= (1 << USBC_BP_POWER_H_RESET); USBC_Writeb(power, USBC_REG_PCTL(usbc_base)); sw_hcd->port1_status |= USB_PORT_STAT_RESET; sw_hcd->port1_status &= ~USB_PORT_STAT_ENABLE; sw_hcd->rh_timer = jiffies + msecs_to_jiffies(50); USBC_Host_SetFunctionAddress_Deafult(sw_hcd->sw_hcd_io->usb_bsp_hdle, USBC_EP_TYPE_TX, 0); // set address for ep0 { __u32 i = 1; __u8 old_ep_index = 0; old_ep_index = USBC_GetActiveEp(sw_hcd->sw_hcd_io->usb_bsp_hdle); USBC_SelectActiveEp(sw_hcd->sw_hcd_io->usb_bsp_hdle, 0); USBC_Host_SetFunctionAddress_Deafult(sw_hcd->sw_hcd_io->usb_bsp_hdle, USBC_EP_TYPE_TX, 0); for( i = 1 ; i <= 5; i++){ USBC_SelectActiveEp(sw_hcd->sw_hcd_io->usb_bsp_hdle, i); USBC_Host_SetFunctionAddress_Deafult(sw_hcd->sw_hcd_io->usb_bsp_hdle, USBC_EP_TYPE_TX, i); USBC_Host_SetFunctionAddress_Deafult(sw_hcd->sw_hcd_io->usb_bsp_hdle, USBC_EP_TYPE_RX, i); } USBC_SelectActiveEp(sw_hcd->sw_hcd_io->usb_bsp_hdle, old_ep_index); } }else{ DMSG_INFO("[sw_hcd]: reset port stopped.\n"); UsbPhyEndReset(0); power &= ~(1 << USBC_BP_POWER_H_RESET); USBC_Writeb(power, USBC_REG_PCTL(usbc_base)); sw_hcd->ignore_disconnect = false; power = USBC_Readb(USBC_REG_PCTL(usbc_base)); if(power & (1 << USBC_BP_POWER_H_HIGH_SPEED_FLAG)){ DMSG_DBG_HCD("high-speed device connected\n"); sw_hcd->port1_status |= USB_PORT_STAT_HIGH_SPEED; } sw_hcd->port1_status &= ~USB_PORT_STAT_RESET; sw_hcd->port1_status |= USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET << 16) | (USB_PORT_STAT_C_ENABLE << 16); usb_hcd_poll_rh_status(sw_hcd_to_hcd(sw_hcd)); sw_hcd->vbuserr_retry = VBUSERR_RETRY_COUNT; } return ; } /* ******************************************************************************* * sw_hcd_root_disconnect * * Description: * disconnect * * Parameters: * sw_hcd : input.
USB controller * * Return value: * void * * note: * void * ******************************************************************************* */ void sw_hcd_root_disconnect(struct sw_hcd *sw_hcd) { sw_hcd->port1_status = (1 << USB_PORT_FEAT_POWER) | (1 << USB_PORT_FEAT_C_CONNECTION); usb_hcd_poll_rh_status(sw_hcd_to_hcd(sw_hcd)); sw_hcd->is_active = 0; return; } EXPORT_SYMBOL(sw_hcd_root_disconnect); /* ******************************************************************************* * sw_hcd_hub_status_data * * Description: * Caller may or may not hold sw_hcd->lock * * Parameters: * void * * Return value: * void * * note: * void * ******************************************************************************* */ int sw_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) { struct sw_hcd *sw_hcd = hcd_to_sw_hcd(hcd); int retval = 0; /* called in_irq() via usb_hcd_poll_rh_status() */ if (sw_hcd->port1_status & 0xffff0000) { *buf = 0x02; retval = 1; } return retval; } EXPORT_SYMBOL(sw_hcd_hub_status_data); /* ******************************************************************************* * sw_hcd_hub_control * * Description: * void * * Parameters: * void * * Return value: * void * * note: * void * ******************************************************************************* */ int sw_hcd_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { struct sw_hcd *sw_hcd = hcd_to_sw_hcd(hcd); u32 temp = 0; int retval = 0; unsigned long flags = 0; void __iomem *usbc_base = sw_hcd->mregs; if(hcd == NULL){ DMSG_PANIC("ERR: invalid argument\n"); return -ESHUTDOWN; } spin_lock_irqsave(&sw_hcd->lock, flags); if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) { spin_unlock_irqrestore(&sw_hcd->lock, flags); return -ESHUTDOWN; } DMSG_DBG_HCD("sw_hcd_hub_control: typeReq = %x, wValue = 0x%x, wIndex = 0x%x\n", typeReq, wValue, wIndex); /* hub features: always zero, setting is a NOP * port features: reported, sometimes updated when host is active * no indicators */ switch (typeReq) { case ClearHubFeature: case SetHubFeature: switch (wValue) { case C_HUB_OVER_CURRENT: case C_HUB_LOCAL_POWER: break; default: goto error; } break; case ClearPortFeature: if ((wIndex & 0xff) != 1){ goto error; } switch (wValue) { case USB_PORT_FEAT_ENABLE: break; case USB_PORT_FEAT_SUSPEND: sw_hcd_port_suspend(sw_hcd, false); break; case USB_PORT_FEAT_POWER: /* fixme */ sw_hcd_set_vbus(sw_hcd, 0); break; case USB_PORT_FEAT_C_CONNECTION: case USB_PORT_FEAT_C_ENABLE: case USB_PORT_FEAT_C_OVER_CURRENT: case USB_PORT_FEAT_C_RESET: case USB_PORT_FEAT_C_SUSPEND: break; default: goto error; } DMSG_DBG_HCD("DBG: clear feature %d\n", wValue); sw_hcd->port1_status &= ~(1 << wValue); break; case GetHubDescriptor: { struct usb_hub_descriptor *desc = (void *)buf; desc->bDescLength = 9; desc->bDescriptorType = 0x29; desc->bNbrPorts = 1; desc->wHubCharacteristics = cpu_to_le16( 0x0001 /* per-port power switching */ | 0x0010 /* no overcurrent reporting */ ); desc->bPwrOn2PwrGood = 5; /* msec/2 */ desc->bHubContrCurrent = 0; /* workaround bogus struct definition */ desc->u.hs.DeviceRemovable[0] = 0x02; /* port 1 */ desc->u.hs.DeviceRemovable[1] = 0xff; } break; case GetHubStatus: temp = 0; *(__le32 *) buf = cpu_to_le32(temp); break; case GetPortStatus: { if (wIndex != 1){ DMSG_PANIC("ERR: GetPortStatus parameter wIndex is not 1.\n"); goto error; } /* finish RESET signaling?
*/ if ((sw_hcd->port1_status & USB_PORT_STAT_RESET) && time_after_eq(jiffies, sw_hcd->rh_timer)){ sw_hcd_port_reset(sw_hcd, false); } /* finish RESUME signaling? */ if ((sw_hcd->port1_status & SW_HCD_PORT_STAT_RESUME) && time_after_eq(jiffies, sw_hcd->rh_timer)) { u8 power = 0; power = USBC_Readb(USBC_REG_PCTL(usbc_base)); power &= ~(1 << USBC_BP_POWER_H_RESUME); USBC_Writeb(power, USBC_REG_PCTL(usbc_base)); DMSG_DBG_HCD("DBG: root port resume stopped, power %02x\n", power); /* ISSUE: DaVinci (RTL 1.300) disconnects after * resume of high speed peripherals (but not full * speed ones). */ sw_hcd->is_active = 1; sw_hcd->port1_status &= ~(USB_PORT_STAT_SUSPEND | SW_HCD_PORT_STAT_RESUME); sw_hcd->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; usb_hcd_poll_rh_status(sw_hcd_to_hcd(sw_hcd)); } put_unaligned(cpu_to_le32(sw_hcd->port1_status & ~SW_HCD_PORT_STAT_RESUME), (__le32 *) buf); /* port change status is more interesting */ DMSG_DBG_HCD("DBG: port status %08x\n", sw_hcd->port1_status); } break; case SetPortFeature: { if ((wIndex & 0xff) != 1){ goto error; } switch (wValue) { case USB_PORT_FEAT_POWER: /* NOTE: this controller has a strange state machine * that involves "requesting sessions" according to * magic side effects from incompletely-described * rules about startup... * * This call is what really starts the host mode; be * very careful about side effects if you reorder any * initialization logic, e.g. for OTG, or change any * logic relating to VBUS power-up. */ sw_hcd_start(sw_hcd); break; case USB_PORT_FEAT_RESET: sw_hcd_port_reset(sw_hcd, true); break; case USB_PORT_FEAT_SUSPEND: sw_hcd_port_suspend(sw_hcd, true); break; case USB_PORT_FEAT_TEST: { if (unlikely(is_host_active(sw_hcd))){ DMSG_PANIC("ERR: usb host is active, can not enter test mode\n"); goto error; } wIndex >>= 8; switch (wIndex) { case 1: DMSG_DBG_HCD("TEST_J\n"); temp = 1 << USBC_BP_TMCTL_TEST_J; break; case 2: DMSG_DBG_HCD("TEST_K\n"); temp = 1 << USBC_BP_TMCTL_TEST_K; break; case 3: DMSG_DBG_HCD("TEST_SE0_NAK\n"); temp = 1 << USBC_BP_TMCTL_TEST_SE0_NAK; break; case 4: DMSG_DBG_HCD("TEST_PACKET\n"); temp = 1 << USBC_BP_TMCTL_TEST_PACKET; sw_hcd_load_testpacket(sw_hcd); break; case 5: DMSG_DBG_HCD("TEST_FORCE_ENABLE\n"); temp = (1 << USBC_BP_TMCTL_FORCE_HOST) | (1 << USBC_BP_TMCTL_FORCE_HS); USBC_REG_set_bit_b(USBC_BP_DEVCTL_SESSION, USBC_REG_DEVCTL(usbc_base)); break; case 6: DMSG_DBG_HCD("TEST_FIFO_ACCESS\n"); temp = 1 << USBC_BP_TMCTL_FIFO_ACCESS; break; default: DMSG_PANIC("ERR: unknown SetPortFeature USB_PORT_FEAT_TEST wIndex(%d)\n", wIndex); goto error; } USBC_Writeb(temp, USBC_REG_TMCTL(usbc_base)); } break; default:{ DMSG_PANIC("ERR: unknown SetPortFeature wValue(%d)\n", wValue); goto error; } } DMSG_DBG_HCD("DBG: set feature %d\n", wValue); sw_hcd->port1_status |= 1 << wValue; } break; default: error: DMSG_PANIC("ERR: protocol stall on error\n"); /* "protocol stall" on error */ retval = -EPIPE; } spin_unlock_irqrestore(&sw_hcd->lock, flags); return retval; } EXPORT_SYMBOL(sw_hcd_hub_control);
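The 0x02 written by sw_hcd_hub_status_data above is the hub interrupt-endpoint change bitmap from the USB 2.0 hub class: bit 0 reports a change on the hub itself and bit N a change on port N, so a one-port root hub has exactly one interesting bit. A minimal hedged restatement of that convention, as a hypothetical helper (kernel context assumed, not part of the driver):

/* Hypothetical illustration of the change bitmap used by
   sw_hcd_hub_status_data: the upper 16 bits of port1_status carry the
   wPortChange flags, and any set bit means "port 1 changed".  */
static int fill_hub_change_bitmap(u32 port1_status, char *buf)
{
	if (port1_status & 0xffff0000) {
		*buf = 1 << 1;	/* bit 1 <=> port 1; bit 0 is the hub itself */
		return 1;	/* one significant byte of bitmap */
	}
	return 0;		/* nothing changed, nothing to report */
}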
mozilla-b2g/kernel_flatfish
drivers/usb/sun7i_usb/hcd/core/sw_hcd_virt_hub.c
C
gpl-2.0
15,810
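One timing detail of the virtual-hub code above deserves a note: resume (K-state) signalling must be driven for at least 20 ms before it may be stopped, which is why sw_hcd_port_suspend arms rh_timer with msecs_to_jiffies(20) and the later GetPortStatus poll only clears the RESUME bit once that deadline has passed. A hedged sketch of the pattern, with placeholder names standing in for the driver's fields:

#include <linux/jiffies.h>
#include <linux/types.h>

struct resume_state {
	unsigned long deadline;	/* jiffies value when K state may end */
	bool resuming;
};

/* Arm the timer when resume signalling starts (RESUME bit set).  */
static void sketch_start_resume(struct resume_state *st)
{
	st->resuming = true;
	st->deadline = jiffies + msecs_to_jiffies(20);	/* 20 ms minimum */
}

/* Called from a later status poll: only after the deadline is it safe
   to clear the RESUME bit and report USB_PORT_STAT_C_SUSPEND.  */
static bool sketch_may_stop_resume(struct resume_state *st)
{
	if (!st->resuming || time_before(jiffies, st->deadline))
		return false;
	st->resuming = false;
	return true;
}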
/* * This file contains work-arounds for many known PCI hardware * bugs. Devices present only on certain architectures (host * bridges et cetera) should be handled in arch-specific code. * * Note: any quirks for hotpluggable devices must _NOT_ be declared __init. * * Copyright (c) 1999 Martin Mares <mj@ucw.cz> * * Init/reset quirks for USB host controllers should be in the * USB quirks file, where their drivers can access and reuse it. * * The bridge optimization stuff has been removed. If you really * have a silly BIOS which is unable to set your host bridge right, * use the PowerTweak utility (see http://powertweak.sourceforge.net). */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/acpi.h> #include <linux/kallsyms.h> #include <linux/dmi.h> #include <linux/pci-aspm.h> #include <linux/ioport.h> #include "pci.h" int isa_dma_bridge_buggy; EXPORT_SYMBOL(isa_dma_bridge_buggy); int pci_pci_problems; EXPORT_SYMBOL(pci_pci_problems); #ifdef CONFIG_PCI_QUIRKS /* * This quirk function disables memory decoding and releases memory resources * of the device specified by the kernel's boot parameter 'pci=resource_alignment='. * It also rounds up size to the specified alignment. * Later on, the kernel will assign page-aligned memory resource back * to the device. */ static void __devinit quirk_resource_alignment(struct pci_dev *dev) { int i; struct resource *r; resource_size_t align, size; u16 command; if (!pci_is_reassigndev(dev)) return; if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL && (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) { dev_warn(&dev->dev, "Can't reassign resources to host bridge.\n"); return; } dev_info(&dev->dev, "Disabling memory decoding and releasing memory resources.\n"); pci_read_config_word(dev, PCI_COMMAND, &command); command &= ~PCI_COMMAND_MEMORY; pci_write_config_word(dev, PCI_COMMAND, command); align = pci_specified_resource_alignment(dev); for (i=0; i < PCI_BRIDGE_RESOURCES; i++) { r = &dev->resource[i]; if (!(r->flags & IORESOURCE_MEM)) continue; size = resource_size(r); if (size < align) { size = align; dev_info(&dev->dev, "Rounding up size of resource #%d to %#llx.\n", i, (unsigned long long)size); } r->end = size - 1; r->start = 0; } /* Need to disable bridge's resource window, * to enable the kernel to reassign new resource * window later on. */ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { r = &dev->resource[i]; if (!(r->flags & IORESOURCE_MEM)) continue; r->end = resource_size(r) - 1; r->start = 0; } pci_disable_bridge_window(dev); } } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment); #ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS /* The Mellanox Tavor device gives false positive parity errors. * Mark this device with a broken_parity_status, to allow * PCI scanning code to "skip" this now blacklisted device.
*/ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev) { dev->broken_parity_status = 1; /* This device gives false positives */ } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); /* Deal with broken BIOSes that neglect to enable passive release, which can cause problems in combination with the 82441FX/PPro MTRRs */ static void quirk_passive_release(struct pci_dev *dev) { struct pci_dev *d = NULL; unsigned char dlc; /* We have to make sure a particular bit is set in the PIIX3 ISA bridge, so we have to go out and find it. */ while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { pci_read_config_byte(d, 0x82, &dlc); if (!(dlc & 1<<1)) { dev_info(&d->dev, "PIIX3: Enabling Passive Release\n"); dlc |= 1<<1; pci_write_config_byte(d, 0x82, dlc); } } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release); /* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround but VIA don't answer queries. If you happen to have good contacts at VIA ask them for me please -- Alan. This appears to be BIOS-, not version-, dependent. So presumably there is a chipset level fix */ static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) { if (!isa_dma_bridge_buggy) { isa_dma_bridge_buggy=1; dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n"); } } /* * It's not totally clear which chipsets are the problematic ones. * We know 82C586 and 82C596 variants are affected. */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); /* * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear * for some HT machines to use C4 w/o hanging.
*/ static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev) { u32 pmbase; u16 pm1a; pci_read_config_dword(dev, 0x40, &pmbase); pmbase = pmbase & 0xff80; pm1a = inw(pmbase); if (pm1a & 0x10) { dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n"); outw(0x10, pmbase); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); /* * Chipsets where PCI->PCI transfers vanish or hang */ static void __devinit quirk_nopcipci(struct pci_dev *dev) { if ((pci_pci_problems & PCIPCI_FAIL)==0) { dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_FAIL; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci); static void __devinit quirk_nopciamd(struct pci_dev *dev) { u8 rev; pci_read_config_byte(dev, 0x08, &rev); if (rev == 0x13) { /* Erratum 24 */ dev_info(&dev->dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n"); pci_pci_problems |= PCIAGP_FAIL; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd); /* * Triton requires workarounds to be used by the drivers */ static void __devinit quirk_triton(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_TRITON)==0) { dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_TRITON; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton); /* * VIA Apollo KT133 needs PCI latency patch * Made according to a Windows driver based patch by George E. Breese * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for * the info on which Mr Breese based his work. * * Updated based on further information from the site and also on * information provided by VIA */ static void quirk_vialatency(struct pci_dev *dev) { struct pci_dev *p; u8 busarb; /* Ok we have a potential problem chipset here. Now see if we have a buggy southbridge */ p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL); if (p!=NULL) { /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */ /* Check for buggy part revisions */ if (p->revision < 0x40 || p->revision > 0x42) goto exit; } else { p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL); if (p==NULL) /* No problem parts */ goto exit; /* Check for buggy part revisions */ if (p->revision < 0x10 || p->revision > 0x12) goto exit; } /* * Ok we have the problem. Now set the PCI master grant to * occur every master grant. The apparent bug is that under high * PCI load (quite common in Linux of course) you can get data * loss when the CPU is held off the bus for 3 bus master requests. * This happens to include the IDE controllers... * * VIA only applies this fix when an SB Live! is present, but under * both Linux and Windows this isn't enough, and we have seen * corruption without SB Live! but with things like 3 UDMA IDE * controllers. So we ignore that bit of the VIA recommendation.
*/ pci_read_config_byte(dev, 0x76, &busarb); /* Set bits 5:4 of byte 0x76 to 01b: "Master priority rotation on every PCI master grant". */ busarb &= ~(1<<5); busarb |= (1<<4); pci_write_config_byte(dev, 0x76, busarb); dev_info(&dev->dev, "Applying VIA southbridge workaround\n"); exit: pci_dev_put(p); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency); /* Must restore this on a resume from RAM */ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency); /* * VIA Apollo VP3 needs ETBF on BT848/878 */ static void __devinit quirk_viaetbf(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_VIAETBF)==0) { dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_VIAETBF; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf); static void __devinit quirk_vsfx(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_VSFX)==0) { dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_VSFX; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx); /* * Ali Magik requires workarounds to be used by the drivers * that DMA to AGP space. Latency must be set to 0xA and the triton * workaround applied too. * [Info kindly provided by ALi] */ static void __init quirk_alimagik(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) { dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik); /* * Natoma has some interesting boundary conditions with Zoran stuff * at least */ static void __devinit quirk_natoma(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_NATOMA)==0) { dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_NATOMA; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma); /* * This chip can cause PCI parity errors if config register 0xA0 is read * while DMAs are occurring. */ static void __devinit quirk_citrine(struct pci_dev *dev) { dev->cfg_size = 0xA0; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine); /* * S3 868 and 968 chips report region size equal to 32M, but they decode 64M. * If it's needed, re-allocate the region.
*/ static void __devinit quirk_s3_64M(struct pci_dev *dev) { struct resource *r = &dev->resource[0]; if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) { r->start = 0; r->end = 0x3ffffff; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsigned size, int nr, const char *name) { region &= ~(size-1); if (region) { struct pci_bus_region bus_region; struct resource *res = dev->resource + nr; res->name = pci_name(dev); res->start = region; res->end = region + size - 1; res->flags = IORESOURCE_IO; /* Convert from PCI bus to resource space. */ bus_region.start = res->start; bus_region.end = res->end; pcibios_bus_to_resource(dev, res, &bus_region); pci_claim_resource(dev, nr); dev_info(&dev->dev, "quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name); } } /* * The ATI Northbridge sets up an MCE on the processor if you even * read somewhere between 0x3b0->0x3bb or read 0x3d3 */ static void __devinit quirk_ati_exploding_mce(struct pci_dev *dev) { dev_info(&dev->dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n"); /* We must not look at these I/O locations */ request_region(0x3b0, 0x0C, "RadeonIGP"); request_region(0x3d3, 0x01, "RadeonIGP"); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce); /* * Let's make the southbridge information explicit instead * of having to worry about people probing the ACPI areas, * for example... (Yes, it happens, and if you read the wrong * ACPI register it will put the machine to sleep with no * way of waking it up again. Bummer). * * ALI M7101: Two IO regions pointed to by words at * 0xE0 (64 bytes of ACPI registers) * 0xE2 (32 bytes of SMB registers) */ static void __devinit quirk_ali7101_acpi(struct pci_dev *dev) { u16 region; pci_read_config_word(dev, 0xE0, &region); quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI"); pci_read_config_word(dev, 0xE2, &region); quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi); static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) { u32 devres; u32 mask, size, base; pci_read_config_dword(dev, port, &devres); if ((devres & enable) != enable) return; mask = (devres >> 16) & 15; base = devres & 0xffff; size = 16; for (;;) { unsigned bit = size >> 1; if ((bit & mask) == bit) break; size = bit; } /* * For now we only print it out. Eventually we'll want to * reserve it (at least if it's in the 0x1000+ range), but * let's get enough confirmation reports first. */ base &= -size; dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1); } static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) { u32 devres; u32 mask, size, base; pci_read_config_dword(dev, port, &devres); if ((devres & enable) != enable) return; base = devres & 0xffff0000; mask = (devres & 0x3f) << 16; size = 128 << 16; for (;;) { unsigned bit = size >> 1; if ((bit & mask) == bit) break; size = bit; } /* * For now we only print it out. Eventually we'll want to * reserve it, but let's get enough confirmation reports first.
*/ base &= -size; dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1); } /* * PIIX4 ACPI: Two IO regions pointed to by longwords at * 0x40 (64 bytes of ACPI registers) * 0x90 (16 bytes of SMB registers) * and a few strange programmable PIIX4 device resources. */ static void __devinit quirk_piix4_acpi(struct pci_dev *dev) { u32 region, res_a; pci_read_config_dword(dev, 0x40, &region); quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI"); pci_read_config_dword(dev, 0x90, &region); quirk_io_region(dev, region, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB"); /* Device resource A has enables for some of the other ones */ pci_read_config_dword(dev, 0x5c, &res_a); piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21); piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21); /* Device resource D is just bitfields for static resources */ /* Device 12 enabled? */ if (res_a & (1 << 29)) { piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20); piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7); } /* Device 13 enabled? */ if (res_a & (1 << 30)) { piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20); piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7); } piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20); piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi); /* * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at * 0x40 (128 bytes of ACPI, GPIO & TCO registers) * 0x58 (64 bytes of GPIO I/O space) */ static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev) { u32 region; pci_read_config_dword(dev, 0x40, &region); quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); pci_read_config_dword(dev, 0x58, &region); quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi); static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev) { u32 region; pci_read_config_dword(dev, 0x40, &region); quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); pci_read_config_dword(dev, 0x48, &region); quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); } static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) { u32 val; u32 size, base; pci_read_config_dword(dev, reg, &val); /* Enabled? 
*/ if (!(val & 1)) return; base = val & 0xfffc; if (dynsize) { /* * This is not correct. It is 16, 32 or 64 bytes depending on * register D31:F0:ADh bits 5:4. * * But this gets us at least _part_ of it. */ size = 16; } else { size = 128; } base &= ~(size-1); /* Just print it out for now. We should reserve it after more debugging */ dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1); } static void __devinit quirk_ich6_lpc(struct pci_dev *dev) { /* Shared ACPI/GPIO decode with all ICH6+ */ ich6_lpc_acpi_gpio(dev); /* ICH6-specific generic IO decode */ ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0); ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc); static void __devinit ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name) { u32 val; u32 mask, base; pci_read_config_dword(dev, reg, &val); /* Enabled? */ if (!(val & 1)) return; /* * IO base in bits 15:2, mask in bits 23:18, both * are dword-based */ base = val & 0xfffc; mask = (val >> 16) & 0xfc; mask |= 3; /* Just print it out for now. We should reserve it after more debugging */ dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask); } /* ICH7-10 has the same common LPC generic IO decode registers */ static void __devinit quirk_ich7_lpc(struct pci_dev *dev) { /* We share the common ACPI/DPIO decode with ICH6 */ ich6_lpc_acpi_gpio(dev); /* And have 4 ICH7+ generic decodes */ ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1"); ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2"); ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3"); ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc); /* * VIA ACPI: One IO region pointed to by longword at * 0x48 or 0x20 (256 bytes of ACPI registers) */ static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev) { u32 region; if (dev->revision & 0x10) { pci_read_config_dword(dev, 0x48, &region); region &= PCI_BASE_ADDRESS_IO_MASK; quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi); /* * VIA VT82C686 
ACPI: Three IO regions pointed to by (long)words at * 0x48 (256 bytes of ACPI registers) * 0x70 (128 bytes of hardware monitoring registers) * 0x90 (16 bytes of SMB registers) */ static void __devinit quirk_vt82c686_acpi(struct pci_dev *dev) { u16 hm; u32 smb; quirk_vt82c586_acpi(dev); pci_read_config_word(dev, 0x70, &hm); hm &= PCI_BASE_ADDRESS_IO_MASK; quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c686 HW-mon"); pci_read_config_dword(dev, 0x90, &smb); smb &= PCI_BASE_ADDRESS_IO_MASK; quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c686 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi); /* * VIA VT8235 ISA Bridge: Two IO regions pointed to by words at * 0x88 (128 bytes of power management registers) * 0xd0 (16 bytes of SMB registers) */ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev) { u16 pm, smb; pci_read_config_word(dev, 0x88, &pm); pm &= PCI_BASE_ADDRESS_IO_MASK; quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM"); pci_read_config_word(dev, 0xd0, &smb); smb &= PCI_BASE_ADDRESS_IO_MASK; quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1, "vt8235 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); /* * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back: * Disable fast back-to-back on the secondary bus segment */ static void __devinit quirk_xio2000a(struct pci_dev *dev) { struct pci_dev *pdev; u16 command; dev_warn(&dev->dev, "TI XIO2000a quirk detected; " "secondary bus fast back-to-back transfers disabled\n"); list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) { pci_read_config_word(pdev, PCI_COMMAND, &command); if (command & PCI_COMMAND_FAST_BACK) pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, quirk_xio2000a); #ifdef CONFIG_X86_IO_APIC #include <asm/io_apic.h> /* * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip * devices to the external APIC. * * TODO: When we have device-specific interrupt routers, * this code will go away from quirks. */ static void quirk_via_ioapic(struct pci_dev *dev) { u8 tmp; if (nr_ioapics < 1) tmp = 0; /* nothing routed to external APIC */ else tmp = 0x1f; /* all known bits (4-0) routed to external APIC */ dev_info(&dev->dev, "%sbling VIA external APIC routing\n", tmp == 0 ? "Disa" : "Ena"); /* Offset 0x58: External APIC IRQ output control */ pci_write_config_byte (dev, 0x58, tmp); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); /* * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit. * This leads to doubled level interrupt rates. * Set this bit to get rid of cycle wastage. * Otherwise uncritical.
*/ static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev) { u8 misc_control2; #define BYPASS_APIC_DEASSERT 8 pci_read_config_byte(dev, 0x5B, &misc_control2); if (!(misc_control2 & BYPASS_APIC_DEASSERT)) { dev_info(&dev->dev, "Bypassing VIA 8237 APIC De-Assert Message\n"); pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); /* * The AMD io apic can hang the box when an apic irq is masked. * We check all revs >= B0 (yet not in the pre production!) as the bug * is currently marked NoFix * * We have multiple reports of hangs with this chipset that went away with * noapic specified. For the moment we assume it's the erratum. We may be wrong * of course. However the advice is demonstrably good even if so.. */ static void __devinit quirk_amd_ioapic(struct pci_dev *dev) { if (dev->revision >= 0x02) { dev_warn(&dev->dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n"); dev_warn(&dev->dev, " : booting with the \"noapic\" option\n"); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic); static void __init quirk_ioapic_rmw(struct pci_dev *dev) { if (dev->devfn == 0 && dev->bus->number == 0) sis_apic_bug = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw); #endif /* CONFIG_X86_IO_APIC */ /* * Some settings of MMRBC can lead to data corruption so block changes. * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide */ static void __init quirk_amd_8131_mmrbc(struct pci_dev *dev) { if (dev->subordinate && dev->revision <= 0x12) { dev_info(&dev->dev, "AMD8131 rev %x detected; " "disabling PCI-X MMRBC\n", dev->revision); dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc); /* * FIXME: it is questionable that quirk_via_acpi * is needed. It shows up as an ISA bridge, and does not * support the PCI_INTERRUPT_LINE register at all. Therefore * it seems like setting the pci_dev's 'irq' to the * value of the ACPI SCI interrupt is only done for convenience. * -jgarzik */ static void __devinit quirk_via_acpi(struct pci_dev *d) { /* * VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */ u8 irq; pci_read_config_byte(d, 0x42, &irq); irq &= 0xf; if (irq && (irq != 2)) d->irq = irq; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi); /* * VIA bridges which have VLink */ static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18; static void quirk_via_bridge(struct pci_dev *dev) { /* See what bridge we have and find the device ranges */ switch (dev->device) { case PCI_DEVICE_ID_VIA_82C686: /* The VT82C686 is special, it attaches to PCI and can have any device number. All its subdevices are functions of that single device. 
*/ via_vlink_dev_lo = PCI_SLOT(dev->devfn); via_vlink_dev_hi = PCI_SLOT(dev->devfn); break; case PCI_DEVICE_ID_VIA_8237: case PCI_DEVICE_ID_VIA_8237A: via_vlink_dev_lo = 15; break; case PCI_DEVICE_ID_VIA_8235: via_vlink_dev_lo = 16; break; case PCI_DEVICE_ID_VIA_8231: case PCI_DEVICE_ID_VIA_8233_0: case PCI_DEVICE_ID_VIA_8233A: case PCI_DEVICE_ID_VIA_8233C_0: via_vlink_dev_lo = 17; break; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge); /** * quirk_via_vlink - VIA VLink IRQ number update * @dev: PCI device * * If the device we are dealing with is on a PIC IRQ we need to * ensure that the IRQ line register which usually is not relevant * for PCI cards, is actually written so that interrupts get sent * to the right place. * We only do this on systems where a VIA south bridge was detected, * and only for VIA devices on the motherboard (see quirk_via_bridge * above). */ static void quirk_via_vlink(struct pci_dev *dev) { u8 irq, new_irq; /* Check if we have VLink at all */ if (via_vlink_dev_lo == -1) return; new_irq = dev->irq; /* Don't quirk interrupts outside the legacy IRQ range */ if (!new_irq || new_irq > 15) return; /* Internal device ? */ if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi || PCI_SLOT(dev->devfn) < via_vlink_dev_lo) return; /* This is an internal VLink device on a PIC interrupt. The BIOS ought to have set this but may not have, so we redo it */ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); if (new_irq != irq) { dev_info(&dev->dev, "VIA VLink IRQ fixup, from %d to %d\n", irq, new_irq); udelay(15); /* unknown if delay really needed */ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); } } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink); /* * VIA VT82C598 has its device ID settable and many BIOSes * set it to the ID of VT82C597 for backward compatibility. * We need to switch it off to be able to recognize the real * type of the chip. */ static void __devinit quirk_vt82c598_id(struct pci_dev *dev) { pci_write_config_byte(dev, 0xfc, 0); pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id); /* * CardBus controllers have a legacy base address that enables them * to respond as i82365 pcmcia controllers. We don't want them to * do this even if the Linux CardBus driver is not loaded, because * the Linux i82365 driver does not (and should not) handle CardBus. */ static void quirk_cardbus_legacy(struct pci_dev *dev) { if ((PCI_CLASS_BRIDGE_CARDBUS << 8) ^ dev->class) return; pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0); } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); /* * Following the PCI ordering rules is optional on the AMD762. 
I'm not * sure what the designers were smoking but let's not inhale... * * To be fair to AMD, it follows the spec by default, it's BIOS people * who turn it off! */ static void quirk_amd_ordering(struct pci_dev *dev) { u32 pcic; pci_read_config_dword(dev, 0x4C, &pcic); if ((pcic&6)!=6) { pcic |= 6; dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n"); pci_write_config_dword(dev, 0x4C, pcic); pci_read_config_dword(dev, 0x84, &pcic); pcic |= (1<<23); /* Required in this mode */ pci_write_config_dword(dev, 0x84, pcic); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); /* * DreamWorks provided workaround for Dunord I-3000 problem * * This card decodes and responds to addresses not apparently * assigned to it. We force a larger allocation to ensure that * nothing gets put too close to it. */ static void __devinit quirk_dunord(struct pci_dev *dev) { struct resource *r = &dev->resource[1]; r->start = 0; r->end = 0xffffff; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord); /* * i82380FB mobile docking controller: its PCI-to-PCI bridge * uses subtractive decoding (transparent), and does indicate this * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80 * instead of 0x01. */ static void __devinit quirk_transparent_bridge(struct pci_dev *dev) { dev->transparent = 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge); /* * Common misconfiguration of the MediaGX/Geode PCI master that will * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 * datasheets found at http://www.national.com/ds/GX for info on what * these bits do. <christer@weinigel.se> */ static void quirk_mediagx_master(struct pci_dev *dev) { u8 reg; pci_read_config_byte(dev, 0x41, &reg); if (reg & 2) { reg &= ~2; dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", reg); pci_write_config_byte(dev, 0x41, reg); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master); /* * Ensure C0 rev restreaming is off. This is normally done by * the BIOS but in the odd case it is not, the results are corruption, * hence the presence of a Linux check */ static void quirk_disable_pxb(struct pci_dev *pdev) { u16 config; if (pdev->revision != 0x04) /* Only C0 requires this */ return; pci_read_config_word(pdev, 0x40, &config); if (config & (1<<6)) { config &= ~(1<<6); pci_write_config_word(pdev, 0x40, config); dev_info(&pdev->dev, "C0 revision 450NX.
Disabling PCI restreaming\n"); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) { /* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */ u8 tmp; pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); if (tmp == 0x01) { pci_read_config_byte(pdev, 0x40, &tmp); pci_write_config_byte(pdev, 0x40, tmp|1); pci_write_config_byte(pdev, 0x9, 1); pci_write_config_byte(pdev, 0xa, 6); pci_write_config_byte(pdev, 0x40, tmp); pdev->class = PCI_CLASS_STORAGE_SATA_AHCI; dev_info(&pdev->dev, "set SATA to AHCI mode\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); /* * Serverworks CSB5 IDE does not fully support native mode */ static void __devinit quirk_svwks_csb5ide(struct pci_dev *pdev) { u8 prog; pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); if (prog & 5) { prog &= ~5; pdev->class &= ~5; pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); /* PCI layer will sort out resources */ } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide); /* * Intel 82801CAM ICH3-M datasheet says IDE modes must be the same */ static void __init quirk_ide_samemode(struct pci_dev *pdev) { u8 prog; pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) { dev_info(&pdev->dev, "IDE mode mismatch; forcing legacy mode\n"); prog &= ~5; pdev->class &= ~5; pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode); /* * Some ATA devices break if put into D3 */ static void __devinit quirk_no_ata_d3(struct pci_dev *pdev) { /* Quirk the legacy ATA devices only. The AHCI ones are ok */ if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3); /* ALi loses some register settings that we cannot then restore */ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, quirk_no_ata_d3); /* VIA comes back fine but we need to keep it alive or ACPI GTM failures occur when mode detecting */ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_no_ata_d3); /* This was originally an Alpha specific thing, but it really fits here. * The i82375 PCI/EISA bridge appears as non-classified. Fix that. */ static void __init quirk_eisa_bridge(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_EISA << 8; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge); /* * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge * is not activated. 
The myth is that Asus said that they do not want the * users to be irritated by just another PCI Device in the Win98 device * manager. (see the file prog/hotplug/README.p4b in the lm_sensors * package 2.7.0 for details) * * The SMBus PCI Device can be activated by setting a bit in the ICH LPC * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it * becomes necessary to do this tweak in two steps -- the chosen trigger * is either the Host bridge (preferred) or on-board VGA controller. * * Note that we used to unhide the SMBus that way on Toshiba laptops * (Satellite A40 and Tecra M2) but then found that the thermal management * was done by SMM code, which could cause unsynchronized concurrent * accesses to the SMBus registers, with potentially bad effects. Thus you * should be very careful when adding new entries: if SMM is accessing the * Intel SMBus, this is a very good reason to leave it hidden. * * Likewise, many recent laptops use ACPI for thermal management. If the * ACPI DSDT code accesses the SMBus, then Linux should not access it * natively, and keeping the SMBus hidden is the right thing to do. If you * are about to add an entry in the table below, please first disassemble * the DSDT and double-check that there is no code accessing the SMBus. */ static int asus_hides_smbus; static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) { if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB) switch(dev->subsystem_device) { case 0x8025: /* P4B-LX */ case 0x8070: /* P4B */ case 0x8088: /* P4B533 */ case 0x1626: /* L3C notebook */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) switch(dev->subsystem_device) { case 0x80b1: /* P4GE-V */ case 0x80b2: /* P4PE */ case 0x8093: /* P4B533-V */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB) switch(dev->subsystem_device) { case 0x8030: /* P4T533 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0) switch (dev->subsystem_device) { case 0x8070: /* P4G8X Deluxe */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH) switch (dev->subsystem_device) { case 0x80c9: /* PU-DLS */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) switch (dev->subsystem_device) { case 0x1751: /* M2N notebook */ case 0x1821: /* M5N notebook */ case 0x1897: /* A6L notebook */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch (dev->subsystem_device) { case 0x184b: /* W1N notebook */ case 0x186a: /* M6Ne notebook */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) switch (dev->subsystem_device) { case 0x80f2: /* P4P800-X */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) switch (dev->subsystem_device) { case 0x1882: /* M6V notebook */ case 0x1977: /* A6VA notebook */ asus_hides_smbus = 1; } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch(dev->subsystem_device) { case 0x088C: /* HP Compaq nc8000 */ case 0x0890: /* HP Compaq nc6000 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) switch (dev->subsystem_device) { case 0x12bc: /* HP D330L */ case 0x12bd: /* HP D530 */ case 0x006a: /* HP Compaq nx9500 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB) switch (dev->subsystem_device) { case 0x12bf: /* HP xw4100 */ 
asus_hides_smbus = 1; } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch(dev->subsystem_device) { case 0xC00C: /* Samsung P35 notebook */ asus_hides_smbus = 1; } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch(dev->subsystem_device) { case 0x0058: /* Compaq Evo N620c */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3) switch(dev->subsystem_device) { case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */ /* Motherboard doesn't have Host bridge * subvendor/subdevice IDs, therefore checking * its on-board VGA controller */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2) switch(dev->subsystem_device) { case 0x00b8: /* Compaq Evo D510 CMT */ case 0x00b9: /* Compaq Evo D510 SFF */ case 0x00ba: /* Compaq Evo D510 USDT */ /* Motherboard doesn't have Host bridge * subvendor/subdevice IDs and on-board VGA * controller is disabled if an AGP card is * inserted, therefore checking USB UHCI * Controller #1 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC) switch (dev->subsystem_device) { case 0x001A: /* Compaq Deskpro EN SSF P667 815E */ /* Motherboard doesn't have host bridge * subvendor/subdevice IDs, therefore checking * its on-board VGA controller */ asus_hides_smbus = 1; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge); static void asus_hides_smbus_lpc(struct pci_dev *dev) { u16 val; if (likely(!asus_hides_smbus)) return; pci_read_config_word(dev, 0xF2, &val); if (val & 0x8) { pci_write_config_word(dev, 0xF2, val & (~0x8)); pci_read_config_word(dev, 0xF2, &val); if (val & 0x8) dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 
0x%x\n", val); else dev_info(&dev->dev, "Enabled i801 SMBus device\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); /* It appears we just have one such device. If not, we have a warning */ static void __iomem *asus_rcba_base; static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev) { u32 rcba; if (likely(!asus_hides_smbus)) return; WARN_ON(asus_rcba_base); pci_read_config_dword(dev, 0xF0, &rcba); /* use bits 31:14, 16 kB aligned */ asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); if (asus_rcba_base == NULL) return; } static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev) { u32 val; if (likely(!asus_hides_smbus || !asus_rcba_base)) return; /* read the Function Disable register, dword mode only */ val = readl(asus_rcba_base + 0x3418); writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */ } static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev) { if (likely(!asus_hides_smbus || !asus_rcba_base)) return; iounmap(asus_rcba_base); asus_rcba_base = NULL; dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n"); } static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) { asus_hides_smbus_lpc_ich6_suspend(dev); asus_hides_smbus_lpc_ich6_resume_early(dev); asus_hides_smbus_lpc_ich6_resume(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early); /* * SiS 96x south bridge: BIOS typically hides SMBus device... 
*/ static void quirk_sis_96x_smbus(struct pci_dev *dev) { u8 val = 0; pci_read_config_byte(dev, 0x77, &val); if (val & 0x10) { dev_info(&dev->dev, "Enabling SiS 96x SMBus\n"); pci_write_config_byte(dev, 0x77, val & ~0x10); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); /* * ... This is further complicated by the fact that some SiS96x south * bridges pretend to be 85C503/5513 instead. In that case see if we * spotted a compatible north bridge to make sure. * (pci_find_device doesn't work yet) * * We can also enable the sis96x bit in the discovery register.. */ #define SIS_DETECT_REGISTER 0x40 static void quirk_sis_503(struct pci_dev *dev) { u8 reg; u16 devid; pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg); pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6)); pci_read_config_word(dev, PCI_DEVICE_ID, &devid); if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) { pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg); return; } /* * Ok, it now shows up as a 96x.. run the 96x quirk by * hand in case it has already been processed. * (depends on link order, which is apparently not guaranteed) */ dev->device = devid; quirk_sis_96x_smbus(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); /* * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller * and MC97 modem controller are disabled when a second PCI soundcard is * present. This patch, tweaking the VT8237 ISA bridge, enables them. * -- bjd */ static void asus_hides_ac97_lpc(struct pci_dev *dev) { u8 val; int asus_hides_ac97 = 0; if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { if (dev->device == PCI_DEVICE_ID_VIA_8237) asus_hides_ac97 = 1; } if (!asus_hides_ac97) return; pci_read_config_byte(dev, 0x50, &val); if (val & 0xc0) { pci_write_config_byte(dev, 0x50, val & (~0xc0)); pci_read_config_byte(dev, 0x50, &val); if (val & 0xc0) dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", val); else dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) /* * If we are using libata we can drive this chip properly but must * do this early on to make the additional device appear during * the PCI scanning. 
*/ static void quirk_jmicron_ata(struct pci_dev *pdev) { u32 conf1, conf5, class; u8 hdr; /* Only poke fn 0 */ if (PCI_FUNC(pdev->devfn)) return; pci_read_config_dword(pdev, 0x40, &conf1); pci_read_config_dword(pdev, 0x80, &conf5); conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */ conf5 &= ~(1 << 24); /* Clear bit 24 */ switch (pdev->device) { case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */ case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */ /* The controller should be in single function ahci mode */ conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */ break; case PCI_DEVICE_ID_JMICRON_JMB365: case PCI_DEVICE_ID_JMICRON_JMB366: /* Redirect IDE second PATA port to the right spot */ conf5 |= (1 << 24); /* Fall through */ case PCI_DEVICE_ID_JMICRON_JMB361: case PCI_DEVICE_ID_JMICRON_JMB363: /* Enable dual function mode, AHCI on fn 0, IDE fn1 */ /* Set the class codes correctly and then direct IDE 0 */ conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */ break; case PCI_DEVICE_ID_JMICRON_JMB368: /* The controller should be in single function IDE mode */ conf1 |= 0x00C00000; /* Set 22, 23 */ break; } pci_write_config_dword(pdev, 0x40, conf1); pci_write_config_dword(pdev, 0x80, conf5); /* Update pdev accordingly */ pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr); pdev->hdr_type = hdr & 0x7f; pdev->multifunction = !!(hdr & 0x80); pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class); pdev->class = class >> 8; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); #endif #ifdef CONFIG_X86_IO_APIC static void __init quirk_alder_ioapic(struct pci_dev *pdev) { int i; if ((pdev->class >> 8) != 0xff00) return; /* the first BAR is the location of the IO APIC...we must * not touch this (and it's already covered by the fixmap), so * forcibly insert it into the resource tree */ if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0)) insert_resource(&iomem_resource, &pdev->resource[0]); /* The next five BARs all seem to be rubbish, so just clean * them out */ for (i=1; i < 6; i++) { memset(&pdev->resource[i], 0, sizeof(pdev->resource[i])); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); 
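/*
 * For reference, the magic constants in quirk_jmicron_ata() above are plain
 * bit sums; recomputing them from the in-line comments (values derived here,
 * not taken from a JMicron datasheet):
 *
 *   "Clear bit 1, 8, 9, 12-19, 22, 23":
 *       (1<<1)|(1<<8)|(1<<9)            = 0x00000302
 *       (0xff<<12)                      = 0x000ff000
 *       (1<<22)|(1<<23)                 = 0x00c00000
 *                                   sum = 0x00cff302, matching ~0x00CFF302
 *
 *   "Set 8, 13, 15, 17" (single-function AHCI mode):
 *       (1<<8)|(1<<13)|(1<<15)|(1<<17)  = 0x0002a100, matching 0x0002A100
 */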
#endif static void __devinit quirk_pcie_mch(struct pci_dev *pdev) { pci_msi_off(pdev); pdev->no_msi = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch); /* * It's possible for the MSI to get corrupted if shpc and acpi * are used together on certain PXH-based systems. */ static void __devinit quirk_pcie_pxh(struct pci_dev *dev) { pci_msi_off(dev); dev->no_msi = 1; dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n"); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh); /* * Some Intel PCI Express chipsets have trouble with downstream * device power management. */ static void quirk_intel_pcie_pm(struct pci_dev * dev) { pci_pm_d3_delay = 120; dev->no_d1d2 = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); #ifdef CONFIG_X86_IO_APIC /* * Boot interrupts on some chipsets cannot be turned off. For these chipsets, * remap the original interrupt in the linux kernel to the boot interrupt, so * that a PCI device's interrupt handler is installed on the boot interrupt * line instead. 
*/ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) { if (noioapicquirk || noioapicreroute) return; dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); /* * On some chipsets we can disable the generation of legacy INTx boot * interrupts. */ /* * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no * 300641-004US, section 5.7.3. 
*/ #define INTEL_6300_IOAPIC_ABAR 0x40 #define INTEL_6300_DISABLE_BOOT_IRQ (1<<14) static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) { u16 pci_config_word; if (noioapicquirk) return; pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word); pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); /* * disable boot interrupts on HT-1000 */ #define BC_HT1000_FEATURE_REG 0x64 #define BC_HT1000_PIC_REGS_ENABLE (1<<0) #define BC_HT1000_MAP_IDX 0xC00 #define BC_HT1000_MAP_DATA 0xC01 static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) { u32 pci_config_dword; u8 irq; if (noioapicquirk) return; pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword); pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword | BC_HT1000_PIC_REGS_ENABLE); for (irq = 0x10; irq < 0x10 + 32; irq++) { outb(irq, BC_HT1000_MAP_IDX); outb(0x00, BC_HT1000_MAP_DATA); } pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); /* * disable boot interrupts on AMD and ATI chipsets */ /* * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131 * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode * (due to an erratum). 
*/ #define AMD_813X_MISC 0x40 #define AMD_813X_NOIOAMODE (1<<0) #define AMD_813X_REV_B2 0x13 static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) { u32 pci_config_dword; if (noioapicquirk) return; if (dev->revision == AMD_813X_REV_B2) return; pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); pci_config_dword &= ~AMD_813X_NOIOAMODE; pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); #define AMD_8111_PCI_IRQ_ROUTING 0x56 static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) { u16 pci_config_word; if (noioapicquirk) return; pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); if (!pci_config_word) { dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] " "already disabled\n", dev->vendor, dev->device); return; } pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); #endif /* CONFIG_X86_IO_APIC */ /* * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes. * Re-allocate the region if needed... */ static void __init quirk_tc86c001_ide(struct pci_dev *dev) { struct resource *r = &dev->resource[0]; if (r->start & 0x8) { r->start = 0; r->end = 0xf; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE, quirk_tc86c001_ide); static void __devinit quirk_netmos(struct pci_dev *dev) { unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4; unsigned int num_serial = dev->subsystem_device & 0xf; /* * These Netmos parts are multiport serial devices with optional * parallel ports. Even when parallel ports are present, they * are identified as class SERIAL, which means the serial driver * will claim them. To prevent this, mark them as class OTHER. * These combo devices should be claimed by parport_serial. * * The subdevice ID is of the form 0x00PS, where <P> is the number * of parallel ports and <S> is the number of serial ports. 
*/ switch (dev->device) { case PCI_DEVICE_ID_NETMOS_9835: /* Well, this rule doesn't hold for the following 9835 device */ if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && dev->subsystem_device == 0x0299) return; case PCI_DEVICE_ID_NETMOS_9735: case PCI_DEVICE_ID_NETMOS_9745: case PCI_DEVICE_ID_NETMOS_9845: case PCI_DEVICE_ID_NETMOS_9855: if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL && num_parallel) { dev_info(&dev->dev, "Netmos %04x (%u parallel, " "%u serial); changing class SERIAL to OTHER " "(use parport_serial)\n", dev->device, num_parallel, num_serial); dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) | (dev->class & 0xff); } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); static void __devinit quirk_e100_interrupt(struct pci_dev *dev) { u16 command, pmcsr; u8 __iomem *csr; u8 cmd_hi; int pm; switch (dev->device) { /* PCI IDs taken from drivers/net/e100.c */ case 0x1029: case 0x1030 ... 0x1034: case 0x1038 ... 0x103E: case 0x1050 ... 0x1057: case 0x1059: case 0x1064 ... 0x106B: case 0x1091 ... 0x1095: case 0x1209: case 0x1229: case 0x2449: case 0x2459: case 0x245D: case 0x27DC: break; default: return; } /* * Some firmware hands off the e100 with interrupts enabled, * which can cause a flood of interrupts if packets are * received before the driver attaches to the device. So * disable all e100 interrupts here. The driver will * re-enable them when it's ready. */ pci_read_config_word(dev, PCI_COMMAND, &command); if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0)) return; /* * Check that the device is in the D0 power state. If it's not, * there is no point to look any further. */ pm = pci_find_capability(dev, PCI_CAP_ID_PM); if (pm) { pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) return; } /* Convert from PCI bus to resource space. */ csr = ioremap(pci_resource_start(dev, 0), 8); if (!csr) { dev_warn(&dev->dev, "Can't map e100 registers\n"); return; } cmd_hi = readb(csr + 3); if (cmd_hi == 0) { dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; " "disabling\n"); writeb(1, csr + 3); } iounmap(csr); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt); /* * The 82575 and 82598 may experience data corruption issues when transitioning * out of L0S. 
To prevent this we need to disable L0S on the pci-e link */ static void __devinit quirk_disable_aspm_l0s(struct pci_dev *dev) { dev_info(&dev->dev, "Disabling L0s\n"); pci_disable_link_state(dev, PCIE_LINK_STATE_L0S); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); static void __devinit fixup_rev1_53c810(struct pci_dev* dev) { /* rev 1 ncr53c810 chips don't set the class at all which means * they don't get their resources remapped. Fix that here. */ if (dev->class == PCI_CLASS_NOT_DEFINED) { dev_info(&dev->dev, "NCR 53c810 rev 1 detected; setting PCI class\n"); dev->class = PCI_CLASS_STORAGE_SCSI; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); #endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */ #ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS /* Enable 1k I/O space granularity on the Intel P64H2 */ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) { u16 en1k; u8 io_base_lo, io_limit_lo; unsigned long base, limit; struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES; pci_read_config_word(dev, 0x40, &en1k); if (en1k & 0x200) { dev_info(&dev->dev, "Enable I/O Space to 1KB granularity\n"); pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); base = (io_base_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8; limit = (io_limit_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8; if (base <= limit) { res->start = base; res->end = limit + 0x3ff; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io); /* Fix the IOBL_ADR for 1k I/O space granularity on the Intel P64H2 * The IOBL_ADR gets re-written to 4k boundaries in pci_setup_bridge() * in drivers/pci/setup-bus.c */ static void __devinit quirk_p64h2_1k_io_fix_iobl(struct pci_dev *dev) { u16 en1k, iobl_adr, iobl_adr_1k; struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES; pci_read_config_word(dev, 0x40, &en1k); if (en1k & 0x200) { pci_read_config_word(dev, PCI_IO_BASE, &iobl_adr); iobl_adr_1k = iobl_adr | (res->start >> 8) | (res->end & 0xfc00); if (iobl_adr != iobl_adr_1k) { dev_info(&dev->dev, "Fixing P64H2 IOBL_ADR from 0x%x to 0x%x for 1KB granularity\n", iobl_adr,iobl_adr_1k); pci_write_config_word(dev, PCI_IO_BASE, iobl_adr_1k); } } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io_fix_iobl); /* Under some circumstances, AER is not linked with extended capabilities. * Force it to be linked by setting the corresponding control bit in the * config space. 
*/ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev) { uint8_t b; if (pci_read_config_byte(dev, 0xf41, &b) == 0) { if (!(b & 0x20)) { pci_write_config_byte(dev, 0xf41, b | 0x20); dev_info(&dev->dev, "Linking AER extended capability\n"); } } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_pcie_aer_ext_cap); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_pcie_aer_ext_cap); static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) { /* * Disable PCI Bus Parking and PCI Master read caching on CX700 * which causes unspecified timing errors with a VT6212L on the PCI * bus leading to USB2.0 packet loss. The defaults are that these * features are turned off but some BIOSes turn them on. */ uint8_t b; if (pci_read_config_byte(dev, 0x76, &b) == 0) { if (b & 0x40) { /* Turn off PCI Bus Parking */ pci_write_config_byte(dev, 0x76, b ^ 0x40); dev_info(&dev->dev, "Disabling VIA CX700 PCI parking\n"); } } if (pci_read_config_byte(dev, 0x72, &b) == 0) { if (b != 0) { /* Turn off PCI Master read caching */ pci_write_config_byte(dev, 0x72, 0x0); /* Set PCI Master Bus time-out to "1x16 PCLK" */ pci_write_config_byte(dev, 0x75, 0x1); /* Disable "Read FIFO Timer" */ pci_write_config_byte(dev, 0x77, 0x0); dev_info(&dev->dev, "Disabling VIA CX700 PCI caching\n"); } } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching); /* * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the * VPD end tag will hang the device. This problem was initially * observed when a vpd entry was created in sysfs * ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry * will dump 32k of data. Reading a full 32k will cause an access * beyond the VPD end tag causing the device to hang. Once the device * is hung, the bnx2 driver will not be able to reset the device. * We believe that it is legal to read beyond the end tag and * therefore the solution is to limit the read/write length. */ static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev) { /* * Only disable the VPD capability for 5706, 5706S, 5708, * 5708S and 5709 rev. A */ if ((dev->device == PCI_DEVICE_ID_NX2_5706) || (dev->device == PCI_DEVICE_ID_NX2_5706S) || (dev->device == PCI_DEVICE_ID_NX2_5708) || (dev->device == PCI_DEVICE_ID_NX2_5708S) || ((dev->device == PCI_DEVICE_ID_NX2_5709) && (dev->revision & 0xf0) == 0x0)) { if (dev->vpd) dev->vpd->len = 0x80; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, quirk_brcm_570x_limit_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, quirk_brcm_570x_limit_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708, quirk_brcm_570x_limit_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S, quirk_brcm_570x_limit_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709, quirk_brcm_570x_limit_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S, quirk_brcm_570x_limit_vpd); /* Originally in EDAC sources for i82875P: * Intel tells BIOS developers to hide device 6 which * configures the overflow device access containing * the DRBs - this is where we expose device 6. 
* http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm */ static void __devinit quirk_unhide_mch_dev6(struct pci_dev *dev) { u8 reg; if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) { dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n"); pci_write_config_byte(dev, 0xF4, reg | 0x02); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, quirk_unhide_mch_dev6); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, quirk_unhide_mch_dev6); #ifdef CONFIG_PCI_MSI /* Some chipsets do not support MSI. We cannot easily rely on setting * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually * some other busses controlled by the chipset even if Linux is not * aware of it. Instead of setting the flag on all busses in the * machine, simply disable MSI globally. */ static void __init quirk_disable_all_msi(struct pci_dev *dev) { pci_no_msi(); dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n"); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi); /* Disable MSI on chipsets that are known to not support it */ static void __devinit quirk_disable_msi(struct pci_dev *dev) { if (dev->subordinate) { dev_warn(&dev->dev, "MSI quirk detected; " "subordinate MSI disabled\n"); dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi); /* Go through the list of Hypertransport capabilities and * return 1 if a HT MSI capability is found and enabled */ static int __devinit msi_ht_cap_enabled(struct pci_dev *dev) { int pos, ttl = 48; pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { dev_info(&dev->dev, "Found %s HT MSI Mapping\n", flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled"); return (flags & HT_MSI_FLAGS_ENABLE) != 0; } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } return 0; } /* Check the hypertransport MSI mapping to know whether MSI is enabled or not */ static void __devinit quirk_msi_ht_cap(struct pci_dev *dev) { if (dev->subordinate && !msi_ht_cap_enabled(dev)) { dev_warn(&dev->dev, "MSI quirk detected; " "subordinate MSI disabled\n"); dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE, quirk_msi_ht_cap); /* The nVidia CK804 chipset may have 2 HT MSI mappings. * MSI are supported if the MSI capability set in any of these mappings. */ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) { struct pci_dev *pdev; if (!dev->subordinate) return; /* check HT MSI cap on this chipset and the root one. 
* a single one having MSI is enough to be sure that MSI are supported. */ pdev = pci_get_slot(dev->bus, 0); if (!pdev) return; if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) { dev_warn(&dev->dev, "MSI quirk detected; " "subordinate MSI disabled\n"); dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; } pci_dev_put(pdev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_msi_ht_cap); /* Force enable MSI mapping capability on HT bridges */ static void __devinit ht_enable_msi_mapping(struct pci_dev *dev) { int pos, ttl = 48; pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { dev_info(&dev->dev, "Enabling HT MSI Mapping\n"); pci_write_config_byte(dev, pos + HT_MSI_FLAGS, flags | HT_MSI_FLAGS_ENABLE); } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, ht_enable_msi_mapping); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, ht_enable_msi_mapping); /* The P5N32-SLI motherboards from Asus have a problem with msi * for the MCP55 NIC. It is not yet determined whether the msi problem * also affects other devices. As for now, turn off msi for this device. */ static void __devinit nvenet_msi_disable(struct pci_dev *dev) { if (dmi_name_in_vendors("P5N32-SLI PREMIUM") || dmi_name_in_vendors("P5N32-E SLI")) { dev_info(&dev->dev, "Disabling msi for MCP55 NIC on P5N32-SLI\n"); dev->no_msi = 1; } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15, nvenet_msi_disable); static int __devinit ht_check_msi_mapping(struct pci_dev *dev) { int pos, ttl = 48; int found = 0; /* check if there is HT MSI cap or enabled on this device */ pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (found < 1) found = 1; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { if (flags & HT_MSI_FLAGS_ENABLE) { if (found < 2) { found = 2; break; } } } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } return found; } static int __devinit host_bridge_with_leaf(struct pci_dev *host_bridge) { struct pci_dev *dev; int pos; int i, dev_no; int found = 0; dev_no = host_bridge->devfn >> 3; for (i = dev_no + 1; i < 0x20; i++) { dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0)); if (!dev) continue; /* found next host bridge ?*/ pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE); if (pos != 0) { pci_dev_put(dev); break; } if (ht_check_msi_mapping(dev)) { found = 1; pci_dev_put(dev); break; } pci_dev_put(dev); } return found; } #define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control */ #define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control to */ static int __devinit is_end_of_ht_chain(struct pci_dev *dev) { int pos, ctrl_off; int end = 0; u16 flags, ctrl; pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE); if (!pos) goto out; pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags); ctrl_off = ((flags >> 10) & 1) ? 
PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1; pci_read_config_word(dev, pos + ctrl_off, &ctrl); if (ctrl & (1 << 6)) end = 1; out: return end; } static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev) { struct pci_dev *host_bridge; int pos; int i, dev_no; int found = 0; dev_no = dev->devfn >> 3; for (i = dev_no; i >= 0; i--) { host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0)); if (!host_bridge) continue; pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE); if (pos != 0) { found = 1; break; } pci_dev_put(host_bridge); } if (!found) return; /* don't enable end_device/host_bridge with leaf directly here */ if (host_bridge == dev && is_end_of_ht_chain(host_bridge) && host_bridge_with_leaf(host_bridge)) goto out; /* root did that ! */ if (msi_ht_cap_enabled(host_bridge)) goto out; ht_enable_msi_mapping(dev); out: pci_dev_put(host_bridge); } static void __devinit ht_disable_msi_mapping(struct pci_dev *dev) { int pos, ttl = 48; pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { dev_info(&dev->dev, "Disabling HT MSI Mapping\n"); pci_write_config_byte(dev, pos + HT_MSI_FLAGS, flags & ~HT_MSI_FLAGS_ENABLE); } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } } static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all) { struct pci_dev *host_bridge; int pos; int found; if (!pci_msi_enabled()) return; /* check if there is HT MSI cap or enabled on this device */ found = ht_check_msi_mapping(dev); /* no HT MSI CAP */ if (found == 0) return; /* * HT MSI mapping should be disabled on devices that are below * a non-Hypertransport host bridge. Locate the host bridge... */ host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); if (host_bridge == NULL) { dev_warn(&dev->dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n"); return; } pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE); if (pos != 0) { /* Host bridge is to HT */ if (found == 1) { /* it is not enabled, try to enable it */ if (all) ht_enable_msi_mapping(dev); else nv_ht_enable_msi_mapping(dev); } return; } /* HT MSI is not enabled */ if (found == 1) return; /* Host bridge is not to HT, disable HT MSI mapping on this device */ ht_disable_msi_mapping(dev); } static void __devinit nv_msi_ht_cap_quirk_all(struct pci_dev *dev) { return __nv_msi_ht_cap_quirk(dev, 1); } static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev) { return __nv_msi_ht_cap_quirk(dev, 0); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; } static void __devinit quirk_msi_intx_disable_ati_bug(struct pci_dev *dev) { struct pci_dev *p; /* SB700 MSI issue will be fixed at HW level from revision A21, * we need check PCI REVISION ID of SMBus controller to get SB700 * revision. 
*/ p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); if (!p) return; if ((p->revision < 0x3B) && (p->revision >= 0x30)) dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; pci_dev_put(p); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, quirk_msi_intx_disable_bug); #endif /* CONFIG_PCI_MSI */ #ifdef CONFIG_PCI_IOV /* * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the * old Flash Memory Space. */ static void __devinit quirk_i82576_sriov(struct pci_dev *dev) { int pos, flags; u32 bar, start, size; if (PAGE_SIZE > 0x10000) return; flags = pci_resource_flags(dev, 0); if ((flags & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY || (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) != PCI_BASE_ADDRESS_MEM_TYPE_32) return; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); if (!pos) return; pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar); if (bar & PCI_BASE_ADDRESS_MEM_MASK) return; start = pci_resource_start(dev, 1); size = pci_resource_len(dev, 1); if (!start || size != 0x400000 || start & (size - 1)) return; pci_resource_flags(dev, 1) = 0; pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0); pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start); pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2); dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov); #endif /* CONFIG_PCI_IOV */ #endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) { while (f < end) { if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { dev_dbg(&dev->dev, "calling %pF\n", f->hook); f->hook(dev); } f++; 
} } extern struct pci_fixup __start_pci_fixups_early[]; extern struct pci_fixup __end_pci_fixups_early[]; extern struct pci_fixup __start_pci_fixups_header[]; extern struct pci_fixup __end_pci_fixups_header[]; extern struct pci_fixup __start_pci_fixups_final[]; extern struct pci_fixup __end_pci_fixups_final[]; extern struct pci_fixup __start_pci_fixups_enable[]; extern struct pci_fixup __end_pci_fixups_enable[]; extern struct pci_fixup __start_pci_fixups_resume[]; extern struct pci_fixup __end_pci_fixups_resume[]; extern struct pci_fixup __start_pci_fixups_resume_early[]; extern struct pci_fixup __end_pci_fixups_resume_early[]; extern struct pci_fixup __start_pci_fixups_suspend[]; extern struct pci_fixup __end_pci_fixups_suspend[]; #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) #define VTUNCERRMSK_REG 0x1ac #define VTD_MSK_SPEC_ERRORS (1 << 31) /* * This is a quirk for masking vt-d spec defined errors to platform error * handling logic. With out this, platforms using Intel 7500, 5500 chipsets * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based * on the RAS config settings of the platform) when a vt-d fault happens. * The resulting SMI caused the system to hang. * * VT-d spec related errors are already handled by the VT-d OS code, so no * need to report the same error through other channels. */ static void vtd_mask_spec_errors(struct pci_dev *dev) { u32 word; pci_read_config_dword(dev, VTUNCERRMSK_REG, &word); pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors); #endif void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { struct pci_fixup *start, *end; switch(pass) { case pci_fixup_early: start = __start_pci_fixups_early; end = __end_pci_fixups_early; break; case pci_fixup_header: start = __start_pci_fixups_header; end = __end_pci_fixups_header; break; case pci_fixup_final: start = __start_pci_fixups_final; end = __end_pci_fixups_final; break; case pci_fixup_enable: start = __start_pci_fixups_enable; end = __end_pci_fixups_enable; break; case pci_fixup_resume: start = __start_pci_fixups_resume; end = __end_pci_fixups_resume; break; case pci_fixup_resume_early: start = __start_pci_fixups_resume_early; end = __end_pci_fixups_resume_early; break; case pci_fixup_suspend: start = __start_pci_fixups_suspend; end = __end_pci_fixups_suspend; break; default: /* stupid compiler warning, you would think with an enum... */ return; } pci_do_fixups(dev, start, end); } static int __init pci_apply_final_quirks(void) { struct pci_dev *dev = NULL; while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { pci_fixup_device(pci_fixup_final, dev); } return 0; } fs_initcall_sync(pci_apply_final_quirks); #else void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} #endif EXPORT_SYMBOL(pci_fixup_device);
uwehermann/easybox-904-lte-firmware
linux/linux-2.6.32.32/drivers/pci/quirks.c
C
gpl-2.0
94,163
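The quirks.c row above ends with the generic dispatch machinery: DECLARE_PCI_FIXUP_* drops {vendor, device, hook} records into per-pass linker sections, and pci_do_fixups() calls every hook whose IDs match, with PCI_ANY_ID acting as a wildcard. A minimal hosted-C sketch of that match-and-call pattern follows; the struct layout, MY_PCI_ANY_ID, and the vendor/device numbers are invented for illustration and are not the kernel's definitions.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define MY_PCI_ANY_ID 0xffffu

struct my_pci_dev {
	uint16_t vendor, device;
};

struct my_pci_fixup {
	uint16_t vendor, device;
	void (*hook)(struct my_pci_dev *);
};

static void hello_quirk(struct my_pci_dev *dev)
{
	printf("quirk ran for %04x:%04x\n",
	       (unsigned)dev->vendor, (unsigned)dev->device);
}

/* One exact entry and one wildcard entry, like a fixup linker section. */
static const struct my_pci_fixup table[] = {
	{ 0x8086, 0x1234, hello_quirk },
	{ 0x10de, MY_PCI_ANY_ID, hello_quirk },
};

static void my_do_fixups(struct my_pci_dev *dev)
{
	size_t i;

	/* Same match rule as pci_do_fixups(): exact ID or wildcard. */
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if ((table[i].vendor == dev->vendor ||
		     table[i].vendor == MY_PCI_ANY_ID) &&
		    (table[i].device == dev->device ||
		     table[i].device == MY_PCI_ANY_ID))
			table[i].hook(dev);
}

int main(void)
{
	struct my_pci_dev d = { 0x10de, 0x0050 };

	my_do_fixups(&d); /* hits only the wildcard entry */
	return 0;
}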
/* * drivers/media/video/omap/gfx_tiler.c * * Copyright (C) 2010 Texas Instruments. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/types.h> #include "v4gfx.h" #include "gfx_bc.h" #ifdef CONFIG_TILER_OMAP #include <mach/tiler.h> #define TILER_ALLOCATE_V4L2 #endif void v4gfx_tiler_buffer_free(struct v4gfx_device *vout, unsigned int count, unsigned int startindex) { #ifdef CONFIG_TILER_OMAP int i; if (startindex < 0) startindex = 0; if (startindex + count > VIDEO_MAX_FRAME) count = VIDEO_MAX_FRAME - startindex; for (i = startindex; i < startindex + count; i++) { if (vout->buf_phy_addr_alloced[i]) tiler_free(vout->buf_phy_addr_alloced[i]); if (vout->buf_phy_uv_addr_alloced[i]) tiler_free(vout->buf_phy_uv_addr_alloced[i]); vout->buf_phy_addr[i] = 0; vout->buf_phy_addr_alloced[i] = 0; vout->buf_phy_uv_addr[i] = 0; vout->buf_phy_uv_addr_alloced[i] = 0; } #endif } /* Allocate the buffers for TILER space. Ideally, the buffers will be ONLY in tiler space, with different rotated views available by just a convert. */ int v4gfx_tiler_buffer_setup(struct v4gfx_device *vout, unsigned int *count, unsigned int startindex, struct v4l2_pix_format *pix) { #ifdef CONFIG_TILER_OMAP /* startindex is always passed as 0, possibly tidy up? */ int i, aligned = 1, bpp; enum tiler_fmt fmt; int rv = 0; /* normalize buffers to allocate so we stay within bounds */ int start = (startindex < 0) ? 0 : startindex; int n_alloc = (start + *count > VIDEO_MAX_FRAME) ? VIDEO_MAX_FRAME - start : *count; GFXLOG(1, V4L2DEV(vout), "+%s\n", __func__); bpp = v4gfx_try_format(pix); if (bpp <= 0) { rv = bpp; /* error condition */ goto end; } GFXLOG(1, V4L2DEV(vout), "tiler buffer alloc: " "count = %d, start = %d :\n", *count, startindex); /* special allocation scheme for NV12 format */ if (V4L2_PIX_FMT_NV12 == pix->pixelformat) { tiler_alloc_packed_nv12(&n_alloc, ALIGN(pix->width, 128), pix->height, (void **) vout->buf_phy_addr + start, (void **) vout->buf_phy_uv_addr + start, (void **) vout->buf_phy_addr_alloced + start, (void **) vout->buf_phy_uv_addr_alloced + start, aligned); } else { /* Only bpp of 1, 2, and 4 is supported by tiler */ fmt = (bpp == 1 ? TILFMT_8BIT : bpp == 2 ? TILFMT_16BIT : bpp == 4 ? TILFMT_32BIT : TILFMT_INVALID); if (fmt == TILFMT_INVALID) { rv = -ENOMEM; goto end; } tiler_alloc_packed(&n_alloc, fmt, ALIGN(pix->width, 128 / bpp), pix->height, (void **) vout->buf_phy_addr + start, (void **) vout->buf_phy_addr_alloced + start, aligned); } GFXLOG(1, V4L2DEV(vout), "allocated %d buffers\n", n_alloc); if (n_alloc < *count) { if (n_alloc && (startindex == -1 || V4L2_MEMORY_MMAP != vout->memory)) { /* TODO: check this condition's logic */ v4gfx_tiler_buffer_free(vout, n_alloc, start); *count = 0; rv = -ENOMEM; goto end; } } for (i = start; i < start + n_alloc; i++) { GFXLOG(1, V4L2DEV(vout), "y=%08lx (%d) uv=%08lx (%d)\n", vout->buf_phy_addr[i], vout->buf_phy_addr_alloced[i] ? 1 : 0, vout->buf_phy_uv_addr[i], vout->buf_phy_uv_addr_alloced[i] ? 
1 : 0); } *count = n_alloc; end: GFXLOG(1, V4L2DEV(vout), "-%s [%d]\n", __func__, rv); return rv; #else return 0; #endif } void v4gfx_tiler_image_incr(struct v4gfx_device *vout, int *cpu_pgwidth, int *tiler_increment) { #ifdef CONFIG_TILER_OMAP /* for NV12, Y buffer is 1bpp*/ if (V4L2_PIX_FMT_NV12 == vout->pix.pixelformat) { *cpu_pgwidth = (vout->pix.width + TILER_PAGE - 1) & ~(TILER_PAGE - 1); *tiler_increment = 64 * TILER_WIDTH; } else { *cpu_pgwidth = (vout->pix.width * vout->bpp + TILER_PAGE - 1) & ~(TILER_PAGE - 1); if (vout->bpp > 1) *tiler_increment = 2 * 64 * TILER_WIDTH; else *tiler_increment = 64 * TILER_WIDTH; } #endif } void v4gfx_tiler_image_incr_uv(struct v4gfx_device *vout, int *tiler_increment) { #ifdef CONFIG_TILER_OMAP if (vout->pix.pixelformat == V4L2_PIX_FMT_NV12) *tiler_increment = 2 * 64 * TILER_WIDTH; /* Otherwise do nothing */ #endif }
michaellass/lenovo_a1_07_kernel
drivers/media/video/omapgfx/gfx_tiler.c
C
gpl-2.0
4,322
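The TILER row above repeatedly rounds widths up to an alignment boundary with the idiom (x + a - 1) & ~(a - 1), once per CPU page (TILER_PAGE) and once per 128-byte tile stride via ALIGN(). A self-contained sketch of just that idiom; the 4096-byte TILER_PAGE value is an assumption for illustration.

#include <stdio.h>

/* Round x up to a multiple of a; a must be a power of two. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

#define TILER_PAGE 4096u /* assumed page size, illustration only */

int main(void)
{
	printf("%u\n", ALIGN_UP(1021u, TILER_PAGE)); /* 4096 */
	printf("%u\n", ALIGN_UP(4096u, TILER_PAGE)); /* 4096, already aligned */
	printf("%u\n", ALIGN_UP(4097u, TILER_PAGE)); /* 8192 */
	return 0;
}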
/* CMTP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/export.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/ioctl.h> #include <linux/file.h> #include <linux/compat.h> #include <linux/gfp.h> #include <linux/uaccess.h> #include <net/sock.h> #include <linux/isdn/capilli.h> #include "cmtp.h" static int cmtp_sock_release(struct socket *sock) { struct sock *sk = sock->sk; BT_DBG("sock %p sk %p", sock, sk); if (!sk) return 0; sock_orphan(sk); sock_put(sk); return 0; } static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct cmtp_connadd_req ca; struct cmtp_conndel_req cd; struct cmtp_connlist_req cl; struct cmtp_conninfo ci; struct socket *nsock; void __user *argp = (void __user *)arg; int err; BT_DBG("cmd %x arg %lx", cmd, arg); switch (cmd) { case CMTPCONNADD: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; nsock = sockfd_lookup(ca.sock, &err); if (!nsock) return err; if (nsock->sk->sk_state != BT_CONNECTED) { sockfd_put(nsock); return -EBADFD; } err = cmtp_add_connection(&ca, nsock); if (!err) { if (copy_to_user(argp, &ca, sizeof(ca))) err = -EFAULT; } else sockfd_put(nsock); return err; case CMTPCONNDEL: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; return cmtp_del_connection(&cd); case CMTPGETCONNLIST: if (copy_from_user(&cl, argp, sizeof(cl))) return -EFAULT; if (cl.cnum <= 0) return -EINVAL; err = cmtp_get_connlist(&cl); if (!err && copy_to_user(argp, &cl, sizeof(cl))) return -EFAULT; return err; case CMTPGETCONNINFO: if (copy_from_user(&ci, argp, sizeof(ci))) return -EFAULT; err = cmtp_get_conninfo(&ci); if (!err && copy_to_user(argp, &ci, sizeof(ci))) return -EFAULT; return err; } return -EINVAL; } #ifdef CONFIG_COMPAT static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { if (cmd == CMTPGETCONNLIST) { struct cmtp_connlist_req cl; uint32_t uci; int err; if (get_user(cl.cnum, (uint32_t __user *) arg) || get_user(uci, (u32 __user *) (arg + 4))) return -EFAULT; cl.ci = compat_ptr(uci); if (cl.cnum <= 0) return -EINVAL; err = cmtp_get_connlist(&cl); if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) err = -EFAULT; return err; } return cmtp_sock_ioctl(sock, cmd, arg); } #endif static const struct proto_ops cmtp_sock_ops = { .family = PF_BLUETOOTH, 
.owner = THIS_MODULE, .release = cmtp_sock_release, .ioctl = cmtp_sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = cmtp_sock_compat_ioctl, #endif .bind = sock_no_bind, .getname = sock_no_getname, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .poll = sock_no_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static struct proto cmtp_proto = { .name = "CMTP", .owner = THIS_MODULE, .obj_size = sizeof(struct bt_sock) }; static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &cmtp_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = BT_OPEN; return 0; } static const struct net_proto_family cmtp_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = cmtp_sock_create }; int cmtp_init_sockets(void) { int err; err = proto_register(&cmtp_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops); if (err < 0) goto error; return 0; error: BT_ERR("Can't register CMTP socket"); proto_unregister(&cmtp_proto); return err; } void cmtp_cleanup_sockets(void) { if (bt_sock_unregister(BTPROTO_CMTP) < 0) BT_ERR("Can't unregister CMTP socket"); proto_unregister(&cmtp_proto); }
12019/Dorimanx-SG2-I9100-Kernel
net/bluetooth/cmtp/sock.c
C
gpl-2.0
5,532
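In the CMTP row above, cmtp_sock_compat_ioctl() repacks CMTPGETCONNLIST by hand because a request struct that embeds a user pointer has a different size and different field offsets for 32-bit and 64-bit callers, which is why the pointer is fetched from offset 4 with get_user(). A standalone illustration of that layout difference; the struct names here are hypothetical stand-ins, not the real cmtp_connlist_req.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for the native and compat request layouts. */
struct connlist_native {
	uint32_t cnum;
	void *ci;	/* an 8-byte pointer on an LP64 kernel, after padding */
};

struct connlist_compat {
	uint32_t cnum;
	uint32_t ci;	/* a 32-bit caller passed a 4-byte pointer */
};

int main(void)
{
	printf("native: %zu bytes, ci at offset %zu\n",
	       sizeof(struct connlist_native),
	       offsetof(struct connlist_native, ci));
	printf("compat: %zu bytes, ci at offset %zu\n",
	       sizeof(struct connlist_compat),
	       offsetof(struct connlist_compat, ci));
	/* On LP64 this prints 16/8 vs 8/4: hence get_user() at arg + 4. */
	return 0;
}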
/**************************************************************************** * Ralink Tech Inc. * 4F, No. 2 Technology 5th Rd. * Science-based Industrial Park * Hsin-chu, Taiwan, R.O.C. * (c) Copyright 2002, Ralink Technology, Inc. * * All rights reserved. Ralink's source code is an unpublished work and the * use of a copyright notice does not imply otherwise. This source code * contains confidential trade secret material of Ralink Tech. Any attempt * or participation in deciphering, decoding, reverse engineering or in any * way altering the source code is strictly prohibited, unless the prior * written consent of Ralink Technology, Inc. is obtained. **************************************************************************** Module Name: rt_profile.c Abstract: Revision History: Who When What --------- ---------- ---------------------------------------------- */ #include "rt_config.h" NDIS_STATUS RTMPReadParametersHook( IN PRTMP_ADAPTER pAd) { PSTRING src = NULL; RTMP_OS_FD srcf; RTMP_OS_FS_INFO osFSInfo; INT retval = NDIS_STATUS_FAILURE; PSTRING buffer; buffer = kmalloc(MAX_INI_BUFFER_SIZE, MEM_ALLOC_FLAG); if (buffer == NULL) return NDIS_STATUS_FAILURE; memset(buffer, 0x00, MAX_INI_BUFFER_SIZE); { #ifdef CONFIG_STA_SUPPORT IF_DEV_CONFIG_OPMODE_ON_STA(pAd) { src = STA_PROFILE_PATH; } #endif // CONFIG_STA_SUPPORT // #ifdef MULTIPLE_CARD_SUPPORT src = (PSTRING)pAd->MC_FileName; #endif // MULTIPLE_CARD_SUPPORT // } if (src && *src) { RtmpOSFSInfoChange(&osFSInfo, TRUE); srcf = RtmpOSFileOpen(src, O_RDONLY, 0); if (IS_FILE_OPEN_ERR(srcf)) { DBGPRINT(RT_DEBUG_ERROR, ("Open file \"%s\" failed!\n", src)); } else { retval = RtmpOSFileRead(srcf, buffer, MAX_INI_BUFFER_SIZE); if (retval > 0) { RTMPSetProfileParameters(pAd, buffer); retval = NDIS_STATUS_SUCCESS; } else DBGPRINT(RT_DEBUG_ERROR, ("Read file \"%s\" failed(errCode=%d)!\n", src, retval)); retval = RtmpOSFileClose(srcf); if (retval != 0) { retval = NDIS_STATUS_FAILURE; DBGPRINT(RT_DEBUG_ERROR, ("Close file \"%s\" failed(errCode=%d)!\n", src, retval)); } } RtmpOSFSInfoChange(&osFSInfo, FALSE); } kfree(buffer); return (retval); }
silver-alx/ac100_kernel
drivers/net/wireless/DPA_RT3070_WiFiBTCombo/NETIF/os/linux/rt_profile.c
C
gpl-2.0
2,320
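The hook above follows a read-whole-file-then-parse pattern behind the driver's OS-abstraction wrappers (RtmpOSFileOpen/RtmpOSFileRead). Below is a hedged stdio sketch of that same pattern; MAX_INI_BUFFER_SIZE and parse_profile() here are stand-ins for the driver's macro and RTMPSetProfileParameters(), not the driver's symbols.

/* Hedged sketch: read a bounded profile file into one zeroed buffer,
 * then hand the text to a parser, as RTMPReadParametersHook() does. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_INI_BUFFER_SIZE 4096	/* stand-in bound, like the driver's macro */

static void parse_profile(const char *text)
{
	/* stand-in for RTMPSetProfileParameters(); just count lines */
	size_t lines = 0;
	for (; *text; text++)
		if (*text == '\n')
			lines++;
	printf("profile has %zu lines\n", lines);
}

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <profile>\n", argv[0]);
		return 1;
	}
	char *buffer = calloc(1, MAX_INI_BUFFER_SIZE);	/* zeroed, like memset(..., 0x00, ...) */
	if (!buffer)
		return 1;
	FILE *f = fopen(argv[1], "r");
	if (!f) {
		fprintf(stderr, "Open file \"%s\" failed!\n", argv[1]);
		free(buffer);
		return 1;
	}
	size_t n = fread(buffer, 1, MAX_INI_BUFFER_SIZE - 1, f);	/* keep a NUL terminator */
	if (n > 0)
		parse_profile(buffer);
	fclose(f);
	free(buffer);
	return 0;
}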
/* main-rc6-test.c */
/*
    This file is part of the AVR-Crypto-Lib.
    Copyright (C) 2006-2015 Daniel Otte (bg@nerilex.org)

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * rc6 test-suite
 *
 */

#include "main-test-common.h"

#include "rc6.h"
#include "performance_test.h"
#include "bcal-performance.h"
#include "bcal-nessie.h"
#include "bcal_rc6.h"

#define RC6_ROUNDS 20

char *algo_name = "RC6-32/20/16";

const bcdesc_t *const algolist[] PROGMEM = {
	(bcdesc_t*)&rc6_desc,
	NULL
};

/*****************************************************************************
 *  additional validation-functions					     *
 *****************************************************************************/

void rc6_genctx_dummy(uint8_t *key, uint16_t keysize_b, void *ctx)
{
	rc6_initl(key, keysize_b, RC6_ROUNDS, ctx);
}

void testrun_nessie_rc6(void)
{
	bcal_nessie_multiple(algolist);
}

void testrun_performance_rc6(void)
{
	bcal_performance_multiple(algolist);
}

/*****************************************************************************
 *  main								     *
 *****************************************************************************/

const char nessie_str[]      PROGMEM = "nessie";
const char test_str[]        PROGMEM = "test";
const char performance_str[] PROGMEM = "performance";
const char echo_str[]        PROGMEM = "echo";

const cmdlist_entry_t cmdlist[] PROGMEM = {
	{ nessie_str,      NULL, testrun_nessie_rc6},
	{ test_str,        NULL, testrun_nessie_rc6},
	{ performance_str, NULL, testrun_performance_rc6},
	{ echo_str,    (void*)1, (void_fpt)echo_ctrl},
	{ NULL,            NULL, NULL}
};

int main(void)
{
	main_setup();
	for (;;) {
		welcome_msg(algo_name);
		cmd_interface(cmdlist);
	}
}
nerilex/avr-crypto-lib
test_src/main-rc6-test.c
C
gpl-2.0
2,347
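The test harness above drives everything through a NULL-terminated name-to-function table (cmdlist) handed to cmd_interface(). A hedged host-side sketch of that dispatch idea follows, without the AVR PROGMEM placement; every name in it is a stand-in, not an AVR-Crypto-Lib symbol.

/* Hedged sketch: string-to-handler dispatch over a NULL-terminated table. */
#include <stdio.h>
#include <string.h>

typedef void (*cmd_fn)(void);

static void run_nessie(void)      { puts("running nessie vectors"); }
static void run_performance(void) { puts("running performance measurement"); }

struct cmd_entry {
	const char *name;
	cmd_fn fn;
};

/* NULL-terminated table, mirroring the shape of cmdlist[] above. */
static const struct cmd_entry table[] = {
	{ "nessie",      run_nessie },
	{ "test",        run_nessie },
	{ "performance", run_performance },
	{ NULL,          NULL }
};

static void dispatch(const char *name)
{
	const struct cmd_entry *e;
	for (e = table; e->name; e++) {
		if (strcmp(e->name, name) == 0) {
			e->fn();
			return;
		}
	}
	printf("unknown command: %s\n", name);
}

int main(void)
{
	dispatch("nessie");
	dispatch("performance");
	return 0;
}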
/*
 * Copyright (C) 2013-2015 Kay Sievers
 * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
 * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
 * Copyright (C) 2013-2015 Linux Foundation
 * Copyright (C) 2014-2015 Djalal Harouni
 *
 * kdbus is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 */

#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>

#include "bus.h"
#include "notify.h"
#include "connection.h"
#include "domain.h"
#include "endpoint.h"
#include "handle.h"
#include "item.h"
#include "match.h"
#include "message.h"
#include "metadata.h"
#include "names.h"
#include "policy.h"
#include "util.h"

static void kdbus_bus_free(struct kdbus_node *node)
{
	struct kdbus_bus *bus = container_of(node, struct kdbus_bus, node);

	WARN_ON(!list_empty(&bus->monitors_list));
	WARN_ON(!hash_empty(bus->conn_hash));

	kdbus_notify_free(bus);

	kdbus_user_unref(bus->creator);
	kdbus_name_registry_free(bus->name_registry);
	kdbus_domain_unref(bus->domain);
	kdbus_policy_db_clear(&bus->policy_db);
	kdbus_meta_proc_unref(bus->creator_meta);
	kfree(bus);
}

static void kdbus_bus_release(struct kdbus_node *node, bool was_active)
{
	struct kdbus_bus *bus = container_of(node, struct kdbus_bus, node);

	if (was_active)
		atomic_dec(&bus->creator->buses);
}

static struct kdbus_bus *kdbus_bus_new(struct kdbus_domain *domain,
				       const char *name,
				       struct kdbus_bloom_parameter *bloom,
				       const u64 *pattach_owner,
				       const u64 *pattach_recv,
				       u64 flags, kuid_t uid, kgid_t gid)
{
	struct kdbus_bus *b;
	u64 attach_owner;
	u64 attach_recv;
	int ret;

	if (bloom->size < 8 || bloom->size > KDBUS_BUS_BLOOM_MAX_SIZE ||
	    !KDBUS_IS_ALIGNED8(bloom->size) || bloom->n_hash < 1)
		return ERR_PTR(-EINVAL);

	ret = kdbus_sanitize_attach_flags(pattach_recv ? *pattach_recv : 0,
					  &attach_recv);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = kdbus_sanitize_attach_flags(pattach_owner ? *pattach_owner : 0,
					  &attach_owner);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = kdbus_verify_uid_prefix(name, domain->user_namespace, uid);
	if (ret < 0)
		return ERR_PTR(ret);

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return ERR_PTR(-ENOMEM);

	kdbus_node_init(&b->node, KDBUS_NODE_BUS);

	b->node.free_cb = kdbus_bus_free;
	b->node.release_cb = kdbus_bus_release;
	b->node.uid = uid;
	b->node.gid = gid;
	b->node.mode = S_IRUSR | S_IXUSR;

	if (flags & (KDBUS_MAKE_ACCESS_GROUP | KDBUS_MAKE_ACCESS_WORLD))
		b->node.mode |= S_IRGRP | S_IXGRP;
	if (flags & KDBUS_MAKE_ACCESS_WORLD)
		b->node.mode |= S_IROTH | S_IXOTH;

	b->id = atomic64_inc_return(&domain->last_id);
	b->bus_flags = flags;
	b->attach_flags_req = attach_recv;
	b->attach_flags_owner = attach_owner;
	generate_random_uuid(b->id128);
	b->bloom = *bloom;
	b->domain = kdbus_domain_ref(domain);

	kdbus_policy_db_init(&b->policy_db);

	init_rwsem(&b->conn_rwlock);
	hash_init(b->conn_hash);
	INIT_LIST_HEAD(&b->monitors_list);

	INIT_LIST_HEAD(&b->notify_list);
	spin_lock_init(&b->notify_lock);
	mutex_init(&b->notify_flush_lock);

	ret = kdbus_node_link(&b->node, &domain->node, name);
	if (ret < 0)
		goto exit_unref;

	/* cache the metadata/credentials of the creator */
	b->creator_meta = kdbus_meta_proc_new();
	if (IS_ERR(b->creator_meta)) {
		ret = PTR_ERR(b->creator_meta);
		b->creator_meta = NULL;
		goto exit_unref;
	}

	ret = kdbus_meta_proc_collect(b->creator_meta,
				      KDBUS_ATTACH_CREDS |
				      KDBUS_ATTACH_PIDS |
				      KDBUS_ATTACH_AUXGROUPS |
				      KDBUS_ATTACH_TID_COMM |
				      KDBUS_ATTACH_PID_COMM |
				      KDBUS_ATTACH_EXE |
				      KDBUS_ATTACH_CMDLINE |
				      KDBUS_ATTACH_CGROUP |
				      KDBUS_ATTACH_CAPS |
				      KDBUS_ATTACH_SECLABEL |
				      KDBUS_ATTACH_AUDIT);
	if (ret < 0)
		goto exit_unref;

	b->name_registry = kdbus_name_registry_new();
	if (IS_ERR(b->name_registry)) {
		ret = PTR_ERR(b->name_registry);
		b->name_registry = NULL;
		goto exit_unref;
	}

	/*
	 * Bus-limits of the creator are accounted on its real UID, just like
	 * all other per-user limits.
	 */
	b->creator = kdbus_user_lookup(domain, current_uid());
	if (IS_ERR(b->creator)) {
		ret = PTR_ERR(b->creator);
		b->creator = NULL;
		goto exit_unref;
	}

	return b;

exit_unref:
	kdbus_node_deactivate(&b->node);
	kdbus_node_unref(&b->node);
	return ERR_PTR(ret);
}

/**
 * kdbus_bus_ref() - increase the reference counter of a kdbus_bus
 * @bus: The bus to reference
 *
 * Every user of a bus, except for its creator, must add a reference to the
 * kdbus_bus using this function.
 *
 * Return: the bus itself
 */
struct kdbus_bus *kdbus_bus_ref(struct kdbus_bus *bus)
{
	if (bus)
		kdbus_node_ref(&bus->node);
	return bus;
}

/**
 * kdbus_bus_unref() - decrease the reference counter of a kdbus_bus
 * @bus: The bus to unref
 *
 * Release a reference. If the reference count drops to 0, the bus will be
 * freed.
 *
 * Return: NULL
 */
struct kdbus_bus *kdbus_bus_unref(struct kdbus_bus *bus)
{
	if (bus)
		kdbus_node_unref(&bus->node);
	return NULL;
}

/**
 * kdbus_bus_find_conn_by_id() - find a connection with a given id
 * @bus: The bus to look for the connection
 * @id: The 64-bit connection id
 *
 * Looks up a connection with a given id. The returned connection
 * is ref'ed, and needs to be unref'ed by the user. Returns NULL if
 * the connection can't be found.
 */
struct kdbus_conn *kdbus_bus_find_conn_by_id(struct kdbus_bus *bus, u64 id)
{
	struct kdbus_conn *conn, *found = NULL;

	down_read(&bus->conn_rwlock);
	hash_for_each_possible(bus->conn_hash, conn, hentry, id)
		if (conn->id == id) {
			found = kdbus_conn_ref(conn);
			break;
		}
	up_read(&bus->conn_rwlock);

	return found;
}

/**
 * kdbus_bus_broadcast() - send a message to all subscribed connections
 * @bus:	The bus the connections are connected to
 * @conn_src:	The source connection, may be %NULL for kernel notifications
 * @kmsg:	The message to send.
 *
 * Send @kmsg to all connections that are currently active on the bus.
 * Connections must still have matches installed in order to let the message
 * pass.
 *
 * The caller must hold the name-registry lock of @bus.
 */
void kdbus_bus_broadcast(struct kdbus_bus *bus,
			 struct kdbus_conn *conn_src,
			 struct kdbus_kmsg *kmsg)
{
	struct kdbus_conn *conn_dst;
	unsigned int i;
	int ret;

	lockdep_assert_held(&bus->name_registry->rwlock);

	/*
	 * Make sure broadcasts are queued on monitors before we send them out
	 * to anyone else. Otherwise, connections might react to broadcasts
	 * before the monitor gets the broadcast queued. In the worst case, the
	 * monitor sees a reaction to the broadcast before the broadcast itself.
	 * We don't give ordering guarantees across connections (and monitors
	 * can re-construct order via sequence numbers), but we should at least
	 * try to avoid re-ordering for monitors.
	 */
	kdbus_bus_eavesdrop(bus, conn_src, kmsg);

	down_read(&bus->conn_rwlock);
	hash_for_each(bus->conn_hash, i, conn_dst, hentry) {
		if (conn_dst->id == kmsg->msg.src_id)
			continue;
		if (!kdbus_conn_is_ordinary(conn_dst))
			continue;

		/*
		 * Check if there is a match for the kmsg object in
		 * the destination connection match db
		 */
		if (!kdbus_match_db_match_kmsg(conn_dst->match_db, conn_src,
					       kmsg))
			continue;

		if (conn_src) {
			/*
			 * Anyone can send broadcasts, as they have no
			 * destination. But a receiver needs TALK access to
			 * the sender in order to receive broadcasts.
			 */
			if (!kdbus_conn_policy_talk(conn_dst, NULL, conn_src))
				continue;

			ret = kdbus_kmsg_collect_metadata(kmsg, conn_src,
							  conn_dst);
			if (ret < 0) {
				kdbus_conn_lost_message(conn_dst);
				continue;
			}
		} else {
			/*
			 * Check if there is a policy db that prevents the
			 * destination connection from receiving this kernel
			 * notification
			 */
			if (!kdbus_conn_policy_see_notification(conn_dst, NULL,
								kmsg))
				continue;
		}

		ret = kdbus_conn_entry_insert(conn_src, conn_dst, kmsg, NULL);
		if (ret < 0)
			kdbus_conn_lost_message(conn_dst);
	}
	up_read(&bus->conn_rwlock);
}

/**
 * kdbus_bus_eavesdrop() - send a message to all subscribed monitors
 * @bus:	The bus the monitors are connected to
 * @conn_src:	The source connection, may be %NULL for kernel notifications
 * @kmsg:	The message to send.
 *
 * Send @kmsg to all monitors that are currently active on the bus. Monitors
 * must still have matches installed in order to let the message pass.
 *
 * The caller must hold the name-registry lock of @bus.
 */
void kdbus_bus_eavesdrop(struct kdbus_bus *bus,
			 struct kdbus_conn *conn_src,
			 struct kdbus_kmsg *kmsg)
{
	struct kdbus_conn *conn_dst;
	int ret;

	/*
	 * Monitor connections get all messages; ignore possible errors
	 * when sending messages to monitor connections.
	 */

	lockdep_assert_held(&bus->name_registry->rwlock);

	down_read(&bus->conn_rwlock);
	list_for_each_entry(conn_dst, &bus->monitors_list, monitor_entry) {
		if (conn_src) {
			ret = kdbus_kmsg_collect_metadata(kmsg, conn_src,
							  conn_dst);
			if (ret < 0) {
				kdbus_conn_lost_message(conn_dst);
				continue;
			}
		}

		ret = kdbus_conn_entry_insert(conn_src, conn_dst, kmsg, NULL);
		if (ret < 0)
			kdbus_conn_lost_message(conn_dst);
	}
	up_read(&bus->conn_rwlock);
}

/**
 * kdbus_cmd_bus_make() - handle KDBUS_CMD_BUS_MAKE
 * @domain:		domain to operate on
 * @argp:		command payload
 *
 * Return: NULL or newly created bus on success, ERR_PTR on failure.
 */
struct kdbus_bus *kdbus_cmd_bus_make(struct kdbus_domain *domain,
				     void __user *argp)
{
	struct kdbus_bus *bus = NULL;
	struct kdbus_cmd *cmd;
	struct kdbus_ep *ep = NULL;
	int ret;

	struct kdbus_arg argv[] = {
		{ .type = KDBUS_ITEM_NEGOTIATE },
		{ .type = KDBUS_ITEM_MAKE_NAME, .mandatory = true },
		{ .type = KDBUS_ITEM_BLOOM_PARAMETER, .mandatory = true },
		{ .type = KDBUS_ITEM_ATTACH_FLAGS_SEND },
		{ .type = KDBUS_ITEM_ATTACH_FLAGS_RECV },
	};
	struct kdbus_args args = {
		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
				 KDBUS_MAKE_ACCESS_GROUP |
				 KDBUS_MAKE_ACCESS_WORLD,
		.argv = argv,
		.argc = ARRAY_SIZE(argv),
	};

	ret = kdbus_args_parse(&args, argp, &cmd);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0)
		return NULL;

	bus = kdbus_bus_new(domain,
			    argv[1].item->str, &argv[2].item->bloom_parameter,
			    argv[3].item ? argv[3].item->data64 : NULL,
			    argv[4].item ? argv[4].item->data64 : NULL,
			    cmd->flags, current_euid(), current_egid());
	if (IS_ERR(bus)) {
		ret = PTR_ERR(bus);
		bus = NULL;
		goto exit;
	}

	if (atomic_inc_return(&bus->creator->buses) > KDBUS_USER_MAX_BUSES) {
		atomic_dec(&bus->creator->buses);
		ret = -EMFILE;
		goto exit;
	}

	if (!kdbus_node_activate(&bus->node)) {
		atomic_dec(&bus->creator->buses);
		ret = -ESHUTDOWN;
		goto exit;
	}

	ep = kdbus_ep_new(bus, "bus", cmd->flags, bus->node.uid, bus->node.gid,
			  false);
	if (IS_ERR(ep)) {
		ret = PTR_ERR(ep);
		ep = NULL;
		goto exit;
	}

	if (!kdbus_node_activate(&ep->node)) {
		ret = -ESHUTDOWN;
		goto exit;
	}

	/*
	 * Drop our own reference, effectively causing the endpoint to be
	 * deactivated and released when the parent bus is.
	 */
	ep = kdbus_ep_unref(ep);

exit:
	ret = kdbus_args_clear(&args, ret);
	if (ret < 0) {
		if (ep) {
			kdbus_node_deactivate(&ep->node);
			kdbus_ep_unref(ep);
		}
		if (bus) {
			kdbus_node_deactivate(&bus->node);
			kdbus_bus_unref(bus);
		}
		return ERR_PTR(ret);
	}

	return bus;
}

/**
 * kdbus_cmd_bus_creator_info() - handle KDBUS_CMD_BUS_CREATOR_INFO
 * @conn:		connection to operate on
 * @argp:		command payload
 *
 * Return: >=0 on success, negative error code on failure.
 */
int kdbus_cmd_bus_creator_info(struct kdbus_conn *conn, void __user *argp)
{
	struct kdbus_cmd_info *cmd;
	struct kdbus_bus *bus = conn->ep->bus;
	struct kdbus_pool_slice *slice = NULL;
	struct kdbus_item_header item_hdr;
	struct kdbus_info info = {};
	size_t meta_size, name_len;
	struct kvec kvec[5];
	u64 hdr_size = 0;
	u64 attach_flags;
	size_t cnt = 0;
	int ret;

	struct kdbus_arg argv[] = {
		{ .type = KDBUS_ITEM_NEGOTIATE },
	};
	struct kdbus_args args = {
		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
		.argv = argv,
		.argc = ARRAY_SIZE(argv),
	};

	ret = kdbus_args_parse(&args, argp, &cmd);
	if (ret != 0)
		return ret;

	ret = kdbus_sanitize_attach_flags(cmd->attach_flags, &attach_flags);
	if (ret < 0)
		goto exit;

	attach_flags &= bus->attach_flags_owner;

	ret = kdbus_meta_export_prepare(bus->creator_meta, NULL,
					&attach_flags, &meta_size);
	if (ret < 0)
		goto exit;

	name_len = strlen(bus->node.name) + 1;
	info.id = bus->id;
	info.flags = bus->bus_flags;
	item_hdr.type = KDBUS_ITEM_MAKE_NAME;
	item_hdr.size = KDBUS_ITEM_HEADER_SIZE + name_len;

	kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &hdr_size);
	kdbus_kvec_set(&kvec[cnt++], &item_hdr, sizeof(item_hdr), &hdr_size);
	kdbus_kvec_set(&kvec[cnt++], bus->node.name, name_len, &hdr_size);
	cnt += !!kdbus_kvec_pad(&kvec[cnt], &hdr_size);

	slice = kdbus_pool_slice_alloc(conn->pool, hdr_size + meta_size, false);
	if (IS_ERR(slice)) {
		ret = PTR_ERR(slice);
		slice = NULL;
		goto exit;
	}

	ret = kdbus_meta_export(bus->creator_meta, NULL, attach_flags,
				slice, hdr_size, &meta_size);
	if (ret < 0)
		goto exit;

	info.size = hdr_size + meta_size;

	ret = kdbus_pool_slice_copy_kvec(slice, 0, kvec, cnt, hdr_size);
	if (ret < 0)
		goto exit;

	kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->info_size);

	if (kdbus_member_set_user(&cmd->offset, argp, typeof(*cmd), offset) ||
	    kdbus_member_set_user(&cmd->info_size, argp,
				  typeof(*cmd), info_size))
		ret = -EFAULT;

exit:
	kdbus_pool_slice_release(slice);
	return kdbus_args_clear(&args, ret);
}
TeamFahQ/kernel_linux_next
ipc/kdbus/bus.c
C
gpl-2.0
14,169
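kdbus_bus_ref() and kdbus_bus_unref() above follow a ref-returns-the-object, unref-returns-NULL convention so callers can chain references and clear their pointer in one statement. A hedged standalone sketch of that idiom with a C11 atomic counter; struct bus here is a toy stand-in, not the kdbus type.

/* Hedged sketch of the ref/unref idiom used by kdbus_bus_ref()/unref(). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct bus {
	atomic_int refs;
};

static struct bus *bus_new(void)
{
	struct bus *b = calloc(1, sizeof(*b));
	if (b)
		atomic_store(&b->refs, 1);
	return b;
}

static struct bus *bus_ref(struct bus *b)
{
	if (b)
		atomic_fetch_add(&b->refs, 1);
	return b;	/* returned so calls can be chained */
}

static struct bus *bus_unref(struct bus *b)
{
	/* fetch_sub returns the previous value; 1 means last holder */
	if (b && atomic_fetch_sub(&b->refs, 1) == 1) {
		puts("last reference dropped, freeing");
		free(b);
	}
	return NULL;	/* lets callers write: b = bus_unref(b); */
}

int main(void)
{
	struct bus *b = bus_new();
	struct bus *user = bus_ref(b);	/* second holder */
	b = bus_unref(b);		/* b is NULL, object still alive */
	user = bus_unref(user);		/* frees the object */
	return 0;
}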
/*
 * CPUFreq sakuractive governor
 *
 * Copyright (C) 2011 sakuramilk <c.sakuramilk@gmail.com>
 *
 * Based on hotplug governor
 * Copyright (C) 2010 Texas Instruments, Inc.
 *   Mike Turquette <mturquette@ti.com>
 *   Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Based on ondemand governor
 * Copyright (C)  2001 Russell King
 *           (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>,
 *                     Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>

/* greater than 85% avg load across online CPUs increases frequency */
#define DEFAULT_UP_FREQ_MIN_LOAD		(85)

/* Keep 10% of idle under the up threshold when decreasing the frequency */
#define DEFAULT_FREQ_DOWN_DIFFERENTIAL		(10)

/* less than 17% avg load across online CPUs decreases frequency */
#define DEFAULT_DOWN_FREQ_MAX_LOAD		(17)

/* default sampling period (uSec) is bogus; 10x ondemand's default for x86 */
#define DEFAULT_SAMPLING_PERIOD			(100000)

/* default number of sampling periods to average before hotplug-in decision */
#define DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS	(5)

/* default number of sampling periods to average before hotplug-out decision */
#define DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS	(10)

static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);
//static int hotplug_boost(struct cpufreq_policy *policy);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SAKURACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_sakuractive = {
	.name			= "sakuractive",
	.governor		= cpufreq_governor_dbs,
	.owner			= THIS_MODULE,
};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct work_struct cpu_up_work;
	struct work_struct cpu_down_work;
	struct cpufreq_frequency_table *freq_table;
	int cpu;
	unsigned int boost_applied;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, hp_cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
 * different CPUs. It protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *khotplug_wq;

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int down_threshold;
	unsigned int hotplug_in_sampling_periods;
	unsigned int hotplug_out_sampling_periods;
	unsigned int hotplug_load_index;
	unsigned int *hotplug_load_history;
	unsigned int ignore_nice;
	unsigned int io_is_busy;
	unsigned int boost_timeout;
} dbs_tuners_ins = {
	.sampling_rate			= DEFAULT_SAMPLING_PERIOD,
	.up_threshold			= DEFAULT_UP_FREQ_MIN_LOAD,
	.down_differential		= DEFAULT_FREQ_DOWN_DIFFERENTIAL,
	.down_threshold			= DEFAULT_DOWN_FREQ_MAX_LOAD,
	.hotplug_in_sampling_periods	= DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS,
	.hotplug_out_sampling_periods	= DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS,
	.hotplug_load_index		= 0,
	.ignore_nice			= 0,
	.io_is_busy			= 0,
	.boost_timeout			= 0,
};

/*
 * A corner case exists when switching io_is_busy at run-time: comparing idle
 * times from a non-io_is_busy period to an io_is_busy period (or vice-versa)
 * will misrepresent the actual change in system idleness. We ignore this
 * corner case: enabling io_is_busy might cause freq increase and disabling
 * might cause freq decrease, which probably matches the original intent.
 */
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time;
	u64 iowait_time;

	/* cpufreq-sakuractive always assumes CONFIG_NO_HZ */
	idle_time = get_cpu_idle_time_us(cpu, wall);

	/* add time spent doing I/O to idle time */
	if (dbs_tuners_ins.io_is_busy) {
		iowait_time = get_cpu_iowait_time_us(cpu, wall);
		/* cpufreq-sakuractive always assumes CONFIG_NO_HZ */
		if (iowait_time != -1ULL && idle_time >= iowait_time)
			idle_time -= iowait_time;
	}

	return idle_time;
}

/************************** sysfs interface ************************/

/* XXX look at global sysfs macros in cpufreq.h, can those be used here? */

/* cpufreq_sakuractive Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(down_differential, down_differential);
show_one(down_threshold, down_threshold);
show_one(hotplug_in_sampling_periods, hotplug_in_sampling_periods);
show_one(hotplug_out_sampling_periods, hotplug_out_sampling_periods);
show_one(ignore_nice_load, ignore_nice);
show_one(io_is_busy, io_is_busy);
show_one(boost_timeout, boost_timeout);

static ssize_t store_boost_timeout(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.boost_timeout = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_rate = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input <= dbs_tuners_ins.down_threshold) {
		return -EINVAL;
	}

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input >= dbs_tuners_ins.up_threshold)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.down_differential = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input >= dbs_tuners_ins.up_threshold) {
		return -EINVAL;
	}

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.down_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_hotplug_in_sampling_periods(struct kobject *a,
		struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	unsigned int *temp;
	unsigned int max_windows;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	/* already using this value, bail out */
	if (input == dbs_tuners_ins.hotplug_in_sampling_periods)
		return count;

	mutex_lock(&dbs_mutex);
	ret = count;
	max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods,
			  dbs_tuners_ins.hotplug_out_sampling_periods);

	/* no need to resize array */
	if (input <= max_windows) {
		dbs_tuners_ins.hotplug_in_sampling_periods = input;
		goto out;
	}

	/* resize array */
	temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL);

	if (!temp || IS_ERR(temp)) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(temp, dbs_tuners_ins.hotplug_load_history,
	       (max_windows * sizeof(unsigned int)));
	kfree(dbs_tuners_ins.hotplug_load_history);

	/* replace old buffer, old number of sampling periods & old index */
	dbs_tuners_ins.hotplug_load_history = temp;
	dbs_tuners_ins.hotplug_in_sampling_periods = input;
	dbs_tuners_ins.hotplug_load_index = max_windows;
out:
	mutex_unlock(&dbs_mutex);

	return ret;
}

static ssize_t store_hotplug_out_sampling_periods(struct kobject *a,
		struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	unsigned int *temp;
	unsigned int max_windows;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	/* already using this value, bail out */
	if (input == dbs_tuners_ins.hotplug_out_sampling_periods)
		return count;

	mutex_lock(&dbs_mutex);
	ret = count;
	max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods,
			  dbs_tuners_ins.hotplug_out_sampling_periods);

	/* no need to resize array */
	if (input <= max_windows) {
		dbs_tuners_ins.hotplug_out_sampling_periods = input;
		goto out;
	}

	/* resize array */
	temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL);

	if (!temp || IS_ERR(temp)) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(temp, dbs_tuners_ins.hotplug_load_history,
	       (max_windows * sizeof(unsigned int)));
	kfree(dbs_tuners_ins.hotplug_load_history);

	/* replace old buffer, old number of sampling periods & old index */
	dbs_tuners_ins.hotplug_load_history = temp;
	dbs_tuners_ins.hotplug_out_sampling_periods = input;
	dbs_tuners_ins.hotplug_load_index = max_windows;
out:
	mutex_unlock(&dbs_mutex);

	return ret;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(hp_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.io_is_busy = !!input;
	mutex_unlock(&dbs_mutex);

	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(up_threshold);
define_one_global_rw(down_differential);
define_one_global_rw(down_threshold);
define_one_global_rw(hotplug_in_sampling_periods);
define_one_global_rw(hotplug_out_sampling_periods);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(io_is_busy);
define_one_global_rw(boost_timeout);

static struct attribute *dbs_attributes[] = {
	&sampling_rate.attr,
	&up_threshold.attr,
	&down_differential.attr,
	&down_threshold.attr,
	&hotplug_in_sampling_periods.attr,
	&hotplug_out_sampling_periods.attr,
	&ignore_nice_load.attr,
	&io_is_busy.attr,
	&boost_timeout.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "sakuractive",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	/* combined load of all enabled CPUs */
	unsigned int total_load = 0;
	/* single largest CPU load percentage */
	unsigned int max_load = 0;
	/* largest CPU load in terms of frequency */
	unsigned int max_load_freq = 0;
	/* average load across all enabled CPUs */
	unsigned int avg_load = 0;
	/* average load across multiple sampling periods for hotplug events */
	unsigned int hotplug_in_avg_load = 0;
	unsigned int hotplug_out_avg_load = 0;
	/* number of sampling periods averaged for hotplug decisions */
	unsigned int periods;
	struct cpufreq_policy *policy;
	unsigned int i, j;

	policy = this_dbs_info->cur_policy;

	/*
	 * cpu load accounting
	 * get highest load, total load and average load across all CPUs
	 */
	for_each_cpu(j, policy->cpus) {
		unsigned int load;
		unsigned int idle_time, wall_time;
		cputime64_t cur_wall_time, cur_idle_time;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);

		/* update both cur_idle_time and cur_wall_time */
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		/* how much wall time has passed since last iteration? */
		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		/* how much idle time has passed since last iteration? */
		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/* load is the percentage of time not spent in idle */
		load = 100 * (wall_time - idle_time) / wall_time;

		/* keep track of combined load across all CPUs */
		total_load += load;

		/* keep track of highest single load across all CPUs */
		if (load > max_load)
			max_load = load;
	}

	/* use the max load in the OPP freq change policy */
	max_load_freq = max_load * policy->cur;

	/* calculate the average load across all related CPUs */
	avg_load = total_load / num_online_cpus();

	mutex_lock(&dbs_mutex);

	/*
	 * hotplug load accounting
	 * average load over multiple sampling periods
	 */

	/* how many sampling periods do we use for hotplug decisions? */
	periods = max(dbs_tuners_ins.hotplug_in_sampling_periods,
		      dbs_tuners_ins.hotplug_out_sampling_periods);

	/* store avg_load in the circular buffer */
	dbs_tuners_ins.hotplug_load_history[dbs_tuners_ins.hotplug_load_index]
		= avg_load;

	/* compute average load across in & out sampling periods */
	for (i = 0, j = dbs_tuners_ins.hotplug_load_index;
	     i < periods; i++, j--) {
		if (i < dbs_tuners_ins.hotplug_in_sampling_periods)
			hotplug_in_avg_load +=
				dbs_tuners_ins.hotplug_load_history[j];
		if (i < dbs_tuners_ins.hotplug_out_sampling_periods)
			hotplug_out_avg_load +=
				dbs_tuners_ins.hotplug_load_history[j];

		if (j == 0)
			j = periods;
	}

	hotplug_in_avg_load = hotplug_in_avg_load /
		dbs_tuners_ins.hotplug_in_sampling_periods;

	hotplug_out_avg_load = hotplug_out_avg_load /
		dbs_tuners_ins.hotplug_out_sampling_periods;

	/* return to first element if we're at the circular buffer's end */
	if (++dbs_tuners_ins.hotplug_load_index == periods)
		dbs_tuners_ins.hotplug_load_index = 0;

	/* check if auxiliary CPU is needed based on avg_load */
	if (avg_load > dbs_tuners_ins.up_threshold) {
		/* should we enable auxiliary CPUs? */
		if (num_online_cpus() < 2 && hotplug_in_avg_load >
				dbs_tuners_ins.up_threshold) {
			queue_work_on(this_dbs_info->cpu, khotplug_wq,
				      &this_dbs_info->cpu_up_work);
			goto out;
		}
	}

	/* check for frequency increase based on max_load */
	if (max_load > dbs_tuners_ins.up_threshold) {
		/* increase to highest frequency supported */
		if (policy->cur < policy->max)
			__cpufreq_driver_target(policy, policy->max,
						CPUFREQ_RELATION_H);
		goto out;
	}

	/* check for frequency decrease */
	if (avg_load < dbs_tuners_ins.down_threshold) {
		/* are we at the minimum frequency already? */
		if (policy->cur <= policy->min) {
			/* should we disable auxiliary CPUs? */
			if (num_online_cpus() > 1 && hotplug_out_avg_load <
					dbs_tuners_ins.down_threshold) {
				queue_work_on(this_dbs_info->cpu, khotplug_wq,
					      &this_dbs_info->cpu_down_work);
			}
			goto out;
		}
	}

	/*
	 * go down to the lowest frequency which can sustain the load by
	 * keeping down_differential percent of idle in order to not cross
	 * the up_threshold
	 */
	if ((max_load_freq <
	     (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) && (policy->cur > policy->min)) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		if (freq_next < policy->min)
			freq_next = policy->min;

		__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
	}
out:
	mutex_unlock(&dbs_mutex);
	return;
}

static void do_cpu_up(struct work_struct *work)
{
	cpu_up(1);
}

static void do_cpu_down(struct work_struct *work)
{
	cpu_down(1);
}

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int delay = 0;

	mutex_lock(&dbs_info->timer_mutex);

	if (!dbs_info->boost_applied) {
		dbs_check_cpu(dbs_info);
		/* We want all related CPUs to do sampling nearly on same jiffy */
		delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	} else {
		delay = usecs_to_jiffies(dbs_tuners_ins.boost_timeout);
		dbs_info->boost_applied = 0;
		if (num_online_cpus() < 2)
			queue_work_on(cpu, khotplug_wq,
				      &dbs_info->cpu_up_work);
	}
	queue_delayed_work_on(cpu, khotplug_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all related CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	INIT_WORK(&dbs_info->cpu_up_work, do_cpu_up);
	INIT_WORK(&dbs_info->cpu_down_work, do_cpu_down);
	if (!dbs_info->boost_applied)
		delay = usecs_to_jiffies(dbs_tuners_ins.boost_timeout);
	queue_delayed_work_on(dbs_info->cpu, khotplug_wq, &dbs_info->work,
			      delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int i, j, max_periods;
	int rc;

	this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);
		dbs_enable++;

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}

			max_periods = max(DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS,
					  DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS);
			dbs_tuners_ins.hotplug_load_history = kmalloc(
					(sizeof(unsigned int) * max_periods),
					GFP_KERNEL);
			if (!dbs_tuners_ins.hotplug_load_history) {
				WARN_ON(1);
				return -ENOMEM;
			}
			for (i = 0; i < max_periods; i++)
				dbs_tuners_ins.hotplug_load_history[i] = 50;
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
		/*
		 * Start the timer schedule work, when this governor
		 * is used for first time
		 */
		if (dbs_enable == 1) {
			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}
		}

		if (!dbs_tuners_ins.boost_timeout)
			dbs_tuners_ins.boost_timeout =
					dbs_tuners_ins.sampling_rate * 30;
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);
		kfree(dbs_tuners_ins.hotplug_load_history);
		/*
		 * XXX BIG CAVEAT: Stopping the governor with CPU1 offline
		 * will result in it remaining offline until the user onlines
		 * it again.  It is up to the user to do this (for now).
		 */
		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->max,
						CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->min,
						CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}

#if 0
static int hotplug_boost(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;

	this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu);

#if 0
	/* Already at max? */
	if (policy->cur == policy->max)
		return;
#endif

	mutex_lock(&this_dbs_info->timer_mutex);
	this_dbs_info->boost_applied = 1;
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	mutex_unlock(&this_dbs_info->timer_mutex);

	return 0;
}
#endif

static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		dbs_tuners_ins.up_threshold = DEFAULT_UP_FREQ_MIN_LOAD;
	} else {
		pr_err("cpufreq-sakuractive: %s: assumes CONFIG_NO_HZ\n",
		       __func__);
		return -EINVAL;
	}

	khotplug_wq = create_workqueue("khotplug");
	if (!khotplug_wq) {
		pr_err("Creation of khotplug failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_sakuractive);
	if (err)
		destroy_workqueue(khotplug_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_sakuractive);
	destroy_workqueue(khotplug_wq);
}

MODULE_AUTHOR("sakuramilk <c.sakuramilk@gmail.com>");
MODULE_DESCRIPTION("'cpufreq_sakuractive' - cpufreq governor for dynamic "
		   "frequency scaling and CPU hotplug");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SAKURACTIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
k2wl/kernel-gt-i9082-stock-based
drivers/cpufreq/cpufreq_sakuractive.c
C
gpl-2.0
22,646
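dbs_check_cpu() above keeps one circular buffer sized to the larger hotplug window and averages the newest N samples separately for the plug-in and plug-out decisions. A hedged standalone sketch of exactly that indexing follows, using the governor's default window sizes; the seeding and printing are illustrative only, not the governor's code.

/* Hedged sketch of the hotplug load-history averaging in dbs_check_cpu(). */
#include <stdio.h>

#define IN_PERIODS  5	/* DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS */
#define OUT_PERIODS 10	/* DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS */
#define PERIODS     (OUT_PERIODS > IN_PERIODS ? OUT_PERIODS : IN_PERIODS)

static unsigned int history[PERIODS];
static unsigned int idx;

static void record_load(unsigned int avg_load,
			unsigned int *in_avg, unsigned int *out_avg)
{
	unsigned int i, j, in_sum = 0, out_sum = 0;

	history[idx] = avg_load;
	/* walk backwards from the newest sample, wrapping like the governor */
	for (i = 0, j = idx; i < PERIODS; i++, j--) {
		if (i < IN_PERIODS)
			in_sum += history[j];
		if (i < OUT_PERIODS)
			out_sum += history[j];
		if (j == 0)
			j = PERIODS;	/* j-- next lands on PERIODS - 1 */
	}
	*in_avg = in_sum / IN_PERIODS;
	*out_avg = out_sum / OUT_PERIODS;

	/* return to the first element at the circular buffer's end */
	if (++idx == PERIODS)
		idx = 0;
}

int main(void)
{
	unsigned int in_avg, out_avg, t;

	/* a load step down at t = 10 shows the short window reacting first */
	for (t = 0; t < 20; t++) {
		record_load(t < 10 ? 90 : 10, &in_avg, &out_avg);
		printf("t=%2u in_avg=%3u out_avg=%3u\n", t, in_avg, out_avg);
	}
	return 0;
}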
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * These functions manipulate an sctp event.   The struct ulpevent is used
 * to carry notifications and data to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
				       struct sctp_association *asoc);
static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event);

/* Initialize an ULP event from a given skb.  */
SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event,
				    int msg_flags,
				    unsigned int len)
{
	memset(event, 0, sizeof(struct sctp_ulpevent));
	event->msg_flags = msg_flags;
	event->rmem_len = len;
}

/* Create a new sctp_ulpevent.  */
SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags,
						    gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	skb = alloc_skb(size, gfp);
	if (!skb)
		goto fail;

	event = sctp_skb2event(skb);
	sctp_ulpevent_init(event, msg_flags, skb->truesize);

	return event;

fail:
	return NULL;
}

/* Is this a MSG_NOTIFICATION?  */
int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event)
{
	return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION);
}

/* Hold the association in case the msg_name needs to be read out of
 * the association.
 */
static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
					   const struct sctp_association *asoc)
{
	struct sk_buff *skb;

	/* Cast away the const, as we are just wanting to
	 * bump the reference count.
	 */
	sctp_association_hold((struct sctp_association *)asoc);
	skb = sctp_event2skb(event);
	event->asoc = (struct sctp_association *)asoc;
	atomic_add(event->rmem_len, &event->asoc->rmem_alloc);
	sctp_skb_set_owner_r(skb, asoc->base.sk);
}

/* A simple destructor to give up the reference to the association. */
static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = event->asoc;

	atomic_sub(event->rmem_len, &asoc->rmem_alloc);
	sctp_association_put(asoc);
}

/* Create and initialize an SCTP_ASSOC_CHANGE event.
 *
 * 5.3.1.1 SCTP_ASSOC_CHANGE
 *
 * Communication notifications inform the ULP that an SCTP association
 * has either begun or ended. The identifier for a new association is
 * provided by this notification.
 *
 * Note: There is no field checking here. If a field is unused it will be
 * zero'd out.
 */
struct sctp_ulpevent  *sctp_ulpevent_make_assoc_change(
	const struct sctp_association *asoc,
	__u16 flags, __u16 state, __u16 error, __u16 outbound,
	__u16 inbound, struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_assoc_change *sac;
	struct sk_buff *skb;

	/* If the lower layer passed in the chunk, it will be
	 * an ABORT, so we need to include it in the sac_info.
	 */
	if (chunk) {
		/* Copy the chunk data to a new skb and reserve enough
		 * head room to use as notification.
		 */
		skb = skb_copy_expand(chunk->skb,
				      sizeof(struct sctp_assoc_change), 0, gfp);

		if (!skb)
			goto fail;

		/* Embed the event fields inside the cloned skb.  */
		event = sctp_skb2event(skb);
		sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);

		/* Include the notification structure */
		sac = (struct sctp_assoc_change *)
			skb_push(skb, sizeof(struct sctp_assoc_change));

		/* Trim the buffer to the right length.  */
		skb_trim(skb, sizeof(struct sctp_assoc_change) +
			 ntohs(chunk->chunk_hdr->length) -
			 sizeof(sctp_chunkhdr_t));
	} else {
		event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
					  MSG_NOTIFICATION, gfp);
		if (!event)
			goto fail;

		skb = sctp_event2skb(event);
		sac = (struct sctp_assoc_change *) skb_put(skb,
					sizeof(struct sctp_assoc_change));
	}

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_type:
	 * It should be SCTP_ASSOC_CHANGE.
	 */
	sac->sac_type = SCTP_ASSOC_CHANGE;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_state: 32 bits (signed integer)
	 * This field holds one of a number of values that communicate the
	 * event that happened to the association.
	 */
	sac->sac_state = state;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_flags: 16 bits (unsigned integer)
	 * Currently unused.
	 */
	sac->sac_flags = 0;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_length: sizeof (__u32)
	 * This field is the total length of the notification data, including
	 * the notification header.
	 */
	sac->sac_length = skb->len;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_error:  32 bits (signed integer)
	 *
	 * If the state was reached due to an error condition (e.g.
	 * COMMUNICATION_LOST) any relevant error information is available in
	 * this field. This corresponds to the protocol error codes defined in
	 * [SCTP].
	 */
	sac->sac_error = error;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_outbound_streams:  16 bits (unsigned integer)
	 * sac_inbound_streams:  16 bits (unsigned integer)
	 *
	 * The maximum number of streams allowed in each direction are
	 * available in sac_outbound_streams and sac_inbound streams.
	 */
	sac->sac_outbound_streams = outbound;
	sac->sac_inbound_streams = inbound;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_assoc_id: sizeof (sctp_assoc_t)
	 *
	 * The association id field, holds the identifier for the association.
	 * All notifications for a given association have the same association
	 * identifier.  For TCP style socket, this field is ignored.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	sac->sac_assoc_id = sctp_assoc2id(asoc);

	return event;

fail:
	return NULL;
}

/* Create and initialize an SCTP_PEER_ADDR_CHANGE event.
 *
 * Socket Extensions for SCTP - draft-01
 * 5.3.1.2 SCTP_PEER_ADDR_CHANGE
 *
 * When a destination address on a multi-homed peer encounters a change
 * an interface details event is sent.
 */
struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
	const struct sctp_association *asoc,
	const struct sockaddr_storage *aaddr,
	int flags, int state, int error, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_paddr_change  *spc;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change),
				  MSG_NOTIFICATION, gfp);
	if (!event)
		goto fail;

	skb = sctp_event2skb(event);
	spc = (struct sctp_paddr_change *)
		skb_put(skb, sizeof(struct sctp_paddr_change));

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_type:
	 *
	 *    It should be SCTP_PEER_ADDR_CHANGE.
	 */
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_length: sizeof (__u32)
	 *
	 * This field is the total length of the notification data, including
	 * the notification header.
	 */
	spc->spc_length = sizeof(struct sctp_paddr_change);

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_flags: 16 bits (unsigned integer)
	 * Currently unused.
	 */
	spc->spc_flags = 0;

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_state:  32 bits (signed integer)
	 *
	 * This field holds one of a number of values that communicate the
	 * event that happened to the address.
	 */
	spc->spc_state = state;

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_error:  32 bits (signed integer)
	 *
	 * If the state was reached due to any error condition (e.g.
	 * ADDRESS_UNREACHABLE) any relevant error information is available in
	 * this field.
	 */
	spc->spc_error = error;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * spc_assoc_id: sizeof (sctp_assoc_t)
	 *
	 * The association id field, holds the identifier for the association.
	 * All notifications for a given association have the same association
	 * identifier.  For TCP style socket, this field is ignored.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	spc->spc_assoc_id = sctp_assoc2id(asoc);

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_aaddr: sizeof (struct sockaddr_storage)
	 *
	 * The affected address field, holds the remote peer's address that is
	 * encountering the change of state.
	 */
	memcpy(&spc->spc_aaddr, aaddr, sizeof(struct sockaddr_storage));

	/* Map ipv4 address into v4-mapped-on-v6 address.  */
	sctp_get_pf_specific(asoc->base.sk->sk_family)->addr_v4map(
					sctp_sk(asoc->base.sk),
					(union sctp_addr *)&spc->spc_aaddr);

	return event;

fail:
	return NULL;
}

/* Create and initialize an SCTP_REMOTE_ERROR notification.
 *
 * Note: This assumes that the chunk->skb->data already points to the
 * operation error payload.
 *
 * Socket Extensions for SCTP - draft-01
 * 5.3.1.3 SCTP_REMOTE_ERROR
 *
 * A remote peer may send an Operational Error message to its peer.
 * This message indicates a variety of error conditions on an
 * association. The entire error TLV as it appears on the wire is
 * included in a SCTP_REMOTE_ERROR event.  Please refer to the SCTP
 * specification [SCTP] and any extensions for a list of possible
 * error formats.
 */
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
	const struct sctp_association *asoc, struct sctp_chunk *chunk,
	__u16 flags, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_remote_error *sre;
	struct sk_buff *skb;
	sctp_errhdr_t *ch;
	__be16 cause;
	int elen;

	ch = (sctp_errhdr_t *)(chunk->skb->data);
	cause = ch->cause;
	elen = WORD_ROUND(ntohs(ch->length)) - sizeof(sctp_errhdr_t);

	/* Pull off the ERROR header.  */
	skb_pull(chunk->skb, sizeof(sctp_errhdr_t));

	/* Copy the skb to a new skb with room for us to prepend
	 * notification with.
	 */
	skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
			      0, gfp);

	/* Pull off the rest of the cause TLV from the chunk.  */
	skb_pull(chunk->skb, elen);
	if (!skb)
		goto fail;

	/* Embed the event fields inside the cloned skb.  */
	event = sctp_skb2event(skb);
	sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);

	sre = (struct sctp_remote_error *)
		skb_push(skb, sizeof(struct sctp_remote_error));

	/* Trim the buffer to the right length.  */
	skb_trim(skb, sizeof(struct sctp_remote_error) + elen);

	/* Socket Extensions for SCTP
	 * 5.3.1.3 SCTP_REMOTE_ERROR
	 *
	 * sre_type:
	 *   It should be SCTP_REMOTE_ERROR.
	 */
	sre->sre_type = SCTP_REMOTE_ERROR;

	/*
	 * Socket Extensions for SCTP
	 * 5.3.1.3 SCTP_REMOTE_ERROR
	 *
	 * sre_flags: 16 bits (unsigned integer)
	 *   Currently unused.
	 */
	sre->sre_flags = 0;

	/* Socket Extensions for SCTP
	 * 5.3.1.3 SCTP_REMOTE_ERROR
	 *
	 * sre_length: sizeof (__u32)
	 *
	 * This field is the total length of the notification data,
	 * including the notification header.
	 */
	sre->sre_length = skb->len;

	/* Socket Extensions for SCTP
	 * 5.3.1.3 SCTP_REMOTE_ERROR
	 *
	 * sre_error: 16 bits (unsigned integer)
	 * This value represents one of the Operational Error causes defined in
	 * the SCTP specification, in network byte order.
	 */
	sre->sre_error = cause;

	/* Socket Extensions for SCTP
	 * 5.3.1.3 SCTP_REMOTE_ERROR
	 *
	 * sre_assoc_id: sizeof (sctp_assoc_t)
	 *
	 * The association id field, holds the identifier for the association.
	 * All notifications for a given association have the same association
	 * identifier.  For TCP style socket, this field is ignored.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	sre->sre_assoc_id = sctp_assoc2id(asoc);

	return event;

fail:
	return NULL;
}

/* Create and initialize a SCTP_SEND_FAILED notification.
 *
 * Socket Extensions for SCTP - draft-01
 * 5.3.1.4 SCTP_SEND_FAILED
 */
struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
	const struct sctp_association *asoc, struct sctp_chunk *chunk,
	__u16 flags, __u32 error, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_send_failed *ssf;
	struct sk_buff *skb;

	/* Pull off any padding.  */
	int len = ntohs(chunk->chunk_hdr->length);

	/* Make skb with more room so we can prepend notification.  */
	skb = skb_copy_expand(chunk->skb,
			      sizeof(struct sctp_send_failed), /* headroom */
			      0,                               /* tailroom */
			      gfp);
	if (!skb)
		goto fail;

	/* Pull off the common chunk header and DATA header.  */
	skb_pull(skb, sizeof(struct sctp_data_chunk));
	len -= sizeof(struct sctp_data_chunk);

	/* Embed the event fields inside the cloned skb.  */
	event = sctp_skb2event(skb);
	sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);

	ssf = (struct sctp_send_failed *)
		skb_push(skb, sizeof(struct sctp_send_failed));

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_type:
	 * It should be SCTP_SEND_FAILED.
	 */
	ssf->ssf_type = SCTP_SEND_FAILED;

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_flags: 16 bits (unsigned integer)
	 * The flag value will take one of the following values
	 *
	 * SCTP_DATA_UNSENT - Indicates that the data was never put on
	 *                    the wire.
	 *
	 * SCTP_DATA_SENT   - Indicates that the data was put on the wire.
	 *                    Note that this does not necessarily mean that the
	 *                    data was (or was not) successfully delivered.
	 */
	ssf->ssf_flags = flags;

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_length: sizeof (__u32)
	 * This field is the total length of the notification data, including
	 * the notification header.
	 */
	ssf->ssf_length = sizeof(struct sctp_send_failed) + len;
	skb_trim(skb, ssf->ssf_length);

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_error: 16 bits (unsigned integer)
	 * This value represents the reason why the send failed, and if set,
	 * will be a SCTP protocol error code as defined in [SCTP] section
	 * 3.3.10.
	 */
	ssf->ssf_error = error;

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_info: sizeof (struct sctp_sndrcvinfo)
	 * The original send information associated with the undelivered
	 * message.
	 */
	memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo));

	/* Per TSVWG discussion with Randy. Allow the application to
	 * reassemble a fragmented message.
	 */
	ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags;

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_assoc_id: sizeof (sctp_assoc_t)
	 * The association id field, sf_assoc_id, holds the identifier for the
	 * association.  All notifications for a given association have the
	 * same association identifier.  For TCP style socket, this field is
	 * ignored.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	ssf->ssf_assoc_id = sctp_assoc2id(asoc);

	return event;

fail:
	return NULL;
}

/* Create and initialize a SCTP_SHUTDOWN_EVENT notification.
 *
 * Socket Extensions for SCTP - draft-01
 * 5.3.1.5 SCTP_SHUTDOWN_EVENT
 */
struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
	const struct sctp_association *asoc,
	__u16 flags, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_shutdown_event *sse;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_shutdown_event),
				  MSG_NOTIFICATION, gfp);
	if (!event)
		goto fail;

	skb = sctp_event2skb(event);
	sse = (struct sctp_shutdown_event *)
		skb_put(skb, sizeof(struct sctp_shutdown_event));

	/* Socket Extensions for SCTP
	 * 5.3.1.5 SCTP_SHUTDOWN_EVENT
	 *
	 * sse_type
	 * It should be SCTP_SHUTDOWN_EVENT
	 */
	sse->sse_type = SCTP_SHUTDOWN_EVENT;

	/* Socket Extensions for SCTP
	 * 5.3.1.5 SCTP_SHUTDOWN_EVENT
	 *
	 * sse_flags: 16 bits (unsigned integer)
	 * Currently unused.
	 */
	sse->sse_flags = 0;

	/* Socket Extensions for SCTP
	 * 5.3.1.5 SCTP_SHUTDOWN_EVENT
	 *
	 * sse_length: sizeof (__u32)
	 * This field is the total length of the notification data, including
	 * the notification header.
	 */
	sse->sse_length = sizeof(struct sctp_shutdown_event);

	/* Socket Extensions for SCTP
	 * 5.3.1.5 SCTP_SHUTDOWN_EVENT
	 *
	 * sse_assoc_id: sizeof (sctp_assoc_t)
	 * The association id field, holds the identifier for the association.
	 * All notifications for a given association have the same association
	 * identifier.  For TCP style socket, this field is ignored.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	sse->sse_assoc_id = sctp_assoc2id(asoc);

	return event;

fail:
	return NULL;
}

/* Create and initialize a SCTP_ADAPTATION_INDICATION notification.
 *
 * Socket Extensions for SCTP
 * 5.3.1.6 SCTP_ADAPTATION_INDICATION
 */
struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
	const struct sctp_association *asoc, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_adaptation_event *sai;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_adaptation_event),
				  MSG_NOTIFICATION, gfp);
	if (!event)
		goto fail;

	skb = sctp_event2skb(event);
	sai = (struct sctp_adaptation_event *)
		skb_put(skb, sizeof(struct sctp_adaptation_event));

	sai->sai_type = SCTP_ADAPTATION_INDICATION;
	sai->sai_flags = 0;
	sai->sai_length = sizeof(struct sctp_adaptation_event);
	sai->sai_adaptation_ind = asoc->peer.adaptation_ind;
	sctp_ulpevent_set_owner(event, asoc);
	sai->sai_assoc_id = sctp_assoc2id(asoc);

	return event;

fail:
	return NULL;
}

/* A message has been received.  Package this message as a notification
 * to pass it to the upper layers.  Go ahead and calculate the sndrcvinfo
 * even if filtered out later.
 *
 * Socket Extensions for SCTP
 * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
 */
struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
						struct sctp_chunk *chunk,
						gfp_t gfp)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff *skb;
	size_t padding, len;
	int rx_count;

	/*
	 * check to see if we need to make space for this
	 * new skb, expand the rcvbuffer if needed, or drop
	 * the frame
	 */
	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	if (rx_count >= asoc->base.sk->sk_rcvbuf) {

		if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
			goto fail;
	}

	/* Clone the original skb, sharing the data.  */
	skb = skb_clone(chunk->skb, gfp);
	if (!skb)
		goto fail;

	/* Now that all memory allocations for this chunk succeeded, we
	 * can mark it as received so the tsn_map is updated correctly.
	 */
	if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
			     ntohl(chunk->subh.data_hdr->tsn)))
		goto fail_mark;

	/* First calculate the padding, so we don't inadvertently
	 * pass up the wrong length to the user.
	 *
	 * RFC 2960 - Section 3.2  Chunk Field Descriptions
	 *
	 * The total length of a chunk(including Type, Length and Value fields)
	 * MUST be a multiple of 4 bytes.  If the length of the chunk is not a
	 * multiple of 4 bytes, the sender MUST pad the chunk with all zero
	 * bytes and this padding is not included in the chunk length field.
	 * The sender should never pad with more than 3 bytes.  The receiver
	 * MUST ignore the padding bytes.
	 */
	len = ntohs(chunk->chunk_hdr->length);
	padding = WORD_ROUND(len) - len;

	/* Fixup cloned skb with just this chunks data.  */
	skb_trim(skb, chunk->chunk_end - padding - skb->data);

	/* Embed the event fields inside the cloned skb.  */
	event = sctp_skb2event(skb);

	/* Initialize event with flags 0  and correct length
	 * Since this is a clone of the original skb, only account for
	 * the data of this chunk as other chunks will be accounted separately.
	 */
	sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));

	sctp_ulpevent_receive_data(event, asoc);

	event->stream = ntohs(chunk->subh.data_hdr->stream);
	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;
	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
		event->flags |= SCTP_UNORDERED;
		event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	}
	event->tsn = ntohl(chunk->subh.data_hdr->tsn);
	event->msg_flags |= chunk->chunk_hdr->flags;
	event->iif = sctp_chunk_iif(chunk);

	return event;

fail_mark:
	kfree_skb(skb);
fail:
	return NULL;
}

/* Create a partial delivery related event.
 *
 * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT
 *
 *   When a receiver is engaged in a partial delivery of a
 *   message this notification will be used to indicate
 *   various events.
 */
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
	const struct sctp_association *asoc, __u32 indication,
	gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_pdapi_event *pd;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_pdapi_event),
				  MSG_NOTIFICATION, gfp);
	if (!event)
		goto fail;

	skb = sctp_event2skb(event);
	pd = (struct sctp_pdapi_event *)
		skb_put(skb, sizeof(struct sctp_pdapi_event));

	/* pdapi_type
	 *   It should be SCTP_PARTIAL_DELIVERY_EVENT
	 *
	 * pdapi_flags: 16 bits (unsigned integer)
	 *   Currently unused.
	 */
	pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pd->pdapi_flags = 0;

	/* pdapi_length: 32 bits (unsigned integer)
	 *
	 * This field is the total length of the notification data, including
	 * the notification header.  It will generally be sizeof (struct
	 * sctp_pdapi_event).
	 */
	pd->pdapi_length = sizeof(struct sctp_pdapi_event);

	/*  pdapi_indication: 32 bits (unsigned integer)
	 *
	 * This field holds the indication being sent to the application.
	 */
	pd->pdapi_indication = indication;

	/*  pdapi_assoc_id: sizeof (sctp_assoc_t)
	 *
	 * The association id field, holds the identifier for the association.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	pd->pdapi_assoc_id = sctp_assoc2id(asoc);

	return event;
fail:
	return NULL;
}

struct sctp_ulpevent *sctp_ulpevent_make_authkey(
	const struct sctp_association *asoc, __u16 key_id,
	__u32 indication, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_authkey_event *ak;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_authkey_event),
				  MSG_NOTIFICATION, gfp);
	if (!event)
		goto fail;

	skb = sctp_event2skb(event);
	ak = (struct sctp_authkey_event *)
		skb_put(skb, sizeof(struct sctp_authkey_event));

	ak->auth_type = SCTP_AUTHENTICATION_EVENT;
	ak->auth_flags = 0;
	ak->auth_length = sizeof(struct sctp_authkey_event);

	ak->auth_keynumber = key_id;
	ak->auth_altkeynumber = 0;
	ak->auth_indication = indication;

	/*
	 * The association id field, holds the identifier for the association.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	ak->auth_assoc_id = sctp_assoc2id(asoc);

	return event;
fail:
	return NULL;
}

/*
 * Socket Extensions for SCTP
 * 6.3.10. SCTP_SENDER_DRY_EVENT
 */
struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
	const struct sctp_association *asoc, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_sender_dry_event *sdry;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event),
				  MSG_NOTIFICATION, gfp);
	if (!event)
		return NULL;

	skb = sctp_event2skb(event);
	sdry = (struct sctp_sender_dry_event *)
		skb_put(skb, sizeof(struct sctp_sender_dry_event));

	sdry->sender_dry_type = SCTP_SENDER_DRY_EVENT;
	sdry->sender_dry_flags = 0;
	sdry->sender_dry_length = sizeof(struct sctp_sender_dry_event);
	sctp_ulpevent_set_owner(event, asoc);
	sdry->sender_dry_assoc_id = sctp_assoc2id(asoc);

	return event;
}

/* Return the notification type, assuming this is a notification
 * event.
 */
__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
{
	union sctp_notification *notification;
	struct sk_buff *skb;

	skb = sctp_event2skb(event);
	notification = (union sctp_notification *) skb->data;
	return notification->sn_header.sn_type;
}

/* Copy out the sndrcvinfo into a msghdr.  */
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
				   struct msghdr *msghdr)
{
	struct sctp_sndrcvinfo sinfo;

	if (sctp_ulpevent_is_notification(event))
		return;

	/* Sockets API Extensions for SCTP
	 * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
	 *
	 * sinfo_stream: 16 bits (unsigned integer)
	 *
	 * For recvmsg() the SCTP stack places the message's stream number in
	 * this value.
	 */
	sinfo.sinfo_stream = event->stream;

	/* sinfo_ssn: 16 bits (unsigned integer)
	 *
	 * For recvmsg() this value contains the stream sequence number that
	 * the remote endpoint placed in the DATA chunk.  For fragmented
	 * messages this is the same number for all deliveries of the message
	 * (if more than one recvmsg() is needed to read the message).
	 */
	sinfo.sinfo_ssn = event->ssn;

	/* sinfo_ppid: 32 bits (unsigned integer)
	 *
	 * In recvmsg() this value is
	 * the same information that was passed by the upper layer in the peer
	 * application.  Please note that byte order issues are NOT accounted
	 * for and this information is passed opaquely by the SCTP stack from
	 * one end to the other.
	 */
	sinfo.sinfo_ppid = event->ppid;

	/* sinfo_flags: 16 bits (unsigned integer)
	 *
	 * This field may contain any of the following flags and is composed of
	 * a bitwise OR of these values.
	 *
	 * recvmsg() flags:
	 *
	 * SCTP_UNORDERED - This flag is present when the message was sent
	 *                  non-ordered.
	 */
	sinfo.sinfo_flags = event->flags;

	/* sinfo_tsn: 32 bit (unsigned integer)
	 *
	 * For the receiving side, this field holds a TSN that was
	 * assigned to one of the SCTP Data Chunks.
	 */
	sinfo.sinfo_tsn = event->tsn;

	/* sinfo_cumtsn: 32 bit (unsigned integer)
	 *
	 * This field will hold the current cumulative TSN as
	 * known by the underlying SCTP layer.  Note this field is
	 * ignored when sending and only valid for a receive
	 * operation when sinfo_flags are set to SCTP_UNORDERED.
	 */
	sinfo.sinfo_cumtsn = event->cumtsn;

	/* sinfo_assoc_id: sizeof (sctp_assoc_t)
	 *
	 * The association handle field, sinfo_assoc_id, holds the identifier
	 * for the association announced in the COMMUNICATION_UP notification.
	 * All notifications for a given association have the same identifier.
	 * Ignored for one-to-one style sockets.
	 */
	sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);

	/* context value that is set via SCTP_CONTEXT socket option. */
	sinfo.sinfo_context = event->asoc->default_rcv_context;

	/* These fields are not used while receiving.
*/ sinfo.sinfo_timetolive = 0; put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, sizeof(struct sctp_sndrcvinfo), (void *)&sinfo); } /* Do accounting for bytes received and hold a reference to the association * for each skb. */ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, struct sctp_association *asoc) { struct sk_buff *skb, *frag; skb = sctp_event2skb(event); /* Set the owner and charge rwnd for bytes received. */ sctp_ulpevent_set_owner(event, asoc); sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); if (!skb->data_len) return; /* Note: Not clearing the entire event struct as this is just a * fragment of the real event. However, we still need to do rwnd * accounting. * In general, the skb passed from IP can have only 1 level of * fragments. But we allow multiple levels of fragments. */ skb_walk_frags(skb, frag) sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc); } /* Do accounting for bytes just read by user and release the references to * the association. */ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) { struct sk_buff *skb, *frag; unsigned int len; /* Current stack structures assume that the rcv buffer is * per socket. For UDP style sockets this is not true as * multiple associations may be on a single UDP-style socket. * Use the local private area of the skb to track the owning * association. */ skb = sctp_event2skb(event); len = skb->len; if (!skb->data_len) goto done; /* Don't forget the fragments. */ skb_walk_frags(skb, frag) { /* NOTE: skb_shinfos are recursive. Although IP returns * skb's with only 1 level of fragments, SCTP reassembly can * increase the levels. */ sctp_ulpevent_release_frag_data(sctp_skb2event(frag)); } done: sctp_assoc_rwnd_increase(event->asoc, len); sctp_ulpevent_release_owner(event); } static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) { struct sk_buff *skb, *frag; skb = sctp_event2skb(event); if (!skb->data_len) goto done; /* Don't forget the fragments. */ skb_walk_frags(skb, frag) { /* NOTE: skb_shinfos are recursive. Although IP returns * skb's with only 1 level of fragments, SCTP reassembly can * increase the levels. */ sctp_ulpevent_release_frag_data(sctp_skb2event(frag)); } done: sctp_ulpevent_release_owner(event); } /* Free a ulpevent that has an owner. It includes releasing the reference * to the owner, updating the rwnd in case of a DATA event and freeing the * skb. */ void sctp_ulpevent_free(struct sctp_ulpevent *event) { if (sctp_ulpevent_is_notification(event)) sctp_ulpevent_release_owner(event); else sctp_ulpevent_release_data(event); kfree_skb(sctp_event2skb(event)); } /* Purge the skb lists holding ulpevents. */ unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list) { struct sk_buff *skb; unsigned int data_unread = 0; while ((skb = skb_dequeue(list)) != NULL) { struct sctp_ulpevent *event = sctp_skb2event(skb); if (!sctp_ulpevent_is_notification(event)) data_unread += skb->len; sctp_ulpevent_free(event); } return data_unread; }
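/*
 * [Editorial sketch — not part of net/sctp/ulpevent.c]  The helpers above
 * build the notifications that userspace ultimately reads with recvmsg().
 * A minimal, hypothetical consumer is shown below, assuming a connected
 * SCTP socket `fd` and the lksctp-tools userspace header <netinet/sctp.h>:
 * it subscribes with SCTP_EVENTS and tells notifications apart from data
 * via MSG_NOTIFICATION, the flag these sctp_ulpevent_make_*() helpers set.
 */
#if 0	/* illustration only; never compiled into the kernel */
#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static int read_one_event_or_chunk(int fd)
{
	char buf[2048];
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct sctp_event_subscribe events;
	ssize_t n;

	/* Ask the stack to deliver the events constructed in this file. */
	memset(&events, 0, sizeof(events));
	events.sctp_data_io_event = 1;		/* SCTP_SNDRCV cmsg      */
	events.sctp_shutdown_event = 1;		/* SCTP_SHUTDOWN_EVENT   */
	events.sctp_send_failure_event = 1;	/* SCTP_SEND_FAILED      */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
		       &events, sizeof(events)) < 0)
		return -1;

	n = recvmsg(fd, &msg, 0);
	if (n <= 0)
		return -1;

	if (msg.msg_flags & MSG_NOTIFICATION) {
		/* Same layout the sctp_ulpevent_make_*() helpers fill in. */
		union sctp_notification *sn = (union sctp_notification *)buf;
		return sn->sn_header.sn_type; /* e.g. SCTP_SHUTDOWN_EVENT */
	}
	return 0;	/* ordinary DATA; sndrcvinfo arrives in cbuf */
}
#endif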
jeffegg/beaglebone
net/sctp/ulpevent.c
C
gpl-2.0
32388
/* Miscellaneous utilities for GIMPLE streaming. Things that are used in both input and output are here. Copyright (C) 2009-2015 Free Software Foundation, Inc. Contributed by Doug Kwan <dougkwan@google.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "toplev.h" #include "flags.h" #include "hash-set.h" #include "machmode.h" #include "vec.h" #include "double-int.h" #include "input.h" #include "alias.h" #include "symtab.h" #include "wide-int.h" #include "inchash.h" #include "tree.h" #include "fold-const.h" #include "predict.h" #include "hard-reg-set.h" #include "input.h" #include "function.h" #include "basic-block.h" #include "tree-ssa-alias.h" #include "internal-fn.h" #include "gimple-expr.h" #include "is-a.h" #include "gimple.h" #include "bitmap.h" #include "diagnostic-core.h" #include "hash-map.h" #include "plugin-api.h" #include "ipa-ref.h" #include "cgraph.h" #include "tree-streamer.h" #include "lto-streamer.h" #include "lto-section-names.h" #include "streamer-hooks.h" /* Statistics gathered during LTO, WPA and LTRANS. */ struct lto_stats_d lto_stats; /* LTO uses bitmaps with different life-times. So use a separate obstack for all LTO bitmaps. */ static bitmap_obstack lto_obstack; static bool lto_obstack_initialized; const char *section_name_prefix = LTO_SECTION_NAME_PREFIX; /* Set when streaming LTO for offloading compiler. */ bool lto_stream_offload_p; /* Return a string representing LTO tag TAG. */ const char * lto_tag_name (enum LTO_tags tag) { if (lto_tag_is_tree_code_p (tag)) { /* For tags representing tree nodes, return the name of the associated tree code. */ return get_tree_code_name (lto_tag_to_tree_code (tag)); } if (lto_tag_is_gimple_code_p (tag)) { /* For tags representing gimple statements, return the name of the associated gimple code. 
*/ return gimple_code_name[lto_tag_to_gimple_code (tag)]; } switch (tag) { case LTO_null: return "LTO_null"; case LTO_bb0: return "LTO_bb0"; case LTO_bb1: return "LTO_bb1"; case LTO_eh_region: return "LTO_eh_region"; case LTO_function: return "LTO_function"; case LTO_eh_table: return "LTO_eh_table"; case LTO_ert_cleanup: return "LTO_ert_cleanup"; case LTO_ert_try: return "LTO_ert_try"; case LTO_ert_allowed_exceptions: return "LTO_ert_allowed_exceptions"; case LTO_ert_must_not_throw: return "LTO_ert_must_not_throw"; case LTO_tree_pickle_reference: return "LTO_tree_pickle_reference"; case LTO_field_decl_ref: return "LTO_field_decl_ref"; case LTO_function_decl_ref: return "LTO_function_decl_ref"; case LTO_label_decl_ref: return "LTO_label_decl_ref"; case LTO_namespace_decl_ref: return "LTO_namespace_decl_ref"; case LTO_result_decl_ref: return "LTO_result_decl_ref"; case LTO_ssa_name_ref: return "LTO_ssa_name_ref"; case LTO_type_decl_ref: return "LTO_type_decl_ref"; case LTO_type_ref: return "LTO_type_ref"; case LTO_global_decl_ref: return "LTO_global_decl_ref"; default: return "LTO_UNKNOWN"; } } /* Allocate a bitmap from heap. Initializes the LTO obstack if necessary. */ bitmap lto_bitmap_alloc (void) { if (!lto_obstack_initialized) { bitmap_obstack_initialize (&lto_obstack); lto_obstack_initialized = true; } return BITMAP_ALLOC (&lto_obstack); } /* Free bitmap B. */ void lto_bitmap_free (bitmap b) { BITMAP_FREE (b); } /* Get a section name for a particular type or name. The NAME field is only used if SECTION_TYPE is LTO_section_function_body. For all others it is ignored. The callee of this function is responsible to free the returned name. */ char * lto_get_section_name (int section_type, const char *name, struct lto_file_decl_data *f) { const char *add; char post[32]; const char *sep; if (section_type == LTO_section_function_body) { gcc_assert (name != NULL); if (name[0] == '*') name++; add = name; sep = ""; } else if (section_type < LTO_N_SECTION_TYPES) { add = lto_section_name[section_type]; sep = "."; } else internal_error ("bytecode stream: unexpected LTO section %s", name); /* Make the section name unique so that ld -r combining sections doesn't confuse the reader with merged sections. For options don't add a ID, the option reader cannot deal with them and merging should be ok here. */ if (section_type == LTO_section_opts) strcpy (post, ""); else if (f != NULL) sprintf (post, "." HOST_WIDE_INT_PRINT_HEX_PURE, f->id); else sprintf (post, "." HOST_WIDE_INT_PRINT_HEX_PURE, get_random_seed (false)); return concat (section_name_prefix, sep, add, post, NULL); } /* Show various memory usage statistics related to LTO. 
*/ void print_lto_report (const char *s) { unsigned i; fprintf (stderr, "[%s] # of input files: " HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_input_files); fprintf (stderr, "[%s] # of input cgraph nodes: " HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_input_cgraph_nodes); fprintf (stderr, "[%s] # of function bodies: " HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_function_bodies); for (i = 0; i < NUM_TREE_CODES; i++) if (lto_stats.num_trees[i]) fprintf (stderr, "[%s] # of '%s' objects read: " HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, get_tree_code_name ((enum tree_code) i), lto_stats.num_trees[i]); if (flag_lto) { fprintf (stderr, "[%s] Compression: " HOST_WIDE_INT_PRINT_UNSIGNED " output bytes, " HOST_WIDE_INT_PRINT_UNSIGNED " compressed bytes", s, lto_stats.num_output_il_bytes, lto_stats.num_compressed_il_bytes); if (lto_stats.num_output_il_bytes > 0) { const float dividend = (float) lto_stats.num_compressed_il_bytes; const float divisor = (float) lto_stats.num_output_il_bytes; fprintf (stderr, " (ratio: %f)", dividend / divisor); } fprintf (stderr, "\n"); } if (flag_wpa) { fprintf (stderr, "[%s] # of output files: " HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_output_files); fprintf (stderr, "[%s] # of output symtab nodes: " HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_output_symtab_nodes); fprintf (stderr, "[%s] # of output tree pickle references: " HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_pickle_refs_output); fprintf (stderr, "[%s] # of output tree bodies: " HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_tree_bodies_output); fprintf (stderr, "[%s] # callgraph partitions: " HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_cgraph_partitions); fprintf (stderr, "[%s] Compression: " HOST_WIDE_INT_PRINT_UNSIGNED " input bytes, " HOST_WIDE_INT_PRINT_UNSIGNED " uncompressed bytes", s, lto_stats.num_input_il_bytes, lto_stats.num_uncompressed_il_bytes); if (lto_stats.num_input_il_bytes > 0) { const float dividend = (float) lto_stats.num_uncompressed_il_bytes; const float divisor = (float) lto_stats.num_input_il_bytes; fprintf (stderr, " (ratio: %f)", dividend / divisor); } fprintf (stderr, "\n"); } for (i = 0; i < LTO_N_SECTION_TYPES; i++) fprintf (stderr, "[%s] Size of mmap'd section %s: " HOST_WIDE_INT_PRINT_UNSIGNED " bytes\n", s, lto_section_name[i], lto_stats.section_size[i]); } #ifdef LTO_STREAMER_DEBUG struct tree_hash_entry { tree key; intptr_t value; }; struct tree_entry_hasher : typed_noop_remove <tree_hash_entry> { typedef tree_hash_entry value_type; typedef tree_hash_entry compare_type; static inline hashval_t hash (const value_type *); static inline bool equal (const value_type *, const compare_type *); }; inline hashval_t tree_entry_hasher::hash (const value_type *e) { return htab_hash_pointer (e->key); } inline bool tree_entry_hasher::equal (const value_type *e1, const compare_type *e2) { return (e1->key == e2->key); } static hash_table<tree_hash_entry> *tree_htab; #endif /* Initialization common to the LTO reader and writer. */ void lto_streamer_init (void) { /* Check that all the TS_* handled by the reader and writer routines match exactly the structures defined in treestruct.def. When a new TS_* astructure is added, the streamer should be updated to handle it. */ streamer_check_handled_ts_structures (); #ifdef LTO_STREAMER_DEBUG tree_htab = new hash_table<tree_hash_entry> (31); #endif } /* Gate function for all LTO streaming passes. 
*/ bool gate_lto_out (void) { return ((flag_generate_lto || flag_generate_offload || in_lto_p) /* Don't bother doing anything if the program has errors. */ && !seen_error ()); } #ifdef LTO_STREAMER_DEBUG /* Add a mapping between T and ORIG_T, which is the numeric value of the original address of T as it was seen by the LTO writer. This mapping is useful when debugging streaming problems. A debugging session can be started on both reader and writer using ORIG_T as a breakpoint value in both sessions. Note that this mapping is transient and only valid while T is being reconstructed. Once T is fully built, the mapping is removed. */ void lto_orig_address_map (tree t, intptr_t orig_t) { struct tree_hash_entry ent; struct tree_hash_entry **slot; ent.key = t; ent.value = orig_t; slot = tree_htab->find_slot (&ent, INSERT); gcc_assert (!*slot); *slot = XNEW (struct tree_hash_entry); **slot = ent; } /* Get the original address of T as it was seen by the writer. This is only valid while T is being reconstructed. */ intptr_t lto_orig_address_get (tree t) { struct tree_hash_entry ent; struct tree_hash_entry **slot; ent.key = t; slot = tree_htab->find_slot (&ent, NO_INSERT); return (slot ? (*slot)->value : 0); } /* Clear the mapping of T to its original address. */ void lto_orig_address_remove (tree t) { struct tree_hash_entry ent; struct tree_hash_entry **slot; ent.key = t; slot = tree_htab->find_slot (&ent, NO_INSERT); gcc_assert (slot); free (*slot); tree_htab->clear_slot (slot); } #endif /* Check that the version MAJOR.MINOR is the correct version number. */ void lto_check_version (int major, int minor) { if (major != LTO_major_version || minor != LTO_minor_version) fatal_error ("bytecode stream generated with LTO version %d.%d instead " "of the expected %d.%d", major, minor, LTO_major_version, LTO_minor_version); } /* Initialize all the streamer hooks used for streaming GIMPLE. */ void lto_streamer_hooks_init (void) { streamer_hooks_init (); streamer_hooks.write_tree = lto_output_tree; streamer_hooks.read_tree = lto_input_tree; streamer_hooks.input_location = lto_input_location; streamer_hooks.output_location = lto_output_location; }
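/* [Editorial sketch — not part of lto-streamer.c]  lto_get_section_name()
   above composes  prefix + sep + name + ".<hex id>"  via concat().  The
   standalone mimic below shows the resulting shape for a function-body
   section, assuming LTO_SECTION_NAME_PREFIX is ".gnu.lto_" (its value in
   lto-streamer.h); the name and file id here are made up for illustration.  */
#if 0 /* illustration only */
#include <stdio.h>

int
main (void)
{
  const char *prefix = ".gnu.lto_";	/* section_name_prefix */
  const char *name = "my_function";	/* leading '*' already stripped */
  unsigned long id = 0x1234;		/* lto_file_decl_data::id */
  char buf[128];

  /* Function bodies use an empty separator; other section types put
     "." between the prefix and lto_section_name[type].  */
  snprintf (buf, sizeof buf, "%s%s.%lx", prefix, name, id);
  puts (buf);				/* prints ".gnu.lto_my_function.1234" */
  return 0;
}
#endif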
kito-cheng/gcc
gcc/lto-streamer.c
C
gpl-2.0
11719
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ //============================================================ // include files //============================================================ #include "mp_precomp.h" #include "phydm_precomp.h" u1Byte ODM_GetAutoChannelSelectResult( IN PVOID pDM_VOID, IN u1Byte Band ) { PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; PACS pACS = &pDM_Odm->DM_ACS; #if (DM_ODM_SUPPORT_TYPE & (ODM_WIN|ODM_CE)) if(Band == ODM_BAND_2_4G) { ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("[ACS] ODM_GetAutoChannelSelectResult(): CleanChannel_2G(%d)\n", pACS->CleanChannel_2G)); return (u1Byte)pACS->CleanChannel_2G; } else { ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("[ACS] ODM_GetAutoChannelSelectResult(): CleanChannel_5G(%d)\n", pACS->CleanChannel_5G)); return (u1Byte)pACS->CleanChannel_5G; } #else return (u1Byte)pACS->CleanChannel_2G; #endif } VOID odm_AutoChannelSelectSetting( IN PVOID pDM_VOID, IN BOOLEAN IsEnable ) { #if (DM_ODM_SUPPORT_TYPE & (ODM_WIN|ODM_CE)) PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; u2Byte period = 0x2710;// 40ms in default u2Byte NHMType = 0x7; ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("odm_AutoChannelSelectSetting()=========> \n")); if(IsEnable) {//20 ms period = 0x1388; NHMType = 0x1; } if(pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) { //PHY parameters initialize for ac series ODM_Write2Byte(pDM_Odm, ODM_REG_NHM_TIMER_11AC+2, period); //0x990[31:16]=0x2710 Time duration for NHM unit: 4us, 0x2710=40ms //ODM_SetBBReg(pDM_Odm, ODM_REG_NHM_TH9_TH10_11AC, BIT8|BIT9|BIT10, NHMType); //0x994[9:8]=3 enable CCX } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) { //PHY parameters initialize for n series ODM_Write2Byte(pDM_Odm, ODM_REG_NHM_TIMER_11N+2, period); //0x894[31:16]=0x2710 Time duration for NHM unit: 4us, 0x2710=40ms //ODM_SetBBReg(pDM_Odm, ODM_REG_NHM_TH9_TH10_11N, BIT10|BIT9|BIT8, NHMType); //0x890[9:8]=3 enable CCX } #endif } VOID odm_AutoChannelSelectInit( IN PVOID pDM_VOID ) { #if (DM_ODM_SUPPORT_TYPE & (ODM_WIN|ODM_CE)) PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; PACS pACS = &pDM_Odm->DM_ACS; u1Byte i; if(!(pDM_Odm->SupportAbility & ODM_BB_NHM_CNT)) return; if(pACS->bForceACSResult) return; ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("odm_AutoChannelSelectInit()=========> \n")); pACS->CleanChannel_2G = 1; pACS->CleanChannel_5G = 36; for (i = 0; i < ODM_MAX_CHANNEL_2G; ++i) { pACS->Channel_Info_2G[0][i] = 0; pACS->Channel_Info_2G[1][i] = 0; } if(pDM_Odm->SupportICType & (ODM_IC_11AC_SERIES|ODM_RTL8192D)) { for (i = 0; i < ODM_MAX_CHANNEL_5G; ++i) { pACS->Channel_Info_5G[0][i] = 0; pACS->Channel_Info_5G[1][i] = 0; } } #endif } VOID odm_AutoChannelSelectReset( IN PVOID pDM_VOID ) { #if (DM_ODM_SUPPORT_TYPE 
& (ODM_WIN|ODM_CE)) PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; PACS pACS = &pDM_Odm->DM_ACS; if(!(pDM_Odm->SupportAbility & ODM_BB_NHM_CNT)) return; if(pACS->bForceACSResult) return; ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("odm_AutoChannelSelectReset()=========> \n")); odm_AutoChannelSelectSetting(pDM_Odm,TRUE);// for 20ms measurement Phydm_NHMCounterStatisticsReset(pDM_Odm); #endif } VOID odm_AutoChannelSelect( IN PVOID pDM_VOID, IN u1Byte Channel ) { #if (DM_ODM_SUPPORT_TYPE & (ODM_WIN|ODM_CE)) PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; PACS pACS = &pDM_Odm->DM_ACS; u1Byte ChannelIDX = 0, SearchIDX = 0; u2Byte MaxScore=0; if(!(pDM_Odm->SupportAbility & ODM_BB_NHM_CNT)) { ODM_RT_TRACE(pDM_Odm,ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_AutoChannelSelect(): Return: SupportAbility ODM_BB_NHM_CNT is disabled\n")); return; } if(pACS->bForceACSResult) { ODM_RT_TRACE(pDM_Odm,ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_AutoChannelSelect(): Force 2G clean channel = %d, 5G clean channel = %d\n", pACS->CleanChannel_2G, pACS->CleanChannel_5G)); return; } ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("odm_AutoChannelSelect(): Channel = %d=========> \n", Channel)); Phydm_GetNHMCounterStatistics(pDM_Odm); odm_AutoChannelSelectSetting(pDM_Odm,FALSE); if(Channel >=1 && Channel <=14) { ChannelIDX = Channel - 1; pACS->Channel_Info_2G[1][ChannelIDX]++; if(pACS->Channel_Info_2G[1][ChannelIDX] >= 2) pACS->Channel_Info_2G[0][ChannelIDX] = (pACS->Channel_Info_2G[0][ChannelIDX] >> 1) + (pACS->Channel_Info_2G[0][ChannelIDX] >> 2) + (pDM_Odm->NHM_cnt_0>>2); else pACS->Channel_Info_2G[0][ChannelIDX] = pDM_Odm->NHM_cnt_0; ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("odm_AutoChannelSelect(): NHM_cnt_0 = %d \n", pDM_Odm->NHM_cnt_0)); ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("odm_AutoChannelSelect(): Channel_Info[0][%d] = %d, Channel_Info[1][%d] = %d\n", ChannelIDX, pACS->Channel_Info_2G[0][ChannelIDX], ChannelIDX, pACS->Channel_Info_2G[1][ChannelIDX])); for(SearchIDX = 0; SearchIDX < ODM_MAX_CHANNEL_2G; SearchIDX++) { if(pACS->Channel_Info_2G[1][SearchIDX] != 0) { if(pACS->Channel_Info_2G[0][SearchIDX] >= MaxScore) { MaxScore = pACS->Channel_Info_2G[0][SearchIDX]; pACS->CleanChannel_2G = SearchIDX+1; } } } ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("(1)odm_AutoChannelSelect(): 2G: CleanChannel_2G = %d, MaxScore = %d \n", pACS->CleanChannel_2G, MaxScore)); } else if(Channel >= 36) { // Need to do pACS->CleanChannel_5G = Channel; } #endif } #if ( DM_ODM_SUPPORT_TYPE & ODM_AP ) VOID phydm_AutoChannelSelectSettingAP( IN PVOID pDM_VOID, IN u4Byte setting, // 0: STORE_DEFAULT_NHM_SETTING; 1: RESTORE_DEFAULT_NHM_SETTING, 2: ACS_NHM_SETTING IN u4Byte acs_step ) { PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; prtl8192cd_priv priv = pDM_Odm->priv; PACS pACS = &pDM_Odm->DM_ACS; ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("odm_AutoChannelSelectSettingAP()=========> \n")); //3 Store Default Setting if(setting == STORE_DEFAULT_NHM_SETTING) { ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("STORE_DEFAULT_NHM_SETTING\n")); if(pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) // store Reg0x990, Reg0x994, Reg0x998, Reg0x99C, Reg0x9a0 { pACS->Reg0x990 = ODM_Read4Byte(pDM_Odm, ODM_REG_NHM_TIMER_11AC); // Reg0x990 pACS->Reg0x994 = ODM_Read4Byte(pDM_Odm, ODM_REG_NHM_TH9_TH10_11AC); // Reg0x994 pACS->Reg0x998 = ODM_Read4Byte(pDM_Odm, ODM_REG_NHM_TH3_TO_TH0_11AC); // Reg0x998 pACS->Reg0x99C = ODM_Read4Byte(pDM_Odm, ODM_REG_NHM_TH7_TO_TH4_11AC); // Reg0x99c pACS->Reg0x9A0 = ODM_Read1Byte(pDM_Odm, ODM_REG_NHM_TH8_11AC); // 
Reg0x9a0, u1Byte } else if(pDM_Odm->SupportICType & ODM_IC_11N_SERIES) { pACS->Reg0x890 = ODM_Read4Byte(pDM_Odm, ODM_REG_NHM_TH9_TH10_11N); // Reg0x890 pACS->Reg0x894 = ODM_Read4Byte(pDM_Odm, ODM_REG_NHM_TIMER_11N); // Reg0x894 pACS->Reg0x898 = ODM_Read4Byte(pDM_Odm, ODM_REG_NHM_TH3_TO_TH0_11N); // Reg0x898 pACS->Reg0x89C = ODM_Read4Byte(pDM_Odm, ODM_REG_NHM_TH7_TO_TH4_11N); // Reg0x89c pACS->Reg0xE28 = ODM_Read1Byte(pDM_Odm, ODM_REG_NHM_TH8_11N); // Reg0xe28, u1Byte } } //3 Restore Default Setting else if(setting == RESTORE_DEFAULT_NHM_SETTING) { ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("RESTORE_DEFAULT_NHM_SETTING\n")); if(pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) // store Reg0x990, Reg0x994, Reg0x998, Reg0x99C, Reg0x9a0 { ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TIMER_11AC, pACS->Reg0x990); ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH9_TH10_11AC, pACS->Reg0x994); ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH3_TO_TH0_11AC, pACS->Reg0x998); ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH7_TO_TH4_11AC, pACS->Reg0x99C); ODM_Write1Byte(pDM_Odm, ODM_REG_NHM_TH8_11AC, pACS->Reg0x9A0); } else if(pDM_Odm->SupportICType & ODM_IC_11N_SERIES) { ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH9_TH10_11N, pACS->Reg0x890); ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TIMER_11N, pACS->Reg0x894); ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH3_TO_TH0_11N, pACS->Reg0x898); ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH7_TO_TH4_11N, pACS->Reg0x89C); ODM_Write1Byte(pDM_Odm, ODM_REG_NHM_TH8_11N, pACS->Reg0xE28); } } //3 ACS Setting else if(setting == ACS_NHM_SETTING) { ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("ACS_NHM_SETTING\n")); u2Byte period; period = 0x61a8; pACS->ACS_Step = acs_step; if(pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) { //4 Set NHM period, 0x990[31:16]=0x61a8, Time duration for NHM unit: 4us, 0x61a8=100ms ODM_Write2Byte(pDM_Odm, ODM_REG_NHM_TIMER_11AC+2, period); //4 Set NHM ignore_cca=1, ignore_txon=1, ccx_en=0 ODM_SetBBReg(pDM_Odm, ODM_REG_NHM_TH9_TH10_11AC,BIT8|BIT9|BIT10, 3); if(pACS->ACS_Step == 0) { //4 Set IGI ODM_SetBBReg(pDM_Odm,0xc50,BIT0|BIT1|BIT2|BIT3|BIT4|BIT5|BIT6,0x3E); if (get_rf_mimo_mode(priv) != MIMO_1T1R) ODM_SetBBReg(pDM_Odm,0xe50,BIT0|BIT1|BIT2|BIT3|BIT4|BIT5|BIT6,0x3E); //4 Set ACS NHM threshold ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH3_TO_TH0_11AC, 0x82786e64); ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH7_TO_TH4_11AC, 0xffffff8c); ODM_Write1Byte(pDM_Odm, ODM_REG_NHM_TH8_11AC, 0xff); ODM_Write2Byte(pDM_Odm, ODM_REG_NHM_TH9_TH10_11AC+2, 0xffff); } else if(pACS->ACS_Step == 1) { //4 Set IGI ODM_SetBBReg(pDM_Odm,0xc50,BIT0|BIT1|BIT2|BIT3|BIT4|BIT5|BIT6,0x2A); if (get_rf_mimo_mode(priv) != MIMO_1T1R) ODM_SetBBReg(pDM_Odm,0xe50,BIT0|BIT1|BIT2|BIT3|BIT4|BIT5|BIT6,0x2A); //4 Set ACS NHM threshold ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH3_TO_TH0_11AC, 0x5a50463c); ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH7_TO_TH4_11AC, 0xffffff64); } } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) { //4 Set NHM period, 0x894[31:16]=0x61a8, Time duration for NHM unit: 4us, 0x61a8=100ms ODM_Write2Byte(pDM_Odm, ODM_REG_NHM_TIMER_11N+2, period); //4 Set NHM ignore_cca=1, ignore_txon=1, ccx_en=0 ODM_SetBBReg(pDM_Odm, ODM_REG_NHM_TH9_TH10_11N,BIT8|BIT9|BIT10, 3); if(pACS->ACS_Step == 0) { //4 Set IGI ODM_SetBBReg(pDM_Odm,0xc50,BIT0|BIT1|BIT2|BIT3|BIT4|BIT5|BIT6,0x3E); if (get_rf_mimo_mode(priv) != MIMO_1T1R) ODM_SetBBReg(pDM_Odm,0xc58,BIT0|BIT1|BIT2|BIT3|BIT4|BIT5|BIT6,0x3E); //4 Set ACS NHM threshold ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH3_TO_TH0_11N, 0x82786e64); ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH7_TO_TH4_11N, 0xffffff8c); 
ODM_Write1Byte(pDM_Odm, ODM_REG_NHM_TH8_11N, 0xff); ODM_Write2Byte(pDM_Odm, ODM_REG_NHM_TH9_TH10_11N+2, 0xffff); } else if(pACS->ACS_Step == 1) { //4 Set IGI ODM_SetBBReg(pDM_Odm,0xc50,BIT0|BIT1|BIT2|BIT3|BIT4|BIT5|BIT6,0x2A); if (get_rf_mimo_mode(priv) != MIMO_1T1R) ODM_SetBBReg(pDM_Odm,0xc58,BIT0|BIT1|BIT2|BIT3|BIT4|BIT5|BIT6,0x2A); //4 Set ACS NHM threshold ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH3_TO_TH0_11N, 0x5a50463c); ODM_Write4Byte(pDM_Odm, ODM_REG_NHM_TH7_TO_TH4_11N, 0xffffff64); } } } } VOID phydm_GetNHMStatisticsAP( IN PVOID pDM_VOID, IN u4Byte idx, // @ 2G, Real channel number = idx+1 IN u4Byte acs_step ) { PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; prtl8192cd_priv priv = pDM_Odm->priv; PACS pACS = &pDM_Odm->DM_ACS; u4Byte value32 = 0; u1Byte i; pACS->ACS_Step = acs_step; if(pDM_Odm->SupportICType & ODM_IC_11N_SERIES) { //4 Check if NHM result is ready for (i=0; i<20; i++) { ODM_delay_ms(1); if ( ODM_GetBBReg(pDM_Odm,rFPGA0_PSDReport,BIT17) ) break; } //4 Get NHM Statistics if ( pACS->ACS_Step==1 ) { value32 = ODM_Read4Byte(pDM_Odm,ODM_REG_NHM_CNT7_TO_CNT4_11N); pACS->NHM_Cnt[idx][9] = (value32 & bMaskByte1) >> 8; pACS->NHM_Cnt[idx][8] = (value32 & bMaskByte0); value32 = ODM_Read4Byte(pDM_Odm,ODM_REG_NHM_CNT_11N); // ODM_REG_NHM_CNT3_TO_CNT0_11N pACS->NHM_Cnt[idx][7] = (value32 & bMaskByte3) >> 24; pACS->NHM_Cnt[idx][6] = (value32 & bMaskByte2) >> 16; pACS->NHM_Cnt[idx][5] = (value32 & bMaskByte1) >> 8; } else if (pACS->ACS_Step==2) { value32 = ODM_Read4Byte(pDM_Odm,ODM_REG_NHM_CNT_11N); // ODM_REG_NHM_CNT3_TO_CNT0_11N pACS->NHM_Cnt[idx][4] = ODM_Read1Byte(pDM_Odm, ODM_REG_NHM_CNT7_TO_CNT4_11N); pACS->NHM_Cnt[idx][3] = (value32 & bMaskByte3) >> 24; pACS->NHM_Cnt[idx][2] = (value32 & bMaskByte2) >> 16; pACS->NHM_Cnt[idx][1] = (value32 & bMaskByte1) >> 8; pACS->NHM_Cnt[idx][0] = (value32 & bMaskByte0); } } else if(pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) { //4 Check if NHM result is ready for (i=0; i<20; i++) { ODM_delay_ms(1); if (ODM_GetBBReg(pDM_Odm,ODM_REG_NHM_DUR_READY_11AC,BIT17)) break; } if ( pACS->ACS_Step==1 ) { value32 = ODM_Read4Byte(pDM_Odm,ODM_REG_NHM_CNT7_TO_CNT4_11AC); pACS->NHM_Cnt[idx][9] = (value32 & bMaskByte1) >> 8; pACS->NHM_Cnt[idx][8] = (value32 & bMaskByte0); value32 = ODM_Read4Byte(pDM_Odm,ODM_REG_NHM_CNT_11AC); // ODM_REG_NHM_CNT3_TO_CNT0_11AC pACS->NHM_Cnt[idx][7] = (value32 & bMaskByte3) >> 24; pACS->NHM_Cnt[idx][6] = (value32 & bMaskByte2) >> 16; pACS->NHM_Cnt[idx][5] = (value32 & bMaskByte1) >> 8; } else if (pACS->ACS_Step==2) { value32 = ODM_Read4Byte(pDM_Odm,ODM_REG_NHM_CNT_11AC); // ODM_REG_NHM_CNT3_TO_CNT0_11AC pACS->NHM_Cnt[idx][4] = ODM_Read1Byte(pDM_Odm, ODM_REG_NHM_CNT7_TO_CNT4_11AC); pACS->NHM_Cnt[idx][3] = (value32 & bMaskByte3) >> 24; pACS->NHM_Cnt[idx][2] = (value32 & bMaskByte2) >> 16; pACS->NHM_Cnt[idx][1] = (value32 & bMaskByte1) >> 8; pACS->NHM_Cnt[idx][0] = (value32 & bMaskByte0); } } } //#define ACS_DEBUG_INFO //acs debug default off /* int phydm_AutoChannelSelectAP( IN PVOID pDM_VOID, IN u4Byte ACS_Type, // 0: RXCount_Type, 1:NHM_Type IN u4Byte available_chnl_num // amount of all channels ) { PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; PACS pACS = &pDM_Odm->DM_ACS; prtl8192cd_priv priv = pDM_Odm->priv; static u4Byte score2G[MAX_2G_CHANNEL_NUM], score5G[MAX_5G_CHANNEL_NUM]; u4Byte score[MAX_BSS_NUM], use_nhm = 0; u4Byte minScore=0xffffffff; u4Byte tmpScore, tmpIdx=0; u4Byte traffic_check = 0; u4Byte fa_count_weighting = 1; int i, j, idx=0, idx_2G_end=-1, idx_5G_begin=-1, minChan=0; struct bss_desc *pBss=NULL; #ifdef _DEBUG_RTL8192CD_ 
char tmpbuf[400]; int len=0; #endif memset(score2G, '\0', sizeof(score2G)); memset(score5G, '\0', sizeof(score5G)); for (i=0; i<priv->available_chnl_num; i++) { if (priv->available_chnl[i] <= 14) idx_2G_end = i; else break; } for (i=0; i<priv->available_chnl_num; i++) { if (priv->available_chnl[i] > 14) { idx_5G_begin = i; break; } } // DELETE #ifndef CONFIG_RTL_NEW_AUTOCH for (i=0; i<priv->site_survey->count; i++) { pBss = &priv->site_survey->bss[i]; for (idx=0; idx<priv->available_chnl_num; idx++) { if (pBss->channel == priv->available_chnl[idx]) { if (pBss->channel <= 14) setChannelScore(idx, score2G, 0, MAX_2G_CHANNEL_NUM-1); else score5G[idx - idx_5G_begin] += 5; break; } } } #endif if (idx_2G_end >= 0) for (i=0; i<=idx_2G_end; i++) score[i] = score2G[i]; if (idx_5G_begin >= 0) for (i=idx_5G_begin; i<priv->available_chnl_num; i++) score[i] = score5G[i - idx_5G_begin]; #ifdef CONFIG_RTL_NEW_AUTOCH { u4Byte y, ch_begin=0, ch_end= priv->available_chnl_num; u4Byte do_ap_check = 1, ap_ratio = 0; if (idx_2G_end >= 0) ch_end = idx_2G_end+1; if (idx_5G_begin >= 0) ch_begin = idx_5G_begin; #ifdef ACS_DEBUG_INFO//for debug printk("\n"); for (y=ch_begin; y<ch_end; y++) printk("1. init: chnl[%d] 20M_rx[%d] 40M_rx[%d] fa_cnt[%d] score[%d]\n", priv->available_chnl[y], priv->chnl_ss_mac_rx_count[y], priv->chnl_ss_mac_rx_count_40M[y], priv->chnl_ss_fa_count[y], score[y]); printk("\n"); #endif #if defined(CONFIG_RTL_88E_SUPPORT) || defined(CONFIG_WLAN_HAL_8192EE) if( pDM_Odm->SupportICType&(ODM_RTL8188E|ODM_RTL8192E)&& priv->pmib->dot11RFEntry.acs_type ) { u4Byte tmp_score[MAX_BSS_NUM]; memcpy(tmp_score, score, sizeof(score)); if (find_clean_channel(priv, ch_begin, ch_end, tmp_score)) { //memcpy(score, tmp_score, sizeof(score)); #ifdef _DEBUG_RTL8192CD_ printk("!! Found clean channel, select minimum FA channel\n"); #endif goto USE_CLN_CH; } #ifdef _DEBUG_RTL8192CD_ printk("!! 
Not found clean channel, use NHM algorithm\n"); #endif use_nhm = 1; USE_CLN_CH: for (y=ch_begin; y<ch_end; y++) { for (i=0; i<=9; i++) { u4Byte val32 = priv->nhm_cnt[y][i]; for (j=0; j<i; j++) val32 *= 3; score[y] += val32; } #ifdef _DEBUG_RTL8192CD_ printk("nhm_cnt_%d: H<-[ %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d]->L, score: %d\n", y+1, priv->nhm_cnt[y][9], priv->nhm_cnt[y][8], priv->nhm_cnt[y][7], priv->nhm_cnt[y][6], priv->nhm_cnt[y][5], priv->nhm_cnt[y][4], priv->nhm_cnt[y][3], priv->nhm_cnt[y][2], priv->nhm_cnt[y][1], priv->nhm_cnt[y][0], score[y]); #endif } if (!use_nhm) memcpy(score, tmp_score, sizeof(score)); goto choose_ch; } #endif // For each channel, weighting behind channels with MAC RX counter //For each channel, weighting the channel with FA counter for (y=ch_begin; y<ch_end; y++) { score[y] += 8 * priv->chnl_ss_mac_rx_count[y]; if (priv->chnl_ss_mac_rx_count[y] > 30) do_ap_check = 0; if( priv->chnl_ss_mac_rx_count[y] > MAC_RX_COUNT_THRESHOLD ) traffic_check = 1; #ifdef RTK_5G_SUPPORT if (priv->pmib->dot11RFEntry.phyBandSelect == PHY_BAND_2G) #endif { if ((int)(y-4) >= (int)ch_begin) score[y-4] += 2 * priv->chnl_ss_mac_rx_count[y]; if ((int)(y-3) >= (int)ch_begin) score[y-3] += 8 * priv->chnl_ss_mac_rx_count[y]; if ((int)(y-2) >= (int)ch_begin) score[y-2] += 8 * priv->chnl_ss_mac_rx_count[y]; if ((int)(y-1) >= (int)ch_begin) score[y-1] += 10 * priv->chnl_ss_mac_rx_count[y]; if ((int)(y+1) < (int)ch_end) score[y+1] += 10 * priv->chnl_ss_mac_rx_count[y]; if ((int)(y+2) < (int)ch_end) score[y+2] += 8 * priv->chnl_ss_mac_rx_count[y]; if ((int)(y+3) < (int)ch_end) score[y+3] += 8 * priv->chnl_ss_mac_rx_count[y]; if ((int)(y+4) < (int)ch_end) score[y+4] += 2 * priv->chnl_ss_mac_rx_count[y]; } //this is for CH_LOAD caculation if( priv->chnl_ss_cca_count[y] > priv->chnl_ss_fa_count[y]) priv->chnl_ss_cca_count[y]-= priv->chnl_ss_fa_count[y]; else priv->chnl_ss_cca_count[y] = 0; } #ifdef ACS_DEBUG_INFO//for debug printk("\n"); for (y=ch_begin; y<ch_end; y++) printk("2. 
after 20M check: chnl[%d] score[%d]\n",priv->available_chnl[y], score[y]); printk("\n"); #endif for (y=ch_begin; y<ch_end; y++) { if (priv->chnl_ss_mac_rx_count_40M[y]) { score[y] += 5 * priv->chnl_ss_mac_rx_count_40M[y]; if (priv->chnl_ss_mac_rx_count_40M[y] > 30) do_ap_check = 0; if( priv->chnl_ss_mac_rx_count_40M[y] > MAC_RX_COUNT_THRESHOLD ) traffic_check = 1; #ifdef RTK_5G_SUPPORT if (priv->pmib->dot11RFEntry.phyBandSelect == PHY_BAND_2G) #endif { if ((int)(y-6) >= (int)ch_begin) score[y-6] += 1 * priv->chnl_ss_mac_rx_count_40M[y]; if ((int)(y-5) >= (int)ch_begin) score[y-5] += 4 * priv->chnl_ss_mac_rx_count_40M[y]; if ((int)(y-4) >= (int)ch_begin) score[y-4] += 4 * priv->chnl_ss_mac_rx_count_40M[y]; if ((int)(y-3) >= (int)ch_begin) score[y-3] += 5 * priv->chnl_ss_mac_rx_count_40M[y]; if ((int)(y-2) >= (int)ch_begin) score[y-2] += (5 * priv->chnl_ss_mac_rx_count_40M[y])/2; if ((int)(y-1) >= (int)ch_begin) score[y-1] += 5 * priv->chnl_ss_mac_rx_count_40M[y]; if ((int)(y+1) < (int)ch_end) score[y+1] += 5 * priv->chnl_ss_mac_rx_count_40M[y]; if ((int)(y+2) < (int)ch_end) score[y+2] += (5 * priv->chnl_ss_mac_rx_count_40M[y])/2; if ((int)(y+3) < (int)ch_end) score[y+3] += 5 * priv->chnl_ss_mac_rx_count_40M[y]; if ((int)(y+4) < (int)ch_end) score[y+4] += 4 * priv->chnl_ss_mac_rx_count_40M[y]; if ((int)(y+5) < (int)ch_end) score[y+5] += 4 * priv->chnl_ss_mac_rx_count_40M[y]; if ((int)(y+6) < (int)ch_end) score[y+6] += 1 * priv->chnl_ss_mac_rx_count_40M[y]; } } } #ifdef ACS_DEBUG_INFO//for debug printk("\n"); for (y=ch_begin; y<ch_end; y++) printk("3. after 40M check: chnl[%d] score[%d]\n",priv->available_chnl[y], score[y]); printk("\n"); printk("4. do_ap_check=%d traffic_check=%d\n", do_ap_check, traffic_check); printk("\n"); #endif if( traffic_check == 0) fa_count_weighting = 5; else fa_count_weighting = 1; for (y=ch_begin; y<ch_end; y++) { score[y] += fa_count_weighting * priv->chnl_ss_fa_count[y]; } #ifdef ACS_DEBUG_INFO//for debug printk("\n"); for (y=ch_begin; y<ch_end; y++) printk("5. 
after fa check: chnl[%d] score[%d]\n",priv->available_chnl[y], score[y]); printk("\n"); #endif if (do_ap_check) { for (i=0; i<priv->site_survey->count; i++) { pBss = &priv->site_survey->bss[i]; for (y=ch_begin; y<ch_end; y++) { if (pBss->channel == priv->available_chnl[y]) { if (pBss->channel <= 14) { #ifdef ACS_DEBUG_INFO//for debug printk("\n"); printk("chnl[%d] has ap rssi=%d bw[0x%02x]\n", pBss->channel, pBss->rssi, pBss->t_stamp[1]); printk("\n"); #endif if (pBss->rssi > 60) ap_ratio = 4; else if (pBss->rssi > 35) ap_ratio = 2; else ap_ratio = 1; if ((pBss->t_stamp[1] & 0x6) == 0) { score[y] += 50 * ap_ratio; if ((int)(y-4) >= (int)ch_begin) score[y-4] += 10 * ap_ratio; if ((int)(y-3) >= (int)ch_begin) score[y-3] += 20 * ap_ratio; if ((int)(y-2) >= (int)ch_begin) score[y-2] += 30 * ap_ratio; if ((int)(y-1) >= (int)ch_begin) score[y-1] += 40 * ap_ratio; if ((int)(y+1) < (int)ch_end) score[y+1] += 40 * ap_ratio; if ((int)(y+2) < (int)ch_end) score[y+2] += 30 * ap_ratio; if ((int)(y+3) < (int)ch_end) score[y+3] += 20 * ap_ratio; if ((int)(y+4) < (int)ch_end) score[y+4] += 10 * ap_ratio; } else if ((pBss->t_stamp[1] & 0x4) == 0) { score[y] += 50 * ap_ratio; if ((int)(y-3) >= (int)ch_begin) score[y-3] += 20 * ap_ratio; if ((int)(y-2) >= (int)ch_begin) score[y-2] += 30 * ap_ratio; if ((int)(y-1) >= (int)ch_begin) score[y-1] += 40 * ap_ratio; if ((int)(y+1) < (int)ch_end) score[y+1] += 50 * ap_ratio; if ((int)(y+2) < (int)ch_end) score[y+2] += 50 * ap_ratio; if ((int)(y+3) < (int)ch_end) score[y+3] += 50 * ap_ratio; if ((int)(y+4) < (int)ch_end) score[y+4] += 50 * ap_ratio; if ((int)(y+5) < (int)ch_end) score[y+5] += 40 * ap_ratio; if ((int)(y+6) < (int)ch_end) score[y+6] += 30 * ap_ratio; if ((int)(y+7) < (int)ch_end) score[y+7] += 20 * ap_ratio; } else { score[y] += 50 * ap_ratio; if ((int)(y-7) >= (int)ch_begin) score[y-7] += 20 * ap_ratio; if ((int)(y-6) >= (int)ch_begin) score[y-6] += 30 * ap_ratio; if ((int)(y-5) >= (int)ch_begin) score[y-5] += 40 * ap_ratio; if ((int)(y-4) >= (int)ch_begin) score[y-4] += 50 * ap_ratio; if ((int)(y-3) >= (int)ch_begin) score[y-3] += 50 * ap_ratio; if ((int)(y-2) >= (int)ch_begin) score[y-2] += 50 * ap_ratio; if ((int)(y-1) >= (int)ch_begin) score[y-1] += 50 * ap_ratio; if ((int)(y+1) < (int)ch_end) score[y+1] += 40 * ap_ratio; if ((int)(y+2) < (int)ch_end) score[y+2] += 30 * ap_ratio; if ((int)(y+3) < (int)ch_end) score[y+3] += 20 * ap_ratio; } } else { if ((pBss->t_stamp[1] & 0x6) == 0) { score[y] += 500; } else if ((pBss->t_stamp[1] & 0x4) == 0) { score[y] += 500; if ((int)(y+1) < (int)ch_end) score[y+1] += 500; } else { score[y] += 500; if ((int)(y-1) >= (int)ch_begin) score[y-1] += 500; } } break; } } } } #ifdef ACS_DEBUG_INFO//for debug printk("\n"); for (y=ch_begin; y<ch_end; y++) printk("6. 
after ap check: chnl[%d]:%d\n", priv->available_chnl[y],score[y]); printk("\n"); #endif #ifdef SS_CH_LOAD_PROC // caculate noise level -- suggested by wilson for (y=ch_begin; y<ch_end; y++) { int fa_lv=0, cca_lv=0; if (priv->chnl_ss_fa_count[y]>1000) { fa_lv = 100; } else if (priv->chnl_ss_fa_count[y]>500) { fa_lv = 34 * (priv->chnl_ss_fa_count[y]-500) / 500 + 66; } else if (priv->chnl_ss_fa_count[y]>200) { fa_lv = 33 * (priv->chnl_ss_fa_count[y] - 200) / 300 + 33; } else if (priv->chnl_ss_fa_count[y]>100) { fa_lv = 18 * (priv->chnl_ss_fa_count[y] - 100) / 100 + 15; } else { fa_lv = 15 * priv->chnl_ss_fa_count[y] / 100; } if (priv->chnl_ss_cca_count[y]>400) { cca_lv = 100; } else if (priv->chnl_ss_cca_count[y]>200) { cca_lv = 34 * (priv->chnl_ss_cca_count[y] - 200) / 200 + 66; } else if (priv->chnl_ss_cca_count[y]>80) { cca_lv = 33 * (priv->chnl_ss_cca_count[y] - 80) / 120 + 33; } else if (priv->chnl_ss_cca_count[y]>40) { cca_lv = 18 * (priv->chnl_ss_cca_count[y] - 40) / 40 + 15; } else { cca_lv = 15 * priv->chnl_ss_cca_count[y] / 40; } priv->chnl_ss_load[y] = (((fa_lv > cca_lv)? fa_lv : cca_lv)*75+((score[y]>100)?100:score[y])*25)/100; DEBUG_INFO("ch:%d f=%d (%d), c=%d (%d), fl=%d, cl=%d, sc=%d, cu=%d\n", priv->available_chnl[y], priv->chnl_ss_fa_count[y], fa_thd, priv->chnl_ss_cca_count[y], cca_thd, fa_lv, cca_lv, score[y], priv->chnl_ss_load[y]); } #endif } #endif choose_ch: #ifdef DFS // heavy weighted DFS channel if (idx_5G_begin >= 0){ for (i=idx_5G_begin; i<priv->available_chnl_num; i++) { if (!priv->pmib->dot11DFSEntry.disable_DFS && is_DFS_channel(priv->available_chnl[i]) && (score[i]!= 0xffffffff)){ score[i] += 1600; } } } #endif //prevent Auto Channel selecting wrong channel in 40M mode----------------- if ((priv->pmib->dot11BssType.net_work_type & WIRELESS_11N) && priv->pshare->is_40m_bw) { #if 0 if (GET_MIB(priv)->dot11nConfigEntry.dot11n2ndChOffset == 1) { //Upper Primary Channel, cannot select the two lowest channels if (priv->pmib->dot11BssType.net_work_type & WIRELESS_11G) { score[0] = 0xffffffff; score[1] = 0xffffffff; score[2] = 0xffffffff; score[3] = 0xffffffff; score[4] = 0xffffffff; score[13] = 0xffffffff; score[12] = 0xffffffff; score[11] = 0xffffffff; } // if (priv->pmib->dot11BssType.net_work_type & WIRELESS_11A) { // score[idx_5G_begin] = 0xffffffff; // score[idx_5G_begin + 1] = 0xffffffff; // } } else if (GET_MIB(priv)->dot11nConfigEntry.dot11n2ndChOffset == 2) { //Lower Primary Channel, cannot select the two highest channels if (priv->pmib->dot11BssType.net_work_type & WIRELESS_11G) { score[0] = 0xffffffff; score[1] = 0xffffffff; score[2] = 0xffffffff; score[13] = 0xffffffff; score[12] = 0xffffffff; score[11] = 0xffffffff; score[10] = 0xffffffff; score[9] = 0xffffffff; } // if (priv->pmib->dot11BssType.net_work_type & WIRELESS_11A) { // score[priv->available_chnl_num - 2] = 0xffffffff; // score[priv->available_chnl_num - 1] = 0xffffffff; // } } #endif for (i=0; i<=idx_2G_end; ++i) if (priv->available_chnl[i] == 14) score[i] = 0xffffffff; // mask chan14 #ifdef RTK_5G_SUPPORT if (idx_5G_begin >= 0) { for (i=idx_5G_begin; i<priv->available_chnl_num; i++) { int ch = priv->available_chnl[i]; if(priv->available_chnl[i] > 144) --ch; if((ch%4) || ch==140 || ch == 164 ) //mask ch 140, ch 165, ch 184... 
score[i] = 0xffffffff; } } #endif } if (priv->pmib->dot11RFEntry.disable_ch1213) { for (i=0; i<=idx_2G_end; ++i) { int ch = priv->available_chnl[i]; if ((ch == 12) || (ch == 13)) score[i] = 0xffffffff; } } if (((priv->pmib->dot11StationConfigEntry.dot11RegDomain == DOMAIN_GLOBAL) || (priv->pmib->dot11StationConfigEntry.dot11RegDomain == DOMAIN_WORLD_WIDE)) && (idx_2G_end >= 11) && (idx_2G_end < 14)) { score[13] = 0xffffffff; // mask chan14 score[12] = 0xffffffff; // mask chan13 score[11] = 0xffffffff; // mask chan12 } //------------------------------------------------------------------ #ifdef _DEBUG_RTL8192CD_ for (i=0; i<priv->available_chnl_num; i++) { len += sprintf(tmpbuf+len, "ch%d:%u ", priv->available_chnl[i], score[i]); } strcat(tmpbuf, "\n"); panic_printk("%s", tmpbuf); #endif if ( (priv->pmib->dot11RFEntry.phyBandSelect == PHY_BAND_5G) && (priv->pmib->dot11nConfigEntry.dot11nUse40M == HT_CHANNEL_WIDTH_80)) { for (i=0; i<priv->available_chnl_num; i++) { if (is80MChannel(priv->available_chnl, priv->available_chnl_num, priv->available_chnl[i])) { tmpScore = 0; for (j=0; j<4; j++) { if ((tmpScore != 0xffffffff) && (score[i+j] != 0xffffffff)) tmpScore += score[i+j]; else tmpScore = 0xffffffff; } tmpScore = tmpScore / 4; if (minScore > tmpScore) { minScore = tmpScore; tmpScore = 0xffffffff; for (j=0; j<4; j++) { if (score[i+j] < tmpScore) { tmpScore = score[i+j]; tmpIdx = i+j; } } idx = tmpIdx; } i += 3; } } if (minScore == 0xffffffff) { // there is no 80M channels priv->pshare->is_40m_bw = HT_CHANNEL_WIDTH_20; for (i=0; i<priv->available_chnl_num; i++) { if (score[i] < minScore) { minScore = score[i]; idx = i; } } } } else if( (priv->pmib->dot11RFEntry.phyBandSelect == PHY_BAND_5G) && (priv->pmib->dot11nConfigEntry.dot11nUse40M == HT_CHANNEL_WIDTH_20_40)) { for (i=0; i<priv->available_chnl_num; i++) { if(is40MChannel(priv->available_chnl,priv->available_chnl_num,priv->available_chnl[i])) { tmpScore = 0; for(j=0;j<2;j++) { if ((tmpScore != 0xffffffff) && (score[i+j] != 0xffffffff)) tmpScore += score[i+j]; else tmpScore = 0xffffffff; } tmpScore = tmpScore / 2; if(minScore > tmpScore) { minScore = tmpScore; tmpScore = 0xffffffff; for (j=0; j<2; j++) { if (score[i+j] < tmpScore) { tmpScore = score[i+j]; tmpIdx = i+j; } } idx = tmpIdx; } i += 1; } } if (minScore == 0xffffffff) { // there is no 40M channels priv->pshare->is_40m_bw = HT_CHANNEL_WIDTH_20; for (i=0; i<priv->available_chnl_num; i++) { if (score[i] < minScore) { minScore = score[i]; idx = i; } } } } else if( (priv->pmib->dot11RFEntry.phyBandSelect == PHY_BAND_2G) && (priv->pmib->dot11nConfigEntry.dot11nUse40M == HT_CHANNEL_WIDTH_20_40) && (priv->available_chnl_num >= 8) ) { u4Byte groupScore[14]; memset(groupScore, 0xff , sizeof(groupScore)); for (i=0; i<priv->available_chnl_num-4; i++) { if (score[i] != 0xffffffff && score[i+4] != 0xffffffff) { groupScore[i] = score[i] + score[i+4]; DEBUG_INFO("groupScore, ch %d,%d: %d\n", i+1, i+5, groupScore[i]); if (groupScore[i] < minScore) { #ifdef AUTOCH_SS_SPEEDUP if(priv->pmib->miscEntry.autoch_1611_enable) { if(priv->available_chnl[i]==1 || priv->available_chnl[i]==6 || priv->available_chnl[i]==11) { minScore = groupScore[i]; idx = i; } } else #endif { minScore = groupScore[i]; idx = i; } } } } if (score[idx] < score[idx+4]) { GET_MIB(priv)->dot11nConfigEntry.dot11n2ndChOffset = HT_2NDCH_OFFSET_ABOVE; priv->pshare->offset_2nd_chan = HT_2NDCH_OFFSET_ABOVE; } else { idx = idx + 4; GET_MIB(priv)->dot11nConfigEntry.dot11n2ndChOffset = HT_2NDCH_OFFSET_BELOW; priv->pshare->offset_2nd_chan = 
HT_2NDCH_OFFSET_BELOW; } } else { for (i=0; i<priv->available_chnl_num; i++) { if (score[i] < minScore) { #ifdef AUTOCH_SS_SPEEDUP if(priv->pmib->miscEntry.autoch_1611_enable) { if(priv->available_chnl[i]==1 || priv->available_chnl[i]==6 || priv->available_chnl[i]==11) { minScore = score[i]; idx = i; } } else #endif { minScore = score[i]; idx = i; } } } } if (IS_A_CUT_8881A(priv) && (priv->pmib->dot11nConfigEntry.dot11nUse40M == HT_CHANNEL_WIDTH_80)) { if ((priv->available_chnl[idx] == 36) || (priv->available_chnl[idx] == 52) || (priv->available_chnl[idx] == 100) || (priv->available_chnl[idx] == 116) || (priv->available_chnl[idx] == 132) || (priv->available_chnl[idx] == 149) || (priv->available_chnl[idx] == 165)) idx++; else if ((priv->available_chnl[idx] == 48) || (priv->available_chnl[idx] == 64) || (priv->available_chnl[idx] == 112) || (priv->available_chnl[idx] == 128) || (priv->available_chnl[idx] == 144) || (priv->available_chnl[idx] == 161) || (priv->available_chnl[idx] == 177)) idx--; } minChan = priv->available_chnl[idx]; // skip channel 14 if don't support ofdm if ((priv->pmib->dot11RFEntry.disable_ch14_ofdm) && (minChan == 14)) { score[idx] = 0xffffffff; minScore = 0xffffffff; for (i=0; i<priv->available_chnl_num; i++) { if (score[i] < minScore) { minScore = score[i]; idx = i; } } minChan = priv->available_chnl[idx]; } #if 0 //Check if selected channel available for 80M/40M BW or NOT ? if(priv->pmib->dot11RFEntry.phyBandSelect == PHY_BAND_5G) { if(priv->pmib->dot11nConfigEntry.dot11nUse40M == HT_CHANNEL_WIDTH_80) { if(!is80MChannel(priv->available_chnl,priv->available_chnl_num,minChan)) { //printk("BW=80M, selected channel = %d is unavaliable! reduce to 40M\n", minChan); //priv->pmib->dot11nConfigEntry.dot11nUse40M = HT_CHANNEL_WIDTH_20_40; priv->pshare->is_40m_bw = HT_CHANNEL_WIDTH_20_40; } } if(priv->pmib->dot11nConfigEntry.dot11nUse40M == HT_CHANNEL_WIDTH_20_40) { if(!is40MChannel(priv->available_chnl,priv->available_chnl_num,minChan)) { //printk("BW=40M, selected channel = %d is unavaliable! reduce to 20M\n", minChan); //priv->pmib->dot11nConfigEntry.dot11nUse40M = HT_CHANNEL_WIDTH_20; priv->pshare->is_40m_bw = HT_CHANNEL_WIDTH_20; } } } #endif #ifdef CONFIG_RTL_NEW_AUTOCH RTL_W32(RXERR_RPT, RXERR_RPT_RST); #endif // auto adjust contro-sideband if ((priv->pmib->dot11BssType.net_work_type & WIRELESS_11N) && (priv->pshare->is_40m_bw ==1 || priv->pshare->is_40m_bw ==2)) { #ifdef RTK_5G_SUPPORT if (priv->pmib->dot11RFEntry.phyBandSelect & PHY_BAND_5G) { if( (minChan>144) ? ((minChan-1)%8) : (minChan%8)) { GET_MIB(priv)->dot11nConfigEntry.dot11n2ndChOffset = HT_2NDCH_OFFSET_ABOVE; priv->pshare->offset_2nd_chan = HT_2NDCH_OFFSET_ABOVE; } else { GET_MIB(priv)->dot11nConfigEntry.dot11n2ndChOffset = HT_2NDCH_OFFSET_BELOW; priv->pshare->offset_2nd_chan = HT_2NDCH_OFFSET_BELOW; } } else #endif { #if 0 #ifdef CONFIG_RTL_NEW_AUTOCH unsigned int ch_max; if (priv->available_chnl[idx_2G_end] >= 13) ch_max = 13; else ch_max = priv->available_chnl[idx_2G_end]; if ((minChan >= 5) && (minChan <= (ch_max-5))) { if (score[minChan+4] > score[minChan-4]) { // what if some channels were cancelled? 
GET_MIB(priv)->dot11nConfigEntry.dot11n2ndChOffset = HT_2NDCH_OFFSET_BELOW; priv->pshare->offset_2nd_chan = HT_2NDCH_OFFSET_BELOW; } else { GET_MIB(priv)->dot11nConfigEntry.dot11n2ndChOffset = HT_2NDCH_OFFSET_ABOVE; priv->pshare->offset_2nd_chan = HT_2NDCH_OFFSET_ABOVE; } } else #endif { if (minChan < 5) { GET_MIB(priv)->dot11nConfigEntry.dot11n2ndChOffset = HT_2NDCH_OFFSET_ABOVE; priv->pshare->offset_2nd_chan = HT_2NDCH_OFFSET_ABOVE; } else if (minChan > 7) { GET_MIB(priv)->dot11nConfigEntry.dot11n2ndChOffset = HT_2NDCH_OFFSET_BELOW; priv->pshare->offset_2nd_chan = HT_2NDCH_OFFSET_BELOW; } } #endif } } //----------------------- #if defined(__ECOS) && defined(CONFIG_SDIO_HCI) panic_printk("Auto channel choose ch:%d\n", minChan); #else #ifdef _DEBUG_RTL8192CD_ panic_printk("Auto channel choose ch:%d\n", minChan); #endif #endif #ifdef ACS_DEBUG_INFO//for debug printk("7. minChan:%d 2nd_offset:%d\n", minChan, priv->pshare->offset_2nd_chan); #endif return minChan; } */ #endif VOID phydm_CLMInit( IN PVOID pDM_VOID, IN u2Byte sampleNum /*unit : 4us*/ ) { PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) { ODM_SetBBReg(pDM_Odm, ODM_REG_CLM_TIME_PERIOD_11AC, bMaskLWord, sampleNum); /*4us sample 1 time*/ ODM_SetBBReg(pDM_Odm, ODM_REG_CLM_11AC, BIT8, 0x1); /*Enable CCX for CLM*/ } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) { ODM_SetBBReg(pDM_Odm, ODM_REG_CLM_TIME_PERIOD_11N, bMaskLWord, sampleNum); /*4us sample 1 time*/ ODM_SetBBReg(pDM_Odm, ODM_REG_CLM_11N, BIT8, 0x1); /*Enable CCX for CLM*/ } ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("[%s] : CLM sampleNum = %d\n", __func__, sampleNum)); } VOID phydm_CLMtrigger( IN PVOID pDM_VOID ) { PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) { ODM_SetBBReg(pDM_Odm, ODM_REG_CLM_11AC, BIT0, 0x0); /*Trigger CLM*/ ODM_SetBBReg(pDM_Odm, ODM_REG_CLM_11AC, BIT0, 0x1); } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) { ODM_SetBBReg(pDM_Odm, ODM_REG_CLM_11N, BIT0, 0x0); /*Trigger CLM*/ ODM_SetBBReg(pDM_Odm, ODM_REG_CLM_11N, BIT0, 0x1); } } BOOLEAN phydm_checkCLMready( IN PVOID pDM_VOID ) { PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; u4Byte value32 = 0; BOOLEAN ret = FALSE; if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) value32 = ODM_GetBBReg(pDM_Odm, ODM_REG_CLM_RESULT_11AC, bMaskDWord); /*make sure CLM calc is ready*/ else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) value32 = ODM_GetBBReg(pDM_Odm, ODM_REG_CLM_READY_11N, bMaskDWord); /*make sure CLM calc is ready*/ if (value32 & BIT16) ret = TRUE; else ret = FALSE; ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("[%s] : CLM ready = %d\n", __func__, ret)); return ret; } u2Byte phydm_getCLMresult( IN PVOID pDM_VOID ) { PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID; u4Byte value32 = 0; u2Byte results = 0; if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) value32 = ODM_GetBBReg(pDM_Odm, ODM_REG_CLM_RESULT_11AC, bMaskDWord); /*read CLM calc result*/ else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) value32 = ODM_GetBBReg(pDM_Odm, ODM_REG_CLM_RESULT_11N, bMaskDWord); /*read CLM calc result*/ results = (u2Byte)(value32 & bMaskLWord); ODM_RT_TRACE(pDM_Odm, ODM_COMP_ACS, ODM_DBG_LOUD, ("[%s] : CLM result = %d\n", __func__, results)); return results; /*results are number of CCA times in sampleNum*/ }
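/* [Editorial sketch — not part of phydm_acs.c]  The four CLM helpers above
   are designed to run as init -> trigger -> poll-ready -> read.  This
   hypothetical caller measures channel busyness over a 100 ms window
   (25000 samples * 4 us, the same 0x61a8 budget the ACS NHM setting uses)
   and turns the CCA count into a percentage.  The poll loop and delay
   budget are assumptions, not documented Realtek usage. */
#if 0 /* illustration only */
u2Byte
phydm_CLMBusyPercentExample(
	IN	PVOID	pDM_VOID
	)
{
	u2Byte	sampleNum = 25000;	/* 25000 * 4us = 100ms window */
	u2Byte	ccaCnt;
	u1Byte	i;

	phydm_CLMInit(pDM_VOID, sampleNum);
	phydm_CLMtrigger(pDM_VOID);

	/* Wait at most ~110ms for the measurement to finish. */
	for (i = 0; i < 11; i++) {
		ODM_delay_ms(10);
		if (phydm_checkCLMready(pDM_VOID))
			break;
	}
	if (!phydm_checkCLMready(pDM_VOID))
		return 0;	/* not ready; treat as idle */

	ccaCnt = phydm_getCLMresult(pDM_VOID);	/* CCA hits in the window */
	return (u2Byte)(((u4Byte)ccaCnt * 100) / sampleNum);
}
#endif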
JideTechnology/remixos-kernel
drivers/staging/rtl8812au/hal/phydm/phydm_acs.c
C
gpl-2.0
41700
/* * linux/drivers/video/omapfb/boot_progressbar.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/platform_device.h> #include <plat/vrfb.h> #include "omapfb.h" #define TRUE 1 #define FALSE 0 static int progress_flag = FALSE; static int progress_pos; static struct timer_list progress_timer; #define PROGRESS_BAR_LEFT_POS 54 #define PROGRESS_BAR_RIGHT_POS 425 #define PROGRESS_BAR_START_Y 576 #define PROGRESS_BAR_WIDTH 4 #define PROGRESS_BAR_HEIGHT 8 static unsigned char anycall_progress_bar_left[] = { 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00 }; static unsigned char anycall_progress_bar_right[] = { 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00 }; static unsigned char anycall_progress_bar_center[] = { 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0xf3, 0xc5, 0x00, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00 }; static unsigned char anycall_progress_bar[] = { 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00, 0x33, 0x33, 0x33, 0x00 }; static void progress_timer_handler(unsigned long data); static int show_progress = 1; module_param_named(progress, show_progress, bool, 0); static void 
omapfb_update_framebuffer( \ struct fb_info *fb, int x, int y, void *buffer, \ int src_width, int src_height) { struct omapfb_info *ofbi = FB2OFB(fb); struct omapfb2_device *fbdev = ofbi->fbdev; struct fb_fix_screeninfo *fix = &fb->fix; struct fb_var_screeninfo *var = &fb->var; int row; int bytes_per_pixel = (var->bits_per_pixel / 8); unsigned char *pSrc = buffer; unsigned char *pDst = fb->screen_base; if (x+src_width > var->xres || y+src_height > var->yres) { dev_err(fbdev->dev, "invalid destination coordinate or" \ " source size (%d, %d) (%d %d)\n", \ x, y, src_width, src_height); return; } pDst += y * fix->line_length + x * bytes_per_pixel; for (row = 0; row < src_height ; row++) { memcpy(pDst, pSrc, src_width * bytes_per_pixel); pSrc += src_width * bytes_per_pixel; pDst += fix->line_length; } } void omapfb_start_progress(struct fb_info *fb) { int x_pos; if (!show_progress) return; init_timer(&progress_timer); progress_timer.expires = (get_jiffies_64() + (HZ/20)); progress_timer.data = (long)fb; progress_timer.function = progress_timer_handler; progress_pos = PROGRESS_BAR_LEFT_POS; /* draw progress background. */ for (x_pos = PROGRESS_BAR_LEFT_POS; x_pos <= PROGRESS_BAR_RIGHT_POS; \ x_pos += PROGRESS_BAR_WIDTH){ omapfb_update_framebuffer(fb, x_pos, PROGRESS_BAR_START_Y, (void *)anycall_progress_bar, PROGRESS_BAR_WIDTH, PROGRESS_BAR_HEIGHT); } omapfb_update_framebuffer(fb, PROGRESS_BAR_LEFT_POS, PROGRESS_BAR_START_Y, (void *)anycall_progress_bar_left, PROGRESS_BAR_WIDTH, PROGRESS_BAR_HEIGHT); progress_pos += PROGRESS_BAR_WIDTH; omapfb_update_framebuffer(fb, progress_pos, PROGRESS_BAR_START_Y, (void *)anycall_progress_bar_right, PROGRESS_BAR_WIDTH, PROGRESS_BAR_HEIGHT); add_timer(&progress_timer); progress_flag = TRUE; } static void omapfb_stop_progress(void) { if (progress_flag == FALSE) return; del_timer(&progress_timer); progress_flag = 0; } static void progress_timer_handler(unsigned long data) { int i; for (i = 0; i < PROGRESS_BAR_WIDTH; i++) { omapfb_update_framebuffer((struct fb_info *)data, progress_pos++, PROGRESS_BAR_START_Y, (void *)anycall_progress_bar_center, 1, PROGRESS_BAR_HEIGHT); } omapfb_update_framebuffer((struct fb_info *)data, progress_pos, PROGRESS_BAR_START_Y, (void *)anycall_progress_bar_right, PROGRESS_BAR_WIDTH, PROGRESS_BAR_HEIGHT); if (progress_pos + PROGRESS_BAR_WIDTH >= PROGRESS_BAR_RIGHT_POS ) { omapfb_stop_progress(); } else { progress_timer.expires = (get_jiffies_64() + (HZ/20)); progress_timer.function = progress_timer_handler; add_timer(&progress_timer); } }
dhiru1602/android_kernel_samsung_latona
drivers/video/omap2/omapfb/omapfb-progressbar.c
C
gpl-2.0
6,333
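The core of the driver above is the row-by-row blit in omapfb_update_framebuffer: offset the destination by y * line_length + x * bytes_per_pixel, then memcpy one source row at a time while advancing the destination by the stride. Below is a minimal user-space sketch of that loop; the framebuffer geometry (FB_WIDTH, FB_HEIGHT, BPP) and all function names are hypothetical stand-ins, not part of the driver.

#include <stdio.h>
#include <string.h>

#define FB_WIDTH  16   /* hypothetical framebuffer geometry */
#define FB_HEIGHT 4
#define BPP       4    /* bytes per pixel, matching the 32-bit tables above */

static unsigned char fb[FB_HEIGHT * FB_WIDTH * BPP];

/* Same copy loop as the driver: one memcpy per source row, advancing
 * the destination by the line length (stride) each time. */
static int blit(int x, int y, const unsigned char *src, int src_w, int src_h)
{
    int row;
    int line_length = FB_WIDTH * BPP;
    unsigned char *dst = fb + y * line_length + x * BPP;

    if (x + src_w > FB_WIDTH || y + src_h > FB_HEIGHT)
        return -1; /* reject out-of-bounds blits, as the driver does */

    for (row = 0; row < src_h; row++) {
        memcpy(dst, src, src_w * BPP);
        src += src_w * BPP;
        dst += line_length;
    }
    return 0;
}

int main(void)
{
    /* a 2x2 all-0xf3 tile standing in for a progress-bar segment */
    unsigned char tile[2 * 2 * BPP];
    memset(tile, 0xf3, sizeof tile);

    if (blit(3, 1, tile, 2, 2) == 0)
        printf("pixel (3,1) byte 0 = 0x%02x\n",
               fb[1 * FB_WIDTH * BPP + 3 * BPP]);
    return 0;
}

Modeling the framebuffer as a flat byte array keeps the bounds check and the stride arithmetic easy to exercise outside the kernel.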
/* * empathy-streamed-media-factory.c - Source for EmpathyStreamedMediaFactory * Copyright (C) 2008-2011 Collabora Ltd. * @author Sjoerd Simons <sjoerd.simons@collabora.co.uk> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <telepathy-glib/account-channel-request.h> #include <telepathy-glib/simple-handler.h> #include <telepathy-glib/interfaces.h> #include <telepathy-glib/util.h> #include <libempathy/empathy-request-util.h> #include <libempathy/empathy-utils.h> #include "empathy-streamed-media-factory.h" #include "empathy-streamed-media-handler.h" #define DEBUG_FLAG EMPATHY_DEBUG_VOIP #include <libempathy/empathy-debug.h> G_DEFINE_TYPE(EmpathyStreamedMediaFactory, empathy_streamed_media_factory, G_TYPE_OBJECT) static void handle_channels_cb (TpSimpleHandler *handler, TpAccount *account, TpConnection *connection, GList *channels, GList *requests_satisfied, gint64 user_action_time, TpHandleChannelsContext *context, gpointer user_data); /* signal enum */ enum { NEW_STREAMED_MEDIA_HANDLER, LAST_SIGNAL }; static guint signals[LAST_SIGNAL] = {0}; /* private structure */ typedef struct { TpBaseClient *handler; gboolean dispose_has_run; } EmpathyStreamedMediaFactoryPriv; #define GET_PRIV(obj) EMPATHY_GET_PRIV (obj, EmpathyStreamedMediaFactory) static GObject *call_factory = NULL; static void empathy_streamed_media_factory_init (EmpathyStreamedMediaFactory *obj) { EmpathyStreamedMediaFactoryPriv *priv = G_TYPE_INSTANCE_GET_PRIVATE (obj, EMPATHY_TYPE_STREAMED_MEDIA_FACTORY, EmpathyStreamedMediaFactoryPriv); TpAccountManager *am; obj->priv = priv; am = tp_account_manager_dup (); priv->handler = tp_simple_handler_new_with_am (am, FALSE, FALSE, EMPATHY_AV_BUS_NAME_SUFFIX, FALSE, handle_channels_cb, obj, NULL); tp_base_client_take_handler_filter (priv->handler, tp_asv_new ( TP_PROP_CHANNEL_CHANNEL_TYPE, G_TYPE_STRING, TP_IFACE_CHANNEL_TYPE_STREAMED_MEDIA, TP_PROP_CHANNEL_TARGET_HANDLE_TYPE, G_TYPE_UINT, TP_HANDLE_TYPE_CONTACT, NULL)); tp_base_client_take_handler_filter (priv->handler, tp_asv_new ( TP_PROP_CHANNEL_CHANNEL_TYPE, G_TYPE_STRING, TP_IFACE_CHANNEL_TYPE_STREAMED_MEDIA, TP_PROP_CHANNEL_TARGET_HANDLE_TYPE, G_TYPE_UINT, TP_HANDLE_TYPE_CONTACT, TP_PROP_CHANNEL_TYPE_STREAMED_MEDIA_INITIAL_AUDIO, G_TYPE_BOOLEAN, TRUE, NULL)); tp_base_client_take_handler_filter (priv->handler, tp_asv_new ( TP_PROP_CHANNEL_CHANNEL_TYPE, G_TYPE_STRING, TP_IFACE_CHANNEL_TYPE_STREAMED_MEDIA, TP_PROP_CHANNEL_TARGET_HANDLE_TYPE, G_TYPE_UINT, TP_HANDLE_TYPE_CONTACT, TP_PROP_CHANNEL_TYPE_STREAMED_MEDIA_INITIAL_VIDEO, G_TYPE_BOOLEAN, TRUE, NULL)); tp_base_client_add_handler_capabilities_varargs (priv->handler, "org.freedesktop.Telepathy.Channel.Interface.MediaSignalling/ice-udp", "org.freedesktop.Telepathy.Channel.Interface.MediaSignalling/gtalk-p2p", 
"org.freedesktop.Telepathy.Channel.Interface.MediaSignalling/video/h264", NULL); g_object_unref (am); } static GObject * empathy_streamed_media_factory_constructor (GType type, guint n_construct_params, GObjectConstructParam *construct_params) { g_return_val_if_fail (call_factory == NULL, NULL); call_factory = G_OBJECT_CLASS (empathy_streamed_media_factory_parent_class)->constructor (type, n_construct_params, construct_params); g_object_add_weak_pointer (call_factory, (gpointer)&call_factory); return call_factory; } static void empathy_streamed_media_factory_finalize (GObject *object) { /* free any data held directly by the object here */ if (G_OBJECT_CLASS (empathy_streamed_media_factory_parent_class)->finalize) G_OBJECT_CLASS (empathy_streamed_media_factory_parent_class)->finalize (object); } static void empathy_streamed_media_factory_dispose (GObject *object) { EmpathyStreamedMediaFactoryPriv *priv = GET_PRIV (object); if (priv->dispose_has_run) return; priv->dispose_has_run = TRUE; tp_clear_object (&priv->handler); if (G_OBJECT_CLASS (empathy_streamed_media_factory_parent_class)->dispose) G_OBJECT_CLASS (empathy_streamed_media_factory_parent_class)->dispose (object); } static void empathy_streamed_media_factory_class_init ( EmpathyStreamedMediaFactoryClass *empathy_streamed_media_factory_class) { GObjectClass *object_class = G_OBJECT_CLASS (empathy_streamed_media_factory_class); g_type_class_add_private (empathy_streamed_media_factory_class, sizeof (EmpathyStreamedMediaFactoryPriv)); object_class->constructor = empathy_streamed_media_factory_constructor; object_class->dispose = empathy_streamed_media_factory_dispose; object_class->finalize = empathy_streamed_media_factory_finalize; signals[NEW_STREAMED_MEDIA_HANDLER] = g_signal_new ("new-streamed-media-handler", G_TYPE_FROM_CLASS (empathy_streamed_media_factory_class), G_SIGNAL_RUN_LAST, 0, NULL, NULL, g_cclosure_marshal_generic, G_TYPE_NONE, 2, EMPATHY_TYPE_STREAMED_MEDIA_HANDLER, G_TYPE_BOOLEAN); } EmpathyStreamedMediaFactory * empathy_streamed_media_factory_initialise (void) { g_return_val_if_fail (call_factory == NULL, NULL); return EMPATHY_STREAMED_MEDIA_FACTORY (g_object_new (EMPATHY_TYPE_STREAMED_MEDIA_FACTORY, NULL)); } EmpathyStreamedMediaFactory * empathy_streamed_media_factory_get (void) { g_return_val_if_fail (call_factory != NULL, NULL); return EMPATHY_STREAMED_MEDIA_FACTORY (call_factory); } static void create_streamed_media_handler (EmpathyStreamedMediaFactory *factory, EmpathyTpStreamedMedia *call) { EmpathyStreamedMediaHandler *handler; g_return_if_fail (factory != NULL); handler = empathy_streamed_media_handler_new_for_channel (call); g_signal_emit (factory, signals[NEW_STREAMED_MEDIA_HANDLER], 0, handler, FALSE); g_object_unref (handler); } static void call_status_changed_cb (EmpathyTpStreamedMedia *call, GParamSpec *spec, EmpathyStreamedMediaFactory *self) { if (empathy_tp_streamed_media_get_status (call) <= EMPATHY_TP_STREAMED_MEDIA_STATUS_READYING) return; create_streamed_media_handler (self, call); g_signal_handlers_disconnect_by_func (call, call_status_changed_cb, self); g_object_unref (call); } static void handle_channels_cb (TpSimpleHandler *handler, TpAccount *account, TpConnection *connection, GList *channels, GList *requests_satisfied, gint64 user_action_time, TpHandleChannelsContext *context, gpointer user_data) { EmpathyStreamedMediaFactory *self = user_data; GList *l; for (l = channels; l != NULL; l = g_list_next (l)) { TpChannel *channel = l->data; EmpathyTpStreamedMedia *call; if 
(tp_proxy_get_invalidated (channel) != NULL) continue; if (tp_channel_get_channel_type_id (channel) != TP_IFACE_QUARK_CHANNEL_TYPE_STREAMED_MEDIA) continue; call = empathy_tp_streamed_media_new (account, channel); if (empathy_tp_streamed_media_get_status (call) <= EMPATHY_TP_STREAMED_MEDIA_STATUS_READYING) { /* We have to wait until the TpStreamedMedia is ready, as the * call-handler relies on it. */ tp_g_signal_connect_object (call, "notify::status", G_CALLBACK (call_status_changed_cb), self, 0); continue; } create_streamed_media_handler (self, call); g_object_unref (call); } tp_handle_channels_context_accept (context); } gboolean empathy_streamed_media_factory_register (EmpathyStreamedMediaFactory *self, GError **error) { EmpathyStreamedMediaFactoryPriv *priv = GET_PRIV (self); return tp_base_client_register (priv->handler, error); }
dinoboy197/Empathy
src/empathy-streamed-media-factory.c
C
gpl-2.0
8,514
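empathy_streamed_media_factory_constructor enforces a single instance: it refuses to run twice, caches the object in the static call_factory, and g_object_add_weak_pointer resets that static when the instance dies, so empathy_streamed_media_factory_get can fail loudly after destruction. A plain-C sketch of the same pattern follows, with malloc/free and a manual refcount standing in for the GObject machinery (all names hypothetical):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for EmpathyStreamedMediaFactory. */
typedef struct { int refcount; } Factory;

/* Mirrors the static `call_factory` global plus the weak pointer:
 * destroying the instance resets the global to NULL. */
static Factory *call_factory = NULL;

static Factory *factory_initialise(void)
{
    assert(call_factory == NULL);   /* the constructor's g_return_val_if_fail */
    call_factory = malloc(sizeof *call_factory);
    call_factory->refcount = 1;
    return call_factory;
}

static Factory *factory_get(void)
{
    assert(call_factory != NULL);   /* must be initialised first */
    return call_factory;
}

static void factory_unref(Factory *f)
{
    if (--f->refcount == 0) {
        free(f);
        call_factory = NULL;        /* what g_object_add_weak_pointer provides */
    }
}

int main(void)
{
    Factory *f = factory_initialise();
    printf("get() == initialise(): %d\n", factory_get() == f);
    factory_unref(f);
    printf("global cleared after destroy: %d\n", call_factory == NULL);
    return 0;
}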
/* IGraph library.
   Copyright (C) 2021 The igraph development team <igraph@igraph.org>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <https://www.gnu.org/licenses/>.
*/

#include <igraph.h>
#include "test_utilities.inc"

int main() {
    igraph_t g_empty, g_lm;
    igraph_vector_t result;
    igraph_vs_t vids;

    igraph_rng_seed(igraph_rng_default(), 42);

    igraph_vector_init(&result, 0);
    igraph_vs_all(&vids);

    igraph_small(&g_empty, 0, 0, -1);
    igraph_small(&g_lm, 6, 1, 0,1, 0,2, 1,1, 1,3, 2,0, 2,3, 3,4, 3,4, -1);

    printf("No vertices:\n");
    IGRAPH_ASSERT(igraph_neighborhood_size(&g_empty, &result, vids, /*order*/ 1, /*mode*/ IGRAPH_ALL, /*mindist*/ 0) == IGRAPH_SUCCESS);
    print_vector(&result);

    printf("Directed graph with loops and multi-edges, order 0:\n");
    IGRAPH_ASSERT(igraph_neighborhood_size(&g_lm, &result, vids, /*order*/ 0, /*mode*/ IGRAPH_ALL, /*mindist*/ 0) == IGRAPH_SUCCESS);
    print_vector(&result);

    printf("Directed graph with loops and multi-edges, order 1, ignoring direction:\n");
    IGRAPH_ASSERT(igraph_neighborhood_size(&g_lm, &result, vids, /*order*/ 1, /*mode*/ IGRAPH_ALL, /*mindist*/ 0) == IGRAPH_SUCCESS);
    print_vector(&result);

    printf("Directed graph with loops and multi-edges, order 1, only checking IGRAPH_IN:\n");
    IGRAPH_ASSERT(igraph_neighborhood_size(&g_lm, &result, vids, /*order*/ 1, /*mode*/ IGRAPH_IN, /*mindist*/ 0) == IGRAPH_SUCCESS);
    print_vector(&result);

    printf("Directed graph with loops and multi-edges, order 10, ignoring direction:\n");
    IGRAPH_ASSERT(igraph_neighborhood_size(&g_lm, &result, vids, /*order*/ 10, /*mode*/ IGRAPH_ALL, /*mindist*/ 0) == IGRAPH_SUCCESS);
    print_vector(&result);

    printf("Directed graph with loops and multi-edges, order 2, mindist 2, IGRAPH_OUT:\n");
    IGRAPH_ASSERT(igraph_neighborhood_size(&g_lm, &result, vids, /*order*/ 2, /*mode*/ IGRAPH_OUT, /*mindist*/ 2) == IGRAPH_SUCCESS);
    print_vector(&result);

    printf("Directed graph with loops and multi-edges, order 4, mindist 4, IGRAPH_ALL:\n");
    IGRAPH_ASSERT(igraph_neighborhood_size(&g_lm, &result, vids, /*order*/ 4, /*mode*/ IGRAPH_ALL, /*mindist*/ 4) == IGRAPH_SUCCESS);
    print_vector(&result);

    VERIFY_FINALLY_STACK();
    igraph_set_error_handler(igraph_error_handler_ignore);

    printf("Negative order.\n");
    IGRAPH_ASSERT(igraph_neighborhood_size(&g_lm, &result, vids, /*order*/ -4, /*mode*/ IGRAPH_ALL, /*mindist*/ 4) == IGRAPH_EINVAL);

    printf("Negative mindist.\n");
    IGRAPH_ASSERT(igraph_neighborhood_size(&g_lm, &result, vids, /*order*/ 4, /*mode*/ IGRAPH_ALL, /*mindist*/ -4) == IGRAPH_EINVAL);

    igraph_vector_destroy(&result);
    igraph_destroy(&g_empty);
    igraph_destroy(&g_lm);

    VERIFY_FINALLY_STACK();
    return 0;
}
igraph/igraph
tests/unit/igraph_neighborhood_size.c
C
gpl-2.0
3,528
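For intuition about what the test above checks: igraph_neighborhood_size counts, for each start vertex, the vertices whose shortest-path distance d satisfies mindist <= d <= order. The following self-contained BFS sketch reproduces that count on the same six-vertex graph, treated as undirected (IGRAPH_ALL); the adjacency matrix collapses the multi-edges, which does not change vertex counts, and all names are hypothetical:

#include <stdio.h>

#define N 6 /* same vertex count as the g_lm test graph */

/* undirected view of edges 0-1 0-2 1-1 1-3 2-0 2-3 3-4 3-4 */
static const int adj[N][N] = {
    {0,1,1,0,0,0},
    {1,1,0,1,0,0},
    {1,0,0,1,0,0},
    {0,1,1,0,1,0},
    {0,0,0,1,0,0},
    {0,0,0,0,0,0},
};

/* BFS from `start`, counting vertices whose distance d satisfies
 * mindist <= d <= order -- the quantity the test prints. */
static int neighborhood_size(int start, int order, int mindist)
{
    int dist[N], queue[N], head = 0, tail = 0, count = 0, v, w;
    for (v = 0; v < N; v++) dist[v] = -1;
    dist[start] = 0;
    queue[tail++] = start;
    while (head < tail) {
        v = queue[head++];
        if (dist[v] >= mindist) count++;
        if (dist[v] == order) continue;   /* don't expand past `order` */
        for (w = 0; w < N; w++)
            if (adj[v][w] && dist[w] < 0) {
                dist[w] = dist[v] + 1;
                queue[tail++] = w;
            }
    }
    return count;
}

int main(void)
{
    int v;
    for (v = 0; v < N; v++)
        printf("order 1, mindist 0, vertex %d: %d\n",
               v, neighborhood_size(v, 1, 0));
    return 0;
}

For order 1 and mindist 0 the sketch prints the closed-neighborhood sizes 3, 3, 3, 4, 2 and 1 for the six vertices.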
/* * linux/mm/vmscan.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Swap reorganised 29.12.95, Stephen Tweedie. * kswapd added: 7.1.96 sct * Removed kswapd_ctl limits, and swap out as many pages as needed * to bring the system back to freepages.high: 2.4.97, Rik van Riel. * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). * Multiqueue VM started 5.8.00, Rik van Riel. */ #include <linux/mm.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/vmstat.h> #include <linux/file.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> /* for try_to_release_page(), buffer_heads_over_limit */ #include <linux/mm_inline.h> #include <linux/pagevec.h> #include <linux/backing-dev.h> #include <linux/rmap.h> #include <linux/topology.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/notifier.h> #include <linux/rwsem.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <asm/tlbflush.h> #include <asm/div64.h> #include <linux/swapops.h> #include "internal.h" struct scan_control { /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; /* This context's GFP mask */ gfp_t gfp_mask; int may_writepage; /* Can pages be swapped as part of reclaim? */ int may_swap; /* This context's SWAP_CLUSTER_MAX. If freeing memory for * suspend, we effectively ignore SWAP_CLUSTER_MAX. * In this context, it doesn't matter that we scan the * whole list at once. */ int swap_cluster_max; int swappiness; int all_unreclaimable; }; /* * The list of shrinker callbacks used by to apply pressure to * ageable caches. */ struct shrinker { shrinker_t shrinker; struct list_head list; int seeks; /* seeks to recreate an obj */ long nr; /* objs pending delete */ }; #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) #ifdef ARCH_HAS_PREFETCH #define prefetch_prev_lru_page(_page, _base, _field) \ do { \ if ((_page)->lru.prev != _base) { \ struct page *prev; \ \ prev = lru_to_page(&(_page->lru)); \ prefetch(&prev->_field); \ } \ } while (0) #else #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) #endif #ifdef ARCH_HAS_PREFETCHW #define prefetchw_prev_lru_page(_page, _base, _field) \ do { \ if ((_page)->lru.prev != _base) { \ struct page *prev; \ \ prev = lru_to_page(&(_page->lru)); \ prefetchw(&prev->_field); \ } \ } while (0) #else #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) #endif /* * From 0 .. 100. Higher means more swappy. 
*/ int vm_swappiness = 60; long vm_total_pages; /* The total number of pages which the VM controls */ static LIST_HEAD(shrinker_list); static DECLARE_RWSEM(shrinker_rwsem); /* * Add a shrinker callback to be called from the vm */ struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker) { struct shrinker *shrinker; shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL); if (shrinker) { shrinker->shrinker = theshrinker; shrinker->seeks = seeks; shrinker->nr = 0; down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); up_write(&shrinker_rwsem); } return shrinker; } EXPORT_SYMBOL(set_shrinker); /* * Remove one */ void remove_shrinker(struct shrinker *shrinker) { down_write(&shrinker_rwsem); list_del(&shrinker->list); up_write(&shrinker_rwsem); kfree(shrinker); } EXPORT_SYMBOL(remove_shrinker); #define SHRINK_BATCH 128 /* * Call the shrink functions to age shrinkable caches * * Here we assume it costs one seek to replace a lru page and that it also * takes a seek to recreate a cache object. With this in mind we age equal * percentages of the lru and ageable caches. This should balance the seeks * generated by these structures. * * If the vm encounted mapped pages on the LRU it increase the pressure on * slab to avoid swapping. * * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits. * * `lru_pages' represents the number of on-LRU pages in all the zones which * are eligible for the caller's allocation attempt. It is used for balancing * slab reclaim versus page reclaim. * * Returns the number of slab objects which we shrunk. */ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages) { struct shrinker *shrinker; unsigned long ret = 0; if (scanned == 0) scanned = SWAP_CLUSTER_MAX; if (!down_read_trylock(&shrinker_rwsem)) return 1; /* Assume we'll be able to shrink next time */ list_for_each_entry(shrinker, &shrinker_list, list) { unsigned long long delta; unsigned long total_scan; unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask); delta = (4 * scanned) / shrinker->seeks; delta *= max_pass; do_div(delta, lru_pages + 1); shrinker->nr += delta; if (shrinker->nr < 0) { printk(KERN_ERR "%s: nr=%ld\n", __FUNCTION__, shrinker->nr); shrinker->nr = max_pass; } /* * Avoid risking looping forever due to too large nr value: * never try to free more than twice the estimate number of * freeable entries. */ if (shrinker->nr > max_pass * 2) shrinker->nr = max_pass * 2; total_scan = shrinker->nr; shrinker->nr = 0; while (total_scan >= SHRINK_BATCH) { long this_scan = SHRINK_BATCH; int shrink_ret; int nr_before; nr_before = (*shrinker->shrinker)(0, gfp_mask); shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask); if (shrink_ret == -1) break; if (shrink_ret < nr_before) ret += nr_before - shrink_ret; count_vm_events(SLABS_SCANNED, this_scan); total_scan -= this_scan; cond_resched(); } shrinker->nr += total_scan; } up_read(&shrinker_rwsem); return ret; } /* Called without lock on whether page is mapped, so answer is unstable */ static inline int page_mapping_inuse(struct page *page) { struct address_space *mapping; /* Page is in somebody's page tables. */ if (page_mapped(page)) return 1; /* Be more reluctant to reclaim swapcache than pagecache */ if (PageSwapCache(page)) return 1; mapping = page_mapping(page); if (!mapping) return 0; /* File is mmap'd by somebody? 
*/ return mapping_mapped(mapping); } static inline int is_page_cache_freeable(struct page *page) { return page_count(page) - !!PagePrivate(page) == 2; } static int may_write_to_queue(struct backing_dev_info *bdi) { if (current->flags & PF_SWAPWRITE) return 1; if (!bdi_write_congested(bdi)) return 1; if (bdi == current->backing_dev_info) return 1; return 0; } /* * We detected a synchronous write error writing a page out. Probably * -ENOSPC. We need to propagate that into the address_space for a subsequent * fsync(), msync() or close(). * * The tricky part is that after writepage we cannot touch the mapping: nothing * prevents it from being freed up. But we have a ref on the page and once * that page is locked, the mapping is pinned. * * We're allowed to run sleeping lock_page() here because we know the caller has * __GFP_FS. */ static void handle_write_error(struct address_space *mapping, struct page *page, int error) { lock_page(page); if (page_mapping(page) == mapping) { if (error == -ENOSPC) set_bit(AS_ENOSPC, &mapping->flags); else set_bit(AS_EIO, &mapping->flags); } unlock_page(page); } /* possible outcome of pageout() */ typedef enum { /* failed to write page out, page is locked */ PAGE_KEEP, /* move page to the active list, page is locked */ PAGE_ACTIVATE, /* page has been sent to the disk successfully, page is unlocked */ PAGE_SUCCESS, /* page is clean and locked */ PAGE_CLEAN, } pageout_t; /* * pageout is called by shrink_page_list() for each dirty page. * Calls ->writepage(). */ static pageout_t pageout(struct page *page, struct address_space *mapping) { /* * If the page is dirty, only perform writeback if that write * will be non-blocking. To prevent this allocation from being * stalled by pagecache activity. But note that there may be * stalls if we need to run get_block(). We could test * PagePrivate for that. * * If this process is currently in generic_file_write() against * this page's queue, we can perform writeback even if that * will block. * * If the page is swapcache, write it back even if that would * block, for some throttling. This happens by accident, because * swap_backing_dev_info is bust: it doesn't reflect the * congestion state of the swapdevs. Easy to fix, if needed. * See swapfile.c:page_queue_congested(). */ if (!is_page_cache_freeable(page)) return PAGE_KEEP; if (!mapping) { /* * Some data journaling orphaned pages can have * page->mapping == NULL while being dirty with clean buffers. */ if (PagePrivate(page)) { if (try_to_free_buffers(page)) { ClearPageDirty(page); printk("%s: orphaned page\n", __FUNCTION__); return PAGE_CLEAN; } } return PAGE_KEEP; } if (mapping->a_ops->writepage == NULL) return PAGE_ACTIVATE; if (!may_write_to_queue(mapping->backing_dev_info)) return PAGE_KEEP; if (clear_page_dirty_for_io(page)) { int res; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = SWAP_CLUSTER_MAX, .range_start = 0, .range_end = LLONG_MAX, .nonblocking = 1, .for_reclaim = 1, }; SetPageReclaim(page); res = mapping->a_ops->writepage(page, &wbc); if (res < 0) handle_write_error(mapping, page, res); if (res == AOP_WRITEPAGE_ACTIVATE) { ClearPageReclaim(page); return PAGE_ACTIVATE; } if (!PageWriteback(page)) { /* synchronous write or broken a_ops? */ ClearPageReclaim(page); } inc_zone_page_state(page, NR_VMSCAN_WRITE); return PAGE_SUCCESS; } return PAGE_CLEAN; } /* * Attempt to detach a locked page from its ->mapping. If it is dirty or if * someone else has a ref on the page, abort and return 0. If it was * successfully detached, return 1. 
Assumes the caller has a single ref on * this page. */ int remove_mapping(struct address_space *mapping, struct page *page) { BUG_ON(!PageLocked(page)); BUG_ON(mapping != page_mapping(page)); write_lock_irq(&mapping->tree_lock); /* * The non racy check for a busy page. * * Must be careful with the order of the tests. When someone has * a ref to the page, it may be possible that they dirty it then * drop the reference. So if PageDirty is tested before page_count * here, then the following race may occur: * * get_user_pages(&page); * [user mapping goes away] * write_to(page); * !PageDirty(page) [good] * SetPageDirty(page); * put_page(page); * !page_count(page) [good, discard it] * * [oops, our write_to data is lost] * * Reversing the order of the tests ensures such a situation cannot * escape unnoticed. The smp_rmb is needed to ensure the page->flags * load is not satisfied before that of page->_count. * * Note that if SetPageDirty is always performed via set_page_dirty, * and thus under tree_lock, then this ordering is not required. */ if (unlikely(page_count(page) != 2)) goto cannot_free; smp_rmb(); if (unlikely(PageDirty(page))) goto cannot_free; if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; __delete_from_swap_cache(page); write_unlock_irq(&mapping->tree_lock); swap_free(swap); __put_page(page); /* The pagecache ref */ return 1; } __remove_from_page_cache(page); write_unlock_irq(&mapping->tree_lock); __put_page(page); return 1; cannot_free: write_unlock_irq(&mapping->tree_lock); return 0; } /* * shrink_page_list() returns the number of reclaimed pages */ static unsigned long shrink_page_list(struct list_head *page_list, struct scan_control *sc) { LIST_HEAD(ret_pages); struct pagevec freed_pvec; int pgactivate = 0; unsigned long nr_reclaimed = 0; cond_resched(); pagevec_init(&freed_pvec, 1); while (!list_empty(page_list)) { struct address_space *mapping; struct page *page; int may_enter_fs; int referenced; cond_resched(); page = lru_to_page(page_list); list_del(&page->lru); if (TestSetPageLocked(page)) goto keep; VM_BUG_ON(PageActive(page)); sc->nr_scanned++; if (!sc->may_swap && page_mapped(page)) goto keep_locked; /* Double the slab pressure for mapped and swapcache pages */ if (page_mapped(page) || PageSwapCache(page)) sc->nr_scanned++; if (PageWriteback(page)) goto keep_locked; referenced = page_referenced(page, 1); /* In active use or really unfreeable? Activate it. */ if (referenced && page_mapping_inuse(page)) goto activate_locked; #ifdef CONFIG_SWAP /* * Anonymous process memory has backing store? * Try to allocate it some swap space here. */ if (PageAnon(page) && !PageSwapCache(page)) if (!add_to_swap(page, GFP_ATOMIC)) goto activate_locked; #endif /* CONFIG_SWAP */ mapping = page_mapping(page); may_enter_fs = (sc->gfp_mask & __GFP_FS) || (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); /* * The page is mapped into the page tables of one or more * processes. Try to unmap it here. 
*/ if (page_mapped(page) && mapping) { switch (try_to_unmap(page, 0)) { case SWAP_FAIL: goto activate_locked; case SWAP_AGAIN: goto keep_locked; case SWAP_SUCCESS: ; /* try to free the page below */ } } if (PageDirty(page)) { if (referenced) goto keep_locked; if (!may_enter_fs) goto keep_locked; if (!sc->may_writepage) goto keep_locked; /* Page is dirty, try to write it out here */ switch(pageout(page, mapping)) { case PAGE_KEEP: goto keep_locked; case PAGE_ACTIVATE: goto activate_locked; case PAGE_SUCCESS: if (PageWriteback(page) || PageDirty(page)) goto keep; /* * A synchronous write - probably a ramdisk. Go * ahead and try to reclaim the page. */ if (TestSetPageLocked(page)) goto keep; if (PageDirty(page) || PageWriteback(page)) goto keep_locked; mapping = page_mapping(page); case PAGE_CLEAN: ; /* try to free the page below */ } } /* * If the page has buffers, try to free the buffer mappings * associated with this page. If we succeed we try to free * the page as well. * * We do this even if the page is PageDirty(). * try_to_release_page() does not perform I/O, but it is * possible for a page to have PageDirty set, but it is actually * clean (all its buffers are clean). This happens if the * buffers were written out directly, with submit_bh(). ext3 * will do this, as well as the blockdev mapping. * try_to_release_page() will discover that cleanness and will * drop the buffers and mark the page clean - it can be freed. * * Rarely, pages can have buffers and no ->mapping. These are * the pages which were not successfully invalidated in * truncate_complete_page(). We try to drop those buffers here * and if that worked, and the page is no longer mapped into * process address space (page_count == 1) it can be freed. * Otherwise, leave the page on the LRU so it is swappable. */ if (PagePrivate(page)) { if (!try_to_release_page(page, sc->gfp_mask)) goto activate_locked; if (!mapping && page_count(page) == 1) goto free_it; } if (!mapping || !remove_mapping(mapping, page)) goto keep_locked; free_it: unlock_page(page); nr_reclaimed++; if (!pagevec_add(&freed_pvec, page)) __pagevec_release_nonlru(&freed_pvec); continue; activate_locked: SetPageActive(page); pgactivate++; keep_locked: unlock_page(page); keep: list_add(&page->lru, &ret_pages); VM_BUG_ON(PageLRU(page)); } list_splice(&ret_pages, page_list); if (pagevec_count(&freed_pvec)) __pagevec_release_nonlru(&freed_pvec); count_vm_events(PGACTIVATE, pgactivate); return nr_reclaimed; } /* * zone->lru_lock is heavily contended. Some of the functions that * shrink the lists perform better by taking out a batch of pages * and working on them outside the LRU lock. * * For pagecache intensive workloads, this function is the hottest * spot in the kernel (apart from copy_*_user functions). * * Appropriate locks must be held before calling this function. * * @nr_to_scan: The number of pages to look through on the list. * @src: The LRU list to pull pages off. * @dst: The temp list to put pages on to. * @scanned: The number of pages that were scanned. * * returns how many pages were moved onto *@dst. 
*/ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, struct list_head *src, struct list_head *dst, unsigned long *scanned) { unsigned long nr_taken = 0; struct page *page; unsigned long scan; for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { struct list_head *target; page = lru_to_page(src); prefetchw_prev_lru_page(page, src, flags); VM_BUG_ON(!PageLRU(page)); list_del(&page->lru); target = src; if (likely(get_page_unless_zero(page))) { /* * Be careful not to clear PageLRU until after we're * sure the page is not being freed elsewhere -- the * page release code relies on it. */ ClearPageLRU(page); target = dst; nr_taken++; } /* else it is being freed elsewhere */ list_add(&page->lru, target); } *scanned = scan; return nr_taken; } /* * shrink_inactive_list() is a helper for shrink_zone(). It returns the number * of reclaimed pages */ static unsigned long shrink_inactive_list(unsigned long max_scan, struct zone *zone, struct scan_control *sc) { LIST_HEAD(page_list); struct pagevec pvec; unsigned long nr_scanned = 0; unsigned long nr_reclaimed = 0; pagevec_init(&pvec, 1); lru_add_drain(); spin_lock_irq(&zone->lru_lock); do { struct page *page; unsigned long nr_taken; unsigned long nr_scan; unsigned long nr_freed; nr_taken = isolate_lru_pages(sc->swap_cluster_max, &zone->inactive_list, &page_list, &nr_scan); __mod_zone_page_state(zone, NR_INACTIVE, -nr_taken); zone->pages_scanned += nr_scan; spin_unlock_irq(&zone->lru_lock); nr_scanned += nr_scan; nr_freed = shrink_page_list(&page_list, sc); nr_reclaimed += nr_freed; local_irq_disable(); if (current_is_kswapd()) { __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan); __count_vm_events(KSWAPD_STEAL, nr_freed); } else __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan); __count_zone_vm_events(PGSTEAL, zone, nr_freed); if (nr_taken == 0) goto done; spin_lock(&zone->lru_lock); /* * Put back any unfreeable pages. */ while (!list_empty(&page_list)) { page = lru_to_page(&page_list); VM_BUG_ON(PageLRU(page)); SetPageLRU(page); list_del(&page->lru); if (PageActive(page)) add_page_to_active_list(zone, page); else add_page_to_inactive_list(zone, page); if (!pagevec_add(&pvec, page)) { spin_unlock_irq(&zone->lru_lock); __pagevec_release(&pvec); spin_lock_irq(&zone->lru_lock); } } } while (nr_scanned < max_scan); spin_unlock(&zone->lru_lock); done: local_irq_enable(); pagevec_release(&pvec); return nr_reclaimed; } /* * We are about to scan this zone at a certain priority level. If that priority * level is smaller (ie: more urgent) than the previous priority, then note * that priority level within the zone. This is done so that when the next * process comes in to scan this zone, it will immediately start out at this * priority level rather than having to build up its own scanning priority. * Here, this priority affects only the reclaim-mapped threshold. */ static inline void note_zone_scanning_priority(struct zone *zone, int priority) { if (priority < zone->prev_priority) zone->prev_priority = priority; } static inline int zone_is_near_oom(struct zone *zone) { return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE) + zone_page_state(zone, NR_INACTIVE))*3; } /* * This moves pages from the active list to the inactive list. * * We move them the other way if the page is referenced by one or more * processes, from rmap. * * If the pages are mostly unmapped, the processing is fast and it is * appropriate to hold zone->lru_lock across the whole operation. 
But if * the pages are mapped, the processing is slow (page_referenced()) so we * should drop zone->lru_lock around each page. It's impossible to balance * this, so instead we remove the pages from the LRU while processing them. * It is safe to rely on PG_active against the non-LRU pages in here because * nobody will play with that bit on a non-LRU page. * * The downside is that we have to touch page->_count against each page. * But we had to alter page->flags anyway. */ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, struct scan_control *sc, int priority) { unsigned long pgmoved; int pgdeactivate = 0; unsigned long pgscanned; LIST_HEAD(l_hold); /* The pages which were snipped off */ LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */ LIST_HEAD(l_active); /* Pages to go onto the active_list */ struct page *page; struct pagevec pvec; int reclaim_mapped = 0; if (sc->may_swap) { long mapped_ratio; long distress; long swap_tendency; if (zone_is_near_oom(zone)) goto force_reclaim_mapped; /* * `distress' is a measure of how much trouble we're having * reclaiming pages. 0 -> no problems. 100 -> great trouble. */ distress = 100 >> min(zone->prev_priority, priority); /* * The point of this algorithm is to decide when to start * reclaiming mapped memory instead of just pagecache. Work out * how much memory * is mapped. */ mapped_ratio = ((global_page_state(NR_FILE_MAPPED) + global_page_state(NR_ANON_PAGES)) * 100) / vm_total_pages; /* * Now decide how much we really want to unmap some pages. The * mapped ratio is downgraded - just because there's a lot of * mapped memory doesn't necessarily mean that page reclaim * isn't succeeding. * * The distress ratio is important - we don't want to start * going oom. * * A 100% value of vm_swappiness overrides this algorithm * altogether. */ swap_tendency = mapped_ratio / 2 + distress + sc->swappiness; /* * Now use this metric to decide whether to start moving mapped * memory onto the inactive list. 
*/ if (swap_tendency >= 100) force_reclaim_mapped: reclaim_mapped = 1; } lru_add_drain(); spin_lock_irq(&zone->lru_lock); pgmoved = isolate_lru_pages(nr_pages, &zone->active_list, &l_hold, &pgscanned); zone->pages_scanned += pgscanned; __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved); spin_unlock_irq(&zone->lru_lock); while (!list_empty(&l_hold)) { cond_resched(); page = lru_to_page(&l_hold); list_del(&page->lru); if (page_mapped(page)) { if (!reclaim_mapped || (total_swap_pages == 0 && PageAnon(page)) || page_referenced(page, 0)) { list_add(&page->lru, &l_active); continue; } } list_add(&page->lru, &l_inactive); } pagevec_init(&pvec, 1); pgmoved = 0; spin_lock_irq(&zone->lru_lock); while (!list_empty(&l_inactive)) { page = lru_to_page(&l_inactive); prefetchw_prev_lru_page(page, &l_inactive, flags); VM_BUG_ON(PageLRU(page)); SetPageLRU(page); VM_BUG_ON(!PageActive(page)); ClearPageActive(page); list_move(&page->lru, &zone->inactive_list); pgmoved++; if (!pagevec_add(&pvec, page)) { __mod_zone_page_state(zone, NR_INACTIVE, pgmoved); spin_unlock_irq(&zone->lru_lock); pgdeactivate += pgmoved; pgmoved = 0; if (buffer_heads_over_limit) pagevec_strip(&pvec); __pagevec_release(&pvec); spin_lock_irq(&zone->lru_lock); } } __mod_zone_page_state(zone, NR_INACTIVE, pgmoved); pgdeactivate += pgmoved; if (buffer_heads_over_limit) { spin_unlock_irq(&zone->lru_lock); pagevec_strip(&pvec); spin_lock_irq(&zone->lru_lock); } pgmoved = 0; while (!list_empty(&l_active)) { page = lru_to_page(&l_active); prefetchw_prev_lru_page(page, &l_active, flags); VM_BUG_ON(PageLRU(page)); SetPageLRU(page); VM_BUG_ON(!PageActive(page)); list_move(&page->lru, &zone->active_list); pgmoved++; if (!pagevec_add(&pvec, page)) { __mod_zone_page_state(zone, NR_ACTIVE, pgmoved); pgmoved = 0; spin_unlock_irq(&zone->lru_lock); __pagevec_release(&pvec); spin_lock_irq(&zone->lru_lock); } } __mod_zone_page_state(zone, NR_ACTIVE, pgmoved); __count_zone_vm_events(PGREFILL, zone, pgscanned); __count_vm_events(PGDEACTIVATE, pgdeactivate); spin_unlock_irq(&zone->lru_lock); pagevec_release(&pvec); } /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. */ static unsigned long shrink_zone(int priority, struct zone *zone, struct scan_control *sc) { unsigned long nr_active; unsigned long nr_inactive; unsigned long nr_to_scan; unsigned long nr_reclaimed = 0; atomic_inc(&zone->reclaim_in_progress); /* * Add one to `nr_to_scan' just to make sure that the kernel will * slowly sift through the active list. */ zone->nr_scan_active += (zone_page_state(zone, NR_ACTIVE) >> priority) + 1; nr_active = zone->nr_scan_active; if (nr_active >= sc->swap_cluster_max) zone->nr_scan_active = 0; else nr_active = 0; zone->nr_scan_inactive += (zone_page_state(zone, NR_INACTIVE) >> priority) + 1; nr_inactive = zone->nr_scan_inactive; if (nr_inactive >= sc->swap_cluster_max) zone->nr_scan_inactive = 0; else nr_inactive = 0; while (nr_active || nr_inactive) { if (nr_active) { nr_to_scan = min(nr_active, (unsigned long)sc->swap_cluster_max); nr_active -= nr_to_scan; shrink_active_list(nr_to_scan, zone, sc, priority); } if (nr_inactive) { nr_to_scan = min(nr_inactive, (unsigned long)sc->swap_cluster_max); nr_inactive -= nr_to_scan; nr_reclaimed += shrink_inactive_list(nr_to_scan, zone, sc); } } throttle_vm_writeout(sc->gfp_mask); atomic_dec(&zone->reclaim_in_progress); return nr_reclaimed; } /* * This is the direct reclaim path, for page-allocating processes. 
We only * try to reclaim pages from zones which will satisfy the caller's allocation * request. * * We reclaim from a zone even if that zone is over pages_high. Because: * a) The caller may be trying to free *extra* pages to satisfy a higher-order * allocation or * b) The zones may be over pages_high but they must go *over* pages_high to * satisfy the `incremental min' zone defense algorithm. * * Returns the number of reclaimed pages. * * If a zone is deemed to be full of pinned pages then just give it a light * scan then give up on it. */ static unsigned long shrink_zones(int priority, struct zone **zones, struct scan_control *sc) { unsigned long nr_reclaimed = 0; int i; sc->all_unreclaimable = 1; for (i = 0; zones[i] != NULL; i++) { struct zone *zone = zones[i]; if (!populated_zone(zone)) continue; if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; note_zone_scanning_priority(zone, priority); if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; /* Let kswapd poll it */ sc->all_unreclaimable = 0; nr_reclaimed += shrink_zone(priority, zone, sc); } return nr_reclaimed; } /* * This is the main entry point to direct page reclaim. * * If a full scan of the inactive list fails to free enough memory then we * are "out of memory" and something needs to be killed. * * If the caller is !__GFP_FS then the probability of a failure is reasonably * high - the zone may be full of dirty or under-writeback pages, which this * caller can't do much about. We kick pdflush and take explicit naps in the * hope that some of these pages can be written. But if the allocating task * holds filesystem locks which prevent writeout this might not work, and the * allocation attempt will fail. */ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) { int priority; int ret = 0; unsigned long total_scanned = 0; unsigned long nr_reclaimed = 0; struct reclaim_state *reclaim_state = current->reclaim_state; unsigned long lru_pages = 0; int i; struct scan_control sc = { .gfp_mask = gfp_mask, .may_writepage = !laptop_mode, .swap_cluster_max = SWAP_CLUSTER_MAX, .may_swap = 1, .swappiness = vm_swappiness, }; count_vm_event(ALLOCSTALL); for (i = 0; zones[i] != NULL; i++) { struct zone *zone = zones[i]; if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; lru_pages += zone_page_state(zone, NR_ACTIVE) + zone_page_state(zone, NR_INACTIVE); } for (priority = DEF_PRIORITY; priority >= 0; priority--) { sc.nr_scanned = 0; if (!priority) disable_swap_token(); nr_reclaimed += shrink_zones(priority, zones, &sc); shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); if (reclaim_state) { nr_reclaimed += reclaim_state->reclaimed_slab; reclaim_state->reclaimed_slab = 0; } total_scanned += sc.nr_scanned; if (nr_reclaimed >= sc.swap_cluster_max) { ret = 1; goto out; } /* * Try to write back as many pages as we just scanned. This * tends to cause slow streaming writers to write data to the * disk smoothly, at the dirtying rate, which is nice. But * that's undesirable in laptop mode, where we *want* lumpy * writeout. So in laptop mode, write out the whole world. */ if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max / 2) { wakeup_pdflush(laptop_mode ? 0 : total_scanned); sc.may_writepage = 1; } /* Take a nap, wait for some writeback to complete */ if (sc.nr_scanned && priority < DEF_PRIORITY - 2) congestion_wait(WRITE, HZ/10); } /* top priority shrink_caches still had more to do? 
don't OOM, then */ if (!sc.all_unreclaimable) ret = 1; out: /* * Now that we've scanned all the zones at this priority level, note * that level within the zone so that the next thread which performs * scanning of this zone will immediately start out at this priority * level. This affects only the decision whether or not to bring * mapped pages onto the inactive list. */ if (priority < 0) priority = 0; for (i = 0; zones[i] != 0; i++) { struct zone *zone = zones[i]; if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; zone->prev_priority = priority; } return ret; } /* * For kswapd, balance_pgdat() will work across all this node's zones until * they are all at pages_high. * * Returns the number of pages which were actually freed. * * There is special handling here for zones which are full of pinned pages. * This can happen if the pages are all mlocked, or if they are all used by * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. * What we do is to detect the case where all pages in the zone have been * scanned twice and there has been zero successful reclaim. Mark the zone as * dead and from now on, only perform a short scan. Basically we're polling * the zone for when the problem goes away. * * kswapd scans the zones in the highmem->normal->dma direction. It skips * zones which have free_pages > pages_high, but once a zone is found to have * free_pages <= pages_high, we scan that zone and the lower zones regardless * of the number of free pages in the lower zones. This interoperates with * the page allocator fallback scheme to ensure that aging of pages is balanced * across the zones. */ static unsigned long balance_pgdat(pg_data_t *pgdat, int order) { int all_zones_ok; int priority; int i; unsigned long total_scanned; unsigned long nr_reclaimed; struct reclaim_state *reclaim_state = current->reclaim_state; struct scan_control sc = { .gfp_mask = GFP_KERNEL, .may_swap = 1, .swap_cluster_max = SWAP_CLUSTER_MAX, .swappiness = vm_swappiness, }; /* * temp_priority is used to remember the scanning priority at which * this zone was successfully refilled to free_pages == pages_high. */ int temp_priority[MAX_NR_ZONES]; loop_again: total_scanned = 0; nr_reclaimed = 0; sc.may_writepage = !laptop_mode; count_vm_event(PAGEOUTRUN); for (i = 0; i < pgdat->nr_zones; i++) temp_priority[i] = DEF_PRIORITY; for (priority = DEF_PRIORITY; priority >= 0; priority--) { int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ unsigned long lru_pages = 0; /* The swap token gets in the way of swapout... */ if (!priority) disable_swap_token(); all_zones_ok = 1; /* * Scan in the highmem->dma direction for the highest * zone which needs scanning */ for (i = pgdat->nr_zones - 1; i >= 0; i--) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; if (!zone_watermark_ok(zone, order, zone->pages_high, 0, 0)) { end_zone = i; break; } } if (i < 0) goto out; for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; lru_pages += zone_page_state(zone, NR_ACTIVE) + zone_page_state(zone, NR_INACTIVE); } /* * Now scan the zone in the dma->highmem direction, stopping * at the last zone which needs scanning. * * We do this because the page allocator works in the opposite * direction. This prevents the page allocator from allocating * pages behind kswapd's direction of progress, which would * cause too much scanning of the lower zones. 
*/ for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; int nr_slab; if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; if (!zone_watermark_ok(zone, order, zone->pages_high, end_zone, 0)) all_zones_ok = 0; temp_priority[i] = priority; sc.nr_scanned = 0; note_zone_scanning_priority(zone, priority); nr_reclaimed += shrink_zone(priority, zone, &sc); reclaim_state->reclaimed_slab = 0; nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages); nr_reclaimed += reclaim_state->reclaimed_slab; total_scanned += sc.nr_scanned; if (zone->all_unreclaimable) continue; if (nr_slab == 0 && zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE) + zone_page_state(zone, NR_INACTIVE)) * 6) zone->all_unreclaimable = 1; /* * If we've done a decent amount of scanning and * the reclaim ratio is low, start doing writepage * even in laptop mode */ if (total_scanned > SWAP_CLUSTER_MAX * 2 && total_scanned > nr_reclaimed + nr_reclaimed / 2) sc.may_writepage = 1; } if (all_zones_ok) break; /* kswapd: all done */ /* * OK, kswapd is getting into trouble. Take a nap, then take * another pass across the zones. */ if (total_scanned && priority < DEF_PRIORITY - 2) congestion_wait(WRITE, HZ/10); /* * We do this so kswapd doesn't build up large priorities for * example when it is freeing in parallel with allocators. It * matches the direct reclaim path behaviour in terms of impact * on zone->*_priority. */ if (nr_reclaimed >= SWAP_CLUSTER_MAX) break; } out: /* * Note within each zone the priority level at which this zone was * brought into a happy state. So that the next thread which scans this * zone will start out at that priority level. */ for (i = 0; i < pgdat->nr_zones; i++) { struct zone *zone = pgdat->node_zones + i; zone->prev_priority = temp_priority[i]; } if (!all_zones_ok) { cond_resched(); try_to_freeze(); goto loop_again; } return nr_reclaimed; } /* * The background pageout daemon, started as a kernel thread * from the init process. * * This basically trickles out pages so that we have _some_ * free memory available even if there is no other activity * that frees anything up. This is needed for things like routing * etc, where we otherwise might have all activity going on in * asynchronous contexts that cannot page things out. * * If there are applications that are active memory-allocators * (most normal use), this basically shouldn't matter. */ static int kswapd(void *p) { unsigned long order; pg_data_t *pgdat = (pg_data_t*)p; struct task_struct *tsk = current; DEFINE_WAIT(wait); struct reclaim_state reclaim_state = { .reclaimed_slab = 0, }; cpumask_t cpumask; cpumask = node_to_cpumask(pgdat->node_id); if (!cpus_empty(cpumask)) set_cpus_allowed(tsk, cpumask); current->reclaim_state = &reclaim_state; /* * Tell the memory management that we're a "memory allocator", * and that if we need more memory we should get access to it * regardless (see "__alloc_pages()"). "kswapd" should * never get caught in the normal page freeing logic. * * (Kswapd normally doesn't need memory anyway, but sometimes * you need a small amount of memory in order to be able to * page out something else, and this flag essentially protects * us from recursively trying to free more memory as we're * trying to free the first piece of memory in the first place). 
*/ tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; order = 0; for ( ; ; ) { unsigned long new_order; try_to_freeze(); prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); new_order = pgdat->kswapd_max_order; pgdat->kswapd_max_order = 0; if (order < new_order) { /* * Don't sleep if someone wants a larger 'order' * allocation */ order = new_order; } else { schedule(); order = pgdat->kswapd_max_order; } finish_wait(&pgdat->kswapd_wait, &wait); balance_pgdat(pgdat, order); } return 0; } /* * A zone is low on free memory, so wake its kswapd task to service it. */ void wakeup_kswapd(struct zone *zone, int order) { pg_data_t *pgdat; if (!populated_zone(zone)) return; pgdat = zone->zone_pgdat; if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0)) return; if (pgdat->kswapd_max_order < order) pgdat->kswapd_max_order = order; if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) return; if (!waitqueue_active(&pgdat->kswapd_wait)) return; wake_up_interruptible(&pgdat->kswapd_wait); } #ifdef CONFIG_PM /* * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages * from LRU lists system-wide, for given pass and priority, and returns the * number of reclaimed pages * * For pass > 3 we also try to shrink the LRU lists that contain a few pages */ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio, int pass, struct scan_control *sc) { struct zone *zone; unsigned long nr_to_scan, ret = 0; for_each_zone(zone) { if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && prio != DEF_PRIORITY) continue; /* For pass = 0 we don't shrink the active list */ if (pass > 0) { zone->nr_scan_active += (zone_page_state(zone, NR_ACTIVE) >> prio) + 1; if (zone->nr_scan_active >= nr_pages || pass > 3) { zone->nr_scan_active = 0; nr_to_scan = min(nr_pages, zone_page_state(zone, NR_ACTIVE)); shrink_active_list(nr_to_scan, zone, sc, prio); } } zone->nr_scan_inactive += (zone_page_state(zone, NR_INACTIVE) >> prio) + 1; if (zone->nr_scan_inactive >= nr_pages || pass > 3) { zone->nr_scan_inactive = 0; nr_to_scan = min(nr_pages, zone_page_state(zone, NR_INACTIVE)); ret += shrink_inactive_list(nr_to_scan, zone, sc); if (ret >= nr_pages) return ret; } } return ret; } static unsigned long count_lru_pages(void) { return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE); } /* * Try to free `nr_pages' of memory, system-wide, and return the number of * freed pages. 
* * Rather than trying to age LRUs the aim is to preserve the overall * LRU order by reclaiming preferentially * inactive > active > active referenced > active mapped */ unsigned long shrink_all_memory(unsigned long nr_pages) { unsigned long lru_pages, nr_slab; unsigned long ret = 0; int pass; struct reclaim_state reclaim_state; struct scan_control sc = { .gfp_mask = GFP_KERNEL, .may_swap = 0, .swap_cluster_max = nr_pages, .may_writepage = 1, .swappiness = vm_swappiness, }; current->reclaim_state = &reclaim_state; lru_pages = count_lru_pages(); nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); /* If slab caches are huge, it's better to hit them first */ while (nr_slab >= lru_pages) { reclaim_state.reclaimed_slab = 0; shrink_slab(nr_pages, sc.gfp_mask, lru_pages); if (!reclaim_state.reclaimed_slab) break; ret += reclaim_state.reclaimed_slab; if (ret >= nr_pages) goto out; nr_slab -= reclaim_state.reclaimed_slab; } /* * We try to shrink LRUs in 5 passes: * 0 = Reclaim from inactive_list only * 1 = Reclaim from active list but don't reclaim mapped * 2 = 2nd pass of type 1 * 3 = Reclaim mapped (normal reclaim) * 4 = 2nd pass of type 3 */ for (pass = 0; pass < 5; pass++) { int prio; /* Force reclaiming mapped pages in the passes #3 and #4 */ if (pass > 2) { sc.may_swap = 1; sc.swappiness = 100; } for (prio = DEF_PRIORITY; prio >= 0; prio--) { unsigned long nr_to_scan = nr_pages - ret; sc.nr_scanned = 0; ret += shrink_all_zones(nr_to_scan, prio, pass, &sc); if (ret >= nr_pages) goto out; reclaim_state.reclaimed_slab = 0; shrink_slab(sc.nr_scanned, sc.gfp_mask, count_lru_pages()); ret += reclaim_state.reclaimed_slab; if (ret >= nr_pages) goto out; if (sc.nr_scanned && prio < DEF_PRIORITY - 2) congestion_wait(WRITE, HZ / 10); } } /* * If ret = 0, we could not shrink LRUs, but there may be something * in slab caches */ if (!ret) { do { reclaim_state.reclaimed_slab = 0; shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages()); ret += reclaim_state.reclaimed_slab; } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0); } out: current->reclaim_state = NULL; return ret; } #endif /* It's optimal to keep kswapds on the same CPUs as their memory, but not required for correctness. So if the last cpu in a node goes away, we get changed to run anywhere: as the first one comes back, restore their cpu bindings. */ static int __devinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { pg_data_t *pgdat; cpumask_t mask; if (action == CPU_ONLINE) { for_each_online_pgdat(pgdat) { mask = node_to_cpumask(pgdat->node_id); if (any_online_cpu(mask) != NR_CPUS) /* One of our CPUs online: restore mask */ set_cpus_allowed(pgdat->kswapd, mask); } } return NOTIFY_OK; } /* * This kswapd start function will be called by init and node-hot-add. * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. */ int kswapd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); int ret = 0; if (pgdat->kswapd) return 0; pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); if (IS_ERR(pgdat->kswapd)) { /* failure at boot is fatal */ BUG_ON(system_state == SYSTEM_BOOTING); printk("Failed to start kswapd on node %d\n",nid); ret = -1; } return ret; } static int __init kswapd_init(void) { int nid; swap_setup(); for_each_online_node(nid) kswapd_run(nid); hotcpu_notifier(cpu_callback, 0); return 0; } module_init(kswapd_init) #ifdef CONFIG_NUMA /* * Zone reclaim mode * * If non-zero call zone_reclaim when the number of free pages falls below * the watermarks. 
*/ int zone_reclaim_mode __read_mostly; #define RECLAIM_OFF 0 #define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */ #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ /* * Priority for ZONE_RECLAIM. This determines the fraction of pages * of a node considered for each zone_reclaim. 4 scans 1/16th of * a zone. */ #define ZONE_RECLAIM_PRIORITY 4 /* * Percentage of pages in a zone that must be unmapped for zone_reclaim to * occur. */ int sysctl_min_unmapped_ratio = 1; /* * If the number of slab pages in a zone grows beyond this percentage then * slab reclaim needs to occur. */ int sysctl_min_slab_ratio = 5; /* * Try to free up some pages from this zone through reclaim. */ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { /* Minimum pages needed in order to stay on node */ const unsigned long nr_pages = 1 << order; struct task_struct *p = current; struct reclaim_state reclaim_state; int priority; unsigned long nr_reclaimed = 0; struct scan_control sc = { .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), .swap_cluster_max = max_t(unsigned long, nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = gfp_mask, .swappiness = vm_swappiness, }; unsigned long slab_reclaimable; disable_swap_token(); cond_resched(); /* * We need to be able to allocate from the reserves for RECLAIM_SWAP * and we also need to be able to write out pages for RECLAIM_WRITE * and RECLAIM_SWAP. */ p->flags |= PF_MEMALLOC | PF_SWAPWRITE; reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; if (zone_page_state(zone, NR_FILE_PAGES) - zone_page_state(zone, NR_FILE_MAPPED) > zone->min_unmapped_pages) { /* * Free memory by calling shrink zone with increasing * priorities until we have enough memory freed. */ priority = ZONE_RECLAIM_PRIORITY; do { note_zone_scanning_priority(zone, priority); nr_reclaimed += shrink_zone(priority, zone, &sc); priority--; } while (priority >= 0 && nr_reclaimed < nr_pages); } slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE); if (slab_reclaimable > zone->min_slab_pages) { /* * shrink_slab() does not currently allow us to determine how * many pages were freed in this zone. So we take the current * number of slab pages and shake the slab until it is reduced * by the same nr_pages that we used for reclaiming unmapped * pages. * * Note that shrink_slab will free memory on all zones and may * take a long time. */ while (shrink_slab(sc.nr_scanned, gfp_mask, order) && zone_page_state(zone, NR_SLAB_RECLAIMABLE) > slab_reclaimable - nr_pages) ; /* * Update nr_reclaimed by the number of slab pages we * reclaimed from this zone. */ nr_reclaimed += slab_reclaimable - zone_page_state(zone, NR_SLAB_RECLAIMABLE); } p->reclaim_state = NULL; current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); return nr_reclaimed >= nr_pages; } int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { cpumask_t mask; int node_id; /* * Zone reclaim reclaims unmapped file backed pages and * slab pages if we are over the defined limits. * * A small portion of unmapped file backed pages is needed for * file I/O otherwise pages read by file I/O will be immediately * thrown out if the zone is overallocated. So we do not reclaim * if less than a specified percentage of the zone is used by * unmapped file backed pages. 
*/ if (zone_page_state(zone, NR_FILE_PAGES) - zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages && zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) return 0; /* * Avoid concurrent zone reclaims, do not reclaim in a zone that does * not have reclaimable pages and if we should not delay the allocation * then do not scan. */ if (!(gfp_mask & __GFP_WAIT) || zone->all_unreclaimable || atomic_read(&zone->reclaim_in_progress) > 0 || (current->flags & PF_MEMALLOC)) return 0; /* * Only run zone reclaim on the local zone or on zones that do not * have associated processors. This will favor the local processor * over remote processors and spread off node memory allocations * as wide as possible. */ node_id = zone_to_nid(zone); mask = node_to_cpumask(node_id); if (!cpus_empty(mask) && node_id != numa_node_id()) return 0; return __zone_reclaim(zone, gfp_mask, order); } #endif
philenotfound/belkin-wemo-linux-2.6.21.x
mm/vmscan.c
C
gpl-2.0
48,319
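The pass/priority nesting in shrink_all_memory() above is easy to lose in the surrounding bookkeeping, so here is a minimal self-contained userspace sketch of just that control flow: five passes, each sweeping the priority scale from DEF_PRIORITY down to 0 and stopping as soon as the target is met. simulate_reclaim() and its formula are invented stand-ins for shrink_all_zones(); the real function also interleaves shrink_slab() calls and congestion waits, which are omitted here.

#include <stdio.h>

#define DEF_PRIORITY 12

/* Invented stand-in for one shrink_all_zones() call: pretend that a later
 * pass and a deeper scan (lower priority value) free more pages. */
static unsigned long simulate_reclaim(int pass, int prio, unsigned long want)
{
    unsigned long freed = (unsigned long)(pass + 1) * (DEF_PRIORITY - prio + 1);
    return freed < want ? freed : want;
}

int main(void)
{
    const unsigned long nr_pages = 400;  /* reclaim target */
    unsigned long ret = 0;
    int pass, prio;

    /* Same shape as shrink_all_memory(): 5 passes, each sweeping the
     * priority scale from DEF_PRIORITY down to 0, stopping early once
     * the target is met. */
    for (pass = 0; pass < 5 && ret < nr_pages; pass++) {
        for (prio = DEF_PRIORITY; prio >= 0; prio--) {
            ret += simulate_reclaim(pass, prio, nr_pages - ret);
            if (ret >= nr_pages)
                break;
        }
    }

    printf("reclaimed %lu of %lu pages\n", ret, nr_pages);
    return 0;
}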
/* Install given floating-point environment and raise exceptions.
   Copyright (C) 2000 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David Huggins-Daines <dhd@debian.org>, 2000

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <fenv.h>
#include <string.h>

int
feupdateenv (const fenv_t *envp)
{
  union { unsigned long long l; unsigned int sw[2]; } s;
  fenv_t temp;

  /* Get the current exception status */
  __asm__ ("fstd %%fr0,0(%1) \n\t"
           "fldd 0(%1),%%fr0 \n\t"
           : "=m" (s.l) : "r" (&s.l));

  memcpy(&temp, envp, sizeof(fenv_t));

  /* Currently raised exceptions not cleared */
  temp.__status_word |= s.sw[0] & (FE_ALL_EXCEPT << 27);

  /* Install new environment. */
  fesetenv (&temp);

  /* Success. */
  return 0;
}
ystk/debian-eglibc
ports/sysdeps/hppa/fpu/feupdateenv.c
C
gpl-2.0
1,504
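The hppa implementation above provides the standard <fenv.h> semantics of feupdateenv(): install a saved environment, but keep (and re-raise) any exceptions raised since it was captured. A small portable usage sketch using only the standard C99 API; on glibc, link with -lm, and note that some compilers ignore the FENV_ACCESS pragma:

#include <fenv.h>
#include <stdio.h>

/* Ask for precise FP-environment semantics; some compilers ignore this. */
#pragma STDC FENV_ACCESS ON

int main(void)
{
    fenv_t env;

    /* Capture a clean environment, then raise an exception. */
    feclearexcept(FE_ALL_EXCEPT);
    fegetenv(&env);
    feraiseexcept(FE_DIVBYZERO);

    /* feupdateenv() installs the saved environment but merges back the
     * exceptions raised in the meantime, so FE_DIVBYZERO stays set. */
    feupdateenv(&env);

    printf("FE_DIVBYZERO still raised: %d\n", fetestexcept(FE_DIVBYZERO) != 0);
    return 0;
}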
/* protocol_subnet.c -- handle the meta-protocol, subnets
   Copyright (C) 1999-2005 Ivo Timmermans,
                 2000-2012 Guus Sliepen <guus@tinc-vpn.org>
                 2009      Michael Tokarev <mjt@tls.msk.ru>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#include "system.h"

#include "conf.h"
#include "connection.h"
#include "logger.h"
#include "net.h"
#include "netutl.h"
#include "node.h"
#include "protocol.h"
#include "subnet.h"
#include "utils.h"
#include "xalloc.h"

bool send_add_subnet(connection_t *c, const subnet_t *subnet) {
    char netstr[MAXNETSTR];

    if(!net2str(netstr, sizeof netstr, subnet))
        return false;

    return send_request(c, "%d %x %s %s", ADD_SUBNET, rand(), subnet->owner->name, netstr);
}

bool add_subnet_h(connection_t *c, const char *request) {
    char subnetstr[MAX_STRING_SIZE];
    char name[MAX_STRING_SIZE];
    node_t *owner;
    subnet_t s, *new, *old;

    memset(&s, 0x0, sizeof(subnet_t));

    if(sscanf(request, "%*d %*x " MAX_STRING " " MAX_STRING, name, subnetstr) != 2) {
        logger(DEBUG_ALWAYS, LOG_ERR, "Got bad %s from %s (%s)", "ADD_SUBNET", c->name, c->hostname);
        return false;
    }

    /* Check if owner name is valid */

    if(!check_id(name)) {
        logger(DEBUG_ALWAYS, LOG_ERR, "Got bad %s from %s (%s): %s", "ADD_SUBNET", c->name, c->hostname, "invalid name");
        return false;
    }

    /* Check if subnet string is valid */

    if(!str2net(&s, subnetstr)) {
        logger(DEBUG_ALWAYS, LOG_ERR, "Got bad %s from %s (%s): %s", "ADD_SUBNET", c->name, c->hostname, "invalid subnet string");
        return false;
    }

    if(seen_request(request))
        return true;

    /* Check if the owner of the new subnet is in the connection list */

    owner = lookup_node(name);

    if(tunnelserver && owner != myself && owner != c->node) {
        /* in case of tunnelserver, ignore indirect subnet registrations */
        logger(DEBUG_PROTOCOL, LOG_WARNING, "Ignoring indirect %s from %s (%s) for %s", "ADD_SUBNET", c->name, c->hostname, subnetstr);
        return true;
    }

    if(!owner) {
        owner = new_node();
        owner->name = xstrdup(name);
        node_add(owner);
    }

    /* Check if we already know this subnet */

    if(lookup_subnet(owner, &s))
        return true;

    /* If we don't know this subnet, but we are the owner, retaliate with a DEL_SUBNET */

    if(owner == myself) {
        logger(DEBUG_PROTOCOL, LOG_WARNING, "Got %s from %s (%s) for ourself", "ADD_SUBNET", c->name, c->hostname);
        s.owner = myself;
        send_del_subnet(c, &s);
        return true;
    }

    /* In tunnel server mode, we should already know all allowed subnets */

    if(tunnelserver) {
        logger(DEBUG_ALWAYS, LOG_WARNING, "Ignoring unauthorized %s from %s (%s): %s", "ADD_SUBNET", c->name, c->hostname, subnetstr);
        return true;
    }

    /* Ignore if strictsubnets is true, but forward it to others */

    if(strictsubnets) {
        logger(DEBUG_ALWAYS, LOG_WARNING, "Ignoring unauthorized %s from %s (%s): %s", "ADD_SUBNET", c->name, c->hostname, subnetstr);
        if ((!owner->status.reachable) && ((now.tv_sec - owner->last_state_change) >= keylifetime*2)) {
            logger(DEBUG_CONNECTIONS, LOG_INFO, "Not forwarding information about %s to ALL (%lf / %d)", owner->name, difftime(now.tv_sec, owner->last_state_change), keylifetime);
        }
        else {
            forward_request(c, request);
        }
        return true;
    }

    /* If everything is correct, add the subnet to the list of the owner */

    *(new = new_subnet()) = s;
    subnet_add(owner, new);

    if(owner->status.reachable)
        subnet_update(owner, new, true);

    /* Tell the rest */

    if(!tunnelserver)
        forward_request(c, request);

    /* Fast handoff of roaming MAC addresses */

    if(s.type == SUBNET_MAC && owner != myself && (old = lookup_subnet(myself, &s)) && old->expires)
        old->expires = 1;

    return true;
}

bool send_del_subnet(connection_t *c, const subnet_t *s) {
    char netstr[MAXNETSTR];

    if(!net2str(netstr, sizeof netstr, s))
        return false;

    return send_request(c, "%d %x %s %s", DEL_SUBNET, rand(), s->owner->name, netstr);
}

bool del_subnet_h(connection_t *c, const char *request) {
    char subnetstr[MAX_STRING_SIZE];
    char name[MAX_STRING_SIZE];
    node_t *owner;
    subnet_t s, *find;

    memset(&s, 0x0, sizeof(subnet_t));

    if(sscanf(request, "%*d %*x " MAX_STRING " " MAX_STRING, name, subnetstr) != 2) {
        logger(DEBUG_ALWAYS, LOG_ERR, "Got bad %s from %s (%s)", "DEL_SUBNET", c->name, c->hostname);
        return false;
    }

    /* Check if owner name is valid */

    if(!check_id(name)) {
        logger(DEBUG_ALWAYS, LOG_ERR, "Got bad %s from %s (%s): %s", "DEL_SUBNET", c->name, c->hostname, "invalid name");
        return false;
    }

    /* Check if subnet string is valid */

    if(!str2net(&s, subnetstr)) {
        logger(DEBUG_ALWAYS, LOG_ERR, "Got bad %s from %s (%s): %s", "DEL_SUBNET", c->name, c->hostname, "invalid subnet string");
        return false;
    }

    if(seen_request(request))
        return true;

    /* Check if the owner of the subnet being deleted is in the connection list */

    owner = lookup_node(name);

    if(tunnelserver && owner != myself && owner != c->node) {
        /* in case of tunnelserver, ignore indirect subnet deletion */
        logger(DEBUG_PROTOCOL, LOG_WARNING, "Ignoring indirect %s from %s (%s) for %s", "DEL_SUBNET", c->name, c->hostname, subnetstr);
        return true;
    }

    if(!owner) {
        logger(DEBUG_PROTOCOL, LOG_WARNING, "Got %s from %s (%s) for %s which is not in our node tree", "DEL_SUBNET", c->name, c->hostname, name);
        return true;
    }

    /* If everything is correct, delete the subnet from the list of the owner */

    s.owner = owner;

    find = lookup_subnet(owner, &s);

    if(!find) {
        logger(DEBUG_PROTOCOL, LOG_WARNING, "Got %s from %s (%s) for %s which does not appear in its subnet tree", "DEL_SUBNET", c->name, c->hostname, name);
        if(strictsubnets)
            forward_request(c, request);
        return true;
    }

    /* If we are the owner of this subnet, retaliate with an ADD_SUBNET */

    if(owner == myself) {
        logger(DEBUG_PROTOCOL, LOG_WARNING, "Got %s from %s (%s) for ourself", "DEL_SUBNET", c->name, c->hostname);
        send_add_subnet(c, find);
        return true;
    }

    if(tunnelserver)
        return true;

    /* Tell the rest */

    if(!tunnelserver)
        forward_request(c, request);

    if(strictsubnets)
        return true;

    /* Finally, delete it. */

    if(owner->status.reachable)
        subnet_update(owner, find, false);

    subnet_del(owner, find);

    return true;
}
jed1/tinc
src/protocol_subnet.c
C
gpl-2.0
6,984
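Both handlers above parse one wire format, "<type> <nonce> <owner> <subnet>", built by send_request() and read back with a width-bounded sscanf() so the %s conversions cannot overflow their buffers. A self-contained round-trip sketch of just that format; the ADD_SUBNET value and the plain snprintf()/sscanf() stand-ins are illustrative, not tinc's real constants or transport:

#include <stdio.h>
#include <stdlib.h>

#define ADD_SUBNET 10                /* illustrative value, not tinc's enum */
#define MAX_STRING_SIZE 2048
#define MAX_STRING "%2047s"          /* width bound leaves room for the NUL */

int main(void)
{
    char request[256];
    char name[MAX_STRING_SIZE], subnetstr[MAX_STRING_SIZE];

    /* Sender side: "<type> <nonce> <owner> <subnet>" */
    snprintf(request, sizeof request, "%d %x %s %s",
             ADD_SUBNET, (unsigned)rand(), "nodeA", "10.0.0.0/24");

    /* Receiver side: skip type and nonce, read the two bounded strings. */
    if (sscanf(request, "%*d %*x " MAX_STRING " " MAX_STRING,
               name, subnetstr) != 2) {
        fprintf(stderr, "bad request: %s\n", request);
        return 1;
    }
    printf("owner=%s subnet=%s\n", name, subnetstr);
    return 0;
}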
/* * rev-parse.c * * Copyright (C) Linus Torvalds, 2005 */ #include "cache.h" #include "commit.h" #include "refs.h" #include "quote.h" #include "builtin.h" #include "parse-options.h" #include "diff.h" #include "revision.h" #include "split-index.h" #define DO_REVS 1 #define DO_NOREV 2 #define DO_FLAGS 4 #define DO_NONFLAGS 8 static int filter = ~0; static const char *def; #define NORMAL 0 #define REVERSED 1 static int show_type = NORMAL; #define SHOW_SYMBOLIC_ASIS 1 #define SHOW_SYMBOLIC_FULL 2 static int symbolic; static int abbrev; static int abbrev_ref; static int abbrev_ref_strict; static int output_sq; static int stuck_long; static struct string_list *ref_excludes; /* * Some arguments are relevant "revision" arguments, * others are about output format or other details. * This sorts it all out. */ static int is_rev_argument(const char *arg) { static const char *rev_args[] = { "--all", "--bisect", "--dense", "--branches=", "--branches", "--header", "--ignore-missing", "--max-age=", "--max-count=", "--min-age=", "--no-merges", "--min-parents=", "--no-min-parents", "--max-parents=", "--no-max-parents", "--objects", "--objects-edge", "--parents", "--pretty", "--remotes=", "--remotes", "--glob=", "--sparse", "--tags=", "--tags", "--topo-order", "--date-order", "--unpacked", NULL }; const char **p = rev_args; /* accept -<digit>, like traditional "head" */ if ((*arg == '-') && isdigit(arg[1])) return 1; for (;;) { const char *str = *p++; int len; if (!str) return 0; len = strlen(str); if (!strcmp(arg, str) || (str[len-1] == '=' && !strncmp(arg, str, len))) return 1; } } /* Output argument as a string, either SQ or normal */ static void show(const char *arg) { if (output_sq) { int sq = '\'', ch; putchar(sq); while ((ch = *arg++)) { if (ch == sq) fputs("'\\'", stdout); putchar(ch); } putchar(sq); putchar(' '); } else puts(arg); } /* Like show(), but with a negation prefix according to type */ static void show_with_type(int type, const char *arg) { if (type != show_type) putchar('^'); show(arg); } /* Output a revision, only if filter allows it */ static void show_rev(int type, const unsigned char *sha1, const char *name) { if (!(filter & DO_REVS)) return; def = NULL; if ((symbolic || abbrev_ref) && name) { if (symbolic == SHOW_SYMBOLIC_FULL || abbrev_ref) { unsigned char discard[20]; char *full; switch (dwim_ref(name, strlen(name), discard, &full)) { case 0: /* * Not found -- not a ref. We could * emit "name" here, but symbolic-full * users are interested in finding the * refs spelled in full, and they would * need to filter non-refs if we did so. */ break; case 1: /* happy */ if (abbrev_ref) full = shorten_unambiguous_ref(full, abbrev_ref_strict); show_with_type(type, full); break; default: /* ambiguous */ error("refname '%s' is ambiguous", name); break; } free(full); } else { show_with_type(type, name); } } else if (abbrev) show_with_type(type, find_unique_abbrev(sha1, abbrev)); else show_with_type(type, sha1_to_hex(sha1)); } /* Output a flag, only if filter allows it. */ static int show_flag(const char *arg) { if (!(filter & DO_FLAGS)) return 0; if (filter & (is_rev_argument(arg) ? 
DO_REVS : DO_NOREV)) { show(arg); return 1; } return 0; } static int show_default(void) { const char *s = def; if (s) { unsigned char sha1[20]; def = NULL; if (!get_sha1(s, sha1)) { show_rev(NORMAL, sha1, s); return 1; } } return 0; } static int show_reference(const char *refname, const struct object_id *oid, int flag, void *cb_data) { if (ref_excluded(ref_excludes, refname)) return 0; show_rev(NORMAL, oid->hash, refname); return 0; } static int anti_reference(const char *refname, const struct object_id *oid, int flag, void *cb_data) { show_rev(REVERSED, oid->hash, refname); return 0; } static int show_abbrev(const unsigned char *sha1, void *cb_data) { show_rev(NORMAL, sha1, NULL); return 0; } static void show_datestring(const char *flag, const char *datestr) { static char buffer[100]; /* date handling requires both flags and revs */ if ((filter & (DO_FLAGS | DO_REVS)) != (DO_FLAGS | DO_REVS)) return; snprintf(buffer, sizeof(buffer), "%s%lu", flag, approxidate(datestr)); show(buffer); } static int show_file(const char *arg, int output_prefix) { show_default(); if ((filter & (DO_NONFLAGS|DO_NOREV)) == (DO_NONFLAGS|DO_NOREV)) { if (output_prefix) { const char *prefix = startup_info->prefix; show(prefix_filename(prefix, prefix ? strlen(prefix) : 0, arg)); } else show(arg); return 1; } return 0; } static int try_difference(const char *arg) { char *dotdot; unsigned char sha1[20]; unsigned char end[20]; const char *next; const char *this; int symmetric; static const char head_by_default[] = "HEAD"; if (!(dotdot = strstr(arg, ".."))) return 0; next = dotdot + 2; this = arg; symmetric = (*next == '.'); *dotdot = 0; next += symmetric; if (!*next) next = head_by_default; if (dotdot == arg) this = head_by_default; if (this == head_by_default && next == head_by_default && !symmetric) { /* * Just ".."? That is not a range but the * pathspec for the parent directory. */ *dotdot = '.'; return 0; } if (!get_sha1_committish(this, sha1) && !get_sha1_committish(next, end)) { show_rev(NORMAL, end, next); show_rev(symmetric ? NORMAL : REVERSED, sha1, this); if (symmetric) { struct commit_list *exclude; struct commit *a, *b; a = lookup_commit_reference(sha1); b = lookup_commit_reference(end); exclude = get_merge_bases(a, b); while (exclude) { struct commit *commit = pop_commit(&exclude); show_rev(REVERSED, commit->object.oid.hash, NULL); } } *dotdot = '.'; return 1; } *dotdot = '.'; return 0; } static int try_parent_shorthands(const char *arg) { char *dotdot; unsigned char sha1[20]; struct commit *commit; struct commit_list *parents; int parents_only; if ((dotdot = strstr(arg, "^!"))) parents_only = 0; else if ((dotdot = strstr(arg, "^@"))) parents_only = 1; if (!dotdot || dotdot[2]) return 0; *dotdot = 0; if (get_sha1_committish(arg, sha1)) { *dotdot = '^'; return 0; } if (!parents_only) show_rev(NORMAL, sha1, arg); commit = lookup_commit_reference(sha1); for (parents = commit->parents; parents; parents = parents->next) show_rev(parents_only ? 
NORMAL : REVERSED, parents->item->object.oid.hash, arg); *dotdot = '^'; return 1; } static int parseopt_dump(const struct option *o, const char *arg, int unset) { struct strbuf *parsed = o->value; if (unset) strbuf_addf(parsed, " --no-%s", o->long_name); else if (o->short_name && (o->long_name == NULL || !stuck_long)) strbuf_addf(parsed, " -%c", o->short_name); else strbuf_addf(parsed, " --%s", o->long_name); if (arg) { if (!stuck_long) strbuf_addch(parsed, ' '); else if (o->long_name) strbuf_addch(parsed, '='); sq_quote_buf(parsed, arg); } return 0; } static const char *skipspaces(const char *s) { while (isspace(*s)) s++; return s; } static int cmd_parseopt(int argc, const char **argv, const char *prefix) { static int keep_dashdash = 0, stop_at_non_option = 0; static char const * const parseopt_usage[] = { N_("git rev-parse --parseopt [<options>] -- [<args>...]"), NULL }; static struct option parseopt_opts[] = { OPT_BOOL(0, "keep-dashdash", &keep_dashdash, N_("keep the `--` passed as an arg")), OPT_BOOL(0, "stop-at-non-option", &stop_at_non_option, N_("stop parsing after the " "first non-option argument")), OPT_BOOL(0, "stuck-long", &stuck_long, N_("output in stuck long form")), OPT_END(), }; static const char * const flag_chars = "*=?!"; struct strbuf sb = STRBUF_INIT, parsed = STRBUF_INIT; const char **usage = NULL; struct option *opts = NULL; int onb = 0, osz = 0, unb = 0, usz = 0; strbuf_addstr(&parsed, "set --"); argc = parse_options(argc, argv, prefix, parseopt_opts, parseopt_usage, PARSE_OPT_KEEP_DASHDASH); if (argc < 1 || strcmp(argv[0], "--")) usage_with_options(parseopt_usage, parseopt_opts); /* get the usage up to the first line with a -- on it */ for (;;) { if (strbuf_getline(&sb, stdin) == EOF) die("premature end of input"); ALLOC_GROW(usage, unb + 1, usz); if (!strcmp("--", sb.buf)) { if (unb < 1) die("no usage string given before the `--' separator"); usage[unb] = NULL; break; } usage[unb++] = strbuf_detach(&sb, NULL); } /* parse: (<short>|<short>,<long>|<long>)[*=?!]*<arghint>? SP+ <help> */ while (strbuf_getline(&sb, stdin) != EOF) { const char *s; const char *help; struct option *o; if (!sb.len) continue; ALLOC_GROW(opts, onb + 1, osz); memset(opts + onb, 0, sizeof(opts[onb])); o = &opts[onb++]; help = strchr(sb.buf, ' '); if (!help || *sb.buf == ' ') { o->type = OPTION_GROUP; o->help = xstrdup(skipspaces(sb.buf)); continue; } o->type = OPTION_CALLBACK; o->help = xstrdup(skipspaces(help)); o->value = &parsed; o->flags = PARSE_OPT_NOARG; o->callback = &parseopt_dump; /* name(s) */ s = strpbrk(sb.buf, flag_chars); if (s == NULL) s = help; if (s - sb.buf == 1) /* short option only */ o->short_name = *sb.buf; else if (sb.buf[1] != ',') /* long option only */ o->long_name = xmemdupz(sb.buf, s - sb.buf); else { o->short_name = *sb.buf; o->long_name = xmemdupz(sb.buf + 2, s - sb.buf - 2); } /* flags */ while (s < help) { switch (*s++) { case '=': o->flags &= ~PARSE_OPT_NOARG; continue; case '?': o->flags &= ~PARSE_OPT_NOARG; o->flags |= PARSE_OPT_OPTARG; continue; case '!': o->flags |= PARSE_OPT_NONEG; continue; case '*': o->flags |= PARSE_OPT_HIDDEN; continue; } s--; break; } if (s < help) o->argh = xmemdupz(s, help - s); } strbuf_release(&sb); /* put an OPT_END() */ ALLOC_GROW(opts, onb + 1, osz); memset(opts + onb, 0, sizeof(opts[onb])); argc = parse_options(argc, argv, prefix, opts, usage, (keep_dashdash ? PARSE_OPT_KEEP_DASHDASH : 0) | (stop_at_non_option ? 
PARSE_OPT_STOP_AT_NON_OPTION : 0) | PARSE_OPT_SHELL_EVAL); strbuf_addstr(&parsed, " --"); sq_quote_argv(&parsed, argv, 0); puts(parsed.buf); return 0; } static int cmd_sq_quote(int argc, const char **argv) { struct strbuf buf = STRBUF_INIT; if (argc) sq_quote_argv(&buf, argv, 0); printf("%s\n", buf.buf); strbuf_release(&buf); return 0; } static void die_no_single_rev(int quiet) { if (quiet) exit(1); else die("Needed a single revision"); } static const char builtin_rev_parse_usage[] = N_("git rev-parse --parseopt [<options>] -- [<args>...]\n" " or: git rev-parse --sq-quote [<arg>...]\n" " or: git rev-parse [<options>] [<arg>...]\n" "\n" "Run \"git rev-parse --parseopt -h\" for more information on the first usage."); int cmd_rev_parse(int argc, const char **argv, const char *prefix) { int i, as_is = 0, verify = 0, quiet = 0, revs_count = 0, type = 0; int did_repo_setup = 0; int has_dashdash = 0; int output_prefix = 0; unsigned char sha1[20]; unsigned int flags = 0; const char *name = NULL; struct object_context unused; if (argc > 1 && !strcmp("--parseopt", argv[1])) return cmd_parseopt(argc - 1, argv + 1, prefix); if (argc > 1 && !strcmp("--sq-quote", argv[1])) return cmd_sq_quote(argc - 2, argv + 2); if (argc > 1 && !strcmp("-h", argv[1])) usage(builtin_rev_parse_usage); for (i = 1; i < argc; i++) { if (!strcmp(argv[i], "--")) { has_dashdash = 1; break; } } /* No options; just report on whether we're in a git repo or not. */ if (argc == 1) { setup_git_directory(); git_config(git_default_config, NULL); return 0; } for (i = 1; i < argc; i++) { const char *arg = argv[i]; if (!strcmp(arg, "--local-env-vars")) { int i; for (i = 0; local_repo_env[i]; i++) printf("%s\n", local_repo_env[i]); continue; } if (!strcmp(arg, "--resolve-git-dir")) { const char *gitdir = argv[++i]; if (!gitdir) die("--resolve-git-dir requires an argument"); gitdir = resolve_gitdir(gitdir); if (!gitdir) die("not a gitdir '%s'", argv[i]); puts(gitdir); continue; } /* The rest of the options require a git repository. */ if (!did_repo_setup) { prefix = setup_git_directory(); git_config(git_default_config, NULL); did_repo_setup = 1; } if (!strcmp(arg, "--git-path")) { if (!argv[i + 1]) die("--git-path requires an argument"); puts(git_path("%s", argv[i + 1])); i++; continue; } if (as_is) { if (show_file(arg, output_prefix) && as_is < 2) verify_filename(prefix, arg, 0); continue; } if (!strcmp(arg,"-n")) { if (++i >= argc) die("-n requires an argument"); if ((filter & DO_FLAGS) && (filter & DO_REVS)) { show(arg); show(argv[i]); } continue; } if (starts_with(arg, "-n")) { if ((filter & DO_FLAGS) && (filter & DO_REVS)) show(arg); continue; } if (*arg == '-') { if (!strcmp(arg, "--")) { as_is = 2; /* Pass on the "--" if we show anything but files.. 
*/ if (filter & (DO_FLAGS | DO_REVS)) show_file(arg, 0); continue; } if (!strcmp(arg, "--default")) { def = argv[++i]; if (!def) die("--default requires an argument"); continue; } if (!strcmp(arg, "--prefix")) { prefix = argv[++i]; if (!prefix) die("--prefix requires an argument"); startup_info->prefix = prefix; output_prefix = 1; continue; } if (!strcmp(arg, "--revs-only")) { filter &= ~DO_NOREV; continue; } if (!strcmp(arg, "--no-revs")) { filter &= ~DO_REVS; continue; } if (!strcmp(arg, "--flags")) { filter &= ~DO_NONFLAGS; continue; } if (!strcmp(arg, "--no-flags")) { filter &= ~DO_FLAGS; continue; } if (!strcmp(arg, "--verify")) { filter &= ~(DO_FLAGS|DO_NOREV); verify = 1; continue; } if (!strcmp(arg, "--quiet") || !strcmp(arg, "-q")) { quiet = 1; flags |= GET_SHA1_QUIETLY; continue; } if (!strcmp(arg, "--short") || starts_with(arg, "--short=")) { filter &= ~(DO_FLAGS|DO_NOREV); verify = 1; abbrev = DEFAULT_ABBREV; if (arg[7] == '=') abbrev = strtoul(arg + 8, NULL, 10); if (abbrev < MINIMUM_ABBREV) abbrev = MINIMUM_ABBREV; else if (40 <= abbrev) abbrev = 40; continue; } if (!strcmp(arg, "--sq")) { output_sq = 1; continue; } if (!strcmp(arg, "--not")) { show_type ^= REVERSED; continue; } if (!strcmp(arg, "--symbolic")) { symbolic = SHOW_SYMBOLIC_ASIS; continue; } if (!strcmp(arg, "--symbolic-full-name")) { symbolic = SHOW_SYMBOLIC_FULL; continue; } if (starts_with(arg, "--abbrev-ref") && (!arg[12] || arg[12] == '=')) { abbrev_ref = 1; abbrev_ref_strict = warn_ambiguous_refs; if (arg[12] == '=') { if (!strcmp(arg + 13, "strict")) abbrev_ref_strict = 1; else if (!strcmp(arg + 13, "loose")) abbrev_ref_strict = 0; else die("unknown mode for %s", arg); } continue; } if (!strcmp(arg, "--all")) { for_each_ref(show_reference, NULL); continue; } if (starts_with(arg, "--disambiguate=")) { for_each_abbrev(arg + 15, show_abbrev, NULL); continue; } if (!strcmp(arg, "--bisect")) { for_each_ref_in("refs/bisect/bad", show_reference, NULL); for_each_ref_in("refs/bisect/good", anti_reference, NULL); continue; } if (starts_with(arg, "--branches=")) { for_each_glob_ref_in(show_reference, arg + 11, "refs/heads/", NULL); clear_ref_exclusion(&ref_excludes); continue; } if (!strcmp(arg, "--branches")) { for_each_branch_ref(show_reference, NULL); clear_ref_exclusion(&ref_excludes); continue; } if (starts_with(arg, "--tags=")) { for_each_glob_ref_in(show_reference, arg + 7, "refs/tags/", NULL); clear_ref_exclusion(&ref_excludes); continue; } if (!strcmp(arg, "--tags")) { for_each_tag_ref(show_reference, NULL); clear_ref_exclusion(&ref_excludes); continue; } if (starts_with(arg, "--glob=")) { for_each_glob_ref(show_reference, arg + 7, NULL); clear_ref_exclusion(&ref_excludes); continue; } if (starts_with(arg, "--remotes=")) { for_each_glob_ref_in(show_reference, arg + 10, "refs/remotes/", NULL); clear_ref_exclusion(&ref_excludes); continue; } if (!strcmp(arg, "--remotes")) { for_each_remote_ref(show_reference, NULL); clear_ref_exclusion(&ref_excludes); continue; } if (starts_with(arg, "--exclude=")) { add_ref_exclusion(&ref_excludes, arg + 10); continue; } if (!strcmp(arg, "--show-toplevel")) { const char *work_tree = get_git_work_tree(); if (work_tree) puts(work_tree); continue; } if (!strcmp(arg, "--show-prefix")) { if (prefix) puts(prefix); else putchar('\n'); continue; } if (!strcmp(arg, "--show-cdup")) { const char *pfx = prefix; if (!is_inside_work_tree()) { const char *work_tree = get_git_work_tree(); if (work_tree) printf("%s\n", work_tree); continue; } while (pfx) { pfx = strchr(pfx, '/'); if (pfx) { 
pfx++; printf("../"); } } putchar('\n'); continue; } if (!strcmp(arg, "--git-dir")) { const char *gitdir = getenv(GIT_DIR_ENVIRONMENT); char *cwd; int len; if (gitdir) { puts(gitdir); continue; } if (!prefix) { puts(".git"); continue; } cwd = xgetcwd(); len = strlen(cwd); printf("%s%s.git\n", cwd, len && cwd[len-1] != '/' ? "/" : ""); free(cwd); continue; } if (!strcmp(arg, "--git-common-dir")) { const char *pfx = prefix ? prefix : ""; puts(prefix_filename(pfx, strlen(pfx), get_git_common_dir())); continue; } if (!strcmp(arg, "--is-inside-git-dir")) { printf("%s\n", is_inside_git_dir() ? "true" : "false"); continue; } if (!strcmp(arg, "--is-inside-work-tree")) { printf("%s\n", is_inside_work_tree() ? "true" : "false"); continue; } if (!strcmp(arg, "--is-bare-repository")) { printf("%s\n", is_bare_repository() ? "true" : "false"); continue; } if (!strcmp(arg, "--shared-index-path")) { if (read_cache() < 0) die(_("Could not read the index")); if (the_index.split_index) { const unsigned char *sha1 = the_index.split_index->base_sha1; puts(git_path("sharedindex.%s", sha1_to_hex(sha1))); } continue; } if (starts_with(arg, "--since=")) { show_datestring("--max-age=", arg+8); continue; } if (starts_with(arg, "--after=")) { show_datestring("--max-age=", arg+8); continue; } if (starts_with(arg, "--before=")) { show_datestring("--min-age=", arg+9); continue; } if (starts_with(arg, "--until=")) { show_datestring("--min-age=", arg+8); continue; } if (show_flag(arg) && verify) die_no_single_rev(quiet); continue; } /* Not a flag argument */ if (try_difference(arg)) continue; if (try_parent_shorthands(arg)) continue; name = arg; type = NORMAL; if (*arg == '^') { name++; type = REVERSED; } if (!get_sha1_with_context(name, flags, sha1, &unused)) { if (verify) revs_count++; else show_rev(type, sha1, name); continue; } if (verify) die_no_single_rev(quiet); if (has_dashdash) die("bad revision '%s'", arg); as_is = 1; if (!show_file(arg, output_prefix)) continue; verify_filename(prefix, arg, 1); } if (verify) { if (revs_count == 1) { show_rev(type, sha1, name); return 0; } else if (revs_count == 0 && show_default()) return 0; die_no_single_rev(quiet); } else show_default(); return 0; }
sunny256/git
builtin/rev-parse.c
C
gpl-2.0
20,118
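The core of is_rev_argument() above is its table convention: an entry ending in '=' matches as a prefix, any other entry must match exactly, and -<digit> is always accepted for the traditional "head" shorthand. A standalone sketch of the same rule with a deliberately shortened table:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Same matching rule as is_rev_argument(): exact match, prefix match when
 * the table entry ends in '=', and -<digit> always counts as a revision. */
static int matches_rev_arg(const char *arg)
{
    static const char *rev_args[] = { "--all", "--max-count=", "--tags", NULL };
    const char **p;

    if (arg[0] == '-' && isdigit((unsigned char)arg[1]))
        return 1;
    for (p = rev_args; *p; p++) {
        size_t len = strlen(*p);
        if (!strcmp(arg, *p) ||
            ((*p)[len - 1] == '=' && !strncmp(arg, *p, len)))
            return 1;
    }
    return 0;
}

int main(void)
{
    const char *tests[] = { "--all", "--max-count=5", "--max-count", "-3", "--foo" };
    size_t i;

    for (i = 0; i < sizeof tests / sizeof *tests; i++)
        printf("%-14s -> %s\n", tests[i], matches_rev_arg(tests[i]) ? "rev" : "other");
    return 0;
}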
/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ /*** This file is part of systemd. Copyright 2010 Lennart Poettering systemd is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. systemd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with systemd; If not, see <http://www.gnu.org/licenses/>. ***/ #include <errno.h> #include <sys/mount.h> #include <string.h> #include <stdio.h> #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> #include <sched.h> #include <sys/syscall.h> #include <limits.h> #include <linux/fs.h> #include "strv.h" #include "util.h" #include "path-util.h" #include "namespace.h" #include "missing.h" #include "execute.h" typedef enum MountMode { /* This is ordered by priority! */ INACCESSIBLE, READONLY, PRIVATE_TMP, PRIVATE_VAR_TMP, READWRITE } MountMode; typedef struct BindMount { const char *path; MountMode mode; bool done; bool ignore; } BindMount; static int append_mounts(BindMount **p, char **strv, MountMode mode) { char **i; STRV_FOREACH(i, strv) { (*p)->ignore = false; if ((mode == INACCESSIBLE || mode == READONLY) && (*i)[0] == '-') { (*p)->ignore = true; (*i)++; } if (!path_is_absolute(*i)) return -EINVAL; (*p)->path = *i; (*p)->mode = mode; (*p)++; } return 0; } static int mount_path_compare(const void *a, const void *b) { const BindMount *p = a, *q = b; if (path_equal(p->path, q->path)) { /* If the paths are equal, check the mode */ if (p->mode < q->mode) return -1; if (p->mode > q->mode) return 1; return 0; } /* If the paths are not equal, then order prefixes first */ if (path_startswith(p->path, q->path)) return 1; if (path_startswith(q->path, p->path)) return -1; return 0; } static void drop_duplicates(BindMount *m, unsigned *n) { BindMount *f, *t, *previous; assert(m); assert(n); for (f = m, t = m, previous = NULL; f < m+*n; f++) { /* The first one wins */ if (previous && path_equal(f->path, previous->path)) continue; t->path = f->path; t->mode = f->mode; previous = t; t++; } *n = t - m; } static int apply_mount( BindMount *m, const char *tmp_dir, const char *var_tmp_dir) { const char *what; int r; assert(m); switch (m->mode) { case INACCESSIBLE: what = "/run/systemd/inaccessible"; break; case READONLY: case READWRITE: what = m->path; break; case PRIVATE_TMP: what = tmp_dir; break; case PRIVATE_VAR_TMP: what = var_tmp_dir; break; default: assert_not_reached("Unknown mode"); } assert(what); r = mount(what, m->path, NULL, MS_BIND|MS_REC, NULL); if (r >= 0) log_debug("Successfully mounted %s to %s", what, m->path); else if (m->ignore && errno == ENOENT) r = 0; return r; } static int make_read_only(BindMount *m) { int r; assert(m); if (m->mode != INACCESSIBLE && m->mode != READONLY) return 0; r = mount(NULL, m->path, NULL, MS_BIND|MS_REMOUNT|MS_RDONLY|MS_REC, NULL); if (r < 0 && !(m->ignore && errno == ENOENT)) return -errno; return 0; } int setup_tmpdirs(const char *unit_id, char **tmp_dir, char **var_tmp_dir) { int r = 0; _cleanup_free_ char *tmp = NULL, *var = NULL; assert(tmp_dir); assert(var_tmp_dir); tmp = strjoin("/tmp/systemd-", unit_id, "-XXXXXXX", NULL); var = strjoin("/var/tmp/systemd-", unit_id, 
"-XXXXXXX", NULL); r = create_tmp_dir(tmp, tmp_dir); if (r < 0) return r; r = create_tmp_dir(var, var_tmp_dir); if (r == 0) return 0; /* failure */ rmdir(*tmp_dir); rmdir(tmp); free(*tmp_dir); *tmp_dir = NULL; return r; } int setup_namespace(char** read_write_dirs, char** read_only_dirs, char** inaccessible_dirs, char* tmp_dir, char* var_tmp_dir, bool private_tmp, unsigned mount_flags) { unsigned n = strv_length(read_write_dirs) + strv_length(read_only_dirs) + strv_length(inaccessible_dirs) + (private_tmp ? 2 : 0); BindMount *m, *mounts = NULL; int r = 0; if (!mount_flags) mount_flags = MS_SHARED; if (unshare(CLONE_NEWNS) < 0) return -errno; if (n) { m = mounts = (BindMount *) alloca(n * sizeof(BindMount)); if ((r = append_mounts(&m, read_write_dirs, READWRITE)) < 0 || (r = append_mounts(&m, read_only_dirs, READONLY)) < 0 || (r = append_mounts(&m, inaccessible_dirs, INACCESSIBLE)) < 0) return r; if (private_tmp) { m->path = "/tmp"; m->mode = PRIVATE_TMP; m++; m->path = "/var/tmp"; m->mode = PRIVATE_VAR_TMP; m++; } assert(mounts + n == m); qsort(mounts, n, sizeof(BindMount), mount_path_compare); drop_duplicates(mounts, &n); } /* Remount / as SLAVE so that nothing now mounted in the namespace shows up in the parent */ if (mount(NULL, "/", NULL, MS_SLAVE|MS_REC, NULL) < 0) return -errno; for (m = mounts; m < mounts + n; ++m) { r = apply_mount(m, tmp_dir, var_tmp_dir); if (r < 0) goto undo_mounts; } for (m = mounts; m < mounts + n; ++m) { r = make_read_only(m); if (r < 0) goto undo_mounts; } /* Remount / as the desired mode */ if (mount(NULL, "/", NULL, mount_flags | MS_REC, NULL) < 0) { r = -errno; goto undo_mounts; } return 0; undo_mounts: for (m = mounts; m < mounts + n; ++m) { if (m->done) umount2(m->path, MNT_DETACH); } return r; }
freedesktop-unofficial-mirror/systemd__systemd-stable
src/core/namespace.c
C
gpl-2.0
7,610
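mount_path_compare() above sorts the bind-mount list so a parent directory always comes before paths beneath it, which lets setup_namespace() apply mounts parents-first and lets drop_duplicates() discard repeats in a single linear walk. A runnable sketch of that ordering; path_below() is a simplified stand-in for systemd's component-wise path_startswith(), and unrelated paths fall back to plain strcmp() so the comparator stays a total order:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for path_startswith(): true if p lies below q. */
static int path_below(const char *p, const char *q)
{
    size_t l = strlen(q);
    return strncmp(p, q, l) == 0 && (p[l] == '/' || p[l] == '\0');
}

/* Same idea as mount_path_compare(): a parent directory sorts before
 * anything underneath it, so mounts can be applied parents-first. */
static int cmp(const void *a, const void *b)
{
    const char *p = *(const char *const *)a;
    const char *q = *(const char *const *)b;

    if (!strcmp(p, q))
        return 0;
    if (path_below(p, q))
        return 1;               /* p is under q, so q comes first */
    if (path_below(q, p))
        return -1;
    return strcmp(p, q);        /* unrelated: keep the order total */
}

int main(void)
{
    const char *paths[] = { "/var/tmp", "/var", "/tmp", "/var/tmp/foo" };
    size_t i, n = sizeof paths / sizeof *paths;

    qsort(paths, n, sizeof *paths, cmp);
    for (i = 0; i < n; i++)
        puts(paths[i]);         /* /tmp, /var, /var/tmp, /var/tmp/foo */
    return 0;
}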
/** * TODO: legal stuff * * purple * * Purple is the legal property of its developers, whose names are too numerous * to list here. Please refer to the COPYRIGHT file distributed with this * source distribution. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ #include <glib/gstdio.h> #include "prpltwtr.h" typedef struct { PurpleAccount *account; gchar *username; } TwitterAccountUserNameChange; static GHashTable *oauth_result_to_hashtable(const gchar * txt); static void account_mismatch_screenname_change_cancel_cb(TwitterAccountUserNameChange * change, gint action_id); static void account_mismatch_screenname_change_ok_cb(TwitterAccountUserNameChange * change, gint action_id); static void account_username_change_verify(PurpleAccount * account, const gchar * username); static void verify_credentials_success_cb(TwitterRequestor * r, xmlnode * node, gpointer user_data); static void verify_credentials_error_cb(TwitterRequestor * r, const TwitterRequestErrorData * error_data, gpointer user_data); static void oauth_request_token_success_cb(TwitterRequestor * r, const gchar * response, gpointer user_data); static void oauth_request_token_error_cb(TwitterRequestor * r, const TwitterRequestErrorData * error_data, gpointer user_data); static const gchar *account_get_oauth_access_token(PurpleAccount * account); static void account_set_oauth_access_token(PurpleAccount * account, const gchar * oauth_token); static const gchar *account_get_oauth_access_token_secret(PurpleAccount * account); static void account_set_oauth_access_token_secret(PurpleAccount * account, const gchar * oauth_token); static void oauth_request_pin_ok(PurpleAccount * account, const gchar * pin); static void oauth_request_pin_cancel(PurpleAccount * account, const gchar * pin); static void oauth_access_token_success_cb(TwitterRequestor * r, const gchar * response, gpointer user_data); static void oauth_access_token_error_cb(TwitterRequestor * r, const TwitterRequestErrorData * error_data, gpointer user_data); static const gchar *twitter_option_url_oauth_authorize(PurpleAccount * account); static const gchar *twitter_option_url_oauth_access_token(PurpleAccount * account); static const gchar *twitter_option_url_oauth_request_token(PurpleAccount * account); static const gchar *twitter_oauth_create_url(PurpleAccount * account, const gchar * endpoint); void prpltwtr_auth_invalidate_token(PurpleAccount * account) { account_set_oauth_access_token(account, NULL); account_set_oauth_access_token_secret(account, NULL); } void prpltwtr_auth_pre_send_auth_basic(TwitterRequestor * r, gboolean * post, const char **url, TwitterRequestParams ** params, gchar *** header_fields, gpointer * requestor_data) { const char *pass = purple_connection_get_password(purple_account_get_connection(r->account)); char **userparts = g_strsplit(purple_account_get_username(r->account), "@", 2); const char *sn = 
userparts[0]; char *auth_text = g_strdup_printf("%s:%s", sn, pass); char *auth_text_b64 = purple_base64_encode((guchar *) auth_text, strlen(auth_text)); *header_fields = g_new(gchar *, 2); (*header_fields)[0] = g_strdup_printf("Authorization: Basic %s", auth_text_b64); (*header_fields)[1] = NULL; g_strfreev(userparts); g_free(auth_text); g_free(auth_text_b64); } void prpltwtr_auth_post_send_auth_basic(TwitterRequestor * r, gboolean * post, const char **url, TwitterRequestParams ** params, gchar *** header_fields, gpointer * requestor_data) { g_strfreev(*header_fields); } const gchar *prpltwtr_auth_get_oauth_key(PurpleAccount * account) { if (!strcmp(purple_account_get_protocol_id(account), TWITTER_PROTOCOL_ID)) { return TWITTER_OAUTH_KEY; } else { const gchar *key = purple_account_get_string(account, TWITTER_PREF_CONSUMER_KEY, ""); if (!strcmp(key, "")) { purple_debug_error(purple_account_get_protocol_id(account), "No Consumer key specified!\n"); } return key; } } const gchar *prpltwtr_auth_get_oauth_secret(PurpleAccount * account) { if (!strcmp(purple_account_get_protocol_id(account), TWITTER_PROTOCOL_ID)) { return TWITTER_OAUTH_SECRET; } else { const gchar *secret = purple_account_get_string(account, TWITTER_PREF_CONSUMER_SECRET, ""); if (!strcmp(secret, "")) { purple_debug_error(purple_account_get_protocol_id(account), "No Consumer secret specified!\n"); } return secret; } } void prpltwtr_auth_pre_send_oauth(TwitterRequestor * r, gboolean * post, const char **url, TwitterRequestParams ** params, gchar *** header_fields, gpointer * requestor_data) { PurpleAccount *account = r->account; PurpleConnection *gc = purple_account_get_connection(account); TwitterConnectionData *twitter = gc->proto_data; gchar *signing_key = g_strdup_printf("%s&%s", prpltwtr_auth_get_oauth_secret(account), twitter->oauth_token_secret ? 
twitter->oauth_token_secret : ""); TwitterRequestParams *oauth_params = twitter_request_params_add_oauth_params(account, *post, *url, *params, twitter->oauth_token, signing_key); if (oauth_params == NULL) { TwitterRequestErrorData *error = g_new0(TwitterRequestErrorData, 1); gchar *error_msg = g_strdup(_("Could not sign request")); error->type = TWITTER_REQUEST_ERROR_NO_OAUTH; error->message = error_msg; g_free(error_msg); g_free(error); g_free(signing_key); //TODO: error if couldn't sign return; } g_free(signing_key); *requestor_data = *params; *params = oauth_params; } void prpltwtr_auth_oauth_login(PurpleAccount * account, TwitterConnectionData * twitter) { const gchar *oauth_token; const gchar *oauth_token_secret; oauth_token = account_get_oauth_access_token(account); oauth_token_secret = account_get_oauth_access_token_secret(account); if (oauth_token && oauth_token_secret) { twitter->oauth_token = g_strdup(oauth_token); twitter->oauth_token_secret = g_strdup(oauth_token_secret); twitter_api_verify_credentials(purple_account_get_requestor(account), verify_credentials_success_cb, verify_credentials_error_cb, NULL); } else { twitter_send_request(purple_account_get_requestor(account), FALSE, twitter_option_url_oauth_request_token(account), NULL, oauth_request_token_success_cb, oauth_request_token_error_cb, NULL); } } void prpltwtr_auth_post_send_oauth(TwitterRequestor * r, gboolean * post, const char **url, TwitterRequestParams ** params, gchar *** header_fields, gpointer * requestor_data) { twitter_request_params_free(*params); *params = (TwitterRequestParams *) * requestor_data; } static void oauth_access_token_success_cb(TwitterRequestor * r, const gchar * response, gpointer user_data) { PurpleAccount *account = r->account; PurpleConnection *gc = purple_account_get_connection(account); TwitterConnectionData *twitter = gc->proto_data; GHashTable *results = oauth_result_to_hashtable(response); const gchar *oauth_token = g_hash_table_lookup(results, "oauth_token"); const gchar *oauth_token_secret = g_hash_table_lookup(results, "oauth_token_secret"); const gchar *response_screen_name = g_hash_table_lookup(results, "screen_name"); if (oauth_token && oauth_token_secret) { if (twitter->oauth_token) g_free(twitter->oauth_token); if (twitter->oauth_token_secret) g_free(twitter->oauth_token_secret); twitter->oauth_token = g_strdup(oauth_token); twitter->oauth_token_secret = g_strdup(oauth_token_secret); account_set_oauth_access_token(account, oauth_token); account_set_oauth_access_token_secret(account, oauth_token_secret); //FIXME: set this to be case insensitive { char **userparts = g_strsplit(purple_account_get_username(r->account), "@", 2); const char *username = userparts[0]; if (response_screen_name && !twitter_usernames_match(account, response_screen_name, username)) { account_username_change_verify(account, response_screen_name); } else { prpltwtr_verify_connection(account); } g_strfreev(userparts); } } else { purple_debug_error(purple_account_get_protocol_id(account), "Unknown error receiving access token: %s\n", response); prpltwtr_disconnect(account, _("Unknown response getting access token")); } } static void oauth_access_token_error_cb(TwitterRequestor * r, const TwitterRequestErrorData * error_data, gpointer user_data) { gchar *error = g_strdup_printf(_("Error verifying PIN: %s"), error_data->message ? 
error_data->message : _("unknown error")); prpltwtr_disconnect(r->account, error); g_free(error); } static GHashTable *oauth_result_to_hashtable(const gchar * txt) { gchar **pieces = g_strsplit(txt, "&", 0); GHashTable *results = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free); gchar **p; for (p = pieces; *p; p++) { gchar *equalpos = strchr(*p, '='); if (equalpos) { equalpos[0] = '\0'; g_hash_table_replace(results, g_strdup(*p), g_strdup(equalpos + 1)); } } g_strfreev(pieces); return results; } static void account_mismatch_screenname_change_cancel_cb(TwitterAccountUserNameChange * change, gint action_id) { PurpleAccount *account = change->account; prpltwtr_auth_invalidate_token(account); g_free(change->username); g_free(change); prpltwtr_disconnect(account, _("Username mismatch")); } static void account_mismatch_screenname_change_ok_cb(TwitterAccountUserNameChange * change, gint action_id) { PurpleAccount *account = change->account; purple_account_set_username(account, change->username); g_free(change->username); g_free(change); prpltwtr_verify_connection(account); } static void account_username_change_verify(PurpleAccount * account, const gchar * username) { PurpleConnection *gc = purple_account_get_connection(account); gchar *secondary = g_strdup_printf(_("Do you wish to change the name on this account to %s?"), username); /* allocate the struct itself, not a pointer's worth of bytes */ TwitterAccountUserNameChange *change_data = (TwitterAccountUserNameChange *) g_new0(TwitterAccountUserNameChange, 1); change_data->account = account; change_data->username = g_strdup(username); purple_request_action(gc, _("Mismatched Screen Names"), _("Authorized screen name does not match with account screen name"), secondary, 0, account, NULL, NULL, change_data, 2, _("Cancel"), account_mismatch_screenname_change_cancel_cb, _("Yes"), account_mismatch_screenname_change_ok_cb, NULL); g_free(secondary); } static void verify_credentials_success_cb(TwitterRequestor * r, xmlnode * node, gpointer user_data) { PurpleAccount *account = r->account; TwitterUserTweet *user_tweet = twitter_verify_credentials_parse(node); char **userparts = g_strsplit(purple_account_get_username(r->account), "@", 2); const char *username = userparts[0]; if (!user_tweet || !user_tweet->screen_name) { prpltwtr_disconnect(account, _("Could not verify credentials")); } else if (!twitter_usernames_match(account, user_tweet->screen_name, username)) { account_username_change_verify(account, user_tweet->screen_name); } else { prpltwtr_verify_connection(account); } g_strfreev(userparts); twitter_user_tweet_free(user_tweet); } static void verify_credentials_error_cb(TwitterRequestor * r, const TwitterRequestErrorData * error_data, gpointer user_data) { gchar *error = g_strdup_printf(_("Error verifying credentials: %s"), error_data->message ?
error_data->message : _("unknown error")); switch (error_data->type) { case TWITTER_REQUEST_ERROR_SERVER: case TWITTER_REQUEST_ERROR_CANCELED: prpltwtr_recoverable_disconnect(r->account, error); break; case TWITTER_REQUEST_ERROR_NONE: case TWITTER_REQUEST_ERROR_TWITTER_GENERAL: case TWITTER_REQUEST_ERROR_INVALID_XML: case TWITTER_REQUEST_ERROR_NO_OAUTH: case TWITTER_REQUEST_ERROR_UNAUTHORIZED: default: prpltwtr_disconnect(r->account, error); break; } g_free(error); } static void oauth_request_token_success_cb(TwitterRequestor * r, const gchar * response, gpointer user_data) { PurpleAccount *account = r->account; PurpleConnection *gc = purple_account_get_connection(account); TwitterConnectionData *twitter = gc->proto_data; GHashTable *results = oauth_result_to_hashtable(response); const gchar *oauth_token = g_hash_table_lookup(results, "oauth_token"); const gchar *oauth_token_secret = g_hash_table_lookup(results, "oauth_token_secret"); if (oauth_token && oauth_token_secret) { /* http://api.twitter.com/oauth/authorize */ gchar *msg = g_strdup_printf("http://%s?oauth_token=%s", twitter_option_url_oauth_authorize(account), purple_url_encode(oauth_token)); gchar *prompt = g_strdup_printf("%s %s", _("Please enter PIN for"), purple_account_get_username(account)); twitter->oauth_token = g_strdup(oauth_token); twitter->oauth_token_secret = g_strdup(oauth_token_secret); purple_notify_uri(twitter, msg); purple_request_input(twitter, _("OAuth Authentication"), //title prompt, //primary msg, //secondary NULL, //default FALSE, //multiline, FALSE, //password NULL, //hint _("Submit"), //ok text G_CALLBACK(oauth_request_pin_ok), _("Cancel"), G_CALLBACK(oauth_request_pin_cancel), account, NULL, NULL, account); g_free(msg); g_free(prompt); } else { purple_debug_error(purple_account_get_protocol_id(account), "Unknown error receiving request token: %s\n", response); prpltwtr_disconnect(account, _("Invalid response receiving request token")); } g_hash_table_destroy(results); } static void oauth_request_token_error_cb(TwitterRequestor * r, const TwitterRequestErrorData * error_data, gpointer user_data) { gchar *error = g_strdup_printf(_("Error receiving request token: %s"), error_data->message ? 
error_data->message : _("unknown error")); prpltwtr_disconnect(r->account, error); g_free(error); } static const gchar *account_get_oauth_access_token(PurpleAccount * account) { return purple_account_get_string(account, "oauth_token", NULL); } static void account_set_oauth_access_token(PurpleAccount * account, const gchar * oauth_token) { purple_account_set_string(account, "oauth_token", oauth_token); } static const gchar *account_get_oauth_access_token_secret(PurpleAccount * account) { return purple_account_get_string(account, "oauth_token_secret", NULL); } static void account_set_oauth_access_token_secret(PurpleAccount * account, const gchar * oauth_token) { purple_account_set_string(account, "oauth_token_secret", oauth_token); } static void oauth_request_pin_ok(PurpleAccount * account, const gchar * pin) { TwitterRequestParams *params = twitter_request_params_new(); twitter_request_params_add(params, twitter_request_param_new("oauth_verifier", pin)); twitter_send_request(purple_account_get_requestor(account), FALSE, twitter_option_url_oauth_access_token(account), params, oauth_access_token_success_cb, oauth_access_token_error_cb, NULL); twitter_request_params_free(params); } static void oauth_request_pin_cancel(PurpleAccount * account, const gchar * pin) { prpltwtr_disconnect(account, _("Canceled PIN entry")); } static const gchar *twitter_option_url_oauth_authorize(PurpleAccount * account) { return twitter_oauth_create_url(account, "/authorize"); } static const gchar *twitter_option_url_oauth_request_token(PurpleAccount * account) { return twitter_oauth_create_url(account, "/request_token"); } static const gchar *twitter_option_url_oauth_access_token(PurpleAccount * account) { return twitter_oauth_create_url(account, "/access_token"); } static const gchar *twitter_oauth_create_url(PurpleAccount * account, const gchar * endpoint) { static char url[1024]; char host[256]; g_return_val_if_fail(endpoint != NULL && endpoint[0] != '\0', NULL); if (!strcmp(purple_account_get_protocol_id(account), TWITTER_PROTOCOL_ID)) { snprintf(host, 255, "api.twitter.com/oauth"); } else { snprintf(host, 255, "%s/oauth", purple_account_get_string(account, TWITTER_PREF_API_BASE, STATUSNET_PREF_API_BASE_DEFAULT)); } snprintf(url, 1023, "%s%s%s", host, host[strlen(host) - 1] == '/' || endpoint[0] == '/' ? "" : "/", host[strlen(host) - 1] == '/' && endpoint[0] == '/' ? endpoint + 1 : endpoint); return url; }
dmoonfire/prpltwtr-old
src/prpltwtr/prpltwtr_auth.c
C
gpl-2.0
18,305
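oauth_result_to_hashtable() above turns Twitter's form-encoded token response into a key/value table by splitting on '&' and then on the first '='. A self-contained GLib sketch of the same parse (parse_form() is a made-up name; build with something like gcc demo.c $(pkg-config --cflags --libs glib-2.0)):

#include <glib.h>
#include <stdio.h>
#include <string.h>

/* Same approach as oauth_result_to_hashtable(): split on '&', then split
 * each piece at its first '=' into an owned key/value pair. */
static GHashTable *parse_form(const gchar *txt)
{
    GHashTable *results = g_hash_table_new_full(g_str_hash, g_str_equal,
                                                g_free, g_free);
    gchar **pieces = g_strsplit(txt, "&", 0);
    gchar **p;

    for (p = pieces; *p; p++) {
        gchar *equalpos = strchr(*p, '=');
        if (equalpos) {
            *equalpos = '\0';
            g_hash_table_replace(results, g_strdup(*p), g_strdup(equalpos + 1));
        }
    }
    g_strfreev(pieces);
    return results;
}

int main(void)
{
    GHashTable *r = parse_form("oauth_token=abc&oauth_token_secret=xyz");

    printf("token=%s secret=%s\n",
           (char *)g_hash_table_lookup(r, "oauth_token"),
           (char *)g_hash_table_lookup(r, "oauth_token_secret"));
    g_hash_table_destroy(r);
    return 0;
}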
/* * net/dccp/ipv4.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/dccp.h> #include <linux/icmp.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/random.h> #include <net/icmp.h> #include <net/inet_common.h> #include <net/inet_hashtables.h> #include <net/inet_sock.h> #include <net/protocol.h> #include <net/sock.h> #include <net/timewait_sock.h> #include <net/tcp_states.h> #include <net/xfrm.h> #include "ackvec.h" #include "ccid.h" #include "dccp.h" #include "feat.h" /* * This is the global socket data structure used for responding to * the Out-of-the-blue (OOTB) packets. A control sock will be created * for this socket at the initialization time. */ static struct socket *dccp_v4_ctl_socket; static int dccp_v4_get_port(struct sock *sk, const unsigned short snum) { return inet_csk_get_port(&dccp_hashinfo, sk, snum, inet_csk_bind_conflict); } int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct dccp_sock *dp = dccp_sk(sk); const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct rtable *rt; __be32 daddr, nexthop; int tmp; int err; dp->dccps_role = DCCP_ROLE_CLIENT; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; nexthop = daddr = usin->sin_addr.s_addr; if (inet->opt != NULL && inet->opt->srr) { if (daddr == 0) return -EINVAL; nexthop = inet->opt->faddr; } tmp = ip_route_connect(&rt, nexthop, inet->saddr, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, IPPROTO_DCCP, inet->sport, usin->sin_port, sk); if (tmp < 0) return tmp; if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); return -ENETUNREACH; } if (inet->opt == NULL || !inet->opt->srr) daddr = rt->rt_dst; if (inet->saddr == 0) inet->saddr = rt->rt_src; inet->rcv_saddr = inet->saddr; inet->dport = usin->sin_port; inet->daddr = daddr; inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet->opt != NULL) inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; /* * Socket identity is still unknown (sport may be zero). * However we set state to DCCP_REQUESTING and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this. */ dccp_set_state(sk, DCCP_REQUESTING); err = inet_hash_connect(&dccp_death_row, sk); if (err != 0) goto failure; err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport, sk); if (err != 0) goto failure; /* OK, now commit destination to socket. */ sk_setup_caps(sk, &rt->u.dst); dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, inet->daddr, inet->sport, inet->dport); inet->id = dp->dccps_iss ^ jiffies; err = dccp_connect(sk); rt = NULL; if (err != 0) goto failure; out: return err; failure: /* * This unhashes the socket and releases the local port, if necessary. */ dccp_set_state(sk, DCCP_CLOSED); ip_rt_put(rt); sk->sk_route_caps = 0; inet->dport = 0; goto out; } EXPORT_SYMBOL_GPL(dccp_v4_connect); /* * This routine does path mtu discovery as defined in RFC1191. 
*/ static inline void dccp_do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu) { struct dst_entry *dst; const struct inet_sock *inet = inet_sk(sk); const struct dccp_sock *dp = dccp_sk(sk); /* We are not interested in DCCP_LISTEN and request_socks (RESPONSEs * send out by Linux are always < 576bytes so they should go through * unfragmented). */ if (sk->sk_state == DCCP_LISTEN) return; /* We don't check in the destentry if pmtu discovery is forbidden * on this route. We just assume that no packet_to_big packets * are send back when pmtu discovery is not active. * There is a small race when the user changes this flag in the * route, but I think that's acceptable. */ if ((dst = __sk_dst_check(sk, 0)) == NULL) return; dst->ops->update_pmtu(dst, mtu); /* Something is about to be wrong... Remember soft error * for the case, if this connection will not able to recover. */ if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) sk->sk_err_soft = EMSGSIZE; mtu = dst_mtu(dst); if (inet->pmtudisc != IP_PMTUDISC_DONT && inet_csk(sk)->icsk_pmtu_cookie > mtu) { dccp_sync_mss(sk, mtu); /* * From RFC 4340, sec. 14.1: * * DCCP-Sync packets are the best choice for upward * probing, since DCCP-Sync probes do not risk application * data loss. */ dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC); } /* else let the usual retransmit timer handle it */ } /* * This routine is called by the ICMP module when it gets some sort of error * condition. If err < 0 then the socket should be closed and the error * returned to the user. If err > 0 it's just the icmp type << 8 | icmp code. * After adjustment header points to the first 8 bytes of the tcp header. We * need to find the appropriate port. * * The locking strategy used here is very "optimistic". When someone else * accesses the socket the ICMP is just dropped and for some paths there is no * check at all. A more general error queue to queue errors for later handling * is probably better. */ static void dccp_v4_err(struct sk_buff *skb, u32 info) { const struct iphdr *iph = (struct iphdr *)skb->data; const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + (iph->ihl << 2)); struct dccp_sock *dp; struct inet_sock *inet; const int type = skb->h.icmph->type; const int code = skb->h.icmph->code; struct sock *sk; __u64 seq; int err; if (skb->len < (iph->ihl << 2) + 8) { ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); return; } sk = inet_lookup(&dccp_hashinfo, iph->daddr, dh->dccph_dport, iph->saddr, dh->dccph_sport, inet_iif(skb)); if (sk == NULL) { ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); return; } if (sk->sk_state == DCCP_TIME_WAIT) { inet_twsk_put(inet_twsk(sk)); return; } bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. */ if (sock_owned_by_user(sk)) NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == DCCP_CLOSED) goto out; dp = dccp_sk(sk); seq = dccp_hdr_seq(skb); if (sk->sk_state != DCCP_LISTEN && !between48(seq, dp->dccps_swl, dp->dccps_swh)) { NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); goto out; } switch (type) { case ICMP_SOURCE_QUENCH: /* Just silently ignore these. 
*/ goto out; case ICMP_PARAMETERPROB: err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ if (!sock_owned_by_user(sk)) dccp_do_pmtu_discovery(sk, iph, info); goto out; } err = icmp_err_convert[code].errno; break; case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; default: goto out; } switch (sk->sk_state) { struct request_sock *req , **prev; case DCCP_LISTEN: if (sock_owned_by_user(sk)) goto out; req = inet_csk_search_req(sk, &prev, dh->dccph_dport, iph->daddr, iph->saddr); if (!req) goto out; /* * ICMPs are not backlogged, hence we cannot get an established * socket here. */ BUG_TRAP(!req->sk); if (seq != dccp_rsk(req)->dreq_iss) { NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); goto out; } /* * Still in RESPOND, just remove it silently. * There is no good way to pass the error to the newly * created socket, and POSIX does not want network * errors returned from accept(). */ inet_csk_reqsk_queue_drop(sk, req, prev); goto out; case DCCP_REQUESTING: case DCCP_RESPOND: if (!sock_owned_by_user(sk)) { DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); sk->sk_err = err; sk->sk_error_report(sk); dccp_done(sk); } else sk->sk_err_soft = err; goto out; } /* If we've already connected we will keep trying * until we time out, or the user gives up. * * rfc1122 4.2.3.9 allows to consider as hard errors * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too, * but it is obsoleted by pmtu discovery). * * Note, that in modern internet, where routing is unreliable * and in each dark corner broken firewalls sit, sending random * errors ordered by their masters even this two messages finally lose * their original sense (even Linux sends invalid PORT_UNREACHs) * * Now we are in compliance with RFCs. * --ANK (980905) */ inet = inet_sk(sk); if (!sock_owned_by_user(sk) && inet->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else /* Only an error on timeout */ sk->sk_err_soft = err; out: bh_unlock_sock(sk); sock_put(sk); } static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb, __be32 src, __be32 dst) { return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum); } void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb) { const struct inet_sock *inet = inet_sk(sk); struct dccp_hdr *dh = dccp_hdr(skb); dccp_csum_outgoing(skb); dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->saddr, inet->daddr); } EXPORT_SYMBOL_GPL(dccp_v4_send_check); static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb) { return secure_dccp_sequence_number(skb->nh.iph->daddr, skb->nh.iph->saddr, dccp_hdr(skb)->dccph_dport, dccp_hdr(skb)->dccph_sport); } /* * The three way handshake has completed - we got a valid ACK or DATAACK - * now create the new socket. 
* * This is the equivalent of TCP's tcp_v4_syn_recv_sock */ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) { struct inet_request_sock *ireq; struct inet_sock *newinet; struct dccp_sock *newdp; struct sock *newsk; if (sk_acceptq_is_full(sk)) goto exit_overflow; if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL) goto exit; newsk = dccp_create_openreq_child(sk, req, skb); if (newsk == NULL) goto exit; sk_setup_caps(newsk, dst); newdp = dccp_sk(newsk); newinet = inet_sk(newsk); ireq = inet_rsk(req); newinet->daddr = ireq->rmt_addr; newinet->rcv_saddr = ireq->loc_addr; newinet->saddr = ireq->loc_addr; newinet->opt = ireq->opt; ireq->opt = NULL; newinet->mc_index = inet_iif(skb); newinet->mc_ttl = skb->nh.iph->ttl; newinet->id = jiffies; dccp_sync_mss(newsk, dst_mtu(dst)); __inet_hash(&dccp_hashinfo, newsk, 0); __inet_inherit_port(&dccp_hashinfo, sk, newsk); return newsk; exit_overflow: NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); exit: NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); dst_release(dst); return NULL; } EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock); static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) { const struct dccp_hdr *dh = dccp_hdr(skb); const struct iphdr *iph = skb->nh.iph; struct sock *nsk; struct request_sock **prev; /* Find possible connection requests. */ struct request_sock *req = inet_csk_search_req(sk, &prev, dh->dccph_sport, iph->saddr, iph->daddr); if (req != NULL) return dccp_check_req(sk, skb, req, prev); nsk = inet_lookup_established(&dccp_hashinfo, iph->saddr, dh->dccph_sport, iph->daddr, dh->dccph_dport, inet_iif(skb)); if (nsk != NULL) { if (nsk->sk_state != DCCP_TIME_WAIT) { bh_lock_sock(nsk); return nsk; } inet_twsk_put(inet_twsk(nsk)); return NULL; } return sk; } static struct dst_entry* dccp_v4_route_skb(struct sock *sk, struct sk_buff *skb) { struct rtable *rt; struct flowi fl = { .oif = ((struct rtable *)skb->dst)->rt_iif, .nl_u = { .ip4_u = { .daddr = skb->nh.iph->saddr, .saddr = skb->nh.iph->daddr, .tos = RT_CONN_FLAGS(sk) } }, .proto = sk->sk_protocol, .uli_u = { .ports = { .sport = dccp_hdr(skb)->dccph_dport, .dport = dccp_hdr(skb)->dccph_sport } } }; security_skb_classify_flow(skb, &fl); if (ip_route_output_flow(&rt, &fl, sk, 0)) { IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); return NULL; } return &rt->u.dst; } static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, struct dst_entry *dst) { int err = -1; struct sk_buff *skb; /* First, grab a route. */ if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL) goto out; skb = dccp_make_response(sk, dst, req); if (skb != NULL) { const struct inet_request_sock *ireq = inet_rsk(req); struct dccp_hdr *dh = dccp_hdr(skb); dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr, ireq->rmt_addr); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, ireq->rmt_addr, ireq->opt); err = net_xmit_eval(err); } out: dst_release(dst); return err; } static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) { int err; struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext) + sizeof(struct dccp_hdr_reset); struct sk_buff *skb; struct dst_entry *dst; u64 seqno = 0; /* Never send a reset in response to a reset. 
*/ if (rxdh->dccph_type == DCCP_PKT_RESET) return; if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL) return; dst = dccp_v4_route_skb(dccp_v4_ctl_socket->sk, rxskb); if (dst == NULL) return; skb = alloc_skb(dccp_v4_ctl_socket->sk->sk_prot->max_header, GFP_ATOMIC); if (skb == NULL) goto out; /* Reserve space for headers. */ skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header); skb->dst = dst_clone(dst); dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len); /* Build DCCP header and checksum it. */ dh->dccph_type = DCCP_PKT_RESET; dh->dccph_sport = rxdh->dccph_dport; dh->dccph_dport = rxdh->dccph_sport; dh->dccph_doff = dccp_hdr_reset_len / 4; dh->dccph_x = 1; dccp_hdr_reset(skb)->dccph_reset_code = DCCP_SKB_CB(rxskb)->dccpd_reset_code; /* See "8.3.1. Abnormal Termination" in RFC 4340 */ if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); dccp_hdr_set_seq(dh, seqno); dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq); dccp_csum_outgoing(skb); dh->dccph_checksum = dccp_v4_csum_finish(skb, rxskb->nh.iph->saddr, rxskb->nh.iph->daddr); bh_lock_sock(dccp_v4_ctl_socket->sk); err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk, rxskb->nh.iph->daddr, rxskb->nh.iph->saddr, NULL); bh_unlock_sock(dccp_v4_ctl_socket->sk); if (net_xmit_eval(err) == 0) { DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); } out: dst_release(dst); } static void dccp_v4_reqsk_destructor(struct request_sock *req) { kfree(inet_rsk(req)->opt); } static struct request_sock_ops dccp_request_sock_ops __read_mostly = { .family = PF_INET, .obj_size = sizeof(struct dccp_request_sock), .rtx_syn_ack = dccp_v4_send_response, .send_ack = dccp_reqsk_send_ack, .destructor = dccp_v4_reqsk_destructor, .send_reset = dccp_v4_ctl_send_reset, }; int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) { struct inet_request_sock *ireq; struct request_sock *req; struct dccp_request_sock *dreq; const __be32 service = dccp_hdr_request(skb)->dccph_req_service; struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */ if (((struct rtable *)skb->dst)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { reset_code = DCCP_RESET_CODE_NO_CONNECTION; goto drop; } if (dccp_bad_service_code(sk, service)) { reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; goto drop; } /* * TW buckets are converted to open requests without * limitations, they conserve resources and peer is * evidently real one. */ if (inet_csk_reqsk_queue_is_full(sk)) goto drop; /* * Accept backlog is full. If we have already queued enough * of warm entries in syn queue, drop request. It is better than * clogging syn queue with openreqs with exponentially increasing * timeout. */ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; req = reqsk_alloc(&dccp_request_sock_ops); if (req == NULL) goto drop; if (dccp_parse_options(sk, skb)) goto drop_and_free; dccp_reqsk_init(req, skb); if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; ireq = inet_rsk(req); ireq->loc_addr = skb->nh.iph->daddr; ireq->rmt_addr = skb->nh.iph->saddr; ireq->opt = NULL; /* * Step 3: Process LISTEN state * * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie * * In fact we defer setting S.GSR, S.SWL, S.SWH to * dccp_create_openreq_child. 
*/ dreq = dccp_rsk(req); dreq->dreq_isr = dcb->dccpd_seq; dreq->dreq_iss = dccp_v4_init_sequence(skb); dreq->dreq_service = service; if (dccp_v4_send_response(sk, req, NULL)) goto drop_and_free; inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); return 0; drop_and_free: reqsk_free(req); drop: DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); dcb->dccpd_reset_code = reset_code; return -1; } EXPORT_SYMBOL_GPL(dccp_v4_conn_request); int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) { struct dccp_hdr *dh = dccp_hdr(skb); if (sk->sk_state == DCCP_OPEN) { /* Fast path */ if (dccp_rcv_established(sk, skb, dh, skb->len)) goto reset; return 0; } /* * Step 3: Process LISTEN state * If P.type == Request or P contains a valid Init Cookie option, * (* Must scan the packet's options to check for Init * Cookies. Only Init Cookies are processed here, * however; other options are processed in Step 8. This * scan need only be performed if the endpoint uses Init * Cookies *) * (* Generate a new socket and switch to that socket *) * Set S := new socket for this port pair * S.state = RESPOND * Choose S.ISS (initial seqno) or set from Init Cookies * Initialize S.GAR := S.ISS * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies * Continue with S.state == RESPOND * (* A Response packet will be generated in Step 11 *) * Otherwise, * Generate Reset(No Connection) unless P.type == Reset * Drop packet and return * * NOTE: the check for the packet types is done in * dccp_rcv_state_process */ if (sk->sk_state == DCCP_LISTEN) { struct sock *nsk = dccp_v4_hnd_req(sk, skb); if (nsk == NULL) goto discard; if (nsk != sk) { if (dccp_child_process(sk, nsk, skb)) goto reset; return 0; } } if (dccp_rcv_state_process(sk, skb, dh, skb->len)) goto reset; return 0; reset: dccp_v4_ctl_send_reset(sk, skb); discard: kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(dccp_v4_do_rcv); /** * dccp_invalid_packet - check for malformed packets * Implements RFC 4340, 8.5: Step 1: Check header basics * Packets that fail these checks are ignored and do not receive Resets. */ int dccp_invalid_packet(struct sk_buff *skb) { const struct dccp_hdr *dh; unsigned int cscov; if (skb->pkt_type != PACKET_HOST) return 1; /* If the packet is shorter than 12 bytes, drop packet and return */ if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) { DCCP_WARN("pskb_may_pull failed\n"); return 1; } dh = dccp_hdr(skb); /* If P.type is not understood, drop packet and return */ if (dh->dccph_type >= DCCP_PKT_INVALID) { DCCP_WARN("invalid packet type\n"); return 1; } /* * If P.Data Offset is too small for packet type, drop packet and return */ if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff); return 1; } /* * If P.Data Offset is too too large for packet, drop packet and return */ if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff); return 1; } /* * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet * has short sequence numbers), drop packet and return */ if (dh->dccph_type >= DCCP_PKT_DATA && dh->dccph_type <= DCCP_PKT_DATAACK && dh->dccph_x == 0) { DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n", dccp_packet_name(dh->dccph_type)); return 1; } /* * If P.CsCov is too large for the packet size, drop packet and return. * This must come _before_ checksumming (not as RFC 4340 suggests). 
*/ cscov = dccp_csum_coverage(skb); if (cscov > skb->len) { DCCP_WARN("P.CsCov %u exceeds packet length %d\n", dh->dccph_cscov, skb->len); return 1; } /* If header checksum is incorrect, drop packet and return. * (This step is completed in the AF-dependent functions.) */ skb->csum = skb_checksum(skb, 0, cscov, 0); return 0; } EXPORT_SYMBOL_GPL(dccp_invalid_packet); /* this is called when real data arrives */ static int dccp_v4_rcv(struct sk_buff *skb) { const struct dccp_hdr *dh; struct sock *sk; int min_cov; /* Step 1: Check header basics */ if (dccp_invalid_packet(skb)) goto discard_it; /* Step 1: If header checksum is incorrect, drop packet and return */ if (dccp_v4_csum_finish(skb, skb->nh.iph->saddr, skb->nh.iph->daddr)) { DCCP_WARN("dropped packet with invalid checksum\n"); goto discard_it; } dh = dccp_hdr(skb); DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; dccp_pr_debug("%8.8s " "src=%u.%u.%u.%u@%-5d " "dst=%u.%u.%u.%u@%-5d seq=%llu", dccp_packet_name(dh->dccph_type), NIPQUAD(skb->nh.iph->saddr), ntohs(dh->dccph_sport), NIPQUAD(skb->nh.iph->daddr), ntohs(dh->dccph_dport), (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq); if (dccp_packet_without_ack(skb)) { DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ; dccp_pr_debug_cat("\n"); } else { DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq); } /* Step 2: * Look up flow ID in table and get corresponding socket */ sk = __inet_lookup(&dccp_hashinfo, skb->nh.iph->saddr, dh->dccph_sport, skb->nh.iph->daddr, dh->dccph_dport, inet_iif(skb)); /* * Step 2: * If no socket ... */ if (sk == NULL) { dccp_pr_debug("failed to look up flow ID in table and " "get corresponding socket\n"); goto no_dccp_socket; } /* * Step 2: * ... or S.state == TIMEWAIT, * Generate Reset(No Connection) unless P.type == Reset * Drop packet and return */ if (sk->sk_state == DCCP_TIME_WAIT) { dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n"); inet_twsk_put(inet_twsk(sk)); goto no_dccp_socket; } /* * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage * o if MinCsCov = 0, only packets with CsCov = 0 are accepted * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov */ min_cov = dccp_sk(sk)->dccps_pcrlen; if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n", dh->dccph_cscov, min_cov); /* FIXME: "Such packets SHOULD be reported using Data Dropped * options (Section 11.7) with Drop Code 0, Protocol * Constraints." */ goto discard_and_relse; } if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; nf_reset(skb); return sk_receive_skb(sk, skb, 1); no_dccp_socket: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; /* * Step 2: * If no socket ... 
* Generate Reset(No Connection) unless P.type == Reset * Drop packet and return */ if (dh->dccph_type != DCCP_PKT_RESET) { DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; dccp_v4_ctl_send_reset(sk, skb); } discard_it: kfree_skb(skb); return 0; discard_and_relse: sock_put(sk); goto discard_it; } static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { .queue_xmit = ip_queue_xmit, .send_check = dccp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .conn_request = dccp_v4_conn_request, .syn_recv_sock = dccp_v4_request_recv_sock, .net_header_len = sizeof(struct iphdr), .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, .addr2sockaddr = inet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in), #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, .compat_getsockopt = compat_ip_getsockopt, #endif }; static int dccp_v4_init_sock(struct sock *sk) { static __u8 dccp_v4_ctl_sock_initialized; int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized); if (err == 0) { if (unlikely(!dccp_v4_ctl_sock_initialized)) dccp_v4_ctl_sock_initialized = 1; inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops; } return err; } static struct timewait_sock_ops dccp_timewait_sock_ops = { .twsk_obj_size = sizeof(struct inet_timewait_sock), }; static struct proto dccp_v4_prot = { .name = "DCCP", .owner = THIS_MODULE, .close = dccp_close, .connect = dccp_v4_connect, .disconnect = dccp_disconnect, .ioctl = dccp_ioctl, .init = dccp_v4_init_sock, .setsockopt = dccp_setsockopt, .getsockopt = dccp_getsockopt, .sendmsg = dccp_sendmsg, .recvmsg = dccp_recvmsg, .backlog_rcv = dccp_v4_do_rcv, .hash = dccp_hash, .unhash = dccp_unhash, .accept = inet_csk_accept, .get_port = dccp_v4_get_port, .shutdown = dccp_shutdown, .destroy = dccp_destroy_sock, .orphan_count = &dccp_orphan_count, .max_header = MAX_DCCP_HEADER, .obj_size = sizeof(struct dccp_sock), .rsk_prot = &dccp_request_sock_ops, .twsk_prot = &dccp_timewait_sock_ops, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_dccp_setsockopt, .compat_getsockopt = compat_dccp_getsockopt, #endif }; static struct net_protocol dccp_v4_protocol = { .handler = dccp_v4_rcv, .err_handler = dccp_v4_err, .no_policy = 1, }; static const struct proto_ops inet_dccp_ops = { .family = PF_INET, .owner = THIS_MODULE, .release = inet_release, .bind = inet_bind, .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet_getname, /* FIXME: work on tcp_poll to rename it to inet_csk_poll */ .poll = dccp_poll, .ioctl = inet_ioctl, /* FIXME: work on inet_listen to rename it to sock_common_listen */ .listen = inet_dccp_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; static struct inet_protosw dccp_v4_protosw = { .type = SOCK_DCCP, .protocol = IPPROTO_DCCP, .prot = &dccp_v4_prot, .ops = &inet_dccp_ops, .capability = -1, .no_check = 0, .flags = INET_PROTOSW_ICSK, }; static int __init dccp_v4_init(void) { int err = proto_register(&dccp_v4_prot, 1); if (err != 0) goto out; err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP); if (err != 0) goto out_proto_unregister; inet_register_protosw(&dccp_v4_protosw); err = inet_csk_ctl_sock_create(&dccp_v4_ctl_socket, PF_INET, SOCK_DCCP, IPPROTO_DCCP); if (err) goto 
out_unregister_protosw; out: return err; out_unregister_protosw: inet_unregister_protosw(&dccp_v4_protosw); inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); out_proto_unregister: proto_unregister(&dccp_v4_prot); goto out; } static void __exit dccp_v4_exit(void) { inet_unregister_protosw(&dccp_v4_protosw); inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); proto_unregister(&dccp_v4_prot); } module_init(dccp_v4_init); module_exit(dccp_v4_exit); /* * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33) * values directly. Also cover the case where the protocol is not specified, * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP */ MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6"); MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
xiandaicxsj/copyKvm
net/dccp/ipv4.c
C
gpl-2.0
28989
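For orientation on how userspace drives the receive path in the file above: dccp_v4_conn_request() rejects any DCCP-Request whose service code fails dccp_bad_service_code(), so a client must set one before connecting. Below is a minimal client sketch, assuming Linux headers that expose SOCK_DCCP, SOL_DCCP and DCCP_SOCKOPT_SERVICE (linux/dccp.h); the port (5001) and service code (42) are arbitrary example values, not anything mandated by the source above.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/dccp.h>

int main(void)
{
	struct sockaddr_in addr;
	uint32_t service = htonl(42);	/* must match the listener's service code */
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Carried in the DCCP-Request header; checked server-side by
	 * dccp_bad_service_code() before a request sock is allocated. */
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		       &service, sizeof(service)) < 0) {
		perror("setsockopt");
		return 1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(5001);
	addr.sin_addr.s_addr = inet_addr("127.0.0.1");

	/* Drives the three-way handshake that dccp_v4_request_recv_sock()
	 * completes on the listening side. */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		return 1;
	}
	close(fd);
	return 0;
}

Run against a local DCCP listener, this exercises the dccp_v4_rcv() input path shown above end to end.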
/* Minimal malloc implementation for interposition tests. Copyright (C) 2016-2017 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; see the file COPYING.LIB. If not, see <http://www.gnu.org/licenses/>. */ #include "tst-interpose-aux.h" #include <errno.h> #include <stdarg.h> #include <stddef.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> #include <sys/uio.h> #include <unistd.h> #if INTERPOSE_THREADS #include <pthread.h> #endif /* Print the error message and terminate the process with status 1. */ __attribute__ ((noreturn)) __attribute__ ((format (printf, 1, 2))) static void * fail (const char *format, ...) { /* This assumes that vsnprintf will not call malloc. It does not do so for the format strings we use. */ char message[4096]; va_list ap; va_start (ap, format); vsnprintf (message, sizeof (message), format, ap); va_end (ap); enum { count = 3 }; struct iovec iov[count]; iov[0].iov_base = (char *) "error: "; iov[1].iov_base = (char *) message; iov[2].iov_base = (char *) "\n"; for (int i = 0; i < count; ++i) iov[i].iov_len = strlen (iov[i].iov_base); int unused __attribute__ ((unused)); unused = writev (STDOUT_FILENO, iov, count); _exit (1); } #if INTERPOSE_THREADS static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; #endif static void lock (void) { #if INTERPOSE_THREADS int ret = pthread_mutex_lock (&mutex); if (ret != 0) { errno = ret; fail ("pthread_mutex_lock: %m"); } #endif } static void unlock (void) { #if INTERPOSE_THREADS int ret = pthread_mutex_unlock (&mutex); if (ret != 0) { errno = ret; fail ("pthread_mutex_unlock: %m"); } #endif } struct __attribute__ ((aligned (__alignof__ (max_align_t)))) allocation_header { size_t allocation_index; size_t allocation_size; }; /* Array of known allocations, to track invalid frees. */ enum { max_allocations = 65536 }; static struct allocation_header *allocations[max_allocations]; static size_t allocation_index; static size_t deallocation_count; /* Sanity check for successful malloc interposition. */ __attribute__ ((destructor)) static void check_for_allocations (void) { if (allocation_index == 0) { /* Make sure that malloc is called at least once from libc. */ void *volatile ptr = strdup ("ptr"); /* Compiler barrier. The strdup function calls malloc, which updates allocation_index, but strdup is marked __THROW, so the compiler could optimize away the reload. */ __asm__ volatile ("" ::: "memory"); free (ptr); /* If the allocation count is still zero, it means we did not interpose malloc successfully. 
*/ if (allocation_index == 0) fail ("malloc does not seem to have been interposed"); } } static struct allocation_header *get_header (const char *op, void *ptr) { struct allocation_header *header = ((struct allocation_header *) ptr) - 1; if (header->allocation_index >= allocation_index) fail ("%s: %p: invalid allocation index: %zu (not less than %zu)", op, ptr, header->allocation_index, allocation_index); if (allocations[header->allocation_index] != header) fail ("%s: %p: allocation pointer does not point to header, but %p", op, ptr, allocations[header->allocation_index]); return header; } /* Internal helper functions. Those must be called while the lock is acquired. */ static void * malloc_internal (size_t size) { if (allocation_index == max_allocations) { errno = ENOMEM; return NULL; } size_t allocation_size = size + sizeof (struct allocation_header); if (allocation_size < size) { errno = ENOMEM; return NULL; } size_t index = allocation_index++; void *result = mmap (NULL, allocation_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (result == MAP_FAILED) return NULL; allocations[index] = result; *allocations[index] = (struct allocation_header) { .allocation_index = index, .allocation_size = allocation_size }; return allocations[index] + 1; } static void free_internal (const char *op, struct allocation_header *header) { size_t index = header->allocation_index; int result = mprotect (header, header->allocation_size, PROT_NONE); if (result != 0) fail ("%s: mprotect (%p, %zu): %m", op, header, header->allocation_size); /* Catch double-free issues. */ allocations[index] = NULL; ++deallocation_count; } static void * realloc_internal (void *ptr, size_t new_size) { struct allocation_header *header = get_header ("realloc", ptr); size_t old_size = header->allocation_size - sizeof (struct allocation_header); if (old_size >= new_size) return ptr; void *newptr = malloc_internal (new_size); if (newptr == NULL) return NULL; memcpy (newptr, ptr, old_size); free_internal ("realloc", header); return newptr; } /* Public interfaces. These functions must perform locking. */ size_t malloc_allocation_count (void) { lock (); size_t count = allocation_index; unlock (); return count; } size_t malloc_deallocation_count (void) { lock (); size_t count = deallocation_count; unlock (); return count; } void * malloc (size_t size) { lock (); void *result = malloc_internal (size); unlock (); return result; } void free (void *ptr) { if (ptr == NULL) return; lock (); struct allocation_header *header = get_header ("free", ptr); free_internal ("free", header); unlock (); } void * calloc (size_t a, size_t b) { if (b > 0 && a > SIZE_MAX / b) { errno = ENOMEM; return NULL; } lock (); /* malloc_internal uses mmap, so the memory is zeroed. */ void *result = malloc_internal (a * b); unlock (); return result; } void * realloc (void *ptr, size_t n) { if (n ==0) { free (ptr); return NULL; } else if (ptr == NULL) return malloc (n); else { lock (); void *result = realloc_internal (ptr, n); unlock (); return result; } }
ctyler/spo600-glibc
malloc/tst-interpose-aux.c
C
gpl-2.0
6803
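To make the interposition scheme above concrete: the malloc/free/calloc/realloc definitions shadow libc's once the object is linked into (or LD_PRELOADed under) a test program, and the counter accessors let a test verify the interposition actually took effect. The driver below is a sketch under stated assumptions -- it presumes that tst-interpose-aux.h declares the two counter accessors defined above, and the allocation size (64) is an arbitrary example value.

#include <stdio.h>
#include <stdlib.h>
#include "tst-interpose-aux.h"

int
main (void)
{
  size_t before = malloc_allocation_count ();
  void *p = malloc (64);	/* served by malloc_internal via mmap */

  if (p == NULL)
    {
      puts ("malloc failed");
      return 1;
    }
  free (p);	/* free_internal mprotects the mapping PROT_NONE */

  /* malloc bumps allocation_index and free bumps deallocation_count,
     so both counters must have advanced if interposition worked.  */
  if (malloc_allocation_count () == before
      || malloc_deallocation_count () == 0)
    {
      puts ("interposition not effective");
      return 1;
    }
  return 0;
}

Note that the destructor check_for_allocations() in the file above already performs a weaker version of this check at process exit.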
/* * linux/fs/ext4/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/vmalloc.h> #include <linux/jbd2.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/parser.h> #include <linux/buffer_head.h> #include <linux/exportfs.h> #include <linux/vfs.h> #include <linux/random.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/ctype.h> #include <linux/log2.h> #include <linux/crc16.h> #include <linux/cleancache.h> #include <asm/uaccess.h> #include <linux/kthread.h> #include <linux/freezer.h> #include "ext4.h" #include "ext4_extents.h" /* Needed for trace points definition */ #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include "mballoc.h" #define CREATE_TRACE_POINTS #include <trace/events/ext4.h> static struct proc_dir_entry *ext4_proc_root; static struct kset *ext4_kset; static struct ext4_lazy_init *ext4_li_info; static struct mutex ext4_li_mtx; static struct ext4_features *ext4_feat; static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); static int ext4_show_options(struct seq_file *seq, struct dentry *root); static int ext4_commit_super(struct super_block *sb, int sync); static void ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es); static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); static int ext4_sync_fs_nojournal(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); static int ext4_unfreeze(struct super_block *sb); static int ext4_freeze(struct super_block *sb); static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); static inline int ext2_feature_set_ok(struct super_block *sb); static inline int ext3_feature_set_ok(struct super_block *sb); static int ext4_feature_set_ok(struct super_block *sb, int readonly); static void ext4_destroy_lazyinit_thread(void); static void ext4_unregister_li_request(struct super_block *sb); static void ext4_clear_request_list(void); static int ext4_reserve_clusters(struct ext4_sb_info *, ext4_fsblk_t); #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static struct file_system_type ext2_fs_type = { .owner = THIS_MODULE, .name = "ext2", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext2"); MODULE_ALIAS("ext2"); #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type) #else #define IS_EXT2_SB(sb) (0) #endif #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static struct file_system_type ext3_fs_type = { .owner = THIS_MODULE, .name = "ext3", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext3"); MODULE_ALIAS("ext3"); #define 
IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type) #else #define IS_EXT3_SB(sb) (0) #endif static int ext4_verify_csum_type(struct super_block *sb, struct ext4_super_block *es) { if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return 1; return es->s_checksum_type == EXT4_CRC32C_CHKSUM; } static __le32 ext4_superblock_csum(struct super_block *sb, struct ext4_super_block *es) { struct ext4_sb_info *sbi = EXT4_SB(sb); int offset = offsetof(struct ext4_super_block, s_checksum); __u32 csum; csum = ext4_chksum(sbi, ~0, (char *)es, offset); return cpu_to_le32(csum); } int ext4_superblock_csum_verify(struct super_block *sb, struct ext4_super_block *es) { if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return 1; return es->s_checksum == ext4_superblock_csum(sb, es); } void ext4_superblock_csum_set(struct super_block *sb) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return; es->s_checksum = ext4_superblock_csum(sb, es); } void *ext4_kvmalloc(size_t size, gfp_t flags) { void *ret; ret = kmalloc(size, flags); if (!ret) ret = __vmalloc(size, flags, PAGE_KERNEL); return ret; } void *ext4_kvzalloc(size_t size, gfp_t flags) { void *ret; ret = kzalloc(size, flags); if (!ret) ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL); return ret; } void ext4_kvfree(void *ptr) { if (is_vmalloc_addr(ptr)) vfree(ptr); else kfree(ptr); } ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_block_bitmap_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0); } ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_inode_bitmap_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0); } ext4_fsblk_t ext4_inode_table(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_inode_table_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0); } __u32 ext4_free_group_clusters(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_free_blocks_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0); } __u32 ext4_free_inodes_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_free_inodes_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0); } __u32 ext4_used_dirs_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_used_dirs_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0); } __u32 ext4_itable_unused_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_itable_unused_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 
(__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0); } void ext4_block_bitmap_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32); } void ext4_inode_bitmap_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32); } void ext4_inode_table_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_inode_table_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_inode_table_hi = cpu_to_le32(blk >> 32); } void ext4_free_group_clusters_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16); } void ext4_free_inodes_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16); } void ext4_used_dirs_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16); } void ext4_itable_unused_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_itable_unused_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_itable_unused_hi = cpu_to_le16(count >> 16); } static void __save_error_info(struct super_block *sb, const char *func, unsigned int line) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; es->s_state |= cpu_to_le16(EXT4_ERROR_FS); es->s_last_error_time = cpu_to_le32(get_seconds()); strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func)); es->s_last_error_line = cpu_to_le32(line); if (!es->s_first_error_time) { es->s_first_error_time = es->s_last_error_time; strncpy(es->s_first_error_func, func, sizeof(es->s_first_error_func)); es->s_first_error_line = cpu_to_le32(line); es->s_first_error_ino = es->s_last_error_ino; es->s_first_error_block = es->s_last_error_block; } /* * Start the daily error reporting function if it hasn't been * started already */ if (!es->s_error_count) mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ); le32_add_cpu(&es->s_error_count, 1); } static void save_error_info(struct super_block *sb, const char *func, unsigned int line) { __save_error_info(sb, func, line); ext4_commit_super(sb, 1); } /* * The del_gendisk() function uninitializes the disk-specific data * structures, including the bdi structure, without telling anyone * else. Once this happens, any attempt to call mark_buffer_dirty() * (for example, by ext4_commit_super), will cause a kernel OOPS. * This is a kludge to prevent these oops until we can put in a proper * hook in del_gendisk() to inform the VFS and file system layers. 
*/ static int block_device_ejected(struct super_block *sb) { struct inode *bd_inode = sb->s_bdev->bd_inode; struct backing_dev_info *bdi = bd_inode->i_mapping->backing_dev_info; return bdi->dev == NULL; } static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) { struct super_block *sb = journal->j_private; struct ext4_sb_info *sbi = EXT4_SB(sb); int error = is_journal_aborted(journal); struct ext4_journal_cb_entry *jce; BUG_ON(txn->t_state == T_FINISHED); spin_lock(&sbi->s_md_lock); while (!list_empty(&txn->t_private_list)) { jce = list_entry(txn->t_private_list.next, struct ext4_journal_cb_entry, jce_list); list_del_init(&jce->jce_list); spin_unlock(&sbi->s_md_lock); jce->jce_func(sb, jce, error); spin_lock(&sbi->s_md_lock); } spin_unlock(&sbi->s_md_lock); } /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * * On ext2, we can store the error state of the filesystem in the * superblock. That is not possible on ext4, because we may have other * write ordering constraints on the superblock which prevent us from * writing it out straight away; and given that the journal is about to * be aborted, we can't rely on the current, or future, transactions to * write out the superblock safely. * * We'll just use the jbd2_journal_abort() error code to record an error in * the journal instead. On recovery, the journal will complain about * that error until we've noted it down and cleared it. */ static void ext4_handle_error(struct super_block *sb) { if (sb->s_flags & MS_RDONLY) return; if (!test_opt(sb, ERRORS_CONT)) { journal_t *journal = EXT4_SB(sb)->s_journal; EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; if (journal) jbd2_journal_abort(journal, -EIO); } if (test_opt(sb, ERRORS_RO)) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); /* * Make sure updated value of ->s_mount_flags will be visible * before ->s_flags update */ smp_wmb(); sb->s_flags |= MS_RDONLY; } if (test_opt(sb, ERRORS_PANIC)) panic("EXT4-fs (device %s): panic forced after error\n", sb->s_id); } void __ext4_error(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n", sb->s_id, function, line, current->comm, &vaf); va_end(args); save_error_info(sb, function, line); ext4_handle_error(sb); } void __ext4_error_inode(struct inode *inode, const char *function, unsigned int line, ext4_fsblk_t block, const char *fmt, ...) { va_list args; struct va_format vaf; struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; es->s_last_error_ino = cpu_to_le32(inode->i_ino); es->s_last_error_block = cpu_to_le64(block); save_error_info(inode->i_sb, function, line); va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (block) printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: " "inode #%lu: block %llu: comm %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, block, current->comm, &vaf); else printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: " "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, current->comm, &vaf); va_end(args); ext4_handle_error(inode->i_sb); } void __ext4_error_file(struct file *file, const char *function, unsigned int line, ext4_fsblk_t block, const char *fmt, ...) 
{ va_list args; struct va_format vaf; struct ext4_super_block *es; struct inode *inode = file_inode(file); char pathname[80], *path; es = EXT4_SB(inode->i_sb)->s_es; es->s_last_error_ino = cpu_to_le32(inode->i_ino); save_error_info(inode->i_sb, function, line); path = d_path(&(file->f_path), pathname, sizeof(pathname)); if (IS_ERR(path)) path = "(unknown)"; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (block) printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: " "block %llu: comm %s: path %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, block, current->comm, path, &vaf); else printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: " "comm %s: path %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, current->comm, path, &vaf); va_end(args); ext4_handle_error(inode->i_sb); } const char *ext4_decode_error(struct super_block *sb, int errno, char nbuf[16]) { char *errstr = NULL; switch (errno) { case -EIO: errstr = "IO failure"; break; case -ENOMEM: errstr = "Out of memory"; break; case -EROFS: if (!sb || (EXT4_SB(sb)->s_journal && EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT)) errstr = "Journal has aborted"; else errstr = "Readonly filesystem"; break; default: /* If the caller passed in an extra buffer for unknown * errors, textualise them now. Else we just return * NULL. */ if (nbuf) { /* Check for truncated error codes... */ if (snprintf(nbuf, 16, "error %d", -errno) >= 0) errstr = nbuf; } break; } return errstr; } /* __ext4_std_error decodes expected errors from journaling functions * automatically and invokes the appropriate error response. */ void __ext4_std_error(struct super_block *sb, const char *function, unsigned int line, int errno) { char nbuf[16]; const char *errstr; /* Special case: if the error is EROFS, and we're not already * inside a transaction, then there's really no point in logging * an error. */ if (errno == -EROFS && journal_current_handle() == NULL && (sb->s_flags & MS_RDONLY)) return; errstr = ext4_decode_error(sb, errno, nbuf); printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n", sb->s_id, function, line, errstr); save_error_info(sb, function, line); ext4_handle_error(sb); } /* * ext4_abort is a much stronger failure handler than ext4_error. The * abort function may be used to deal with unrecoverable failures such * as journal IO errors or ENOMEM at a critical moment in log management. * * We unconditionally force the filesystem into an ABORT|READONLY state, * unless the error response on the fs has been set to panic in which * case we take the easy way out and panic immediately. */ void __ext4_abort(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { va_list args; save_error_info(sb, function, line); va_start(args, fmt); printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id, function, line); vprintk(fmt, args); printk("\n"); va_end(args); if ((sb->s_flags & MS_RDONLY) == 0) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; /* * Make sure updated value of ->s_mount_flags will be visible * before ->s_flags update */ smp_wmb(); sb->s_flags |= MS_RDONLY; if (EXT4_SB(sb)->s_journal) jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO); save_error_info(sb, function, line); } if (test_opt(sb, ERRORS_PANIC)) panic("EXT4-fs panic from previous error\n"); } void __ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } void __ext4_warning(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n", sb->s_id, function, line, &vaf); va_end(args); } void __ext4_grp_locked_error(const char *function, unsigned int line, struct super_block *sb, ext4_group_t grp, unsigned long ino, ext4_fsblk_t block, const char *fmt, ...) __releases(bitlock) __acquires(bitlock) { struct va_format vaf; va_list args; struct ext4_super_block *es = EXT4_SB(sb)->s_es; es->s_last_error_ino = cpu_to_le32(ino); es->s_last_error_block = cpu_to_le64(block); __save_error_info(sb, function, line); va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ", sb->s_id, function, line, grp); if (ino) printk(KERN_CONT "inode %lu: ", ino); if (block) printk(KERN_CONT "block %llu:", (unsigned long long) block); printk(KERN_CONT "%pV\n", &vaf); va_end(args); if (test_opt(sb, ERRORS_CONT)) { ext4_commit_super(sb, 0); return; } ext4_unlock_group(sb, grp); ext4_handle_error(sb); /* * We only get here in the ERRORS_RO case; relocking the group * may be dangerous, but nothing bad will happen since the * filesystem will have already been marked read/only and the * journal has been aborted. We return 1 as a hint to callers * who might what to use the return value from * ext4_grp_locked_error() to distinguish between the * ERRORS_CONT and ERRORS_RO case, and perhaps return more * aggressively from the ext4 function in question, with a * more appropriate error code. */ ext4_lock_group(sb, grp); return; } void ext4_update_dynamic_rev(struct super_block *sb) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) return; ext4_warning(sb, "updating to rev %d because of new feature flag, " "running e2fsck is recommended", EXT4_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE); es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV); /* leave es->s_feature_*compat flags alone */ /* es->s_uuid will be set by e2fsck if empty */ /* * The rest of the superblock fields should be zero, and if not it * means they are likely already in use, so leave them alone. We * can leave it up to e2fsck to clean up any inconsistencies there. 
*/ } /* * Open the external journal device */ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; fail: ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; } /* * Release the journal device */ static void ext4_blkdev_put(struct block_device *bdev) { blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static void ext4_blkdev_remove(struct ext4_sb_info *sbi) { struct block_device *bdev; bdev = sbi->journal_bdev; if (bdev) { ext4_blkdev_put(bdev); sbi->journal_bdev = NULL; } } static inline struct inode *orphan_list_entry(struct list_head *l) { return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode; } static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi) { struct list_head *l; ext4_msg(sb, KERN_ERR, "sb orphan head is %d", le32_to_cpu(sbi->s_es->s_last_orphan)); printk(KERN_ERR "sb_info orphan list:\n"); list_for_each(l, &sbi->s_orphan) { struct inode *inode = orphan_list_entry(l); printk(KERN_ERR " " "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", inode->i_sb->s_id, inode->i_ino, inode, inode->i_mode, inode->i_nlink, NEXT_ORPHAN(inode)); } } static void ext4_put_super(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int i, err; ext4_unregister_li_request(sb); dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); flush_workqueue(sbi->unrsv_conversion_wq); flush_workqueue(sbi->rsv_conversion_wq); destroy_workqueue(sbi->unrsv_conversion_wq); destroy_workqueue(sbi->rsv_conversion_wq); if (sbi->s_journal) { err = jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; if (err < 0) ext4_abort(sb, "Couldn't clean up the journal"); } ext4_es_unregister_shrinker(sbi); del_timer(&sbi->s_err_report); ext4_release_system_zone(sb); ext4_mb_release(sb); ext4_ext_release(sb); ext4_xattr_put_super(sb); if (!(sb->s_flags & MS_RDONLY)) { EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); es->s_state = cpu_to_le16(sbi->s_mount_state); } if (!(sb->s_flags & MS_RDONLY)) ext4_commit_super(sb, 1); if (sbi->s_proc) { remove_proc_entry("options", sbi->s_proc); remove_proc_entry(sb->s_id, ext4_proc_root); } kobject_del(&sbi->s_kobj); for (i = 0; i < sbi->s_gdb_count; i++) brelse(sbi->s_group_desc[i]); ext4_kvfree(sbi->s_group_desc); ext4_kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); percpu_counter_destroy(&sbi->s_extent_cache_cnt); brelse(sbi->s_sbh); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif /* Debugging code just in case the in-memory inode orphan list * isn't empty. The on-disk one can be non-empty if we've * detected an error and taken the fs readonly, but the * in-memory list had better be clean by this point. */ if (!list_empty(&sbi->s_orphan)) dump_orphan_list(sb, sbi); J_ASSERT(list_empty(&sbi->s_orphan)); invalidate_bdev(sb->s_bdev); if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. We don't want them * floating about in memory - the physical journal device may * hotswapped, and it breaks the `ro-after' testing code. 
*/ sync_blockdev(sbi->journal_bdev); invalidate_bdev(sbi->journal_bdev); ext4_blkdev_remove(sbi); } if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); sb->s_fs_info = NULL; /* * Now that we are completely done shutting down the * superblock, we need to actually destroy the kobject. */ kobject_put(&sbi->s_kobj); wait_for_completion(&sbi->s_kobj_unregister); if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); kfree(sbi->s_blockgroup_lock); kfree(sbi); } static struct kmem_cache *ext4_inode_cachep; /* * Called inside transaction, so use GFP_NOFS */ static struct inode *ext4_alloc_inode(struct super_block *sb) { struct ext4_inode_info *ei; ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->vfs_inode.i_version = 1; INIT_LIST_HEAD(&ei->i_prealloc_list); spin_lock_init(&ei->i_prealloc_lock); ext4_es_init_tree(&ei->i_es_tree); rwlock_init(&ei->i_es_lock); INIT_LIST_HEAD(&ei->i_es_lru); ei->i_es_lru_nr = 0; ei->i_touch_when = 0; ei->i_reserved_data_blocks = 0; ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; ei->i_da_metadata_calc_len = 0; ei->i_da_metadata_calc_last_lblock = 0; spin_lock_init(&(ei->i_block_reservation_lock)); #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; #endif ei->jinode = NULL; INIT_LIST_HEAD(&ei->i_rsv_conversion_list); INIT_LIST_HEAD(&ei->i_unrsv_conversion_list); spin_lock_init(&ei->i_completed_io_lock); ei->i_sync_tid = 0; ei->i_datasync_tid = 0; atomic_set(&ei->i_ioend_count, 0); atomic_set(&ei->i_unwritten, 0); INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); INIT_WORK(&ei->i_unrsv_conversion_work, ext4_end_io_unrsv_work); return &ei->vfs_inode; } static int ext4_drop_inode(struct inode *inode) { int drop = generic_drop_inode(inode); trace_ext4_drop_inode(inode, drop); return drop; } static void ext4_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); } static void ext4_destroy_inode(struct inode *inode) { if (!list_empty(&(EXT4_I(inode)->i_orphan))) { ext4_msg(inode->i_sb, KERN_ERR, "Inode %lu (%p): orphan list check failed!", inode->i_ino, EXT4_I(inode)); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4, EXT4_I(inode), sizeof(struct ext4_inode_info), true); dump_stack(); } call_rcu(&inode->i_rcu, ext4_i_callback); } static void init_once(void *foo) { struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; INIT_LIST_HEAD(&ei->i_orphan); init_rwsem(&ei->xattr_sem); init_rwsem(&ei->i_data_sem); inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { ext4_inode_cachep = kmem_cache_create("ext4_inode_cache", sizeof(struct ext4_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (ext4_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(ext4_inode_cachep); } void ext4_clear_inode(struct inode *inode) { invalidate_inode_buffers(inode); clear_inode(inode); dquot_drop(inode); ext4_discard_preallocations(inode); ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); ext4_es_lru_del(inode); if (EXT4_I(inode)->jinode) { jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode); jbd2_free_inode(EXT4_I(inode)->jinode); EXT4_I(inode)->jinode = NULL; } } static struct inode *ext4_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) return ERR_PTR(-ESTALE); if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) return ERR_PTR(-ESTALE); /* iget isn't really right if the inode is currently unallocated!! * * ext4_read_inode will return a bad_inode if the inode had been * deleted, so we should be safe. * * Currently we don't know the generation for parent directory, so * a generation of 0 means "accept any" */ inode = ext4_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ext4_nfs_get_inode); } static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ext4_nfs_get_inode); } /* * Try to release metadata pages (indirect blocks, directories) which are * mapped via the block device. Since these pages could have journal heads * which would prevent try_to_free_buffers() from freeing them, we must use * jbd2 layer's try_to_free_buffers() function to release them. */ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_t wait) { journal_t *journal = EXT4_SB(sb)->s_journal; WARN_ON(PageChecked(page)); if (!page_has_buffers(page)) return 0; if (journal) return jbd2_journal_try_to_free_buffers(journal, page, wait & ~__GFP_WAIT); return try_to_free_buffers(page); } #ifdef CONFIG_QUOTA #define QTYPE2NAME(t) ((t) == USRQUOTA ? 
"user" : "group") #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) static int ext4_write_dquot(struct dquot *dquot); static int ext4_acquire_dquot(struct dquot *dquot); static int ext4_release_dquot(struct dquot *dquot); static int ext4_mark_dquot_dirty(struct dquot *dquot); static int ext4_write_info(struct super_block *sb, int type); static int ext4_quota_on(struct super_block *sb, int type, int format_id, struct path *path); static int ext4_quota_on_sysfile(struct super_block *sb, int type, int format_id); static int ext4_quota_off(struct super_block *sb, int type); static int ext4_quota_off_sysfile(struct super_block *sb, int type); static int ext4_quota_on_mount(struct super_block *sb, int type); static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); static int ext4_quota_enable(struct super_block *sb, int type, int format_id, unsigned int flags); static int ext4_enable_quotas(struct super_block *sb); static const struct dquot_operations ext4_quota_operations = { .get_reserved_space = ext4_get_reserved_space, .write_dquot = ext4_write_dquot, .acquire_dquot = ext4_acquire_dquot, .release_dquot = ext4_release_dquot, .mark_dirty = ext4_mark_dquot_dirty, .write_info = ext4_write_info, .alloc_dquot = dquot_alloc, .destroy_dquot = dquot_destroy, }; static const struct quotactl_ops ext4_qctl_operations = { .quota_on = ext4_quota_on, .quota_off = ext4_quota_off, .quota_sync = dquot_quota_sync, .get_info = dquot_get_dqinfo, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk }; static const struct quotactl_ops ext4_qctl_sysfile_operations = { .quota_on_meta = ext4_quota_on_sysfile, .quota_off = ext4_quota_off_sysfile, .quota_sync = dquot_quota_sync, .get_info = dquot_get_dqinfo, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk }; #endif static const struct super_operations ext4_sops = { .alloc_inode = ext4_alloc_inode, .destroy_inode = ext4_destroy_inode, .write_inode = ext4_write_inode, .dirty_inode = ext4_dirty_inode, .drop_inode = ext4_drop_inode, .evict_inode = ext4_evict_inode, .put_super = ext4_put_super, .sync_fs = ext4_sync_fs, .freeze_fs = ext4_freeze, .unfreeze_fs = ext4_unfreeze, .statfs = ext4_statfs, .remount_fs = ext4_remount, .show_options = ext4_show_options, #ifdef CONFIG_QUOTA .quota_read = ext4_quota_read, .quota_write = ext4_quota_write, #endif .bdev_try_to_free_page = bdev_try_to_free_page, }; static const struct super_operations ext4_nojournal_sops = { .alloc_inode = ext4_alloc_inode, .destroy_inode = ext4_destroy_inode, .write_inode = ext4_write_inode, .dirty_inode = ext4_dirty_inode, .drop_inode = ext4_drop_inode, .evict_inode = ext4_evict_inode, .sync_fs = ext4_sync_fs_nojournal, .put_super = ext4_put_super, .statfs = ext4_statfs, .remount_fs = ext4_remount, .show_options = ext4_show_options, #ifdef CONFIG_QUOTA .quota_read = ext4_quota_read, .quota_write = ext4_quota_write, #endif .bdev_try_to_free_page = bdev_try_to_free_page, }; static const struct export_operations ext4_export_ops = { .fh_to_dentry = ext4_fh_to_dentry, .fh_to_parent = ext4_fh_to_parent, .get_parent = ext4_get_parent, }; enum { Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_nouid32, Opt_debug, Opt_removed, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, 
Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev, Opt_journal_checksum, Opt_journal_async_commit, Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, Opt_data_err_abort, Opt_data_err_ignore, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, Opt_inode_readahead_blks, Opt_journal_ioprio, Opt_dioread_nolock, Opt_dioread_lock, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, Opt_max_dir_size_kb, }; static const match_table_t tokens = { {Opt_bsd_df, "bsddf"}, {Opt_minix_df, "minixdf"}, {Opt_grpid, "grpid"}, {Opt_grpid, "bsdgroups"}, {Opt_nogrpid, "nogrpid"}, {Opt_nogrpid, "sysvgroups"}, {Opt_resgid, "resgid=%u"}, {Opt_resuid, "resuid=%u"}, {Opt_sb, "sb=%u"}, {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_nouid32, "nouid32"}, {Opt_debug, "debug"}, {Opt_removed, "oldalloc"}, {Opt_removed, "orlov"}, {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_noload, "norecovery"}, {Opt_noload, "noload"}, {Opt_removed, "nobh"}, {Opt_removed, "bh"}, {Opt_commit, "commit=%u"}, {Opt_min_batch_time, "min_batch_time=%u"}, {Opt_max_batch_time, "max_batch_time=%u"}, {Opt_journal_dev, "journal_dev=%u"}, {Opt_journal_checksum, "journal_checksum"}, {Opt_journal_async_commit, "journal_async_commit"}, {Opt_abort, "abort"}, {Opt_data_journal, "data=journal"}, {Opt_data_ordered, "data=ordered"}, {Opt_data_writeback, "data=writeback"}, {Opt_data_err_abort, "data_err=abort"}, {Opt_data_err_ignore, "data_err=ignore"}, {Opt_offusrjquota, "usrjquota="}, {Opt_usrjquota, "usrjquota=%s"}, {Opt_offgrpjquota, "grpjquota="}, {Opt_grpjquota, "grpjquota=%s"}, {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, {Opt_grpquota, "grpquota"}, {Opt_noquota, "noquota"}, {Opt_quota, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_barrier, "barrier=%u"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_i_version, "i_version"}, {Opt_stripe, "stripe=%u"}, {Opt_delalloc, "delalloc"}, {Opt_nodelalloc, "nodelalloc"}, {Opt_removed, "mblk_io_submit"}, {Opt_removed, "nomblk_io_submit"}, {Opt_block_validity, "block_validity"}, {Opt_noblock_validity, "noblock_validity"}, {Opt_inode_readahead_blks, "inode_readahead_blks=%u"}, {Opt_journal_ioprio, "journal_ioprio=%u"}, {Opt_auto_da_alloc, "auto_da_alloc=%u"}, {Opt_auto_da_alloc, "auto_da_alloc"}, {Opt_noauto_da_alloc, "noauto_da_alloc"}, {Opt_dioread_nolock, "dioread_nolock"}, {Opt_dioread_lock, "dioread_lock"}, {Opt_discard, "discard"}, {Opt_nodiscard, "nodiscard"}, {Opt_init_itable, "init_itable=%u"}, {Opt_init_itable, "init_itable"}, {Opt_noinit_itable, "noinit_itable"}, {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, {Opt_removed, "check=none"}, /* mount option from ext2/3 */ {Opt_removed, "nocheck"}, /* mount option from ext2/3 */ {Opt_removed, "reservation"}, /* mount option from ext2/3 */ {Opt_removed, "noreservation"}, /* mount option from ext2/3 */ {Opt_removed, "journal=%u"}, /* mount option from ext2/3 */ {Opt_err, NULL}, }; static ext4_fsblk_t get_sb_block(void **data) { ext4_fsblk_t sb_block; char *options = (char *) *data; 
if (!options || strncmp(options, "sb=", 3) != 0) return 1; /* Default location */ options += 3; /* TODO: use simple_strtoll with >32bit ext4 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n", (char *) *data); return 1; } if (*options == ',') options++; *data = (void *) options; return sb_block; } #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n" "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n"; #ifdef CONFIG_QUOTA static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) { struct ext4_sb_info *sbi = EXT4_SB(sb); char *qname; int ret = -1; if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { ext4_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return -1; } if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) { ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options " "when QUOTA feature is enabled"); return -1; } qname = match_strdup(args); if (!qname) { ext4_msg(sb, KERN_ERR, "Not enough memory for storing quotafile name"); return -1; } if (sbi->s_qf_names[qtype]) { if (strcmp(sbi->s_qf_names[qtype], qname) == 0) ret = 1; else ext4_msg(sb, KERN_ERR, "%s quota file already specified", QTYPE2NAME(qtype)); goto errout; } if (strchr(qname, '/')) { ext4_msg(sb, KERN_ERR, "quotafile must be on filesystem root"); goto errout; } sbi->s_qf_names[qtype] = qname; set_opt(sb, QUOTA); return 1; errout: kfree(qname); return ret; } static int clear_qf_name(struct super_block *sb, int qtype) { struct ext4_sb_info *sbi = EXT4_SB(sb); if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options" " when quota turned on"); return -1; } kfree(sbi->s_qf_names[qtype]); sbi->s_qf_names[qtype] = NULL; return 1; } #endif #define MOPT_SET 0x0001 #define MOPT_CLEAR 0x0002 #define MOPT_NOSUPPORT 0x0004 #define MOPT_EXPLICIT 0x0008 #define MOPT_CLEAR_ERR 0x0010 #define MOPT_GTE0 0x0020 #ifdef CONFIG_QUOTA #define MOPT_Q 0 #define MOPT_QFMT 0x0040 #else #define MOPT_Q MOPT_NOSUPPORT #define MOPT_QFMT MOPT_NOSUPPORT #endif #define MOPT_DATAJ 0x0080 #define MOPT_NO_EXT2 0x0100 #define MOPT_NO_EXT3 0x0200 #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3) static const struct mount_opts { int token; int mount_opt; int flags; } ext4_mount_opts[] = { {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET}, {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR}, {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET}, {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR}, {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET}, {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR}, {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK, MOPT_EXT4_ONLY | MOPT_SET}, {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK, MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET}, {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR}, {Opt_delalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT}, {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_SET}, {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | EXT4_MOUNT_JOURNAL_CHECKSUM), MOPT_EXT4_ONLY | MOPT_SET}, {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET}, {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR}, {Opt_err_ro, 
EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR}, {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR}, {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2 | MOPT_SET}, {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2 | MOPT_CLEAR}, {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET}, {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR}, {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET}, {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR}, {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR}, {Opt_commit, 0, MOPT_GTE0}, {Opt_max_batch_time, 0, MOPT_GTE0}, {Opt_min_batch_time, 0, MOPT_GTE0}, {Opt_inode_readahead_blks, 0, MOPT_GTE0}, {Opt_init_itable, 0, MOPT_GTE0}, {Opt_stripe, 0, MOPT_GTE0}, {Opt_resuid, 0, MOPT_GTE0}, {Opt_resgid, 0, MOPT_GTE0}, {Opt_journal_dev, 0, MOPT_GTE0}, {Opt_journal_ioprio, 0, MOPT_GTE0}, {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET}, {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR}, #ifdef CONFIG_EXT4_FS_POSIX_ACL {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET}, {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR}, #else {Opt_acl, 0, MOPT_NOSUPPORT}, {Opt_noacl, 0, MOPT_NOSUPPORT}, #endif {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET}, {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET}, {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA, MOPT_SET | MOPT_Q}, {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | EXT4_MOUNT_GRPQUOTA), MOPT_CLEAR | MOPT_Q}, {Opt_usrjquota, 0, MOPT_Q}, {Opt_grpjquota, 0, MOPT_Q}, {Opt_offusrjquota, 0, MOPT_Q}, {Opt_offgrpjquota, 0, MOPT_Q}, {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT}, {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT}, {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT}, {Opt_max_dir_size_kb, 0, MOPT_GTE0}, {Opt_err, 0, 0} }; static int handle_mount_opt(struct super_block *sb, char *opt, int token, substring_t *args, unsigned long *journal_devnum, unsigned int *journal_ioprio, int is_remount) { struct ext4_sb_info *sbi = EXT4_SB(sb); const struct mount_opts *m; kuid_t uid; kgid_t gid; int arg = 0; #ifdef CONFIG_QUOTA if (token == Opt_usrjquota) return set_qf_name(sb, USRQUOTA, &args[0]); else if (token == Opt_grpjquota) return set_qf_name(sb, GRPQUOTA, &args[0]); else if (token == Opt_offusrjquota) return clear_qf_name(sb, USRQUOTA); else if (token == Opt_offgrpjquota) return clear_qf_name(sb, GRPQUOTA); #endif switch (token) { case Opt_noacl: case Opt_nouser_xattr: ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5"); break; case Opt_sb: return 1; /* handled by get_sb_block() */ case Opt_removed: ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt); return 1; case Opt_abort: sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; return 1; case Opt_i_version: sb->s_flags |= MS_I_VERSION; return 1; } for (m = ext4_mount_opts; m->token != Opt_err; m++) if (token == m->token) break; if (m->token == Opt_err) { ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" " "or missing value", opt); return -1; } if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) { ext4_msg(sb, KERN_ERR, "Mount option \"%s\" incompatible with ext2", opt); return -1; } if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) { ext4_msg(sb, KERN_ERR, "Mount option \"%s\" 
incompatible with ext3", opt); return -1; } if (args->from && match_int(args, &arg)) return -1; if (args->from && (m->flags & MOPT_GTE0) && (arg < 0)) return -1; if (m->flags & MOPT_EXPLICIT) set_opt2(sb, EXPLICIT_DELALLOC); if (m->flags & MOPT_CLEAR_ERR) clear_opt(sb, ERRORS_MASK); if (token == Opt_noquota && sb_any_quota_loaded(sb)) { ext4_msg(sb, KERN_ERR, "Cannot change quota " "options when quota turned on"); return -1; } if (m->flags & MOPT_NOSUPPORT) { ext4_msg(sb, KERN_ERR, "%s option not supported", opt); } else if (token == Opt_commit) { if (arg == 0) arg = JBD2_DEFAULT_MAX_COMMIT_AGE; sbi->s_commit_interval = HZ * arg; } else if (token == Opt_max_batch_time) { if (arg == 0) arg = EXT4_DEF_MAX_BATCH_TIME; sbi->s_max_batch_time = arg; } else if (token == Opt_min_batch_time) { sbi->s_min_batch_time = arg; } else if (token == Opt_inode_readahead_blks) { if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) { ext4_msg(sb, KERN_ERR, "EXT4-fs: inode_readahead_blks must be " "0 or a power of 2 smaller than 2^31"); return -1; } sbi->s_inode_readahead_blks = arg; } else if (token == Opt_init_itable) { set_opt(sb, INIT_INODE_TABLE); if (!args->from) arg = EXT4_DEF_LI_WAIT_MULT; sbi->s_li_wait_mult = arg; } else if (token == Opt_max_dir_size_kb) { sbi->s_max_dir_size_kb = arg; } else if (token == Opt_stripe) { sbi->s_stripe = arg; } else if (token == Opt_resuid) { uid = make_kuid(current_user_ns(), arg); if (!uid_valid(uid)) { ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg); return -1; } sbi->s_resuid = uid; } else if (token == Opt_resgid) { gid = make_kgid(current_user_ns(), arg); if (!gid_valid(gid)) { ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg); return -1; } sbi->s_resgid = gid; } else if (token == Opt_journal_dev) { if (is_remount) { ext4_msg(sb, KERN_ERR, "Cannot specify journal on remount"); return -1; } *journal_devnum = arg; } else if (token == Opt_journal_ioprio) { if (arg > 7) { ext4_msg(sb, KERN_ERR, "Invalid journal IO priority" " (must be 0-7)"); return -1; } *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg); } else if (m->flags & MOPT_DATAJ) { if (is_remount) { if (!sbi->s_journal) ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option"); else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) { ext4_msg(sb, KERN_ERR, "Cannot change data mode on remount"); return -1; } } else { clear_opt(sb, DATA_FLAGS); sbi->s_mount_opt |= m->mount_opt; } #ifdef CONFIG_QUOTA } else if (m->flags & MOPT_QFMT) { if (sb_any_quota_loaded(sb) && sbi->s_jquota_fmt != m->mount_opt) { ext4_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return -1; } if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) { ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options " "when QUOTA feature is enabled"); return -1; } sbi->s_jquota_fmt = m->mount_opt; #endif } else { if (!args->from) arg = 1; if (m->flags & MOPT_CLEAR) arg = !arg; else if (unlikely(!(m->flags & MOPT_SET))) { ext4_msg(sb, KERN_WARNING, "buggy handling of option %s", opt); WARN_ON(1); return -1; } if (arg != 0) sbi->s_mount_opt |= m->mount_opt; else sbi->s_mount_opt &= ~m->mount_opt; } return 1; } static int parse_options(char *options, struct super_block *sb, unsigned long *journal_devnum, unsigned int *journal_ioprio, int is_remount) { struct ext4_sb_info *sbi = EXT4_SB(sb); char *p; substring_t args[MAX_OPT_ARGS]; int token; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; /* * Initialize args 
struct so we know whether arg was * found; some options take optional arguments. */ args[0].to = args[0].from = NULL; token = match_token(p, tokens, args); if (handle_mount_opt(sb, p, token, args, journal_devnum, journal_ioprio, is_remount) < 0) return 0; } #ifdef CONFIG_QUOTA if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) && (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) { ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA " "feature is enabled"); return 0; } if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) clear_opt(sb, USRQUOTA); if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) clear_opt(sb, GRPQUOTA); if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { ext4_msg(sb, KERN_ERR, "old and new quota " "format mixing"); return 0; } if (!sbi->s_jquota_fmt) { ext4_msg(sb, KERN_ERR, "journaled quota format " "not specified"); return 0; } } else { if (sbi->s_jquota_fmt) { ext4_msg(sb, KERN_ERR, "journaled quota format " "specified with no journaling " "enabled"); return 0; } } #endif if (test_opt(sb, DIOREAD_NOLOCK)) { int blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); if (blocksize < PAGE_CACHE_SIZE) { ext4_msg(sb, KERN_ERR, "can't mount with " "dioread_nolock if block size != PAGE_SIZE"); return 0; } } return 1; } static inline void ext4_show_quota_options(struct seq_file *seq, struct super_block *sb) { #if defined(CONFIG_QUOTA) struct ext4_sb_info *sbi = EXT4_SB(sb); if (sbi->s_jquota_fmt) { char *fmtname = ""; switch (sbi->s_jquota_fmt) { case QFMT_VFS_OLD: fmtname = "vfsold"; break; case QFMT_VFS_V0: fmtname = "vfsv0"; break; case QFMT_VFS_V1: fmtname = "vfsv1"; break; } seq_printf(seq, ",jqfmt=%s", fmtname); } if (sbi->s_qf_names[USRQUOTA]) seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]); if (sbi->s_qf_names[GRPQUOTA]) seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); #endif } static const char *token2str(int token) { const struct match_token *t; for (t = tokens; t->token != Opt_err; t++) if (t->token == token && !strchr(t->pattern, '=')) break; return t->pattern; } /* * Show an option if * - it's set to a non-default value OR * - if the per-sb default is different from the global default */ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, int nodefs) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt; const struct mount_opts *m; char sep = nodefs ? 
'\n' : ','; #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep) #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg) if (sbi->s_sb_block != 1) SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block); for (m = ext4_mount_opts; m->token != Opt_err; m++) { int want_set = m->flags & MOPT_SET; if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || (m->flags & MOPT_CLEAR_ERR)) continue; if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt))) continue; /* skip if same as the default */ if ((want_set && (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) || (!want_set && (sbi->s_mount_opt & m->mount_opt))) continue; /* select Opt_noFoo vs Opt_Foo */ SEQ_OPTS_PRINT("%s", token2str(m->token)); } if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) || le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) SEQ_OPTS_PRINT("resuid=%u", from_kuid_munged(&init_user_ns, sbi->s_resuid)); if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) || le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) SEQ_OPTS_PRINT("resgid=%u", from_kgid_munged(&init_user_ns, sbi->s_resgid)); def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors); if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO) SEQ_OPTS_PUTS("errors=remount-ro"); if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE) SEQ_OPTS_PUTS("errors=continue"); if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC) SEQ_OPTS_PUTS("errors=panic"); if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ); if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); if (sb->s_flags & MS_I_VERSION) SEQ_OPTS_PUTS("i_version"); if (nodefs || sbi->s_stripe) SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) SEQ_OPTS_PUTS("data=journal"); else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) SEQ_OPTS_PUTS("data=ordered"); else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) SEQ_OPTS_PUTS("data=writeback"); } if (nodefs || sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS) SEQ_OPTS_PRINT("inode_readahead_blks=%u", sbi->s_inode_readahead_blks); if (nodefs || (test_opt(sb, INIT_INODE_TABLE) && (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT))) SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult); if (nodefs || sbi->s_max_dir_size_kb) SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); ext4_show_quota_options(seq, sb); return 0; } static int ext4_show_options(struct seq_file *seq, struct dentry *root) { return _ext4_show_options(seq, root->d_sb, 0); } static int options_seq_show(struct seq_file *seq, void *offset) { struct super_block *sb = seq->private; int rc; seq_puts(seq, (sb->s_flags & MS_RDONLY) ? 
"ro" : "rw"); rc = _ext4_show_options(seq, sb, 1); seq_puts(seq, "\n"); return rc; } static int options_open_fs(struct inode *inode, struct file *file) { return single_open(file, options_seq_show, PDE_DATA(inode)); } static const struct file_operations ext4_seq_options_fops = { .owner = THIS_MODULE, .open = options_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, int read_only) { struct ext4_sb_info *sbi = EXT4_SB(sb); int res = 0; if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) { ext4_msg(sb, KERN_ERR, "revision level too high, " "forcing read-only mode"); res = MS_RDONLY; } if (read_only) goto done; if (!(sbi->s_mount_state & EXT4_VALID_FS)) ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, " "running e2fsck is recommended"); else if ((sbi->s_mount_state & EXT4_ERROR_FS)) ext4_msg(sb, KERN_WARNING, "warning: mounting fs with errors, " "running e2fsck is recommended"); else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 && le16_to_cpu(es->s_mnt_count) >= (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) ext4_msg(sb, KERN_WARNING, "warning: maximal mount count reached, " "running e2fsck is recommended"); else if (le32_to_cpu(es->s_checkinterval) && (le32_to_cpu(es->s_lastcheck) + le32_to_cpu(es->s_checkinterval) <= get_seconds())) ext4_msg(sb, KERN_WARNING, "warning: checktime reached, " "running e2fsck is recommended"); if (!sbi->s_journal) es->s_state &= cpu_to_le16(~EXT4_VALID_FS); if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); es->s_mtime = cpu_to_le32(get_seconds()); ext4_update_dynamic_rev(sb); if (sbi->s_journal) EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); ext4_commit_super(sb, 1); done: if (test_opt(sb, DEBUG)) printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, " "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n", sb->s_blocksize, sbi->s_groups_count, EXT4_BLOCKS_PER_GROUP(sb), EXT4_INODES_PER_GROUP(sb), sbi->s_mount_opt, sbi->s_mount_opt2); cleancache_init_fs(sb); return res; } int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct flex_groups *new_groups; int size; if (!sbi->s_log_groups_per_flex) return 0; size = ext4_flex_group(sbi, ngroup - 1) + 1; if (size <= sbi->s_flex_groups_allocated) return 0; size = roundup_pow_of_two(size * sizeof(struct flex_groups)); new_groups = ext4_kvzalloc(size, GFP_KERNEL); if (!new_groups) { ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups", size / (int) sizeof(struct flex_groups)); return -ENOMEM; } if (sbi->s_flex_groups) { memcpy(new_groups, sbi->s_flex_groups, (sbi->s_flex_groups_allocated * sizeof(struct flex_groups))); ext4_kvfree(sbi->s_flex_groups); } sbi->s_flex_groups = new_groups; sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups); return 0; } static int ext4_fill_flex_info(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp = NULL; ext4_group_t flex_group; int i, err; sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) { sbi->s_log_groups_per_flex = 0; return 1; } err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count); if (err) goto failed; for (i = 0; i < sbi->s_groups_count; i++) { gdp = ext4_get_group_desc(sb, i, NULL); flex_group = ext4_flex_group(sbi, i); 
atomic_add(ext4_free_inodes_count(sb, gdp), &sbi->s_flex_groups[flex_group].free_inodes); atomic64_add(ext4_free_group_clusters(sb, gdp), &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(ext4_used_dirs_count(sb, gdp), &sbi->s_flex_groups[flex_group].used_dirs); } return 1; failed: return 0; } static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, struct ext4_group_desc *gdp) { int offset; __u16 crc = 0; __le32 le_group = cpu_to_le32(block_group); if ((sbi->s_es->s_feature_ro_compat & cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) { /* Use new metadata_csum algorithm */ __le16 save_csum; __u32 csum32; save_csum = gdp->bg_checksum; gdp->bg_checksum = 0; csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group, sizeof(le_group)); csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, sbi->s_desc_size); gdp->bg_checksum = save_csum; crc = csum32 & 0xFFFF; goto out; } /* old crc16 code */ offset = offsetof(struct ext4_group_desc, bg_checksum); crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); crc = crc16(crc, (__u8 *)gdp, offset); offset += sizeof(gdp->bg_checksum); /* skip checksum */ /* for checksum of struct ext4_group_desc do the rest...*/ if ((sbi->s_es->s_feature_incompat & cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) && offset < le16_to_cpu(sbi->s_es->s_desc_size)) crc = crc16(crc, (__u8 *)gdp + offset, le16_to_cpu(sbi->s_es->s_desc_size) - offset); out: return cpu_to_le16(crc); } int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group, struct ext4_group_desc *gdp) { if (ext4_has_group_desc_csum(sb) && (gdp->bg_checksum != ext4_group_desc_csum(EXT4_SB(sb), block_group, gdp))) return 0; return 1; } void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group, struct ext4_group_desc *gdp) { if (!ext4_has_group_desc_csum(sb)) return; gdp->bg_checksum = ext4_group_desc_csum(EXT4_SB(sb), block_group, gdp); } /* Called at mount-time, super-block is locked */ static int ext4_check_descriptors(struct super_block *sb, ext4_group_t *first_not_zeroed) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); ext4_fsblk_t last_block; ext4_fsblk_t block_bitmap; ext4_fsblk_t inode_bitmap; ext4_fsblk_t inode_table; int flexbg_flag = 0; ext4_group_t i, grp = sbi->s_groups_count; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) flexbg_flag = 1; ext4_debug("Checking group descriptors"); for (i = 0; i < sbi->s_groups_count; i++) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); if (i == sbi->s_groups_count - 1 || flexbg_flag) last_block = ext4_blocks_count(sbi->s_es) - 1; else last_block = first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1); if ((grp == sbi->s_groups_count) && !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) grp = i; block_bitmap = ext4_block_bitmap(sb, gdp); if (block_bitmap < first_block || block_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Block bitmap for group %u not in group " "(block %llu)!", i, block_bitmap); return 0; } inode_bitmap = ext4_inode_bitmap(sb, gdp); if (inode_bitmap < first_block || inode_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Inode bitmap for group %u not in group " "(block %llu)!", i, inode_bitmap); return 0; } inode_table = ext4_inode_table(sb, gdp); if (inode_table < first_block || inode_table + sbi->s_itb_per_group - 1 > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 
"Inode table for group %u not in group " "(block %llu)!", i, inode_table); return 0; } ext4_lock_group(sb, i); if (!ext4_group_desc_csum_verify(sb, i, gdp)) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Checksum for group %u failed (%u!=%u)", i, le16_to_cpu(ext4_group_desc_csum(sbi, i, gdp)), le16_to_cpu(gdp->bg_checksum)); if (!(sb->s_flags & MS_RDONLY)) { ext4_unlock_group(sb, i); return 0; } } ext4_unlock_group(sb, i); if (!flexbg_flag) first_block += EXT4_BLOCKS_PER_GROUP(sb); } if (NULL != first_not_zeroed) *first_not_zeroed = grp; ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, ext4_count_free_clusters(sb))); sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb)); return 1; } /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at * the superblock) which were deleted from all directories, but held open by * a process at the time of a crash. We walk the list and try to delete these * inodes at recovery time (only with a read-write filesystem). * * In order to keep the orphan inode chain consistent during traversal (in * case of crash during recovery), we link each inode into the superblock * orphan list_head and handle it the same way as an inode deletion during * normal operation (which journals the operations for us). * * We only do an iget() and an iput() on each inode, which is very safe if we * accidentally point at an in-use or already deleted inode. The worst that * can happen in this case is that we get a "bit already cleared" message from * ext4_free_inode(). The only reason we would point at a wrong inode is if * e2fsck was run on this filesystem, and it must have already done the orphan * inode cleanup for us, so we can safely abort without any further action. */ static void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es) { unsigned int s_flags = sb->s_flags; int nr_orphans = 0, nr_truncates = 0; #ifdef CONFIG_QUOTA int i; #endif if (!es->s_last_orphan) { jbd_debug(4, "no orphan inodes to clean up\n"); return; } if (bdev_read_only(sb->s_bdev)) { ext4_msg(sb, KERN_ERR, "write access " "unavailable, skipping orphan cleanup"); return; } /* Check if feature set would not allow a r/w mount */ if (!ext4_feature_set_ok(sb, 0)) { ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " "unknown ROCOMPAT features"); return; } if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { /* don't clear list on RO mount w/ errors */ if (es->s_last_orphan && !(s_flags & MS_RDONLY)) { jbd_debug(1, "Errors on filesystem, " "clearing orphan list.\n"); es->s_last_orphan = 0; } jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); return; } if (s_flags & MS_RDONLY) { ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs"); sb->s_flags &= ~MS_RDONLY; } #ifdef CONFIG_QUOTA /* Needed for iput() to work correctly and not trash data */ sb->s_flags |= MS_ACTIVE; /* Turn on quotas so that they are updated correctly */ for (i = 0; i < MAXQUOTAS; i++) { if (EXT4_SB(sb)->s_qf_names[i]) { int ret = ext4_quota_on_mount(sb, i); if (ret < 0) ext4_msg(sb, KERN_ERR, "Cannot turn on journaled " "quota: error %d", ret); } } #endif while (es->s_last_orphan) { struct inode *inode; inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); if (IS_ERR(inode)) { es->s_last_orphan = 0; break; } list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); dquot_initialize(inode); if (inode->i_nlink) { if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "%s: truncating inode %lu to %lld bytes", __func__, inode->i_ino, inode->i_size); jbd_debug(2, 
"truncating inode %lu to %lld bytes\n", inode->i_ino, inode->i_size); mutex_lock(&inode->i_mutex); truncate_inode_pages(inode->i_mapping, inode->i_size); ext4_truncate(inode); mutex_unlock(&inode->i_mutex); nr_truncates++; } else { if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "%s: deleting unreferenced inode %lu", __func__, inode->i_ino); jbd_debug(2, "deleting unreferenced inode %lu\n", inode->i_ino); nr_orphans++; } iput(inode); /* The delete magic happens here! */ } #define PLURAL(x) (x), ((x) == 1) ? "" : "s" if (nr_orphans) ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted", PLURAL(nr_orphans)); if (nr_truncates) ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up", PLURAL(nr_truncates)); #ifdef CONFIG_QUOTA /* Turn quotas off */ for (i = 0; i < MAXQUOTAS; i++) { if (sb_dqopt(sb)->files[i]) dquot_quota_off(sb, i); } #endif sb->s_flags = s_flags; /* Restore MS_RDONLY status */ } /* * Maximal extent format file size. * Resulting logical blkno at s_maxbytes must fit in our on-disk * extent format containers, within a sector_t, and within i_blocks * in the vfs. ext4 inode has 48 bits of i_block in fsblock units, * so that won't be a limiting factor. * * However there is other limiting factor. We do store extents in the form * of starting block and length, hence the resulting length of the extent * covering maximum file size must fit into on-disk format containers as * well. Given that length is always by 1 unit bigger than max unit (because * we count 0 as well) we have to lower the s_maxbytes by one fs block. * * Note, this does *not* consider any metadata overhead for vfs i_blocks. */ static loff_t ext4_max_size(int blkbits, int has_huge_files) { loff_t res; loff_t upper_limit = MAX_LFS_FILESIZE; /* small i_blocks in vfs inode? */ if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { /* * CONFIG_LBDAF is not enabled implies the inode * i_block represent total blocks in 512 bytes * 32 == size of vfs inode i_blocks * 8 */ upper_limit = (1LL << 32) - 1; /* total blocks in file system block size */ upper_limit >>= (blkbits - 9); upper_limit <<= blkbits; } /* * 32-bit extent-start container, ee_block. We lower the maxbytes * by one fs block, so ee_len can cover the extent of maximum file * size */ res = (1LL << 32) - 1; res <<= blkbits; /* Sanity check against vm- & vfs- imposed limits */ if (res > upper_limit) res = upper_limit; return res; } /* * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks. * We need to be 1 filesystem block less than the 2^48 sector limit. */ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files) { loff_t res = EXT4_NDIR_BLOCKS; int meta_blocks; loff_t upper_limit; /* This is calculated to be the largest file size for a dense, block * mapped file such that the file's total number of 512-byte sectors, * including data and all indirect blocks, does not exceed (2^48 - 1). * * __u32 i_blocks_lo and _u16 i_blocks_high represent the total * number of 512-byte sectors of the file. 
*/ if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { /* * !has_huge_files or CONFIG_LBDAF not enabled implies that * the inode i_block field represents total file blocks in * 2^32 512-byte sectors == size of vfs inode i_blocks * 8 */ upper_limit = (1LL << 32) - 1; /* total blocks in file system block size */ upper_limit >>= (bits - 9); } else { /* * We use 48 bit ext4_inode i_blocks * With EXT4_HUGE_FILE_FL set the i_blocks * represent total number of blocks in * file system block size */ upper_limit = (1LL << 48) - 1; } /* indirect blocks */ meta_blocks = 1; /* double indirect blocks */ meta_blocks += 1 + (1LL << (bits-2)); /* triple indirect blocks */ meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2))); upper_limit -= meta_blocks; upper_limit <<= bits; res += 1LL << (bits-2); res += 1LL << (2*(bits-2)); res += 1LL << (3*(bits-2)); res <<= bits; if (res > upper_limit) res = upper_limit; if (res > MAX_LFS_FILESIZE) res = MAX_LFS_FILESIZE; return res; } static ext4_fsblk_t descriptor_loc(struct super_block *sb, ext4_fsblk_t logical_sb_block, int nr) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_group_t bg, first_meta_bg; int has_super = 0; first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg); if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) || nr < first_meta_bg) return logical_sb_block + nr + 1; bg = sbi->s_desc_per_block * nr; if (ext4_bg_has_super(sb, bg)) has_super = 1; return (has_super + ext4_group_first_block_no(sb, bg)); } /** * ext4_get_stripe_size: Get the stripe size. * @sbi: In memory super block info * * If we have specified it via mount option, then * use the mount option value. If the value specified at mount time is * greater than the blocks per group use the super block value. * If the super block value is greater than blocks per group return 0. * Allocator needs it to be less than blocks per group. * */ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi) { unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride); unsigned long stripe_width = le32_to_cpu(sbi->s_es->s_raid_stripe_width); int ret; if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group) ret = sbi->s_stripe; else if (stripe_width <= sbi->s_blocks_per_group) ret = stripe_width; else if (stride <= sbi->s_blocks_per_group) ret = stride; else ret = 0; /* * If the stripe width is 1, this makes no sense and * we set it to 0 to turn off stripe handling code.
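 *
 * Editor's sketch of the resulting precedence as stand-alone C
 * (pick_stripe() is a hypothetical name, not an ext4 symbol):
 *
 *   static unsigned long pick_stripe(unsigned long opt,
 *                                    unsigned long width,
 *                                    unsigned long stride,
 *                                    unsigned long bpg)
 *   {
 *           unsigned long r = 0;
 *
 *           if (opt && opt <= bpg)
 *                   r = opt;               // mount option wins
 *           else if (width <= bpg)
 *                   r = width;             // then RAID stripe width
 *           else if (stride <= bpg)
 *                   r = stride;            // then RAID stride
 *           return r <= 1 ? 0 : r;         // 0 or 1 disables striping
 *   }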
*/ if (ret <= 1) ret = 0; return ret; } /* sysfs support */ struct ext4_attr { struct attribute attr; ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *); ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *, const char *, size_t); union { int offset; int deprecated_val; } u; }; static int parse_strtoull(const char *buf, unsigned long long max, unsigned long long *value) { int ret; ret = kstrtoull(skip_spaces(buf), 0, value); if (!ret && *value > max) ret = -EINVAL; return ret; } static ssize_t delayed_allocation_blocks_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { return snprintf(buf, PAGE_SIZE, "%llu\n", (s64) EXT4_C2B(sbi, percpu_counter_sum(&sbi->s_dirtyclusters_counter))); } static ssize_t session_write_kbytes_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { struct super_block *sb = sbi->s_buddy_cache->i_sb; if (!sb->s_bdev->bd_part) return snprintf(buf, PAGE_SIZE, "0\n"); return snprintf(buf, PAGE_SIZE, "%lu\n", (part_stat_read(sb->s_bdev->bd_part, sectors[1]) - sbi->s_sectors_written_start) >> 1); } static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { struct super_block *sb = sbi->s_buddy_cache->i_sb; if (!sb->s_bdev->bd_part) return snprintf(buf, PAGE_SIZE, "0\n"); return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)(sbi->s_kbytes_written + ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) - EXT4_SB(sb)->s_sectors_written_start) >> 1))); } static ssize_t inode_readahead_blks_store(struct ext4_attr *a, struct ext4_sb_info *sbi, const char *buf, size_t count) { unsigned long t; int ret; ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret) return ret; if (t && (!is_power_of_2(t) || t > 0x40000000)) return -EINVAL; sbi->s_inode_readahead_blks = t; return count; } static ssize_t sbi_ui_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset); return snprintf(buf, PAGE_SIZE, "%u\n", *ui); } static ssize_t sbi_ui_store(struct ext4_attr *a, struct ext4_sb_info *sbi, const char *buf, size_t count) { unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset); unsigned long t; int ret; ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret) return ret; *ui = t; return count; } static ssize_t reserved_clusters_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long) atomic64_read(&sbi->s_resv_clusters)); } static ssize_t reserved_clusters_store(struct ext4_attr *a, struct ext4_sb_info *sbi, const char *buf, size_t count) { unsigned long long val; int ret; if (parse_strtoull(buf, -1ULL, &val)) return -EINVAL; ret = ext4_reserve_clusters(sbi, val); return ret ?
ret : count; } static ssize_t trigger_test_error(struct ext4_attr *a, struct ext4_sb_info *sbi, const char *buf, size_t count) { int len = count; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (len && buf[len-1] == '\n') len--; if (len) ext4_error(sbi->s_sb, "%.*s", len, buf); return count; } static ssize_t sbi_deprecated_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", a->u.deprecated_val); } #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .show = _show, \ .store = _store, \ .u = { \ .offset = offsetof(struct ext4_sb_info, _elname),\ }, \ } #define EXT4_ATTR(name, mode, show, store) \ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store) #define EXT4_INFO_ATTR(name) EXT4_ATTR(name, 0444, NULL, NULL) #define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL) #define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store) #define EXT4_RW_ATTR_SBI_UI(name, elname) \ EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname) #define ATTR_LIST(name) &ext4_attr_##name.attr #define EXT4_DEPRECATED_ATTR(_name, _val) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = 0444 }, \ .show = sbi_deprecated_show, \ .u = { \ .deprecated_val = _val, \ }, \ } EXT4_RO_ATTR(delayed_allocation_blocks); EXT4_RO_ATTR(session_write_kbytes); EXT4_RO_ATTR(lifetime_write_kbytes); EXT4_RW_ATTR(reserved_clusters); EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show, inode_readahead_blks_store, s_inode_readahead_blks); EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal); EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats); EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan); EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan); EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128); EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb); EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error); static struct attribute *ext4_attrs[] = { ATTR_LIST(delayed_allocation_blocks), ATTR_LIST(session_write_kbytes), ATTR_LIST(lifetime_write_kbytes), ATTR_LIST(reserved_clusters), ATTR_LIST(inode_readahead_blks), ATTR_LIST(inode_goal), ATTR_LIST(mb_stats), ATTR_LIST(mb_max_to_scan), ATTR_LIST(mb_min_to_scan), ATTR_LIST(mb_order2_req), ATTR_LIST(mb_stream_req), ATTR_LIST(mb_group_prealloc), ATTR_LIST(max_writeback_mb_bump), ATTR_LIST(extent_max_zeroout_kb), ATTR_LIST(trigger_fs_error), NULL, }; /* Features this copy of ext4 supports */ EXT4_INFO_ATTR(lazy_itable_init); EXT4_INFO_ATTR(batched_discard); EXT4_INFO_ATTR(meta_bg_resize); static struct attribute *ext4_feat_attrs[] = { ATTR_LIST(lazy_itable_init), ATTR_LIST(batched_discard), ATTR_LIST(meta_bg_resize), NULL, }; static ssize_t ext4_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, s_kobj); struct ext4_attr *a = container_of(attr, struct ext4_attr, attr); return a->show ? a->show(a, sbi, buf) : 0; } static ssize_t ext4_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, s_kobj); struct ext4_attr *a = container_of(attr, struct ext4_attr, attr); return a->store ? 
a->store(a, sbi, buf, len) : 0; } static void ext4_sb_release(struct kobject *kobj) { struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, s_kobj); complete(&sbi->s_kobj_unregister); } static const struct sysfs_ops ext4_attr_ops = { .show = ext4_attr_show, .store = ext4_attr_store, }; static struct kobj_type ext4_ktype = { .default_attrs = ext4_attrs, .sysfs_ops = &ext4_attr_ops, .release = ext4_sb_release, }; static void ext4_feat_release(struct kobject *kobj) { complete(&ext4_feat->f_kobj_unregister); } static struct kobj_type ext4_feat_ktype = { .default_attrs = ext4_feat_attrs, .sysfs_ops = &ext4_attr_ops, .release = ext4_feat_release, }; /* * Check whether this filesystem can be mounted based on * the features present and the RDONLY/RDWR mount requested. * Returns 1 if this filesystem can be mounted as requested, * 0 if it cannot be. */ static int ext4_feature_set_ok(struct super_block *sb, int readonly) { if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP)) { ext4_msg(sb, KERN_ERR, "Couldn't mount because of " "unsupported optional features (%x)", (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) & ~EXT4_FEATURE_INCOMPAT_SUPP)); return 0; } if (readonly) return 1; /* Check that feature set is OK for a read-write mount */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) { ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " "unsupported optional features (%x)", (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) & ~EXT4_FEATURE_RO_COMPAT_SUPP)); return 0; } /* * Large file size enabled file system can only be mounted * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { if (sizeof(blkcnt_t) < sizeof(u64)) { ext4_msg(sb, KERN_ERR, "Filesystem with huge files " "cannot be mounted RDWR without " "CONFIG_LBDAF"); return 0; } } if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC) && !EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { ext4_msg(sb, KERN_ERR, "Can't support bigalloc feature without " "extents feature\n"); return 0; } #ifndef CONFIG_QUOTA if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) && !readonly) { ext4_msg(sb, KERN_ERR, "Filesystem with quota feature cannot be mounted RDWR " "without CONFIG_QUOTA"); return 0; } #endif /* CONFIG_QUOTA */ return 1; } /* * This function is called once a day if we have errors logged * on the file system */ static void print_daily_error_info(unsigned long arg) { struct super_block *sb = (struct super_block *) arg; struct ext4_sb_info *sbi; struct ext4_super_block *es; sbi = EXT4_SB(sb); es = sbi->s_es; if (es->s_error_count) ext4_msg(sb, KERN_NOTICE, "error count: %u", le32_to_cpu(es->s_error_count)); if (es->s_first_error_time) { printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_first_error_time), (int) sizeof(es->s_first_error_func), es->s_first_error_func, le32_to_cpu(es->s_first_error_line)); if (es->s_first_error_ino) printk(": inode %u", le32_to_cpu(es->s_first_error_ino)); if (es->s_first_error_block) printk(": block %llu", (unsigned long long) le64_to_cpu(es->s_first_error_block)); printk("\n"); } if (es->s_last_error_time) { printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_last_error_time), (int) sizeof(es->s_last_error_func), es->s_last_error_func, le32_to_cpu(es->s_last_error_line)); if (es->s_last_error_ino) printk(": inode %u", le32_to_cpu(es->s_last_error_ino)); if 
(es->s_last_error_block) printk(": block %llu", (unsigned long long) le64_to_cpu(es->s_last_error_block)); printk("\n"); } mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ } /* Find next suitable group and run ext4_init_inode_table */ static int ext4_run_li_request(struct ext4_li_request *elr) { struct ext4_group_desc *gdp = NULL; ext4_group_t group, ngroups; struct super_block *sb; unsigned long timeout = 0; int ret = 0; sb = elr->lr_super; ngroups = EXT4_SB(sb)->s_groups_count; sb_start_write(sb); for (group = elr->lr_next_group; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) { ret = 1; break; } if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) break; } if (group >= ngroups) ret = 1; if (!ret) { timeout = jiffies; ret = ext4_init_inode_table(sb, group, elr->lr_timeout ? 0 : 1); if (elr->lr_timeout == 0) { timeout = (jiffies - timeout) * elr->lr_sbi->s_li_wait_mult; elr->lr_timeout = timeout; } elr->lr_next_sched = jiffies + elr->lr_timeout; elr->lr_next_group = group + 1; } sb_end_write(sb); return ret; } /* * Remove lr_request from the request list and free the * request structure. Should be called with li_list_mtx held */ static void ext4_remove_li_request(struct ext4_li_request *elr) { struct ext4_sb_info *sbi; if (!elr) return; sbi = elr->lr_sbi; list_del(&elr->lr_request); sbi->s_li_request = NULL; kfree(elr); } static void ext4_unregister_li_request(struct super_block *sb) { mutex_lock(&ext4_li_mtx); if (!ext4_li_info) { mutex_unlock(&ext4_li_mtx); return; } mutex_lock(&ext4_li_info->li_list_mtx); ext4_remove_li_request(EXT4_SB(sb)->s_li_request); mutex_unlock(&ext4_li_info->li_list_mtx); mutex_unlock(&ext4_li_mtx); } static struct task_struct *ext4_lazyinit_task; /* * This is the function where the ext4lazyinit thread lives. It walks * through the request list searching for the next scheduled filesystem. * When such a fs is found, it runs the lazy initialization request * (ext4_run_li_request) and keeps track of the time spent in this * function. Based on that time we compute the next schedule time of * the request. When the walk through the list is complete, the thread * computes the next wake-up time and puts itself to sleep.
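 *
 * Editor's sketch of that scheduling pattern in isolation
 * (hypothetical types and callbacks; the real loop below also
 * handles freezing, list locking and kthread_should_stop()):
 *
 *   struct req { unsigned long deadline; };
 *
 *   static void schedule_loop(struct req *r, int n,
 *                             unsigned long (*now)(void),
 *                             void (*run)(struct req *),
 *                             void (*sleep_until)(unsigned long))
 *   {
 *           for (;;) {
 *                   unsigned long next = (unsigned long)-1;
 *                   int i;
 *
 *                   for (i = 0; i < n; i++) {
 *                           if (now() >= r[i].deadline)
 *                                   run(&r[i]);    // may push deadline out
 *                           if (r[i].deadline < next)
 *                                   next = r[i].deadline;
 *                   }
 *                   sleep_until(next);
 *           }
 *   }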
*/ static int ext4_lazyinit_thread(void *arg) { struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg; struct list_head *pos, *n; struct ext4_li_request *elr; unsigned long next_wakeup, cur; BUG_ON(NULL == eli); cont_thread: while (true) { next_wakeup = MAX_JIFFY_OFFSET; mutex_lock(&eli->li_list_mtx); if (list_empty(&eli->li_request_list)) { mutex_unlock(&eli->li_list_mtx); goto exit_thread; } list_for_each_safe(pos, n, &eli->li_request_list) { elr = list_entry(pos, struct ext4_li_request, lr_request); if (time_after_eq(jiffies, elr->lr_next_sched)) { if (ext4_run_li_request(elr) != 0) { /* error, remove the lazy_init job */ ext4_remove_li_request(elr); continue; } } if (time_before(elr->lr_next_sched, next_wakeup)) next_wakeup = elr->lr_next_sched; } mutex_unlock(&eli->li_list_mtx); try_to_freeze(); cur = jiffies; if ((time_after_eq(cur, next_wakeup)) || (MAX_JIFFY_OFFSET == next_wakeup)) { cond_resched(); continue; } schedule_timeout_interruptible(next_wakeup - cur); if (kthread_should_stop()) { ext4_clear_request_list(); goto exit_thread; } } exit_thread: /* * It looks like the request list is empty, but we need * to check it under the li_list_mtx lock, to prevent any * additions into it, and of course we should lock ext4_li_mtx * to atomically free the list and ext4_li_info, because at * this point another ext4 filesystem could be registering * a new one. */ mutex_lock(&ext4_li_mtx); mutex_lock(&eli->li_list_mtx); if (!list_empty(&eli->li_request_list)) { mutex_unlock(&eli->li_list_mtx); mutex_unlock(&ext4_li_mtx); goto cont_thread; } mutex_unlock(&eli->li_list_mtx); kfree(ext4_li_info); ext4_li_info = NULL; mutex_unlock(&ext4_li_mtx); return 0; } static void ext4_clear_request_list(void) { struct list_head *pos, *n; struct ext4_li_request *elr; mutex_lock(&ext4_li_info->li_list_mtx); list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { elr = list_entry(pos, struct ext4_li_request, lr_request); ext4_remove_li_request(elr); } mutex_unlock(&ext4_li_info->li_list_mtx); } static int ext4_run_lazyinit_thread(void) { ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit"); if (IS_ERR(ext4_lazyinit_task)) { int err = PTR_ERR(ext4_lazyinit_task); ext4_clear_request_list(); kfree(ext4_li_info); ext4_li_info = NULL; printk(KERN_CRIT "EXT4-fs: error %d creating inode table " "initialization thread\n", err); return err; } ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING; return 0; } /* * Check whether it makes sense to run the itable init thread or not. * If there is at least one uninitialized inode table, return the * corresponding group number; otherwise the loop goes through all * groups and returns the total number of groups.
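 *
 * Editor's model of the scan as stand-alone C (first_uninit() and
 * zeroed_flag are hypothetical names; zeroed_flag stands in for
 * EXT4_BG_INODE_ZEROED):
 *
 *   static unsigned first_uninit(const unsigned short *bg_flags,
 *                                unsigned ngroups,
 *                                unsigned short zeroed_flag)
 *   {
 *           unsigned g;
 *
 *           for (g = 0; g < ngroups; g++)
 *                   if (!(bg_flags[g] & zeroed_flag))
 *                           break;
 *           return g;    // == ngroups when every itable is initialized
 *   }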
*/ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) { ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; struct ext4_group_desc *gdp = NULL; for (group = 0; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) continue; if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) break; } return group; } static int ext4_li_info_new(void) { struct ext4_lazy_init *eli = NULL; eli = kzalloc(sizeof(*eli), GFP_KERNEL); if (!eli) return -ENOMEM; INIT_LIST_HEAD(&eli->li_request_list); mutex_init(&eli->li_list_mtx); eli->li_state |= EXT4_LAZYINIT_QUIT; ext4_li_info = eli; return 0; } static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, ext4_group_t start) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_li_request *elr; unsigned long rnd; elr = kzalloc(sizeof(*elr), GFP_KERNEL); if (!elr) return NULL; elr->lr_super = sb; elr->lr_sbi = sbi; elr->lr_next_group = start; /* * Randomize first schedule time of the request to * spread the inode table initialization requests * better. */ get_random_bytes(&rnd, sizeof(rnd)); elr->lr_next_sched = jiffies + (unsigned long)rnd % (EXT4_DEF_LI_MAX_START_DELAY * HZ); return elr; } int ext4_register_li_request(struct super_block *sb, ext4_group_t first_not_zeroed) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_li_request *elr = NULL; ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; int ret = 0; mutex_lock(&ext4_li_mtx); if (sbi->s_li_request != NULL) { /* * Reset timeout so it can be computed again, because * s_li_wait_mult might have changed. */ sbi->s_li_request->lr_timeout = 0; goto out; } if (first_not_zeroed == ngroups || (sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE)) goto out; elr = ext4_li_request_new(sb, first_not_zeroed); if (!elr) { ret = -ENOMEM; goto out; } if (NULL == ext4_li_info) { ret = ext4_li_info_new(); if (ret) goto out; } mutex_lock(&ext4_li_info->li_list_mtx); list_add(&elr->lr_request, &ext4_li_info->li_request_list); mutex_unlock(&ext4_li_info->li_list_mtx); sbi->s_li_request = elr; /* * set elr to NULL here since it has been inserted to * the request_list and the removal and free of it is * handled by ext4_clear_request_list from now on. */ elr = NULL; if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { ret = ext4_run_lazyinit_thread(); if (ret) goto out; } out: mutex_unlock(&ext4_li_mtx); if (ret) kfree(elr); return ret; } /* * We do not need to lock anything since this is called on * module unload. */ static void ext4_destroy_lazyinit_thread(void) { /* * If thread exited earlier * there's nothing to be done. 
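 *
 * Editor's reminder of the kthread contract relied on here:
 * kthread_stop() wakes the thread and blocks until its thread
 * function returns, so that function must poll kthread_should_stop().
 * The minimal shape (standard kthread API; worker() is hypothetical):
 *
 *   static int worker(void *arg)
 *   {
 *           while (!kthread_should_stop())
 *                   schedule_timeout_interruptible(HZ);
 *           return 0;
 *   }
 *
 *   // struct task_struct *t = kthread_run(worker, NULL, "demo");
 *   // ...
 *   // kthread_stop(t);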
*/ if (!ext4_li_info || !ext4_lazyinit_task) return; kthread_stop(ext4_lazyinit_task); } static int set_journal_csum_feature_set(struct super_block *sb) { int ret = 1; int compat, incompat; struct ext4_sb_info *sbi = EXT4_SB(sb); if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { /* journal checksum v2 */ compat = 0; incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2; } else { /* journal checksum v1 */ compat = JBD2_FEATURE_COMPAT_CHECKSUM; incompat = 0; } if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ret = jbd2_journal_set_features(sbi->s_journal, compat, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | incompat); } else if (test_opt(sb, JOURNAL_CHECKSUM)) { ret = jbd2_journal_set_features(sbi->s_journal, compat, 0, incompat); jbd2_journal_clear_features(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); } else { jbd2_journal_clear_features(sbi->s_journal, JBD2_FEATURE_COMPAT_CHECKSUM, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | JBD2_FEATURE_INCOMPAT_CSUM_V2); } return ret; } /* * Note: calculating the overhead so we can be compatible with * historical BSD practice is quite difficult in the face of * clusters/bigalloc. This is because multiple metadata blocks from * different block groups can end up in the same allocation cluster. * Calculating the exact overhead in the face of clustered allocation * requires either O(all block bitmaps) in memory or O(number of block * groups**2) in time. We will still calculate the overhead for * older file systems --- and if we come across a bigalloc file * system with zero in s_overhead_clusters the estimate will be close to * correct especially for very large cluster sizes --- but for newer * file systems, it's better to calculate this figure once at mkfs * time, and store it in the superblock. If the superblock value is * present (even for non-bigalloc file systems), we will use it.
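 *
 * Editor's worked example of the non-bigalloc fast path computed
 * below: a group that carries a superblock backup, one group
 * descriptor block and a 512-block inode table costs
 *
 *   1 (sb) + 1 (gdt) + 512 (itable) + 2 (block + inode bitmaps) = 516
 *
 * blocks of overhead, which is exactly ext4_bg_has_super() +
 * ext4_bg_num_gdb() + s_itb_per_group + 2.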
*/ static int count_overhead(struct super_block *sb, ext4_group_t grp, char *buf) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp; ext4_fsblk_t first_block, last_block, b; ext4_group_t i, ngroups = ext4_get_groups_count(sb); int s, j, count = 0; if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC)) return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) + sbi->s_itb_per_group + 2); first_block = le32_to_cpu(sbi->s_es->s_first_data_block) + (grp * EXT4_BLOCKS_PER_GROUP(sb)); last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); b = ext4_block_bitmap(sb, gdp); if (b >= first_block && b <= last_block) { ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); count++; } b = ext4_inode_bitmap(sb, gdp); if (b >= first_block && b <= last_block) { ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); count++; } b = ext4_inode_table(sb, gdp); if (b >= first_block && b + sbi->s_itb_per_group <= last_block) for (j = 0; j < sbi->s_itb_per_group; j++, b++) { int c = EXT4_B2C(sbi, b - first_block); ext4_set_bit(c, buf); count++; } if (i != grp) continue; s = 0; if (ext4_bg_has_super(sb, grp)) { ext4_set_bit(s++, buf); count++; } for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) { ext4_set_bit(EXT4_B2C(sbi, s++), buf); count++; } } if (!count) return 0; return EXT4_CLUSTERS_PER_GROUP(sb) - ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8); } /* * Compute the overhead and stash it in sbi->s_overhead */ int ext4_calculate_overhead(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_group_t i, ngroups = ext4_get_groups_count(sb); ext4_fsblk_t overhead = 0; char *buf = (char *) get_zeroed_page(GFP_KERNEL); if (!buf) return -ENOMEM; /* * Compute the overhead (FS structures). This is constant * for a given filesystem unless the number of block groups * changes so we cache the previous value until it does. */ /* * All of the blocks before first_data_block are overhead */ overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block)); /* * Add the overhead found in each block group */ for (i = 0; i < ngroups; i++) { int blks; blks = count_overhead(sb, i, buf); overhead += blks; if (blks) memset(buf, 0, PAGE_SIZE); cond_resched(); } /* Add the journal blocks as well */ if (sbi->s_journal) overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); sbi->s_overhead = overhead; smp_wmb(); free_page((unsigned long) buf); return 0; } static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi) { ext4_fsblk_t resv_clusters; /* * By default we reserve 2% or 4096 clusters, whichever is smaller. * This should cover the situations where we can not afford to run * out of space like for example punch hole, or converting * uninitialized extents in delalloc path. In most cases such * allocation would require 1, or 2 blocks, higher numbers are * very rare. 
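 *
 * Editor's worked example of the 2%-capped-at-4096 rule implemented
 * below: a 1 TiB filesystem with 4 KiB clusters has 2^28 clusters;
 * 2^28 / 50 is about 5.4 million, so the cap applies and 4096
 * clusters (16 MiB) are reserved.  Only filesystems smaller than
 * roughly 800 MiB end up with the bare 2%.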
*/ resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits; do_div(resv_clusters, 50); resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); return resv_clusters; } static int ext4_reserve_clusters(struct ext4_sb_info *sbi, ext4_fsblk_t count) { ext4_fsblk_t clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits; if (count >= clusters) return -EINVAL; atomic64_set(&sbi->s_resv_clusters, count); return 0; } static int ext4_fill_super(struct super_block *sb, void *data, int silent) { char *orig_data = kstrdup(data, GFP_KERNEL); struct buffer_head *bh; struct ext4_super_block *es = NULL; struct ext4_sb_info *sbi; ext4_fsblk_t block; ext4_fsblk_t sb_block = get_sb_block(&data); ext4_fsblk_t logical_sb_block; unsigned long offset = 0; unsigned long journal_devnum = 0; unsigned long def_mount_opts; struct inode *root; char *cp; const char *descr; int ret = -ENOMEM; int blocksize, clustersize; unsigned int db_count; unsigned int i; int needs_recovery, has_huge_files, has_bigalloc; __u64 blocks_count; int err = 0; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; ext4_group_t first_not_zeroed; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) goto out_free_orig; sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); if (!sbi->s_blockgroup_lock) { kfree(sbi); goto out_free_orig; } sb->s_fs_info = sbi; sbi->s_sb = sb; sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; sbi->s_sb_block = sb_block; if (sb->s_bdev->bd_part) sbi->s_sectors_written_start = part_stat_read(sb->s_bdev->bd_part, sectors[1]); /* Cleanup superblock name */ for (cp = sb->s_id; (cp = strchr(cp, '/'));) *cp = '!'; /* -EINVAL is default */ ret = -EINVAL; blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE); if (!blocksize) { ext4_msg(sb, KERN_ERR, "unable to set blocksize"); goto out_fail; } /* * The ext4 superblock will not be buffer aligned for other than 1kB * block sizes. We need to calculate the offset from buffer start. */ if (blocksize != EXT4_MIN_BLOCK_SIZE) { logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); } else { logical_sb_block = sb_block; } if (!(bh = sb_bread(sb, logical_sb_block))) { ext4_msg(sb, KERN_ERR, "unable to read superblock"); goto out_fail; } /* * Note: s_es must be initialized as soon as possible because * some ext4 macro-instructions depend on its value */ es = (struct ext4_super_block *) (bh->b_data + offset); sbi->s_es = es; sb->s_magic = le16_to_cpu(es->s_magic); if (sb->s_magic != EXT4_SUPER_MAGIC) goto cantfind_ext4; sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); /* Warn if metadata_csum and gdt_csum are both set. 
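 *
 * Editor's aside on the checksum seed computed a few lines below:
 * metadata_csum derives every object checksum from a per-filesystem
 * seed, so metadata blocks copied between filesystems fail to
 * verify.  Schematically (crc32c chaining; variable names are
 * illustrative only):
 *
 *   u32 seed = crc32c(~0u, es->s_uuid, sizeof(es->s_uuid));
 *   u32 csum = crc32c(seed, object, object_len);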
*/ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are " "redundant flags; please run fsck."); /* Check for a known checksum algorithm */ if (!ext4_verify_csum_type(sb, es)) { ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " "unknown checksum algorithm."); silent = 1; goto cantfind_ext4; } /* Load the checksum driver */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(sbi->s_chksum_driver)) { ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver."); ret = PTR_ERR(sbi->s_chksum_driver); sbi->s_chksum_driver = NULL; goto failed_mount; } } /* Check superblock checksum */ if (!ext4_superblock_csum_verify(sb, es)) { ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " "invalid superblock checksum. Run e2fsck?"); silent = 1; goto cantfind_ext4; } /* Precompute checksum seed for all metadata */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, sizeof(es->s_uuid)); /* Set defaults before we parse the mount options */ def_mount_opts = le32_to_cpu(es->s_default_mount_opts); set_opt(sb, INIT_INODE_TABLE); if (def_mount_opts & EXT4_DEFM_DEBUG) set_opt(sb, DEBUG); if (def_mount_opts & EXT4_DEFM_BSDGROUPS) set_opt(sb, GRPID); if (def_mount_opts & EXT4_DEFM_UID16) set_opt(sb, NO_UID32); /* xattr user namespace & acls are now defaulted on */ set_opt(sb, XATTR_USER); #ifdef CONFIG_EXT4_FS_POSIX_ACL set_opt(sb, POSIX_ACL); #endif if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) set_opt(sb, JOURNAL_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) set_opt(sb, ORDERED_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) set_opt(sb, WRITEBACK_DATA); if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) set_opt(sb, ERRORS_PANIC); else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE) set_opt(sb, ERRORS_CONT); else set_opt(sb, ERRORS_RO); if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY) set_opt(sb, BLOCK_VALIDITY); if (def_mount_opts & EXT4_DEFM_DISCARD) set_opt(sb, DISCARD); sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid)); sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid)); sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) set_opt(sb, BARRIER); /* * enable delayed allocation by default * Use -o nodelalloc to turn it off */ if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) && ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) set_opt(sb, DELALLOC); /* * set default s_li_wait_mult for lazyinit, for the case there is * no mount option specified. 
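 * (s_li_wait_mult scales the delay between lazy itable-init passes:
 * the init thread waits roughly the time the previous group took to
 * zero, multiplied by this factor, before doing the next one.)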
 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
			   &journal_devnum, &journal_ioprio, 0)) {
		ext4_msg(sb, KERN_WARNING,
			 "failed to parse options in superblock: %s",
			 sbi->s_es->s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
			   &journal_ioprio, 0))
		goto failed_mount;

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
			    "with data=journal disables delayed "
			    "allocation and O_DIRECT support!\n");
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			goto failed_mount;
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	}

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT4_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		ext4_msg(sb, KERN_WARNING,
			 "feature flags set on rev 0 fs, "
			 "running e2fsck is recommended");

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
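	 * (A rev 0 filesystem is EXT4_GOOD_OLD_REV, the original ext2
	 * revision that predates dynamically-sized inodes, so feature flags
	 * were never supposed to appear on it.)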
*/ if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY))) goto failed_mount; blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); if (blocksize < EXT4_MIN_BLOCK_SIZE || blocksize > EXT4_MAX_BLOCK_SIZE) { ext4_msg(sb, KERN_ERR, "Unsupported filesystem blocksize %d", blocksize); goto failed_mount; } if (sb->s_blocksize != blocksize) { /* Validate the filesystem blocksize */ if (!sb_set_blocksize(sb, blocksize)) { ext4_msg(sb, KERN_ERR, "bad block size %d", blocksize); goto failed_mount; } brelse(bh); logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); bh = sb_bread(sb, logical_sb_block); if (!bh) { ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try"); goto failed_mount; } es = (struct ext4_super_block *)(bh->b_data + offset); sbi->s_es = es; if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!"); goto failed_mount; } } has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE); sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, has_huge_files); sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; } else { sbi->s_inode_size = le16_to_cpu(es->s_inode_size); sbi->s_first_ino = le32_to_cpu(es->s_first_ino); if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { ext4_msg(sb, KERN_ERR, "unsupported inode size: %d", sbi->s_inode_size); goto failed_mount; } if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2); } sbi->s_desc_size = le16_to_cpu(es->s_desc_size); if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) { if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || sbi->s_desc_size > EXT4_MAX_DESC_SIZE || !is_power_of_2(sbi->s_desc_size)) { ext4_msg(sb, KERN_ERR, "unsupported descriptor size %lu", sbi->s_desc_size); goto failed_mount; } } else sbi->s_desc_size = EXT4_MIN_DESC_SIZE; sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0) goto cantfind_ext4; sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0) goto cantfind_ext4; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); sbi->s_sbh = bh; sbi->s_mount_state = le16_to_cpu(es->s_state); sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); for (i = 0; i < 4; i++) sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); sbi->s_def_hash_version = es->s_def_hash_version; i = le32_to_cpu(es->s_flags); if (i & EXT2_FLAGS_UNSIGNED_HASH) sbi->s_hash_unsigned = 3; else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); sbi->s_hash_unsigned = 3; #else es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); #endif } /* Handle clustersize */ clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size); has_bigalloc = EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC); if (has_bigalloc) { if (clustersize < blocksize) { ext4_msg(sb, KERN_ERR, "cluster size (%d) smaller than " "block size (%d)", clustersize, blocksize); goto 
failed_mount; } sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - le32_to_cpu(es->s_log_block_size); sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group); if (sbi->s_clusters_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu", sbi->s_clusters_per_group); goto failed_mount; } if (sbi->s_blocks_per_group != (sbi->s_clusters_per_group * (clustersize / blocksize))) { ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and " "clusters per group (%lu) inconsistent", sbi->s_blocks_per_group, sbi->s_clusters_per_group); goto failed_mount; } } else { if (clustersize != blocksize) { ext4_warning(sb, "fragment/cluster size (%d) != " "block size (%d)", clustersize, blocksize); clustersize = blocksize; } if (sbi->s_blocks_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#blocks per group too big: %lu", sbi->s_blocks_per_group); goto failed_mount; } sbi->s_clusters_per_group = sbi->s_blocks_per_group; sbi->s_cluster_bits = 0; } sbi->s_cluster_ratio = clustersize / blocksize; if (sbi->s_inodes_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } /* Do we have standard group size of clustersize * 8 blocks ? */ if (sbi->s_blocks_per_group == clustersize << 3) set_opt2(sb, STD_GROUP_SIZE); /* * Test whether we have more sectors than will fit in sector_t, * and whether the max offset is addressable by the page cache. */ err = generic_check_addressable(sb->s_blocksize_bits, ext4_blocks_count(es)); if (err) { ext4_msg(sb, KERN_ERR, "filesystem" " too large to mount safely on this system"); if (sizeof(sector_t) < 8) ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled"); goto failed_mount; } if (EXT4_BLOCKS_PER_GROUP(sb) == 0) goto cantfind_ext4; /* check blocks count against device size */ blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; if (blocks_count && ext4_blocks_count(es) > blocks_count) { ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu " "exceeds size of device (%llu blocks)", ext4_blocks_count(es), blocks_count); goto failed_mount; } /* * It makes no sense for the first data block to be beyond the end * of the filesystem. 
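 * (s_first_data_block is 1 on 1 KiB-block filesystems, where block 0
 * holds the boot sector, and 0 for all larger block sizes.)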
 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}

	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
			 "(block count %llu, first data block %u, "
			 "blocks per group %lu)", blocks_count,
			 ext4_blocks_count(es),
			 le32_to_cpu(es->s_first_data_block),
			 EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = ext4_kvmalloc(db_count *
					  sizeof(struct buffer_head *),
					  GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	if (ext4_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root);

	if (sbi->s_proc)
		proc_create_data("options", S_IRUGO, sbi->s_proc,
				 &ext4_seq_options_fops, sb);

	bgl_lock_init(sbi->s_blockgroup_lock);

	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			ext4_msg(sb, KERN_ERR,
				 "can't read group descriptor %d", i);
			db_count = i;
			goto failed_mount2;
		}
	}
	if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		goto failed_mount2;
	}
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "unable to initialize "
				 "flex_bg meta info!");
			goto failed_mount2;
		}

	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	init_timer(&sbi->s_err_report);
	sbi->s_err_report.function = print_daily_error_info;
	sbi->s_err_report.data = (unsigned long) sb;

	/* Register extent status tree shrinker */
	ext4_es_register_shrinker(sbi);
	err = percpu_counter_init(&sbi->s_freeclusters_counter,
				  ext4_count_free_clusters(sb));
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
					  ext4_count_free_inodes(sb));
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb));
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
	}
	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount3;
	}

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	if (!test_opt(sb, NOLOAD) &&
	    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
		sb->s_op = &ext4_sops;
	else
		sb->s_op = &ext4_nojournal_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
		sb->s_qcop = &ext4_qctl_sysfile_operations;
	else
		sb->s_qcop = &ext4_qctl_operations;
#endif
	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	sb->s_root = NULL;

	needs_recovery =
(es->s_last_orphan != 0 || EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)); if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) && !(sb->s_flags & MS_RDONLY)) if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) goto failed_mount3; /* * The first inode we look at is the journal inode. Don't try * root first: it may be modified in the journal! */ if (!test_opt(sb, NOLOAD) && EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) { if (ext4_load_journal(sb, es, journal_devnum)) goto failed_mount3; } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { ext4_msg(sb, KERN_ERR, "required journal recovery " "suppressed and not mounted read-only"); goto failed_mount_wq; } else { clear_opt(sb, DATA_FLAGS); sbi->s_journal = NULL; needs_recovery = 0; goto no_journal; } if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT) && !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_64BIT)) { ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); goto failed_mount_wq; } if (!set_journal_csum_feature_set(sb)) { ext4_msg(sb, KERN_ERR, "Failed to set journal checksum " "feature set"); goto failed_mount_wq; } /* We have now updated the journal if required, so we can * validate the data journaling mode. */ switch (test_opt(sb, DATA_FLAGS)) { case 0: /* No mode set, assume a default based on the journal * capabilities: ORDERED_DATA if the journal can * cope, else JOURNAL_DATA */ if (jbd2_journal_check_available_features (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) set_opt(sb, ORDERED_DATA); else set_opt(sb, JOURNAL_DATA); break; case EXT4_MOUNT_ORDERED_DATA: case EXT4_MOUNT_WRITEBACK_DATA: if (!jbd2_journal_check_available_features (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { ext4_msg(sb, KERN_ERR, "Journal does not support " "requested data journaling mode"); goto failed_mount_wq; } default: break; } set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); sbi->s_journal->j_commit_callback = ext4_journal_commit_callback; /* * The journal may have updated the bg summary counts, so we * need to update the global counters. */ percpu_counter_set(&sbi->s_freeclusters_counter, ext4_count_free_clusters(sb)); percpu_counter_set(&sbi->s_freeinodes_counter, ext4_count_free_inodes(sb)); percpu_counter_set(&sbi->s_dirs_counter, ext4_count_dirs(sb)); percpu_counter_set(&sbi->s_dirtyclusters_counter, 0); no_journal: /* * Get the # of file system overhead blocks from the * superblock if present. */ if (es->s_overhead_clusters) sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters); else { err = ext4_calculate_overhead(sb); if (err) goto failed_mount_wq; } /* * The maximum number of concurrent works can be high and * concurrency isn't really necessary. Limit it to 1. */ EXT4_SB(sb)->rsv_conversion_wq = alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); if (!EXT4_SB(sb)->rsv_conversion_wq) { printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); ret = -ENOMEM; goto failed_mount4; } EXT4_SB(sb)->unrsv_conversion_wq = alloc_workqueue("ext4-unrsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); if (!EXT4_SB(sb)->unrsv_conversion_wq) { printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); ret = -ENOMEM; goto failed_mount4; } /* * The jbd2_journal_load will have done any necessary log recovery, * so we can safely mount the rest of the filesystem now. 
 */
	root = ext4_iget(sb, EXT4_ROOT_INO);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		goto failed_mount4;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;

	/* determine the minimum size of new large inodes, if present */
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						     EXT4_GOOD_OLD_INODE_SIZE;
		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				       EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_want_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_want_extra_isize);
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_min_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_min_extra_isize);
		}
	}
	/* Check if enough inode space is available */
	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						       EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO,
			 "required extra inode space not available");
	}

	err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
			 "reserved pool", ext4_calculate_resv_clusters(sbi));
		goto failed_mount4a;
	}

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	sbi->s_kobj.kset = ext4_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL,
				   "%s", sb->s_id);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
	    !(sb->s_flags & MS_RDONLY)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
		 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
		 *sbi->s_es->s_mount_opts ?
"; " : "", orig_data); if (es->s_error_count) mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ kfree(orig_data); return 0; cantfind_ext4: if (!silent) ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; #ifdef CONFIG_QUOTA failed_mount8: kobject_del(&sbi->s_kobj); #endif failed_mount7: ext4_unregister_li_request(sb); failed_mount6: ext4_mb_release(sb); failed_mount5: ext4_ext_release(sb); ext4_release_system_zone(sb); failed_mount4a: dput(sb->s_root); sb->s_root = NULL; failed_mount4: ext4_msg(sb, KERN_ERR, "mount failed"); if (EXT4_SB(sb)->rsv_conversion_wq) destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); if (EXT4_SB(sb)->unrsv_conversion_wq) destroy_workqueue(EXT4_SB(sb)->unrsv_conversion_wq); failed_mount_wq: if (sbi->s_journal) { jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; } failed_mount3: ext4_es_unregister_shrinker(sbi); del_timer(&sbi->s_err_report); if (sbi->s_flex_groups) ext4_kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); percpu_counter_destroy(&sbi->s_extent_cache_cnt); if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); failed_mount2: for (i = 0; i < db_count; i++) brelse(sbi->s_group_desc[i]); ext4_kvfree(sbi->s_group_desc); failed_mount: if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); if (sbi->s_proc) { remove_proc_entry("options", sbi->s_proc); remove_proc_entry(sb->s_id, ext4_proc_root); } #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif ext4_blkdev_remove(sbi); brelse(bh); out_fail: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); out_free_orig: kfree(orig_data); return err ? err : ret; } /* * Setup any per-fs journal parameters now. We'll do this both on * initial mount, once the journal has been initialised but before we've * done any recovery; and again on any subsequent remount. */ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal) { struct ext4_sb_info *sbi = EXT4_SB(sb); journal->j_commit_interval = sbi->s_commit_interval; journal->j_min_batch_time = sbi->s_min_batch_time; journal->j_max_batch_time = sbi->s_max_batch_time; write_lock(&journal->j_state_lock); if (test_opt(sb, BARRIER)) journal->j_flags |= JBD2_BARRIER; else journal->j_flags &= ~JBD2_BARRIER; if (test_opt(sb, DATA_ERR_ABORT)) journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR; else journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR; write_unlock(&journal->j_state_lock); } static journal_t *ext4_get_journal(struct super_block *sb, unsigned int journal_inum) { struct inode *journal_inode; journal_t *journal; BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); /* First, test for the existence of a valid inode on disk. Bad * things happen if we iget() an unused inode, as the subsequent * iput() will try to delete it. 
*/ journal_inode = ext4_iget(sb, journal_inum); if (IS_ERR(journal_inode)) { ext4_msg(sb, KERN_ERR, "no journal found"); return NULL; } if (!journal_inode->i_nlink) { make_bad_inode(journal_inode); iput(journal_inode); ext4_msg(sb, KERN_ERR, "journal inode is deleted"); return NULL; } jbd_debug(2, "Journal inode found at %p: %lld bytes\n", journal_inode, journal_inode->i_size); if (!S_ISREG(journal_inode->i_mode)) { ext4_msg(sb, KERN_ERR, "invalid journal inode"); iput(journal_inode); return NULL; } journal = jbd2_journal_init_inode(journal_inode); if (!journal) { ext4_msg(sb, KERN_ERR, "Could not load journal inode"); iput(journal_inode); return NULL; } journal->j_private = sb; ext4_init_journal_params(sb, journal); return journal; } static journal_t *ext4_get_dev_journal(struct super_block *sb, dev_t j_dev) { struct buffer_head *bh; journal_t *journal; ext4_fsblk_t start; ext4_fsblk_t len; int hblock, blocksize; ext4_fsblk_t sb_block; unsigned long offset; struct ext4_super_block *es; struct block_device *bdev; BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); bdev = ext4_blkdev_get(j_dev, sb); if (bdev == NULL) return NULL; blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { ext4_msg(sb, KERN_ERR, "blocksize too small for journal device"); goto out_bdev; } sb_block = EXT4_MIN_BLOCK_SIZE / blocksize; offset = EXT4_MIN_BLOCK_SIZE % blocksize; set_blocksize(bdev, blocksize); if (!(bh = __bread(bdev, sb_block, blocksize))) { ext4_msg(sb, KERN_ERR, "couldn't read superblock of " "external journal"); goto out_bdev; } es = (struct ext4_super_block *) (bh->b_data + offset); if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) { ext4_msg(sb, KERN_ERR, "external journal has " "bad superblock"); brelse(bh); goto out_bdev; } if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { ext4_msg(sb, KERN_ERR, "journal UUID does not match"); brelse(bh); goto out_bdev; } len = ext4_blocks_count(es); start = sb_block + 1; brelse(bh); /* we're done with the superblock */ journal = jbd2_journal_init_dev(bdev, sb->s_bdev, start, len, blocksize); if (!journal) { ext4_msg(sb, KERN_ERR, "failed to create device journal"); goto out_bdev; } journal->j_private = sb; ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer); wait_on_buffer(journal->j_sb_buffer); if (!buffer_uptodate(journal->j_sb_buffer)) { ext4_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { ext4_msg(sb, KERN_ERR, "External journal has more than one " "user (unsupported) - %d", be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } EXT4_SB(sb)->journal_bdev = bdev; ext4_init_journal_params(sb, journal); return journal; out_journal: jbd2_journal_destroy(journal); out_bdev: ext4_blkdev_put(bdev); return NULL; } static int ext4_load_journal(struct super_block *sb, struct ext4_super_block *es, unsigned long journal_devnum) { journal_t *journal; unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); dev_t journal_dev; int err = 0; int really_read_only; BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { ext4_msg(sb, KERN_INFO, "external journal device major/minor " "numbers have changed"); journal_dev = new_decode_dev(journal_devnum); } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); 
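	/*
	 * At this point journal_dev identifies the external journal device,
	 * preferring a device number passed in via the mount options over
	 * the one recorded in the superblock.
	 */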
really_read_only = bdev_read_only(sb->s_bdev); /* * Are we loading a blank journal or performing recovery after a * crash? For recovery, we need to check in advance whether we * can get read-write access to the device. */ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { if (sb->s_flags & MS_RDONLY) { ext4_msg(sb, KERN_INFO, "INFO: recovery " "required on readonly filesystem"); if (really_read_only) { ext4_msg(sb, KERN_ERR, "write access " "unavailable, cannot proceed"); return -EROFS; } ext4_msg(sb, KERN_INFO, "write access will " "be enabled during recovery"); } } if (journal_inum && journal_dev) { ext4_msg(sb, KERN_ERR, "filesystem has both journal " "and inode journals!"); return -EINVAL; } if (journal_inum) { if (!(journal = ext4_get_journal(sb, journal_inum))) return -EINVAL; } else { if (!(journal = ext4_get_dev_journal(sb, journal_dev))) return -EINVAL; } if (!(journal->j_flags & JBD2_BARRIER)) ext4_msg(sb, KERN_INFO, "barriers disabled"); if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) err = jbd2_journal_wipe(journal, !really_read_only); if (!err) { char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL); if (save) memcpy(save, ((char *) es) + EXT4_S_ERR_START, EXT4_S_ERR_LEN); err = jbd2_journal_load(journal); if (save) memcpy(((char *) es) + EXT4_S_ERR_START, save, EXT4_S_ERR_LEN); kfree(save); } if (err) { ext4_msg(sb, KERN_ERR, "error loading journal"); jbd2_journal_destroy(journal); return err; } EXT4_SB(sb)->s_journal = journal; ext4_clear_journal_err(sb, es); if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { es->s_journal_dev = cpu_to_le32(journal_devnum); /* Make sure we flush the recovery flag to disk. */ ext4_commit_super(sb, 1); } return 0; } static int ext4_commit_super(struct super_block *sb, int sync) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; int error = 0; if (!sbh || block_device_ejected(sb)) return error; if (buffer_write_io_error(sbh)) { /* * Oh, dear. A previous attempt to write the * superblock failed. This could happen because the * USB device was yanked out. Or it could happen to * be a transient write error and maybe the block will * be remapped. Nothing we can do but to retry the * write and hope for the best. */ ext4_msg(sb, KERN_ERR, "previous I/O error to " "superblock detected"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } /* * If the file system is mounted read-only, don't update the * superblock write time. This avoids updating the superblock * write time when we are mounting the root file system * read/only but we need to replay the journal; at that point, * for people who are east of GMT and who make their clock * tick in localtime for Windows bug-for-bug compatibility, * the clock is set in the future, and this will cause e2fsck * to complain and force a full file system check. 
 */
	if (!(sb->s_flags & MS_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
		    cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
			  EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	es->s_free_inodes_count =
		cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	mark_buffer_dirty(sbh);
	if (sync) {
		error = sync_dirty_buffer(sbh);
		if (error)
			return error;

		error = buffer_write_io_error(sbh);
		if (error) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
				 "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery? If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk. Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER) &&
	    sb->s_flags & MS_RDONLY) {
		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */
	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	flush_workqueue(sbi->unrsv_conversion_wq);
	/*
	 * Write back quota in the non-journalled quota case - journalled
	 * quota has no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so a barrier
	 * must be sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
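	 * (jbd2_trans_will_send_data_barrier(), consulted below, reports
	 * whether committing the target transaction will already issue a
	 * flush, in which case an extra one here would be redundant.)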
 */
	target = jbd2_get_latest_transaction(sbi->s_journal);
	if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
	    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
		needs_barrier = true;

	if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
		if (wait)
			ret = jbd2_log_wait_commit(sbi->s_journal, target);
	}
	if (needs_barrier) {
		int err;

		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}

static int ext4_sync_fs_nojournal(struct super_block *sb, int wait)
{
	int ret = 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
	flush_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
	dquot_writeback_dquots(sb, -1);
	if (wait && test_opt(sb, BARRIER))
		ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created. This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function cannot bring the filesystem to a clean state by
 * itself. It relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	/* Now we set up the journal barrier. */
	jbd2_journal_lock_updates(journal);

	/*
	 * Don't clear the needs_recovery flag if we failed to flush
	 * the journal.
	 */
	error = jbd2_journal_flush(journal);
	if (error < 0)
		goto out;

	/* Journal blocked and flushed, clear needs_recovery flag. */
	EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
	error = ext4_commit_super(sb, 1);
out:
	/* we rely on upper layer to stop further updates */
	jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done. We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb->s_flags & MS_RDONLY)
		return 0;

	/*
	 * Reset the needs_recovery flag before the fs is unlocked.
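	 * (ext4_freeze() cleared RECOVER on disk, so without setting it back
	 * a crash after the snapshot would skip journal replay on the next
	 * mount.)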
*/ EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); ext4_commit_super(sb, 1); return 0; } /* * Structure to save mount options for ext4_remount's benefit */ struct ext4_mount_options { unsigned long s_mount_opt; unsigned long s_mount_opt2; kuid_t s_resuid; kgid_t s_resgid; unsigned long s_commit_interval; u32 s_min_batch_time, s_max_batch_time; #ifdef CONFIG_QUOTA int s_jquota_fmt; char *s_qf_names[MAXQUOTAS]; #endif }; static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned long old_sb_flags; struct ext4_mount_options old_opts; int enable_quota = 0; ext4_group_t g; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; int err = 0; #ifdef CONFIG_QUOTA int i, j; #endif char *orig_data = kstrdup(data, GFP_KERNEL); /* Store the original options */ old_sb_flags = sb->s_flags; old_opts.s_mount_opt = sbi->s_mount_opt; old_opts.s_mount_opt2 = sbi->s_mount_opt2; old_opts.s_resuid = sbi->s_resuid; old_opts.s_resgid = sbi->s_resgid; old_opts.s_commit_interval = sbi->s_commit_interval; old_opts.s_min_batch_time = sbi->s_min_batch_time; old_opts.s_max_batch_time = sbi->s_max_batch_time; #ifdef CONFIG_QUOTA old_opts.s_jquota_fmt = sbi->s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) if (sbi->s_qf_names[i]) { old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i], GFP_KERNEL); if (!old_opts.s_qf_names[i]) { for (j = 0; j < i; j++) kfree(old_opts.s_qf_names[j]); kfree(orig_data); return -ENOMEM; } } else old_opts.s_qf_names[i] = NULL; #endif if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; /* * Allow the "check" option to be passed as a remount option. */ if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; } if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, "Abort forced by user"); sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); es = sbi->s_es; if (sbi->s_journal) { ext4_init_journal_params(sb, sbi->s_journal); set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); } if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) { err = -EROFS; goto restore_opts; } if (*flags & MS_RDONLY) { err = dquot_suspend(sb, -1); if (err < 0) goto restore_opts; /* * First of all, the unconditional stuff we have to do * to disable replay of the journal when we next remount */ sb->s_flags |= MS_RDONLY; /* * OK, test if we are remounting a valid rw partition * readonly, and if so set the rdonly flag and then * mark the partition as valid again. */ if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) && (sbi->s_mount_state & EXT4_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); if (sbi->s_journal) ext4_mark_recovery_complete(sb, es); } else { /* Make sure we can mount this feature set readwrite */ if (!ext4_feature_set_ok(sb, 0)) { err = -EROFS; goto restore_opts; } /* * Make sure the group descriptor checksums * are sane. If they aren't, refuse to remount r/w. 
*/ for (g = 0; g < sbi->s_groups_count; g++) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, g, NULL); if (!ext4_group_desc_csum_verify(sb, g, gdp)) { ext4_msg(sb, KERN_ERR, "ext4_remount: Checksum for group %u failed (%u!=%u)", g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)), le16_to_cpu(gdp->bg_checksum)); err = -EINVAL; goto restore_opts; } } /* * If we have an unprocessed orphan list hanging * around from a previously readonly bdev mount, * require a full umount/remount for now. */ if (es->s_last_orphan) { ext4_msg(sb, KERN_WARNING, "Couldn't " "remount RDWR because of unprocessed " "orphan inode list. Please " "umount/remount instead"); err = -EINVAL; goto restore_opts; } /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have * been changed by e2fsck since we originally mounted * the partition.) */ if (sbi->s_journal) ext4_clear_journal_err(sb, es); sbi->s_mount_state = le16_to_cpu(es->s_state); if (!ext4_setup_super(sb, es, 0)) sb->s_flags &= ~MS_RDONLY; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP)) if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) { err = -EROFS; goto restore_opts; } enable_quota = 1; } } /* * Reinitialize lazy itable initialization thread based on * current settings */ if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE)) ext4_unregister_li_request(sb); else { ext4_group_t first_not_zeroed; first_not_zeroed = ext4_has_uninit_itable(sb); ext4_register_li_request(sb, first_not_zeroed); } ext4_setup_system_zone(sb); if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY)) ext4_commit_super(sb, 1); #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < MAXQUOTAS; i++) kfree(old_opts.s_qf_names[i]); if (enable_quota) { if (sb_any_quota_suspended(sb)) dquot_resume(sb, -1); else if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) { err = ext4_enable_quotas(sb); if (err) goto restore_opts; } } #endif ext4_msg(sb, KERN_INFO, "re-mounted. 
Opts: %s", orig_data); kfree(orig_data); return 0; restore_opts: sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; sbi->s_mount_opt2 = old_opts.s_mount_opt2; sbi->s_resuid = old_opts.s_resuid; sbi->s_resgid = old_opts.s_resgid; sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { kfree(sbi->s_qf_names[i]); sbi->s_qf_names[i] = old_opts.s_qf_names[i]; } #endif kfree(orig_data); return err; } static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_fsblk_t overhead = 0, resv_blocks; u64 fsid; s64 bfree; resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters)); if (!test_opt(sb, MINIX_DF)) overhead = sbi->s_overhead; buf->f_type = EXT4_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead); bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) - percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter); /* prevent underflow in case that few free space is available */ buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0)); buf->f_bavail = buf->f_bfree - (ext4_r_blocks_count(es) + resv_blocks); if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks)) buf->f_bavail = 0; buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); buf->f_namelen = EXT4_NAME_LEN; fsid = le64_to_cpup((void *)es->s_uuid) ^ le64_to_cpup((void *)es->s_uuid + sizeof(u64)); buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } /* Helper function for writing quotas on sync - we need to start transaction * before quota file is locked for write. 
 * Otherwise there are possible deadlocks:
 *   Process 1                         Process 2
 *   ext4_create()                     quota_sync()
 *     jbd2_journal_start()              write_dquot()
 *     dquot_initialize()                down(dqio_mutex)
 *       down(dqio_mutex)                  jbd2_journal_start()
 */

#ifdef CONFIG_QUOTA

static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(sb->s_root->d_inode, EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
					EXT4_SB(sb)->s_jquota_fmt, type);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(path->dentry->d_inode)) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
*/ jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); err = jbd2_journal_flush(EXT4_SB(sb)->s_journal); jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); if (err) return err; } return dquot_quota_on(sb, type, format_id, path); } static int ext4_quota_enable(struct super_block *sb, int type, int format_id, unsigned int flags) { int err; struct inode *qf_inode; unsigned long qf_inums[MAXQUOTAS] = { le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum) }; BUG_ON(!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)); if (!qf_inums[type]) return -EPERM; qf_inode = ext4_iget(sb, qf_inums[type]); if (IS_ERR(qf_inode)) { ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]); return PTR_ERR(qf_inode); } /* Don't account quota for quota files to avoid recursion */ qf_inode->i_flags |= S_NOQUOTA; err = dquot_enable(qf_inode, type, format_id, flags); iput(qf_inode); return err; } /* Enable usage tracking for all quota types. */ static int ext4_enable_quotas(struct super_block *sb) { int type, err = 0; unsigned long qf_inums[MAXQUOTAS] = { le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum) }; sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; for (type = 0; type < MAXQUOTAS; type++) { if (qf_inums[type]) { err = ext4_quota_enable(sb, type, QFMT_VFS_V1, DQUOT_USAGE_ENABLED); if (err) { ext4_warning(sb, "Failed to enable quota tracking " "(type=%d, err=%d). Please run " "e2fsck to fix.", type, err); return err; } } } return 0; } /* * quota_on function that is used when QUOTA feature is set. */ static int ext4_quota_on_sysfile(struct super_block *sb, int type, int format_id) { if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) return -EINVAL; /* * USAGE was enabled at mount time. Only need to enable LIMITS now. */ return ext4_quota_enable(sb, type, format_id, DQUOT_LIMITS_ENABLED); } static int ext4_quota_off(struct super_block *sb, int type) { struct inode *inode = sb_dqopt(sb)->files[type]; handle_t *handle; /* Force all delayed allocation blocks to be allocated. * Caller already holds s_umount sem */ if (test_opt(sb, DELALLOC)) sync_filesystem(sb); if (!inode) goto out; /* Update modification times of quota files when userspace can * start looking at them */ handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); if (IS_ERR(handle)) goto out; inode->i_mtime = inode->i_ctime = CURRENT_TIME; ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); out: return dquot_quota_off(sb, type); } /* * quota_off function that is used when QUOTA feature is set. */ static int ext4_quota_off_sysfile(struct super_block *sb, int type) { if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) return -EINVAL; /* Disable only the limits. */ return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED); } /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... 
As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; bh = ext4_bread(NULL, inode, blk, 0, &err); if (err) return err; if (!bh) /* A hole? */ memset(data, 0, tocopy); else memcpy(data, bh->b_data+offset, tocopy); brelse(bh); offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile (we know the transaction is already started and has * enough credits) */ static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); struct buffer_head *bh; handle_t *handle = journal_current_handle(); if (EXT4_SB(sb)->s_journal && !handle) { ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because transaction is not started", (unsigned long long)off, (unsigned long long)len); return -EIO; } /* * Since we account only one data block in transaction credits, * then it is impossible to cross a block boundary. */ if (sb->s_blocksize - offset < len) { ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because not block aligned", (unsigned long long)off, (unsigned long long)len); return -EIO; } bh = ext4_bread(handle, inode, blk, 1, &err); if (!bh) goto out; err = ext4_journal_get_write_access(handle, bh); if (err) { brelse(bh); goto out; } lock_buffer(bh); memcpy(bh->b_data+offset, data, len); flush_dcache_page(bh->b_page); unlock_buffer(bh); err = ext4_handle_dirty_metadata(handle, NULL, bh); brelse(bh); out: if (err) return err; if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT4_I(inode)->i_disksize = inode->i_size; ext4_mark_inode_dirty(handle, inode); } return len; } #endif static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super); } #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static inline void register_as_ext2(void) { int err = register_filesystem(&ext2_fs_type); if (err) printk(KERN_WARNING "EXT4-fs: Unable to register as ext2 (%d)\n", err); } static inline void unregister_as_ext2(void) { unregister_filesystem(&ext2_fs_type); } static inline int ext2_feature_set_ok(struct super_block *sb) { if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP)) return 0; if (sb->s_flags & MS_RDONLY) return 1; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP)) return 0; return 1; } #else static inline void register_as_ext2(void) { } static inline void unregister_as_ext2(void) { } static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; } #endif #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static inline void 
register_as_ext3(void) { int err = register_filesystem(&ext3_fs_type); if (err) printk(KERN_WARNING "EXT4-fs: Unable to register as ext3 (%d)\n", err); } static inline void unregister_as_ext3(void) { unregister_filesystem(&ext3_fs_type); } static inline int ext3_feature_set_ok(struct super_block *sb) { if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP)) return 0; if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) return 0; if (sb->s_flags & MS_RDONLY) return 1; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) return 0; return 1; } #else static inline void register_as_ext3(void) { } static inline void unregister_as_ext3(void) { } static inline int ext3_feature_set_ok(struct super_block *sb) { return 0; } #endif static struct file_system_type ext4_fs_type = { .owner = THIS_MODULE, .name = "ext4", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext4"); static int __init ext4_init_feat_adverts(void) { struct ext4_features *ef; int ret = -ENOMEM; ef = kzalloc(sizeof(struct ext4_features), GFP_KERNEL); if (!ef) goto out; ef->f_kobj.kset = ext4_kset; init_completion(&ef->f_kobj_unregister); ret = kobject_init_and_add(&ef->f_kobj, &ext4_feat_ktype, NULL, "features"); if (ret) { kfree(ef); goto out; } ext4_feat = ef; ret = 0; out: return ret; } static void ext4_exit_feat_adverts(void) { kobject_put(&ext4_feat->f_kobj); wait_for_completion(&ext4_feat->f_kobj_unregister); kfree(ext4_feat); } /* Shared across all ext4 file systems */ wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; static int __init ext4_init_fs(void) { int i, err; ext4_li_info = NULL; mutex_init(&ext4_li_mtx); /* Build-time check for flags consistency */ ext4_check_flag_values(); for (i = 0; i < EXT4_WQ_HASH_SZ; i++) { mutex_init(&ext4__aio_mutex[i]); init_waitqueue_head(&ext4__ioend_wq[i]); } err = ext4_init_es(); if (err) return err; err = ext4_init_pageio(); if (err) goto out7; err = ext4_init_system_zone(); if (err) goto out6; ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj); if (!ext4_kset) { err = -ENOMEM; goto out5; } ext4_proc_root = proc_mkdir("fs/ext4", NULL); err = ext4_init_feat_adverts(); if (err) goto out4; err = ext4_init_mballoc(); if (err) goto out3; err = ext4_init_xattr(); if (err) goto out2; err = init_inodecache(); if (err) goto out1; register_as_ext3(); register_as_ext2(); err = register_filesystem(&ext4_fs_type); if (err) goto out; return 0; out: unregister_as_ext2(); unregister_as_ext3(); destroy_inodecache(); out1: ext4_exit_xattr(); out2: ext4_exit_mballoc(); out3: ext4_exit_feat_adverts(); out4: if (ext4_proc_root) remove_proc_entry("fs/ext4", NULL); kset_unregister(ext4_kset); out5: ext4_exit_system_zone(); out6: ext4_exit_pageio(); out7: ext4_exit_es(); return err; } static void __exit ext4_exit_fs(void) { ext4_destroy_lazyinit_thread(); unregister_as_ext2(); unregister_as_ext3(); unregister_filesystem(&ext4_fs_type); destroy_inodecache(); ext4_exit_xattr(); ext4_exit_mballoc(); ext4_exit_feat_adverts(); remove_proc_entry("fs/ext4", NULL); kset_unregister(ext4_kset); ext4_exit_system_zone(); ext4_exit_pageio(); ext4_exit_es(); } MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); MODULE_DESCRIPTION("Fourth Extended Filesystem"); MODULE_LICENSE("GPL"); module_init(ext4_init_fs) module_exit(ext4_exit_fs)
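/*
 * Illustrative sketch (editor's addition, not part of super.c): a
 * stand-alone user-space model of the reserved-cluster policy that
 * ext4_calculate_resv_clusters() implements above -- reserve 2% of the
 * filesystem's clusters, capped at 4096 clusters. The *_demo names are
 * hypothetical; only the arithmetic mirrors the kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t resv_clusters_demo(uint64_t blocks_count,
				   unsigned int cluster_bits)
{
	uint64_t clusters = blocks_count >> cluster_bits; /* blocks -> clusters */
	uint64_t resv = clusters / 50;			  /* 2% */

	return resv < 4096 ? resv : 4096;		  /* cap at 4096 */
}

int main(void)
{
	/* 2^31 blocks, 1 block per cluster: the 4096-cluster cap applies. */
	printf("%llu\n", (unsigned long long)resv_clusters_demo(1ULL << 31, 0));
	/* 102,400 blocks: 2% (2,048 clusters) is below the cap and wins. */
	printf("%llu\n", (unsigned long long)resv_clusters_demo(102400, 0));
	return 0;
}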
rkharwar/ubuntu-saucy-powerpc
fs/ext4/super.c
C
gpl-2.0
158,220
/* 32-bit ELF support for S+core. Copyright (C) 2006-2017 Free Software Foundation, Inc. Contributed by Brain.lin (brain.lin@sunplusct.com) Mei Ligang (ligang@sunnorth.com.cn) Pei-Lin Tsai (pltsai@sunplus.com) This file is part of BFD, the Binary File Descriptor library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ #include "sysdep.h" #include "bfd.h" #include "libbfd.h" #include "libiberty.h" #include "elf-bfd.h" #include "elf/score.h" #include "elf/common.h" #include "elf/internal.h" #include "hashtab.h" #include "elf32-score.h" int score3 = 0; int score7 = 1; /* The SCORE ELF linker needs additional information for each symbol in the global hash table. */ struct score_elf_link_hash_entry { struct elf_link_hash_entry root; /* Number of R_SCORE_ABS32, R_SCORE_REL32 relocs against this symbol. */ unsigned int possibly_dynamic_relocs; /* If the R_SCORE_ABS32, R_SCORE_REL32 reloc is against a readonly section. */ bfd_boolean readonly_reloc; /* We must not create a stub for a symbol that has relocations related to taking the function's address, i.e. any but R_SCORE_CALL15 ones. */ bfd_boolean no_fn_stub; /* Are we forced local? This will only be set if we have converted the initial global GOT entry to a local GOT entry. */ bfd_boolean forced_local; }; /* Traverse a score ELF linker hash table. */ #define score_elf_link_hash_traverse(table, func, info) \ (elf_link_hash_traverse \ ((table), \ (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \ (info))) /* This structure is used to hold .got entries while estimating got sizes. */ struct score_got_entry { /* The input bfd in which the symbol is defined. */ bfd *abfd; /* The index of the symbol, as stored in the relocation r_info, if we have a local symbol; -1 otherwise. */ long symndx; union { /* If abfd == NULL, an address that must be stored in the got. */ bfd_vma address; /* If abfd != NULL && symndx != -1, the addend of the relocation that should be added to the symbol value. */ bfd_vma addend; /* If abfd != NULL && symndx == -1, the hash table entry corresponding to a global symbol in the got (or, local, if h->forced_local). */ struct score_elf_link_hash_entry *h; } d; /* The offset from the beginning of the .got section to the entry corresponding to this symbol+addend. If it's a global symbol whose offset is yet to be decided, it's going to be -1. */ long gotidx; }; /* This structure is passed to score_elf_sort_hash_table_f when sorting the dynamic symbols. */ struct score_elf_hash_sort_data { /* The symbol in the global GOT with the lowest dynamic symbol table index. */ struct elf_link_hash_entry *low; /* The least dynamic symbol table index corresponding to a symbol with a GOT entry. 
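   (These bounds let the sort pass renumber dynamic symbols so that the
   ones with GOT entries are grouped at the end of .dynsym, mirroring
   the MIPS scheme this backend derives from.)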
*/ long min_got_dynindx; /* The greatest dynamic symbol table index corresponding to a symbol with a GOT entry that is not referenced (e.g., a dynamic symbol with dynamic relocations pointing to it from non-primary GOTs). */ long max_unref_got_dynindx; /* The greatest dynamic symbol table index not corresponding to a symbol without a GOT entry. */ long max_non_got_dynindx; }; struct score_got_info { /* The global symbol in the GOT with the lowest index in the dynamic symbol table. */ struct elf_link_hash_entry *global_gotsym; /* The number of global .got entries. */ unsigned int global_gotno; /* The number of local .got entries. */ unsigned int local_gotno; /* The number of local .got entries we have used. */ unsigned int assigned_gotno; /* A hash table holding members of the got. */ struct htab *got_entries; /* In multi-got links, a pointer to the next got (err, rather, most of the time, it points to the previous got). */ struct score_got_info *next; }; /* A structure used to count GOT entries, for GOT entry or ELF symbol table traversal. */ struct _score_elf_section_data { struct bfd_elf_section_data elf; union { struct score_got_info *got_info; bfd_byte *tdata; } u; }; #define score_elf_section_data(sec) \ ((struct _score_elf_section_data *) elf_section_data (sec)) /* The size of a symbol-table entry. */ #define SCORE_ELF_SYM_SIZE(abfd) \ (get_elf_backend_data (abfd)->s->sizeof_sym) /* In case we're on a 32-bit machine, construct a 64-bit "-1" value from smaller values. Start with zero, widen, *then* decrement. */ #define MINUS_ONE (((bfd_vma)0) - 1) #define MINUS_TWO (((bfd_vma)0) - 2) #define PDR_SIZE 32 /* The number of local .got entries we reserve. */ #define SCORE_RESERVED_GOTNO (2) #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1" /* The offset of $gp from the beginning of the .got section. */ #define ELF_SCORE_GP_OFFSET(abfd) (0x3ff0) /* The maximum size of the GOT for it to be addressable using 15-bit offsets from $gp. */ #define SCORE_ELF_GOT_MAX_SIZE(abfd) (ELF_SCORE_GP_OFFSET(abfd) + 0x3fff) #define SCORE_ELF_STUB_SECTION_NAME (".SCORE.stub") #define SCORE_FUNCTION_STUB_SIZE (16) #define STUB_LW 0xc3bcc010 /* lw r29, [r28, -0x3ff0] */ #define STUB_MOVE 0x8363bc56 /* mv r27, r3 */ #define STUB_LI16 0x87548000 /* ori r26, .dynsym_index */ #define STUB_BRL 0x801dbc09 /* brl r29 */ #define SCORE_ELF_GOT_SIZE(abfd) \ (get_elf_backend_data (abfd)->s->arch_size / 8) #define SCORE_ELF_ADD_DYNAMIC_ENTRY(info, tag, val) \ (_bfd_elf_add_dynamic_entry (info, (bfd_vma) tag, (bfd_vma) val)) /* The size of an external dynamic table entry. */ #define SCORE_ELF_DYN_SIZE(abfd) \ (get_elf_backend_data (abfd)->s->sizeof_dyn) /* The size of an external REL relocation. */ #define SCORE_ELF_REL_SIZE(abfd) \ (get_elf_backend_data (abfd)->s->sizeof_rel) /* The default alignment for sections, as a power of two. */ #define SCORE_ELF_LOG_FILE_ALIGN(abfd)\ (get_elf_backend_data (abfd)->s->log_file_align) static bfd_byte *hi16_rel_addr; /* This will be used when we sort the dynamic relocation records. */ static bfd *reldyn_sorting_bfd; /* SCORE ELF uses two common sections. One is the usual one, and the other is for small objects. All the small objects are kept together, and then referenced via the gp pointer, which yields faster assembler code. This is what we use for the small common section. This approach is copied from ecoff.c. 
*/ static asection score_elf_scom_section; static asymbol score_elf_scom_symbol; static asymbol *score_elf_scom_symbol_ptr; static bfd_vma score_bfd_get_16 (bfd *abfd, const void *data) { return bfd_get_16 (abfd, data); } static bfd_vma score3_bfd_getl32 (const void *p) { const bfd_byte *addr = p; unsigned long v; v = (unsigned long) addr[2]; v |= (unsigned long) addr[3] << 8; v |= (unsigned long) addr[0] << 16; v |= (unsigned long) addr[1] << 24; return v; } static bfd_vma score3_bfd_getl48 (const void *p) { const bfd_byte *addr = p; unsigned long long v; v = (unsigned long long) addr[4]; v |= (unsigned long long) addr[5] << 8; v |= (unsigned long long) addr[2] << 16; v |= (unsigned long long) addr[3] << 24; v |= (unsigned long long) addr[0] << 32; v |= (unsigned long long) addr[1] << 40; return v; } static bfd_vma score_bfd_get_32 (bfd *abfd, const void *data) { if (/* score3 && */ abfd->xvec->byteorder == BFD_ENDIAN_LITTLE) return score3_bfd_getl32 (data); else return bfd_get_32 (abfd, data); } static bfd_vma score_bfd_get_48 (bfd *abfd, const void *p) { if (/* score3 && */ abfd->xvec->byteorder == BFD_ENDIAN_LITTLE) return score3_bfd_getl48 (p); else return bfd_get_bits (p, 48, 1); } static void score_bfd_put_16 (bfd *abfd, bfd_vma addr, void *data) { return bfd_put_16 (abfd, addr, data); } static void score3_bfd_putl32 (bfd_vma data, void *p) { bfd_byte *addr = p; addr[0] = (data >> 16) & 0xff; addr[1] = (data >> 24) & 0xff; addr[2] = data & 0xff; addr[3] = (data >> 8) & 0xff; } static void score3_bfd_putl48 (bfd_vma data, void *p) { bfd_byte *addr = p; addr[0] = (data >> 32) & 0xff; addr[1] = (data >> 40) & 0xff; addr[2] = (data >> 16) & 0xff; addr[3] = (data >> 24) & 0xff; addr[4] = data & 0xff; addr[5] = (data >> 8) & 0xff; } static void score_bfd_put_32 (bfd *abfd, bfd_vma addr, void *data) { if (/* score3 && */ abfd->xvec->byteorder == BFD_ENDIAN_LITTLE) return score3_bfd_putl32 (addr, data); else return bfd_put_32 (abfd, addr, data); } static void score_bfd_put_48 (bfd *abfd, bfd_vma val, void *p) { if (/* score3 && */ abfd->xvec->byteorder == BFD_ENDIAN_LITTLE) return score3_bfd_putl48 (val, p); else return bfd_put_bits (val, p, 48, 1); } static bfd_reloc_status_type score_elf_hi16_reloc (bfd *abfd ATTRIBUTE_UNUSED, arelent *reloc_entry, asymbol *symbol ATTRIBUTE_UNUSED, void * data, asection *input_section ATTRIBUTE_UNUSED, bfd *output_bfd ATTRIBUTE_UNUSED, char **error_message ATTRIBUTE_UNUSED) { hi16_rel_addr = (bfd_byte *) data + reloc_entry->address; return bfd_reloc_ok; } static bfd_reloc_status_type score_elf_lo16_reloc (bfd *abfd, arelent *reloc_entry, asymbol *symbol ATTRIBUTE_UNUSED, void * data, asection *input_section, bfd *output_bfd ATTRIBUTE_UNUSED, char **error_message ATTRIBUTE_UNUSED) { bfd_vma addend = 0, offset = 0; unsigned long val; unsigned long hi16_offset, hi16_value, uvalue; hi16_value = score_bfd_get_32 (abfd, hi16_rel_addr); hi16_offset = ((((hi16_value >> 16) & 0x3) << 15) | (hi16_value & 0x7fff)) >> 1; addend = score_bfd_get_32 (abfd, (bfd_byte *) data + reloc_entry->address); offset = ((((addend >> 16) & 0x3) << 15) | (addend & 0x7fff)) >> 1; val = reloc_entry->addend; if (reloc_entry->address > input_section->size) return bfd_reloc_outofrange; uvalue = ((hi16_offset << 16) | (offset & 0xffff)) + val; hi16_offset = (uvalue >> 16) << 1; hi16_value = (hi16_value & ~0x37fff) | (hi16_offset & 0x7fff) | ((hi16_offset << 1) & 0x30000); score_bfd_put_32 (abfd, hi16_value, hi16_rel_addr); offset = (uvalue & 0xffff) << 1; addend = (addend & ~0x37fff) | 
(offset & 0x7fff) | ((offset << 1) & 0x30000); score_bfd_put_32 (abfd, addend, (bfd_byte *) data + reloc_entry->address); return bfd_reloc_ok; } /* Set the GP value for OUTPUT_BFD. Returns FALSE if this is a dangerous relocation. */ static bfd_boolean score_elf_assign_gp (bfd *output_bfd, bfd_vma *pgp) { unsigned int count; asymbol **sym; unsigned int i; /* If we've already figured out what GP will be, just return it. */ *pgp = _bfd_get_gp_value (output_bfd); if (*pgp) return TRUE; count = bfd_get_symcount (output_bfd); sym = bfd_get_outsymbols (output_bfd); /* The linker script will have created a symbol named `_gp' with the appropriate value. */ if (sym == NULL) i = count; else { for (i = 0; i < count; i++, sym++) { const char *name; name = bfd_asymbol_name (*sym); if (*name == '_' && strcmp (name, "_gp") == 0) { *pgp = bfd_asymbol_value (*sym); _bfd_set_gp_value (output_bfd, *pgp); break; } } } if (i >= count) { /* Only get the error once. */ *pgp = 4; _bfd_set_gp_value (output_bfd, *pgp); return FALSE; } return TRUE; } /* We have to figure out the gp value, so that we can adjust the symbol value correctly. We look up the symbol _gp in the output BFD. If we can't find it, we're stuck. We cache it in the ELF target data. We don't need to adjust the symbol value for an external symbol if we are producing relocatable output. */ static bfd_reloc_status_type score_elf_final_gp (bfd *output_bfd, asymbol *symbol, bfd_boolean relocatable, char **error_message, bfd_vma *pgp) { if (bfd_is_und_section (symbol->section) && ! relocatable) { *pgp = 0; return bfd_reloc_undefined; } *pgp = _bfd_get_gp_value (output_bfd); if (*pgp == 0 && (! relocatable || (symbol->flags & BSF_SECTION_SYM) != 0)) { if (relocatable) { /* Make up a value. */ *pgp = symbol->section->output_section->vma + 0x4000; _bfd_set_gp_value (output_bfd, *pgp); } else if (!score_elf_assign_gp (output_bfd, pgp)) { *error_message = (char *) _("GP relative relocation when _gp not defined"); return bfd_reloc_dangerous; } } return bfd_reloc_ok; } static bfd_reloc_status_type score_elf_gprel15_with_gp (bfd *abfd, asymbol *symbol, arelent *reloc_entry, asection *input_section, bfd_boolean relocateable, void * data, bfd_vma gp ATTRIBUTE_UNUSED) { bfd_vma relocation; unsigned long insn; if (bfd_is_com_section (symbol->section)) relocation = 0; else relocation = symbol->value; relocation += symbol->section->output_section->vma; relocation += symbol->section->output_offset; if (reloc_entry->address > input_section->size) return bfd_reloc_outofrange; insn = score_bfd_get_32 (abfd, (bfd_byte *) data + reloc_entry->address); if (((reloc_entry->addend & 0xffffc000) != 0) && ((reloc_entry->addend & 0xffffc000) != 0xffffc000)) return bfd_reloc_overflow; insn = (insn & ~0x7fff) | (reloc_entry->addend & 0x7fff); score_bfd_put_32 (abfd, insn, (bfd_byte *) data + reloc_entry->address); if (relocateable) reloc_entry->address += input_section->output_offset; return bfd_reloc_ok; } static bfd_reloc_status_type gprel32_with_gp (bfd *abfd, asymbol *symbol, arelent *reloc_entry, asection *input_section, bfd_boolean relocatable, void *data, bfd_vma gp) { bfd_vma relocation; bfd_vma val; if (bfd_is_com_section (symbol->section)) relocation = 0; else relocation = symbol->value; relocation += symbol->section->output_section->vma; relocation += symbol->section->output_offset; if (reloc_entry->address > bfd_get_section_limit (abfd, input_section)) return bfd_reloc_outofrange; /* Set val to the offset into the section or symbol. 
*/ val = reloc_entry->addend; if (reloc_entry->howto->partial_inplace) val += score_bfd_get_32 (abfd, (bfd_byte *) data + reloc_entry->address); /* Adjust val for the final section location and GP value. If we are producing relocatable output, we don't want to do this for an external symbol. */ if (! relocatable || (symbol->flags & BSF_SECTION_SYM) != 0) val += relocation - gp; if (reloc_entry->howto->partial_inplace) score_bfd_put_32 (abfd, val, (bfd_byte *) data + reloc_entry->address); else reloc_entry->addend = val; if (relocatable) reloc_entry->address += input_section->output_offset; return bfd_reloc_ok; } static bfd_reloc_status_type score_elf_gprel15_reloc (bfd *abfd, arelent *reloc_entry, asymbol *symbol, void * data, asection *input_section, bfd *output_bfd, char **error_message) { bfd_boolean relocateable; bfd_reloc_status_type ret; bfd_vma gp; if (output_bfd != NULL && (symbol->flags & BSF_SECTION_SYM) == 0 && reloc_entry->addend == 0) { reloc_entry->address += input_section->output_offset; return bfd_reloc_ok; } if (output_bfd != NULL) relocateable = TRUE; else { relocateable = FALSE; output_bfd = symbol->section->output_section->owner; } ret = score_elf_final_gp (output_bfd, symbol, relocateable, error_message, &gp); if (ret != bfd_reloc_ok) return ret; return score_elf_gprel15_with_gp (abfd, symbol, reloc_entry, input_section, relocateable, data, gp); } /* Do a R_SCORE_GPREL32 relocation. This is a 32 bit value which must become the offset from the gp register. */ static bfd_reloc_status_type score_elf_gprel32_reloc (bfd *abfd, arelent *reloc_entry, asymbol *symbol, void *data, asection *input_section, bfd *output_bfd, char **error_message) { bfd_boolean relocatable; bfd_reloc_status_type ret; bfd_vma gp; /* R_SCORE_GPREL32 relocations are defined for local symbols only. */ if (output_bfd != NULL && (symbol->flags & BSF_SECTION_SYM) == 0 && (symbol->flags & BSF_LOCAL) != 0) { *error_message = (char *) _("32bits gp relative relocation occurs for an external symbol"); return bfd_reloc_outofrange; } if (output_bfd != NULL) relocatable = TRUE; else { relocatable = FALSE; output_bfd = symbol->section->output_section->owner; } ret = score_elf_final_gp (output_bfd, symbol, relocatable, error_message, &gp); if (ret != bfd_reloc_ok) return ret; gp = 0; return gprel32_with_gp (abfd, symbol, reloc_entry, input_section, relocatable, data, gp); } /* A howto special_function for R_SCORE_GOT15 relocations. This is just like any other 16-bit relocation when applied to global symbols, but is treated in the same as R_SCORE_HI16 when applied to local symbols. */ static bfd_reloc_status_type score_elf_got15_reloc (bfd *abfd, arelent *reloc_entry, asymbol *symbol, void *data, asection *input_section, bfd *output_bfd, char **error_message) { if ((symbol->flags & (BSF_GLOBAL | BSF_WEAK)) != 0 || bfd_is_und_section (bfd_get_section (symbol)) || bfd_is_com_section (bfd_get_section (symbol))) /* The relocation is against a global symbol. 
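   (Editor's note, not in the original: such symbols can be handled by the
   generic 16-bit path below, while local symbols fall through to the
   HI16-style handling, as the comment above score_elf_got15_reloc
   describes.)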
*/ return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data, input_section, output_bfd, error_message); return score_elf_hi16_reloc (abfd, reloc_entry, symbol, data, input_section, output_bfd, error_message); } static bfd_reloc_status_type score_elf_got_lo16_reloc (bfd *abfd, arelent *reloc_entry, asymbol *symbol ATTRIBUTE_UNUSED, void * data, asection *input_section, bfd *output_bfd ATTRIBUTE_UNUSED, char **error_message ATTRIBUTE_UNUSED) { bfd_vma addend = 0, offset = 0; signed long val; signed long hi16_offset, hi16_value, uvalue; hi16_value = score_bfd_get_32 (abfd, hi16_rel_addr); hi16_offset = ((((hi16_value >> 16) & 0x3) << 15) | (hi16_value & 0x7fff)) >> 1; addend = score_bfd_get_32 (abfd, (bfd_byte *) data + reloc_entry->address); offset = ((((addend >> 16) & 0x3) << 15) | (addend & 0x7fff)) >> 1; val = reloc_entry->addend; if (reloc_entry->address > input_section->size) return bfd_reloc_outofrange; uvalue = ((hi16_offset << 16) | (offset & 0xffff)) + val; if ((uvalue > -0x8000) && (uvalue < 0x7fff)) hi16_offset = 0; else hi16_offset = (uvalue >> 16) & 0x7fff; hi16_value = (hi16_value & ~0x37fff) | (hi16_offset & 0x7fff) | ((hi16_offset << 1) & 0x30000); score_bfd_put_32 (abfd, hi16_value, hi16_rel_addr); offset = (uvalue & 0xffff) << 1; addend = (addend & ~0x37fff) | (offset & 0x7fff) | ((offset << 1) & 0x30000); score_bfd_put_32 (abfd, addend, (bfd_byte *) data + reloc_entry->address); return bfd_reloc_ok; } static reloc_howto_type elf32_score_howto_table[] = { /* No relocation. */ HOWTO (R_SCORE_NONE, /* type */ 0, /* rightshift */ 3, /* size (0 = byte, 1 = short, 2 = long) */ 0, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_NONE", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0, /* dst_mask */ FALSE), /* pcrel_offset */ /* R_SCORE_HI16 */ HOWTO (R_SCORE_HI16, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ FALSE, /* pc_relative */ 1, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ score_elf_hi16_reloc, /* special_function */ "R_SCORE_HI16", /* name */ TRUE, /* partial_inplace */ 0x37fff, /* src_mask */ 0x37fff, /* dst_mask */ FALSE), /* pcrel_offset */ /* R_SCORE_LO16 */ HOWTO (R_SCORE_LO16, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ FALSE, /* pc_relative */ 1, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ score_elf_lo16_reloc, /* special_function */ "R_SCORE_LO16", /* name */ TRUE, /* partial_inplace */ 0x37fff, /* src_mask */ 0x37fff, /* dst_mask */ FALSE), /* pcrel_offset */ /* R_SCORE_BCMP */ HOWTO (R_SCORE_BCMP, /* type */ 1, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ TRUE, /* pc_relative */ 1, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_BCMP", /* name */ FALSE, /* partial_inplace */ 0x03e00381, /* src_mask */ 0x03e00381, /* dst_mask */ FALSE), /* pcrel_offset */ /*R_SCORE_24 */ HOWTO (R_SCORE_24, /* type */ 1, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 24, /* bitsize */ FALSE, /* pc_relative */ 1, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_24", /* name */ FALSE, /* partial_inplace */ 0x3ff7fff, /* src_mask */ 0x3ff7fff, /* dst_mask */ FALSE), /* pcrel_offset */ /*R_SCORE_PC19 */ HOWTO (R_SCORE_PC19, /* type */ 
1, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 19, /* bitsize */ TRUE, /* pc_relative */ 1, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_PC19", /* name */ FALSE, /* partial_inplace */ 0x3ff03fe, /* src_mask */ 0x3ff03fe, /* dst_mask */ FALSE), /* pcrel_offset */ /*R_SCORE16_11 */ HOWTO (R_SCORE16_11, /* type */ 1, /* rightshift */ 1, /* size (0 = byte, 1 = short, 2 = long) */ 11, /* bitsize */ FALSE, /* pc_relative */ 1, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE16_11", /* name */ FALSE, /* partial_inplace */ 0x000000ffe, /* src_mask */ 0x000000ffe, /* dst_mask */ FALSE), /* pcrel_offset */ /* R_SCORE16_PC8 */ HOWTO (R_SCORE16_PC8, /* type */ 1, /* rightshift */ 1, /* size (0 = byte, 1 = short, 2 = long) */ 9, /* bitsize */ TRUE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE16_PC8", /* name */ FALSE, /* partial_inplace */ 0x000001ff, /* src_mask */ 0x000001ff, /* dst_mask */ FALSE), /* pcrel_offset */ /* 32 bit absolute */ HOWTO (R_SCORE_ABS32, /* type 8 */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 32, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_ABS32", /* name */ FALSE, /* partial_inplace */ 0xffffffff, /* src_mask */ 0xffffffff, /* dst_mask */ FALSE), /* pcrel_offset */ /* 16 bit absolute */ HOWTO (R_SCORE_ABS16, /* type 11 */ 0, /* rightshift */ 1, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_ABS16", /* name */ FALSE, /* partial_inplace */ 0x0000ffff, /* src_mask */ 0x0000ffff, /* dst_mask */ FALSE), /* pcrel_offset */ /* R_SCORE_DUMMY2 */ HOWTO (R_SCORE_DUMMY2, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_DUMMY2", /* name */ TRUE, /* partial_inplace */ 0x00007fff, /* src_mask */ 0x00007fff, /* dst_mask */ FALSE), /* pcrel_offset */ /* R_SCORE_GP15 */ HOWTO (R_SCORE_GP15, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ score_elf_gprel15_reloc,/* special_function */ "R_SCORE_GP15", /* name */ TRUE, /* partial_inplace */ 0x00007fff, /* src_mask */ 0x00007fff, /* dst_mask */ FALSE), /* pcrel_offset */ /* GNU extension to record C++ vtable hierarchy. 
*/ HOWTO (R_SCORE_GNU_VTINHERIT, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 0, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ NULL, /* special_function */ "R_SCORE_GNU_VTINHERIT", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0, /* dst_mask */ FALSE), /* pcrel_offset */ /* GNU extension to record C++ vtable member usage */ HOWTO (R_SCORE_GNU_VTENTRY, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 0, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ _bfd_elf_rel_vtable_reloc_fn, /* special_function */ "R_SCORE_GNU_VTENTRY", /* name */ FALSE, /* partial_inplace */ 0, /* src_mask */ 0, /* dst_mask */ FALSE), /* pcrel_offset */ /* Reference to global offset table. */ HOWTO (R_SCORE_GOT15, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ score_elf_got15_reloc, /* special_function */ "R_SCORE_GOT15", /* name */ TRUE, /* partial_inplace */ 0x00007fff, /* src_mask */ 0x00007fff, /* dst_mask */ FALSE), /* pcrel_offset */ /* Low 16 bits of displacement in global offset table. */ HOWTO (R_SCORE_GOT_LO16, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ FALSE, /* pc_relative */ 1, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ score_elf_got_lo16_reloc, /* special_function */ "R_SCORE_GOT_LO16", /* name */ TRUE, /* partial_inplace */ 0x37ffe, /* src_mask */ 0x37ffe, /* dst_mask */ FALSE), /* pcrel_offset */ /* 15 bit call through global offset table. */ HOWTO (R_SCORE_CALL15, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_signed, /* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_CALL15", /* name */ TRUE, /* partial_inplace */ 0x0000ffff, /* src_mask */ 0x0000ffff, /* dst_mask */ FALSE), /* pcrel_offset */ /* 32 bit GP relative reference. */ HOWTO (R_SCORE_GPREL32, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 32, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ score_elf_gprel32_reloc, /* special_function */ "R_SCORE_GPREL32", /* name */ TRUE, /* partial_inplace */ 0xffffffff, /* src_mask */ 0xffffffff, /* dst_mask */ FALSE), /* pcrel_offset */ /* 32 bit symbol relative relocation. 
*/ HOWTO (R_SCORE_REL32, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 32, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_REL32", /* name */ TRUE, /* partial_inplace */ 0xffffffff, /* src_mask */ 0xffffffff, /* dst_mask */ FALSE), /* pcrel_offset */ /* R_SCORE_DUMMY_HI16 */ HOWTO (R_SCORE_DUMMY_HI16, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 16, /* bitsize */ FALSE, /* pc_relative */ 1, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ score_elf_hi16_reloc, /* special_function */ "R_SCORE_DUMMY_HI16", /* name */ TRUE, /* partial_inplace */ 0x37fff, /* src_mask */ 0x37fff, /* dst_mask */ FALSE), /* pcrel_offset */ /* R_SCORE_IMM30 */ HOWTO (R_SCORE_IMM30, /* type */ 2, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 30, /* bitsize */ FALSE, /* pc_relative */ 7, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_IMM30", /* name */ FALSE, /* partial_inplace */ 0x7f7fff7f80LL, /* src_mask */ 0x7f7fff7f80LL, /* dst_mask */ FALSE), /* pcrel_offset */ /* R_SCORE_IMM32 */ HOWTO (R_SCORE_IMM32, /* type */ 0, /* rightshift */ 2, /* size (0 = byte, 1 = short, 2 = long) */ 32, /* bitsize */ FALSE, /* pc_relative */ 5, /* bitpos */ complain_overflow_dont,/* complain_on_overflow */ bfd_elf_generic_reloc, /* special_function */ "R_SCORE_IMM32", /* name */ FALSE, /* partial_inplace */ 0x7f7fff7fe0LL, /* src_mask */ 0x7f7fff7fe0LL, /* dst_mask */ FALSE), /* pcrel_offset */ }; struct score_reloc_map { bfd_reloc_code_real_type bfd_reloc_val; unsigned char elf_reloc_val; }; static const struct score_reloc_map elf32_score_reloc_map[] = { {BFD_RELOC_NONE, R_SCORE_NONE}, {BFD_RELOC_HI16_S, R_SCORE_HI16}, {BFD_RELOC_LO16, R_SCORE_LO16}, {BFD_RELOC_SCORE_BCMP, R_SCORE_BCMP}, {BFD_RELOC_SCORE_JMP, R_SCORE_24}, {BFD_RELOC_SCORE_BRANCH, R_SCORE_PC19}, {BFD_RELOC_SCORE16_JMP, R_SCORE16_11}, {BFD_RELOC_SCORE16_BRANCH, R_SCORE16_PC8}, {BFD_RELOC_32, R_SCORE_ABS32}, {BFD_RELOC_16, R_SCORE_ABS16}, {BFD_RELOC_SCORE_DUMMY2, R_SCORE_DUMMY2}, {BFD_RELOC_SCORE_GPREL15, R_SCORE_GP15}, {BFD_RELOC_VTABLE_INHERIT, R_SCORE_GNU_VTINHERIT}, {BFD_RELOC_VTABLE_ENTRY, R_SCORE_GNU_VTENTRY}, {BFD_RELOC_SCORE_GOT15, R_SCORE_GOT15}, {BFD_RELOC_SCORE_GOT_LO16, R_SCORE_GOT_LO16}, {BFD_RELOC_SCORE_CALL15, R_SCORE_CALL15}, {BFD_RELOC_GPREL32, R_SCORE_GPREL32}, {BFD_RELOC_32_PCREL, R_SCORE_REL32}, {BFD_RELOC_SCORE_DUMMY_HI16, R_SCORE_DUMMY_HI16}, {BFD_RELOC_SCORE_IMM30, R_SCORE_IMM30}, {BFD_RELOC_SCORE_IMM32, R_SCORE_IMM32}, }; /* got_entries only match if they're identical, except for gotidx, so use all fields to compute the hash, and compare the appropriate union members. */ static hashval_t score_elf_got_entry_hash (const void *entry_) { const struct score_got_entry *entry = (struct score_got_entry *)entry_; return entry->symndx + (!entry->abfd ? entry->d.address : entry->abfd->id); } static int score_elf_got_entry_eq (const void *entry1, const void *entry2) { const struct score_got_entry *e1 = (struct score_got_entry *)entry1; const struct score_got_entry *e2 = (struct score_got_entry *)entry2; return e1->abfd == e2->abfd && e1->symndx == e2->symndx && (! e1->abfd ? e1->d.address == e2->d.address : e1->symndx >= 0 ? e1->d.addend == e2->d.addend : e1->d.h == e2->d.h); } /* If H needs a GOT entry, assign it the highest available dynamic index. 
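*/

/* Editor's note: a usage sketch added for this collection, not part of
   the original file.  It shows how the hash/equality callbacks above
   drive the libiberty hashtab holding GOT entries: gotidx is deliberately
   excluded from the key, so an entry is found again by the same
   (abfd, symndx, addend) triple even after its GOT slot is assigned.
   The field values are illustrative only.  */
#if 0
static void
score_got_entry_table_demo (bfd *abfd)
{
  struct score_got_entry key, **slot;
  htab_t table = htab_create (1, score_elf_got_entry_hash,
                              score_elf_got_entry_eq, NULL);

  key.abfd = abfd;        /* a local-symbol entry is keyed by ...    */
  key.symndx = 3;         /* ... the defining bfd, the symbol index  */
  key.d.addend = 0x10;    /* ... and the relocation addend.          */
  key.gotidx = -1;        /* NOT part of the key.                    */

  slot = (struct score_got_entry **)
    htab_find_slot (table, &key, INSERT);
  /* *slot is NULL on first insertion; a second lookup with the same
     triple lands on the same slot.  */
  (void) slot;
  htab_delete (table);
}
#endif

/* (Doc comment for score_elf_sort_hash_table_f, continued:)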
Otherwise, assign it the lowest available dynamic index. */ static bfd_boolean score_elf_sort_hash_table_f (struct score_elf_link_hash_entry *h, void *data) { struct score_elf_hash_sort_data *hsd = data; /* Symbols without dynamic symbol table entries aren't interesting at all. */ if (h->root.dynindx == -1) return TRUE; /* Global symbols that need GOT entries that are not explicitly referenced are marked with got offset 2. Those that are referenced get a 1, and those that don't need GOT entries get -1. */ if (h->root.got.offset == 2) { if (hsd->max_unref_got_dynindx == hsd->min_got_dynindx) hsd->low = (struct elf_link_hash_entry *) h; h->root.dynindx = hsd->max_unref_got_dynindx++; } else if (h->root.got.offset != 1) h->root.dynindx = hsd->max_non_got_dynindx++; else { h->root.dynindx = --hsd->min_got_dynindx; hsd->low = (struct elf_link_hash_entry *) h; } return TRUE; } static asection * score_elf_got_section (bfd *abfd, bfd_boolean maybe_excluded) { asection *sgot = bfd_get_linker_section (abfd, ".got"); if (sgot == NULL || (! maybe_excluded && (sgot->flags & SEC_EXCLUDE) != 0)) return NULL; return sgot; } /* Returns the GOT information associated with the link indicated by INFO. If SGOTP is non-NULL, it is filled in with the GOT section. */ static struct score_got_info * score_elf_got_info (bfd *abfd, asection **sgotp) { asection *sgot; struct score_got_info *g; sgot = score_elf_got_section (abfd, TRUE); BFD_ASSERT (sgot != NULL); BFD_ASSERT (elf_section_data (sgot) != NULL); g = score_elf_section_data (sgot)->u.got_info; BFD_ASSERT (g != NULL); if (sgotp) *sgotp = sgot; return g; } /* Sort the dynamic symbol table so that symbols that need GOT entries appear towards the end. This reduces the amount of GOT space required. MAX_LOCAL is used to set the number of local symbols known to be in the dynamic symbol table. During s3_bfd_score_elf_size_dynamic_sections, this value is 1. Afterward, the section symbols are added and the count is higher. */ static bfd_boolean score_elf_sort_hash_table (struct bfd_link_info *info, unsigned long max_local) { struct score_elf_hash_sort_data hsd; struct score_got_info *g; bfd *dynobj; dynobj = elf_hash_table (info)->dynobj; g = score_elf_got_info (dynobj, NULL); hsd.low = NULL; hsd.max_unref_got_dynindx = hsd.min_got_dynindx = elf_hash_table (info)->dynsymcount /* In the multi-got case, assigned_gotno of the master got_info indicate the number of entries that aren't referenced in the primary GOT, but that must have entries because there are dynamic relocations that reference it. Since they aren't referenced, we move them to the end of the GOT, so that they don't prevent other entries that are referenced from getting too large offsets. */ - (g->next ? g->assigned_gotno : 0); hsd.max_non_got_dynindx = max_local; score_elf_link_hash_traverse (elf_hash_table (info), score_elf_sort_hash_table_f, &hsd); /* There should have been enough room in the symbol table to accommodate both the GOT and non-GOT symbols. */ BFD_ASSERT (hsd.max_non_got_dynindx <= hsd.min_got_dynindx); BFD_ASSERT ((unsigned long)hsd.max_unref_got_dynindx <= elf_hash_table (info)->dynsymcount); /* Now we know which dynamic symbol has the lowest dynamic symbol table index in the GOT. */ g->global_gotsym = hsd.low; return TRUE; } /* Create an entry in an score ELF linker hash table. 
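   (Editor's note, not in the original: the function below follows the
   usual BFD newfunc idiom: allocate storage sized for the subclass,
   let _bfd_elf_link_hash_newfunc initialise the generic ELF part, then
   initialise the score-specific fields.)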
*/ static struct bfd_hash_entry * score_elf_link_hash_newfunc (struct bfd_hash_entry *entry, struct bfd_hash_table *table, const char *string) { struct score_elf_link_hash_entry *ret = (struct score_elf_link_hash_entry *) entry; /* Allocate the structure if it has not already been allocated by a subclass. */ if (ret == NULL) ret = bfd_hash_allocate (table, sizeof (struct score_elf_link_hash_entry)); if (ret == NULL) return (struct bfd_hash_entry *) ret; /* Call the allocation method of the superclass. */ ret = ((struct score_elf_link_hash_entry *) _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret, table, string)); if (ret != NULL) { ret->possibly_dynamic_relocs = 0; ret->readonly_reloc = FALSE; ret->no_fn_stub = FALSE; ret->forced_local = FALSE; } return (struct bfd_hash_entry *) ret; } /* Returns the first relocation of type r_type found, beginning with RELOCATION. RELEND is one-past-the-end of the relocation table. */ static const Elf_Internal_Rela * score_elf_next_relocation (bfd *abfd ATTRIBUTE_UNUSED, unsigned int r_type, const Elf_Internal_Rela *relocation, const Elf_Internal_Rela *relend) { while (relocation < relend) { if (ELF32_R_TYPE (relocation->r_info) == r_type) return relocation; ++relocation; } /* We didn't find it. */ bfd_set_error (bfd_error_bad_value); return NULL; } /* This function is called via qsort() to sort the dynamic relocation entries by increasing r_symndx value. */ static int score_elf_sort_dynamic_relocs (const void *arg1, const void *arg2) { Elf_Internal_Rela int_reloc1; Elf_Internal_Rela int_reloc2; bfd_elf32_swap_reloc_in (reldyn_sorting_bfd, arg1, &int_reloc1); bfd_elf32_swap_reloc_in (reldyn_sorting_bfd, arg2, &int_reloc2); return (ELF32_R_SYM (int_reloc1.r_info) - ELF32_R_SYM (int_reloc2.r_info)); } /* Return whether a relocation is against a local symbol. */ static bfd_boolean score_elf_local_relocation_p (bfd *input_bfd, const Elf_Internal_Rela *relocation, asection **local_sections, bfd_boolean check_forced) { unsigned long r_symndx; Elf_Internal_Shdr *symtab_hdr; struct score_elf_link_hash_entry *h; size_t extsymoff; r_symndx = ELF32_R_SYM (relocation->r_info); symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; extsymoff = (elf_bad_symtab (input_bfd)) ? 0 : symtab_hdr->sh_info; if (r_symndx < extsymoff) return TRUE; if (elf_bad_symtab (input_bfd) && local_sections[r_symndx] != NULL) return TRUE; if (check_forced) { /* Look up the hash table to check whether the symbol was forced local. */ h = (struct score_elf_link_hash_entry *) elf_sym_hashes (input_bfd) [r_symndx - extsymoff]; /* Find the real hash-table entry for this symbol. */ while (h->root.root.type == bfd_link_hash_indirect || h->root.root.type == bfd_link_hash_warning) h = (struct score_elf_link_hash_entry *) h->root.root.u.i.link; if (h->root.forced_local) return TRUE; } return FALSE; } /* Returns the dynamic relocation section for DYNOBJ. */ static asection * score_elf_rel_dyn_section (bfd *dynobj, bfd_boolean create_p) { static const char dname[] = ".rel.dyn"; asection *sreloc; sreloc = bfd_get_linker_section (dynobj, dname); if (sreloc == NULL && create_p) { sreloc = bfd_make_section_anyway_with_flags (dynobj, dname, (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY)); if (sreloc == NULL || ! 
bfd_set_section_alignment (dynobj, sreloc, SCORE_ELF_LOG_FILE_ALIGN (dynobj))) return NULL; } return sreloc; } static void score_elf_allocate_dynamic_relocations (bfd *abfd, unsigned int n) { asection *s; s = score_elf_rel_dyn_section (abfd, FALSE); BFD_ASSERT (s != NULL); if (s->size == 0) { /* Make room for a null element. */ s->size += SCORE_ELF_REL_SIZE (abfd); ++s->reloc_count; } s->size += n * SCORE_ELF_REL_SIZE (abfd); } /* Create a rel.dyn relocation for the dynamic linker to resolve. REL is the original relocation, which is now being transformed into a dynamic relocation. The ADDENDP is adjusted if necessary; the caller should store the result in place of the original addend. */ static bfd_boolean score_elf_create_dynamic_relocation (bfd *output_bfd, struct bfd_link_info *info, const Elf_Internal_Rela *rel, struct score_elf_link_hash_entry *h, bfd_vma symbol, bfd_vma *addendp, asection *input_section) { Elf_Internal_Rela outrel[3]; asection *sreloc; bfd *dynobj; int r_type; long indx; bfd_boolean defined_p; r_type = ELF32_R_TYPE (rel->r_info); dynobj = elf_hash_table (info)->dynobj; sreloc = score_elf_rel_dyn_section (dynobj, FALSE); BFD_ASSERT (sreloc != NULL); BFD_ASSERT (sreloc->contents != NULL); BFD_ASSERT (sreloc->reloc_count * SCORE_ELF_REL_SIZE (output_bfd) < sreloc->size); outrel[0].r_offset = _bfd_elf_section_offset (output_bfd, info, input_section, rel[0].r_offset); outrel[1].r_offset = _bfd_elf_section_offset (output_bfd, info, input_section, rel[1].r_offset); outrel[2].r_offset = _bfd_elf_section_offset (output_bfd, info, input_section, rel[2].r_offset); if (outrel[0].r_offset == MINUS_ONE) /* The relocation field has been deleted. */ return TRUE; if (outrel[0].r_offset == MINUS_TWO) { /* The relocation field has been converted into a relative value of some sort. Functions like _bfd_elf_write_section_eh_frame expect the field to be fully relocated, so add in the symbol's value. */ *addendp += symbol; return TRUE; } /* We must now calculate the dynamic symbol table index to use in the relocation. */ if (h != NULL && (! info->symbolic || !h->root.def_regular) /* h->root.dynindx may be -1 if this symbol was marked to become local. */ && h->root.dynindx != -1) { indx = h->root.dynindx; /* ??? glibc's ld.so just adds the final GOT entry to the relocation field. It therefore treats relocs against defined symbols in the same way as relocs against undefined symbols. */ defined_p = FALSE; } else { indx = 0; defined_p = TRUE; } /* If the relocation was previously an absolute relocation and this symbol will not be referred to by the relocation, we must adjust it by the value we give it in the dynamic symbol table. Otherwise leave the job up to the dynamic linker. */ if (defined_p && r_type != R_SCORE_REL32) *addendp += symbol; /* The relocation is always an REL32 relocation because we don't know where the shared library will wind up at load-time. */ outrel[0].r_info = ELF32_R_INFO ((unsigned long) indx, R_SCORE_REL32); /* For strict adherence to the ABI specification, we should generate a R_SCORE_64 relocation record by itself before the _REL32/_64 record as well, such that the addend is read in as a 64-bit value (REL32 is a 32-bit relocation, after all). However, since none of the existing ELF64 SCORE dynamic loaders seems to care, we don't waste space with these artificial relocations. 
If this turns out to not be true, score_elf_allocate_dynamic_relocations() should be tweaked so as to make room for a pair of dynamic relocations per invocation if ABI_64_P, and here we should generate an additional relocation record with R_SCORE_64 by itself for a NULL symbol before this relocation record. */ outrel[1].r_info = ELF32_R_INFO (0, R_SCORE_NONE); outrel[2].r_info = ELF32_R_INFO (0, R_SCORE_NONE); /* Adjust the output offset of the relocation to reference the correct location in the output file. */ outrel[0].r_offset += (input_section->output_section->vma + input_section->output_offset); outrel[1].r_offset += (input_section->output_section->vma + input_section->output_offset); outrel[2].r_offset += (input_section->output_section->vma + input_section->output_offset); /* Put the relocation back out. We have to use the special relocation outputter in the 64-bit case since the 64-bit relocation format is non-standard. */ bfd_elf32_swap_reloc_out (output_bfd, &outrel[0], (sreloc->contents + sreloc->reloc_count * sizeof (Elf32_External_Rel))); /* We've now added another relocation. */ ++sreloc->reloc_count; /* Make sure the output section is writable. The dynamic linker will be writing to it. */ elf_section_data (input_section->output_section)->this_hdr.sh_flags |= SHF_WRITE; return TRUE; } static bfd_boolean score_elf_create_got_section (bfd *abfd, struct bfd_link_info *info, bfd_boolean maybe_exclude) { flagword flags; asection *s; struct elf_link_hash_entry *h; struct bfd_link_hash_entry *bh; struct score_got_info *g; bfd_size_type amt; /* This function may be called more than once. */ s = score_elf_got_section (abfd, TRUE); if (s) { if (! maybe_exclude) s->flags &= ~SEC_EXCLUDE; return TRUE; } flags = (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED); if (maybe_exclude) flags |= SEC_EXCLUDE; /* We have to use an alignment of 2**4 here because this is hardcoded in the function stub generation and in the linker script. */ s = bfd_make_section_anyway_with_flags (abfd, ".got", flags); elf_hash_table (info)->sgot = s; if (s == NULL || ! bfd_set_section_alignment (abfd, s, 4)) return FALSE; /* Define the symbol _GLOBAL_OFFSET_TABLE_. We don't do this in the linker script because we don't want to define the symbol if we are not creating a global offset table. */ bh = NULL; if (! (_bfd_generic_link_add_one_symbol (info, abfd, "_GLOBAL_OFFSET_TABLE_", BSF_GLOBAL, s, 0, NULL, FALSE, get_elf_backend_data (abfd)->collect, &bh))) return FALSE; h = (struct elf_link_hash_entry *) bh; h->non_elf = 0; h->def_regular = 1; h->type = STT_OBJECT; elf_hash_table (info)->hgot = h; if (bfd_link_pic (info) && ! bfd_elf_link_record_dynamic_symbol (info, h)) return FALSE; amt = sizeof (struct score_got_info); g = bfd_alloc (abfd, amt); if (g == NULL) return FALSE; g->global_gotsym = NULL; g->global_gotno = 0; g->local_gotno = SCORE_RESERVED_GOTNO; g->assigned_gotno = SCORE_RESERVED_GOTNO; g->next = NULL; g->got_entries = htab_try_create (1, score_elf_got_entry_hash, score_elf_got_entry_eq, NULL); if (g->got_entries == NULL) return FALSE; score_elf_section_data (s)->u.got_info = g; score_elf_section_data (s)->elf.this_hdr.sh_flags |= SHF_ALLOC | SHF_WRITE | SHF_SCORE_GPREL; return TRUE; } /* Calculate the %high function. */ static bfd_vma score_elf_high (bfd_vma value) { return ((value + (bfd_vma) 0x8000) >> 16) & 0xffff; } /* Create a local GOT entry for VALUE. Return the index of the entry, or -1 if it could not be created. 
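*/

/* Editor's note: a self-check sketch added for this collection, not part
   of the original file.  score_elf_high() above implements the rounding
   %high split used for HI16/LO16 pairs: the +0x8000 bias bumps the high
   half whenever the low half will later be sign-extended as negative,
   so (high << 16) + (short) low reconstructs the original value.  */
#if 0
#include <assert.h>
static void
score_elf_high_demo (void)
{
  bfd_vma value = 0x12348000;
  bfd_vma hi = score_elf_high (value); /* (0x12348000 + 0x8000) >> 16 == 0x1235 */
  short lo = (short) (value & 0xffff); /* 0x8000 sign-extends to -0x8000 */
  assert ((hi << 16) + (bfd_vma) lo == value);
}
#endif

/* (score_elf_create_local_got_entry, documented just above the sketch, follows.)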
*/ static struct score_got_entry * score_elf_create_local_got_entry (bfd *abfd, bfd *ibfd ATTRIBUTE_UNUSED, struct score_got_info *gg, asection *sgot, bfd_vma value, unsigned long r_symndx ATTRIBUTE_UNUSED, struct score_elf_link_hash_entry *h ATTRIBUTE_UNUSED, int r_type ATTRIBUTE_UNUSED) { struct score_got_entry entry, **loc; struct score_got_info *g; entry.abfd = NULL; entry.symndx = -1; entry.d.address = value; g = gg; loc = (struct score_got_entry **) htab_find_slot (g->got_entries, &entry, INSERT); if (*loc) return *loc; entry.gotidx = SCORE_ELF_GOT_SIZE (abfd) * g->assigned_gotno++; *loc = bfd_alloc (abfd, sizeof entry); if (! *loc) return NULL; memcpy (*loc, &entry, sizeof entry); if (g->assigned_gotno >= g->local_gotno) { (*loc)->gotidx = -1; /* We didn't allocate enough space in the GOT. */ _bfd_error_handler (_("not enough GOT space for local GOT entries")); bfd_set_error (bfd_error_bad_value); return NULL; } score_bfd_put_32 (abfd, value, (sgot->contents + entry.gotidx)); return *loc; } /* Find a GOT entry whose higher-order 16 bits are the same as those for value. Return the index into the GOT for this entry. */ static bfd_vma score_elf_got16_entry (bfd *abfd, bfd *ibfd, struct bfd_link_info *info, bfd_vma value, bfd_boolean external) { asection *sgot; struct score_got_info *g; struct score_got_entry *entry; if (!external) { /* Although the ABI says that it is "the high-order 16 bits" that we want, it is really the %high value. The complete value is calculated with a `addiu' of a LO16 relocation, just as with a HI16/LO16 pair. */ value = score_elf_high (value) << 16; } g = score_elf_got_info (elf_hash_table (info)->dynobj, &sgot); entry = score_elf_create_local_got_entry (abfd, ibfd, g, sgot, value, 0, NULL, R_SCORE_GOT15); if (entry) return entry->gotidx; else return MINUS_ONE; } static void s3_bfd_score_elf_hide_symbol (struct bfd_link_info *info, struct elf_link_hash_entry *entry, bfd_boolean force_local) { bfd *dynobj; asection *got; struct score_got_info *g; struct score_elf_link_hash_entry *h; h = (struct score_elf_link_hash_entry *) entry; if (h->forced_local) return; h->forced_local = TRUE; dynobj = elf_hash_table (info)->dynobj; if (dynobj != NULL && force_local) { got = score_elf_got_section (dynobj, FALSE); if (got == NULL) return; g = score_elf_section_data (got)->u.got_info; if (g->next) { struct score_got_entry e; struct score_got_info *gg = g; /* Since we're turning what used to be a global symbol into a local one, bump up the number of local entries of each GOT that had an entry for it. This will automatically decrease the number of global entries, since global_gotno is actually the upper limit of global entries. */ e.abfd = dynobj; e.symndx = -1; e.d.h = h; for (g = g->next; g != gg; g = g->next) if (htab_find (g->got_entries, &e)) { BFD_ASSERT (g->global_gotno > 0); g->local_gotno++; g->global_gotno--; } /* If this was a global symbol forced into the primary GOT, we no longer need an entry for it. We can't release the entry at this point, but we must at least stop counting it as one of the symbols that required a forced got entry. */ if (h->root.got.offset == 2) { BFD_ASSERT (gg->assigned_gotno > 0); gg->assigned_gotno--; } } else if (g->global_gotno == 0 && g->global_gotsym == NULL) /* If we haven't got through GOT allocation yet, just bump up the number of local entries, as this symbol won't be counted as global. 
*/ g->local_gotno++; else if (h->root.got.offset == 1) { /* If we're past non-multi-GOT allocation and this symbol had been marked for a global got entry, give it a local entry instead. */ BFD_ASSERT (g->global_gotno > 0); g->local_gotno++; g->global_gotno--; } } _bfd_elf_link_hash_hide_symbol (info, &h->root, force_local); } /* If H is a symbol that needs a global GOT entry, but has a dynamic symbol table index lower than any we've seen to date, record it for posterity. */ static bfd_boolean score_elf_record_global_got_symbol (struct elf_link_hash_entry *h, bfd *abfd, struct bfd_link_info *info, struct score_got_info *g) { struct score_got_entry entry, **loc; /* A global symbol in the GOT must also be in the dynamic symbol table. */ if (h->dynindx == -1) { switch (ELF_ST_VISIBILITY (h->other)) { case STV_INTERNAL: case STV_HIDDEN: s3_bfd_score_elf_hide_symbol (info, h, TRUE); break; } if (!bfd_elf_link_record_dynamic_symbol (info, h)) return FALSE; } entry.abfd = abfd; entry.symndx = -1; entry.d.h = (struct score_elf_link_hash_entry *)h; loc = (struct score_got_entry **)htab_find_slot (g->got_entries, &entry, INSERT); /* If we've already marked this entry as needing GOT space, we don't need to do it again. */ if (*loc) return TRUE; *loc = bfd_alloc (abfd, sizeof entry); if (! *loc) return FALSE; entry.gotidx = -1; memcpy (*loc, &entry, sizeof (entry)); if (h->got.offset != MINUS_ONE) return TRUE; /* By setting this to a value other than -1, we are indicating that there needs to be a GOT entry for H. Avoid using zero, as the generic ELF copy_indirect_symbol tests for <= 0. */ h->got.offset = 1; return TRUE; } /* Reserve space in G for a GOT entry containing the value of symbol SYMNDX in input bfd ABDF, plus ADDEND. */ static bfd_boolean score_elf_record_local_got_symbol (bfd *abfd, long symndx, bfd_vma addend, struct score_got_info *g) { struct score_got_entry entry, **loc; entry.abfd = abfd; entry.symndx = symndx; entry.d.addend = addend; loc = (struct score_got_entry **)htab_find_slot (g->got_entries, &entry, INSERT); if (*loc) return TRUE; entry.gotidx = g->local_gotno++; *loc = bfd_alloc (abfd, sizeof(entry)); if (! *loc) return FALSE; memcpy (*loc, &entry, sizeof (entry)); return TRUE; } /* Returns the GOT offset at which the indicated address can be found. If there is not yet a GOT entry for this value, create one. Returns -1 if no satisfactory GOT offset can be found. */ static bfd_vma score_elf_local_got_index (bfd *abfd, bfd *ibfd, struct bfd_link_info *info, bfd_vma value, unsigned long r_symndx, struct score_elf_link_hash_entry *h, int r_type) { asection *sgot; struct score_got_info *g; struct score_got_entry *entry; g = score_elf_got_info (elf_hash_table (info)->dynobj, &sgot); entry = score_elf_create_local_got_entry (abfd, ibfd, g, sgot, value, r_symndx, h, r_type); if (!entry) return MINUS_ONE; else return entry->gotidx; } /* Returns the GOT index for the global symbol indicated by H. */ static bfd_vma score_elf_global_got_index (bfd *abfd, struct elf_link_hash_entry *h) { bfd_vma got_index; asection *sgot; struct score_got_info *g; long global_got_dynindx = 0; g = score_elf_got_info (abfd, &sgot); if (g->global_gotsym != NULL) global_got_dynindx = g->global_gotsym->dynindx; /* Once we determine the global GOT entry with the lowest dynamic symbol table index, we must put all dynamic symbols with greater indices into the GOT. That makes it easy to calculate the GOT offset. 
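   (Editor's note, not in the original: for example, with local_gotno == 5,
   a lowest global GOT dynindx of 10 and 4-byte GOT entries, the symbol
   with dynindx 12 is found (12 - 10 + 5) * 4 == 28 bytes into .got.)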
*/ BFD_ASSERT (h->dynindx >= global_got_dynindx); got_index = ((h->dynindx - global_got_dynindx + g->local_gotno) * SCORE_ELF_GOT_SIZE (abfd)); BFD_ASSERT (got_index < sgot->size); return got_index; } /* Returns the offset for the entry at the INDEXth position in the GOT. */ static bfd_vma score_elf_got_offset_from_index (bfd *dynobj, bfd *output_bfd, bfd *input_bfd ATTRIBUTE_UNUSED, bfd_vma got_index) { asection *sgot; bfd_vma gp; score_elf_got_info (dynobj, &sgot); gp = _bfd_get_gp_value (output_bfd); return sgot->output_section->vma + sgot->output_offset + got_index - gp; } /* Follow indirect and warning hash entries so that each got entry points to the final symbol definition. P must point to a pointer to the hash table we're traversing. Since this traversal may modify the hash table, we set this pointer to NULL to indicate we've made a potentially-destructive change to the hash table, so the traversal must be restarted. */ static int score_elf_resolve_final_got_entry (void **entryp, void *p) { struct score_got_entry *entry = (struct score_got_entry *)*entryp; htab_t got_entries = *(htab_t *)p; if (entry->abfd != NULL && entry->symndx == -1) { struct score_elf_link_hash_entry *h = entry->d.h; while (h->root.root.type == bfd_link_hash_indirect || h->root.root.type == bfd_link_hash_warning) h = (struct score_elf_link_hash_entry *) h->root.root.u.i.link; if (entry->d.h == h) return 1; entry->d.h = h; /* If we can't find this entry with the new bfd hash, re-insert it, and get the traversal restarted. */ if (! htab_find (got_entries, entry)) { htab_clear_slot (got_entries, entryp); entryp = htab_find_slot (got_entries, entry, INSERT); if (! *entryp) *entryp = entry; /* Abort the traversal, since the whole table may have moved, and leave it up to the parent to restart the process. */ *(htab_t *)p = NULL; return 0; } /* We might want to decrement the global_gotno count, but it's either too early or too late for that at this point. */ } return 1; } /* Turn indirect got entries in a got_entries table into their final locations. */ static void score_elf_resolve_final_got_entries (struct score_got_info *g) { htab_t got_entries; do { got_entries = g->got_entries; htab_traverse (got_entries, score_elf_resolve_final_got_entry, &got_entries); } while (got_entries == NULL); } /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. for -r */ static void score_elf_add_to_rel (bfd *abfd, bfd_byte *address, reloc_howto_type *howto, bfd_signed_vma increment) { bfd_signed_vma addend; bfd_vma contents; unsigned long offset; unsigned long r_type = howto->type; unsigned long hi16_addend, hi16_offset, hi16_value, uvalue; contents = score_bfd_get_32 (abfd, address); /* Get the (signed) value from the instruction. */ addend = contents & howto->src_mask; if (addend & ((howto->src_mask + 1) >> 1)) { bfd_signed_vma mask; mask = -1; mask &= ~howto->src_mask; addend |= mask; } /* Add in the increment, (which is a byte value). 
*/ switch (r_type) { case R_SCORE_PC19: offset = (((contents & howto->src_mask) & 0x3ff0000) >> 6) | ((contents & howto->src_mask) & 0x3ff); offset += increment; contents = (contents & ~howto-> src_mask) | (((offset << 6) & howto->src_mask) & 0x3ff0000) | (offset & 0x3ff); score_bfd_put_32 (abfd, contents, address); break; case R_SCORE_HI16: break; case R_SCORE_LO16: hi16_addend = score_bfd_get_32 (abfd, address - 4); hi16_offset = ((((hi16_addend >> 16) & 0x3) << 15) | (hi16_addend & 0x7fff)) >> 1; offset = ((((contents >> 16) & 0x3) << 15) | (contents & 0x7fff)) >> 1; offset = (hi16_offset << 16) | (offset & 0xffff); uvalue = increment + offset; hi16_offset = (uvalue >> 16) << 1; hi16_value = (hi16_addend & (~(howto->dst_mask))) | (hi16_offset & 0x7fff) | ((hi16_offset << 1) & 0x30000); score_bfd_put_32 (abfd, hi16_value, address - 4); offset = (uvalue & 0xffff) << 1; contents = (contents & (~(howto->dst_mask))) | (offset & 0x7fff) | ((offset << 1) & 0x30000); score_bfd_put_32 (abfd, contents, address); break; case R_SCORE_24: offset = (((contents & howto->src_mask) >> 1) & 0x1ff8000) | ((contents & howto->src_mask) & 0x7fff); offset += increment; contents = (contents & ~howto-> src_mask) | (((offset << 1) & howto->src_mask) & 0x3ff0000) | (offset & 0x7fff); score_bfd_put_32 (abfd, contents, address); break; case R_SCORE16_11: contents = score_bfd_get_16 (abfd, address); offset = contents & howto->src_mask; offset += increment; contents = (contents & ~howto->src_mask) | (offset & howto->src_mask); score_bfd_put_16 (abfd, contents, address); break; case R_SCORE16_PC8: contents = score_bfd_get_16 (abfd, address); offset = (contents & howto->src_mask) + ((increment >> 1) & 0x1ff); contents = (contents & (~howto->src_mask)) | (offset & howto->src_mask); score_bfd_put_16 (abfd, contents, address); break; case R_SCORE_BCMP: contents = score_bfd_get_32 (abfd, address); offset = (contents & howto->src_mask); offset <<= howto->rightshift; offset += increment; offset >>= howto->rightshift; contents = (contents & (~howto->src_mask)) | (offset & howto->src_mask); score_bfd_put_32 (abfd, contents, address); break; case R_SCORE_IMM30: contents = score_bfd_get_48 (abfd, address); offset = (contents & howto->src_mask); offset <<= howto->rightshift; offset += increment; offset >>= howto->rightshift; contents = (contents & (~howto->src_mask)) | (offset & howto->src_mask); score_bfd_put_48 (abfd, contents, address); break; case R_SCORE_IMM32: contents = score_bfd_get_48 (abfd, address); offset = (contents & howto->src_mask); offset += increment; contents = (contents & (~howto->src_mask)) | (offset & howto->src_mask); score_bfd_put_48 (abfd, contents, address); break; default: addend += increment; contents = (contents & ~howto->dst_mask) | (addend & howto->dst_mask); score_bfd_put_32 (abfd, contents, address); break; } } /* Perform a relocation as part of a final link. */ static bfd_reloc_status_type score_elf_final_link_relocate (reloc_howto_type *howto, bfd *input_bfd, bfd *output_bfd, asection *input_section, bfd_byte *contents, Elf_Internal_Rela *rel, Elf_Internal_Rela *relocs, bfd_vma symbol, struct bfd_link_info *info, const char *sym_name ATTRIBUTE_UNUSED, int sym_flags ATTRIBUTE_UNUSED, struct score_elf_link_hash_entry *h, asection **local_sections, bfd_boolean gp_disp_p) { unsigned long r_type; unsigned long r_symndx; bfd_byte *hit_data = contents + rel->r_offset; bfd_vma addend; /* The final GP value to be used for the relocatable, executable, or shared object file being produced. 
*/ bfd_vma gp = MINUS_ONE; /* The place (section offset or address) of the storage unit being relocated. */ bfd_vma rel_addr; /* The offset into the global offset table at which the address of the relocation entry symbol, adjusted by the addend, resides during execution. */ bfd_vma g = MINUS_ONE; /* TRUE if the symbol referred to by this relocation is a local symbol. */ bfd_boolean local_p; /* The eventual value we will relocate. */ bfd_vma value = symbol; unsigned long hi16_addend, hi16_offset, hi16_value, uvalue, offset, abs_value = 0; if (elf_gp (output_bfd) == 0) { struct bfd_link_hash_entry *bh; asection *o; bh = bfd_link_hash_lookup (info->hash, "_gp", 0, 0, 1); if (bh != NULL && bh->type == bfd_link_hash_defined) elf_gp (output_bfd) = (bh->u.def.value + bh->u.def.section->output_section->vma + bh->u.def.section->output_offset); else if (bfd_link_relocatable (info)) { bfd_vma lo = -1; /* Find the GP-relative section with the lowest offset. */ for (o = output_bfd->sections; o != NULL; o = o->next) if (o->vma < lo) lo = o->vma; /* And calculate GP relative to that. */ elf_gp (output_bfd) = lo + ELF_SCORE_GP_OFFSET (input_bfd); } else { /* If the relocate_section function needs to do a reloc involving the GP value, it should make a reloc_dangerous callback to warn that GP is not defined. */ } } /* Parse the relocation. */ r_symndx = ELF32_R_SYM (rel->r_info); r_type = ELF32_R_TYPE (rel->r_info); rel_addr = (input_section->output_section->vma + input_section->output_offset + rel->r_offset); local_p = score_elf_local_relocation_p (input_bfd, rel, local_sections, TRUE); if (r_type == R_SCORE_GOT15) { const Elf_Internal_Rela *relend; const Elf_Internal_Rela *lo16_rel; const struct elf_backend_data *bed; bfd_vma lo_value = 0; bed = get_elf_backend_data (output_bfd); relend = relocs + input_section->reloc_count * bed->s->int_rels_per_ext_rel; lo16_rel = score_elf_next_relocation (input_bfd, R_SCORE_GOT_LO16, rel, relend); if ((local_p) && (lo16_rel != NULL)) { bfd_vma tmp = 0; tmp = score_bfd_get_32 (input_bfd, contents + lo16_rel->r_offset); lo_value = (((tmp >> 16) & 0x3) << 14) | ((tmp & 0x7fff) >> 1); } addend = lo_value; } /* For score3 R_SCORE_ABS32. */ else if (r_type == R_SCORE_ABS32 || r_type == R_SCORE_REL32) { addend = (bfd_get_32 (input_bfd, hit_data) >> howto->bitpos) & howto->src_mask; } else { addend = (score_bfd_get_32 (input_bfd, hit_data) >> howto->bitpos) & howto->src_mask; } /* If we haven't already determined the GOT offset, or the GP value, and we're going to need it, get it now. */ switch (r_type) { case R_SCORE_CALL15: case R_SCORE_GOT15: if (!local_p) { g = score_elf_global_got_index (elf_hash_table (info)->dynobj, (struct elf_link_hash_entry *) h); if ((! elf_hash_table (info)->dynamic_sections_created || (bfd_link_pic (info) && (info->symbolic || h->root.dynindx == -1) && h->root.def_regular))) { /* This is a static link or a -Bsymbolic link. The symbol is defined locally, or was forced to be local. We must initialize this entry in the GOT. */ bfd *tmpbfd = elf_hash_table (info)->dynobj; asection *sgot = score_elf_got_section (tmpbfd, FALSE); score_bfd_put_32 (tmpbfd, value, sgot->contents + g); } } else if (r_type == R_SCORE_GOT15 || r_type == R_SCORE_CALL15) { /* There's no need to create a local GOT entry here; the calculation for a local GOT15 entry does not involve G. 
*/ ; } else { g = score_elf_local_got_index (output_bfd, input_bfd, info, symbol + addend, r_symndx, h, r_type); if (g == MINUS_ONE) return bfd_reloc_outofrange; } /* Convert GOT indices to actual offsets. */ g = score_elf_got_offset_from_index (elf_hash_table (info)->dynobj, output_bfd, input_bfd, g); break; case R_SCORE_HI16: case R_SCORE_LO16: case R_SCORE_GPREL32: gp = _bfd_get_gp_value (output_bfd); break; case R_SCORE_GP15: gp = _bfd_get_gp_value (output_bfd); default: break; } switch (r_type) { case R_SCORE_NONE: return bfd_reloc_ok; case R_SCORE_ABS32: case R_SCORE_REL32: if ((bfd_link_pic (info) || (elf_hash_table (info)->dynamic_sections_created && h != NULL && h->root.def_dynamic && !h->root.def_regular)) && r_symndx != STN_UNDEF && (input_section->flags & SEC_ALLOC) != 0) { /* If we're creating a shared library, or this relocation is against a symbol in a shared library, then we can't know where the symbol will end up. So, we create a relocation record in the output, and leave the job up to the dynamic linker. */ value = addend; if (!score_elf_create_dynamic_relocation (output_bfd, info, rel, h, symbol, &value, input_section)) return bfd_reloc_undefined; } else if (r_symndx == STN_UNDEF) /* r_symndx will be STN_UNDEF (zero) only for relocs against symbols from removed linkonce sections, or sections discarded by a linker script. */ value = 0; else { if (r_type != R_SCORE_REL32) value = symbol + addend; else value = addend; } value &= howto->dst_mask; bfd_put_32 (input_bfd, value, hit_data); return bfd_reloc_ok; case R_SCORE_ABS16: value += addend; if ((long)value > 0x7fff || (long)value < -0x8000) return bfd_reloc_overflow; score_bfd_put_16 (input_bfd, value, hit_data); return bfd_reloc_ok; case R_SCORE_24: addend = score_bfd_get_32 (input_bfd, hit_data); offset = (((addend & howto->src_mask) >> 1) & 0x1ff8000) | ((addend & howto->src_mask) & 0x7fff); if ((offset & 0x1000000) != 0) offset |= 0xfe000000; value += offset; abs_value = value - rel_addr; if ((abs_value & 0xfe000000) != 0) return bfd_reloc_overflow; addend = (addend & ~howto->src_mask) | (((value << 1) & howto->src_mask) & 0x3ff0000) | (value & 0x7fff); score_bfd_put_32 (input_bfd, addend, hit_data); return bfd_reloc_ok; /* signed imm32. */ case R_SCORE_IMM30: { int not_word_align_p = 0; bfd_vma imm_offset = 0; addend = score_bfd_get_48 (input_bfd, hit_data); imm_offset = ((addend >> 7) & 0xff) | (((addend >> 16) & 0x7fff) << 8) | (((addend >> 32) & 0x7f) << 23); imm_offset <<= howto->rightshift; value += imm_offset; value &= 0xffffffff; /* Check lw48/sw48 rd, value/label word align. 
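   (Editor's note, not in the original: lw48/sw48 transfer whole 32-bit
   words, so the resolved address must be 4-byte aligned; a misaligned
   target is flagged here and reported as bfd_reloc_other below.)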
*/ if ((value & 0x3) != 0) not_word_align_p = 1; value >>= howto->rightshift; addend = (addend & ~howto->src_mask) | (((value & 0xff) >> 0) << 7) | (((value & 0x7fff00) >> 8) << 16) | (((value & 0x3f800000) >> 23) << 32); score_bfd_put_48 (input_bfd, addend, hit_data); if (not_word_align_p) return bfd_reloc_other; else return bfd_reloc_ok; } case R_SCORE_IMM32: { bfd_vma imm_offset = 0; addend = score_bfd_get_48 (input_bfd, hit_data); imm_offset = ((addend >> 5) & 0x3ff) | (((addend >> 16) & 0x7fff) << 10) | (((addend >> 32) & 0x7f) << 25); value += imm_offset; value &= 0xffffffff; addend = (addend & ~howto->src_mask) | ((value & 0x3ff) << 5) | (((value >> 10) & 0x7fff) << 16) | (((value >> 25) & 0x7f) << 32); score_bfd_put_48 (input_bfd, addend, hit_data); return bfd_reloc_ok; } case R_SCORE_PC19: addend = score_bfd_get_32 (input_bfd, hit_data); offset = (((addend & howto->src_mask) & 0x3ff0000) >> 6) | ((addend & howto->src_mask) & 0x3ff); if ((offset & 0x80000) != 0) offset |= 0xfff00000; abs_value = value = value - rel_addr + offset; /* exceed 20 bit : overflow. */ if ((abs_value & 0x80000000) == 0x80000000) abs_value = 0xffffffff - value + 1; if ((abs_value & 0xfff80000) != 0) return bfd_reloc_overflow; addend = (addend & ~howto->src_mask) | (((value << 6) & howto->src_mask) & 0x3ff0000) | (value & 0x3ff); score_bfd_put_32 (input_bfd, addend, hit_data); return bfd_reloc_ok; case R_SCORE16_11: addend = score_bfd_get_16 (input_bfd, hit_data); offset = addend & howto->src_mask; if ((offset & 0x800) != 0) /* Offset is negative. */ offset |= 0xfffff000; value += offset; abs_value = value - rel_addr; if ((abs_value & 0xfffff000) != 0) return bfd_reloc_overflow; addend = (addend & ~howto->src_mask) | (value & howto->src_mask); score_bfd_put_16 (input_bfd, addend, hit_data); return bfd_reloc_ok; case R_SCORE16_PC8: addend = score_bfd_get_16 (input_bfd, hit_data); offset = (addend & howto->src_mask) << 1; if ((offset & 0x200) != 0) /* Offset is negative. */ offset |= 0xfffffe00; abs_value = value = value - rel_addr + offset; /* Sign bit + exceed 9 bit. */ if (((value & 0xfffffe00) != 0) && ((value & 0xfffffe00) != 0xfffffe00)) return bfd_reloc_overflow; value >>= 1; addend = (addend & ~howto->src_mask) | (value & howto->src_mask); score_bfd_put_16 (input_bfd, addend, hit_data); return bfd_reloc_ok; case R_SCORE_BCMP: addend = score_bfd_get_32 (input_bfd, hit_data); offset = (addend & howto->src_mask) << howto->rightshift; if ((offset & 0x200) != 0) /* Offset is negative. */ offset |= 0xfffffe00; value = value - rel_addr + offset; /* Sign bit + exceed 9 bit. 
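In other words, bits 9..31 of the displacement must be all clear or all set (a proper sign extension of a signed 10-bit field); anything else overflows.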
*/ if (((value & 0xfffffe00) != 0) && ((value & 0xfffffe00) != 0xfffffe00)) return bfd_reloc_overflow; value >>= howto->rightshift; addend = (addend & ~howto->src_mask) | (value & 0x1) | (((value >> 1) & 0x7) << 7) | (((value >> 4) & 0x1f) << 21); score_bfd_put_32 (input_bfd, addend, hit_data); return bfd_reloc_ok; case R_SCORE_HI16: return bfd_reloc_ok; case R_SCORE_LO16: hi16_addend = score_bfd_get_32 (input_bfd, hit_data - 4); hi16_offset = ((((hi16_addend >> 16) & 0x3) << 15) | (hi16_addend & 0x7fff)) >> 1; addend = score_bfd_get_32 (input_bfd, hit_data); offset = ((((addend >> 16) & 0x3) << 15) | (addend & 0x7fff)) >> 1; offset = (hi16_offset << 16) | (offset & 0xffff); if (!gp_disp_p) uvalue = value + offset; else uvalue = offset + gp - rel_addr + 4; hi16_offset = (uvalue >> 16) << 1; hi16_value = (hi16_addend & (~(howto->dst_mask))) | (hi16_offset & 0x7fff) | ((hi16_offset << 1) & 0x30000); score_bfd_put_32 (input_bfd, hi16_value, hit_data - 4); offset = (uvalue & 0xffff) << 1; value = (addend & (~(howto->dst_mask))) | (offset & 0x7fff) | ((offset << 1) & 0x30000); score_bfd_put_32 (input_bfd, value, hit_data); return bfd_reloc_ok; case R_SCORE_GP15: addend = score_bfd_get_32 (input_bfd, hit_data); offset = addend & 0x7fff; if ((offset & 0x4000) == 0x4000) offset |= 0xffffc000; value = value + offset - gp; if (((value & 0xffffc000) != 0) && ((value & 0xffffc000) != 0xffffc000)) return bfd_reloc_overflow; value = (addend & ~howto->src_mask) | (value & howto->src_mask); score_bfd_put_32 (input_bfd, value, hit_data); return bfd_reloc_ok; case R_SCORE_GOT15: case R_SCORE_CALL15: if (local_p) { bfd_boolean forced; /* The special case is when the symbol is forced to be local. We need the full address in the GOT since no R_SCORE_GOT_LO16 relocation follows. */ forced = ! score_elf_local_relocation_p (input_bfd, rel, local_sections, FALSE); value = score_elf_got16_entry (output_bfd, input_bfd, info, symbol + addend, forced); if (value == MINUS_ONE) return bfd_reloc_outofrange; value = score_elf_got_offset_from_index (elf_hash_table (info)->dynobj, output_bfd, input_bfd, value); } else { value = g; } if ((long) value > 0x3fff || (long) value < -0x4000) return bfd_reloc_overflow; addend = score_bfd_get_32 (input_bfd, hit_data); value = (addend & ~howto->dst_mask) | (value & howto->dst_mask); score_bfd_put_32 (input_bfd, value, hit_data); return bfd_reloc_ok; case R_SCORE_GPREL32: value = (addend + symbol - gp); value &= howto->dst_mask; score_bfd_put_32 (input_bfd, value, hit_data); return bfd_reloc_ok; case R_SCORE_GOT_LO16: addend = score_bfd_get_32 (input_bfd, hit_data); value = (((addend >> 16) & 0x3) << 14) | ((addend & 0x7fff) >> 1); value += symbol; value = (addend & (~(howto->dst_mask))) | ((value & 0x3fff) << 1) | (((value >> 14) & 0x3) << 16); score_bfd_put_32 (input_bfd, value, hit_data); return bfd_reloc_ok; case R_SCORE_DUMMY_HI16: return bfd_reloc_ok; case R_SCORE_GNU_VTINHERIT: case R_SCORE_GNU_VTENTRY: /* We don't do anything with these at present. */ return bfd_reloc_continue; default: return bfd_reloc_notsupported; } } /* Score backend functions. */ static void s3_bfd_score_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc, Elf_Internal_Rela *elf_reloc) { unsigned int r_type; r_type = ELF32_R_TYPE (elf_reloc->r_info); if (r_type >= ARRAY_SIZE (elf32_score_howto_table)) bfd_reloc->howto = NULL; else bfd_reloc->howto = &elf32_score_howto_table[r_type]; } /* Relocate a SCORE ELF section.
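Walk the relocations of INPUT_SECTION, resolve the symbol each one refers to (local symbols directly, global symbols through the ELF linker hash table), and apply the result via score_elf_final_link_relocate.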
*/ static bfd_boolean s3_bfd_score_elf_relocate_section (bfd *output_bfd, struct bfd_link_info *info, bfd *input_bfd, asection *input_section, bfd_byte *contents, Elf_Internal_Rela *relocs, Elf_Internal_Sym *local_syms, asection **local_sections) { Elf_Internal_Shdr *symtab_hdr; Elf_Internal_Rela *rel; Elf_Internal_Rela *relend; const char *name; unsigned long offset; unsigned long hi16_addend, hi16_offset, hi16_value, uvalue; size_t extsymoff; bfd_boolean gp_disp_p = FALSE; /* Sort dynsym. */ if (elf_hash_table (info)->dynamic_sections_created) { bfd_size_type dynsecsymcount = 0; if (bfd_link_pic (info)) { asection * p; const struct elf_backend_data *bed = get_elf_backend_data (output_bfd); for (p = output_bfd->sections; p ; p = p->next) if ((p->flags & SEC_EXCLUDE) == 0 && (p->flags & SEC_ALLOC) != 0 && !(*bed->elf_backend_omit_section_dynsym) (output_bfd, info, p)) ++ dynsecsymcount; } if (!score_elf_sort_hash_table (info, dynsecsymcount + 1)) return FALSE; } symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; extsymoff = (elf_bad_symtab (input_bfd)) ? 0 : symtab_hdr->sh_info; rel = relocs; relend = relocs + input_section->reloc_count; for (; rel < relend; rel++) { int r_type; reloc_howto_type *howto; unsigned long r_symndx; Elf_Internal_Sym *sym; asection *sec; struct score_elf_link_hash_entry *h; bfd_vma relocation = 0; bfd_reloc_status_type r; arelent bfd_reloc; r_symndx = ELF32_R_SYM (rel->r_info); r_type = ELF32_R_TYPE (rel->r_info); s3_bfd_score_info_to_howto (input_bfd, &bfd_reloc, (Elf_Internal_Rela *) rel); howto = bfd_reloc.howto; h = NULL; sym = NULL; sec = NULL; if (r_symndx < extsymoff) { sym = local_syms + r_symndx; sec = local_sections[r_symndx]; relocation = (sec->output_section->vma + sec->output_offset + sym->st_value); name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec); if (!bfd_link_relocatable (info) && (sec->flags & SEC_MERGE) && ELF_ST_TYPE (sym->st_info) == STT_SECTION) { asection *msec; bfd_vma addend, value; switch (r_type) { case R_SCORE_HI16: break; case R_SCORE_LO16: hi16_addend = score_bfd_get_32 (input_bfd, contents + rel->r_offset - 4); hi16_offset = ((((hi16_addend >> 16) & 0x3) << 15) | (hi16_addend & 0x7fff)) >> 1; value = score_bfd_get_32 (input_bfd, contents + rel->r_offset); offset = ((((value >> 16) & 0x3) << 15) | (value & 0x7fff)) >> 1; addend = (hi16_offset << 16) | (offset & 0xffff); msec = sec; addend = _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend); addend -= relocation; addend += msec->output_section->vma + msec->output_offset; uvalue = addend; hi16_offset = (uvalue >> 16) << 1; hi16_value = (hi16_addend & (~(howto->dst_mask))) | (hi16_offset & 0x7fff) | ((hi16_offset << 1) & 0x30000); score_bfd_put_32 (input_bfd, hi16_value, contents + rel->r_offset - 4); offset = (uvalue & 0xffff) << 1; value = (value & (~(howto->dst_mask))) | (offset & 0x7fff) | ((offset << 1) & 0x30000); score_bfd_put_32 (input_bfd, value, contents + rel->r_offset); break; case R_SCORE_IMM32: { value = score_bfd_get_48 (input_bfd, contents + rel->r_offset); addend = ((value >> 5) & 0x3ff) | (((value >> 16) & 0x7fff) << 10) | (((value >> 32) & 0x7f) << 25); msec = sec; addend = _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend); addend -= relocation; addend += msec->output_section->vma + msec->output_offset; addend &= 0xffffffff; value = (value & ~howto->src_mask) | ((addend & 0x3ff) << 5) | (((addend >> 10) & 0x7fff) << 16) | (((addend >> 25) & 0x7f) << 32); score_bfd_put_48 (input_bfd, value, contents + rel->r_offset); break; } case R_SCORE_IMM30: { 
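/* Same SEC_MERGE section-symbol adjustment as R_SCORE_IMM32 above, except that the IMM30 field is scaled by howto->rightshift and the adjusted value must remain word aligned. */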
int not_word_align_p = 0; value = score_bfd_get_48 (input_bfd, contents + rel->r_offset); addend = ((value >> 7) & 0xff) | (((value >> 16) & 0x7fff) << 8) | (((value >> 32) & 0x7f) << 23); addend <<= howto->rightshift; msec = sec; addend = _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend); addend -= relocation; addend += msec->output_section->vma + msec->output_offset; addend &= 0xffffffff; /* Check lw48/sw48 rd, value/label word align. */ if ((addend & 0x3) != 0) not_word_align_p = 1; addend >>= howto->rightshift; value = (value & ~howto->src_mask) | (((addend & 0xff) >> 0) << 7) | (((addend & 0x7fff00) >> 8) << 16) | (((addend & 0x3f800000) >> 23) << 32); score_bfd_put_48 (input_bfd, value, contents + rel->r_offset); if (not_word_align_p) return bfd_reloc_other; else break; } case R_SCORE_GOT_LO16: value = score_bfd_get_32 (input_bfd, contents + rel->r_offset); addend = (((value >> 16) & 0x3) << 14) | ((value & 0x7fff) >> 1); msec = sec; addend = _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend) - relocation; addend += msec->output_section->vma + msec->output_offset; value = (value & (~(howto->dst_mask))) | ((addend & 0x3fff) << 1) | (((addend >> 14) & 0x3) << 16); score_bfd_put_32 (input_bfd, value, contents + rel->r_offset); break; case R_SCORE_ABS32: case R_SCORE_REL32: value = bfd_get_32 (input_bfd, contents + rel->r_offset); /* Get the (signed) value from the instruction. */ addend = value & howto->src_mask; if (addend & ((howto->src_mask + 1) >> 1)) { bfd_signed_vma mask; mask = -1; mask &= ~howto->src_mask; addend |= mask; } msec = sec; addend = _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend) - relocation; addend += msec->output_section->vma + msec->output_offset; value = (value & ~howto->dst_mask) | (addend & howto->dst_mask); bfd_put_32 (input_bfd, value, contents + rel->r_offset); break; default: value = score_bfd_get_32 (input_bfd, contents + rel->r_offset); /* Get the (signed) value from the instruction. */ addend = value & howto->src_mask; if (addend & ((howto->src_mask + 1) >> 1)) { bfd_signed_vma mask; mask = -1; mask &= ~howto->src_mask; addend |= mask; } msec = sec; addend = _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend) - relocation; addend += msec->output_section->vma + msec->output_offset; value = (value & ~howto->dst_mask) | (addend & howto->dst_mask); score_bfd_put_32 (input_bfd, value, contents + rel->r_offset); break; } } } else { /* For global symbols we look up the symbol in the hash-table. */ h = ((struct score_elf_link_hash_entry *) elf_sym_hashes (input_bfd) [r_symndx - extsymoff]); if (info->wrap_hash != NULL && (input_section->flags & SEC_DEBUGGING) != 0) h = ((struct score_elf_link_hash_entry *) unwrap_hash_lookup (info, input_bfd, &h->root.root)); /* Find the real hash-table entry for this symbol. */ while (h->root.root.type == bfd_link_hash_indirect || h->root.root.type == bfd_link_hash_warning) h = (struct score_elf_link_hash_entry *) h->root.root.u.i.link; /* Record the name of this symbol, for our caller. */ name = h->root.root.root.string; /* See if this is the special GP_DISP_LABEL symbol. Note that such a symbol must always be a global symbol. */ if (strcmp (name, GP_DISP_LABEL) == 0) { /* Relocations against GP_DISP_LABEL are permitted only with R_SCORE_HI16 and R_SCORE_LO16 relocations. */ if (r_type != R_SCORE_HI16 && r_type != R_SCORE_LO16) return bfd_reloc_notsupported; gp_disp_p = TRUE; } /* If this symbol is defined, calculate its address. 
Note that GP_DISP_LABEL is a magic symbol, always implicitly defined by the linker, so it's inappropriate to check whether or not it is defined. */ else if ((h->root.root.type == bfd_link_hash_defined || h->root.root.type == bfd_link_hash_defweak) && h->root.root.u.def.section) { sec = h->root.root.u.def.section; if (sec->output_section) relocation = (h->root.root.u.def.value + sec->output_section->vma + sec->output_offset); else { relocation = h->root.root.u.def.value; } } else if (h->root.root.type == bfd_link_hash_undefweak) /* We allow relocations against undefined weak symbols, giving them the value zero, so that you can reference undefined weak functions and check whether they exist by looking at their addresses. */ relocation = 0; else if (info->unresolved_syms_in_objects == RM_IGNORE && ELF_ST_VISIBILITY (h->root.other) == STV_DEFAULT) relocation = 0; else if (strcmp (name, "_DYNAMIC_LINK") == 0) { /* If this is a dynamic link, we should have created a _DYNAMIC_LINK symbol in s3_bfd_score_elf_create_dynamic_sections. Otherwise, we should define the symbol with a value of 0. */ BFD_ASSERT (! bfd_link_pic (info)); BFD_ASSERT (bfd_get_section_by_name (output_bfd, ".dynamic") == NULL); relocation = 0; } else if (!bfd_link_relocatable (info)) { (*info->callbacks->undefined_symbol) (info, h->root.root.root.string, input_bfd, input_section, rel->r_offset, (info->unresolved_syms_in_objects == RM_GENERATE_ERROR) || ELF_ST_VISIBILITY (h->root.other)); relocation = 0; } } if (sec != NULL && discarded_section (sec)) RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, rel, 1, relend, howto, 0, contents); if (bfd_link_relocatable (info)) { /* This is a relocatable link. We don't have to change anything, unless the reloc is against a section symbol, in which case we have to adjust according to where the section symbol winds up in the output section. */ if (r_symndx < symtab_hdr->sh_info) { sym = local_syms + r_symndx; if (ELF_ST_TYPE (sym->st_info) == STT_SECTION) { sec = local_sections[r_symndx]; score_elf_add_to_rel (input_bfd, contents + rel->r_offset, howto, (bfd_signed_vma) (sec->output_offset + sym->st_value)); } } continue; } /* This is a final link. */ r = score_elf_final_link_relocate (howto, input_bfd, output_bfd, input_section, contents, rel, relocs, relocation, info, name, (h ? ELF_ST_TYPE ((unsigned int)h->root.root.type) : ELF_ST_TYPE ((unsigned int)sym->st_info)), h, local_sections, gp_disp_p); if (r != bfd_reloc_ok) { const char *msg = (const char *)0; switch (r) { case bfd_reloc_overflow: /* If the overflowing reloc was to an undefined symbol, we have already printed one error message and there is no point complaining again. */ if (!h || h->root.root.type != bfd_link_hash_undefined) (*info->callbacks->reloc_overflow) (info, NULL, name, howto->name, (bfd_vma) 0, input_bfd, input_section, rel->r_offset); break; case bfd_reloc_undefined: (*info->callbacks->undefined_symbol) (info, name, input_bfd, input_section, rel->r_offset, TRUE); break; case bfd_reloc_outofrange: msg = _("internal error: out of range error"); goto common_error; case bfd_reloc_notsupported: msg = _("internal error: unsupported relocation error"); goto common_error; case bfd_reloc_dangerous: msg = _("internal error: dangerous error"); goto common_error; /* Use bfd_reloc_other to check lw48, sw48 word align. */ case bfd_reloc_other: msg = _("address not word align"); goto common_error; default: msg = _("internal error: unknown error"); /* Fall through.
*/ common_error: (*info->callbacks->warning) (info, msg, name, input_bfd, input_section, rel->r_offset); break; } } } return TRUE; } /* Look through the relocs for a section during the first phase, and allocate space in the global offset table. */ static bfd_boolean s3_bfd_score_elf_check_relocs (bfd *abfd, struct bfd_link_info *info, asection *sec, const Elf_Internal_Rela *relocs) { bfd *dynobj; Elf_Internal_Shdr *symtab_hdr; struct elf_link_hash_entry **sym_hashes; struct score_got_info *g; size_t extsymoff; const Elf_Internal_Rela *rel; const Elf_Internal_Rela *rel_end; asection *sgot; asection *sreloc; const struct elf_backend_data *bed; if (bfd_link_relocatable (info)) return TRUE; dynobj = elf_hash_table (info)->dynobj; symtab_hdr = &elf_tdata (abfd)->symtab_hdr; sym_hashes = elf_sym_hashes (abfd); extsymoff = (elf_bad_symtab (abfd)) ? 0 : symtab_hdr->sh_info; if (dynobj == NULL) { sgot = NULL; g = NULL; } else { sgot = score_elf_got_section (dynobj, FALSE); if (sgot == NULL) g = NULL; else { BFD_ASSERT (score_elf_section_data (sgot) != NULL); g = score_elf_section_data (sgot)->u.got_info; BFD_ASSERT (g != NULL); } } sreloc = NULL; bed = get_elf_backend_data (abfd); rel_end = relocs + sec->reloc_count * bed->s->int_rels_per_ext_rel; for (rel = relocs; rel < rel_end; ++rel) { unsigned long r_symndx; unsigned int r_type; struct elf_link_hash_entry *h; r_symndx = ELF32_R_SYM (rel->r_info); r_type = ELF32_R_TYPE (rel->r_info); if (r_symndx < extsymoff) { h = NULL; } else if (r_symndx >= extsymoff + NUM_SHDR_ENTRIES (symtab_hdr)) { _bfd_error_handler /* xgettext:c-format */ (_("%B: Malformed reloc detected for section %A"), abfd, sec); bfd_set_error (bfd_error_bad_value); return FALSE; } else { h = sym_hashes[r_symndx - extsymoff]; /* This may be an indirect symbol created because of a version. */ if (h != NULL) { while (h->root.type == bfd_link_hash_indirect) h = (struct elf_link_hash_entry *)h->root.u.i.link; /* PR15323, ref flags aren't set for references in the same object. */ h->root.non_ir_ref = 1; } } /* Some relocs require a global offset table. */ if (dynobj == NULL || sgot == NULL) { switch (r_type) { case R_SCORE_GOT15: case R_SCORE_CALL15: if (dynobj == NULL) elf_hash_table (info)->dynobj = dynobj = abfd; if (!score_elf_create_got_section (dynobj, info, FALSE)) return FALSE; g = score_elf_got_info (dynobj, &sgot); break; case R_SCORE_ABS32: case R_SCORE_REL32: if (dynobj == NULL && (bfd_link_pic (info) || h != NULL) && (sec->flags & SEC_ALLOC) != 0) elf_hash_table (info)->dynobj = dynobj = abfd; break; default: break; } } if (!h && (r_type == R_SCORE_GOT_LO16)) { if (! score_elf_record_local_got_symbol (abfd, r_symndx, rel->r_addend, g)) return FALSE; } switch (r_type) { case R_SCORE_CALL15: if (h == NULL) { _bfd_error_handler /* xgettext:c-format */ (_("%B: CALL15 reloc at 0x%lx not against global symbol"), abfd, (unsigned long) rel->r_offset); bfd_set_error (bfd_error_bad_value); return FALSE; } else { /* This symbol requires a global offset table entry. */ if (! score_elf_record_global_got_symbol (h, abfd, info, g)) return FALSE; /* We need a stub, not a plt entry for the undefined function. But we record it as if it needs plt. See _bfd_elf_adjust_dynamic_symbol. */ h->needs_plt = 1; h->type = STT_FUNC; } break; case R_SCORE_GOT15: if (h && ! 
score_elf_record_global_got_symbol (h, abfd, info, g)) return FALSE; break; case R_SCORE_ABS32: case R_SCORE_REL32: if ((bfd_link_pic (info) || h != NULL) && (sec->flags & SEC_ALLOC) != 0) { if (sreloc == NULL) { sreloc = score_elf_rel_dyn_section (dynobj, TRUE); if (sreloc == NULL) return FALSE; } #define SCORE_READONLY_SECTION (SEC_ALLOC | SEC_LOAD | SEC_READONLY) if (bfd_link_pic (info)) { /* When creating a shared object, we must copy these reloc types into the output file as R_SCORE_REL32 relocs. We make room for this reloc in the .rel.dyn reloc section. */ score_elf_allocate_dynamic_relocations (dynobj, 1); if ((sec->flags & SCORE_READONLY_SECTION) == SCORE_READONLY_SECTION) /* We tell the dynamic linker that there are relocations against the text segment. */ info->flags |= DF_TEXTREL; } else { struct score_elf_link_hash_entry *hscore; /* We only need to copy this reloc if the symbol is defined in a dynamic object. */ hscore = (struct score_elf_link_hash_entry *)h; ++hscore->possibly_dynamic_relocs; if ((sec->flags & SCORE_READONLY_SECTION) == SCORE_READONLY_SECTION) /* We need it to tell the dynamic linker if there are relocations against the text segment. */ hscore->readonly_reloc = TRUE; } /* Even though we don't directly need a GOT entry for this symbol, a symbol must have a dynamic symbol table index greater than DT_SCORE_GOTSYM if there are dynamic relocations against it. */ if (h != NULL) { if (dynobj == NULL) elf_hash_table (info)->dynobj = dynobj = abfd; if (! score_elf_create_got_section (dynobj, info, TRUE)) return FALSE; g = score_elf_got_info (dynobj, &sgot); if (! score_elf_record_global_got_symbol (h, abfd, info, g)) return FALSE; } } break; /* This relocation describes the C++ object vtable hierarchy. Reconstruct it for later use during GC. */ case R_SCORE_GNU_VTINHERIT: if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset)) return FALSE; break; /* This relocation describes which C++ vtable entries are actually used. Record for later use during GC. */ case R_SCORE_GNU_VTENTRY: if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset)) return FALSE; break; default: break; } /* We must not create a stub for a symbol that has relocations related to taking the function's address. */ switch (r_type) { default: if (h != NULL) { struct score_elf_link_hash_entry *sh; sh = (struct score_elf_link_hash_entry *) h; sh->no_fn_stub = TRUE; } break; case R_SCORE_CALL15: break; } } return TRUE; } static bfd_boolean s3_bfd_score_elf_add_symbol_hook (bfd *abfd, struct bfd_link_info *info ATTRIBUTE_UNUSED, Elf_Internal_Sym *sym, const char **namep ATTRIBUTE_UNUSED, flagword *flagsp ATTRIBUTE_UNUSED, asection **secp, bfd_vma *valp) { switch (sym->st_shndx) { case SHN_COMMON: if (sym->st_size > elf_gp_size (abfd)) break; /* Fall through. */ case SHN_SCORE_SCOMMON: *secp = bfd_make_section_old_way (abfd, ".scommon"); (*secp)->flags |= SEC_IS_COMMON; *valp = sym->st_size; break; } return TRUE; } static void s3_bfd_score_elf_symbol_processing (bfd *abfd, asymbol *asym) { elf_symbol_type *elfsym; elfsym = (elf_symbol_type *) asym; switch (elfsym->internal_elf_sym.st_shndx) { case SHN_COMMON: if (asym->value > elf_gp_size (abfd)) break; /* Fall through. */ case SHN_SCORE_SCOMMON: if (score_elf_scom_section.name == NULL) { /* Initialize the small common section.
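score_elf_scom_section is a fake BFD section, initialized once and shared by all input BFDs, which lets the generic linker code treat SHN_SCORE_SCOMMON symbols like ordinary common symbols.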
*/ score_elf_scom_section.name = ".scommon"; score_elf_scom_section.flags = SEC_IS_COMMON; score_elf_scom_section.output_section = &score_elf_scom_section; score_elf_scom_section.symbol = &score_elf_scom_symbol; score_elf_scom_section.symbol_ptr_ptr = &score_elf_scom_symbol_ptr; score_elf_scom_symbol.name = ".scommon"; score_elf_scom_symbol.flags = BSF_SECTION_SYM; score_elf_scom_symbol.section = &score_elf_scom_section; score_elf_scom_symbol_ptr = &score_elf_scom_symbol; } asym->section = &score_elf_scom_section; asym->value = elfsym->internal_elf_sym.st_size; break; } } static int s3_bfd_score_elf_link_output_symbol_hook (struct bfd_link_info *info ATTRIBUTE_UNUSED, const char *name ATTRIBUTE_UNUSED, Elf_Internal_Sym *sym, asection *input_sec, struct elf_link_hash_entry *h ATTRIBUTE_UNUSED) { /* If we see a common symbol, which implies a relocatable link, then if a symbol was small common in an input file, mark it as small common in the output file. */ if (sym->st_shndx == SHN_COMMON && strcmp (input_sec->name, ".scommon") == 0) sym->st_shndx = SHN_SCORE_SCOMMON; return 1; } static bfd_boolean s3_bfd_score_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED, asection *sec, int *retval) { if (strcmp (bfd_get_section_name (abfd, sec), ".scommon") == 0) { *retval = SHN_SCORE_SCOMMON; return TRUE; } return FALSE; } /* Adjust a symbol defined by a dynamic object and referenced by a regular object. The current definition is in some section of the dynamic object, but we're not including those sections. We have to change the definition to something the rest of the link can understand. */ static bfd_boolean s3_bfd_score_elf_adjust_dynamic_symbol (struct bfd_link_info *info, struct elf_link_hash_entry *h) { bfd *dynobj; struct score_elf_link_hash_entry *hscore; asection *s; dynobj = elf_hash_table (info)->dynobj; /* Make sure we know what is going on here. */ BFD_ASSERT (dynobj != NULL && (h->needs_plt || h->u.weakdef != NULL || (h->def_dynamic && h->ref_regular && !h->def_regular))); /* If this symbol is defined in a dynamic object, we need to copy any R_SCORE_ABS32 or R_SCORE_REL32 relocs against it into the output file. */ hscore = (struct score_elf_link_hash_entry *)h; if (!bfd_link_relocatable (info) && hscore->possibly_dynamic_relocs != 0 && (h->root.type == bfd_link_hash_defweak || !h->def_regular)) { score_elf_allocate_dynamic_relocations (dynobj, hscore->possibly_dynamic_relocs); if (hscore->readonly_reloc) /* We tell the dynamic linker that there are relocations against the text segment. */ info->flags |= DF_TEXTREL; } /* For a function, create a stub, if allowed. */ if (!hscore->no_fn_stub && h->needs_plt) { if (!elf_hash_table (info)->dynamic_sections_created) return TRUE; /* If this symbol is not defined in a regular file, then set the symbol to the stub location. This is required to make function pointers compare as equal between the normal executable and the shared library. */ if (!h->def_regular) { /* We need .stub section. */ s = bfd_get_linker_section (dynobj, SCORE_ELF_STUB_SECTION_NAME); BFD_ASSERT (s != NULL); h->root.u.def.section = s; h->root.u.def.value = s->size; /* XXX Write this stub address somewhere. */ h->plt.offset = s->size; /* Make room for this stub code. */ s->size += SCORE_FUNCTION_STUB_SIZE; /* The last half word of the stub will be filled with the index of this symbol in .dynsym section. 
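(s3_bfd_score_elf_finish_dynamic_symbol does that, or'ing the dynamic symbol index into the STUB_LI16 instruction.)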
*/ return TRUE; } } else if ((h->type == STT_FUNC) && !h->needs_plt) { /* This will set the entry for this symbol in the GOT to 0, and the dynamic linker will take care of this. */ h->root.u.def.value = 0; return TRUE; } /* If this is a weak symbol, and there is a real definition, the processor independent code will have arranged for us to see the real definition first, and we can just use the same value. */ if (h->u.weakdef != NULL) { BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined || h->u.weakdef->root.type == bfd_link_hash_defweak); h->root.u.def.section = h->u.weakdef->root.u.def.section; h->root.u.def.value = h->u.weakdef->root.u.def.value; return TRUE; } /* This is a reference to a symbol defined by a dynamic object which is not a function. */ return TRUE; } /* This function is called after all the input files have been read, and the input sections have been assigned to output sections. */ static bfd_boolean s3_bfd_score_elf_always_size_sections (bfd *output_bfd, struct bfd_link_info *info) { bfd *dynobj; asection *s; struct score_got_info *g; int i; bfd_size_type loadable_size = 0; bfd_size_type local_gotno; bfd *sub; dynobj = elf_hash_table (info)->dynobj; if (dynobj == NULL) /* Relocatable links don't have it. */ return TRUE; g = score_elf_got_info (dynobj, &s); if (s == NULL) return TRUE; /* Calculate the total loadable size of the output. That will give us the maximum number of GOT_PAGE entries required. */ for (sub = info->input_bfds; sub; sub = sub->link.next) { asection *subsection; for (subsection = sub->sections; subsection; subsection = subsection->next) { if ((subsection->flags & SEC_ALLOC) == 0) continue; loadable_size += ((subsection->size + 0xf) &~ (bfd_size_type) 0xf); } } /* There has to be a global GOT entry for every symbol with a dynamic symbol table index of DT_SCORE_GOTSYM or higher. Therefore, it makes sense to put those symbols that need GOT entries at the end of the symbol table. We do that here. */ if (! score_elf_sort_hash_table (info, 1)) return FALSE; if (g->global_gotsym != NULL) i = elf_hash_table (info)->dynsymcount - g->global_gotsym->dynindx; else /* If there are no global symbols, or none requiring relocations, then GLOBAL_GOTSYM will be NULL. */ i = 0; /* In the worst case, we'll get one stub per dynamic symbol. */ loadable_size += SCORE_FUNCTION_STUB_SIZE * i; /* Assume there are two loadable segments consisting of contiguous sections. Is 5 enough? */ local_gotno = (loadable_size >> 16) + 5; g->local_gotno += local_gotno; s->size += g->local_gotno * SCORE_ELF_GOT_SIZE (output_bfd); g->global_gotno = i; s->size += i * SCORE_ELF_GOT_SIZE (output_bfd); score_elf_resolve_final_got_entries (g); if (s->size > SCORE_ELF_GOT_MAX_SIZE (output_bfd)) { /* FIXME: An error or warning message should be issued here. */ } return TRUE; } /* Set the sizes of the dynamic sections. */ static bfd_boolean s3_bfd_score_elf_size_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info) { bfd *dynobj; asection *s; bfd_boolean reltext; dynobj = elf_hash_table (info)->dynobj; BFD_ASSERT (dynobj != NULL); if (elf_hash_table (info)->dynamic_sections_created) { /* Set the contents of the .interp section to the interpreter.
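Only executables need an interpreter; shared objects skip this, as do links that explicitly request no interpreter (info->nointerp).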
*/ if (!bfd_link_pic (info) && !info->nointerp) { s = bfd_get_linker_section (dynobj, ".interp"); BFD_ASSERT (s != NULL); s->size = strlen (ELF_DYNAMIC_INTERPRETER) + 1; s->contents = (bfd_byte *) ELF_DYNAMIC_INTERPRETER; } } /* The check_relocs and adjust_dynamic_symbol entry points have determined the sizes of the various dynamic sections. Allocate memory for them. */ reltext = FALSE; for (s = dynobj->sections; s != NULL; s = s->next) { const char *name; if ((s->flags & SEC_LINKER_CREATED) == 0) continue; /* It's OK to base decisions on the section name, because none of the dynobj section names depend upon the input files. */ name = bfd_get_section_name (dynobj, s); if (CONST_STRNEQ (name, ".rel")) { if (s->size == 0) { /* We only strip the section if the output section name has the same name. Otherwise, there might be several input sections for this output section. FIXME: This code is probably not needed these days anyhow, since the linker now does not create empty output sections. */ if (s->output_section != NULL && strcmp (name, bfd_get_section_name (s->output_section->owner, s->output_section)) == 0) s->flags |= SEC_EXCLUDE; } else { const char *outname; asection *target; /* If this relocation section applies to a read only section, then we probably need a DT_TEXTREL entry. If the relocation section is .rel.dyn, we always assert a DT_TEXTREL entry rather than testing whether there exists a relocation to a read only section or not. */ outname = bfd_get_section_name (output_bfd, s->output_section); target = bfd_get_section_by_name (output_bfd, outname + 4); if ((target != NULL && (target->flags & SEC_READONLY) != 0 && (target->flags & SEC_ALLOC) != 0) || strcmp (outname, ".rel.dyn") == 0) reltext = TRUE; /* We use the reloc_count field as a counter if we need to copy relocs into the output file. */ if (strcmp (name, ".rel.dyn") != 0) s->reloc_count = 0; } } else if (CONST_STRNEQ (name, ".got")) { /* s3_bfd_score_elf_always_size_sections() has already done most of the work, but some symbols may have been mapped to versions that we must now resolve in the got_entries hash tables. */ } else if (strcmp (name, SCORE_ELF_STUB_SECTION_NAME) == 0) { /* IRIX rld assumes that the function stub isn't at the end of .text section. So put a dummy. XXX */ s->size += SCORE_FUNCTION_STUB_SIZE; } else if (! CONST_STRNEQ (name, ".init")) { /* It's not one of our sections, so don't allocate space. */ continue; } /* Allocate memory for the section contents. */ s->contents = bfd_zalloc (dynobj, s->size); if (s->contents == NULL && s->size != 0) { bfd_set_error (bfd_error_no_memory); return FALSE; } } if (elf_hash_table (info)->dynamic_sections_created) { /* Add some entries to the .dynamic section. We fill in the values later, in s3_bfd_score_elf_finish_dynamic_sections, but we must add the entries now so that we get the correct size for the .dynamic section. The DT_DEBUG entry is filled in by the dynamic linker and used by the debugger. */ if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_DEBUG, 0)) return FALSE; if (reltext) info->flags |= DF_TEXTREL; if ((info->flags & DF_TEXTREL) != 0) { if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_TEXTREL, 0)) return FALSE; } if (! 
SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_PLTGOT, 0)) return FALSE; if (score_elf_rel_dyn_section (dynobj, FALSE)) { if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_REL, 0)) return FALSE; if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_RELSZ, 0)) return FALSE; if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_RELENT, 0)) return FALSE; } if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_SCORE_BASE_ADDRESS, 0)) return FALSE; if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_SCORE_LOCAL_GOTNO, 0)) return FALSE; if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_SCORE_SYMTABNO, 0)) return FALSE; if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_SCORE_UNREFEXTNO, 0)) return FALSE; if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_SCORE_GOTSYM, 0)) return FALSE; if (!SCORE_ELF_ADD_DYNAMIC_ENTRY (info, DT_SCORE_HIPAGENO, 0)) return FALSE; } return TRUE; } static bfd_boolean s3_bfd_score_elf_create_dynamic_sections (bfd *abfd, struct bfd_link_info *info) { struct elf_link_hash_entry *h; struct bfd_link_hash_entry *bh; flagword flags; asection *s; flags = (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY); /* ABI requests the .dynamic section to be read only. */ s = bfd_get_linker_section (abfd, ".dynamic"); if (s != NULL) { if (!bfd_set_section_flags (abfd, s, flags)) return FALSE; } /* We need to create .got section. */ if (!score_elf_create_got_section (abfd, info, FALSE)) return FALSE; if (!score_elf_rel_dyn_section (elf_hash_table (info)->dynobj, TRUE)) return FALSE; /* Create .stub section. */ if (bfd_get_linker_section (abfd, SCORE_ELF_STUB_SECTION_NAME) == NULL) { s = bfd_make_section_anyway_with_flags (abfd, SCORE_ELF_STUB_SECTION_NAME, flags | SEC_CODE); if (s == NULL || !bfd_set_section_alignment (abfd, s, 2)) return FALSE; } if (!bfd_link_pic (info)) { const char *name; name = "_DYNAMIC_LINK"; bh = NULL; if (!(_bfd_generic_link_add_one_symbol (info, abfd, name, BSF_GLOBAL, bfd_abs_section_ptr, (bfd_vma) 0, NULL, FALSE, get_elf_backend_data (abfd)->collect, &bh))) return FALSE; h = (struct elf_link_hash_entry *)bh; h->non_elf = 0; h->def_regular = 1; h->type = STT_SECTION; if (!bfd_elf_link_record_dynamic_symbol (info, h)) return FALSE; } return TRUE; } /* Finish up dynamic symbol handling. We set the contents of various dynamic sections here. */ static bfd_boolean s3_bfd_score_elf_finish_dynamic_symbol (bfd *output_bfd, struct bfd_link_info *info, struct elf_link_hash_entry *h, Elf_Internal_Sym *sym) { bfd *dynobj; asection *sgot; struct score_got_info *g; const char *name; dynobj = elf_hash_table (info)->dynobj; if (h->plt.offset != MINUS_ONE) { asection *s; bfd_byte stub[SCORE_FUNCTION_STUB_SIZE]; /* This symbol has a stub. Set it up. */ BFD_ASSERT (h->dynindx != -1); s = bfd_get_linker_section (dynobj, SCORE_ELF_STUB_SECTION_NAME); BFD_ASSERT (s != NULL); /* FIXME: Can h->dynindex be more than 64K? */ if (h->dynindx & 0xffff0000) return FALSE; /* Fill the stub. */ score_bfd_put_32 (output_bfd, STUB_LW, stub); score_bfd_put_32 (output_bfd, STUB_MOVE, stub + 4); score_bfd_put_32 (output_bfd, STUB_LI16 | (h->dynindx << 1), stub + 8); score_bfd_put_32 (output_bfd, STUB_BRL, stub + 12); BFD_ASSERT (h->plt.offset <= s->size); memcpy (s->contents + h->plt.offset, stub, SCORE_FUNCTION_STUB_SIZE); /* Mark the symbol as undefined. plt.offset != -1 occurs only for the referenced symbol. */ sym->st_shndx = SHN_UNDEF; /* The run-time linker uses the st_value field of the symbol to reset the global offset table entry for this external to its stub address when unlinking a shared object. 
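So point st_value at the stub that was just copied into the stub section.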
*/ sym->st_value = (s->output_section->vma + s->output_offset + h->plt.offset); } BFD_ASSERT (h->dynindx != -1 || h->forced_local); sgot = score_elf_got_section (dynobj, FALSE); BFD_ASSERT (sgot != NULL); BFD_ASSERT (score_elf_section_data (sgot) != NULL); g = score_elf_section_data (sgot)->u.got_info; BFD_ASSERT (g != NULL); /* Run through the global symbol table, creating GOT entries for all the symbols that need them. */ if (g->global_gotsym != NULL && h->dynindx >= g->global_gotsym->dynindx) { bfd_vma offset; bfd_vma value; value = sym->st_value; offset = score_elf_global_got_index (dynobj, h); score_bfd_put_32 (output_bfd, value, sgot->contents + offset); } /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. */ name = h->root.root.string; if (h == elf_hash_table (info)->hdynamic || h == elf_hash_table (info)->hgot) sym->st_shndx = SHN_ABS; else if (strcmp (name, "_DYNAMIC_LINK") == 0) { sym->st_shndx = SHN_ABS; sym->st_info = ELF_ST_INFO (STB_GLOBAL, STT_SECTION); sym->st_value = 1; } else if (strcmp (name, GP_DISP_LABEL) == 0) { sym->st_shndx = SHN_ABS; sym->st_info = ELF_ST_INFO (STB_GLOBAL, STT_SECTION); sym->st_value = elf_gp (output_bfd); } return TRUE; } /* Finish up the dynamic sections. */ static bfd_boolean s3_bfd_score_elf_finish_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info) { bfd *dynobj; asection *sdyn; asection *sgot; asection *s; struct score_got_info *g; dynobj = elf_hash_table (info)->dynobj; sdyn = bfd_get_linker_section (dynobj, ".dynamic"); sgot = score_elf_got_section (dynobj, FALSE); if (sgot == NULL) g = NULL; else { BFD_ASSERT (score_elf_section_data (sgot) != NULL); g = score_elf_section_data (sgot)->u.got_info; BFD_ASSERT (g != NULL); } if (elf_hash_table (info)->dynamic_sections_created) { bfd_byte *b; BFD_ASSERT (sdyn != NULL); BFD_ASSERT (g != NULL); for (b = sdyn->contents; b < sdyn->contents + sdyn->size; b += SCORE_ELF_DYN_SIZE (dynobj)) { Elf_Internal_Dyn dyn; const char *name; size_t elemsize; bfd_boolean swap_out_p; /* Read in the current dynamic entry. */ (*get_elf_backend_data (dynobj)->s->swap_dyn_in) (dynobj, b, &dyn); /* Assume that we're going to modify it and write it out. */ swap_out_p = TRUE; switch (dyn.d_tag) { case DT_RELENT: dyn.d_un.d_val = SCORE_ELF_REL_SIZE (dynobj); break; case DT_STRSZ: /* Rewrite DT_STRSZ. */ dyn.d_un.d_val = _bfd_elf_strtab_size (elf_hash_table (info)->dynstr); break; case DT_PLTGOT: s = elf_hash_table (info)->sgot; dyn.d_un.d_ptr = s->output_section->vma + s->output_offset; break; case DT_SCORE_BASE_ADDRESS: s = output_bfd->sections; BFD_ASSERT (s != NULL); dyn.d_un.d_ptr = s->vma & ~(bfd_vma) 0xffff; break; case DT_SCORE_LOCAL_GOTNO: dyn.d_un.d_val = g->local_gotno; break; case DT_SCORE_UNREFEXTNO: /* The index into the dynamic symbol table which is the entry of the first external symbol that is not referenced within the same object. */ dyn.d_un.d_val = bfd_count_sections (output_bfd) + 1; break; case DT_SCORE_GOTSYM: if (g->global_gotsym) { dyn.d_un.d_val = g->global_gotsym->dynindx; break; } /* If we don't have any global GOT symbols, we default to setting DT_SCORE_GOTSYM to the same value as DT_SCORE_SYMTABNO. */ /* Fall through.
*/ case DT_SCORE_SYMTABNO: name = ".dynsym"; elemsize = SCORE_ELF_SYM_SIZE (output_bfd); s = bfd_get_linker_section (dynobj, name); dyn.d_un.d_val = s->size / elemsize; break; case DT_SCORE_HIPAGENO: dyn.d_un.d_val = g->local_gotno - SCORE_RESERVED_GOTNO; break; default: swap_out_p = FALSE; break; } if (swap_out_p) (*get_elf_backend_data (dynobj)->s->swap_dyn_out) (dynobj, &dyn, b); } } /* The first entry of the global offset table will be filled at runtime. The second entry will be used by some runtime loaders. This is not the case for IRIX rld. */ if (sgot != NULL && sgot->size > 0) { score_bfd_put_32 (output_bfd, 0, sgot->contents); score_bfd_put_32 (output_bfd, 0x80000000, sgot->contents + SCORE_ELF_GOT_SIZE (output_bfd)); } if (sgot != NULL) elf_section_data (sgot->output_section)->this_hdr.sh_entsize = SCORE_ELF_GOT_SIZE (output_bfd); /* We need to sort the entries of the dynamic relocation section. */ s = score_elf_rel_dyn_section (dynobj, FALSE); if (s != NULL && s->size > (bfd_vma)2 * SCORE_ELF_REL_SIZE (output_bfd)) { reldyn_sorting_bfd = output_bfd; qsort ((Elf32_External_Rel *) s->contents + 1, s->reloc_count - 1, sizeof (Elf32_External_Rel), score_elf_sort_dynamic_relocs); } return TRUE; } /* This function sets up the ELF section header for a BFD section in preparation for writing it out. This is where the flags and type fields are set for unusual sections. */ static bfd_boolean s3_bfd_score_elf_fake_sections (bfd *abfd ATTRIBUTE_UNUSED, Elf_Internal_Shdr *hdr, asection *sec) { const char *name; name = bfd_get_section_name (abfd, sec); if (strcmp (name, ".got") == 0 || strcmp (name, ".srdata") == 0 || strcmp (name, ".sdata") == 0 || strcmp (name, ".sbss") == 0) hdr->sh_flags |= SHF_SCORE_GPREL; return TRUE; } /* This function does additional processing on the ELF section header before writing it out. This is used to set the flags and type fields for some sections. */ /* assign_file_positions_except_relocs() checks the section flags, and a warning message is issued for allocatable sections. backend_fake_section is called before assign_file_positions_except_relocs(), backend_section_processing after it; so we modify the section flags in the latter, not in backend_fake_section. */ static bfd_boolean s3_bfd_score_elf_section_processing (bfd *abfd ATTRIBUTE_UNUSED, Elf_Internal_Shdr *hdr) { if (hdr->bfd_section != NULL) { const char *name = bfd_get_section_name (abfd, hdr->bfd_section); if (strcmp (name, ".sdata") == 0) { hdr->sh_flags |= SHF_ALLOC | SHF_WRITE | SHF_SCORE_GPREL; hdr->sh_type = SHT_PROGBITS; } else if (strcmp (name, ".sbss") == 0) { hdr->sh_flags |= SHF_ALLOC | SHF_WRITE | SHF_SCORE_GPREL; hdr->sh_type = SHT_NOBITS; } else if (strcmp (name, ".srdata") == 0) { hdr->sh_flags |= SHF_ALLOC | SHF_SCORE_GPREL; hdr->sh_type = SHT_PROGBITS; } } return TRUE; } static bfd_boolean s3_bfd_score_elf_write_section (bfd *output_bfd, asection *sec, bfd_byte *contents) { bfd_byte *to, *from, *end; int i; if (strcmp (sec->name, ".pdr") != 0) return FALSE; if (score_elf_section_data (sec)->u.tdata == NULL) return FALSE; to = contents; end = contents + sec->size; for (from = contents, i = 0; from < end; from += PDR_SIZE, i++) { if ((score_elf_section_data (sec)->u.tdata)[i] == 1) continue; if (to != from) memcpy (to, from, PDR_SIZE); to += PDR_SIZE; } bfd_set_section_contents (output_bfd, sec->output_section, contents, (file_ptr) sec->output_offset, sec->size); return TRUE; } /* Copy data from a SCORE ELF indirect symbol to its direct symbol, hiding the old indirect symbol.
Process additional relocation information. */ static void s3_bfd_score_elf_copy_indirect_symbol (struct bfd_link_info *info, struct elf_link_hash_entry *dir, struct elf_link_hash_entry *ind) { struct score_elf_link_hash_entry *dirscore, *indscore; _bfd_elf_link_hash_copy_indirect (info, dir, ind); if (ind->root.type != bfd_link_hash_indirect) return; dirscore = (struct score_elf_link_hash_entry *) dir; indscore = (struct score_elf_link_hash_entry *) ind; dirscore->possibly_dynamic_relocs += indscore->possibly_dynamic_relocs; if (indscore->readonly_reloc) dirscore->readonly_reloc = TRUE; if (indscore->no_fn_stub) dirscore->no_fn_stub = TRUE; } /* Remove information about discarded functions from other sections which mention them. */ static bfd_boolean s3_bfd_score_elf_discard_info (bfd *abfd, struct elf_reloc_cookie *cookie, struct bfd_link_info *info) { asection *o; bfd_boolean ret = FALSE; unsigned char *tdata; size_t i, skip; o = bfd_get_section_by_name (abfd, ".pdr"); if ((!o) || (o->size == 0) || (o->size % PDR_SIZE != 0) || (o->output_section != NULL && bfd_is_abs_section (o->output_section))) return FALSE; tdata = bfd_zmalloc (o->size / PDR_SIZE); if (!tdata) return FALSE; cookie->rels = _bfd_elf_link_read_relocs (abfd, o, NULL, NULL, info->keep_memory); if (!cookie->rels) { free (tdata); return FALSE; } cookie->rel = cookie->rels; cookie->relend = cookie->rels + o->reloc_count; for (i = 0, skip = 0; i < o->size; i++) { if (bfd_elf_reloc_symbol_deleted_p (i * PDR_SIZE, cookie)) { tdata[i] = 1; skip++; } } if (skip != 0) { score_elf_section_data (o)->u.tdata = tdata; o->size -= skip * PDR_SIZE; ret = TRUE; } else free (tdata); if (!info->keep_memory) free (cookie->rels); return ret; } /* Signal that discard_info() has removed the discarded relocations for this section. */ static bfd_boolean s3_bfd_score_elf_ignore_discarded_relocs (asection *sec) { if (strcmp (sec->name, ".pdr") == 0) return TRUE; return FALSE; } /* Return the section that should be marked against GC for a given relocation. */ static asection * s3_bfd_score_elf_gc_mark_hook (asection *sec, struct bfd_link_info *info, Elf_Internal_Rela *rel, struct elf_link_hash_entry *h, Elf_Internal_Sym *sym) { if (h != NULL) switch (ELF32_R_TYPE (rel->r_info)) { case R_SCORE_GNU_VTINHERIT: case R_SCORE_GNU_VTENTRY: return NULL; } return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym); } /* Support for core dump NOTE sections. */ static bfd_boolean s3_bfd_score_elf_grok_prstatus (bfd *abfd, Elf_Internal_Note *note) { int offset; unsigned int raw_size; switch (note->descsz) { default: return FALSE; case 148: /* Linux/Score 32-bit. */ /* pr_cursig */ elf_tdata (abfd)->core->signal = score_bfd_get_16 (abfd, note->descdata + 12); /* pr_pid */ elf_tdata (abfd)->core->lwpid = score_bfd_get_32 (abfd, note->descdata + 24); /* pr_reg */ offset = 72; raw_size = 72; break; } /* Make a ".reg/999" section. */ return _bfd_elfcore_make_pseudosection (abfd, ".reg", raw_size, note->descpos + offset); } static bfd_boolean s3_bfd_score_elf_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) { switch (note->descsz) { default: return FALSE; case 124: /* Linux/Score elf_prpsinfo. */ elf_tdata (abfd)->core->program = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16); elf_tdata (abfd)->core->command = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80); } /* Note that for some reason, a spurious space is tacked onto the end of the args in some (at least one anyway) implementations, so strip it off if it exists. 
*/ { char *command = elf_tdata (abfd)->core->command; int n = strlen (command); if (0 < n && command[n - 1] == ' ') command[n - 1] = '\0'; } return TRUE; } /* Score BFD functions. */ static reloc_howto_type * s3_elf32_score_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED, bfd_reloc_code_real_type code) { unsigned int i; for (i = 0; i < ARRAY_SIZE (elf32_score_reloc_map); i++) if (elf32_score_reloc_map[i].bfd_reloc_val == code) return &elf32_score_howto_table[elf32_score_reloc_map[i].elf_reloc_val]; return NULL; } static reloc_howto_type * elf32_score_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, const char *r_name) { unsigned int i; for (i = 0; i < (sizeof (elf32_score_howto_table) / sizeof (elf32_score_howto_table[0])); i++) if (elf32_score_howto_table[i].name != NULL && strcasecmp (elf32_score_howto_table[i].name, r_name) == 0) return &elf32_score_howto_table[i]; return NULL; } static bfd_boolean s3_elf32_score_print_private_bfd_data (bfd *abfd, void * ptr) { FILE *file = (FILE *) ptr; BFD_ASSERT (abfd != NULL && ptr != NULL); /* Print normal ELF private data. */ _bfd_elf_print_private_bfd_data (abfd, ptr); /* xgettext:c-format */ fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags); if (elf_elfheader (abfd)->e_flags & EF_SCORE_PIC) { fprintf (file, _(" [pic]")); } if (elf_elfheader (abfd)->e_flags & EF_SCORE_FIXDEP) { fprintf (file, _(" [fix dep]")); } fputc ('\n', file); return TRUE; } static bfd_boolean s3_elf32_score_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info) { bfd *obfd = info->output_bfd; flagword in_flags; flagword out_flags; if (!_bfd_generic_verify_endian_match (ibfd, info)) return FALSE; in_flags = elf_elfheader (ibfd)->e_flags; out_flags = elf_elfheader (obfd)->e_flags; if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour || bfd_get_flavour (obfd) != bfd_target_elf_flavour) return TRUE; in_flags = elf_elfheader (ibfd)->e_flags; out_flags = elf_elfheader (obfd)->e_flags; if (! elf_flags_init (obfd)) { elf_flags_init (obfd) = TRUE; elf_elfheader (obfd)->e_flags = in_flags; if (bfd_get_arch (obfd) == bfd_get_arch (ibfd) && bfd_get_arch_info (obfd)->the_default) { return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd)); } return TRUE; } if (((in_flags & EF_SCORE_PIC) != 0) != ((out_flags & EF_SCORE_PIC) != 0)) _bfd_error_handler (_("%B: warning: linking PIC files with non-PIC files"), ibfd); /* FIXME: Maybe dependency fix compatibility should be checked here. */ return TRUE; } static bfd_boolean s3_elf32_score_new_section_hook (bfd *abfd, asection *sec) { struct _score_elf_section_data *sdata; bfd_size_type amt = sizeof (*sdata); sdata = bfd_zalloc (abfd, amt); if (sdata == NULL) return FALSE; sec->used_by_bfd = sdata; return _bfd_elf_new_section_hook (abfd, sec); } /*****************************************************************************/ /* s3_s7: backend hooks. 
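Each hook below simply dispatches to the score3 (s3_) or score7 (s7_) implementation, selected by the BFD machine number.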
*/ static void _bfd_score_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc, Elf_Internal_Rela *elf_reloc) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_info_to_howto (abfd, bfd_reloc, elf_reloc); else return s7_bfd_score_info_to_howto (abfd, bfd_reloc, elf_reloc); } static bfd_boolean _bfd_score_elf_relocate_section (bfd *output_bfd, struct bfd_link_info *info, bfd *input_bfd, asection *input_section, bfd_byte *contents, Elf_Internal_Rela *relocs, Elf_Internal_Sym *local_syms, asection **local_sections) { if (bfd_get_mach (output_bfd) == bfd_mach_score3) return s3_bfd_score_elf_relocate_section (output_bfd, info, input_bfd, input_section, contents, relocs, local_syms, local_sections); else return s7_bfd_score_elf_relocate_section (output_bfd, info, input_bfd, input_section, contents, relocs, local_syms, local_sections); } static bfd_boolean _bfd_score_elf_check_relocs (bfd *abfd, struct bfd_link_info *info, asection *sec, const Elf_Internal_Rela *relocs) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_elf_check_relocs (abfd, info, sec, relocs); else return s7_bfd_score_elf_check_relocs (abfd, info, sec, relocs); } static bfd_boolean _bfd_score_elf_add_symbol_hook (bfd *abfd, struct bfd_link_info *info ATTRIBUTE_UNUSED, Elf_Internal_Sym *sym, const char **namep ATTRIBUTE_UNUSED, flagword *flagsp ATTRIBUTE_UNUSED, asection **secp, bfd_vma *valp) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_elf_add_symbol_hook (abfd, info, sym, namep, flagsp, secp, valp); else return s7_bfd_score_elf_add_symbol_hook (abfd, info, sym, namep, flagsp, secp, valp); } static void _bfd_score_elf_symbol_processing (bfd *abfd, asymbol *asym) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_elf_symbol_processing (abfd, asym); else return s7_bfd_score_elf_symbol_processing (abfd, asym); } static int _bfd_score_elf_link_output_symbol_hook (struct bfd_link_info *info ATTRIBUTE_UNUSED, const char *name ATTRIBUTE_UNUSED, Elf_Internal_Sym *sym, asection *input_sec, struct elf_link_hash_entry *h ATTRIBUTE_UNUSED) { /* If we link an empty .o, this field is NULL. */ if (info->input_bfds == NULL) { /* If we see a common symbol, which implies a relocatable link, then if a symbol was small common in an input file, mark it as small common in the output file.
*/ if (sym->st_shndx == SHN_COMMON && strcmp (input_sec->name, ".scommon") == 0) sym->st_shndx = SHN_SCORE_SCOMMON; return 1; } if (bfd_get_mach (info->input_bfds) == bfd_mach_score3) return s3_bfd_score_elf_link_output_symbol_hook (info, name, sym, input_sec, h); else return s7_bfd_score_elf_link_output_symbol_hook (info, name, sym, input_sec, h); } static bfd_boolean _bfd_score_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED, asection *sec, int *retval) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_elf_section_from_bfd_section (abfd, sec, retval); else return s7_bfd_score_elf_section_from_bfd_section (abfd, sec, retval); } static bfd_boolean _bfd_score_elf_adjust_dynamic_symbol (struct bfd_link_info *info, struct elf_link_hash_entry *h) { if (bfd_get_mach (info->input_bfds) == bfd_mach_score3) return s3_bfd_score_elf_adjust_dynamic_symbol (info, h); else return s7_bfd_score_elf_adjust_dynamic_symbol (info, h); } static bfd_boolean _bfd_score_elf_always_size_sections (bfd *output_bfd, struct bfd_link_info *info) { if (bfd_get_mach (output_bfd) == bfd_mach_score3) return s3_bfd_score_elf_always_size_sections (output_bfd, info); else return s7_bfd_score_elf_always_size_sections (output_bfd, info); } static bfd_boolean _bfd_score_elf_size_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info) { if (bfd_get_mach (output_bfd) == bfd_mach_score3) return s3_bfd_score_elf_size_dynamic_sections (output_bfd, info); else return s7_bfd_score_elf_size_dynamic_sections (output_bfd, info); } static bfd_boolean _bfd_score_elf_create_dynamic_sections (bfd *abfd, struct bfd_link_info *info) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_elf_create_dynamic_sections (abfd, info); else return s7_bfd_score_elf_create_dynamic_sections (abfd, info); } static bfd_boolean _bfd_score_elf_finish_dynamic_symbol (bfd *output_bfd, struct bfd_link_info *info, struct elf_link_hash_entry *h, Elf_Internal_Sym *sym) { if (bfd_get_mach (output_bfd) == bfd_mach_score3) return s3_bfd_score_elf_finish_dynamic_symbol (output_bfd, info, h, sym); else return s7_bfd_score_elf_finish_dynamic_symbol (output_bfd, info, h, sym); } static bfd_boolean _bfd_score_elf_finish_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info) { if (bfd_get_mach (output_bfd) == bfd_mach_score3) return s3_bfd_score_elf_finish_dynamic_sections (output_bfd, info); else return s7_bfd_score_elf_finish_dynamic_sections (output_bfd, info); } static bfd_boolean _bfd_score_elf_fake_sections (bfd *abfd ATTRIBUTE_UNUSED, Elf_Internal_Shdr *hdr, asection *sec) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_elf_fake_sections (abfd, hdr, sec); else return s7_bfd_score_elf_fake_sections (abfd, hdr, sec); } static bfd_boolean _bfd_score_elf_section_processing (bfd *abfd ATTRIBUTE_UNUSED, Elf_Internal_Shdr *hdr) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_elf_section_processing (abfd, hdr); else return s7_bfd_score_elf_section_processing (abfd, hdr); } static bfd_boolean _bfd_score_elf_write_section (bfd *output_bfd, struct bfd_link_info *link_info ATTRIBUTE_UNUSED, asection *sec, bfd_byte *contents) { if (bfd_get_mach (output_bfd) == bfd_mach_score3) return s3_bfd_score_elf_write_section (output_bfd, sec, contents); else return s7_bfd_score_elf_write_section (output_bfd, sec, contents); } static void _bfd_score_elf_copy_indirect_symbol (struct bfd_link_info *info, struct elf_link_hash_entry *dir, struct elf_link_hash_entry *ind) { if (bfd_get_mach (info->input_bfds) 
== bfd_mach_score3) return s3_bfd_score_elf_copy_indirect_symbol (info, dir, ind); else return s7_bfd_score_elf_copy_indirect_symbol (info, dir, ind); } static void _bfd_score_elf_hide_symbol (struct bfd_link_info *info, struct elf_link_hash_entry *entry, bfd_boolean force_local) { if (bfd_get_mach (info->input_bfds) == bfd_mach_score3) return s3_bfd_score_elf_hide_symbol (info, entry, force_local); else return s7_bfd_score_elf_hide_symbol (info, entry, force_local); } static bfd_boolean _bfd_score_elf_discard_info (bfd *abfd, struct elf_reloc_cookie *cookie, struct bfd_link_info *info) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_elf_discard_info (abfd, cookie, info); else return s7_bfd_score_elf_discard_info (abfd, cookie, info); } static bfd_boolean _bfd_score_elf_ignore_discarded_relocs (asection *sec) { if (bfd_get_mach (sec->owner) == bfd_mach_score3) return s3_bfd_score_elf_ignore_discarded_relocs (sec); else return s7_bfd_score_elf_ignore_discarded_relocs (sec); } static asection * _bfd_score_elf_gc_mark_hook (asection *sec, struct bfd_link_info *info, Elf_Internal_Rela *rel, struct elf_link_hash_entry *h, Elf_Internal_Sym *sym) { if (bfd_get_mach (info->input_bfds) == bfd_mach_score3) return s3_bfd_score_elf_gc_mark_hook (sec, info, rel, h, sym); else return s7_bfd_score_elf_gc_mark_hook (sec, info, rel, h, sym); } static bfd_boolean _bfd_score_elf_grok_prstatus (bfd *abfd, Elf_Internal_Note *note) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_elf_grok_prstatus (abfd, note); else return s7_bfd_score_elf_grok_prstatus (abfd, note); } static bfd_boolean _bfd_score_elf_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_bfd_score_elf_grok_psinfo (abfd, note); else return s7_bfd_score_elf_grok_psinfo (abfd, note); } static reloc_howto_type * elf32_score_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED, bfd_reloc_code_real_type code) { /* s3: NOTE: gas calls elf32_score_reloc_type_lookup but does not write an ELF file, so defaulting to score3 is safe there. We do not know whether ld will also call this; if it does, this approach cannot work. */ if (score3) return s3_elf32_score_reloc_type_lookup (abfd, code); else return s7_elf32_score_reloc_type_lookup (abfd, code); } /* Create a score elf linker hash table. This is a copy of _bfd_elf_link_hash_table_create() except with a different hash table entry creation function.
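score_elf_link_hash_newfunc allocates the larger score_elf_link_hash_entry, which carries the SCORE-specific per-symbol state used above (possibly_dynamic_relocs, readonly_reloc, no_fn_stub).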
*/ static struct bfd_link_hash_table * elf32_score_link_hash_table_create (bfd *abfd) { struct elf_link_hash_table *ret; bfd_size_type amt = sizeof (struct elf_link_hash_table); ret = (struct elf_link_hash_table *) bfd_zmalloc (amt); if (ret == NULL) return NULL; if (!_bfd_elf_link_hash_table_init (ret, abfd, score_elf_link_hash_newfunc, sizeof (struct score_elf_link_hash_entry), GENERIC_ELF_DATA)) { free (ret); return NULL; } return &ret->root; } static bfd_boolean elf32_score_print_private_bfd_data (bfd *abfd, void * ptr) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_elf32_score_print_private_bfd_data (abfd, ptr); else return s7_elf32_score_print_private_bfd_data (abfd, ptr); } static bfd_boolean elf32_score_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info) { if (bfd_get_mach (info->output_bfd) == bfd_mach_score3) return s3_elf32_score_merge_private_bfd_data (ibfd, info); else return s7_elf32_score_merge_private_bfd_data (ibfd, info); } static bfd_boolean elf32_score_new_section_hook (bfd *abfd, asection *sec) { if (bfd_get_mach (abfd) == bfd_mach_score3) return s3_elf32_score_new_section_hook (abfd, sec); else return s7_elf32_score_new_section_hook (abfd, sec); } /* s3_s7: don't need to split. */ /* Set the right machine number. */ static bfd_boolean _bfd_score_elf_score_object_p (bfd * abfd) { int e_set = bfd_mach_score7; if (elf_elfheader (abfd)->e_machine == EM_SCORE) { int e_mach = elf_elfheader (abfd)->e_flags & EF_SCORE_MACH & EF_OMIT_PIC_FIXDD; switch (e_mach) { /* Set default target is score7. */ default: case E_SCORE_MACH_SCORE7: e_set = bfd_mach_score7; break; case E_SCORE_MACH_SCORE3: e_set = bfd_mach_score3; break; } } return bfd_default_set_arch_mach (abfd, bfd_arch_score, e_set); } bfd_boolean _bfd_score_elf_common_definition (Elf_Internal_Sym *sym) { return (sym->st_shndx == SHN_COMMON || sym->st_shndx == SHN_SCORE_SCOMMON); } /*****************************************************************************/ #define USE_REL 1 #define TARGET_LITTLE_SYM score_elf32_le_vec #define TARGET_LITTLE_NAME "elf32-littlescore" #define TARGET_BIG_SYM score_elf32_be_vec #define TARGET_BIG_NAME "elf32-bigscore" #define ELF_ARCH bfd_arch_score #define ELF_MACHINE_CODE EM_SCORE #define ELF_MACHINE_ALT1 EM_SCORE_OLD #define ELF_MAXPAGESIZE 0x8000 #define elf_info_to_howto 0 #define elf_info_to_howto_rel _bfd_score_info_to_howto #define elf_backend_relocate_section _bfd_score_elf_relocate_section #define elf_backend_check_relocs _bfd_score_elf_check_relocs #define elf_backend_add_symbol_hook _bfd_score_elf_add_symbol_hook #define elf_backend_symbol_processing _bfd_score_elf_symbol_processing #define elf_backend_link_output_symbol_hook \ _bfd_score_elf_link_output_symbol_hook #define elf_backend_section_from_bfd_section \ _bfd_score_elf_section_from_bfd_section #define elf_backend_adjust_dynamic_symbol \ _bfd_score_elf_adjust_dynamic_symbol #define elf_backend_always_size_sections \ _bfd_score_elf_always_size_sections #define elf_backend_size_dynamic_sections \ _bfd_score_elf_size_dynamic_sections #define elf_backend_omit_section_dynsym \ ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true) #define elf_backend_create_dynamic_sections \ _bfd_score_elf_create_dynamic_sections #define elf_backend_finish_dynamic_symbol \ _bfd_score_elf_finish_dynamic_symbol #define elf_backend_finish_dynamic_sections \ _bfd_score_elf_finish_dynamic_sections #define elf_backend_fake_sections _bfd_score_elf_fake_sections #define elf_backend_section_processing 
_bfd_score_elf_section_processing #define elf_backend_write_section _bfd_score_elf_write_section #define elf_backend_copy_indirect_symbol _bfd_score_elf_copy_indirect_symbol #define elf_backend_hide_symbol _bfd_score_elf_hide_symbol #define elf_backend_discard_info _bfd_score_elf_discard_info #define elf_backend_ignore_discarded_relocs \ _bfd_score_elf_ignore_discarded_relocs #define elf_backend_gc_mark_hook _bfd_score_elf_gc_mark_hook #define elf_backend_grok_prstatus _bfd_score_elf_grok_prstatus #define elf_backend_grok_psinfo _bfd_score_elf_grok_psinfo #define elf_backend_can_gc_sections 1 #define elf_backend_want_plt_sym 0 #define elf_backend_got_header_size (4 * SCORE_RESERVED_GOTNO) #define elf_backend_plt_header_size 0 #define elf_backend_collect TRUE #define elf_backend_type_change_ok TRUE #define elf_backend_object_p _bfd_score_elf_score_object_p #define bfd_elf32_bfd_reloc_type_lookup elf32_score_reloc_type_lookup #define bfd_elf32_bfd_reloc_name_lookup \ elf32_score_reloc_name_lookup #define bfd_elf32_bfd_link_hash_table_create elf32_score_link_hash_table_create #define bfd_elf32_bfd_print_private_bfd_data elf32_score_print_private_bfd_data #define bfd_elf32_bfd_merge_private_bfd_data elf32_score_merge_private_bfd_data #define bfd_elf32_new_section_hook elf32_score_new_section_hook #include "elf32-target.h"
totalspectrum/binutils-propeller
bfd/elf32-score.c
C
gpl-2.0
157,881
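Every backend hook in elf32-score.c above follows the same dispatch idiom: inspect bfd_get_mach() once and forward verbatim to the score3 or score7 implementation. A minimal, self-contained sketch of that pattern (the enum, hook names, and payload below are hypothetical stand-ins, not BFD's types):

#include <stdio.h>

/* Hypothetical stand-ins for bfd_mach_score3 / bfd_mach_score7. */
enum mach { MACH_SCORE3, MACH_SCORE7 };

static int s3_hook(int arg) { printf("score3 hook(%d)\n", arg); return 1; }
static int s7_hook(int arg) { printf("score7 hook(%d)\n", arg); return 1; }

/* One wrapper per hook: branch on the machine, then forward verbatim,
   as the _bfd_score_elf_* wrappers above do. */
static int hook(enum mach m, int arg)
{
    return (m == MACH_SCORE3) ? s3_hook(arg) : s7_hook(arg);
}

int main(void)
{
    hook(MACH_SCORE3, 42);
    hook(MACH_SCORE7, 42);
    return 0;
}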
/* gsm_map_summary.c * Routines for GSM MAP Statistics summary window * * Copyright 2004, Michael Lum <mlum [AT] telostech.com> * In association with Telos Technology Inc. * * Modified from summary_dlg.c * * $Id: gsm_map_summary.c 48448 2013-03-21 02:58:59Z wmeier $ * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include <gtk/gtk.h> #include <wiretap/wtap.h> #include <epan/epan.h> #include <epan/packet.h> #include <epan/packet_info.h> #include <epan/value_string.h> #include <epan/tap.h> #include <epan/asn1.h> #include <epan/dissectors/packet-gsm_map.h> #include "../stat_menu.h" #include "../globals.h" #include "../file.h" #include "../summary.h" #include "ui/gtk/gui_stat_menu.h" #include "ui/gtk/dlg_utils.h" #include "ui/gtk/gui_utils.h" #include "ui/gtk/gsm_map_stat.h" #define SUM_STR_MAX 1024 static void add_string_to_box(gchar *str, GtkWidget *box) { GtkWidget *lb; lb = gtk_label_new(str); gtk_misc_set_alignment(GTK_MISC(lb), 0.0f, 0.5f); gtk_box_pack_start(GTK_BOX(box), lb,FALSE,FALSE, 0); gtk_widget_show(lb); } void gsm_map_stat_gtk_sum_cb(GtkAction *action _U_, gpointer user_data _U_) { summary_tally summary; GtkWidget *sum_open_w, *main_vb, *file_fr, *data_fr, *file_box, *data_box, *bbox, *close_bt, *invoke_fr, *invoke_box, *rr_fr, *rr_box, *tot_fr, *tot_box; gchar string_buff[SUM_STR_MAX]; double seconds; int i; int tot_invokes, tot_rr; double tot_invokes_size, tot_rr_size; /* initialize the tally */ summary_fill_in(&cfile, &summary); /* initial computations */ seconds = summary.stop_time - summary.start_time; sum_open_w = dlg_window_new("GSM MAP Statistics: Summary"); /* transient_for top_level */ gtk_window_set_destroy_with_parent (GTK_WINDOW(sum_open_w), TRUE); /* Container for each row of widgets */ main_vb = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 3, FALSE); gtk_container_set_border_width(GTK_CONTAINER(main_vb), 5); gtk_container_add(GTK_CONTAINER(sum_open_w), main_vb); gtk_widget_show(main_vb); /* File frame */ file_fr = gtk_frame_new("File"); gtk_box_pack_start(GTK_BOX (main_vb), file_fr, TRUE, TRUE, 0); gtk_widget_show(file_fr); file_box = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 3, FALSE); gtk_container_add(GTK_CONTAINER(file_fr), file_box); gtk_widget_show(file_box); /* filename */ g_snprintf(string_buff, SUM_STR_MAX, "Name: %s", ((summary.filename) ?
summary.filename : "None")); add_string_to_box(string_buff, file_box); /* length */ g_snprintf(string_buff, SUM_STR_MAX, "Length: %" G_GINT64_MODIFIER "d", summary.file_length); add_string_to_box(string_buff, file_box); /* format */ g_snprintf(string_buff, SUM_STR_MAX, "Format: %s", wtap_file_type_string(summary.file_type)); add_string_to_box(string_buff, file_box); if (summary.has_snap) { /* snapshot length */ g_snprintf(string_buff, SUM_STR_MAX, "Snapshot length: %u", summary.snap); add_string_to_box(string_buff, file_box); } /* Data frame */ data_fr = gtk_frame_new("Data"); gtk_box_pack_start(GTK_BOX (main_vb), data_fr, TRUE, TRUE, 0); gtk_widget_show(data_fr); data_box = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 3, FALSE); gtk_container_add(GTK_CONTAINER(data_fr), data_box); gtk_widget_show(data_box); /* * We must have no un-time-stamped packets (i.e., the number of * time-stamped packets must be the same as the number of packets), * and at least two time-stamped packets, in order for the elapsed * time to be valid. */ if (summary.packet_count_ts == summary.packet_count && summary.packet_count_ts >= 2) { /* seconds */ g_snprintf(string_buff, SUM_STR_MAX, "Elapsed time: %.3f seconds", summary.elapsed_time); add_string_to_box(string_buff, data_box); g_snprintf(string_buff, SUM_STR_MAX, "Between first and last packet: %.3f seconds", seconds); add_string_to_box(string_buff, data_box); } /* Packet count */ g_snprintf(string_buff, SUM_STR_MAX, "Packet count: %i", summary.packet_count); add_string_to_box(string_buff, data_box); tot_invokes = 0; tot_invokes_size = 0; for (i=0; i < GSM_MAP_MAX_NUM_OPR_CODES; i++) { tot_invokes += gsm_map_stat.opr_code[i]; tot_invokes_size += gsm_map_stat.size[i]; } tot_rr = 0; tot_rr_size = 0; for (i=0; i < GSM_MAP_MAX_NUM_OPR_CODES; i++) { tot_rr += gsm_map_stat.opr_code_rr[i]; tot_rr_size += gsm_map_stat.size_rr[i]; } /* Invoke frame */ invoke_fr = gtk_frame_new("Invokes"); gtk_box_pack_start(GTK_BOX (main_vb), invoke_fr, TRUE, TRUE, 0); gtk_widget_show(invoke_fr); invoke_box = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 3, FALSE); gtk_container_add(GTK_CONTAINER(invoke_fr), invoke_box); gtk_widget_show(invoke_box); /* Total number of invokes */ g_snprintf(string_buff, SUM_STR_MAX, "Total number of Invokes: %u", tot_invokes); add_string_to_box(string_buff, invoke_box); /* * We must have no un-time-stamped packets (i.e., the number of * time-stamped packets must be the same as the number of packets), * and at least two time-stamped packets, in order for the elapsed * time to be valid. 
*/ if (summary.packet_count_ts == summary.packet_count && summary.packet_count_ts >= 2) { /* Total number of invokes per second */ if (seconds) g_snprintf(string_buff, SUM_STR_MAX, "Total number of Invokes per second: %.2f", tot_invokes/seconds); else g_snprintf(string_buff, SUM_STR_MAX, "Total number of Invokes per second: N/A"); add_string_to_box(string_buff, invoke_box); } /* Total size of invokes */ g_snprintf(string_buff, SUM_STR_MAX, "Total number of bytes for Invokes: %.0f", tot_invokes_size); add_string_to_box(string_buff, invoke_box); /* Average size of invokes */ if (tot_invokes) g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per Invoke: %.2f", tot_invokes_size/tot_invokes); else g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per Invoke: N/A"); add_string_to_box(string_buff, invoke_box); /* * We must have no un-time-stamped packets (i.e., the number of * time-stamped packets must be the same as the number of packets), * and at least two time-stamped packets, in order for the elapsed * time to be valid. */ if (summary.packet_count_ts == summary.packet_count && summary.packet_count_ts >= 2) { /* Average size of invokes per second */ if (seconds) g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per second: %.2f", tot_invokes_size/seconds); else g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per second: N/A"); add_string_to_box(string_buff, invoke_box); } /* Return Results frame */ rr_fr = gtk_frame_new("Return Results"); gtk_box_pack_start(GTK_BOX (main_vb), rr_fr, TRUE, TRUE, 0); gtk_widget_show(rr_fr); rr_box = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 3, FALSE); gtk_container_add(GTK_CONTAINER(rr_fr), rr_box); gtk_widget_show(rr_box); /* Total number of return results */ g_snprintf(string_buff, SUM_STR_MAX, "Total number of Return Results: %u", tot_rr); add_string_to_box(string_buff, rr_box); /* * We must have no un-time-stamped packets (i.e., the number of * time-stamped packets must be the same as the number of packets), * and at least two time-stamped packets, in order for the elapsed * time to be valid. */ if (summary.packet_count_ts == summary.packet_count && summary.packet_count_ts >= 2) { /* Total number of return results per second */ if (seconds) g_snprintf(string_buff, SUM_STR_MAX, "Total number of Return Results per second: %.2f", tot_rr/seconds); else g_snprintf(string_buff, SUM_STR_MAX, "Total number of Return Results per second: N/A"); add_string_to_box(string_buff, rr_box); } /* Total size of return results */ g_snprintf(string_buff, SUM_STR_MAX, "Total number of bytes for Return Results: %.0f", tot_rr_size); add_string_to_box(string_buff, rr_box); /* Average size of return results */ if (tot_rr) g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per Return Result: %.2f", tot_rr_size/tot_rr); else g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per Return Result: N/A"); add_string_to_box(string_buff, rr_box); /* * We must have no un-time-stamped packets (i.e., the number of * time-stamped packets must be the same as the number of packets), * and at least two time-stamped packets, in order for the elapsed * time to be valid. 
*/ if (summary.packet_count_ts == summary.packet_count && summary.packet_count_ts >= 2) { /* Average size of return results per second */ if (seconds) g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per second: %.2f", tot_rr_size/seconds); else g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per second: N/A"); add_string_to_box(string_buff, rr_box); } /* Totals frame */ tot_fr = gtk_frame_new("Totals"); gtk_box_pack_start(GTK_BOX (main_vb), tot_fr, TRUE, TRUE, 0); gtk_widget_show(tot_fr); tot_box = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 3, FALSE); gtk_container_add(GTK_CONTAINER(tot_fr), tot_box); gtk_widget_show(tot_box); /* Total number of return results */ g_snprintf(string_buff, SUM_STR_MAX, "Total number of GSM MAP messages: %u", tot_invokes + tot_rr); add_string_to_box(string_buff, tot_box); /* * We must have no un-time-stamped packets (i.e., the number of * time-stamped packets must be the same as the number of packets), * and at least two time-stamped packets, in order for the elapsed * time to be valid. */ if (summary.packet_count_ts == summary.packet_count && summary.packet_count_ts >= 2) { if (seconds) g_snprintf(string_buff, SUM_STR_MAX, "Total number of GSM MAP messages per second: %.2f", (tot_invokes + tot_rr)/seconds); else g_snprintf(string_buff, SUM_STR_MAX, "Total number of GSM MAP messages per second: N/A"); add_string_to_box(string_buff, tot_box); } g_snprintf(string_buff, SUM_STR_MAX, "Total number of bytes for GSM MAP messages: %.0f", tot_invokes_size + tot_rr_size); add_string_to_box(string_buff, tot_box); if (tot_invokes + tot_rr) g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per GSM MAP message: %.2f", (tot_invokes_size + tot_rr_size)/(tot_invokes + tot_rr)); else g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per GSM MAP message: N/A"); add_string_to_box(string_buff, tot_box); /* * We must have no un-time-stamped packets (i.e., the number of * time-stamped packets must be the same as the number of packets), * and at least two time-stamped packets, in order for the elapsed * time to be valid. */ if (summary.packet_count_ts == summary.packet_count && summary.packet_count_ts >= 2) { if (seconds) g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per second: %.2f", (tot_invokes_size + tot_rr_size)/seconds); else g_snprintf(string_buff, SUM_STR_MAX, "Average number of bytes per second: N/A"); add_string_to_box(string_buff, tot_box); } /* Button row. */ bbox = dlg_button_row_new(GTK_STOCK_CLOSE, NULL); gtk_box_pack_start(GTK_BOX(main_vb), bbox, TRUE, TRUE, 0); gtk_widget_show(bbox); close_bt = (GtkWidget *)g_object_get_data(G_OBJECT(bbox), GTK_STOCK_CLOSE); window_set_cancel_button(sum_open_w, close_bt, window_cancel_button_cb); g_signal_connect(sum_open_w, "delete_event", G_CALLBACK(window_delete_event_cb), NULL); gtk_widget_show(sum_open_w); window_present(sum_open_w); } void register_tap_listener_gtkgsm_map_summary(void) { }
P1sec/LTE_monitor_c2xx
wireshark/ui/gtk/gsm_map_summary.c
C
gpl-2.0
12,651
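The summary window above only reports per-second figures when every packet carries a timestamp and at least two do, and each division falls back to "N/A" when the elapsed time is zero. A standalone sketch of that guard (format_rate is a hypothetical helper, not a Wireshark API):

#include <stdio.h>
#include <stddef.h>

/* Format a rate, falling back to "N/A" when the elapsed time is zero,
   mirroring the `if (seconds) ... else ... "N/A"` pattern above. */
static void format_rate(char *buf, size_t len, const char *label,
                        double total, double seconds)
{
    if (seconds != 0.0)
        snprintf(buf, len, "%s: %.2f", label, total / seconds);
    else
        snprintf(buf, len, "%s: N/A", label);
}

int main(void)
{
    char line[128];
    format_rate(line, sizeof line, "Invokes per second", 120.0, 60.0);
    puts(line);   /* prints "Invokes per second: 2.00" */
    format_rate(line, sizeof line, "Invokes per second", 120.0, 0.0);
    puts(line);   /* prints "Invokes per second: N/A" */
    return 0;
}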
#include <linux/types.h> #include <linux/proc_fs.h> #include <asm/setup.h> #include <linux/pagemap.h> struct st_read_proc { char *name; int (*read_proc)(char *, char **, off_t, int, int *, void *); }; extern unsigned int get_pd_charge_flag(void); extern unsigned int get_boot_into_recovery_flag(void); extern unsigned int resetmode_is_normal(void); /* same as in proc_misc.c */ static int proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len) { if (len <= off + count) *eof = 1; *start = page + off; len -= off; if (len > count) len = count; if (len < 0) len = 0; return len; } static int app_tag_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = 0; u32 charge_flag = 0; u32 recovery_flag = 0; u32 reset_normal_flag = 0; recovery_flag = get_boot_into_recovery_flag(); charge_flag = get_pd_charge_flag(); reset_normal_flag = resetmode_is_normal(); len = snprintf(page, PAGE_SIZE, "recovery_flag:\n%d\n" "charge_flag:\n%d\n" "reset_normal_flag:\n%d\n", recovery_flag, charge_flag, reset_normal_flag); return proc_calc_metrics(page, start, off, count, eof, len); } static struct st_read_proc simple_ones[] = { {"app_info", app_tag_read_proc}, {NULL,} }; void __init proc_app_info_init(void) { struct st_read_proc *p; for (p = simple_ones; p->name; p++) create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL); }
Wonfee/huawei_u9508_kernel
fs/proc/app_info.c
C
gpl-2.0
1,527
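proc_calc_metrics() above implements the legacy read_proc contract: the whole snapshot is formatted into page, and the function clamps the requested window [off, off + count) against the snapshot length, flagging EOF when the data ends inside that window. A standalone sketch of the same clamping arithmetic (the real function also sets *start = page + off):

#include <stdio.h>

static int calc_metrics(int len, long off, int count, int *eof)
{
    if (len <= off + count)
        *eof = 1;           /* snapshot ends within this window */
    len -= off;             /* skip what the reader already consumed */
    if (len > count)
        len = count;        /* never return more than was asked for */
    if (len < 0)
        len = 0;            /* reader seeked past the end */
    return len;
}

int main(void)
{
    int eof = 0;
    /* 100-byte snapshot; the reader asks for 64 bytes at offset 80. */
    int n = calc_metrics(100, 80, 64, &eof);
    printf("returned %d bytes, eof=%d\n", n, eof);  /* returned 20 bytes, eof=1 */
    return 0;
}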
/* GStreamer * Copyright (C) <2009> Руслан Ижбулатов <lrn1986 _at_ gmail _dot_ com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <gst/gst-i18n-plugin.h> #include "gstvideomeasure.h" #include "gstvideomeasure_ssim.h" #include "gstvideomeasure_collector.h" GstEvent * gst_event_new_measured (guint64 framenumber, GstClockTime timestamp, const gchar * metric, const GValue * mean, const GValue * lowest, const GValue * highest) { GstStructure *str = gst_structure_new (GST_EVENT_VIDEO_MEASURE, "event", G_TYPE_STRING, "frame-measured", "offset", G_TYPE_UINT64, framenumber, "timestamp", GST_TYPE_CLOCK_TIME, timestamp, "metric", G_TYPE_STRING, metric, NULL); gst_structure_set_value (str, "mean", mean); gst_structure_set_value (str, "lowest", lowest); gst_structure_set_value (str, "highest", highest); return gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, str); } static gboolean plugin_init (GstPlugin * plugin) { gboolean res; #ifdef ENABLE_NLS GST_DEBUG ("binding text domain %s to locale dir %s", GETTEXT_PACKAGE, LOCALEDIR); bindtextdomain (GETTEXT_PACKAGE, LOCALEDIR); bind_textdomain_codeset (GETTEXT_PACKAGE, "UTF-8"); #endif res = gst_element_register (plugin, "ssim", GST_RANK_NONE, GST_TYPE_SSIM); res &= gst_element_register (plugin, "measurecollector", GST_RANK_NONE, GST_TYPE_MEASURE_COLLECTOR); return res; } GST_PLUGIN_DEFINE2 (GST_VERSION_MAJOR, GST_VERSION_MINOR, videomeasure, "Various video measurers", plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN);
freedesktop-unofficial-mirror/gstreamer-sdk__gst-plugins-bad
gst/videomeasure/gstvideomeasure.c
C
gpl-2.0
2,378
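A downstream element receiving the custom event built by gst_event_new_measured() would unpack the same fields from its GstStructure. An untested sketch of such a handler, assuming the standard GstStructure getters and the GST_EVENT_VIDEO_MEASURE name macro from gstvideomeasure.h:

#include <gst/gst.h>
#include "gstvideomeasure.h"    /* assumed to supply GST_EVENT_VIDEO_MEASURE */

static void
handle_measured_event (GstEvent * event)
{
  const GstStructure *str = gst_event_get_structure (event);
  guint64 offset = 0;
  const gchar *metric;

  /* Field names match those set in gst_event_new_measured() above. */
  if (str == NULL || !gst_structure_has_name (str, GST_EVENT_VIDEO_MEASURE))
    return;

  metric = gst_structure_get_string (str, "metric");
  if (gst_structure_get_uint64 (str, "offset", &offset))
    g_print ("frame %" G_GUINT64_FORMAT " measured with %s\n",
        offset, metric ? metric : "(unknown)");
}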
/* * CDE - Common Desktop Environment * * Copyright (c) 1993-2012, The Open Group. All rights reserved. * * These libraries and programs are free software; you can * redistribute them and/or modify them under the terms of the GNU * Lesser General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) * any later version. * * These libraries and programs are distributed in the hope that * they will be useful, but WITHOUT ANY WARRANTY; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU Lesser General Public License for more * details. * * You should have received a copy of the GNU Lesser General Public * License along with these libraries and programs; if not, write * to the Free Software Foundation, Inc., 51 Franklin Street, Fifth * Floor, Boston, MA 02110-1301 USA */ /* $TOG: EditAreaData.c /main/6 1998/03/03 16:18:13 mgreess $ */ /**********************************<+>************************************* *************************************************************************** ** ** File: EditAreaData.c ** ** Project: DtEditor widget for editing services ** ** Description: Contains functions for getting and setting the data ** on which the editor operates. ** ----------- ** ******************************************************************* * * (c) Copyright 1993, 1994 Unix System Labs, Inc., a subsidiary of Novell, Inc. * (c) Copyright 1996 Digital Equipment Corporation. * (c) Copyright 1993, 1994, 1996 Hewlett-Packard Company. * (c) Copyright 1993, 1994, 1996 International Business Machines Corp. * (c) Copyright 1993, 1994, 1996 Sun Microsystems, Inc. * (c) Copyright 1996 Novell, Inc. * (c) Copyright 1996 FUJITSU LIMITED. * (c) Copyright 1996 Hitachi. * ******************************************************************** ** ** ************************************************************************** **********************************<+>*************************************/ #include "EditorP.h" #include <X11/Xutil.h> #include <Xm/TextP.h> #include <unistd.h> #include "DtWidgetI.h" typedef enum _LoadActionType { LOAD_DATA, INSERT_DATA, APPEND_DATA, REPLACE_DATA } LoadActionType; static DtEditorErrorCode Check4EnoughMemory( int numBytes); static DtEditorErrorCode StripEmbeddedNulls( char *stringData, int *length); static DtEditorErrorCode LoadFile( Widget w, char *fileName, LoadActionType action, XmTextPosition startReplace, XmTextPosition endReplace ); #ifdef NEED_STRCASECMP /* * in case strcasecmp is not provided by the system here is one * which does the trick */ static int strcasecmp(s1, s2) register char *s1, *s2; { register int c1, c2; while (*s1 && *s2) { c1 = isupper(*s1) ? tolower(*s1) : *s1; c2 = isupper(*s2) ? tolower(*s2) : *s2; if (c1 != c2) return (1); s1++; s2++; } if (*s1 || *s2) return (1); return (0); } #endif /***************************************************************************** * * Check4EnoughMemory - estimates whether there is enough memory to malloc * "numBytes" of memory. This routine doubles the amount needed because the * routines that use it are putting data into the text widget & we must make * sure the widget will have room, too.
* * Returns DtEDITOR_NO_ERRORS * DtEDITOR_ILLEGAL_SIZE * DtEDITOR_INSUFFICIENT_MEMORY * *****************************************************************************/ static DtEditorErrorCode Check4EnoughMemory( int numBytes) { DtEditorErrorCode returnVal = DtEDITOR_ILLEGAL_SIZE; if (numBytes > 0) { char *tmpString = (char *)malloc((2 * numBytes) + (numBytes/10)); if(tmpString == (char *)NULL) returnVal = DtEDITOR_INSUFFICIENT_MEMORY; else { returnVal = DtEDITOR_NO_ERRORS; free(tmpString); } } return( returnVal ); } /* end Check4EnoughMemory */ /***************************************************************************** * * StripEmbeddedNulls - removes any embedded NULLs (\0) in a string of length * 'length'. The removal occurs in place, with 'length' set to the * new, stripped length. The resulting string is terminated with a * trailing NULL. * * Returns DtEDITOR_NO_ERRORS - the string did not contain any embedded NULLs * DtEDITOR_NULLS_REMOVED - the string did contain embedded * NULLs that were removed. * *****************************************************************************/ static DtEditorErrorCode StripEmbeddedNulls( char *stringData, int *length) { DtEditorErrorCode returnVal = DtEDITOR_NO_ERRORS; if (strlen(stringData) != *length) { int firstNull; returnVal = DtEDITOR_NULLS_REMOVED; /* * The file contains NULL characters, so we strip them out and * report that we have done so. */ while((firstNull = strlen(stringData)) != *length) { int lastNull = firstNull; while((lastNull + 1) < *length && stringData[lastNull + 1] == (char)'\0') lastNull++; memcpy(&stringData[firstNull], &stringData[lastNull + 1], *length - lastNull); *length -= 1 + lastNull - firstNull; } } return( returnVal); } /* end StripEmbeddedNulls */ /***************************************************************************** * * Retrieves the current location of the insert cursor * *****************************************************************************/ XmTextPosition DtEditorGetInsertionPosition( Widget widget) { DtEditorWidget editor = (DtEditorWidget) widget; XmTextPosition result; _DtWidgetToAppContext(widget); _DtAppLock(app); result = XmTextGetInsertionPosition(M_text(editor)); _DtAppUnlock(app); return result; } /***************************************************************************** * * Retrieves the current location of the last character in the widget * *****************************************************************************/ XmTextPosition DtEditorGetLastPosition( Widget widget) { DtEditorWidget editor = (DtEditorWidget) widget; XmTextPosition result; _DtWidgetToAppContext(widget); _DtAppLock(app); result = XmTextGetLastPosition(M_text(editor)); _DtAppUnlock(app); return result; } /***************************************************************************** * * Changes the current location of the insert cursor * *****************************************************************************/ void DtEditorSetInsertionPosition( Widget widget, XmTextPosition position) { DtEditorWidget editor = (DtEditorWidget) widget; _DtWidgetToAppContext(widget); _DtAppLock(app); XmTextSetInsertionPosition(M_text(editor), position); _DtAppUnlock(app); } static DtEditorErrorCode setStringValue( DtEditorWidget editor, char *data) { /* * Tell _DtEditorModifyVerifyCB() that we're replacing the entire * contents, so it doesn't try to save the current document in an * undo structure for a later undo. 
*/ M_loadingAllNewData(editor) = True; XmTextSetString( M_text(editor), data ); /* * If the _DtEditorModifyVerifyCB() did not get called, reset the * things which usually get reset there. The modifyVerify callback * will not get called if the contents are being set to a null string * and the widget is already empty. */ if (M_loadingAllNewData(editor) == True) { M_loadingAllNewData(editor) = False; M_unreadChanges(editor) = False; _DtEditorResetUndo(editor); } return( DtEDITOR_NO_ERRORS ); } /* end setStringValue */ static DtEditorErrorCode setDataValue( DtEditorWidget widget, void *rawData, int length) { DtEditorErrorCode status = DtEDITOR_NULL_ITEM, tmpError; /* * Validate input */ if (rawData != (void *)NULL) { /* * Check to see if we have a valid buffer size & enough memory to * load the buffer into the text widget. This is only an estimate * of our needs. * Check4EnoughMemory() returns DtEDITOR_NO_ERRORS, * DtEDITOR_ILLEGAL_SIZE, or DtEDITOR_INSUFFICIENT_MEMORY. */ status = Check4EnoughMemory( length ); if (status == DtEDITOR_NO_ERRORS) { /* * Convert the data buffer into a string & insert into the widget */ char *textData = (char *)XtMalloc(length + 1); memcpy( textData, rawData, length ); textData[length] = '\0'; /* * Strip out any embedded NULLs because the text widget will only * accept data up to the first NULL. * * StripEmbeddedNulls() returns DtEDITOR_NO_ERRORS or * DtEDITOR_NULLS_REMOVED */ status = StripEmbeddedNulls( textData, &length ); /* * Now, insert the converted string into the text widget */ tmpError = setStringValue( widget, textData ); if (tmpError != DtEDITOR_NO_ERRORS) status = tmpError; XtFree( (char *)textData ); } } return( status ); } /* end setDataValue */ static DtEditorErrorCode setWcharValue( DtEditorWidget editor, wchar_t *data) { DtEditorErrorCode status; wchar_t *tmp_wc; int result, num_chars=0; char *mb_value = (char *)NULL; /* * Convert the wide char string to a multi-byte string & stick it in * the text widget. */ /* * Determine how big the resulting mb string may be */ for (num_chars = 0, tmp_wc = data; *tmp_wc != (wchar_t)0L; num_chars++) tmp_wc++; /* * Check to see if we have enough memory to load the string * into the text widget. This is only an estimate of our needs. * status will be set to DtEDITOR_NO_ERRORS, DtEDITOR_ILLEGAL_SIZE, or * DtEDITOR_INSUFFICIENT_MEMORY. */ status = Check4EnoughMemory( (num_chars + 1) * MB_CUR_MAX ); if (status != DtEDITOR_NO_ERRORS) return status; mb_value = XtMalloc( (unsigned)(num_chars + 1) * MB_CUR_MAX ); /* * Convert the wchar string * If wcstombs fails it returns (size_t) -1, so pass in empty * string. */ result = wcstombs( mb_value, data, (num_chars + 1) * MB_CUR_MAX ); if (result == (size_t)-1) result = 0; /* * wcstombs doesn't guarantee string is NULL terminated */ mb_value[result] = 0; status = setStringValue( editor, mb_value ); XtFree(mb_value); return( status ); } /* end setWcharValue */ static DtEditorErrorCode insertStringValue( DtEditorWidget editor, char *data, LoadActionType typeOfInsert, XmTextPosition beginInsert, XmTextPosition endInsert) { int numInserted; switch( typeOfInsert ) { case INSERT_DATA: { beginInsert = endInsert = XmTextGetInsertionPosition( M_text(editor) ); break; } case APPEND_DATA: { beginInsert = endInsert = XmTextGetLastPosition( M_text(editor) ); break; } case REPLACE_DATA: { break; } default: { } } /* end switch */ /* * Insert/Replace/Append the data and move the insertion cursor to * the end of the inserted data. 
*/ numInserted = _DtEditor_CountCharacters( data, strlen(data) ); XmTextReplace(M_text(editor), beginInsert, endInsert, data); XmTextSetInsertionPosition( M_text(editor), (XmTextPosition)(beginInsert + numInserted) ); return( DtEDITOR_NO_ERRORS ); } /* insertStringValue */ static DtEditorErrorCode insertDataValue( DtEditorWidget widget, void *rawData, int length, LoadActionType typeOfInsert, XmTextPosition beginInsert, XmTextPosition endInsert) { DtEditorErrorCode status = DtEDITOR_NULL_ITEM, loadError; /* * Validate input */ if (rawData != (void *) NULL) { /* * Check to see if we have a valid buffer size & enough memory to * insert the buffer into the text widget. This is only an estimate * of our needs. * status will be set to DtEDITOR_NO_ERRORS, DtEDITOR_ILLEGAL_SIZE, or * DtEDITOR_INSUFFICIENT_MEMORY. */ status = Check4EnoughMemory( length ); if (status == DtEDITOR_NO_ERRORS) { /* * Convert the data buffer into a string & insert into the widget */ char *textData = (char *)XtMalloc(length + 1); memcpy( textData, rawData, length ); textData[length] = '\0'; /* * Strip out any embedded NULLs because the text widget will only * accept data up to the first NULL. * * StripEmbeddedNulls() returns DtEDITOR_NO_ERRORS or * DtEDITOR_NULLS_REMOVED */ status = StripEmbeddedNulls( textData, &length ); /* * Now, insert the converted string into the text widget */ loadError = insertStringValue( widget, textData, typeOfInsert, beginInsert, endInsert ); if (loadError != DtEDITOR_NO_ERRORS) status = loadError; XtFree( (char *)textData ); } } return( status ); } /* insertDataValue */ static DtEditorErrorCode insertWcharValue( DtEditorWidget editor, wchar_t *data, LoadActionType typeOfInsert, XmTextPosition beginInsert, XmTextPosition endInsert) { wchar_t *tmp_wc; int result, num_chars=0; char *mb_value = (char *)NULL; DtEditorErrorCode status; /* * Convert the wide char string to a multi-byte string & insert it into * the text widget. */ /* * Determine how big the resulting mb string may be */ for (num_chars = 0, tmp_wc = data; *tmp_wc != (wchar_t)0L; num_chars++) tmp_wc++; /* * Check to see if we have enough memory to insert the string * into the text widget. This is only an estimate of our needs. * status will be set to DtEDITOR_NO_ERRORS, DtEDITOR_ILLEGAL_SIZE, or * DtEDITOR_INSUFFICIENT_MEMORY. */ status = Check4EnoughMemory( (num_chars + 1) * MB_CUR_MAX ); if(status != DtEDITOR_NO_ERRORS) return status; mb_value = XtMalloc( (unsigned)(num_chars + 1) * MB_CUR_MAX ); /* * Convert the wchar string. * If wcstombs fails it returns (size_t) -1, so pass in empty * string. */ result = wcstombs( mb_value, data, (num_chars + 1) * MB_CUR_MAX ); if (result == (size_t)-1) result = 0; /* * wcstombs doesn't guarantee string is NULL terminated */ mb_value[result] = 0; status = insertStringValue( editor, mb_value, typeOfInsert, beginInsert, endInsert ); XtFree( mb_value ); return( status ); } /* insertWcharValue */ /*************************************************************************** * * DtEditorSetContents - sets the contents of the DtEditor widget. * * Inputs: widget to set the contents * * a data structure containing the data to put into the * widget. 
Depending upon the type of data being set, this * structure will contain various fields: * * string - \0-terminated string of characters * data - the data, the size of the data * * Returns 0 - contents were set sucessfully * !0 - an error occured while setting the contents * ***************************************************************************/ extern DtEditorErrorCode DtEditorSetContents( Widget widget, DtEditorContentRec *data ) { DtEditorErrorCode error = DtEDITOR_INVALID_TYPE; DtEditorWidget editor = (DtEditorWidget) widget; _DtWidgetToAppContext(widget); _DtAppLock(app); switch( data->type ) { case DtEDITOR_TEXT: { error = setStringValue ( editor, data->value.string ); break; } case DtEDITOR_DATA: { error = setDataValue ( editor, data->value.data.buf, data->value.data.length); break; } case DtEDITOR_WCHAR: { error = setWcharValue ( editor, data->value.wchar ); break; } default : { error = DtEDITOR_INVALID_TYPE; } } /* end switch */ /* * Update the current-line-display in the status line */ if (error == DtEDITOR_NO_ERRORS) _DtEditorUpdateLineDisplay(editor, 1, False ); _DtAppUnlock(app); return( error ); } /*************************************************************************** * * DtEditorSetContentsFromFile - read a data file, putting the contents * into a DtEditor widget. * * Inputs: widget to load the file into * * to indicate the type of contents loaded from the file: * string - a \0-terminated string of characters * data - untyped data * * filename - name of the file to read * * Returns 0 - contents were loaded sucessfully * !0 - an error occured while loading the contents * ***************************************************************************/ extern DtEditorErrorCode DtEditorSetContentsFromFile( Widget widget, char *fileName) { DtEditorErrorCode result; _DtWidgetToAppContext(widget); _DtAppLock(app); result = LoadFile(widget, fileName, LOAD_DATA, 0, 0); _DtAppUnlock(app); return result; } /*************************************************************************** * * DtEditorAppend - append data to the contents of the DtEditor widget. * * Inputs: widget to add to the contents * * a data structure containing the data to append to the * widget. Depending upon the type of data being set, this * structure will contain various fields: * * string - \0-terminated string of characters * data - the data, the size of the data * * Returns 0 - contents were set sucessfully * !0 - an error occured while setting the contents * ***************************************************************************/ extern DtEditorErrorCode DtEditorAppend( Widget widget, DtEditorContentRec *data ) { DtEditorErrorCode error = DtEDITOR_INVALID_TYPE; DtEditorWidget editor = (DtEditorWidget) widget; _DtWidgetToAppContext(widget); _DtAppLock(app); switch( data->type ) { case DtEDITOR_TEXT: { error = insertStringValue ( editor, data->value.string, APPEND_DATA, 0, 0 ); break; } case DtEDITOR_DATA: { error = insertDataValue ( editor, data->value.data.buf, data->value.data.length, APPEND_DATA, 0,0); break; } case DtEDITOR_WCHAR: { error = insertWcharValue ( editor, data->value.wchar, APPEND_DATA, 0, 0 ); break; } default: { error = DtEDITOR_INVALID_TYPE; } } /* end switch */ _DtAppUnlock(app); return( error ); } /*************************************************************************** * * DtEditorAppendFromFile - read a data file, appending the contents * into a DtEditor widget. 
* * Inputs: widget to append the file to * * to indicate the type of contents appended from the file: * string - a \0-terminated string of characters * data - untyped data * * filename - name of the file to read * * Returns 0 - contents were appended sucessfully * !0 - an error occured while appending the contents * ***************************************************************************/ extern DtEditorErrorCode DtEditorAppendFromFile( Widget widget, char *fileName) { DtEditorErrorCode result; _DtWidgetToAppContext(widget); _DtAppLock(app); result = LoadFile(widget, fileName, APPEND_DATA, 0, 0); _DtAppUnlock(app); return result; } /*************************************************************************** * * DtEditorInsert - insert data into the contents of the DtEditor widget. * * Inputs: widget to add to the contents * * a data structure containing the data to insert into the * widget. Depending upon the type of data being set, this * structure will contain various fields: * * string - \0-terminated string of characters * data - the data, the size of the data * * Returns 0 - contents were set sucessfully * !0 - an error occured while setting the contents * ***************************************************************************/ extern DtEditorErrorCode DtEditorInsert( Widget widget, DtEditorContentRec *data ) { DtEditorErrorCode error = DtEDITOR_INVALID_TYPE; DtEditorWidget editor = (DtEditorWidget) widget; _DtWidgetToAppContext(widget); _DtAppLock(app); switch( data->type ) { case DtEDITOR_TEXT: { error = insertStringValue ( editor, data->value.string, INSERT_DATA, 0, 0 ); break; } case DtEDITOR_DATA: { error = insertDataValue ( editor, data->value.data.buf, data->value.data.length, INSERT_DATA, 0,0); break; } case DtEDITOR_WCHAR: { error = insertWcharValue ( editor, data->value.wchar, INSERT_DATA, 0, 0 ); break; } default : { error = DtEDITOR_INVALID_TYPE; } } /* end switch */ _DtAppUnlock(app); return( error ); } /*************************************************************************** * * DtEditorInsertFromFile - read a data file, inserting the contents * into a DtEditor widget. * * Inputs: widget to insert the file to * * to indicate the type of contents inserted from the file: * string - a \0-terminated string of characters * data - untyped data * * filename - name of the file to read * * Returns 0 - contents were inserted sucessfully * !0 - an error occured while inserting the contents * ***************************************************************************/ extern DtEditorErrorCode DtEditorInsertFromFile( Widget widget, char *fileName) { DtEditorErrorCode result; _DtWidgetToAppContext(widget); _DtAppLock(app); result = LoadFile(widget, fileName, INSERT_DATA, 0, 0); _DtAppUnlock(app); return result; } /*************************************************************************** * * DtEditorReplace - replace a specified portion of the contents of the * DtEditor widget with the supplied data. * * Inputs: widget to replace a portion of its contents * * starting character position of the portion to replace * * ending character position of the portion to replace * * a data structure containing the data to replace some data * in the widget. 
Depending upon the type of data being set, * this structure will contain various fields: * * string - \0-terminated string of characters * data - the data, the size of the data * * * Returns 0 - the portion was replaced successfully * !0 - an error occurred while replacing the portion * ***************************************************************************/ extern DtEditorErrorCode DtEditorReplace( Widget widget, XmTextPosition startPos, XmTextPosition endPos, DtEditorContentRec *data) { DtEditorErrorCode error = DtEDITOR_INVALID_TYPE; DtEditorWidget editor = (DtEditorWidget) widget; XmTextWidget tw; _DtWidgetToAppContext(widget); _DtAppLock(app); tw = (XmTextWidget) M_text(editor); if( startPos < 0 ) startPos = 0; if( startPos > tw->text.last_position ) startPos = tw->text.last_position; if( endPos < 0 ) endPos = 0; if( endPos > tw->text.last_position ) endPos = tw->text.last_position; if( startPos > endPos ) { error = DtEDITOR_INVALID_RANGE; } else { switch( data->type ) { case DtEDITOR_TEXT: { error = insertStringValue ( editor, data->value.string, REPLACE_DATA, startPos, endPos ); break; } case DtEDITOR_DATA: { error = insertDataValue ( editor, data->value.data.buf, data->value.data.length, REPLACE_DATA, startPos, endPos ); break; } case DtEDITOR_WCHAR: { error = insertWcharValue ( editor, data->value.wchar, REPLACE_DATA, startPos, endPos ); break; } default : { error = DtEDITOR_INVALID_TYPE; } } /* end switch */ } _DtAppUnlock(app); return( error ); } /*************************************************************************** * * DtEditorReplaceFromFile - read a data file, using the contents to replace * a specified portion of the contents of a * DtEditor widget. * * Inputs: widget to insert the file to * * starting character position of the portion to replace * * ending character position of the portion to replace * * to indicate the type of contents inserted from the file: * string - a \0-terminated string of characters * data - untyped data * * filename - local name of the file to read * * Returns 0 - contents were inserted successfully * !0 - an error occurred while inserting the contents * ***************************************************************************/ extern DtEditorErrorCode DtEditorReplaceFromFile( Widget widget, XmTextPosition startPos, XmTextPosition endPos, char *fileName) { DtEditorWidget editor = (DtEditorWidget) widget; XmTextWidget tw; DtEditorErrorCode result; _DtWidgetToAppContext(widget); _DtAppLock(app); tw = (XmTextWidget) M_text(editor); if( startPos < 0) startPos = 0; if( startPos > tw->text.last_position ) startPos = tw->text.last_position; if( endPos < 0 ) endPos = 0; if( endPos > tw->text.last_position ) endPos = tw->text.last_position; if(startPos > endPos) { result = DtEDITOR_INVALID_RANGE; } else { result = LoadFile(widget, fileName, REPLACE_DATA, startPos, endPos); } _DtAppUnlock(app); return result; } /*************************************************************************** * * _DtEditorValidateFileAccess - check to see if file exists, whether we * can get to it, and whether it is readable * or writable. * * Note: does not check whether files for reading are read only. * * Inputs: filename - name of the local file to read * flag indicating whether we want to read or write * the file. * * Returns 0 file exists & we have read or write permissions. * * >0 if file cannot be read from/written to.
* errno is set to one of the following values: * * General errors: * DtEDITOR_INVALID_FILENAME - 0 length filename * DtEDITOR_NONEXISTENT_FILE - file does not exist * (Note: this may not be considered an error when saving * to a file. The file may just need to be created.) * DtEDITOR_NO_FILE_ACCESS - cannot stat existing file * DtEDITOR_DIRECTORY - file is a directory * DtEDITOR_CHAR_SPECIAL_FILE - file is a device special file * DtEDITOR_BLOCK_MODE_FILE - file is a block mode file * * additional READ_ACCESS errors: * DtEDITOR_UNREADABLE_FILE - * * additional WRITE_ACCESS errors: * DtEDITOR_UNWRITABLE_FILE - * file or directory is write protected for * another reason * ***************************************************************************/ extern DtEditorErrorCode _DtEditorValidateFileAccess( char *fileName, int accessType ) { struct stat statbuf; /* Information on a file. */ DtEditorErrorCode error = DtEDITOR_INVALID_FILENAME; /* * First, make sure we were given a name */ if (fileName && *fileName ) { /* * Does the file already exist? */ if ( access(fileName, F_OK) != 0 ) error = DtEDITOR_NONEXISTENT_FILE; else { error = DtEDITOR_NO_ERRORS; /* * The file exists, so lets do some type checking */ if( stat(fileName, &statbuf) != 0 ) error = DtEDITOR_NO_FILE_ACCESS; else { /* if its a directory - can't save */ if( (statbuf.st_mode & S_IFMT) == S_IFDIR ) { error = DtEDITOR_DIRECTORY; return( error ); } /* if its a character special device - can't save */ if( (statbuf.st_mode & S_IFMT) == S_IFCHR ) { error = DtEDITOR_CHAR_SPECIAL_FILE; return( error ); } /* if its a block mode device - can't save */ if((statbuf.st_mode & S_IFMT) == S_IFBLK) { error = DtEDITOR_BLOCK_MODE_FILE; return( error ); } /* * We now know that it's a regular file so check to whether we * can read or write to it, as appropriate. */ switch( accessType ) { case READ_ACCESS: { if( access(fileName, R_OK) != 0 ) error = DtEDITOR_UNREADABLE_FILE; break; } case WRITE_ACCESS: { if( access(fileName, W_OK) == 0 ) { /* * Can write to it. */ error = DtEDITOR_WRITABLE_FILE; } else { /* * Can't write to it. */ error = DtEDITOR_UNWRITABLE_FILE; } /* end no write permission */ break; } default: { break; } } /* end switch */ } /* end stat suceeded */ } /* end file exists */ } /* end filename passed in */ return( error ); } /* end _DtEditorValidateFileAccess */ /************************************************************************ * * LoadFile - Check if file exists, whether we can get to it, etc. * If so, type and read its contents. * * Inputs: widget to set, add, or insert contents of file into * * name of file to read * * type of file (NULL). 
This will be set by LoadFile * * action to perform with the data (load, append, insert, * replace a portion, attach) * * The following information will be used if the file * contents will replace a portion of the widget's contents: * * starting character position of the portion to replace * * ending character position of the portion to replace * * Returns: DtEDITOR_NO_ERRORS - file was read successfully * DtEDITOR_READ_ONLY_FILE - file was read successfully but * is read only * DtEDITOR_DIRECTORY - the file is a directory * DtEDITOR_CHAR_SPECIAL_FILE - the file is a character * special device * DtEDITOR_BLOCK_MODE_FILE - the file is a block mode device * DtEDITOR_NONEXISTENT_FILE - file does not exist * DtEDITOR_NULLS_REMOVED - file contained embedded NULLs * that were removed * DtEDITOR_INSUFFICIENT_MEMORY - unable to allocate * enough memory for contents of file * ************************************************************************/ static DtEditorErrorCode LoadFile( Widget w, char *fileName, LoadActionType action, XmTextPosition startReplace, XmTextPosition endReplace ) { DtEditorContentRec cr; /* Structure for passing data to widget */ struct stat statbuf; /* Information on a file. */ int file_length; /* Length of file. */ FILE *fp = NULL; /* Pointer to open file */ DtEditorErrorCode returnVal = DtEDITOR_NONEXISTENT_FILE; /* Error accessing file & reading contents */ DtEditorErrorCode loadError=DtEDITOR_NO_ERRORS; /* Error from placing bits into text widget */ /* * First, make sure we were given a name */ if (fileName && *fileName ) { /* * Can we read the file? */ returnVal = _DtEditorValidateFileAccess( fileName, READ_ACCESS ); if( returnVal == DtEDITOR_NO_ERRORS ) { /* * Open the file for reading. If we can read/write, then we're * cool, otherwise we might need to tell the user that the * file's read-only, or that we can't even read from it. */ if( (fp = fopen(fileName, "r+")) == NULL ) { /* * We can't update (read/write) the file so try opening read- * only */ if( (fp = fopen(fileName, "r")) == NULL ) { /* * We can't read from the file. */ return ( DtEDITOR_UNREADABLE_FILE ); } else { /* * Tell the application that the file's read-only. * Be careful not to overwrite this value with one of the calls * to set the widget's contents. */ returnVal = DtEDITOR_READ_ONLY_FILE; } } /* end open for read/write */ } /* end try to read the file */ } /* end filename check */ /* If a file is open, get the bytes */ if ( fp ) { stat( fileName, &statbuf ); file_length = statbuf.st_size; /* * Check to see if we have enough memory to load the file contents * into the text widget. This is only an estimate of our needs. * Check4EnoughMemory() returns DtEDITOR_NO_ERRORS, * DtEDITOR_ILLEGAL_SIZE, or DtEDITOR_INSUFFICIENT_MEMORY. */ loadError = Check4EnoughMemory( file_length ); if (loadError == DtEDITOR_INSUFFICIENT_MEMORY) returnVal = loadError; else { /* * Read the file contents (with room for null) & convert to a * string. We want to use a string because the * DtEditorSetContents/Append/Insert/... functions create another * copy of the data before actually putting it into the widget. */ char *file_string = (char*) XtMalloc(file_length + 1); file_length = fread(file_string, sizeof(char), file_length, fp); file_string[file_length] = '\0'; /* * Strip out any embedded NULLs because the text widget will only * accept data up to the first NULL.
* * StripEmbeddedNulls() returns DtEDITOR_NO_ERRORS or * DtEDITOR_NULLS_REMOVED */ loadError = StripEmbeddedNulls( file_string, &file_length ); if ( loadError != DtEDITOR_NO_ERRORS ) returnVal = loadError; /* * Insert it as a string, otherwise the following DtEditor*() * functions will make another copy of the data. */ cr.type = DtEDITOR_TEXT; cr.value.string = file_string; /* * Load, insert, append, or attach the file, as specified */ switch( action ) { case LOAD_DATA: { loadError = DtEditorSetContents ( w, &cr ); break; } case INSERT_DATA: { loadError = DtEditorInsert ( w, &cr ); break; } case APPEND_DATA: { loadError = DtEditorAppend ( w, &cr ); break; } case REPLACE_DATA: { loadError = DtEditorReplace(w, startReplace, endReplace, &cr); break; } default: { } } /* end switch */ if ( loadError != DtEDITOR_NO_ERRORS ) returnVal = loadError; /* * The file is loaded, clean up. */ XtFree( file_string ); } /* end there is enough memory */ /* Close the file */ fclose(fp); } /* end if a file is open */ return( returnVal ); } /* end LoadFile */ static char * StringAdd( char *destination, char *source, int number) { memcpy(destination, source, number); destination[number] = (char)'\0'; destination += number; return destination; } /*************************************************************************** * * CopySubstring - copies out a portion of the text, optionally * adding newlines at any and all wordwrap-caused * "virtual" lines. * * Inputs: widget from which we get the data to write; * startPos determines the first character to write out; * endPos determines the last character to write out; * buf is the character buffer into which we write. It * is assumed to be large enough - be careful. * addNewlines specifies whether to add '/n' to "virtual" lines. * Returns Nuthin' * * ***************************************************************************/ static char * CopySubstring( XmTextWidget widget, XmTextPosition startPos, XmTextPosition endPos, char *buf, Boolean addNewlines) { register XmTextLineTable line_table = widget->text.line_table; int currLine, firstLine; char *pString, *pCurrChar, *pLastChar; int numToCopy; if(startPos < 0) startPos = 0; if(startPos > widget->text.last_position) startPos = widget->text.last_position; if(endPos < 0) endPos = 0; if(endPos > widget->text.last_position) endPos = widget->text.last_position; if(startPos > endPos) return buf; pString = XmTextGetString((Widget)widget); if(addNewlines == False) { pCurrChar = _DtEditorGetPointer(pString, startPos); pLastChar = _DtEditorGetPointer(pString, endPos); numToCopy = pLastChar - pCurrChar + mblen(pLastChar, MB_CUR_MAX); buf = StringAdd(buf, pCurrChar, numToCopy); } else { int *mb_str_loc, total, z, siz; char *bptr; mb_str_loc = (int *) XtMalloc(sizeof(int) * ((endPos-startPos)+1)); if (NULL == mb_str_loc) { /* Should figure out some way to pass back an error code. */ buf = CopySubstring(widget, startPos, endPos, buf, False); return buf; } /* * mb_str_loc[] is being used to replace the call * to _DtEditorGetPointer. That function used * mbtowc() to count the number of chars between the * beginning of pString and startChar. The problem * was that it sat in a loop and was also called for * every line, so it was SLOW. Now, we count once * and store the results in mb_str_loc[]. */ /* Because startPos may not always == 0: */ /* mb_str_loc[0] = startPos */ /* mb_str_loc[endPos - startPos] = endPos */ /* */ /* So when accessing items, dereference off of */ /* startPos. 
*/ mb_str_loc[0] = 0; for(total=0, bptr=pString, z=1; z <= (endPos - startPos); bptr += siz, z++) { if (MB_CUR_MAX > 1) { if ( (siz = mblen(bptr, MB_CUR_MAX)) < 0) { siz = 1; total += 1; } else total += siz; } else { siz = 1; total += 1; } mb_str_loc[z] = total; } firstLine = currLine = _DtEditorGetLineIndex(widget, startPos); do { if(startPos > (XmTextPosition)line_table[currLine].start_pos) pCurrChar = pString + mb_str_loc[0]; else { z = line_table[currLine].start_pos; pCurrChar = pString + mb_str_loc[z - startPos]; } if(addNewlines == True && currLine > firstLine && line_table[currLine].virt_line != 0) { buf[0] = (char)'\n'; buf[1] = (char)'\0'; buf++; } if(currLine >= (widget->text.total_lines - 1)) pLastChar = pString + mb_str_loc[endPos - startPos]; else if((XmTextPosition)line_table[currLine + 1].start_pos <= endPos) { z = line_table[currLine+1].start_pos - 1; pLastChar = pString + mb_str_loc[z - startPos]; } else pLastChar = pString + mb_str_loc[endPos - startPos]; numToCopy = pLastChar - pCurrChar + mblen(pLastChar, MB_CUR_MAX); buf = StringAdd(buf, pCurrChar, numToCopy); currLine++; } while(currLine < widget->text.total_lines && (XmTextPosition)line_table[currLine].start_pos <= endPos); XtFree((char*)mb_str_loc); } XtFree(pString); return buf; } /************************************************************************* * * _DtEditorCopyDataOut - Writes the entire text editor buffer contents to * the specified character array. * * Inputs: tw, to supply the data. * buf, specifying the array to which to write the data. * *************************************************************************/ static char * _DtEditorCopyDataOut( XmTextWidget tw, char *buf, Boolean addNewlines) { buf = CopySubstring(tw, 0, tw->text.last_position, buf, addNewlines); return buf; } static DtEditorErrorCode getStringValue( DtEditorWidget editor, char **buf, Boolean insertNewlines) { XmTextWidget tw = (XmTextWidget) M_text(editor); int bufSize; char *lastChar; DtEditorErrorCode returnVal = DtEDITOR_NO_ERRORS; /* * Calculate the size of the buffer we need for the data. * 1. Start with MB_CUR_MAX for each char in the text. * 3. Add in 1 char for each line, if we have to insert newlines. * 4. Add 1 for a terminating NULL. 
*/ bufSize = tw->text.last_position * MB_CUR_MAX; if(insertNewlines == True) bufSize += tw->text.total_lines; bufSize += 1; returnVal = Check4EnoughMemory(bufSize); if (DtEDITOR_NO_ERRORS != returnVal) return returnVal; *buf = (char *) XtMalloc(bufSize); lastChar = _DtEditorCopyDataOut(tw, *buf, insertNewlines); return returnVal; } /* end getStringValue */ static DtEditorErrorCode getDataValue( DtEditorWidget editor, void **buf, unsigned int *size, Boolean insertNewlines) { DtEditorErrorCode error; error = getStringValue(editor, (char **)buf, insertNewlines); *size = strlen( *buf ); /* remember, strlen doesn't count \0 at end */ return( error ); } /* end getDataValue */ static DtEditorErrorCode getWcharValue( DtEditorWidget editor, wchar_t **data, Boolean insertNewlines) { DtEditorErrorCode error; char *mb_value; wchar_t *pWchar_value; int num_char, result; size_t nbytes; error = getStringValue(editor, &mb_value, insertNewlines); if (error == DtEDITOR_NO_ERRORS) { /* * Allocate space for the wide character string */ num_char = _DtEditor_CountCharacters(mb_value, strlen(mb_value)) + 1; nbytes = (size_t) num_char * sizeof(wchar_t); error = Check4EnoughMemory(nbytes); if (DtEDITOR_NO_ERRORS != error) return error; pWchar_value = (wchar_t*) XtMalloc(nbytes); /* * Convert the multi-byte string to wide character */ result = mbstowcs(pWchar_value, mb_value, num_char*sizeof(wchar_t) ); if (result < 0) pWchar_value[0] = 0L; *data = pWchar_value; XtFree( mb_value ); } return( error ); } /* end getWcharValue */ /*************************************************************************** * * DtEditorGetContents - gets the contents of the DtEditor widget. * * Inputs: widget to retrieve the contents * * pointer to a data structure indicating how the retrieved * data should be formatted. Depending upon the type of format, * this structure will contain various fields: * string - a NULL pointer (char *) to hold the data * a new container will be created. * data - void pointer to hold the data, unsigned int for the * size of the data, * a Boolean indicating whether Newline characters should be * inserted at the end of each line, in string format. * a Boolean indicating whether the unsaved changes * flag should be cleared. There may be times when an * application will want to request a copy of the contents * without affecting whether DtEditorCheckForUnsavedChanges * reports there are unsaved changes. * * Returns 0 - contents were retrieved successfully * !0 - an error occurred while retrieving the contents * * The structure passed in will be set according to the * requested format: * string - a \0-terminated string of characters with * optional Newlines * container - handle to a Bento container * data - the data, the size of the data * * The application is responsible for free'ing any data in the * above structure.
* ***************************************************************************/ extern DtEditorErrorCode DtEditorGetContents( Widget widget, DtEditorContentRec *data, Boolean hardCarriageReturns, Boolean markContentsAsSaved ) { DtEditorErrorCode error = DtEDITOR_INVALID_TYPE; DtEditorWidget editor = (DtEditorWidget) widget; _DtWidgetToAppContext(widget); _DtAppLock(app); switch( data->type ) { case DtEDITOR_TEXT: { error = getStringValue( editor, &(data->value.string), hardCarriageReturns ); break; } case DtEDITOR_DATA: { error = getDataValue( editor, &(data->value.data.buf), &(data->value.data.length), hardCarriageReturns ); break; } case DtEDITOR_WCHAR: { error = getWcharValue( editor, &(data->value.wchar), hardCarriageReturns ); break; } default : { error = DtEDITOR_INVALID_TYPE; } } /* end switch */ /* * If there were no errors, mark there are now no unsaved changes (unless * we were told not to). */ if ( error == DtEDITOR_NO_ERRORS && markContentsAsSaved == True ) M_unreadChanges( editor ) = False; _DtAppUnlock(app); return( error ); } /*************************************************************************** * * DtEditorSaveContentsToFile - saves the contents of the DtEditor * widget to a disc file as string/data * or a CDE Document (Bento container). * * Inputs: widget to retrieve the contents * * filename - name of the file to read * a Boolean indicating whether the file should be * overwritten if it currently exists. * a Boolean indicating whether Newline characters should be * inserted at the end of each line (string format only). * a Boolean indicating whether the the unsaved changes * flag should be cleared. There may be times when an * application will want to request a copy of the contents * without effecting whether DtEditorCheckForUnsavedChanges * reports there are unsaved changes. * * Returns DtEDITOR_NO_ERRORS - contents were saved sucessfully * DtEDITOR_UNWRITABLE_FILE - file is write protected * DtEDITOR_WRITABLE_FILE - file exists and the * overwriteIfExists parameter is False. * DtEDITOR_SAVE_FAILED - write to the file failed; check * disk space, etc. * OR any errors from DtEditorGetContents * ***************************************************************************/ extern DtEditorErrorCode DtEditorSaveContentsToFile( Widget widget, char *fileName, Boolean overwriteIfExists, Boolean hardCarriageReturns, Boolean markContentsAsSaved ) { FILE *pFile; DtEditorContentRec cr; /* Structure for retrieving contents of widget */ DtEditorWidget editor = (DtEditorWidget) widget; DtEditorErrorCode error = DtEDITOR_INVALID_FILENAME; _DtWidgetToAppContext(widget); _DtAppLock(app); /* * First, make sure we were given a name */ if (fileName && *fileName ) { /* * Can we save to the file? */ error = _DtEditorValidateFileAccess( fileName, WRITE_ACCESS ); if( error == DtEDITOR_NO_ERRORS || error == DtEDITOR_NONEXISTENT_FILE || error == DtEDITOR_WRITABLE_FILE ) { /* * Don't overwrite an existing file if we've been told not to */ if( error == DtEDITOR_WRITABLE_FILE && overwriteIfExists == False ) { _DtAppUnlock(app); return( error ); } /* * Open the file for writing */ if ( (pFile = fopen(fileName, "w")) == NULL ) { _DtAppUnlock(app); return( DtEDITOR_UNWRITABLE_FILE ); } else { /* * Save the unsaved changes flag so we can restore it if the write * to the file fails. */ Boolean saved_state = M_unreadChanges( editor ); /* * Now, get the contents of the widget and write it to the file, * depending upon the format requested. 
*/ cr.type = DtEDITOR_DATA; error = DtEditorGetContents( widget, &cr, hardCarriageReturns, markContentsAsSaved ); if ( error == DtEDITOR_NO_ERRORS ) { /* * Write it to the file */ size_t size_written = fwrite( cr.value.data.buf, 1, cr.value.data.length, pFile ); if( cr.value.data.length != size_written ) error = DtEDITOR_SAVE_FAILED; XtFree( cr.value.data.buf ); } fclose(pFile); if( error == DtEDITOR_SAVE_FAILED ) { /* * Restore the unsaved changes flag since the save failed */ M_unreadChanges( editor ) = saved_state; } } /* end file is writable */ } /* end filename is valid */ } _DtAppUnlock(app); return( error ); } /* end DtEditorSaveContentsToFile */ /* * _DtEditorGetPointer returns a pointer to the _character_ * numbered by startChar within the string pString. * It accounts for possible multibyte chars. */ char * _DtEditorGetPointer( char *pString, int startChar) { char *bptr; int curChar, char_size; if(MB_CUR_MAX > 1) { for(bptr = pString, curChar = 0; curChar < startChar && *bptr != (char)'\0'; curChar++, bptr += char_size) { if ( (char_size = mblen(bptr, MB_CUR_MAX)) < 0) char_size = 1; } } else { bptr = pString + startChar; } return bptr; } /* end _DtEditorGetPointer */
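The mblen()-based walk in _DtEditorGetPointer is the same pattern CopySubstring uses when it builds its byte-offset table. Below is a minimal self-contained sketch of that pattern outside the widget code; the function name advance_chars, the demo string, and main are illustrative additions, not part of CDE:

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>

/* Advance p by nchars characters, honoring multibyte encodings. */
static char *advance_chars(char *p, int nchars)
{
    int i, len;

    if (MB_CUR_MAX <= 1)
        return p + nchars;              /* single-byte locale: plain pointer math */
    for (i = 0; i < nchars && *p != '\0'; i++, p += len) {
        len = mblen(p, MB_CUR_MAX);     /* byte length of the next character */
        if (len < 0)
            len = 1;                    /* invalid byte: step over it, as the widget code does */
    }
    return p;
}

int main(void)
{
    char text[] = "abcdef";

    setlocale(LC_ALL, "");              /* so MB_CUR_MAX reflects the user's locale */
    printf("%s\n", advance_chars(text, 3)); /* prints "def" */
    return 0;
}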
sTeeLM/MINIME
toolkit/srpm/SOURCES/cde-2.2.4/lib/DtWidget/EditAreaData.c
C
gpl-2.0
49,947
#include <stdio.h> #include <l4/sys/kdebug.h> void fork_to_background(void); void fork_to_background(void) { printf("unimplemented: %s\n", __func__); enter_kdebug(); }
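fork_to_background() is stubbed out above because L4Re provides no fork(); for contrast, here is a hedged sketch of what this helper conventionally does on a full POSIX system (illustrative only, not part of this package):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Hedged sketch: classic POSIX daemonization, NOT the L4Re implementation. */
void fork_to_background(void)
{
    pid_t pid = fork();

    if (pid < 0) {
        perror("fork");
        exit(EXIT_FAILURE);
    }
    if (pid > 0)
        exit(EXIT_SUCCESS);     /* parent returns control to the shell */
    if (setsid() < 0)           /* child detaches into its own session */
        perror("setsid");
}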
MicroTrustRepos/microkernel
src/l4/pkg/ankh/examples/wget/l4re_helpers.c
C
gpl-2.0
171
/* * mm/page-writeback.c * * Copyright (C) 2002, Linus Torvalds. * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * * Contains functions related to writing back dirty pages at the * address_space level. * * 10Apr2002 Andrew Morton * Initial version */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/init.h> #include <linux/backing-dev.h> #include <linux/task_io_accounting_ops.h> #include <linux/blkdev.h> #include <linux/mpage.h> #include <linux/rmap.h> #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/smp.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/syscalls.h> #include <linux/buffer_head.h> /* __set_page_dirty_buffers */ #include <linux/pagevec.h> #include <linux/timer.h> #include <linux/sched/rt.h> #include <linux/mm_inline.h> #include <trace/events/writeback.h> #include "internal.h" /* * Sleep at most 200ms at a time in balance_dirty_pages(). */ #define MAX_PAUSE max(HZ/5, 1) /* * Try to keep balance_dirty_pages() call intervals higher than this many pages * by raising pause time to max_pause when falls below it. */ #define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10)) /* * Estimate write bandwidth at 200ms intervals. */ #define BANDWIDTH_INTERVAL max(HZ/5, 1) #define RATELIMIT_CALC_SHIFT 10 /* * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited * will look to see if it needs to force writeback or throttling. */ static long ratelimit_pages = 32; /* The following parameters are exported via /proc/sys/vm */ /* * Start background writeback (via writeback threads) at this percentage */ int dirty_background_ratio = 10; /* * dirty_background_bytes starts at 0 (disabled) so that it is a function of * dirty_background_ratio * the amount of dirtyable memory */ unsigned long dirty_background_bytes; /* * free highmem will not be subtracted from the total free memory * for calculating free ratios if vm_highmem_is_dirtyable is true */ int vm_highmem_is_dirtyable; /* * The generator of dirty data starts writeback at this percentage */ int vm_dirty_ratio = 20; /* * vm_dirty_bytes starts at 0 (disabled) so that it is a function of * vm_dirty_ratio * the amount of dirtyable memory */ unsigned long vm_dirty_bytes; /* * The interval between `kupdate'-style writebacks */ unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */ EXPORT_SYMBOL_GPL(dirty_writeback_interval); /* * The longest time for which data is allowed to remain dirty */ unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */ /* * Flag that makes the machine dump writes/reads and block dirtyings. */ int block_dump; /* * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies: * a full sync is triggered after this time elapses without any disk activity. */ int laptop_mode; EXPORT_SYMBOL(laptop_mode); /* End of sysctl-exported parameters */ unsigned long global_dirty_limit; /* * Scale the writeback cache size proportional to the relative writeout speeds. * * We do this by keeping a floating proportion between BDIs, based on page * writeback completions [end_page_writeback()]. Those devices that write out * pages fastest will get the larger share, while the slower will get a smaller * share. * * We use page writeout completions because we are interested in getting rid of * dirty pages. 
Having them written out is the primary goal. * * We introduce a concept of time, a period over which we measure these events, * because demand can/will vary over time. The length of this period itself is * measured in page writeback completions. * */ static struct fprop_global writeout_completions; static void writeout_period(unsigned long t); /* Timer for aging of writeout_completions */ static struct timer_list writeout_period_timer = TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0); static unsigned long writeout_period_time = 0; /* * Length of period for aging writeout fractions of bdis. This is an * arbitrarily chosen number. The longer the period, the slower fractions will * reflect changes in current writeout rate. */ #define VM_COMPLETIONS_PERIOD_LEN (3*HZ) /* * Work out the current dirty-memory clamping and background writeout * thresholds. * * The main aim here is to lower them aggressively if there is a lot of mapped * memory around. To avoid stressing page reclaim with lots of unreclaimable * pages. It is better to clamp down on writers than to start swapping, and * performing lots of scanning. * * We only allow 1/2 of the currently-unmapped memory to be dirtied. * * We don't permit the clamping level to fall below 5% - that is getting rather * excessive. * * We make sure that the background writeout level is below the adjusted * clamping level. */ /* * In a memory zone, there is a certain amount of pages we consider * available for the page cache, which is essentially the number of * free and reclaimable pages, minus some zone reserves to protect * lowmem and the ability to uphold the zone's watermarks without * requiring writeback. * * This number of dirtyable pages is the base value of which the * user-configurable dirty ratio is the effective number of pages that * are allowed to be actually dirtied. Per individual zone, or * globally by using the sum of dirtyable pages over all zones. * * Because the user is allowed to specify the dirty limit globally as * absolute number of bytes, calculating the per-zone dirty limit can * require translating the configured limit into a percentage of * global dirtyable memory first. */ /** * zone_dirtyable_memory - number of dirtyable pages in a zone * @zone: the zone * * Returns the zone's number of pages potentially available for dirty * page cache. This is the base value for the per-zone dirty limits. */ static unsigned long zone_dirtyable_memory(struct zone *zone) { unsigned long nr_pages; nr_pages = zone_page_state(zone, NR_FREE_PAGES); nr_pages -= min(nr_pages, zone->dirty_balance_reserve); nr_pages += zone_page_state(zone, NR_INACTIVE_FILE); nr_pages += zone_page_state(zone, NR_ACTIVE_FILE); return nr_pages; } static unsigned long highmem_dirtyable_memory(unsigned long total) { #ifdef CONFIG_HIGHMEM int node; unsigned long x = 0; for_each_node_state(node, N_HIGH_MEMORY) { struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; x += zone_dirtyable_memory(z); } /* * Unreclaimable memory (kernel memory or anonymous memory * without swap) can bring down the dirtyable pages below * the zone's dirty balance reserve and the above calculation * will underflow. However we still want to add in nodes * which are below threshold (negative values) to get a more * accurate calculation but make sure that the total never * underflows. */ if ((long)x < 0) x = 0; /* * Make sure that the number of highmem pages is never larger * than the number of the total dirtyable memory.
This can only * occur in very strange VM situations but we want to make sure * that this does not occur. */ return min(x, total); #else return 0; #endif } /** * global_dirtyable_memory - number of globally dirtyable pages * * Returns the global number of pages potentially available for dirty * page cache. This is the base value for the global dirty limits. */ static unsigned long global_dirtyable_memory(void) { unsigned long x; x = global_page_state(NR_FREE_PAGES); x -= min(x, dirty_balance_reserve); x += global_page_state(NR_INACTIVE_FILE); x += global_page_state(NR_ACTIVE_FILE); if (!vm_highmem_is_dirtyable) x -= highmem_dirtyable_memory(x); /* Subtract min_free_kbytes */ x -= min_t(unsigned long, x, min_free_kbytes >> (PAGE_SHIFT - 10)); return x + 1; /* Ensure that we never return 0 */ } /* * global_dirty_limits - background-writeback and dirty-throttling thresholds * * Calculate the dirty thresholds based on sysctl parameters * - vm.dirty_background_ratio or vm.dirty_background_bytes * - vm.dirty_ratio or vm.dirty_bytes * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and * real-time tasks. */ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) { unsigned long background; unsigned long dirty; unsigned long uninitialized_var(available_memory); struct task_struct *tsk; if (!vm_dirty_bytes || !dirty_background_bytes) available_memory = global_dirtyable_memory(); if (vm_dirty_bytes) dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); else dirty = (vm_dirty_ratio * available_memory) / 100; if (dirty_background_bytes) background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); else background = (dirty_background_ratio * available_memory) / 100; if (background >= dirty) background = dirty / 2; tsk = current; if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) { background += background / 4; dirty += dirty / 4; } *pbackground = background; *pdirty = dirty; trace_global_dirty_state(background, dirty); } /** * zone_dirty_limit - maximum number of dirty pages allowed in a zone * @zone: the zone * * Returns the maximum number of dirty pages allowed in a zone, based * on the zone's dirtyable memory. */ static unsigned long zone_dirty_limit(struct zone *zone) { unsigned long zone_memory = zone_dirtyable_memory(zone); struct task_struct *tsk = current; unsigned long dirty; if (vm_dirty_bytes) dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) * zone_memory / global_dirtyable_memory(); else dirty = vm_dirty_ratio * zone_memory / 100; if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) dirty += dirty / 4; return dirty; } /** * zone_dirty_ok - tells whether a zone is within its dirty limits * @zone: the zone to check * * Returns %true when the dirty pages in @zone are within the zone's * dirty limit, %false if the limit is exceeded. 
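 *
 * Illustrative numbers (mine, not from this file): with vm_dirty_ratio = 20
 * and zone_dirtyable_memory() = 100000 pages, zone_dirty_limit() comes to
 * 20000 pages (25000 for PF_LESS_THROTTLE or real-time tasks), and the zone
 * stays dirty-ok while dirty + unstable + writeback pages total at most that.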
*/ bool zone_dirty_ok(struct zone *zone) { unsigned long limit = zone_dirty_limit(zone); return zone_page_state(zone, NR_FILE_DIRTY) + zone_page_state(zone, NR_UNSTABLE_NFS) + zone_page_state(zone, NR_WRITEBACK) <= limit; } int dirty_background_ratio_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) dirty_background_bytes = 0; return ret; } int dirty_background_bytes_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) dirty_background_ratio = 0; return ret; } int dirty_ratio_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int old_ratio = vm_dirty_ratio; int ret; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write && vm_dirty_ratio != old_ratio) { writeback_set_ratelimit(); vm_dirty_bytes = 0; } return ret; } int dirty_bytes_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { unsigned long old_bytes = vm_dirty_bytes; int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write && vm_dirty_bytes != old_bytes) { writeback_set_ratelimit(); vm_dirty_ratio = 0; } return ret; } static unsigned long wp_next_time(unsigned long cur_time) { cur_time += VM_COMPLETIONS_PERIOD_LEN; /* 0 has a special meaning... */ if (!cur_time) return 1; return cur_time; } /* * Increment the BDI's writeout completion count and the global writeout * completion count. Called from test_clear_page_writeback(). */ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi) { __inc_bdi_stat(bdi, BDI_WRITTEN); __fprop_inc_percpu_max(&writeout_completions, &bdi->completions, bdi->max_prop_frac); /* First event after period switching was turned off? */ if (!unlikely(writeout_period_time)) { /* * We can race with other __bdi_writeout_inc calls here but * it does not cause any harm since the resulting time when * timer will fire and what is in writeout_period_time will be * roughly the same. */ writeout_period_time = wp_next_time(jiffies); mod_timer(&writeout_period_timer, writeout_period_time); } } void bdi_writeout_inc(struct backing_dev_info *bdi) { unsigned long flags; local_irq_save(flags); __bdi_writeout_inc(bdi); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(bdi_writeout_inc); /* * Obtain an accurate fraction of the BDI's portion. */ static void bdi_writeout_fraction(struct backing_dev_info *bdi, long *numerator, long *denominator) { fprop_fraction_percpu(&writeout_completions, &bdi->completions, numerator, denominator); } /* * On idle system, we can be called long after we scheduled because we use * deferred timers so count with missed periods. */ static void writeout_period(unsigned long t) { int miss_periods = (jiffies - writeout_period_time) / VM_COMPLETIONS_PERIOD_LEN; if (fprop_new_period(&writeout_completions, miss_periods + 1)) { writeout_period_time = wp_next_time(writeout_period_time + miss_periods * VM_COMPLETIONS_PERIOD_LEN); mod_timer(&writeout_period_timer, writeout_period_time); } else { /* * Aging has zeroed all fractions. Stop wasting CPU on period * updates. */ writeout_period_time = 0; } } /* * bdi_min_ratio keeps the sum of the minimum dirty shares of all * registered backing devices, which, for obvious reasons, can not * exceed 100%. 
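 *
 * For example (illustrative): if registered devices already hold a combined
 * min_ratio of 90, a further request for 20 more fails with -EINVAL, because
 * the sum may not reach 100%.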
*/ static unsigned int bdi_min_ratio = 5; int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio) { int ret = 0; spin_lock_bh(&bdi_lock); if (min_ratio > bdi->max_ratio) { ret = -EINVAL; } else { min_ratio -= bdi->min_ratio; if (bdi_min_ratio + min_ratio < 100) { bdi_min_ratio += min_ratio; bdi->min_ratio += min_ratio; } else { ret = -EINVAL; } } spin_unlock_bh(&bdi_lock); return ret; } int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio) { int ret = 0; if (max_ratio > 100) return -EINVAL; spin_lock_bh(&bdi_lock); if (bdi->min_ratio > max_ratio) { ret = -EINVAL; } else { bdi->max_ratio = max_ratio; bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100; } spin_unlock_bh(&bdi_lock); return ret; } EXPORT_SYMBOL(bdi_set_max_ratio); static unsigned long dirty_freerun_ceiling(unsigned long thresh, unsigned long bg_thresh) { return (thresh + bg_thresh) / 2; } static unsigned long hard_dirty_limit(unsigned long thresh) { return max(thresh, global_dirty_limit); } /** * bdi_dirty_limit - @bdi's share of dirty throttling threshold * @bdi: the backing_dev_info to query * @dirty: global dirty limit in pages * * Returns @bdi's dirty limit in pages. The term "dirty" in the context of * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages. * * Note that balance_dirty_pages() will only seriously take it as a hard limit * when sleeping max_pause per page is not enough to keep the dirty pages under * control. For example, when the device is completely stalled due to some error * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key. * In the other normal situations, it acts more gently by throttling the tasks * more (rather than completely block them) when the bdi dirty pages go high. * * It allocates high/low dirty limits to fast/slow devices, in order to prevent * - starving fast devices * - piling up dirty pages (that will take long time to sync) on slow devices * * The bdi's share of dirty limit will be adapting to its throughput and * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set. */ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty) { u64 bdi_dirty; long numerator, denominator; /* * Calculate this BDI's share of the dirty ratio. */ bdi_writeout_fraction(bdi, &numerator, &denominator); bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100; bdi_dirty *= numerator; do_div(bdi_dirty, denominator); bdi_dirty += (dirty * bdi->min_ratio) / 100; if (bdi_dirty > (dirty * bdi->max_ratio) / 100) bdi_dirty = dirty * bdi->max_ratio / 100; return bdi_dirty; } /* * setpoint - dirty 3 * f(dirty) := 1.0 + (----------------) * limit - setpoint * * it's a 3rd order polynomial that subjects to * * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast * (2) f(setpoint) = 1.0 => the balance point * (3) f(limit) = 0 => the hard limit * (4) df/dx <= 0 => negative feedback control * (5) the closer to setpoint, the smaller |df/dx| (and the reverse) * => fast response on large errors; small oscillation near setpoint */ static inline long long pos_ratio_polynom(unsigned long setpoint, unsigned long dirty, unsigned long limit) { long long pos_ratio; long x; x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, limit - setpoint + 1); pos_ratio = x; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio += 1 << RATELIMIT_CALC_SHIFT; return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT); } /* * Dirty position control. 
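 *
 * A worked example with illustrative numbers (mine, not from this file):
 * freerun = 1000 and limit = 3000 pages give setpoint = 2000. In
 * pos_ratio_polynom(), x = (setpoint - dirty) / (limit - setpoint + 1) in
 * fixed point and pos_ratio = x^3 + 1, so dirty = 2000 yields 1.0,
 * dirty = 3000 yields ~0, and dirty = 1000 yields ~2.0, matching
 * constraints (1)-(3) above.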
* * (o) global/bdi setpoints * * We want the dirty pages be balanced around the global/bdi setpoints. * When the number of dirty pages is higher/lower than the setpoint, the * dirty position control ratio (and hence task dirty ratelimit) will be * decreased/increased to bring the dirty pages back to the setpoint. * * pos_ratio = 1 << RATELIMIT_CALC_SHIFT * * if (dirty < setpoint) scale up pos_ratio * if (dirty > setpoint) scale down pos_ratio * * if (bdi_dirty < bdi_setpoint) scale up pos_ratio * if (bdi_dirty > bdi_setpoint) scale down pos_ratio * * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT * * (o) global control line * * ^ pos_ratio * | * | |<===== global dirty control scope ======>| * 2.0 .............* * | .* * | . * * | . * * | . * * | . * * | . * * 1.0 ................................* * | . . * * | . . * * | . . * * | . . * * | . . * * 0 +------------.------------------.----------------------*-------------> * freerun^ setpoint^ limit^ dirty pages * * (o) bdi control line * * ^ pos_ratio * | * | * * | * * | * * | * * | * |<=========== span ============>| * 1.0 .......................* * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * 1/4 ...............................................* * * * * * * * * * * * * | . . * | . . * | . . * 0 +----------------------.-------------------------------.-------------> * bdi_setpoint^ x_intercept^ * * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can * be smoothly throttled down to normal if it starts high in situations like * - start writing to a slow SD card and a fast disk at the same time. The SD * card's bdi_dirty may rush to many times higher than bdi_setpoint. * - the bdi dirty thresh drops quickly due to change of JBOD workload */ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, unsigned long thresh, unsigned long bg_thresh, unsigned long dirty, unsigned long bdi_thresh, unsigned long bdi_dirty) { unsigned long write_bw = bdi->avg_write_bandwidth; unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh); unsigned long limit = hard_dirty_limit(thresh); unsigned long x_intercept; unsigned long setpoint; /* dirty pages' target balance point */ unsigned long bdi_setpoint; unsigned long span; long long pos_ratio; /* for scaling up/down the rate limit */ long x; if (unlikely(dirty >= limit)) return 0; /* * global setpoint * * See comment for pos_ratio_polynom(). */ setpoint = (freerun + limit) / 2; pos_ratio = pos_ratio_polynom(setpoint, dirty, limit); /* * The strictlimit feature is a tool preventing mistrusted filesystems * from growing a large number of dirty pages before throttling. For * such filesystems balance_dirty_pages always checks bdi counters * against bdi limits. Even if global "nr_dirty" is under "freerun". * This is especially important for fuse which sets bdi->max_ratio to * 1% by default. Without strictlimit feature, fuse writeback may * consume arbitrary amount of RAM because it is accounted in * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty". * * Here, in bdi_position_ratio(), we calculate pos_ratio based on * two values: bdi_dirty and bdi_thresh. Let's consider an example: * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global * limits are set by default to 10% and 20% (background and throttle). * Then bdi_thresh is 1% of 20% of 16GB. This amounts to ~8K pages. * bdi_dirty_limit(bdi, bg_thresh) is about ~4K pages. 
bdi_setpoint is * about ~6K pages (as the average of background and throttle bdi * limits). The 3rd order polynomial will provide positive feedback if * bdi_dirty is under bdi_setpoint and vice versa. * * Note, that we cannot use global counters in these calculations * because we want to throttle process writing to a strictlimit BDI * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB * in the example above). */ if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) { long long bdi_pos_ratio; unsigned long bdi_bg_thresh; if (bdi_dirty < 8) return min_t(long long, pos_ratio * 2, 2 << RATELIMIT_CALC_SHIFT); if (bdi_dirty >= bdi_thresh) return 0; bdi_bg_thresh = div_u64((u64)bdi_thresh * bg_thresh, thresh); bdi_setpoint = dirty_freerun_ceiling(bdi_thresh, bdi_bg_thresh); if (bdi_setpoint == 0 || bdi_setpoint == bdi_thresh) return 0; bdi_pos_ratio = pos_ratio_polynom(bdi_setpoint, bdi_dirty, bdi_thresh); /* * Typically, for strictlimit case, bdi_setpoint << setpoint * and pos_ratio >> bdi_pos_ratio. In the other words global * state ("dirty") is not limiting factor and we have to * make decision based on bdi counters. But there is an * important case when global pos_ratio should get precedence: * global limits are exceeded (e.g. due to activities on other * BDIs) while given strictlimit BDI is below limit. * * "pos_ratio * bdi_pos_ratio" would work for the case above, * but it would look too non-natural for the case of all * activity in the system coming from a single strictlimit BDI * with bdi->max_ratio == 100%. * * Note that min() below somewhat changes the dynamics of the * control system. Normally, pos_ratio value can be well over 3 * (when globally we are at freerun and bdi is well below bdi * setpoint). Now the maximum pos_ratio in the same situation * is 2. We might want to tweak this if we observe the control * system is too slow to adapt. */ return min(pos_ratio, bdi_pos_ratio); } /* * We have computed basic pos_ratio above based on global situation. If * the bdi is over/under its share of dirty pages, we want to scale * pos_ratio further down/up. That is done by the following mechanism. */ /* * bdi setpoint * * f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint) * * x_intercept - bdi_dirty * := -------------------------- * x_intercept - bdi_setpoint * * The main bdi control line is a linear function that subjects to * * (1) f(bdi_setpoint) = 1.0 * (2) k = - 1 / (8 * write_bw) (in single bdi case) * or equally: x_intercept = bdi_setpoint + 8 * write_bw * * For single bdi case, the dirty pages are observed to fluctuate * regularly within range * [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2] * for various filesystems, where (2) can yield in a reasonable 12.5% * fluctuation range for pos_ratio. * * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its * own size, so move the slope over accordingly and choose a slope that * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh. */ if (unlikely(bdi_thresh > thresh)) bdi_thresh = thresh; /* * It's very possible that bdi_thresh is close to 0 not because the * device is slow, but that it has remained inactive for long time. * Honour such devices a reasonable good (hopefully IO efficient) * threshold, so that the occasional writes won't be blocked and active * writes can rampup the threshold quickly. 
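 *
 * E.g. (illustrative): with limit = 3000 and dirty = 1000 pages, an idle
 * device still gets a floor of (3000 - 1000) / 8 = 250 pages for bdi_thresh.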
*/ bdi_thresh = max(bdi_thresh, (limit - dirty) / 8); /* * scale global setpoint to bdi's: * bdi_setpoint = setpoint * bdi_thresh / thresh */ x = div_u64((u64)bdi_thresh << 16, thresh + 1); bdi_setpoint = setpoint * (u64)x >> 16; /* * Use span=(8*write_bw) in single bdi case as indicated by * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case. * * bdi_thresh thresh - bdi_thresh * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh * thresh thresh */ span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16; x_intercept = bdi_setpoint + span; if (bdi_dirty < x_intercept - span / 4) { pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty), x_intercept - bdi_setpoint + 1); } else pos_ratio /= 4; /* * bdi reserve area, safeguard against dirty pool underrun and disk idle * It may push the desired control point of global dirty pages higher * than setpoint. */ x_intercept = bdi_thresh / 2; if (bdi_dirty < x_intercept) { if (bdi_dirty > x_intercept / 8) pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty); else pos_ratio *= 8; } return pos_ratio; } static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, unsigned long elapsed, unsigned long written) { const unsigned long period = roundup_pow_of_two(3 * HZ); unsigned long avg = bdi->avg_write_bandwidth; unsigned long old = bdi->write_bandwidth; u64 bw; /* * bw = written * HZ / elapsed * * bw * elapsed + write_bandwidth * (period - elapsed) * write_bandwidth = --------------------------------------------------- * period * * @written may have decreased due to account_page_redirty(). * Avoid underflowing @bw calculation. */ bw = written - min(written, bdi->written_stamp); bw *= HZ; if (unlikely(elapsed > period)) { do_div(bw, elapsed); avg = bw; goto out; } bw += (u64)bdi->write_bandwidth * (period - elapsed); bw >>= ilog2(period); /* * one more level of smoothing, for filtering out sudden spikes */ if (avg > old && old >= (unsigned long)bw) avg -= (avg - old) >> 3; if (avg < old && old <= (unsigned long)bw) avg += (old - avg) >> 3; out: bdi->write_bandwidth = bw; bdi->avg_write_bandwidth = avg; } /* * The global dirtyable memory and dirty threshold could be suddenly knocked * down by a large amount (eg. on the startup of KVM in a swapless system). * This may throw the system into deep dirty exceeded state and throttle * heavy/light dirtiers alike. To retain good responsiveness, maintain * global_dirty_limit for tracking slowly down to the knocked down dirty * threshold. */ static void update_dirty_limit(unsigned long thresh, unsigned long dirty) { unsigned long limit = global_dirty_limit; /* * Follow up in one step. */ if (limit < thresh) { limit = thresh; goto update; } /* * Follow down slowly. Use the higher one as the target, because thresh * may drop below dirty. This is exactly the reason to introduce * global_dirty_limit which is guaranteed to lie above the dirty pages. 
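 *
 * Illustrative step (numbers mine): with global_dirty_limit = 10000 and
 * max(thresh, dirty) = 2000, one update lowers the limit by
 * (10000 - 2000) >> 5 = 250 pages, i.e. about 3% of the remaining gap per
 * BANDWIDTH_INTERVAL.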
*/ thresh = max(thresh, dirty); if (limit > thresh) { limit -= (limit - thresh) >> 5; goto update; } return; update: global_dirty_limit = limit; } static void global_update_bandwidth(unsigned long thresh, unsigned long dirty, unsigned long now) { static DEFINE_SPINLOCK(dirty_lock); static unsigned long update_time = INITIAL_JIFFIES; /* * check locklessly first to optimize away locking for the most time */ if (time_before(now, update_time + BANDWIDTH_INTERVAL)) return; spin_lock(&dirty_lock); if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) { update_dirty_limit(thresh, dirty); update_time = now; } spin_unlock(&dirty_lock); } /* * Maintain bdi->dirty_ratelimit, the base dirty throttle rate. * * Normal bdi tasks will be curbed at or below it in long term. * Obviously it should be around (write_bw / N) when there are N dd tasks. */ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi, unsigned long thresh, unsigned long bg_thresh, unsigned long dirty, unsigned long bdi_thresh, unsigned long bdi_dirty, unsigned long dirtied, unsigned long elapsed) { unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh); unsigned long limit = hard_dirty_limit(thresh); unsigned long setpoint = (freerun + limit) / 2; unsigned long write_bw = bdi->avg_write_bandwidth; unsigned long dirty_ratelimit = bdi->dirty_ratelimit; unsigned long dirty_rate; unsigned long task_ratelimit; unsigned long balanced_dirty_ratelimit; unsigned long pos_ratio; unsigned long step; unsigned long x; /* * The dirty rate will match the writeout rate in long term, except * when dirty pages are truncated by userspace or re-dirtied by FS. */ dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed; pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty); /* * task_ratelimit reflects each dd's dirty rate for the past 200ms. */ task_ratelimit = (u64)dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT; task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */ /* * A linear estimation of the "balanced" throttle rate. The theory is, * if there are N dd tasks, each throttled at task_ratelimit, the bdi's * dirty_rate will be measured to be (N * task_ratelimit). So the below * formula will yield the balanced rate limit (write_bw / N). * * Note that the expanded form is not a pure rate feedback: * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1) * but also takes pos_ratio into account: * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2) * * (1) is not realistic because pos_ratio also takes part in balancing * the dirty rate. Consider the state * pos_ratio = 0.5 (3) * rate = 2 * (write_bw / N) (4) * If (1) is used, it will stuck in that state! Because each dd will * be throttled at * task_ratelimit = pos_ratio * rate = (write_bw / N) (5) * yielding * dirty_rate = N * task_ratelimit = write_bw (6) * put (6) into (1) we get * rate_(i+1) = rate_(i) (7) * * So we end up using (2) to always keep * rate_(i+1) ~= (write_bw / N) (8) * regardless of the value of pos_ratio. As long as (8) is satisfied, * pos_ratio is able to drive itself to 1.0, which is not only where * the dirty count meet the setpoint, but also where the slope of * pos_ratio is most flat and hence task_ratelimit is least fluctuated. 
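 *
 * Hedged numeric sketch (not from this file): with write_bw = 25600 pages/s
 * and N = 8 tasks each observing task_ratelimit = 6400 pages/s, the measured
 * dirty_rate is ~51200 pages/s, so the division below gives
 * 6400 * 25600 / 51200 = 3200 pages/s = write_bw / N, exactly the balanced
 * rate that (8) calls for.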
*/ balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw, dirty_rate | 1); /* * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw */ if (unlikely(balanced_dirty_ratelimit > write_bw)) balanced_dirty_ratelimit = write_bw; /* * We could safely do this and return immediately: * * bdi->dirty_ratelimit = balanced_dirty_ratelimit; * * However to get a more stable dirty_ratelimit, the below elaborated * code makes use of task_ratelimit to filter out singular points and * limit the step size. * * The below code essentially only uses the relative value of * * task_ratelimit - dirty_ratelimit * = (pos_ratio - 1) * dirty_ratelimit * * which reflects the direction and size of dirty position error. */ /* * dirty_ratelimit will follow balanced_dirty_ratelimit iff * task_ratelimit is on the same side of dirty_ratelimit, too. * For example, when * - dirty_ratelimit > balanced_dirty_ratelimit * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint) * lowering dirty_ratelimit will help meet both the position and rate * control targets. Otherwise, don't update dirty_ratelimit if it will * only help meet the rate target. After all, what the users ultimately * feel and care are stable dirty rate and small position error. * * |task_ratelimit - dirty_ratelimit| is used to limit the step size * and filter out the singular points of balanced_dirty_ratelimit. Which * keeps jumping around randomly and can even leap far away at times * due to the small 200ms estimation period of dirty_rate (we want to * keep that period small to reduce time lags). */ step = 0; /* * For strictlimit case, calculations above were based on bdi counters * and limits (starting from pos_ratio = bdi_position_ratio() and up to * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate). * Hence, to calculate "step" properly, we have to use bdi_dirty as * "dirty" and bdi_setpoint as "setpoint". * * We rampup dirty_ratelimit forcibly if bdi_dirty is low because * it's possible that bdi_thresh is close to zero due to inactivity * of backing device (see the implementation of bdi_dirty_limit()). */ if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) { dirty = bdi_dirty; if (bdi_dirty < 8) setpoint = bdi_dirty + 1; else setpoint = (bdi_thresh + bdi_dirty_limit(bdi, bg_thresh)) / 2; } if (dirty < setpoint) { x = min(bdi->balanced_dirty_ratelimit, min(balanced_dirty_ratelimit, task_ratelimit)); if (dirty_ratelimit < x) step = x - dirty_ratelimit; } else { x = max(bdi->balanced_dirty_ratelimit, max(balanced_dirty_ratelimit, task_ratelimit)); if (dirty_ratelimit > x) step = dirty_ratelimit - x; } /* * Don't pursue 100% rate matching. It's impossible since the balanced * rate itself is constantly fluctuating. So decrease the track speed * when it gets close to the target. Helps eliminate pointless tremors. */ step >>= dirty_ratelimit / (2 * step + 1); /* * Limit the tracking speed to avoid overshooting. 
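 *
 * Worked example (illustrative): step = 80 and dirty_ratelimit = 400 give a
 * shift of 400 / (2 * 80 + 1) = 2, so step becomes 80 >> 2 = 20, and the
 * (step + 7) / 8 below trims that to 3 pages/s of adjustment per interval.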
*/ step = (step + 7) / 8; if (dirty_ratelimit < balanced_dirty_ratelimit) dirty_ratelimit += step; else dirty_ratelimit -= step; bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL); bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit; trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit); } void __bdi_update_bandwidth(struct backing_dev_info *bdi, unsigned long thresh, unsigned long bg_thresh, unsigned long dirty, unsigned long bdi_thresh, unsigned long bdi_dirty, unsigned long start_time) { unsigned long now = jiffies; unsigned long elapsed = now - bdi->bw_time_stamp; unsigned long dirtied; unsigned long written; /* * rate-limit, only update once every 200ms. */ if (elapsed < BANDWIDTH_INTERVAL) return; dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]); written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]); /* * Skip quiet periods when disk bandwidth is under-utilized. * (at least 1s idle time between two flusher runs) */ if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time)) goto snapshot; if (thresh) { global_update_bandwidth(thresh, dirty, now); bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty, dirtied, elapsed); } bdi_update_write_bandwidth(bdi, elapsed, written); snapshot: bdi->dirtied_stamp = dirtied; bdi->written_stamp = written; bdi->bw_time_stamp = now; } static void bdi_update_bandwidth(struct backing_dev_info *bdi, unsigned long thresh, unsigned long bg_thresh, unsigned long dirty, unsigned long bdi_thresh, unsigned long bdi_dirty, unsigned long start_time) { if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL)) return; spin_lock(&bdi->wb.list_lock); __bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty, start_time); spin_unlock(&bdi->wb.list_lock); } /* * After a task dirtied this many pages, balance_dirty_pages_ratelimited() * will look to see if it needs to start dirty throttling. * * If dirty_poll_interval is too low, big NUMA machines will call the expensive * global_page_state() too often. So scale it near-sqrt to the safety margin * (the number of pages we may dirty without exceeding the dirty limits). */ static unsigned long dirty_poll_interval(unsigned long dirty, unsigned long thresh) { if (thresh > dirty) return 1UL << (ilog2(thresh - dirty) >> 1); return 1; } static unsigned long bdi_max_pause(struct backing_dev_info *bdi, unsigned long bdi_dirty) { unsigned long bw = bdi->avg_write_bandwidth; unsigned long t; /* * Limit pause time for small memory systems. If sleeping for too long * time, a small pool of dirty/writeback pages may go empty and disk go * idle. * * 8 serves as the safety ratio. */ t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8)); t++; return min_t(unsigned long, t, MAX_PAUSE); } static long bdi_min_pause(struct backing_dev_info *bdi, long max_pause, unsigned long task_ratelimit, unsigned long dirty_ratelimit, int *nr_dirtied_pause) { long hi = ilog2(bdi->avg_write_bandwidth); long lo = ilog2(bdi->dirty_ratelimit); long t; /* target pause */ long pause; /* estimated next pause */ int pages; /* target nr_dirtied_pause */ /* target for 10ms pause on 1-dd case */ t = max(1, HZ / 100); /* * Scale up pause time for concurrent dirtiers in order to reduce CPU * overheads. * * (N * 10ms) on 2^N concurrent tasks. */ if (hi > lo) t += (hi - lo) * (10 * HZ) / 1024; /* * This is a bit convoluted. We try to base the next nr_dirtied_pause * on the much more stable dirty_ratelimit. 
However the next pause time * will be computed based on task_ratelimit and the two rate limits may * depart considerably at some time. Especially if task_ratelimit goes * below dirty_ratelimit/2 and the target pause is max_pause, the next * pause time will be max_pause*2 _trimmed down_ to max_pause. As a * result task_ratelimit won't be executed faithfully, which could * eventually bring down dirty_ratelimit. * * We apply two rules to fix it up: * 1) try to estimate the next pause time and if necessary, use a lower * nr_dirtied_pause so as not to exceed max_pause. When this happens, * nr_dirtied_pause will be "dancing" with task_ratelimit. * 2) limit the target pause time to max_pause/2, so that the normal * small fluctuations of task_ratelimit won't trigger rule (1) and * nr_dirtied_pause will remain as stable as dirty_ratelimit. */ t = min(t, 1 + max_pause / 2); pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); /* * Tiny nr_dirtied_pause is found to hurt I/O performance in the test * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}. * When the 16 consecutive reads are often interrupted by some dirty * throttling pause during the async writes, cfq will go into idles * (deadline is fine). So push nr_dirtied_pause as high as possible * until reaches DIRTY_POLL_THRESH=32 pages. */ if (pages < DIRTY_POLL_THRESH) { t = max_pause; pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); if (pages > DIRTY_POLL_THRESH) { pages = DIRTY_POLL_THRESH; t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit; } } pause = HZ * pages / (task_ratelimit + 1); if (pause > max_pause) { t = max_pause; pages = task_ratelimit * t / roundup_pow_of_two(HZ); } *nr_dirtied_pause = pages; /* * The minimal pause time will normally be half the target pause time. */ return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t; } static inline void bdi_dirty_limits(struct backing_dev_info *bdi, unsigned long dirty_thresh, unsigned long background_thresh, unsigned long *bdi_dirty, unsigned long *bdi_thresh, unsigned long *bdi_bg_thresh) { unsigned long bdi_reclaimable; /* * bdi_thresh is not treated as some limiting factor as * dirty_thresh, due to reasons * - in JBOD setup, bdi_thresh can fluctuate a lot * - in a system with HDD and USB key, the USB key may somehow * go into state (bdi_dirty >> bdi_thresh) either because * bdi_dirty starts high, or because bdi_thresh drops low. * In this case we don't want to hard throttle the USB key * dirtiers for 100 seconds until bdi_dirty drops under * bdi_thresh. Instead the auxiliary bdi control line in * bdi_position_ratio() will let the dirtier task progress * at some rate <= (write_bw / 2) for bringing down bdi_dirty. */ *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); if (bdi_bg_thresh) *bdi_bg_thresh = div_u64((u64)*bdi_thresh * background_thresh, dirty_thresh); /* * In order to avoid the stacked BDI deadlock we need * to ensure we accurately count the 'dirty' pages when * the threshold is low. * * Otherwise it would be possible to get thresh+n pages * reported dirty, even though there are thresh-m pages * actually dirty; with m+n sitting in the percpu * deltas. */ if (*bdi_thresh < 2 * bdi_stat_error(bdi)) { bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE); *bdi_dirty = bdi_reclaimable + bdi_stat_sum(bdi, BDI_WRITEBACK); } else { bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE); *bdi_dirty = bdi_reclaimable + bdi_stat(bdi, BDI_WRITEBACK); } } /* * balance_dirty_pages() must be called by processes which are generating dirty * data. 
It looks at the number of dirty pages in the machine and will force * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2. * If we're over `background_thresh' then the writeback threads are woken to * perform some writeout. */ static void balance_dirty_pages(struct address_space *mapping, unsigned long pages_dirtied) { unsigned long nr_reclaimable; /* = file_dirty + unstable_nfs */ unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */ unsigned long background_thresh; unsigned long dirty_thresh; long period; long pause; long max_pause; long min_pause; int nr_dirtied_pause; bool dirty_exceeded = false; unsigned long task_ratelimit; unsigned long dirty_ratelimit; unsigned long pos_ratio; struct backing_dev_info *bdi = mapping->backing_dev_info; bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT; unsigned long start_time = jiffies; for (;;) { unsigned long now = jiffies; unsigned long uninitialized_var(bdi_thresh); unsigned long thresh; unsigned long uninitialized_var(bdi_dirty); unsigned long dirty; unsigned long bg_thresh; /* * Unstable writes are a feature of certain networked * filesystems (i.e. NFS) in which data may have been * written to the server's write cache, but has not yet * been flushed to permanent storage. */ nr_reclaimable = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); global_dirty_limits(&background_thresh, &dirty_thresh); if (unlikely(strictlimit)) { bdi_dirty_limits(bdi, dirty_thresh, background_thresh, &bdi_dirty, &bdi_thresh, &bg_thresh); dirty = bdi_dirty; thresh = bdi_thresh; } else { dirty = nr_dirty; thresh = dirty_thresh; bg_thresh = background_thresh; } /* * Throttle it only when the background writeback cannot * catch-up. This avoids (excessively) small writeouts * when the bdi limits are ramping up in case of !strictlimit. * * In strictlimit case make decision based on the bdi counters * and limits. Small writeouts when the bdi limits are ramping * up are the price we consciously pay for strictlimit-ing. 
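 *
 * E.g. (illustrative): bg_thresh = 1000 and thresh = 2000 pages put the
 * freerun ceiling at (2000 + 1000) / 2 = 1500; below that the dirtier
 * breaks out here without being throttled at all.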
*/ if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh)) { current->dirty_paused_when = now; current->nr_dirtied = 0; current->nr_dirtied_pause = dirty_poll_interval(dirty, thresh); break; } if (unlikely(!writeback_in_progress(bdi))) bdi_start_background_writeback(bdi); if (!strictlimit) bdi_dirty_limits(bdi, dirty_thresh, background_thresh, &bdi_dirty, &bdi_thresh, NULL); dirty_exceeded = (bdi_dirty > bdi_thresh) && ((nr_dirty > dirty_thresh) || strictlimit); if (dirty_exceeded && !bdi->dirty_exceeded) bdi->dirty_exceeded = 1; bdi_update_bandwidth(bdi, dirty_thresh, background_thresh, nr_dirty, bdi_thresh, bdi_dirty, start_time); dirty_ratelimit = bdi->dirty_ratelimit; pos_ratio = bdi_position_ratio(bdi, dirty_thresh, background_thresh, nr_dirty, bdi_thresh, bdi_dirty); task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >> RATELIMIT_CALC_SHIFT; max_pause = bdi_max_pause(bdi, bdi_dirty); min_pause = bdi_min_pause(bdi, max_pause, task_ratelimit, dirty_ratelimit, &nr_dirtied_pause); if (unlikely(task_ratelimit == 0)) { period = max_pause; pause = max_pause; goto pause; } period = HZ * pages_dirtied / task_ratelimit; pause = period; if (current->dirty_paused_when) pause -= now - current->dirty_paused_when; /* * For less than 1s think time (ext3/4 may block the dirtier * for up to 800ms from time to time on 1-HDD; so does xfs, * however at much less frequency), try to compensate it in * future periods by updating the virtual time; otherwise just * do a reset, as it may be a light dirtier. */ if (pause < min_pause) { trace_balance_dirty_pages(bdi, dirty_thresh, background_thresh, nr_dirty, bdi_thresh, bdi_dirty, dirty_ratelimit, task_ratelimit, pages_dirtied, period, min(pause, 0L), start_time); if (pause < -HZ) { current->dirty_paused_when = now; current->nr_dirtied = 0; } else if (period) { current->dirty_paused_when += period; current->nr_dirtied = 0; } else if (current->nr_dirtied_pause <= pages_dirtied) current->nr_dirtied_pause += pages_dirtied; break; } if (unlikely(pause > max_pause)) { /* for occasional dropped task_ratelimit */ now += min(pause - max_pause, max_pause); pause = max_pause; } pause: trace_balance_dirty_pages(bdi, dirty_thresh, background_thresh, nr_dirty, bdi_thresh, bdi_dirty, dirty_ratelimit, task_ratelimit, pages_dirtied, period, pause, start_time); __set_current_state(TASK_KILLABLE); io_schedule_timeout(pause); current->dirty_paused_when = now + pause; current->nr_dirtied = 0; current->nr_dirtied_pause = nr_dirtied_pause; /* * This is typically equal to (nr_dirty < dirty_thresh) and can * also keep "1000+ dd on a slow USB stick" under control. */ if (task_ratelimit) break; /* * In the case of an unresponsive NFS server whose NFS dirty * pages exceed dirty_thresh, give the other good bdi's a pipe * to go through, so that tasks on them still remain responsive. * * In theory 1 page is enough to keep the consumer-producer * pipe going: the flusher cleans 1 page => the task dirties 1 * more page. However bdi_dirty has accounting errors. So use * the larger and more IO friendly bdi_stat_error. */ if (bdi_dirty <= bdi_stat_error(bdi)) break; if (fatal_signal_pending(current)) break; } if (!dirty_exceeded && bdi->dirty_exceeded) bdi->dirty_exceeded = 0; if (writeback_in_progress(bdi)) return; /* * In laptop mode, we wait until hitting the higher threshold before * starting background writeout, and then write out all the way down * to the lower threshold. So slow writers cause minimal disk activity.
* * In normal mode, we start background writeout at the lower * background_thresh, to keep the amount of dirty memory low. */ if (laptop_mode) return; if (nr_reclaimable > background_thresh) bdi_start_background_writeback(bdi); } void set_page_dirty_balance(struct page *page, int page_mkwrite) { if (set_page_dirty(page) || page_mkwrite) { struct address_space *mapping = page_mapping(page); if (mapping) balance_dirty_pages_ratelimited(mapping); } } static DEFINE_PER_CPU(int, bdp_ratelimits); /* * Normal tasks are throttled by * loop { * dirty tsk->nr_dirtied_pause pages; * take a snap in balance_dirty_pages(); * } * However there is a worst case. If every task exit immediately when dirtied * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be * called to throttle the page dirties. The solution is to save the not yet * throttled page dirties in dirty_throttle_leaks on task exit and charge them * randomly into the running tasks. This works well for the above worst case, * as the new task will pick up and accumulate the old task's leaked dirty * count and eventually get throttled. */ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0; /** * balance_dirty_pages_ratelimited - balance dirty memory state * @mapping: address_space which was dirtied * * Processes which are dirtying memory should call in here once for each page * which was newly dirtied. The function will periodically check the system's * dirty state and will initiate writeback if needed. * * On really big machines, get_writeback_state is expensive, so try to avoid * calling it too often (ratelimiting). But once we're over the dirty memory * limit we decrease the ratelimiting by a lot, to prevent individual processes * from overshooting the limit by (ratelimit_pages) each. */ void balance_dirty_pages_ratelimited(struct address_space *mapping) { struct backing_dev_info *bdi = mapping->backing_dev_info; int ratelimit; int *p; if (!bdi_cap_account_dirty(bdi)) return; ratelimit = current->nr_dirtied_pause; if (bdi->dirty_exceeded) ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); preempt_disable(); /* * This prevents one CPU to accumulate too many dirtied pages without * calling into balance_dirty_pages(), which can happen when there are * 1000+ tasks, all of them start dirtying pages at exactly the same * time, hence all honoured too large initial task->nr_dirtied_pause. */ p = &__get_cpu_var(bdp_ratelimits); if (unlikely(current->nr_dirtied >= ratelimit)) *p = 0; else if (unlikely(*p >= ratelimit_pages)) { *p = 0; ratelimit = 0; } /* * Pick up the dirtied pages by the exited tasks. This avoids lots of * short-lived tasks (eg. gcc invocations in a kernel build) escaping * the dirty throttling and livelock other long-run dirtiers. */ p = &__get_cpu_var(dirty_throttle_leaks); if (*p > 0 && current->nr_dirtied < ratelimit) { unsigned long nr_pages_dirtied; nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); *p -= nr_pages_dirtied; current->nr_dirtied += nr_pages_dirtied; } preempt_enable(); if (unlikely(current->nr_dirtied >= ratelimit)) balance_dirty_pages(mapping, current->nr_dirtied); } EXPORT_SYMBOL(balance_dirty_pages_ratelimited); void throttle_vm_writeout(gfp_t gfp_mask) { unsigned long background_thresh; unsigned long dirty_thresh; for ( ; ; ) { global_dirty_limits(&background_thresh, &dirty_thresh); dirty_thresh = hard_dirty_limit(dirty_thresh); /* * Boost the allowable dirty threshold a bit for page * allocators so they don't get DoS'ed by heavy writers */ dirty_thresh += dirty_thresh / 10; /* wheeee... 
*/ if (global_page_state(NR_UNSTABLE_NFS) + global_page_state(NR_WRITEBACK) <= dirty_thresh) break; /* Try safe version */ else if (unlikely(global_page_state_snapshot(NR_UNSTABLE_NFS) + global_page_state_snapshot(NR_WRITEBACK) <= dirty_thresh)) break; congestion_wait(BLK_RW_ASYNC, HZ/10); /* * The caller might hold locks which can prevent IO completion * or progress in the filesystem. So we cannot just sit here * waiting for IO to complete. */ if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) break; } } /* * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs */ int dirty_writeback_centisecs_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { proc_dointvec(table, write, buffer, length, ppos); return 0; } #ifdef CONFIG_BLOCK void laptop_mode_timer_fn(unsigned long data) { struct request_queue *q = (struct request_queue *)data; int nr_pages = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); /* * We want to write everything out, not just down to the dirty * threshold */ if (bdi_has_dirty_io(&q->backing_dev_info)) bdi_start_writeback(&q->backing_dev_info, nr_pages, WB_REASON_LAPTOP_TIMER); } /* * We've spun up the disk and we're in laptop mode: schedule writeback * of all dirty data a few seconds from now. If the flush is already scheduled * then push it back - the user is still using the disk. */ void laptop_io_completion(struct backing_dev_info *info) { mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); } /* * We're in laptop mode and we've just synced. The sync's writes will have * caused another writeback to be scheduled by laptop_io_completion. * Nothing needs to be written back anymore, so we unschedule the writeback. */ void laptop_sync_completion(void) { struct backing_dev_info *bdi; rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) del_timer(&bdi->laptop_mode_wb_timer); rcu_read_unlock(); } #endif /* * If ratelimit_pages is too high then we can get into dirty-data overload * if a large number of processes all perform writes at the same time. * If it is too low then SMP machines will call the (expensive) * get_writeback_state too often. * * Here we set ratelimit_pages to a level which ensures that when all CPUs are * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory * thresholds. */ void writeback_set_ratelimit(void) { unsigned long background_thresh; unsigned long dirty_thresh; global_dirty_limits(&background_thresh, &dirty_thresh); global_dirty_limit = dirty_thresh; ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); if (ratelimit_pages < 16) ratelimit_pages = 16; } static int __cpuinit ratelimit_handler(struct notifier_block *self, unsigned long action, void *hcpu) { switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: case CPU_DEAD: writeback_set_ratelimit(); return NOTIFY_OK; default: return NOTIFY_DONE; } } static struct notifier_block __cpuinitdata ratelimit_nb = { .notifier_call = ratelimit_handler, .next = NULL, }; /* * Called early on to tune the page writeback dirty limits. * * We used to scale dirty pages according to how total memory * related to pages that could be allocated for buffers (by * comparing nr_free_buffer_pages() to vm_total_pages. * * However, that was when we used "dirty_ratio" to scale with * all memory, and we don't do that any more. 
"dirty_ratio" * is now applied to total non-HIGHPAGE memory (by subtracting * totalhigh_pages from vm_total_pages), and as such we can't * get into the old insane situation any more where we had * large amounts of dirty pages compared to a small amount of * non-HIGHMEM memory. * * But we might still want to scale the dirty_ratio by how * much memory the box has.. */ void __init page_writeback_init(void) { writeback_set_ratelimit(); register_cpu_notifier(&ratelimit_nb); fprop_global_init(&writeout_completions); } /** * tag_pages_for_writeback - tag pages to be written by write_cache_pages * @mapping: address space structure to write * @start: starting page index * @end: ending page index (inclusive) * * This function scans the page range from @start to @end (inclusive) and tags * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is * that write_cache_pages (or whoever calls this function) will then use * TOWRITE tag to identify pages eligible for writeback. This mechanism is * used to avoid livelocking of writeback by a process steadily creating new * dirty pages in the file (thus it is important for this function to be quick * so that it can tag pages faster than a dirtying process can create them). */ /* * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency. */ void tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end) { #define WRITEBACK_TAG_BATCH 4096 unsigned long tagged; do { spin_lock_irq(&mapping->tree_lock); tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, &start, end, WRITEBACK_TAG_BATCH, PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE); spin_unlock_irq(&mapping->tree_lock); WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); cond_resched(); /* We check 'start' to handle wrapping when end == ~0UL */ } while (tagged >= WRITEBACK_TAG_BATCH && start); } EXPORT_SYMBOL(tag_pages_for_writeback); /** * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. * @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write * @writepage: function called for each page * @data: data passed to writepage function * * If a page is already under I/O, write_cache_pages() skips it, even * if it's dirty. This is desirable behaviour for memory-cleaning writeback, * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() * and msync() need to guarantee that all the data which was dirty at the time * the call was made get new I/O started against them. If wbc->sync_mode is * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. * * To avoid livelocks (when other process dirties new pages), we first tag * pages which should be written back with TOWRITE tag and only then start * writing them. For data-integrity sync we have to be careful so that we do * not miss some pages (e.g., because some other process has cleared TOWRITE * tag we set). The rule we follow is that TOWRITE tag can be cleared only * by the process clearing the DIRTY tag (and submitting the page for IO). 
/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when other process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them.  For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared TOWRITE
 * tag we set).  The rule we follow is that TOWRITE tag can be cleared only
 * by the process clearing the DIRTY tag (and submitting the page for IO).
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping.  However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated.  We can freely skip
			 * it then, even for data integrity operations: the
			 * page has disappeared concurrently, so there could
			 * be no real expectation of this data integrity
			 * operation even if there is now a new, dirty page at
			 * the same pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, mapping->backing_dev_info);
			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file.  This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			/*
			 * We stop writing back only if we are not doing
			 * integrity sync.  In case of integrity sync we have
			 * to keep going until we have written all the pages
			 * we tagged for writeback prior to entering this
			 * loop.
			 */
			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done:
		 * wrap back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);
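/*
 * Editor's illustrative sketch (not part of this kernel source): the
 * range_cyclic logic above makes two passes, [writeback_index, EOF] and then
 * [0, writeback_index - 1].  A hypothetical standalone model of the wrap;
 * sim_* names are invented.  Runnable if extracted.
 */
#if 0	/* illustrative only; not compiled into the kernel */
#include <stdio.h>

#define SIM_NPAGES 10

static void sim_visit(unsigned long idx)
{
	printf("write page %lu\n", idx);
}

/* Walk all pages starting at 'start', wrapping once at the end. */
static void sim_cyclic_walk(unsigned long start)
{
	unsigned long index = start;
	unsigned long end = SIM_NPAGES - 1;
	int cycled = (start == 0);	/* starting at 0 needs no wrap */

retry:
	for (; index <= end; index++)
		sim_visit(index);
	if (!cycled) {
		cycled = 1;
		end = start - 1;	/* second pass covers [0, start - 1] */
		index = 0;
		goto retry;
	}
}

int main(void)
{
	sim_cyclic_walk(7);	/* visits 7..9, then wraps to 0..6 */
	return 0;
}
#endif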
*/ if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { done = 1; break; } } pagevec_release(&pvec); cond_resched(); } if (!cycled && !done) { /* * range_cyclic: * We hit the last page and there is more work to be done: wrap * back to the start of the file */ cycled = 1; index = 0; end = writeback_index - 1; goto retry; } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; return ret; } EXPORT_SYMBOL(write_cache_pages); /* * Function used by generic_writepages to call the real writepage * function and set the mapping flags on error */ static int __writepage(struct page *page, struct writeback_control *wbc, void *data) { struct address_space *mapping = data; int ret = mapping->a_ops->writepage(page, wbc); mapping_set_error(mapping, ret); return ret; } /** * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. * @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write * * This is a library function, which implements the writepages() * address_space_operation. */ int generic_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct blk_plug plug; int ret; /* deal with chardevs and other special file */ if (!mapping->a_ops->writepage) return 0; blk_start_plug(&plug); ret = write_cache_pages(mapping, wbc, __writepage, mapping); blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL(generic_writepages); int do_writepages(struct address_space *mapping, struct writeback_control *wbc) { int ret; if (wbc->nr_to_write <= 0) return 0; if (mapping->a_ops->writepages) ret = mapping->a_ops->writepages(mapping, wbc); else ret = generic_writepages(mapping, wbc); return ret; } /** * write_one_page - write out a single page and optionally wait on I/O * @page: the page to write * @wait: if true, wait on writeout * * The page must be locked by the caller and will be unlocked upon return. * * write_one_page() returns a negative error code if I/O failed. */ int write_one_page(struct page *page, int wait) { struct address_space *mapping = page->mapping; int ret = 0; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 1, }; BUG_ON(!PageLocked(page)); if (wait) wait_on_page_writeback(page); if (clear_page_dirty_for_io(page)) { page_cache_get(page); ret = mapping->a_ops->writepage(page, &wbc); if (ret == 0 && wait) { wait_on_page_writeback(page); if (PageError(page)) ret = -EIO; } page_cache_release(page); } else { unlock_page(page); } return ret; } EXPORT_SYMBOL(write_one_page); /* * For address_spaces which do not use buffers nor write back. */ int __set_page_dirty_no_writeback(struct page *page) { if (!PageDirty(page)) return !TestSetPageDirty(page); return 0; } /* * Helper function for set_page_dirty family. * NOTE: This relies on being atomic wrt interrupts. */ void account_page_dirtied(struct page *page, struct address_space *mapping) { trace_writeback_dirty_page(page, mapping); if (mapping_cap_account_dirty(mapping)) { __inc_zone_page_state(page, NR_FILE_DIRTY); __inc_zone_page_state(page, NR_DIRTIED); __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); task_io_account_write(PAGE_CACHE_SIZE); current->nr_dirtied++; this_cpu_inc(bdp_ratelimits); } } EXPORT_SYMBOL(account_page_dirtied); /* * Helper function for set_page_writeback family. * NOTE: Unlike account_page_dirtied this does not rely on being atomic * wrt interrupts. 
/*
 * Helper function for set_page_writeback family.
 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
 * wrt interrupts.
 */
void account_page_writeback(struct page *page)
{
	inc_zone_page_state(page, NR_WRITEBACK);
}
EXPORT_SYMBOL(account_page_writeback);

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;
		unsigned long flags;

		if (!mapping)
			return 1;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * Call this whenever redirtying a page, to de-account the dirty counters
 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
 * counters (NR_WRITTEN, BDI_WRITTEN) in the long term.  The mismatches will
 * lead to systematic errors in balanced_dirty_ratelimit and the dirty pages
 * position control.
 */
void account_page_redirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping && mapping_cap_account_dirty(mapping)) {
		current->nr_dirtied--;
		dec_zone_page_state(page, NR_DIRTIED);
		dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
	}
}
EXPORT_SYMBOL(account_page_redirty);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	account_page_redirty(page);
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors, which prefer a consistent
 * dirty state.  This rule can be broken in some special cases,
 * but it is better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could remain
		 * PG_readahead/PG_reclaim due to a race with
		 * end_page_writeback.  As for readahead, if the page is
		 * written, the flags will be reset, so no problem.  As for
		 * lru_deactivate_page, if the page is redirtied, the flag
		 * will be reset, so no problem either; but if the page is
		 * used by readahead it will confuse readahead and make it
		 * restart the size rampup process.  That is only a trivial
		 * problem, though.
		 */
		ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);
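/*
 * Editor's illustrative sketch (not part of this kernel source):
 * __set_page_dirty_nobuffers() relies on an atomic test-and-set so that
 * exactly one caller performs the accounting.  A hypothetical userspace
 * analogue using C11 atomics; sim_* names are invented.  Runnable if
 * extracted.
 */
#if 0	/* illustrative only; not compiled into the kernel */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag sim_dirty = ATOMIC_FLAG_INIT;
static int sim_nr_dirty;		/* the "accounting" done only once */

/* Returns 1 if this caller transitioned the flag clean -> dirty. */
static int sim_set_dirty(void)
{
	if (!atomic_flag_test_and_set(&sim_dirty)) {
		sim_nr_dirty++;		/* only the winning caller accounts */
		return 1;
	}
	return 0;			/* already dirty: nothing to do */
}

int main(void)
{
	printf("%d %d\n", sim_set_dirty(), sim_set_dirty());	/* 1 0 */
	printf("nr_dirty=%d\n", sim_nr_dirty);			/* 1 */
	return 0;
}
#endif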
/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable.  Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set.  But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point.  We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion.  See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
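/*
 * Editor's illustrative sketch (not part of this kernel source):
 * clear_page_dirty_for_io() funnels every dirty source through one "master"
 * bit before writeout.  A hypothetical single-threaded model of the
 * (a)/(b)/(c) sequence above; sim_* names are invented.  Runnable if
 * extracted.
 */
#if 0	/* illustrative only; not compiled into the kernel */
#include <stdio.h>

static int sim_pte_dirty;	/* dirtiness recorded in a page table */
static int sim_page_dirty;	/* the page's master dirty bit */

/* Transfer pte dirtiness to the caller (stands in for page_mkclean). */
static int sim_mkclean(void)
{
	int was = sim_pte_dirty;

	sim_pte_dirty = 0;
	return was;
}

/* Returns 1 if the page should be written out. */
static int sim_clear_dirty_for_io(void)
{
	if (sim_mkclean())		/* (b) the pte was dirty: */
		sim_page_dirty = 1;	/*     fold it into the master bit */
	if (sim_page_dirty) {		/* (c) clean the master bit and */
		sim_page_dirty = 0;	/*     tell the caller to write */
		return 1;
	}
	return 0;
}

int main(void)
{
	sim_pte_dirty = 1;				/* dirtied via mmap */
	printf("%d\n", sim_clear_dirty_for_io());	/* 1: write it out */
	printf("%d\n", sim_clear_dirty_for_io());	/* 0: already clean */
	return 0;
}
#endif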
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret) {
		dec_zone_page_state(page, NR_WRITEBACK);
		inc_zone_page_state(page, NR_WRITTEN);
	}
	return ret;
}

int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		radix_tree_tag_clear(&mapping->page_tree,
				     page_index(page),
				     PAGECACHE_TAG_TOWRITE);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		account_page_writeback(page);
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	return radix_tree_tagged(&mapping->page_tree, tag);
}
EXPORT_SYMBOL(mapping_tagged);

/**
 * wait_for_stable_page() - wait for writeback to finish, if necessary.
 * @page:	The page to wait on.
 *
 * This function determines if the given page is related to a backing device
 * that requires page contents to be held stable during writeback.  If so, then
 * it will wait for any pending writeback to complete.
 */
void wait_for_stable_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct backing_dev_info *bdi = mapping->backing_dev_info;

	if (!bdi_cap_stable_pages_required(bdi))
		return;

	wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
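/*
 * Editor's illustrative sketch (not part of this kernel source): the two
 * helpers above move a page through the writeback tag lifecycle.  A
 * hypothetical compressed model of the transitions; sim_* names are
 * invented.  Runnable if extracted.
 */
#if 0	/* illustrative only; not compiled into the kernel */
#include <stdio.h>

enum sim_state { SIM_CLEAN, SIM_DIRTY, SIM_WRITEBACK };

static enum sim_state sim_page = SIM_CLEAN;

static void sim_set_writeback(void)
{
	if (sim_page == SIM_DIRTY)
		sim_page = SIM_WRITEBACK;	/* DIRTY tag -> WRITEBACK tag */
}

static void sim_clear_writeback(void)
{
	if (sim_page == SIM_WRITEBACK)
		sim_page = SIM_CLEAN;		/* counts toward NR_WRITTEN */
}

int main(void)
{
	sim_page = SIM_DIRTY;			/* set_page_dirty() */
	sim_set_writeback();			/* test_set_page_writeback() */
	sim_clear_writeback();			/* test_clear_page_writeback() */
	printf("%d\n", sim_page == SIM_CLEAN);	/* 1 */
	return 0;
}
#endif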
andrewwrightt/kernel_moto_shamu
mm/page-writeback.c
C
gpl-2.0
76,843