idx
int64
func
string
target
int64
513,258
test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, bool no_changes, const key_map *map) { int ref_key; uint UNINIT_VAR(ref_key_parts); int order_direction= 0; uint used_key_parts= 0; TABLE *table=tab->table; SQL_SELECT *select=tab->select; key_map usable_keys; QUICK_SELECT_I *save_quick= select ? select->quick : 0; Item *orig_cond= 0; bool orig_cond_saved= false; int best_key= -1; bool changed_key= false; DBUG_ENTER("test_if_skip_sort_order"); /* Check that we are always called with first non-const table */ DBUG_ASSERT(tab == tab->join->join_tab + tab->join->const_tables); /* Keys disabled by ALTER TABLE ... DISABLE KEYS should have already been taken into account. */ usable_keys= *map; /* Find indexes that cover all ORDER/GROUP BY fields */ for (ORDER *tmp_order=order; tmp_order ; tmp_order=tmp_order->next) { Item *item= (*tmp_order->item)->real_item(); if (item->type() != Item::FIELD_ITEM) { usable_keys.clear_all(); DBUG_RETURN(0); } /* Take multiple-equalities into account. Suppose we have ORDER BY col1, col10 and there are multiple-equal(col1, col2, col3), multiple-equal(col10, col11). Then, - when item=col1, we find the set of indexes that cover one of {col1, col2, col3} - when item=col10, we find the set of indexes that cover one of {col10, col11} And we compute an intersection of these sets to find set of indexes that cover all ORDER BY components. */ key_map col_keys; compute_part_of_sort_key_for_equals(tab->join, table, (Item_field*)item, &col_keys); usable_keys.intersect(col_keys); if (usable_keys.is_clear_all()) goto use_filesort; // No usable keys } ref_key= -1; /* Test if constant range in WHERE */ if (tab->ref.key >= 0 && tab->ref.key_parts) { ref_key= tab->ref.key; ref_key_parts= tab->ref.key_parts; /* todo: why does JT_REF_OR_NULL mean filesort? We could find another index that satisfies the ordering. I would just set ref_key=MAX_KEY here... 
*/ if (tab->type == JT_REF_OR_NULL || tab->type == JT_FT) goto use_filesort; } else if (select && select->quick) // Range found by opt_range { int quick_type= select->quick->get_type(); /* assume results are not ordered when index merge is used TODO: sergeyp: Results of all index merge selects actually are ordered by clustered PK values. */ if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT || quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) { /* we set ref_key=MAX_KEY instead of -1, because test_if_cheaper ordering assumes that "ref_key==-1" means doing full index scan. (This is not very straightforward and we got into this situation for historical reasons. Should be fixed at some point). */ ref_key= MAX_KEY; } else { ref_key= select->quick->index; ref_key_parts= select->quick->used_key_parts; } } if (ref_key >= 0 && ref_key != MAX_KEY) { /* Current access method uses index ref_key with ref_key_parts parts */ if (!usable_keys.is_set(ref_key)) { /* However, ref_key doesn't match the needed ordering */ uint new_ref_key; /* If using index only read, only consider other possible index only keys */ if (table->covering_keys.is_set(ref_key)) usable_keys.intersect(table->covering_keys); if (tab->pre_idx_push_select_cond) { orig_cond= tab->set_cond(tab->pre_idx_push_select_cond); orig_cond_saved= true; } if ((new_ref_key= test_if_subkey(order, table, ref_key, ref_key_parts, &usable_keys)) < MAX_KEY) { /* Index new_ref_key - produces the required ordering, - also has the same columns as ref_key for #ref_key_parts (this means we will read the same number of rows as with ref_key). */ /* If new_ref_key allows to construct a quick select which uses more key parts than ref(new_ref_key) would, do that. Otherwise, construct a ref access (todo: it's not clear what is the win in using ref access when we could use quick select also?) 
*/ if ((table->quick_keys.is_set(new_ref_key) && table->quick_key_parts[new_ref_key] > ref_key_parts) || !(tab->ref.key >= 0)) { /* The range optimizer constructed QUICK_RANGE for ref_key, and we want to use instead new_ref_key as the index. We can't just change the index of the quick select, because this may result in an inconsistent QUICK_SELECT object. Below we create a new QUICK_SELECT from scratch so that all its parameters are set correctly by the range optimizer. */ key_map new_ref_key_map; COND *save_cond; bool res; new_ref_key_map.clear_all(); // Force the creation of quick select new_ref_key_map.set_bit(new_ref_key); // only for new_ref_key. /* Reset quick; This will be restored in 'use_filesort' if needed */ select->quick= 0; save_cond= select->cond; if (select->pre_idx_push_select_cond) select->cond= select->pre_idx_push_select_cond; res= select->test_quick_select(tab->join->thd, new_ref_key_map, 0, (tab->join->select_options & OPTION_FOUND_ROWS) ? HA_POS_ERROR : tab->join->unit->select_limit_cnt,TRUE, TRUE, FALSE) <= 0; if (res) { select->cond= save_cond; goto use_filesort; } DBUG_ASSERT(tab->select->quick); tab->type= JT_ALL; tab->ref.key= -1; tab->ref.key_parts= 0; tab->use_quick= 1; best_key= new_ref_key; /* We don't restore select->cond as we want to use the original condition as index condition pushdown is not active for the new index. todo: why not perform index condition pushdown for the new index? */ } else { /* We'll use ref access method on key new_ref_key. In general case the index search tuple for new_ref_key will be different (e.g. when one index is defined as (part1, part2, ...) and another as (part1, part2(N), ...) and the WHERE clause contains "part1 = const1 AND part2=const2". So we build tab->ref from scratch here. 
*/ KEYUSE *keyuse= tab->keyuse; while (keyuse->key != new_ref_key && keyuse->table == tab->table) keyuse++; if (create_ref_for_key(tab->join, tab, keyuse, FALSE, (tab->join->const_table_map | OUTER_REF_TABLE_BIT))) goto use_filesort; pick_table_access_method(tab); } ref_key= new_ref_key; changed_key= true; } } /* Check if we get the rows in requested sorted order by using the key */ if (usable_keys.is_set(ref_key) && (order_direction= test_if_order_by_key(tab->join, order,table,ref_key, &used_key_parts))) goto check_reverse_order; } { uint UNINIT_VAR(best_key_parts); uint saved_best_key_parts= 0; int best_key_direction= 0; JOIN *join= tab->join; ha_rows table_records= table->stat_records(); test_if_cheaper_ordering(tab, order, table, usable_keys, ref_key, select_limit, &best_key, &best_key_direction, &select_limit, &best_key_parts, &saved_best_key_parts); /* filesort() and join cache are usually faster than reading in index order and not using join cache, except in case that chosen index is clustered key. */ if (best_key < 0 || ((select_limit >= table_records) && (tab->type == JT_ALL && tab->join->table_count > tab->join->const_tables + 1) && !(table->file->index_flags(best_key, 0, 1) & HA_CLUSTERED_INDEX))) goto use_filesort; if (select && // psergey: why doesn't this use a quick? table->quick_keys.is_set(best_key) && best_key != ref_key) { key_map tmp_map; tmp_map.clear_all(); // Force the creation of quick select tmp_map.set_bit(best_key); // only best_key. select->quick= 0; bool cond_saved= false; Item *saved_cond; /* Index Condition Pushdown may have removed parts of the condition for this table. Temporarily put them back because we want the whole condition for the range analysis. */ if (select->pre_idx_push_select_cond) { saved_cond= select->cond; select->cond= select->pre_idx_push_select_cond; cond_saved= true; } select->test_quick_select(join->thd, tmp_map, 0, join->select_options & OPTION_FOUND_ROWS ? 
HA_POS_ERROR : join->unit->select_limit_cnt, TRUE, FALSE, FALSE); if (cond_saved) select->cond= saved_cond; } order_direction= best_key_direction; /* saved_best_key_parts is actual number of used keyparts found by the test_if_order_by_key function. It could differ from keyinfo->user_defined_key_parts, thus we have to restore it in case of desc order as it affects QUICK_SELECT_DESC behaviour. */ used_key_parts= (order_direction == -1) ? saved_best_key_parts : best_key_parts; changed_key= true; } check_reverse_order: DBUG_ASSERT(order_direction != 0); if (order_direction == -1) // If ORDER BY ... DESC { int quick_type; if (select && select->quick) { /* Don't reverse the sort order, if it's already done. (In some cases test_if_order_by_key() can be called multiple times */ if (select->quick->reverse_sorted()) goto skipped_filesort; quick_type= select->quick->get_type(); if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT || quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) { tab->limit= 0; goto use_filesort; // Use filesort } } } /* Update query plan with access pattern for doing ordered access according to what we have decided above. */ if (!no_changes) // We are allowed to update QEP { if (best_key >= 0) { bool quick_created= (select && select->quick && select->quick!=save_quick); /* If ref_key used index tree reading only ('Using index' in EXPLAIN), and best_key doesn't, then revert the decision. */ if (table->covering_keys.is_set(best_key)) table->file->ha_start_keyread(best_key); else table->file->ha_end_keyread(); if (!quick_created) { if (select) // Throw any existing quick select select->quick= 0; // Cleanup either reset to save_quick, // or 'delete save_quick' tab->index= best_key; tab->read_first_record= order_direction > 0 ? 
join_read_first:join_read_last; tab->type=JT_NEXT; // Read with index_first(), index_next() if (tab->pre_idx_push_select_cond) { tab->set_cond(tab->pre_idx_push_select_cond); /* orig_cond is a part of pre_idx_push_cond, no need to restore it. */ orig_cond= 0; orig_cond_saved= false; } table->file->ha_index_or_rnd_end(); if (tab->join->select_options & SELECT_DESCRIBE) { tab->ref.key= -1; tab->ref.key_parts= 0; if (select_limit < table->stat_records()) tab->limit= select_limit; table->file->ha_end_keyread(); } } else if (tab->type != JT_ALL || tab->select->quick) { /* We're about to use a quick access to the table. We need to change the access method so as the quick access method is actually used. */ DBUG_ASSERT(tab->select->quick); tab->type=JT_ALL; tab->use_quick=1; tab->ref.key= -1; tab->ref.key_parts=0; // Don't use ref key. tab->read_first_record= join_init_read_record; if (tab->is_using_loose_index_scan()) tab->join->tmp_table_param.precomputed_group_by= TRUE; /* Restore the original condition as changes done by pushdown condition are not relevant anymore */ if (tab->select && tab->select->pre_idx_push_select_cond) { tab->set_cond(tab->select->pre_idx_push_select_cond); tab->table->file->cancel_pushed_idx_cond(); } /* TODO: update the number of records in join->best_positions[tablenr] */ } } // best_key >= 0 if (order_direction == -1) // If ORDER BY ... DESC { if (select && select->quick) { /* ORDER BY range_key DESC */ QUICK_SELECT_I *tmp= select->quick->make_reverse(used_key_parts); if (!tmp) { tab->limit= 0; goto use_filesort; // Reverse sort failed -> filesort } /* Cancel Pushed Index Condition, as it doesn't work for reverse scans. 
*/ if (tab->select && tab->select->pre_idx_push_select_cond) { tab->set_cond(tab->select->pre_idx_push_select_cond); tab->table->file->cancel_pushed_idx_cond(); } if (select->quick == save_quick) save_quick= 0; // make_reverse() consumed it select->set_quick(tmp); /* Cancel "Range checked for each record" */ if (tab->use_quick == 2) { tab->use_quick= 1; tab->read_first_record= join_init_read_record; } } else if (tab->type != JT_NEXT && tab->type != JT_REF_OR_NULL && tab->ref.key >= 0 && tab->ref.key_parts <= used_key_parts) { /* SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC Use a traversal function that starts by reading the last row with key part (A) and then traverse the index backwards. */ tab->read_first_record= join_read_last_key; tab->read_record.read_record= join_read_prev_same; /* Cancel "Range checked for each record" */ if (tab->use_quick == 2) { tab->use_quick= 1; tab->read_first_record= join_init_read_record; } /* Cancel Pushed Index Condition, as it doesn't work for reverse scans. */ if (tab->select && tab->select->pre_idx_push_select_cond) { tab->set_cond(tab->select->pre_idx_push_select_cond); tab->table->file->cancel_pushed_idx_cond(); } } } else if (select && select->quick) { /* Cancel "Range checked for each record" */ if (tab->use_quick == 2) { tab->use_quick= 1; tab->read_first_record= join_init_read_record; } select->quick->need_sorted_output(); } tab->read_record.unlock_row= (tab->type == JT_EQ_REF) ? join_read_key_unlock_row : rr_unlock_row; } // QEP has been modified /* Cleanup: We may have both a 'select->quick' and 'save_quick' (original) at this point. Delete the one that we wan't use. 
*/ skipped_filesort: // Keep current (ordered) select->quick if (select && save_quick != select->quick) { delete save_quick; save_quick= NULL; } if (orig_cond_saved && !changed_key) tab->set_cond(orig_cond); if (!no_changes && changed_key && table->file->pushed_idx_cond) table->file->cancel_pushed_idx_cond(); DBUG_RETURN(1); use_filesort: // Restore original save_quick if (select && select->quick != save_quick) { delete select->quick; select->quick= save_quick; } if (orig_cond_saved) tab->set_cond(orig_cond); DBUG_RETURN(0); }
0
359,596
DEFUN (clear_ip_bgp_peer_soft, clear_ip_bgp_peer_soft_cmd, "clear ip bgp A.B.C.D soft", CLEAR_STR IP_STR BGP_STR "BGP neighbor address to clear\n" "Soft reconfig\n") { return bgp_clear_vty (vty, NULL, AFI_IP, SAFI_UNICAST, clear_peer, BGP_CLEAR_SOFT_BOTH, argv[0]); }
0
231,053
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition ) { BaseType_t xReturn; UBaseType_t uxSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); /* RTOS ports that support interrupt nesting have the concept of a maximum * system call (or maximum API call) interrupt priority. Interrupts that are * above the maximum system call priority are kept permanently enabled, even * when the RTOS kernel is in a critical section, but cannot make any calls to * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion * failure if a FreeRTOS API function is called from an interrupt that has been * assigned a priority above the configured maximum system call priority. * Only FreeRTOS functions that end in FromISR can be called from interrupts * that have been assigned a priority at or (logically) below the maximum * system call interrupt priority. FreeRTOS maintains a separate interrupt * safe API to ensure interrupt entry is as fast and as simple as possible. * More information (albeit Cortex-M specific) is provided on the following * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); /* Similar to xQueueGenericSend, except without blocking if there is no room * in the queue. Also don't directly wake a task that was blocked on a queue * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post). 
*/ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { const int8_t cTxLock = pxQueue->cTxLock; const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; traceQUEUE_SEND_FROM_ISR( pxQueue ); /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a * semaphore or mutex. That means prvCopyDataToQueue() cannot result * in a task disinheriting a priority and prvCopyDataToQueue() can be * called here even though the disinherit function does not check if * the scheduler is suspended before accessing the ready lists. */ ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); /* The event list is not altered if the queue is locked. This will * be done when the queue is unlocked later. */ if( cTxLock == queueUNLOCKED ) { #if ( configUSE_QUEUE_SETS == 1 ) { if( pxQueue->pxQueueSetContainer != NULL ) { if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) ) { /* Do not notify the queue set as an existing item * was overwritten in the queue so the number of items * in the queue has not changed. */ mtCOVERAGE_TEST_MARKER(); } else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) { /* The queue is a member of a queue set, and posting * to the queue set caused a higher priority task to * unblock. A context switch is required. */ if( pxHigherPriorityTaskWoken != NULL ) { *pxHigherPriorityTaskWoken = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } } else { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so * record that a context switch is required. 
*/ if( pxHigherPriorityTaskWoken != NULL ) { *pxHigherPriorityTaskWoken = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } } } #else /* configUSE_QUEUE_SETS */ { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so record that a * context switch is required. */ if( pxHigherPriorityTaskWoken != NULL ) { *pxHigherPriorityTaskWoken = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } /* Not used in this path. */ ( void ) uxPreviousMessagesWaiting; } #endif /* configUSE_QUEUE_SETS */ } else { /* Increment the lock count so the task that unlocks the queue * knows that data was posted while it was locked. */ configASSERT( cTxLock != queueINT8_MAX ); pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); } xReturn = pdPASS; } else { traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); xReturn = errQUEUE_FULL; } } portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; }
0
294,440
date_s_valid_weeknum_p(int argc, VALUE *argv, VALUE klass) { VALUE vy, vw, vd, vf, vsg; VALUE argv2[5]; rb_scan_args(argc, argv, "41", &vy, &vw, &vd, &vf, &vsg); argv2[0] = vy; argv2[1] = vw; argv2[2] = vd; argv2[3] = vf; if (argc < 5) argv2[4] = INT2FIX(DEFAULT_SG); else argv2[4] = vsg; if (NIL_P(valid_weeknum_sub(5, argv2, klass, 0))) return Qfalse; return Qtrue; }
0
336,682
SPICE_GNUC_VISIBLE int spice_server_set_exit_on_disconnect(SpiceServer *s, int flag) { s->config->exit_on_disconnect = !!flag; return 0; }
0
238,436
static void mark_ptr_or_null_reg(struct bpf_func_state *state, struct bpf_reg_state *reg, u32 id, bool is_null) { if (type_may_be_null(reg->type) && reg->id == id && !WARN_ON_ONCE(!reg->id)) { if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. If we * see this happening, don't convert the register. */ return; } if (is_null) { reg->type = SCALAR_VALUE; /* We don't need id and ref_obj_id from this point * onwards anymore, thus we should better reset it, * so that state pruning has chances to take effect. */ reg->id = 0; reg->ref_obj_id = 0; return; } mark_ptr_not_null_reg(reg); if (!reg_may_point_to_spin_lock(reg)) { /* For not-NULL ptr, reg->ref_obj_id will be reset * in release_reg_references(). * * reg->id is still used by spin_lock ptr. Other * than spin_lock ptr type, reg->id can be reset. */ reg->id = 0; } } }
0
512,814
static bool convert_const_to_int(THD *thd, Item_field *field_item, Item **item) { Field *field= field_item->field; int result= 0; /* We don't need to convert an integer to an integer, pretend it's already converted. But we still convert it if it is compared with a Field_year, as YEAR(2) may change the value of an integer when converting it to an integer (say, 0 to 70). */ if ((*item)->cmp_type() == INT_RESULT && field_item->field_type() != MYSQL_TYPE_YEAR) return 1; if ((*item)->const_item() && !(*item)->is_expensive()) { TABLE *table= field->table; Sql_mode_save sql_mode(thd); Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE); MY_BITMAP *old_maps[2] = { NULL, NULL }; ulonglong UNINIT_VAR(orig_field_val); /* original field value if valid */ /* table->read_set may not be set if we come here from a CREATE TABLE */ if (table && table->read_set) dbug_tmp_use_all_columns(table, old_maps, &table->read_set, &table->write_set); /* For comparison purposes allow invalid dates like 2000-01-32 */ thd->variables.sql_mode= (thd->variables.sql_mode & ~MODE_NO_ZERO_DATE) | MODE_INVALID_DATES; /* Store the value of the field/constant because the call to save_in_field below overrides that value. Don't save field value if no data has been read yet. */ bool save_field_value= (field_item->const_item() || !(field->table->status & STATUS_NO_RECORD)); if (save_field_value) orig_field_val= field->val_int(); if (!(*item)->save_in_field(field, 1) && !field->is_null()) { int field_cmp= 0; // If item is a decimal value, we must reject it if it was truncated. if (field->type() == MYSQL_TYPE_LONGLONG) { field_cmp= stored_field_cmp_to_item(thd, field, *item); DBUG_PRINT("info", ("convert_const_to_int %d", field_cmp)); } if (0 == field_cmp) { Item *tmp= new (thd->mem_root) Item_int_with_ref(thd, field->val_int(), *item, MY_TEST(field->flags & UNSIGNED_FLAG)); if (tmp) thd->change_item_tree(item, tmp); result= 1; // Item was replaced } } /* Restore the original field value. 
*/ if (save_field_value) { result= field->store(orig_field_val, TRUE); /* orig_field_val must be a valid value that can be restored back. */ DBUG_ASSERT(!result); } if (table && table->read_set) dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_maps); } return result; }
0
276,443
explicit BoostedTreesGetEnsembleStatesOp(OpKernelConstruction* context) : OpKernel(context) {}
0
231,796
void processClientInitialParams( QuicServerConnectionState& conn, const ClientTransportParameters& clientParams) { // TODO validate that we didn't receive original connection ID, stateless // reset token, or preferred address. auto maxData = getIntegerParameter( TransportParameterId::initial_max_data, clientParams.parameters); auto maxStreamDataBidiLocal = getIntegerParameter( TransportParameterId::initial_max_stream_data_bidi_local, clientParams.parameters); auto maxStreamDataBidiRemote = getIntegerParameter( TransportParameterId::initial_max_stream_data_bidi_remote, clientParams.parameters); auto maxStreamDataUni = getIntegerParameter( TransportParameterId::initial_max_stream_data_uni, clientParams.parameters); auto maxStreamsBidi = getIntegerParameter( TransportParameterId::initial_max_streams_bidi, clientParams.parameters); auto maxStreamsUni = getIntegerParameter( TransportParameterId::initial_max_streams_uni, clientParams.parameters); auto idleTimeout = getIntegerParameter( TransportParameterId::idle_timeout, clientParams.parameters); auto ackDelayExponent = getIntegerParameter( TransportParameterId::ack_delay_exponent, clientParams.parameters); auto packetSize = getIntegerParameter( TransportParameterId::max_packet_size, clientParams.parameters); auto partialReliability = getIntegerParameter( static_cast<TransportParameterId>(kPartialReliabilityParameterId), clientParams.parameters); auto activeConnectionIdLimit = getIntegerParameter( TransportParameterId::active_connection_id_limit, clientParams.parameters); auto d6dBasePMTU = getIntegerParameter( static_cast<TransportParameterId>(kD6DBasePMTUParameterId), clientParams.parameters); auto d6dRaiseTimeout = getIntegerParameter( static_cast<TransportParameterId>(kD6DRaiseTimeoutParameterId), clientParams.parameters); auto d6dProbeTimeout = getIntegerParameter( static_cast<TransportParameterId>(kD6DProbeTimeoutParameterId), clientParams.parameters); auto minAckDelay = getIntegerParameter( 
TransportParameterId::min_ack_delay, clientParams.parameters); if (conn.version == QuicVersion::QUIC_DRAFT) { auto initialSourceConnId = getConnIdParameter( TransportParameterId::initial_source_connection_id, clientParams.parameters); if (!initialSourceConnId || initialSourceConnId.value() != conn.readCodec->getClientConnectionId()) { throw QuicTransportException( "Initial CID does not match.", TransportErrorCode::TRANSPORT_PARAMETER_ERROR); } } // TODO Validate active_connection_id_limit if (packetSize && *packetSize < kMinMaxUDPPayload) { throw QuicTransportException( folly::to<std::string>( "Max packet size too small. received max_packetSize = ", *packetSize), TransportErrorCode::TRANSPORT_PARAMETER_ERROR); } VLOG(10) << "Client advertised flow control "; VLOG(10) << "conn=" << maxData.value_or(0); VLOG(10) << " stream bidi local=" << maxStreamDataBidiLocal.value_or(0) << " "; VLOG(10) << " stream bidi remote=" << maxStreamDataBidiRemote.value_or(0) << " "; VLOG(10) << " stream uni=" << maxStreamDataUni.value_or(0) << " "; VLOG(10) << conn; conn.flowControlState.peerAdvertisedMaxOffset = maxData.value_or(0); conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiLocal = maxStreamDataBidiLocal.value_or(0); conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiRemote = maxStreamDataBidiRemote.value_or(0); conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetUni = maxStreamDataUni.value_or(0); conn.streamManager->setMaxLocalBidirectionalStreams( maxStreamsBidi.value_or(0)); conn.streamManager->setMaxLocalUnidirectionalStreams( maxStreamsUni.value_or(0)); conn.peerIdleTimeout = std::chrono::milliseconds(idleTimeout.value_or(0)); conn.peerIdleTimeout = timeMin(conn.peerIdleTimeout, kMaxIdleTimeout); if (ackDelayExponent && *ackDelayExponent > kMaxAckDelayExponent) { throw QuicTransportException( "ack_delay_exponent too large", TransportErrorCode::TRANSPORT_PARAMETER_ERROR); } conn.peerAckDelayExponent = 
ackDelayExponent.value_or(kDefaultAckDelayExponent); if (minAckDelay.hasValue()) { conn.peerMinAckDelay = std::chrono::microseconds(minAckDelay.value()); } // Default to max because we can probe PMTU now, and this will be the upper // limit uint64_t maxUdpPayloadSize = kDefaultMaxUDPPayload; if (packetSize) { maxUdpPayloadSize = std::min(*packetSize, maxUdpPayloadSize); conn.peerMaxUdpPayloadSize = maxUdpPayloadSize; if (conn.transportSettings.canIgnorePathMTU) { if (*packetSize > kDefaultMaxUDPPayload) { // A good peer should never set oversized limit, so to be safe we // fallback to default conn.udpSendPacketLen = kDefaultUDPSendPacketLen; } else { // Otherwise, canIgnorePathMTU forces us to immediately set // udpSendPacketLen // TODO: rename "canIgnorePathMTU" to "forciblySetPathMTU" conn.udpSendPacketLen = maxUdpPayloadSize; } } } conn.peerActiveConnectionIdLimit = activeConnectionIdLimit.value_or(kDefaultActiveConnectionIdLimit); if (partialReliability && *partialReliability != 0 && conn.transportSettings.partialReliabilityEnabled) { conn.partialReliabilityEnabled = true; } VLOG(10) << "conn.partialReliabilityEnabled=" << conn.partialReliabilityEnabled; if (conn.transportSettings.d6dConfig.enabled) { // Sanity check if (d6dBasePMTU) { if (*d6dBasePMTU >= kMinMaxUDPPayload && *d6dBasePMTU <= kDefaultMaxUDPPayload) { // The reason to take the max is because we don't want d6d to send // probes with a smaller packet size than udpSendPacketLen, which would // be useless and cause meaningless delay on finding the upper bound. 
conn.d6d.basePMTU = std::max(*d6dBasePMTU, conn.udpSendPacketLen); conn.d6d.maxPMTU = maxUdpPayloadSize; VLOG(10) << "conn.d6d.basePMTU=" << conn.d6d.basePMTU; // Start from base conn.d6d.state = D6DMachineState::BASE; conn.d6d.meta.lastNonSearchState = D6DMachineState::DISABLED; conn.d6d.meta.timeLastNonSearchState = Clock::now(); // Temporary, should be removed after transport knob pipeline works conn.d6d.noBlackholeDetection = true; } else { LOG(ERROR) << "client d6dBasePMTU fails sanity check: " << *d6dBasePMTU; // We treat base pmtu transport param as client's swich to activate d6d, // so not receiving that means there's no need to configure the rest d6d // params return; } } if (d6dRaiseTimeout) { if (*d6dRaiseTimeout >= kMinD6DRaiseTimeout.count()) { conn.d6d.raiseTimeout = std::chrono::seconds(*d6dRaiseTimeout); VLOG(10) << "conn.d6d.raiseTimeout=" << conn.d6d.raiseTimeout.count(); } else { LOG(ERROR) << "client d6dRaiseTimeout fails sanity check: " << *d6dRaiseTimeout; } } if (d6dProbeTimeout) { if (*d6dProbeTimeout >= kMinD6DProbeTimeout.count()) { conn.d6d.probeTimeout = std::chrono::seconds(*d6dProbeTimeout); VLOG(10) << "conn.d6d.probeTimeout=" << conn.d6d.probeTimeout.count(); } else { LOG(ERROR) << "client d6dProbeTimeout fails sanity check: " << *d6dProbeTimeout; } } } }
0
210,091
get_password(const char *prompt, char *input, int capacity) { #ifdef ENABLE_SYSTEMD int is_systemd_running; struct stat a, b; /* We simply test whether the systemd cgroup hierarchy is * mounted */ is_systemd_running = (lstat("/sys/fs/cgroup", &a) == 0) && (lstat("/sys/fs/cgroup/systemd", &b) == 0) && (a.st_dev != b.st_dev); if (is_systemd_running) { char *cmd, *ret; FILE *ask_pass_fp = NULL; cmd = ret = NULL; if (asprintf(&cmd, "systemd-ask-password \"%s\"", prompt) >= 0) { ask_pass_fp = popen (cmd, "re"); free (cmd); } if (ask_pass_fp) { ret = fgets(input, capacity, ask_pass_fp); pclose(ask_pass_fp); } if (ret) { int len = strlen(input); if (input[len - 1] == '\n') input[len - 1] = '\0'; return input; } } #endif /* * Falling back to getpass(..) * getpass is obsolete, but there's apparently nothing that replaces it */ char *tmp_pass = getpass(prompt); if (!tmp_pass) return NULL; strncpy(input, tmp_pass, capacity - 1); input[capacity - 1] = '\0'; /* zero-out the static buffer */ memset(tmp_pass, 0, strlen(tmp_pass)); return input; }
1
90,792
void QuotaManager::NotifyOriginInUse(const GURL& origin) { DCHECK(io_thread_->BelongsToCurrentThread()); origins_in_use_[origin]++; }
0
293,766
static void process_constructors(RKernelCacheObj *obj, struct MACH0_(obj_t) *mach0, RList *ret, ut64 paddr, bool is_first, int mode, const char *prefix) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return; } int i, type; for (i = 0; !sections[i].last; i++) { if (sections[i].size == 0) { continue; } if (strstr (sections[i].name, "_mod_fini_func") || strstr (sections[i].name, "_mod_term_func")) { type = R_BIN_ENTRY_TYPE_FINI; } else if (strstr (sections[i].name, "_mod_init_func")) { type = is_first ? 0 : R_BIN_ENTRY_TYPE_INIT; is_first = false; } else { continue; } ut8 *buf = calloc (sections[i].size, 1); if (!buf) { break; } if (r_buf_read_at (obj->cache_buf, sections[i].offset + paddr, buf, sections[i].size) < sections[i].size) { free (buf); break; } int j; int count = 0; for (j = 0; j < sections[i].size; j += 8) { ut64 addr64 = K_RPTR (buf + j); ut64 paddr64 = sections[i].offset + paddr + j; if (mode == R_K_CONSTRUCTOR_TO_ENTRY) { RBinAddr *ba = newEntry (paddr64, addr64, type); r_list_append (ret, ba); } else if (mode == R_K_CONSTRUCTOR_TO_SYMBOL) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("%s.%s.%d", prefix, (type == R_BIN_ENTRY_TYPE_INIT) ? "init" : "fini", count++); sym->vaddr = addr64; sym->paddr = paddr64; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "FUNC"; r_list_append (ret, sym); } } free (buf); } free (sections); }
0
353,234
bool SplashOutputDev::checkTransparencyGroup(GfxState *state, bool knockout) { if (state->getFillOpacity() != 1 || state->getStrokeOpacity() != 1 || state->getAlphaIsShape() || state->getBlendMode() != gfxBlendNormal || splash->getSoftMask() != nullptr || knockout) return true; return transpGroupStack != nullptr && transpGroupStack->shape != nullptr; }
0
437,324
renumber_by_map(Node* node, GroupNumRemap* map) { int r = 0; switch (NODE_TYPE(node)) { case NODE_LIST: case NODE_ALT: do { r = renumber_by_map(NODE_CAR(node), map); } while (r == 0 && IS_NOT_NULL(node = NODE_CDR(node))); break; case NODE_QUANT: r = renumber_by_map(NODE_BODY(node), map); break; case NODE_ENCLOSURE: { EnclosureNode* en = ENCLOSURE_(node); r = renumber_by_map(NODE_BODY(node), map); if (r != 0) return r; if (en->type == ENCLOSURE_IF_ELSE) { if (IS_NOT_NULL(en->te.Then)) { r = renumber_by_map(en->te.Then, map); if (r != 0) return r; } if (IS_NOT_NULL(en->te.Else)) { r = renumber_by_map(en->te.Else, map); if (r != 0) return r; } } } break; case NODE_BACKREF: r = renumber_node_backref(node, map); break; case NODE_ANCHOR: if (IS_NOT_NULL(NODE_BODY(node))) r = renumber_by_map(NODE_BODY(node), map); break; default: break; } return r; }
0
90,750
void QuotaManager::DeleteOnCorrectThread() const { if (!io_thread_->BelongsToCurrentThread()) { io_thread_->DeleteSoon(FROM_HERE, this); return; } delete this; }
0
252,419
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (exr_header.num_channels == 1) { // Grayscale channel only. 
(*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < 
exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; }
0
247,748
void updateFilterChain( const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext& tls_context, envoy::config::listener::v3::FilterChain& filter_chain) { filter_chain.mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context); }
0
473,820
onigenc_ascii_apply_all_case_fold(OnigCaseFoldType flag ARG_UNUSED, OnigApplyAllCaseFoldFunc f, void* arg, OnigEncoding enc ARG_UNUSED) { OnigCodePoint code; int i, r; for (i = 0; i < (int )(sizeof(OnigAsciiLowerMap)/sizeof(OnigPairCaseFoldCodes)); i++) { code = OnigAsciiLowerMap[i].to; r = (*f)(OnigAsciiLowerMap[i].from, &code, 1, arg); if (r != 0) return r; code = OnigAsciiLowerMap[i].from; r = (*f)(OnigAsciiLowerMap[i].to, &code, 1, arg); if (r != 0) return r; } return 0; }
0
500,689
int sftp_symlink(sftp_session sftp, const char *target, const char *dest) { sftp_status_message status = NULL; sftp_message msg = NULL; ssh_string target_s; ssh_string dest_s; ssh_buffer buffer; uint32_t id; if (sftp == NULL) return -1; if (target == NULL || dest == NULL) { ssh_set_error_invalid(sftp->session, __FUNCTION__); return -1; } buffer = ssh_buffer_new(); if (buffer == NULL) { ssh_set_error_oom(sftp->session); return -1; } target_s = ssh_string_from_char(target); if (target_s == NULL) { ssh_set_error_oom(sftp->session); ssh_buffer_free(buffer); return -1; } dest_s = ssh_string_from_char(dest); if (dest_s == NULL) { ssh_set_error_oom(sftp->session); ssh_string_free(target_s); ssh_buffer_free(buffer); return -1; } id = sftp_get_new_id(sftp); if (buffer_add_u32(buffer, id) < 0) { ssh_set_error_oom(sftp->session); ssh_buffer_free(buffer); ssh_string_free(dest_s); ssh_string_free(target_s); return -1; } if (ssh_get_openssh_version(sftp->session)) { /* TODO check for version number if they ever fix it. 
*/ if (buffer_add_ssh_string(buffer, target_s) < 0 || buffer_add_ssh_string(buffer, dest_s) < 0) { ssh_set_error_oom(sftp->session); ssh_buffer_free(buffer); ssh_string_free(dest_s); ssh_string_free(target_s); return -1; } } else { if (buffer_add_ssh_string(buffer, dest_s) < 0 || buffer_add_ssh_string(buffer, target_s) < 0) { ssh_set_error_oom(sftp->session); ssh_buffer_free(buffer); ssh_string_free(dest_s); ssh_string_free(target_s); return -1; } } if (sftp_packet_write(sftp, SSH_FXP_SYMLINK, buffer) < 0) { ssh_buffer_free(buffer); ssh_string_free(dest_s); ssh_string_free(target_s); return -1; } ssh_buffer_free(buffer); ssh_string_free(dest_s); ssh_string_free(target_s); while (msg == NULL) { if (sftp_read_and_dispatch(sftp) < 0) { return -1; } msg = sftp_dequeue(sftp, id); } /* By specification, this command only returns SSH_FXP_STATUS */ if (msg->packet_type == SSH_FXP_STATUS) { status = parse_status_msg(msg); sftp_message_free(msg); if (status == NULL) { return -1; } sftp_set_error(sftp, status->status); switch (status->status) { case SSH_FX_OK: status_msg_free(status); return 0; default: break; } /* * The status should be SSH_FX_OK if the command was successful, if it * didn't, then there was an error */ ssh_set_error(sftp->session, SSH_REQUEST_DENIED, "SFTP server: %s", status->errormsg); status_msg_free(status); return -1; } else { ssh_set_error(sftp->session, SSH_FATAL, "Received message %d when attempting to set stats", msg->packet_type); sftp_message_free(msg); } return -1; }
0
244,359
GF_Err tsro_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TimeOffHintEntryBox *ptr = (GF_TimeOffHintEntryBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->TimeOffset); return GF_OK; }
0
221,459
flatpak_run_add_pulseaudio_args (FlatpakBwrap *bwrap) { g_autofree char *pulseaudio_server = flatpak_run_get_pulseaudio_server (); g_autofree char *pulseaudio_socket = NULL; g_autofree char *pulse_runtime_dir = flatpak_run_get_pulse_runtime_dir (); if (pulseaudio_server) pulseaudio_socket = flatpak_run_parse_pulse_server (pulseaudio_server); if (!pulseaudio_socket) { pulseaudio_socket = g_build_filename (pulse_runtime_dir, "native", NULL); if (!g_file_test (pulseaudio_socket, G_FILE_TEST_EXISTS)) g_clear_pointer (&pulseaudio_socket, g_free); } if (!pulseaudio_socket) { pulseaudio_socket = realpath ("/var/run/pulse/native", NULL); if (pulseaudio_socket && !g_file_test (pulseaudio_socket, G_FILE_TEST_EXISTS)) g_clear_pointer (&pulseaudio_socket, g_free); } flatpak_bwrap_unset_env (bwrap, "PULSE_SERVER"); if (pulseaudio_socket && g_file_test (pulseaudio_socket, G_FILE_TEST_EXISTS)) { static const char sandbox_socket_path[] = "/run/flatpak/pulse/native"; static const char pulse_server[] = "unix:/run/flatpak/pulse/native"; static const char config_path[] = "/run/flatpak/pulse/config"; gboolean share_shm = FALSE; /* TODO: When do we add this? */ g_autofree char *client_config = g_strdup_printf ("enable-shm=%s\n", share_shm ? "yes" : "no"); /* FIXME - error handling */ if (!flatpak_bwrap_add_args_data (bwrap, "pulseaudio", client_config, -1, config_path, NULL)) return; flatpak_bwrap_add_args (bwrap, "--ro-bind", pulseaudio_socket, sandbox_socket_path, NULL); flatpak_bwrap_set_env (bwrap, "PULSE_SERVER", pulse_server, TRUE); flatpak_bwrap_set_env (bwrap, "PULSE_CLIENTCONFIG", config_path, TRUE); flatpak_bwrap_add_runtime_dir_member (bwrap, "pulse"); } else g_debug ("Could not find pulseaudio socket"); /* Also allow ALSA access. This was added in 1.8, and is not ideally named. 
However, * since the practical permission of ALSA and PulseAudio are essentially the same, and * since we don't want to add more permissions for something we plan to replace with * portals/pipewire going forward we reinterpret pulseaudio to also mean ALSA. */ if (g_file_test ("/dev/snd", G_FILE_TEST_IS_DIR)) flatpak_bwrap_add_args (bwrap, "--dev-bind", "/dev/snd", "/dev/snd", NULL); }
0
253,721
ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_rsa_engine *rsa = &cmd->u.rsa; struct ccp_dm_workarea exp, src, dst; struct ccp_op op; unsigned int sb_count, i_len, o_len; int ret; /* Check against the maximum allowable size, in bits */ if (rsa->key_size > cmd_q->ccp->vdata->rsamax) return -EINVAL; if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); /* The RSA modulus must precede the message being acted upon, so * it must be copied to a DMA area where the message and the * modulus can be concatenated. Therefore the input buffer * length required is twice the output buffer length (which * must be a multiple of 256-bits). Compute o_len, i_len in bytes. * Buffer sizes must be a multiple of 32 bytes; rounding up may be * required. */ o_len = 32 * ((rsa->key_size + 255) / 256); i_len = o_len * 2; sb_count = 0; if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { /* sb_count is the number of storage block slots required * for the modulus. */ sb_count = o_len / CCP_SB_BYTES; op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count); if (!op.sb_key) return -EIO; } else { /* A version 5 device allows a modulus size that will not fit * in the LSB, so the command will transfer it from memory. * Set the sb key to the default, even though it's not used. */ op.sb_key = cmd_q->sb_key; } /* The RSA exponent must be in little endian format. Reverse its * byte order. */ ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); if (ret) goto e_sb; ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len); if (ret) goto e_exp; if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { /* Copy the exponent to the local storage block, using * as many 32-byte blocks as were allocated above. It's * already little endian, so no further change is required. 
*/ ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_exp; } } else { /* The exponent can be retrieved from memory via DMA. */ op.exp.u.dma.address = exp.dma.address; op.exp.u.dma.offset = 0; } /* Concatenate the modulus and the message. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted. */ ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE); if (ret) goto e_exp; ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len); if (ret) goto e_src; ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len); if (ret) goto e_src; /* Prepare the output area for the operation */ ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = i_len; op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = o_len; op.u.rsa.mod_size = rsa->key_size; op.u.rsa.input_len = i_len; ret = cmd_q->ccp->vdata->perform->rsa(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len); e_dst: ccp_dm_free(&dst); e_src: ccp_dm_free(&src); e_exp: ccp_dm_free(&exp); e_sb: if (sb_count) cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); return ret; }
0
247,610
TEST_P(SslReadBufferLimitTest, WritesLargerThanBufferLimit) { singleWriteTest(1024, 5 * 1024); }
0
209,931
static PresentationContext* PresentationContext_new(VideoClientContext* video, BYTE PresentationId, UINT32 x, UINT32 y, UINT32 width, UINT32 height) { VideoClientContextPriv* priv = video->priv; PresentationContext* ret = calloc(1, sizeof(*ret)); if (!ret) return NULL; ret->video = video; ret->PresentationId = PresentationId; ret->h264 = h264_context_new(FALSE); if (!ret->h264) { WLog_ERR(TAG, "unable to create a h264 context"); goto error_h264; } h264_context_reset(ret->h264, width, height); ret->currentSample = Stream_New(NULL, 4096); if (!ret->currentSample) { WLog_ERR(TAG, "unable to create current packet stream"); goto error_currentSample; } ret->surfaceData = BufferPool_Take(priv->surfacePool, width * height * 4); if (!ret->surfaceData) { WLog_ERR(TAG, "unable to allocate surfaceData"); goto error_surfaceData; } ret->surface = video->createSurface(video, ret->surfaceData, x, y, width, height); if (!ret->surface) { WLog_ERR(TAG, "unable to create surface"); goto error_surface; } ret->yuv = yuv_context_new(FALSE); if (!ret->yuv) { WLog_ERR(TAG, "unable to create YUV decoder"); goto error_yuv; } yuv_context_reset(ret->yuv, width, height); ret->refCounter = 1; return ret; error_yuv: video->deleteSurface(video, ret->surface); error_surface: BufferPool_Return(priv->surfacePool, ret->surfaceData); error_surfaceData: Stream_Free(ret->currentSample, TRUE); error_currentSample: h264_context_free(ret->h264); error_h264: free(ret); return NULL; }
1
455,332
restore_tilde (val, directory_part) char *val, *directory_part; { int l, vl, dl2, xl; char *dh2, *expdir, *ret, *v; vl = strlen (val); /* We need to duplicate the expansions readline performs on the directory portion before passing it to our completion function. */ dh2 = directory_part ? bash_dequote_filename (directory_part, 0) : 0; bash_directory_expansion (&dh2); dl2 = strlen (dh2); expdir = bash_tilde_expand (directory_part, 0); xl = strlen (expdir); if (*directory_part == '~' && STREQ (directory_part, expdir)) { /* tilde expansion failed, so what should we return? we use what the user typed. */ v = mbschr (val, '/'); vl = STRLEN (v); ret = (char *)xmalloc (xl + vl + 2); strcpy (ret, directory_part); if (v && *v) strcpy (ret + xl, v); free (dh2); free (expdir); return ret; } free (expdir); /* dh2 = unexpanded but dequoted tilde-prefix dl2 = length of tilde-prefix expdir = tilde-expanded tilde-prefix xl = length of expanded tilde-prefix l = length of remainder after tilde-prefix */ l = (vl - xl) + 1; if (l <= 0) { free (dh2); return (savestring (val)); /* XXX - just punt */ } ret = (char *)xmalloc (dl2 + 2 + l); strcpy (ret, dh2); strcpy (ret + dl2, val + xl); free (dh2); return (ret); }
0
338,124
void WasmBinaryBuilder::throwError(std::string text) { throw ParseException(text, 0, pos); }
0
473,849
cp1251_apply_all_case_fold(OnigCaseFoldType flag, OnigApplyAllCaseFoldFunc f, void* arg, OnigEncoding enc ARG_UNUSED) { return onigenc_apply_all_case_fold_with_map( sizeof(CaseFoldMap)/sizeof(OnigPairCaseFoldCodes), CaseFoldMap, 0, flag, f, arg); }
0
225,067
PQserverVersion(const PGconn *conn) { if (!conn) return 0; if (conn->status == CONNECTION_BAD) return 0; return conn->sversion; }
0
299,322
static Image *decompress_block(Image *orig, unsigned int *Size, ImageInfo *clone_info, ExceptionInfo *exception) { Image *image2; void *cache_block, *decompress_block; z_stream zip_info; FILE *mat_file; size_t magick_size; size_t extent; int file; int status; int zip_status; ssize_t TotalSize = 0; if(clone_info==NULL) return NULL; if(clone_info->file) /* Close file opened from previous transaction. */ { fclose(clone_info->file); clone_info->file = NULL; (void) remove_utf8(clone_info->filename); } cache_block = AcquireQuantumMemory((size_t)(*Size < 16384) ? *Size: 16384,sizeof(unsigned char *)); if(cache_block==NULL) return NULL; decompress_block = AcquireQuantumMemory((size_t)(4096),sizeof(unsigned char *)); if(decompress_block==NULL) { RelinquishMagickMemory(cache_block); return NULL; } mat_file=0; file = AcquireUniqueFileResource(clone_info->filename); if (file != -1) mat_file = fdopen(file,"w"); if(!mat_file) { RelinquishMagickMemory(cache_block); RelinquishMagickMemory(decompress_block); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Cannot create file stream for decompressed image"); return NULL; } zip_info.zalloc=AcquireZIPMemory; zip_info.zfree=RelinquishZIPMemory; zip_info.opaque = (voidpf) NULL; zip_status = inflateInit(&zip_info); if (zip_status != Z_OK) { RelinquishMagickMemory(cache_block); RelinquishMagickMemory(decompress_block); (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "UnableToUncompressImage","`%s'",clone_info->filename); (void) fclose(mat_file); RelinquishUniqueFileResource(clone_info->filename); return NULL; } /* zip_info.next_out = 8*4;*/ zip_info.avail_in = 0; zip_info.total_out = 0; while(*Size>0 && !EOFBlob(orig)) { magick_size = ReadBlob(orig, (*Size < 16384) ? 
*Size : 16384, (unsigned char *) cache_block); if (magick_size == 0) break; zip_info.next_in = (Bytef *) cache_block; zip_info.avail_in = (uInt) magick_size; while(zip_info.avail_in>0) { zip_info.avail_out = 4096; zip_info.next_out = (Bytef *) decompress_block; zip_status = inflate(&zip_info,Z_NO_FLUSH); if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END)) break; extent=fwrite(decompress_block, 4096-zip_info.avail_out, 1, mat_file); (void) extent; TotalSize += 4096-zip_info.avail_out; if(zip_status == Z_STREAM_END) goto DblBreak; } if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END)) break; *Size -= (unsigned int) magick_size; } DblBreak: inflateEnd(&zip_info); (void)fclose(mat_file); RelinquishMagickMemory(cache_block); RelinquishMagickMemory(decompress_block); *Size = TotalSize; if((clone_info->file=fopen(clone_info->filename,"rb"))==NULL) goto UnlinkFile; if( (image2 = AcquireImage(clone_info,exception))==NULL ) goto EraseFile; status = OpenBlob(clone_info,image2,ReadBinaryBlobMode,exception); if (status == MagickFalse) { DeleteImageFromList(&image2); EraseFile: fclose(clone_info->file); clone_info->file = NULL; UnlinkFile: RelinquishUniqueFileResource(clone_info->filename); return NULL; } return image2; }
0
220,233
NodeDef* Node::mutable_def() { return &props_->node_def; }
0
450,337
gboolean vnc_client_io(QIOChannel *ioc G_GNUC_UNUSED, GIOCondition condition, void *opaque) { VncState *vs = opaque; assert(vs->magic == VNC_MAGIC); if (condition & G_IO_IN) { if (vnc_client_read(vs) < 0) { /* vs is free()ed here */ return TRUE; } } if (condition & G_IO_OUT) { vnc_client_write(vs); } if (vs->disconnecting) { if (vs->ioc_tag != 0) { g_source_remove(vs->ioc_tag); } vs->ioc_tag = 0; } return TRUE; }
0
90,791
HostUsageCallback* NewWaitableHostUsageCallback() { ++waiting_callbacks_; return callback_factory_.NewCallback( &UsageAndQuotaDispatcherTask::DidGetHostUsage); }
0
384,796
getvcol( win_T *wp, pos_T *pos, colnr_T *start, colnr_T *cursor, colnr_T *end) { colnr_T vcol; char_u *ptr; // points to current char char_u *posptr; // points to char at pos->col char_u *line; // start of the line int incr; int head; #ifdef FEAT_VARTABS int *vts = wp->w_buffer->b_p_vts_array; #endif int ts = wp->w_buffer->b_p_ts; int c; vcol = 0; line = ptr = ml_get_buf(wp->w_buffer, pos->lnum, FALSE); if (pos->col == MAXCOL) posptr = NULL; // continue until the NUL else { colnr_T i; // In a few cases the position can be beyond the end of the line. for (i = 0; i < pos->col; ++i) if (ptr[i] == NUL) { pos->col = i; break; } posptr = ptr + pos->col; if (has_mbyte) // always start on the first byte posptr -= (*mb_head_off)(line, posptr); } /* * This function is used very often, do some speed optimizations. * When 'list', 'linebreak', 'showbreak' and 'breakindent' are not set * use a simple loop. * Also use this when 'list' is set but tabs take their normal size. */ if ((!wp->w_p_list || wp->w_lcs_chars.tab1 != NUL) #ifdef FEAT_LINEBREAK && !wp->w_p_lbr && *get_showbreak_value(wp) == NUL && !wp->w_p_bri #endif ) { for (;;) { head = 0; c = *ptr; // make sure we don't go past the end of the line if (c == NUL) { incr = 1; // NUL at end of line only takes one column break; } // A tab gets expanded, depending on the current column if (c == TAB) #ifdef FEAT_VARTABS incr = tabstop_padding(vcol, ts, vts); #else incr = ts - (vcol % ts); #endif else { if (has_mbyte) { // For utf-8, if the byte is >= 0x80, need to look at // further bytes to find the cell width. if (enc_utf8 && c >= 0x80) incr = utf_ptr2cells(ptr); else incr = g_chartab[c] & CT_CELL_MASK; // If a double-cell char doesn't fit at the end of a line // it wraps to the next line, it's like this char is three // cells wide. 
if (incr == 2 && wp->w_p_wrap && MB_BYTE2LEN(*ptr) > 1 && in_win_border(wp, vcol)) { ++incr; head = 1; } } else incr = g_chartab[c] & CT_CELL_MASK; } if (posptr != NULL && ptr >= posptr) // character at pos->col break; vcol += incr; MB_PTR_ADV(ptr); } } else { for (;;) { // A tab gets expanded, depending on the current column head = 0; incr = win_lbr_chartabsize(wp, line, ptr, vcol, &head); // make sure we don't go past the end of the line if (*ptr == NUL) { incr = 1; // NUL at end of line only takes one column break; } if (posptr != NULL && ptr >= posptr) // character at pos->col break; vcol += incr; MB_PTR_ADV(ptr); } } if (start != NULL) *start = vcol + head; if (end != NULL) *end = vcol + incr - 1; if (cursor != NULL) { if (*ptr == TAB && (State & NORMAL) && !wp->w_p_list && !virtual_active() && !(VIsual_active && (*p_sel == 'e' || LTOREQ_POS(*pos, VIsual))) ) *cursor = vcol + incr - 1; // cursor at end else *cursor = vcol + head; // cursor at start } }
0
328,949
R_API void r_bin_java_print_code_attr_summary(RBinJavaAttrInfo *attr) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaExceptionEntry *exc_entry = NULL; RBinJavaAttrInfo *_attr = NULL; if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Code.\n"); return; } printf ("Code Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d, Attribute Count: %d\n", attr->length, attr->info.code_attr.attributes_count); printf (" Max Stack: %d\n", attr->info.code_attr.max_stack); printf (" Max Locals: %d\n", attr->info.code_attr.max_locals); printf (" Code Length: %d\n", attr->info.code_attr.code_length); printf (" Code At Offset: 0x%08"PFMT64x "\n", (ut64) attr->info.code_attr.code_offset); printf ("Code Attribute Exception Table Information:\n"); printf (" Exception Table Length: %d\n", attr->info.code_attr.exception_table_length); if (attr->info.code_attr.exception_table) { // Delete the attr entries r_list_foreach_safe (attr->info.code_attr.exception_table, iter, iter_tmp, exc_entry) { r_bin_java_print_code_exceptions_attr_summary (exc_entry); } } printf (" Implicit Method Stack Frame:\n"); r_bin_java_print_stack_map_frame_summary (attr->info.code_attr.implicit_frame); printf ("Code Attribute Attributes Information:\n"); if (attr->info.code_attr.attributes && attr->info.code_attr.attributes_count > 0) { printf (" Code Attribute Attributes Count: %d\n", attr->info.code_attr.attributes_count); r_list_foreach_safe (attr->info.code_attr.attributes, iter, iter_tmp, _attr) { r_bin_java_print_attr_summary (_attr); } } }
0
253,595
smb3_notify(const unsigned int xid, struct file *pfile, void __user *ioc_buf) { struct smb3_notify notify; struct dentry *dentry = pfile->f_path.dentry; struct inode *inode = file_inode(pfile); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_open_parms oparms; struct cifs_fid fid; struct cifs_tcon *tcon; const unsigned char *path; void *page = alloc_dentry_path(); __le16 *utf16_path = NULL; u8 oplock = SMB2_OPLOCK_LEVEL_NONE; int rc = 0; path = build_path_from_dentry(dentry, page); if (IS_ERR(path)) { rc = PTR_ERR(path); goto notify_exit; } utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (utf16_path == NULL) { rc = -ENOMEM; goto notify_exit; } if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) { rc = -EFAULT; goto notify_exit; } tcon = cifs_sb_master_tcon(cifs_sb); oparms.tcon = tcon; oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA; oparms.disposition = FILE_OPEN; oparms.create_options = cifs_create_options(cifs_sb, 0); oparms.fid = &fid; oparms.reconnect = false; rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL, NULL); if (rc) goto notify_exit; rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid, notify.watch_tree, notify.completion_filter); SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc); notify_exit: free_dentry_path(page); kfree(utf16_path); return rc; }
0
442,579
static void test_memslot_invalid_addresses(void) { g_test_trap_subprocess("/server/memslot-invalid-addresses/subprocess/group_id", 0, 0); g_test_trap_assert_stderr("*group_id too big*"); g_test_trap_subprocess("/server/memslot-invalid-addresses/subprocess/slot_id", 0, 0); g_test_trap_assert_stderr("*slot_id 1 too big*"); }
0
254,066
std::vector<std::string> keys() const { std::vector<std::string> ret; for (auto element : key_value_pairs_) { std::string str_element(element); ret.emplace_back(str_element.substr(0, str_element.find('='))); } return ret; }
0
222,492
Status FunctionLibraryDefinition::AddFunctionDef( const FunctionDef& fdef, const StackTracesMap& stack_traces) { mutex_lock l(mu_); bool added; return AddFunctionDefHelper(fdef, stack_traces, &added); }
0
345,209
console_map_init(void) { int i; for (i = 0; i < MAX_NR_CONSOLES; i++) if (vc_cons_allocated(i) && !*vc_cons[i].d->vc_uni_pagedir_loc) con_set_default_unimap(vc_cons[i].d); }
0
509,547
int ha_maria::optimize(THD * thd, HA_CHECK_OPT *check_opt) { int error; HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param); if (!file || !param) return HA_ADMIN_INTERNAL_ERROR; maria_chk_init(param); param->thd= thd; param->op_name= "optimize"; param->testflag= (check_opt->flags | T_SILENT | T_FORCE_CREATE | T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX); param->orig_sort_buffer_length= THDVAR(thd, sort_buffer_size); thd_progress_init(thd, 1); if ((error= repair(thd, param, 1)) && param->retry_repair) { sql_print_warning("Warning: Optimize table got errno %d on %s.%s, retrying", my_errno, param->db_name, param->table_name); param->testflag &= ~T_REP_BY_SORT; error= repair(thd, param, 0); } thd_progress_end(thd); return error; }
0
90,835
// Test helper: ask `client` to delete all data it stores for `origin` of
// the given storage `type`.  The quota status is reset first so that the
// asynchronous StatusCallback observably overwrites it on completion.
void DeleteClientOriginData(QuotaClient* client, const GURL& origin,
                            StorageType type) {
  DCHECK(client);
  quota_status_ = kQuotaStatusUnknown;  // reset before the async call reports
  client->DeleteOriginData(origin, type, callback_factory_.NewCallback(
      &QuotaManagerTest::StatusCallback));
}
0
512,355
bool check_is_evaluable_expression_or_error() { if (is_evaluable_expression()) return false; // Ok raise_error_not_evaluable(); return true; // Error }
0
275,961
/* Accessor: number of native machine words used to represent one field
 * element of this curve. */
unsigned uECC_curve_num_words(uECC_Curve curve) {
    return curve->num_words;
}
0
355,649
/*
 * Parse (and, when "evalarg" requests evaluation, evaluate) one innermost
 * expression term: a number/string/list/dict literal, an option, environment
 * variable or register value, a lambda, a parenthesised expression, or a
 * variable/function name — including any leading '!', '-', '+' and trailing
 * subscripts (expr[i], expr.name, expr(args), expr->name(args)).
 *
 * "*arg" must point at the first character of the term and is advanced past
 * everything consumed.  The result is stored in "rettv".
 * Returns OK, or FAIL on a parse/eval error.
 */
eval7(
    char_u	**arg,
    typval_T	*rettv,
    evalarg_T	*evalarg,
    int		want_string)	// after "." operator
{
    // evaluate == 0 means parse-only (e.g. skipping an untaken branch)
    int		evaluate = evalarg != NULL
				      && (evalarg->eval_flags & EVAL_EVALUATE);
    int		len;
    char_u	*s;
    char_u	*name_start = NULL;
    char_u	*start_leader, *end_leader;
    int		ret = OK;
    char_u	*alias;
    static int	recurse = 0;	// current nesting depth of eval7() calls

    /*
     * Initialise variable so that clear_tv() can't mistake this for a
     * string and free a string that isn't there.
     */
    rettv->v_type = VAR_UNKNOWN;

    /*
     * Skip '!', '-' and '+' characters.  They are handled later.
     */
    start_leader = *arg;
    if (eval_leader(arg, in_vim9script()) == FAIL)
	return FAIL;
    end_leader = *arg;

    // A leading '.' that is not the start of a float is invalid here.
    if (**arg == '.' && (!isdigit(*(*arg + 1))
#ifdef FEAT_FLOAT
	    || in_old_script(2)
#endif
	    ))
    {
	semsg(_(e_invalid_expression_str), *arg);
	++*arg;
	return FAIL;
    }

    // Limit recursion to 1000 levels.  At least at 10000 we run out of stack
    // and crash.
    if (recurse == 1000)
    {
	semsg(_(e_expression_too_recursive_str), *arg);
	return FAIL;
    }
    ++recurse;

    // Dispatch on the first character of the term.
    switch (**arg)
    {
	/*
	 * Number constant.
	 */
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':
	case '.':
		ret = eval_number(arg, rettv, evaluate, want_string);

		// Apply prefixed "-" and "+" now.  Matters especially when
		// "->" follows.
		if (ret == OK && evaluate && end_leader > start_leader
						  && rettv->v_type != VAR_BLOB)
		    ret = eval7_leader(rettv, TRUE, start_leader, &end_leader);
		break;

	/*
	 * String constant: "string".
	 */
	case '"':	ret = eval_string(arg, rettv, evaluate);
			break;

	/*
	 * Literal string constant: 'str''ing'.
	 */
	case '\'':	ret = eval_lit_string(arg, rettv, evaluate);
			break;

	/*
	 * List: [expr, expr]
	 */
	case '[':	ret = eval_list(arg, rettv, evalarg, TRUE);
			break;

	/*
	 * Dictionary: #{key: val, key: val}
	 */
	case '#':
		if (in_vim9script())
		{
		    // In Vim9 script "#" starts a comment, not a dict.
		    ret = vim9_bad_comment(*arg) ? FAIL : NOTDONE;
		}
		else if ((*arg)[1] == '{')
		{
		    ++*arg;
		    ret = eval_dict(arg, rettv, evalarg, TRUE);
		}
		else
		    ret = NOTDONE;
		break;

	/*
	 * Lambda: {arg, arg -> expr}
	 * Dictionary: {'key': val, 'key': val}
	 */
	case '{':
		if (in_vim9script())
		    ret = NOTDONE;
		else
		    ret = get_lambda_tv(arg, rettv, in_vim9script(), evalarg);
		if (ret == NOTDONE)
		    ret = eval_dict(arg, rettv, evalarg, FALSE);
		break;

	/*
	 * Option value: &name
	 */
	case '&':	ret = eval_option(arg, rettv, evaluate);
			break;

	/*
	 * Environment variable: $VAR.
	 */
	case '$':	ret = eval_env_var(arg, rettv, evaluate);
			break;

	/*
	 * Register contents: @r.
	 */
	case '@':	++*arg;
			if (evaluate)
			{
			    if (in_vim9script() && IS_WHITE_OR_NUL(**arg))
				semsg(_(e_syntax_error_at_str), *arg);
			    else if (in_vim9script()
					     && !valid_yank_reg(**arg, FALSE))
				emsg_invreg(**arg);
			    else
			    {
				rettv->v_type = VAR_STRING;
				rettv->vval.v_string = get_reg_contents(**arg,
								GREG_EXPR_SRC);
			    }
			}
			if (**arg != NUL)
			    ++*arg;
			break;

	/*
	 * nested expression: (expression).
	 * or lambda: (arg) => expr
	 */
	case '(':
		ret = NOTDONE;
		if (in_vim9script())
		{
		    // Try Vim9 lambda syntax first; falls back to a plain
		    // parenthesised expression below when it doesn't match.
		    ret = get_lambda_tv(arg, rettv, TRUE, evalarg);
		    if (ret == OK && evaluate)
		    {
			ufunc_T *ufunc = rettv->vval.v_partial->pt_func;

			// Compile it here to get the return type.  The return
			// type is optional, when it's missing use t_unknown.
			// This is recognized in compile_return().
			if (ufunc->uf_ret_type->tt_type == VAR_VOID)
			    ufunc->uf_ret_type = &t_unknown;
			if (compile_def_function(ufunc,
				     FALSE, COMPILE_TYPE(ufunc), NULL) == FAIL)
			{
			    clear_tv(rettv);
			    ret = FAIL;
			}
		    }
		}
		if (ret == NOTDONE)
		{
		    *arg = skipwhite_and_linebreak(*arg + 1, evalarg);
		    ret = eval1(arg, rettv, evalarg);	// recursive!

		    *arg = skipwhite_and_linebreak(*arg, evalarg);
		    if (**arg == ')')
			++*arg;
		    else if (ret == OK)
		    {
			emsg(_(e_missing_closing_paren));
			clear_tv(rettv);
			ret = FAIL;
		    }
		}
		break;

	default:	ret = NOTDONE;
			break;
    }

    if (ret == NOTDONE)
    {
	/*
	 * Must be a variable or function name.
	 * Can also be a curly-braces kind of name: {expr}.
	 */
	s = *arg;
	len = get_name_len(arg, &alias, evaluate, TRUE);
	if (alias != NULL)
	    s = alias;

	if (len <= 0)
	    ret = FAIL;
	else
	{
	    int	    flags = evalarg == NULL ? 0 : evalarg->eval_flags;

	    if (evaluate && in_vim9script() && len == 1 && *s == '_')
	    {
		// "_" is the Vim9 ignored-value placeholder; cannot be read.
		emsg(_(e_cannot_use_underscore_here));
		ret = FAIL;
	    }
	    else if ((in_vim9script() ? **arg : *skipwhite(*arg)) == '(')
	    {
		// "name(..." recursive!
		*arg = skipwhite(*arg);
		ret = eval_func(arg, evalarg, s, len, rettv, flags, NULL);
	    }
	    else if (flags & EVAL_CONSTANT)
		ret = FAIL;
	    else if (evaluate)
	    {
		// get the value of "true", "false" or a variable
		if (len == 4 && in_vim9script()
					     && STRNCMP(s, "true", 4) == 0)
		{
		    rettv->v_type = VAR_BOOL;
		    rettv->vval.v_number = VVAL_TRUE;
		    ret = OK;
		}
		else if (len == 5 && in_vim9script()
					    && STRNCMP(s, "false", 5) == 0)
		{
		    rettv->v_type = VAR_BOOL;
		    rettv->vval.v_number = VVAL_FALSE;
		    ret = OK;
		}
		else if (len == 4 && in_vim9script()
					     && STRNCMP(s, "null", 4) == 0)
		{
		    rettv->v_type = VAR_SPECIAL;
		    rettv->vval.v_number = VVAL_NULL;
		    ret = OK;
		}
		else
		{
		    name_start = s;
		    ret = eval_variable(s, len, 0, rettv, NULL,
					   EVAL_VAR_VERBOSE + EVAL_VAR_IMPORT);
		}
	    }
	    else
	    {
		// skip the name
		check_vars(s, len);
		ret = OK;
	    }
	}
	vim_free(alias);
    }

    // Handle following '[', '(' and '.' for expr[expr], expr.name,
    // expr(expr), expr->name(expr)
    if (ret == OK)
	ret = handle_subscript(arg, name_start, rettv, evalarg, TRUE);

    /*
     * Apply logical NOT and unary '-', from right to left, ignore '+'.
     */
    if (ret == OK && evaluate && end_leader > start_leader)
	ret = eval7_leader(rettv, FALSE, start_leader, &end_leader);

    --recurse;
    return ret;
}
0
413,632
/* Build a graph describing cross-references to the binary's imports.
 * For imports whose address can be resolved, the xrefs pointing at that
 * address are added; unresolved imports become bare named nodes.
 * Returns a newly allocated RGraph (caller frees) or NULL when there is
 * no current bin object or allocation fails. */
R_API RGraph *r_core_anal_importxrefs(RCore *core) {
	RBinInfo *info = r_bin_get_info (core->bin);
	RBinObject *obj = r_bin_cur_object (core->bin);
	/* has_lit: binary carries load-info needed to resolve import addrs */
	bool lit = info? info->has_lit: false;
	/* use virtual addressing when io says so or we are debugging */
	bool va = core->io->va || r_config_get_b (core->config, "cfg.debug");

	RListIter *iter;
	RBinImport *imp;
	if (!obj) {
		return NULL;
	}
	RGraph *graph = r_graph_new ();
	if (!graph) {
		return NULL;
	}
	r_list_foreach (obj->imports, iter, imp) {
		/* only try to resolve the address when load-info is present */
		ut64 addr = lit ? r_core_bin_impaddr (core->bin, va, imp->name): 0;
		if (addr) {
			add_single_addr_xrefs (core, addr, graph);
		} else {
			/* unresolved import: keep it visible as a plain node */
			r_graph_add_node_info (graph, imp->name, NULL, 0);
		}
	}
	return graph;
}
0
274,851
// Accessor: returns the stored second input value (input2_).
int input2() { return input2_; }
0
514,300
/*
  Check that all items in an UPDATE/SET list are updatable columns.

  When updating through a view, every item must resolve to a real field;
  each Item is then replaced in the tree by a fresh Item_field copy.  Under
  MODE_SIMULTANEOUS_ASSIGNMENT it additionally verifies that no column is
  assigned more than once.

  @param thd          current session
  @param items        the left-hand-side items of the SET list
  @param update_view  true when the target is a view

  @return TRUE on error (my_error() already raised), FALSE on success.
*/
static bool check_fields(THD *thd, List<Item> &items, bool update_view)
{
  Item *item;
  if (update_view)
  {
    List_iterator<Item> it(items);
    Item_field *field;
    while ((item= it++))
    {
      if (!(field= item->field_for_view_update()))
      {
        /* item has name, because it comes from VIEW SELECT list */
        my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name.str);
        return TRUE;
      }
      /*
        we make temporary copy of Item_field, to avoid influence of changing
        result_field on Item_ref which refer on this field
      */
      thd->change_item_tree(it.ref(),
                            new (thd->mem_root) Item_field(thd, field));
    }
  }
  if (thd->variables.sql_mode & MODE_SIMULTANEOUS_ASSIGNMENT)
  {
    // Make sure that a column is updated only once
    /* NOTE(review): this branch dereferences field_for_view_update()
       without a NULL check; presumably it can only run after the items
       were validated above or are plain fields — confirm with callers. */
    List_iterator_fast<Item> it(items);
    while ((item= it++))
    {
      item->field_for_view_update()->field->clear_has_explicit_value();
    }
    it.rewind();
    while ((item= it++))
    {
      Field *f= item->field_for_view_update()->field;
      if (f->has_explicit_value())
      {
        my_error(ER_UPDATED_COLUMN_ONLY_ONCE, MYF(0),
                 *(f->table_name), f->field_name.str);
        return TRUE;
      }
      f->set_has_explicit_value();
    }
  }
  return FALSE;
}
0
344,807
/*
 * Make sure file descriptors 0, 1 and 2 are open, pointing any that are
 * currently closed at /dev/null.  This stops later opens from landing on
 * the standard descriptors by accident.  Exits on failure.
 */
sanitise_stdfd(void)
{
	int nullfd, fd;

	nullfd = open(_PATH_DEVNULL, O_RDWR);
	if (nullfd == -1) {
		fprintf(stderr, "Couldn't open /dev/null: %s\n",
		    strerror(errno));
		exit(1);
	}
	for (fd = nullfd + 1; fd <= STDERR_FILENO; fd++) {
		/* Only populate closed fds. */
		if (fcntl(fd, F_GETFL) != -1 || errno != EBADF)
			continue;	/* descriptor is already open */
		if (dup2(nullfd, fd) == -1) {
			fprintf(stderr, "dup2: %s\n", strerror(errno));
			exit(1);
		}
	}
	/* /dev/null fd only needed if it became one of 0..2 */
	if (nullfd > STDERR_FILENO)
		close(nullfd);
}
0
457,772
/*
 * libcurl header callback for URL-auth backend responses.
 *
 * Scans each response header line for the configured auth header (marks
 * the client authenticated), the configured time-limit header (schedules
 * the client's disconnect time) and an "icecast-auth-message" header
 * (copied into url->errormsg).  Must return the number of bytes consumed
 * (len) so curl keeps delivering headers.
 */
static size_t handle_returned_header (void *ptr, size_t size, size_t nmemb, void *stream)
{
    auth_client *auth_user = stream;
    size_t len = size * nmemb;          /* total bytes in this header line */
    client_t *client = auth_user->client;

    if (client)
    {
        auth_t *auth = client->auth;
        auth_url *url = auth->state;

        /* configured auth header present => client is authenticated */
        if (url->auth_header && len >= url->auth_header_len &&
            strncasecmp(ptr, url->auth_header, url->auth_header_len) == 0)
            client->authenticated = 1;

        /* configured time-limit header: "<name><limit>\r\n" */
        if (url->timelimit_header && len > url->timelimit_header_len &&
            strncasecmp(ptr, url->timelimit_header, url->timelimit_header_len) == 0)
        {
            const char *input = ptr;
            unsigned int limit = 0;

            /* require a CRLF-terminated line before parsing the number */
            if (len >= 2 && input[len - 2] == '\r' && input[len - 1] == '\n') {
                input += url->timelimit_header_len;
                if (sscanf(input, "%u\r\n", &limit) == 1) {
                    client->con->discon_time = time(NULL) + limit;
                } else {
                    ICECAST_LOG_ERROR("Auth backend returned invalid timeline header: Can not parse limit");
                }
            } else {
                ICECAST_LOG_ERROR("Auth backend returned invalid timelimit header.");
            }
        }

        /* "icecast-auth-message: " is 22 chars; the extra 2 in the offsets
         * below account for the trailing CRLF (22 + 2 = 24). */
        if (len > 24 && strncasecmp(ptr, "icecast-auth-message: ", 22) == 0)
        {
            const char *input = ptr;
            size_t copy_len = len - 24 + 1; /* length of string plus \0-termination */
            if (copy_len > sizeof(url->errormsg)) {
                copy_len = sizeof(url->errormsg);
            }
            if (len >= 2 && input[len - 2] == '\r' && input[len - 1] == '\n') {
                input += 22;
                memcpy(url->errormsg, input, copy_len);
                url->errormsg[copy_len-1] = 0;  /* always NUL-terminate */
            } else {
                ICECAST_LOG_ERROR("Auth backend returned invalid message header.");
            }
        }
    }
    return len;
}
0
488,333
/*
 * Return a display name for architecture-specific special mappings shown
 * in /proc/<pid>/maps: "[vdso]" when the VMA starts at the process's vDSO
 * base, NULL otherwise.
 */
const char *arch_vma_name(struct vm_area_struct *vma)
{
	const char *name = NULL;

	if (vma->vm_mm != NULL
	    && vma->vm_start == vma->vm_mm->context.vdso_base)
		name = "[vdso]";
	return name;
}
0
232,306
/* Free every box in the list, then the list container itself.
 * Safe ordering: reset destroys the contained boxes first. */
void gf_isom_box_array_del(GF_List *boxlist)
{
	gf_isom_box_array_reset(boxlist);
	gf_list_del(boxlist);
}
0
384,126
/* Accessor: current element nesting depth of the XML writer. */
raptor_xml_writer_get_depth(raptor_xml_writer *xml_writer)
{
  return xml_writer->depth;
}
0
261,736
// Send an RTMP "invoke" command: encodes the command name, a fresh
// (pre-incremented) request id and the AMF payload, then ships it as an
// MSG_CMD message.
void RtmpProtocol::sendInvoke(const string &cmd, const AMFValue &val) {
    AMFEncoder enc;
    enc << cmd << ++_send_req_id << val;  // request id increments per invoke
    sendRequest(MSG_CMD, enc.data());
}
0
225,629
/*
 * Destructor for the Xtra box: frees every tag (name and value strings,
 * then the tag struct), the tag list, and finally the box itself.
 *
 * Fix: the captured function text was missing its closing brace; restored
 * so the definition is syntactically complete.
 */
void xtra_box_del(GF_Box *s)
{
	GF_XtraBox *ptr = (GF_XtraBox *)s;
	while (gf_list_count(ptr->tags)) {
		GF_XtraTag *tag = gf_list_pop_back(ptr->tags);
		if (tag->name) gf_free(tag->name);
		if (tag->prop_value) gf_free(tag->prop_value);
		gf_free(tag);
	}
	gf_list_del(ptr->tags);
	gf_free(s);
}
0
385,804
/*
 * finish_no_open - filesystem's atomic_open declined to open the file.
 * Records the dentry on the file so the caller can fall back to the
 * normal lookup/open path; the constant 1 signals "not opened here".
 */
int finish_no_open(struct file *file, struct dentry *dentry)
{
	file->f_path.dentry = dentry;
	return 1;
}
0
281,635
/* Parse an EXIF IFD starting at file offset "base": walks each directory
 * entry and records exposure, aperture, ISO, timestamps, focal length,
 * maker notes and (for some Kodak files) raw dimensions and the CFA
 * pattern into the decoder's globals. */
void CLASS parse_exif (int base)
{
  unsigned kodak, entries, tag, type, len, save, c;
  double expo;

  /* Kodak EASTMAN files with few IFDs store raw size in EXIF tags */
  kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3;
  entries = get2();
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
    switch (tag) {
      case 33434:  shutter = getreal(type);		break;  /* ExposureTime */
      case 33437:  aperture = getreal(type);		break;  /* FNumber */
      case 34855:  iso_speed = get2();			break;  /* ISOSpeedRatings */
      case 36867:                                               /* DateTimeOriginal */
      case 36868:  get_timestamp(0);			break;  /* DateTimeDigitized */
      case 37377:  /* ShutterSpeedValue (APEX); guard against huge exponents */
                   if ((expo = -getreal(type)) < 128)
		     shutter = pow (2.0, expo);		break;
      case 37378:  aperture = pow (2.0, getreal(type)/2);  break;  /* ApertureValue (APEX) */
      case 37386:  focal_len = getreal(type);		break;  /* FocalLength */
      case 37500:  parse_makernote (base, 0);		break;  /* MakerNote */
      case 40962:  if (kodak) raw_width = get4();	break;  /* PixelXDimension */
      case 40963:  if (kodak) raw_height = get4();	break;  /* PixelYDimension */
      case 41730:  /* CFAPattern: only 2x2 layouts (0x20002) are accepted */
	if (get4() == 0x20002)
	  for (exif_cfa=c=0; c < 8; c+=2)
	    exif_cfa |= fgetc(ifp) * 0x01010101 << c;
    }
    fseek (ifp, save, SEEK_SET);  /* back to the next directory entry */
  }
}
0
318,780
/* Allocate and initialise a fresh drill-parser state block with default
 * section, absolute coordinates, unspecified units and inch number
 * formats.  Returns the new block, or NULL on allocation failure.
 * NOTE(review): the incoming "state" parameter is overwritten immediately
 * (pass-by-value, so the caller's pointer is untouched) — presumably a
 * historical signature; confirm callers rely only on the return value. */
new_state(drill_state_t *state)
{
    state = g_new0(drill_state_t, 1);
    if (state != NULL) {
	/* Init structure */
	state->curr_section = DRILL_NONE;
	state->coordinate_mode = DRILL_MODE_ABSOLUTE;
	state->origin_x = 0.0;
	state->origin_y = 0.0;
	state->unit = GERBV_UNIT_UNSPECIFIED;
	state->backup_number_format = FMT_000_000;	/* only used for METRIC */
	state->header_number_format = state->number_format = FMT_00_0000; /* i. e. INCH */
	state->autod = 1;
	state->decimals = 4;
    }
    return state;
} /* new_state */
0
225,010
/*
 * Percent-decode a connection-URI component.
 *
 * Returns a newly malloc'd decoded copy of "str", or NULL on failure
 * (out of memory, malformed %XX token, or a forbidden %00), in which
 * case a message is appended to errorMessage.  Caller frees the result.
 */
conninfo_uri_decode(const char *str, PQExpBuffer errorMessage)
{
	char	   *buf;
	char	   *p;
	const char *q = str;

	/* decoded text is never longer than the input */
	buf = malloc(strlen(str) + 1);
	if (buf == NULL)
	{
		appendPQExpBufferStr(errorMessage, libpq_gettext("out of memory\n"));
		return NULL;
	}
	p = buf;

	for (;;)
	{
		if (*q != '%')
		{
			/* copy and check for NUL terminator */
			if (!(*(p++) = *(q++)))
				break;
		}
		else
		{
			int			hi;
			int			lo;
			int			c;

			++q;				/* skip the percent sign itself */

			/*
			 * Possible EOL will be caught by the first call to
			 * get_hexdigit(), so we never dereference an invalid q pointer.
			 */
			if (!(get_hexdigit(*q++, &hi) && get_hexdigit(*q++, &lo)))
			{
				appendPQExpBuffer(errorMessage,
								  libpq_gettext("invalid percent-encoded token: \"%s\"\n"),
								  str);
				free(buf);
				return NULL;
			}

			c = (hi << 4) | lo;
			/* an embedded NUL would silently truncate the value */
			if (c == 0)
			{
				appendPQExpBuffer(errorMessage,
								  libpq_gettext("forbidden value %%00 in percent-encoded value: \"%s\"\n"),
								  str);
				free(buf);
				return NULL;
			}
			*(p++) = c;
		}
	}

	return buf;
}
0
310,099
/*
 * Sleep for "ms" milliseconds.  Uses nanosleep() when available, resuming
 * after signal interruptions with the remaining time so the full interval
 * is honoured; otherwise falls back to the ncurses timed-wait helper.
 * Always returns OK.
 */
drv_nap(TERMINAL_CONTROL_BLOCK * TCB GCC_UNUSED, int ms)
{
#if HAVE_NANOSLEEP
    {
	struct timespec request, remaining;
	request.tv_sec = ms / 1000;
	request.tv_nsec = (ms % 1000) * 1000000;
	/* restart after EINTR with the unexpired remainder */
	while (nanosleep(&request, &remaining) == -1
	       && errno == EINTR) {
	    request = remaining;
	}
    }
#else
    _nc_timed_wait(0, 0, ms, (int *) 0 EVENTLIST_2nd(0));
#endif
    return OK;
}
0
198,116
// Sparse reduction kernel: groups the sparse tensor by the non-reduced
// dimensions and folds each group's values into one cell of the dense
// output.
//
// SECURITY FIX (out-of-bounds write): the flat output index is derived
// from user-controlled sparse indices; previously `out_flat(idx)` was
// written without verifying that idx lies inside the output buffer,
// allowing a heap OOB write from malformed inputs.  An OP_REQUIRES bounds
// check is now performed before every write.
void Compute(OpKernelContext *ctx) override {
  const Tensor *indices_t, *values_t, *shape_t, *reduction_axes_t;
  OP_REQUIRES_OK(ctx, ctx->input("input_indices", &indices_t));
  OP_REQUIRES_OK(ctx, ctx->input("input_values", &values_t));
  OP_REQUIRES_OK(ctx, ctx->input("input_shape", &shape_t));
  OP_REQUIRES_OK(ctx, ctx->input("reduction_axes", &reduction_axes_t));
  OP_REQUIRES_OK(ctx, ValidateInputs(shape_t, reduction_axes_t));

  // TODO(zongheng): we will call Reorder() below, which will modify
  // in-place the underlying indices and values buffers.  To avoid
  // surprises of this kernel being stateful, we work around the above by
  // making deep copies here.  Remove this if/when we change Reorder()'s
  // semantics.
  const auto shape_vec = shape_t->vec<int64>();
  SparseTensor sp;
  OP_REQUIRES_OK(ctx, SparseTensor::Create(
      tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
      TensorShape(shape_vec), &sp));
  ReduceDetails reduction = SparseTensorReduceHelper(
      sp, reduction_axes_t->flat<int32>(), keep_dims_);

  Tensor *out_values;
  OP_REQUIRES_OK(
      ctx, ctx->allocate_output(0, reduction.reduced_shape, &out_values));
  auto out_flat = out_values->flat<T>();
  out_flat.setZero();

  // Scalar scratch cell that receives each group's reduced value.
  Tensor tmp_reduced_val;
  OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
                                         TensorShape({}), &tmp_reduced_val));
  auto reduced_val = tmp_reduced_val.scalar<T>();

  // Compute strides, and use it to convert coords to flat index.  The
  // coordinates returned by .group() have the same ndims as group_by_dims.
  gtl::InlinedVector<int64, 8> output_strides(reduction.group_by_dims.size());
  if (!output_strides.empty()) {  // Do this iff we don't reduce all.
    output_strides.back() = 1;
    for (int d = output_strides.size() - 2; d >= 0; --d) {
      output_strides[d] =
          output_strides[d + 1] * shape_vec(reduction.group_by_dims[d + 1]);
    }
  }

  auto CoordinatesToFlatIndex = [](ArraySlice<int64> coords,
                                   ArraySlice<int64> strides) -> int64 {
    if (strides.empty()) {  // Reduce all.
      return 0;
    }
    CHECK_EQ(coords.size(), strides.size());
    int64_t idx = 0;
    for (int i = 0; i < coords.size(); ++i) {
      idx += coords[i] * strides[i];
    }
    return idx;
  };

  // Each group maps one-on-one onto a value in the reduced tensor.
  // g.group() provides the coordinates of a particular reduced value.
  sp.Reorder<T>(reduction.reorder_dims);
  for (const auto &g : sp.group(reduction.group_by_dims)) {
    Op::template Run<T>(ctx, reduced_val, g.template values<T>());
    const int64_t idx = CoordinatesToFlatIndex(g.group(), output_strides);
    // Reject indices outside the allocated output: they come from
    // user-supplied sparse coordinates and must not drive an OOB write.
    OP_REQUIRES(ctx, idx >= 0 && idx < out_flat.size(),
                errors::InvalidArgument(
                    "Obtained a write index of ", idx,
                    " which is outside of bounds of [0, ",
                    out_flat.size(), ")"));
    out_flat(idx) = reduced_val();
    VLOG(2) << "coords: " << absl::StrJoin(g.group(), ",")
            << "; idx: " << idx << "; group " << Op::Name() << ": "
            << reduced_val();
  }
}
1
513,263
/*
  Create the engine-level representation of an internal temporary table
  and open it for use.

  For the default temp-table engine the on-disk/in-engine table is created
  first (with the given key and record layout), and the record buffer is
  cleared so uninitialized memory never reaches storage.

  @param table          table object to instantiate
  @param keyinfo        key definition for the table, if any
  @param start_recinfo  first element of the engine column definitions
  @param recinfo        in/out cursor over the column definitions
  @param options        tmp-table creation options

  @return TRUE on failure (create or open failed), FALSE on success.
*/
bool instantiate_tmp_table(TABLE *table, KEY *keyinfo,
                           TMP_ENGINE_COLUMNDEF *start_recinfo,
                           TMP_ENGINE_COLUMNDEF **recinfo,
                           ulonglong options)
{
  if (table->s->db_type() == TMP_ENGINE_HTON)
  {
    if (create_internal_tmp_table(table, keyinfo, start_recinfo, recinfo,
                                  options))
      return TRUE;
    // Make empty record so random data is not written to disk
    empty_record(table);
    table->status= STATUS_NO_RECORD;
  }
  if (open_tmp_table(table))
    return TRUE;

  return FALSE;
}
0
202,081
do_put( int regname, char_u *expr_result, // result for regname "=" when compiled int dir, // BACKWARD for 'P', FORWARD for 'p' long count, int flags) { char_u *ptr; char_u *newp, *oldp; int yanklen; int totlen = 0; // init for gcc linenr_T lnum; colnr_T col; long i; // index in y_array[] int y_type; long y_size; int oldlen; long y_width = 0; colnr_T vcol; int delcount; int incr = 0; long j; struct block_def bd; char_u **y_array = NULL; yankreg_T *y_current_used = NULL; long nr_lines = 0; pos_T new_cursor; int indent; int orig_indent = 0; // init for gcc int indent_diff = 0; // init for gcc int first_indent = TRUE; int lendiff = 0; pos_T old_pos; char_u *insert_string = NULL; int allocated = FALSE; long cnt; pos_T orig_start = curbuf->b_op_start; pos_T orig_end = curbuf->b_op_end; unsigned int cur_ve_flags = get_ve_flags(); #ifdef FEAT_CLIPBOARD // Adjust register name for "unnamed" in 'clipboard'. adjust_clip_reg(&regname); (void)may_get_selection(regname); #endif if (flags & PUT_FIXINDENT) orig_indent = get_indent(); curbuf->b_op_start = curwin->w_cursor; // default for '[ mark curbuf->b_op_end = curwin->w_cursor; // default for '] mark // Using inserted text works differently, because the register includes // special characters (newlines, etc.). if (regname == '.') { if (VIsual_active) stuffcharReadbuff(VIsual_mode); (void)stuff_inserted((dir == FORWARD ? (count == -1 ? 'o' : 'a') : (count == -1 ? 'O' : 'i')), count, FALSE); // Putting the text is done later, so can't really move the cursor to // the next character. Use "l" to simulate it. if ((flags & PUT_CURSEND) && gchar_cursor() != NUL) stuffcharReadbuff('l'); return; } // For special registers '%' (file name), '#' (alternate file name) and // ':' (last command line), etc. we have to create a fake yank register. // For compiled code "expr_result" holds the expression result. 
if (regname == '=' && expr_result != NULL) insert_string = expr_result; else if (get_spec_reg(regname, &insert_string, &allocated, TRUE) && insert_string == NULL) return; // Autocommands may be executed when saving lines for undo. This might // make "y_array" invalid, so we start undo now to avoid that. if (u_save(curwin->w_cursor.lnum, curwin->w_cursor.lnum + 1) == FAIL) goto end; if (insert_string != NULL) { y_type = MCHAR; #ifdef FEAT_EVAL if (regname == '=') { // For the = register we need to split the string at NL // characters. // Loop twice: count the number of lines and save them. for (;;) { y_size = 0; ptr = insert_string; while (ptr != NULL) { if (y_array != NULL) y_array[y_size] = ptr; ++y_size; ptr = vim_strchr(ptr, '\n'); if (ptr != NULL) { if (y_array != NULL) *ptr = NUL; ++ptr; // A trailing '\n' makes the register linewise. if (*ptr == NUL) { y_type = MLINE; break; } } } if (y_array != NULL) break; y_array = ALLOC_MULT(char_u *, y_size); if (y_array == NULL) goto end; } } else #endif { y_size = 1; // use fake one-line yank register y_array = &insert_string; } } else { get_yank_register(regname, FALSE); y_type = y_current->y_type; y_width = y_current->y_width; y_size = y_current->y_size; y_array = y_current->y_array; y_current_used = y_current; } if (y_type == MLINE) { if (flags & PUT_LINE_SPLIT) { char_u *p; // "p" or "P" in Visual mode: split the lines to put the text in // between. if (u_save_cursor() == FAIL) goto end; p = ml_get_cursor(); if (dir == FORWARD && *p != NUL) MB_PTR_ADV(p); ptr = vim_strsave(p); if (ptr == NULL) goto end; ml_append(curwin->w_cursor.lnum, ptr, (colnr_T)0, FALSE); vim_free(ptr); oldp = ml_get_curline(); p = oldp + curwin->w_cursor.col; if (dir == FORWARD && *p != NUL) MB_PTR_ADV(p); ptr = vim_strnsave(oldp, p - oldp); if (ptr == NULL) goto end; ml_replace(curwin->w_cursor.lnum, ptr, FALSE); ++nr_lines; dir = FORWARD; } if (flags & PUT_LINE_FORWARD) { // Must be "p" for a Visual block, put lines below the block. 
curwin->w_cursor = curbuf->b_visual.vi_end; dir = FORWARD; } curbuf->b_op_start = curwin->w_cursor; // default for '[ mark curbuf->b_op_end = curwin->w_cursor; // default for '] mark } if (flags & PUT_LINE) // :put command or "p" in Visual line mode. y_type = MLINE; if (y_size == 0 || y_array == NULL) { semsg(_(e_nothing_in_register_str), regname == 0 ? (char_u *)"\"" : transchar(regname)); goto end; } if (y_type == MBLOCK) { lnum = curwin->w_cursor.lnum + y_size + 1; if (lnum > curbuf->b_ml.ml_line_count) lnum = curbuf->b_ml.ml_line_count + 1; if (u_save(curwin->w_cursor.lnum - 1, lnum) == FAIL) goto end; } else if (y_type == MLINE) { lnum = curwin->w_cursor.lnum; #ifdef FEAT_FOLDING // Correct line number for closed fold. Don't move the cursor yet, // u_save() uses it. if (dir == BACKWARD) (void)hasFolding(lnum, &lnum, NULL); else (void)hasFolding(lnum, NULL, &lnum); #endif if (dir == FORWARD) ++lnum; // In an empty buffer the empty line is going to be replaced, include // it in the saved lines. if ((BUFEMPTY() ? u_save(0, 2) : u_save(lnum - 1, lnum)) == FAIL) goto end; #ifdef FEAT_FOLDING if (dir == FORWARD) curwin->w_cursor.lnum = lnum - 1; else curwin->w_cursor.lnum = lnum; curbuf->b_op_start = curwin->w_cursor; // for mark_adjust() #endif } else if (u_save_cursor() == FAIL) goto end; yanklen = (int)STRLEN(y_array[0]); if (cur_ve_flags == VE_ALL && y_type == MCHAR) { if (gchar_cursor() == TAB) { int viscol = getviscol(); int ts = curbuf->b_p_ts; // Don't need to insert spaces when "p" on the last position of a // tab or "P" on the first position. if (dir == FORWARD ? 
#ifdef FEAT_VARTABS tabstop_padding(viscol, ts, curbuf->b_p_vts_array) != 1 #else ts - (viscol % ts) != 1 #endif : curwin->w_cursor.coladd > 0) coladvance_force(viscol); else curwin->w_cursor.coladd = 0; } else if (curwin->w_cursor.coladd > 0 || gchar_cursor() == NUL) coladvance_force(getviscol() + (dir == FORWARD)); } lnum = curwin->w_cursor.lnum; col = curwin->w_cursor.col; // Block mode if (y_type == MBLOCK) { int c = gchar_cursor(); colnr_T endcol2 = 0; if (dir == FORWARD && c != NUL) { if (cur_ve_flags == VE_ALL) getvcol(curwin, &curwin->w_cursor, &col, NULL, &endcol2); else getvcol(curwin, &curwin->w_cursor, NULL, NULL, &col); if (has_mbyte) // move to start of next multi-byte character curwin->w_cursor.col += (*mb_ptr2len)(ml_get_cursor()); else if (c != TAB || cur_ve_flags != VE_ALL) ++curwin->w_cursor.col; ++col; } else getvcol(curwin, &curwin->w_cursor, &col, NULL, &endcol2); col += curwin->w_cursor.coladd; if (cur_ve_flags == VE_ALL && (curwin->w_cursor.coladd > 0 || endcol2 == curwin->w_cursor.col)) { if (dir == FORWARD && c == NUL) ++col; if (dir != FORWARD && c != NUL && curwin->w_cursor.coladd > 0) ++curwin->w_cursor.col; if (c == TAB) { if (dir == BACKWARD && curwin->w_cursor.col) curwin->w_cursor.col--; if (dir == FORWARD && col - 1 == endcol2) curwin->w_cursor.col++; } } curwin->w_cursor.coladd = 0; bd.textcol = 0; for (i = 0; i < y_size; ++i) { int spaces = 0; char shortline; bd.startspaces = 0; bd.endspaces = 0; vcol = 0; delcount = 0; // add a new line if (curwin->w_cursor.lnum > curbuf->b_ml.ml_line_count) { if (ml_append(curbuf->b_ml.ml_line_count, (char_u *)"", (colnr_T)1, FALSE) == FAIL) break; ++nr_lines; } // get the old line and advance to the position to insert at oldp = ml_get_curline(); oldlen = (int)STRLEN(oldp); for (ptr = oldp; vcol < col && *ptr; ) { // Count a tab for what it's worth (if list mode not on) incr = lbr_chartabsize_adv(oldp, &ptr, vcol); vcol += incr; } bd.textcol = (colnr_T)(ptr - oldp); shortline = (vcol < col) || 
(vcol == col && !*ptr) ; if (vcol < col) // line too short, padd with spaces bd.startspaces = col - vcol; else if (vcol > col) { bd.endspaces = vcol - col; bd.startspaces = incr - bd.endspaces; --bd.textcol; delcount = 1; if (has_mbyte) bd.textcol -= (*mb_head_off)(oldp, oldp + bd.textcol); if (oldp[bd.textcol] != TAB) { // Only a Tab can be split into spaces. Other // characters will have to be moved to after the // block, causing misalignment. delcount = 0; bd.endspaces = 0; } } yanklen = (int)STRLEN(y_array[i]); if ((flags & PUT_BLOCK_INNER) == 0) { // calculate number of spaces required to fill right side of // block spaces = y_width + 1; for (j = 0; j < yanklen; j++) spaces -= lbr_chartabsize(NULL, &y_array[i][j], 0); if (spaces < 0) spaces = 0; } // Insert the new text. // First check for multiplication overflow. if (yanklen + spaces != 0 && count > ((INT_MAX - (bd.startspaces + bd.endspaces)) / (yanklen + spaces))) { emsg(_(e_resulting_text_too_long)); break; } totlen = count * (yanklen + spaces) + bd.startspaces + bd.endspaces; newp = alloc(totlen + oldlen + 1); if (newp == NULL) break; // copy part up to cursor to new line ptr = newp; mch_memmove(ptr, oldp, (size_t)bd.textcol); ptr += bd.textcol; // may insert some spaces before the new text vim_memset(ptr, ' ', (size_t)bd.startspaces); ptr += bd.startspaces; // insert the new text for (j = 0; j < count; ++j) { mch_memmove(ptr, y_array[i], (size_t)yanklen); ptr += yanklen; // insert block's trailing spaces only if there's text behind if ((j < count - 1 || !shortline) && spaces) { vim_memset(ptr, ' ', (size_t)spaces); ptr += spaces; } } // may insert some spaces after the new text vim_memset(ptr, ' ', (size_t)bd.endspaces); ptr += bd.endspaces; // move the text after the cursor to the end of the line. 
mch_memmove(ptr, oldp + bd.textcol + delcount, (size_t)(oldlen - bd.textcol - delcount + 1)); ml_replace(curwin->w_cursor.lnum, newp, FALSE); ++curwin->w_cursor.lnum; if (i == 0) curwin->w_cursor.col += bd.startspaces; } changed_lines(lnum, 0, curwin->w_cursor.lnum, nr_lines); // Set '[ mark. curbuf->b_op_start = curwin->w_cursor; curbuf->b_op_start.lnum = lnum; // adjust '] mark curbuf->b_op_end.lnum = curwin->w_cursor.lnum - 1; curbuf->b_op_end.col = bd.textcol + totlen - 1; curbuf->b_op_end.coladd = 0; if (flags & PUT_CURSEND) { colnr_T len; curwin->w_cursor = curbuf->b_op_end; curwin->w_cursor.col++; // in Insert mode we might be after the NUL, correct for that len = (colnr_T)STRLEN(ml_get_curline()); if (curwin->w_cursor.col > len) curwin->w_cursor.col = len; } else curwin->w_cursor.lnum = lnum; } else { // Character or Line mode if (y_type == MCHAR) { // if type is MCHAR, FORWARD is the same as BACKWARD on the next // char if (dir == FORWARD && gchar_cursor() != NUL) { if (has_mbyte) { int bytelen = (*mb_ptr2len)(ml_get_cursor()); // put it on the next of the multi-byte character. col += bytelen; if (yanklen) { curwin->w_cursor.col += bytelen; curbuf->b_op_end.col += bytelen; } } else { ++col; if (yanklen) { ++curwin->w_cursor.col; ++curbuf->b_op_end.col; } } } curbuf->b_op_start = curwin->w_cursor; } // Line mode: BACKWARD is the same as FORWARD on the previous line else if (dir == BACKWARD) --lnum; new_cursor = curwin->w_cursor; // simple case: insert into one line at a time if (y_type == MCHAR && y_size == 1) { linenr_T end_lnum = 0; // init for gcc linenr_T start_lnum = lnum; int first_byte_off = 0; if (VIsual_active) { end_lnum = curbuf->b_visual.vi_end.lnum; if (end_lnum < curbuf->b_visual.vi_start.lnum) end_lnum = curbuf->b_visual.vi_start.lnum; if (end_lnum > start_lnum) { pos_T pos; // "col" is valid for the first line, in following lines // the virtual column needs to be used. Matters for // multi-byte characters. 
pos.lnum = lnum; pos.col = col; pos.coladd = 0; getvcol(curwin, &pos, NULL, &vcol, NULL); } } if (count == 0 || yanklen == 0) { if (VIsual_active) lnum = end_lnum; } else if (count > INT_MAX / yanklen) // multiplication overflow emsg(_(e_resulting_text_too_long)); else { totlen = count * yanklen; do { oldp = ml_get(lnum); oldlen = (int)STRLEN(oldp); if (lnum > start_lnum) { pos_T pos; pos.lnum = lnum; if (getvpos(&pos, vcol) == OK) col = pos.col; else col = MAXCOL; } if (VIsual_active && col > oldlen) { lnum++; continue; } newp = alloc(totlen + oldlen + 1); if (newp == NULL) goto end; // alloc() gave an error message mch_memmove(newp, oldp, (size_t)col); ptr = newp + col; for (i = 0; i < count; ++i) { mch_memmove(ptr, y_array[0], (size_t)yanklen); ptr += yanklen; } STRMOVE(ptr, oldp + col); ml_replace(lnum, newp, FALSE); // compute the byte offset for the last character first_byte_off = mb_head_off(newp, ptr - 1); // Place cursor on last putted char. if (lnum == curwin->w_cursor.lnum) { // make sure curwin->w_virtcol is updated changed_cline_bef_curs(); curwin->w_cursor.col += (colnr_T)(totlen - 1); } if (VIsual_active) lnum++; } while (VIsual_active && lnum <= end_lnum); if (VIsual_active) // reset lnum to the last visual line lnum--; } // put '] at the first byte of the last character curbuf->b_op_end = curwin->w_cursor; curbuf->b_op_end.col -= first_byte_off; // For "CTRL-O p" in Insert mode, put cursor after last char if (totlen && (restart_edit != 0 || (flags & PUT_CURSEND))) ++curwin->w_cursor.col; else curwin->w_cursor.col -= first_byte_off; changed_bytes(lnum, col); } else { linenr_T new_lnum = new_cursor.lnum; size_t len; // Insert at least one line. When y_type is MCHAR, break the first // line in two. for (cnt = 1; cnt <= count; ++cnt) { i = 0; if (y_type == MCHAR) { // Split the current line in two at the insert position. // First insert y_array[size - 1] in front of second line. // Then append y_array[0] to first line. 
lnum = new_cursor.lnum; ptr = ml_get(lnum) + col; totlen = (int)STRLEN(y_array[y_size - 1]); newp = alloc(STRLEN(ptr) + totlen + 1); if (newp == NULL) goto error; STRCPY(newp, y_array[y_size - 1]); STRCAT(newp, ptr); // insert second line ml_append(lnum, newp, (colnr_T)0, FALSE); ++new_lnum; vim_free(newp); oldp = ml_get(lnum); newp = alloc(col + yanklen + 1); if (newp == NULL) goto error; // copy first part of line mch_memmove(newp, oldp, (size_t)col); // append to first line mch_memmove(newp + col, y_array[0], (size_t)(yanklen + 1)); ml_replace(lnum, newp, FALSE); curwin->w_cursor.lnum = lnum; i = 1; } for (; i < y_size; ++i) { if (y_type != MCHAR || i < y_size - 1) { if (ml_append(lnum, y_array[i], (colnr_T)0, FALSE) == FAIL) goto error; new_lnum++; } lnum++; ++nr_lines; if (flags & PUT_FIXINDENT) { old_pos = curwin->w_cursor; curwin->w_cursor.lnum = lnum; ptr = ml_get(lnum); if (cnt == count && i == y_size - 1) lendiff = (int)STRLEN(ptr); if (*ptr == '#' && preprocs_left()) indent = 0; // Leave # lines at start else if (*ptr == NUL) indent = 0; // Ignore empty lines else if (first_indent) { indent_diff = orig_indent - get_indent(); indent = orig_indent; first_indent = FALSE; } else if ((indent = get_indent() + indent_diff) < 0) indent = 0; (void)set_indent(indent, 0); curwin->w_cursor = old_pos; // remember how many chars were removed if (cnt == count && i == y_size - 1) lendiff -= (int)STRLEN(ml_get(lnum)); } } if (cnt == 1) new_lnum = lnum; } error: // Adjust marks. if (y_type == MLINE) { curbuf->b_op_start.col = 0; if (dir == FORWARD) curbuf->b_op_start.lnum++; } // Skip mark_adjust when adding lines after the last one, there // can't be marks there. But still needed in diff mode. 
if (curbuf->b_op_start.lnum + (y_type == MCHAR) - 1 + nr_lines < curbuf->b_ml.ml_line_count #ifdef FEAT_DIFF || curwin->w_p_diff #endif ) mark_adjust(curbuf->b_op_start.lnum + (y_type == MCHAR), (linenr_T)MAXLNUM, nr_lines, 0L); // note changed text for displaying and folding if (y_type == MCHAR) changed_lines(curwin->w_cursor.lnum, col, curwin->w_cursor.lnum + 1, nr_lines); else changed_lines(curbuf->b_op_start.lnum, 0, curbuf->b_op_start.lnum, nr_lines); if (y_current_used != NULL && (y_current_used != y_current || y_current->y_array != y_array)) { // Something invoked through changed_lines() has changed the // yank buffer, e.g. a GUI clipboard callback. emsg(_(e_yank_register_changed_while_using_it)); goto end; } // Put the '] mark on the first byte of the last inserted character. // Correct the length for change in indent. curbuf->b_op_end.lnum = new_lnum; len = STRLEN(y_array[y_size - 1]); col = (colnr_T)len - lendiff; if (col > 1) { curbuf->b_op_end.col = col - 1; if (len > 0) curbuf->b_op_end.col -= mb_head_off(y_array[y_size - 1], y_array[y_size - 1] + len - 1); } else curbuf->b_op_end.col = 0; if (flags & PUT_CURSLINE) { // ":put": put cursor on last inserted line curwin->w_cursor.lnum = lnum; beginline(BL_WHITE | BL_FIX); } else if (flags & PUT_CURSEND) { // put cursor after inserted text if (y_type == MLINE) { if (lnum >= curbuf->b_ml.ml_line_count) curwin->w_cursor.lnum = curbuf->b_ml.ml_line_count; else curwin->w_cursor.lnum = lnum + 1; curwin->w_cursor.col = 0; } else { curwin->w_cursor.lnum = new_lnum; curwin->w_cursor.col = col; curbuf->b_op_end = curwin->w_cursor; if (col > 1) curbuf->b_op_end.col = col - 1; } } else if (y_type == MLINE) { // put cursor on first non-blank in first inserted line curwin->w_cursor.col = 0; if (dir == FORWARD) ++curwin->w_cursor.lnum; beginline(BL_WHITE | BL_FIX); } else // put cursor on first inserted character curwin->w_cursor = new_cursor; } } msgmore(nr_lines); curwin->w_set_curswant = TRUE; end: if 
(cmdmod.cmod_flags & CMOD_LOCKMARKS) { curbuf->b_op_start = orig_start; curbuf->b_op_end = orig_end; } if (allocated) vim_free(insert_string); if (regname == '=') vim_free(y_array); VIsual_active = FALSE; // If the cursor is past the end of the line put it at the end. adjust_cursor_eol(); }
1
226,328
GF_Box *drep_box_new() { ISOM_DECL_BOX_ALLOC(GF_DREPBox, GF_ISOM_BOX_TYPE_DREP); return (GF_Box *)tmp; }
0
383,355
gdImageAntialias (gdImagePtr im, int antialias) { if (im->trueColor){ im->antialias = antialias; } }
0
234,784
static bool contains_pending_extent(struct btrfs_device *device, u64 *start, u64 len) { u64 physical_start, physical_end; lockdep_assert_held(&device->fs_info->chunk_mutex); if (!find_first_extent_bit(&device->alloc_state, *start, &physical_start, &physical_end, CHUNK_ALLOCATED, NULL)) { if (in_range(physical_start, *start, len) || in_range(*start, physical_start, physical_end - physical_start)) { *start = physical_end + 1; return true; } } return false; }
0
402,586
void cms_set_pw_data(cms_context *cms, secuPWData *pwdata) { ingress(); switch (cms->pwdata.source) { case PW_SOURCE_INVALID: case PW_PROMPT: case PW_DEVICE: case PW_SOURCE_MAX: break; case PW_FROMENV: case PW_FROMFILEDB: case PW_PLAINTEXT: memset(cms->pwdata.data, 0, strlen(cms->pwdata.data)); xfree(cms->pwdata.data); break; case PW_DATABASE: xfree(cms->pwdata.data); break; } memmove(&cms->pwdata, pwdata, sizeof(*pwdata)); dprintf("pwdata:%p", pwdata); dprintf("pwdata->source:%d", pwdata->source); dprintf("pwdata->data:%p (\"%s\")", pwdata->data, pwdata->data ? pwdata->data : "(null)"); egress(); }
0
512,937
Item_func_regexp_instr::fix_length_and_dec() { if (agg_arg_charsets_for_comparison(cmp_collation, args, 2)) return TRUE; re.init(cmp_collation.collation, 0); re.fix_owner(this, args[0], args[1]); max_length= MY_INT32_NUM_DECIMAL_DIGITS; // See also Item_func_locate return FALSE; }
0
506,439
mech_rpa_build_token4(struct rpa_auth_request *request, size_t *size) { buffer_t *buf; unsigned char server_response[MD5_RESULTLEN]; unsigned int length = sizeof(rpa_oid) + sizeof(server_response) + 1 + sizeof(request->session_key) + 1 + 1; buf = buffer_create_dynamic(request->pool, length + 4); buffer_append_c(buf, ASN1_APPLICATION); buffer_append_asn1_length(buf, length); buffer_append(buf, rpa_oid, sizeof(rpa_oid)); /* Generate random session key */ random_fill(request->session_key, sizeof(request->session_key)); /* Server authentication response */ rpa_server_response(request, server_response); buffer_append_c(buf, sizeof(server_response)); buffer_append(buf, server_response, sizeof(server_response)); buffer_append_c(buf, sizeof(request->session_key)); buffer_append(buf, request->session_key, sizeof(request->session_key)); /* Status, 0 - success */ buffer_append_c(buf, 0); *size = buf->used; return buffer_free_without_data(&buf); }
0
462,307
stputs(stream * s, const char *str) { uint ignore_count; sputs(s, (const byte *)str, strlen(str), &ignore_count); }
0
512,578
longlong Item_in_optimizer::val_int() { bool tmp; DBUG_ASSERT(fixed == 1); cache->store(args[0]); cache->cache_value(); DBUG_ENTER(" Item_in_optimizer::val_int"); if (invisible_mode()) { longlong res= args[1]->val_int(); null_value= args[1]->null_value; DBUG_PRINT("info", ("pass trough")); DBUG_RETURN(res); } if (cache->null_value_inside) { DBUG_PRINT("info", ("Left NULL...")); /* We're evaluating "<outer_value_list> [NOT] IN (SELECT <inner_value_list>...)" where one or more of the outer values is NULL. */ if (((Item_in_subselect*)args[1])->is_top_level_item()) { /* We're evaluating a top level item, e.g. "<outer_value_list> IN (SELECT <inner_value_list>...)", and in this case a NULL value in the outer_value_list means that the result shall be NULL/FALSE (makes no difference for top level items). The cached value is NULL, so just return NULL. */ null_value= 1; } else { /* We're evaluating an item where a NULL value in either the outer or inner value list does not automatically mean that we can return NULL/FALSE. An example of such a query is "<outer_value_list> NOT IN (SELECT <inner_value_list>...)" The result when there is at least one NULL value is: NULL if the SELECT evaluated over the non-NULL values produces at least one row, FALSE otherwise */ Item_in_subselect *item_subs=(Item_in_subselect*)args[1]; bool all_left_cols_null= true; const uint ncols= cache->cols(); /* Turn off the predicates that are based on column compares for which the left part is currently NULL */ for (uint i= 0; i < ncols; i++) { if (cache->element_index(i)->null_value) item_subs->set_cond_guard_var(i, FALSE); else all_left_cols_null= false; } if (!item_subs->is_correlated && all_left_cols_null && result_for_null_param != UNKNOWN) { /* This is a non-correlated subquery, all values in the outer value list are NULL, and we have already evaluated the subquery for all NULL values: Return the same result we did last time without evaluating the subquery. 
*/ null_value= result_for_null_param; } else { /* The subquery has to be evaluated */ (void) item_subs->val_bool_result(); if (item_subs->engine->no_rows()) null_value= item_subs->null_value; else null_value= TRUE; if (all_left_cols_null) result_for_null_param= null_value; } /* Turn all predicates back on */ for (uint i= 0; i < ncols; i++) item_subs->set_cond_guard_var(i, TRUE); } DBUG_RETURN(0); } tmp= args[1]->val_bool_result(); null_value= args[1]->null_value; DBUG_RETURN(tmp); }
0
401,589
static inline struct timer_base *get_timer_this_cpu_base(u32 tflags) { struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); /* * If the timer is deferrable and NO_HZ_COMMON is set then we need * to use the deferrable base. */ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) base = this_cpu_ptr(&timer_bases[BASE_DEF]); return base; }
0
309,984
usage(void) { static const char *msg[] = { "Usage: dots [options]" ,"" ,"Options:" ," -T TERM override $TERM" #if HAVE_USE_ENV ," -e allow environment $LINES / $COLUMNS" #endif ," -f use tigetnum rather than <term.h> mapping" ," -m SIZE set margin (default: 2)" ," -r SECS self-interrupt/exit after specified number of seconds" ," -s MSECS delay 1% of the time (default: 1 msecs)" }; size_t n; for (n = 0; n < SIZEOF(msg); n++) fprintf(stderr, "%s\n", msg[n]); ExitProgram(EXIT_FAILURE); }
0
101,675
void WebProcessProxy::didReceiveMessageOnConnectionWorkQueue(CoreIPC::Connection* connection, CoreIPC::MessageID messageID, CoreIPC::MessageDecoder& decoder, bool& didHandleMessage) { if (decoder.messageReceiverName() == Messages::WebProcessProxy::messageReceiverName()) didReceiveWebProcessProxyMessageOnConnectionWorkQueue(connection, messageID, decoder, didHandleMessage); }
0
344,818
safe_path(const char *name, struct stat *stp, const char *pw_dir, uid_t uid, char *err, size_t errlen) { char buf[PATH_MAX], homedir[PATH_MAX]; char *cp; int comparehome = 0; struct stat st; if (realpath(name, buf) == NULL) { snprintf(err, errlen, "realpath %s failed: %s", name, strerror(errno)); return -1; } if (pw_dir != NULL && realpath(pw_dir, homedir) != NULL) comparehome = 1; if (!S_ISREG(stp->st_mode)) { snprintf(err, errlen, "%s is not a regular file", buf); return -1; } if ((!platform_sys_dir_uid(stp->st_uid) && stp->st_uid != uid) || (stp->st_mode & 022) != 0) { snprintf(err, errlen, "bad ownership or modes for file %s", buf); return -1; } /* for each component of the canonical path, walking upwards */ for (;;) { if ((cp = dirname(buf)) == NULL) { snprintf(err, errlen, "dirname() failed"); return -1; } strlcpy(buf, cp, sizeof(buf)); if (stat(buf, &st) == -1 || (!platform_sys_dir_uid(st.st_uid) && st.st_uid != uid) || (st.st_mode & 022) != 0) { snprintf(err, errlen, "bad ownership or modes for directory %s", buf); return -1; } /* If are past the homedir then we can stop */ if (comparehome && strcmp(homedir, buf) == 0) break; /* * dirname should always complete with a "/" path, * but we can be paranoid and check for "." too */ if ((strcmp("/", buf) == 0) || (strcmp(".", buf) == 0)) break; } return 0; }
0
430,465
~GopherStateData() {if(buf) swanSong();}
0
225,635
GF_Err hnti_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); }
0
353,239
void SplashOutputDev::updateTransfer(GfxState *state) { Function **transfer; unsigned char red[256], green[256], blue[256], gray[256]; double x, y; int i; transfer = state->getTransfer(); if (transfer[0] && transfer[0]->getInputSize() == 1 && transfer[0]->getOutputSize() == 1) { if (transfer[1] && transfer[1]->getInputSize() == 1 && transfer[1]->getOutputSize() == 1 && transfer[2] && transfer[2]->getInputSize() == 1 && transfer[2]->getOutputSize() == 1 && transfer[3] && transfer[3]->getInputSize() == 1 && transfer[3]->getOutputSize() == 1) { for (i = 0; i < 256; ++i) { x = i / 255.0; transfer[0]->transform(&x, &y); red[i] = (unsigned char)(y * 255.0 + 0.5); transfer[1]->transform(&x, &y); green[i] = (unsigned char)(y * 255.0 + 0.5); transfer[2]->transform(&x, &y); blue[i] = (unsigned char)(y * 255.0 + 0.5); transfer[3]->transform(&x, &y); gray[i] = (unsigned char)(y * 255.0 + 0.5); } } else { for (i = 0; i < 256; ++i) { x = i / 255.0; transfer[0]->transform(&x, &y); red[i] = green[i] = blue[i] = gray[i] = (unsigned char)(y * 255.0 + 0.5); } } } else { for (i = 0; i < 256; ++i) { red[i] = green[i] = blue[i] = gray[i] = (unsigned char)i; } } splash->setTransfer(red, green, blue, gray); }
0
474,085
is_mbc_newline(const UChar* p, const UChar* end, OnigEncoding enc) { if (p < end) { if (*p == 0x0a) return 1; #ifdef USE_UNICODE_ALL_LINE_TERMINATORS #ifndef USE_CRNL_AS_LINE_TERMINATOR if (*p == 0x0d) return 1; #endif if (p + 1 < end) { if (*(p+1) == 0x85 && *p == 0xc2) /* U+0085 */ return 1; if (p + 2 < end) { if ((*(p+2) == 0xa8 || *(p+2) == 0xa9) && *(p+1) == 0x80 && *p == 0xe2) /* U+2028, U+2029 */ return 1; } } #endif } return 0; }
0
234,780
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void) { return &fs_uuids; }
0
369,247
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, int sync, void *arg) { struct wait_page_queue *wpq; struct io_kiocb *req = wait->private; struct wait_page_key *key = arg; wpq = container_of(wait, struct wait_page_queue, wait); if (!wake_page_match(wpq, key)) return 0; req->rw.kiocb.ki_flags &= ~IOCB_WAITQ; list_del_init(&wait->entry); io_req_task_queue(req); return 1; }
0
343,222
void domlst(const char * const file) { char line[PATH_MAX + 256U] = MLST_BEGIN; if (modernformat(file, line + (sizeof MLST_BEGIN - 1U), sizeof line - (sizeof MLST_BEGIN - 1U), " ") < 0) { addreply_noformat(550, MSG_STAT_FAILURE2); return; } addreply_noformat(0, line); addreply_noformat(250, "End."); }
0
313,750
nv_ignore(cmdarg_T *cap) { cap->retval |= CA_COMMAND_BUSY; // don't call edit() now }
0
289,261
static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream, bool trylock) { struct snd_pcm_runtime *runtime = substream->runtime; int err; if (trylock) { if (!(mutex_trylock(&runtime->oss.params_lock))) return -EAGAIN; } else if (mutex_lock_interruptible(&runtime->oss.params_lock)) return -ERESTARTSYS; err = snd_pcm_oss_change_params_locked(substream); mutex_unlock(&runtime->oss.params_lock); return err; }
0
445,869
fr_window_set_password_for_second_archive (FrWindow *window, const char *password) { g_return_if_fail (window != NULL); if (window->priv->second_password != NULL) { g_free (window->priv->second_password); window->priv->second_password = NULL; } if ((password != NULL) && (password[0] != '\0')) window->priv->second_password = g_strdup (password); }
0
387,745
instanceOop InstanceKlass::register_finalizer(instanceOop i, TRAPS) { if (TraceFinalizerRegistration) { tty->print("Registered "); i->print_value_on(tty); tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", p2i(i)); } instanceHandle h_i(THREAD, i); // Pass the handle as argument, JavaCalls::call expects oop as jobjects JavaValue result(T_VOID); JavaCallArguments args(h_i); methodHandle mh (THREAD, Universe::finalizer_register_method()); JavaCalls::call(&result, mh, &args, CHECK_NULL); return h_i(); }
0
386,599
void DL_Dxf::writeHatch2(DL_WriterA& dw, const DL_HatchData& data, const DL_Attributes& /*attrib*/) { dw.dxfInt(75, 0); // odd parity dw.dxfInt(76, 1); // pattern type if (data.solid==false) { dw.dxfReal(52, data.angle); dw.dxfReal(41, data.scale); dw.dxfInt(77, 0); // not double //dw.dxfInt(78, 0); dw.dxfInt(78, 1); dw.dxfReal(53, 45.0); dw.dxfReal(43, 0.0); dw.dxfReal(44, 0.0); dw.dxfReal(45, -0.0883883476483184); dw.dxfReal(46, 0.0883883476483185); dw.dxfInt(79, 0); } dw.dxfInt(98, 0); if (version==DL_VERSION_2000) { dw.dxfString(1001, "ACAD"); dw.dxfReal(1010, data.originX); dw.dxfReal(1020, data.originY); dw.dxfInt(1030, 0.0); } }
0
432,148
SkipThenLimit extractSkipAndLimitForPushdown(Pipeline* pipeline) { // If the disablePipelineOptimization failpoint is enabled, then do not attempt the limit and // skip pushdown optimization. if (MONGO_unlikely(disablePipelineOptimization.shouldFail())) { return {boost::none, boost::none}; } auto&& sources = pipeline->getSources(); // It is important to call 'extractLimitForPushdown' before 'extractSkipForPushdown'. Otherwise // there could be a situation when $limit stages in pipeline would prevent // 'extractSkipForPushdown' from extracting all $skip stages. auto limit = extractLimitForPushdown(sources.begin(), &sources); auto skip = extractSkipForPushdown(sources.begin(), &sources); auto skipThenLimit = LimitThenSkip(limit, skip).flip(); if (skipThenLimit.getSkip() || skipThenLimit.getLimit()) { // Removing stages may have produced the opportunity for additional optimizations. pipeline->optimizePipeline(); } return skipThenLimit; }
0
400,411
ins_down( int startcol) // when TRUE move to Insstart.col { pos_T tpos; linenr_T old_topline = curwin->w_topline; #ifdef FEAT_DIFF int old_topfill = curwin->w_topfill; #endif undisplay_dollar(); tpos = curwin->w_cursor; if (cursor_down(1L, TRUE) == OK) { if (startcol) coladvance(getvcol_nolist(&Insstart)); if (old_topline != curwin->w_topline #ifdef FEAT_DIFF || old_topfill != curwin->w_topfill #endif ) redraw_later(UPD_VALID); start_arrow(&tpos); can_cindent = TRUE; } else vim_beep(BO_CRSR); }
0
259,304
static int cbcs_scheme_decrypt(MOVContext *c, MOVStreamContext *sc, AVEncryptionInfo *sample, uint8_t *input, int size) { int i, ret, rem_bytes; uint8_t iv[16]; uint8_t *data; if (!sc->cenc.aes_ctx) { /* initialize the cipher */ sc->cenc.aes_ctx = av_aes_alloc(); if (!sc->cenc.aes_ctx) { return AVERROR(ENOMEM); } ret = av_aes_init(sc->cenc.aes_ctx, c->decryption_key, 16 * 8, 1); if (ret < 0) { return ret; } } /* whole-block full sample encryption */ if (!sample->subsample_count) { /* decrypt the whole packet */ memcpy(iv, sample->iv, 16); av_aes_crypt(sc->cenc.aes_ctx, input, input, size/16, iv, 1); return 0; } else if (!sample->crypt_byte_block && !sample->skip_byte_block) { av_log(c->fc, AV_LOG_ERROR, "pattern encryption is not present in 'cbcs' scheme\n"); return AVERROR_INVALIDDATA; } for (i = 0; i < sample->subsample_count; i++) { if (sample->subsamples[i].bytes_of_clear_data + sample->subsamples[i].bytes_of_protected_data > size) { av_log(c->fc, AV_LOG_ERROR, "subsample size exceeds the packet size left\n"); return AVERROR_INVALIDDATA; } /* skip the clear bytes */ input += sample->subsamples[i].bytes_of_clear_data; size -= sample->subsamples[i].bytes_of_clear_data; /* decrypt the encrypted bytes */ memcpy(iv, sample->iv, 16); data = input; rem_bytes = sample->subsamples[i].bytes_of_protected_data; while (rem_bytes > 0) { if (rem_bytes < 16*sample->crypt_byte_block) { break; } av_aes_crypt(sc->cenc.aes_ctx, data, data, sample->crypt_byte_block, iv, 1); data += 16*sample->crypt_byte_block; rem_bytes -= 16*sample->crypt_byte_block; data += FFMIN(16*sample->skip_byte_block, rem_bytes); rem_bytes -= FFMIN(16*sample->skip_byte_block, rem_bytes); } input += sample->subsamples[i].bytes_of_protected_data; size -= sample->subsamples[i].bytes_of_protected_data; } if (size > 0) { av_log(c->fc, AV_LOG_ERROR, "leftover packet bytes after subsample processing\n"); return AVERROR_INVALIDDATA; } return 0; }
0
206,942
eval_string(char_u **arg, typval_T *rettv, int evaluate, int interpolate) { char_u *p; char_u *end; int extra = interpolate ? 1 : 0; int off = interpolate ? 0 : 1; int len; // Find the end of the string, skipping backslashed characters. for (p = *arg + off; *p != NUL && *p != '"'; MB_PTR_ADV(p)) { if (*p == '\\' && p[1] != NUL) { ++p; // A "\<x>" form occupies at least 4 characters, and produces up // to 9 characters (6 for the char and 3 for a modifier): // reserve space for 5 extra. if (*p == '<') extra += 5; } else if (interpolate && (*p == '{' || *p == '}')) { if (*p == '{' && p[1] != '{') // start of expression break; ++p; if (p[-1] == '}' && *p != '}') // single '}' is an error { semsg(_(e_stray_closing_curly_str), *arg); return FAIL; } --extra; // "{{" becomes "{", "}}" becomes "}" } } if (*p != '"' && !(interpolate && *p == '{')) { semsg(_(e_missing_double_quote_str), *arg); return FAIL; } // If only parsing, set *arg and return here if (!evaluate) { *arg = p + off; return OK; } // Copy the string into allocated memory, handling backslashed // characters. rettv->v_type = VAR_STRING; len = (int)(p - *arg + extra); rettv->vval.v_string = alloc(len); if (rettv->vval.v_string == NULL) return FAIL; end = rettv->vval.v_string; for (p = *arg + off; *p != NUL && *p != '"'; ) { if (*p == '\\') { switch (*++p) { case 'b': *end++ = BS; ++p; break; case 'e': *end++ = ESC; ++p; break; case 'f': *end++ = FF; ++p; break; case 'n': *end++ = NL; ++p; break; case 'r': *end++ = CAR; ++p; break; case 't': *end++ = TAB; ++p; break; case 'X': // hex: "\x1", "\x12" case 'x': case 'u': // Unicode: "\u0023" case 'U': if (vim_isxdigit(p[1])) { int n, nr; int c = toupper(*p); if (c == 'X') n = 2; else if (*p == 'u') n = 4; else n = 8; nr = 0; while (--n >= 0 && vim_isxdigit(p[1])) { ++p; nr = (nr << 4) + hex2nr(*p); } ++p; // For "\u" store the number according to // 'encoding'. 
if (c != 'X') end += (*mb_char2bytes)(nr, end); else *end++ = nr; } break; // octal: "\1", "\12", "\123" case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': *end = *p++ - '0'; if (*p >= '0' && *p <= '7') { *end = (*end << 3) + *p++ - '0'; if (*p >= '0' && *p <= '7') *end = (*end << 3) + *p++ - '0'; } ++end; break; // Special key, e.g.: "\<C-W>" case '<': { int flags = FSK_KEYCODE | FSK_IN_STRING; if (p[1] != '*') flags |= FSK_SIMPLIFY; extra = trans_special(&p, end, flags, FALSE, NULL); if (extra != 0) { end += extra; if (end >= rettv->vval.v_string + len) iemsg("eval_string() used more space than allocated"); break; } } // FALLTHROUGH default: MB_COPY_CHAR(p, end); break; } } else { if (interpolate && (*p == '{' || *p == '}')) { if (*p == '{' && p[1] != '{') // start of expression break; ++p; // reduce "{{" to "{" and "}}" to "}" } MB_COPY_CHAR(p, end); } } *end = NUL; if (*p == '"' && !interpolate) ++p; *arg = p; return OK; }
1
312,505
qf_get_valid_size(exarg_T *eap) { qf_info_T *qi; qf_list_T *qfl; qfline_T *qfp; int i, sz = 0; int prev_fnum = 0; if ((qi = qf_cmd_get_stack(eap, FALSE)) == NULL) return 0; qfl = qf_get_curlist(qi); FOR_ALL_QFL_ITEMS(qfl, qfp, i) { if (qfp->qf_valid) { if (eap->cmdidx == CMD_cdo || eap->cmdidx == CMD_ldo) sz++; // Count all valid entries else if (qfp->qf_fnum > 0 && qfp->qf_fnum != prev_fnum) { // Count the number of files sz++; prev_fnum = qfp->qf_fnum; } } } return sz; }
0
417,085
bool PlayerGeneric::isEnabled(PlayModeOptions option) const { ASSERT(option>=PlayModeOptionFirst && option<PlayModeOptionLast); if (!player) return options[option]; else return player->isEnabled(option); }
0
310,188
NCURSES_SP_NAME(has_mouse) (NCURSES_SP_DCL0) { return _nc_has_mouse(SP_PARM); }
0
508,782
void end_read_record(READ_RECORD *info) { /* free cache if used */ free_cache(info); if (info->table) { if (info->table->db_stat) // if opened (void) info->table->file->extra(HA_EXTRA_NO_CACHE); if (info->read_record != rr_quick) // otherwise quick_range does it (void) info->table->file->ha_index_or_rnd_end(); info->table=0; } }
0
225,062
PQconnectPoll(PGconn *conn) { bool reset_connection_state_machine = false; bool need_new_connection = false; PGresult *res; char sebuf[PG_STRERROR_R_BUFLEN]; int optval; if (conn == NULL) return PGRES_POLLING_FAILED; /* Get the new data */ switch (conn->status) { /* * We really shouldn't have been polled in these two cases, but we * can handle it. */ case CONNECTION_BAD: return PGRES_POLLING_FAILED; case CONNECTION_OK: return PGRES_POLLING_OK; /* These are reading states */ case CONNECTION_AWAITING_RESPONSE: case CONNECTION_AUTH_OK: case CONNECTION_CHECK_WRITABLE: case CONNECTION_CONSUME: case CONNECTION_CHECK_STANDBY: { /* Load waiting data */ int n = pqReadData(conn); if (n < 0) goto error_return; if (n == 0) return PGRES_POLLING_READING; break; } /* These are writing states, so we just proceed. */ case CONNECTION_STARTED: case CONNECTION_MADE: break; /* Special cases: proceed without waiting. */ case CONNECTION_SSL_STARTUP: case CONNECTION_NEEDED: case CONNECTION_GSS_STARTUP: case CONNECTION_CHECK_TARGET: break; default: appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("invalid connection state, probably indicative of memory corruption\n")); goto error_return; } keep_going: /* We will come back to here until there is * nothing left to do. */ /* Time to advance to next address, or next host if no more addresses? */ if (conn->try_next_addr) { if (conn->addr_cur && conn->addr_cur->ai_next) { conn->addr_cur = conn->addr_cur->ai_next; reset_connection_state_machine = true; } else conn->try_next_host = true; conn->try_next_addr = false; } /* Time to advance to next connhost[] entry? */ if (conn->try_next_host) { pg_conn_host *ch; struct addrinfo hint; int thisport; int ret; char portstr[MAXPGPATH]; if (conn->whichhost + 1 < conn->nconnhost) conn->whichhost++; else { /* * Oops, no more hosts. * * If we are trying to connect in "prefer-standby" mode, then drop * the standby requirement and start over. 
* * Otherwise, an appropriate error message is already set up, so * we just need to set the right status. */ if (conn->target_server_type == SERVER_TYPE_PREFER_STANDBY && conn->nconnhost > 0) { conn->target_server_type = SERVER_TYPE_PREFER_STANDBY_PASS2; conn->whichhost = 0; } else goto error_return; } /* Drop any address info for previous host */ release_conn_addrinfo(conn); /* * Look up info for the new host. On failure, log the problem in * conn->errorMessage, then loop around to try the next host. (Note * we don't clear try_next_host until we've succeeded.) */ ch = &conn->connhost[conn->whichhost]; /* Initialize hint structure */ MemSet(&hint, 0, sizeof(hint)); hint.ai_socktype = SOCK_STREAM; conn->addrlist_family = hint.ai_family = AF_UNSPEC; /* Figure out the port number we're going to use. */ if (ch->port == NULL || ch->port[0] == '\0') thisport = DEF_PGPORT; else { if (!parse_int_param(ch->port, &thisport, conn, "port")) goto error_return; if (thisport < 1 || thisport > 65535) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("invalid port number: \"%s\"\n"), ch->port); goto keep_going; } } snprintf(portstr, sizeof(portstr), "%d", thisport); /* Use pg_getaddrinfo_all() to resolve the address */ switch (ch->type) { case CHT_HOST_NAME: ret = pg_getaddrinfo_all(ch->host, portstr, &hint, &conn->addrlist); if (ret || !conn->addrlist) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not translate host name \"%s\" to address: %s\n"), ch->host, gai_strerror(ret)); goto keep_going; } break; case CHT_HOST_ADDRESS: hint.ai_flags = AI_NUMERICHOST; ret = pg_getaddrinfo_all(ch->hostaddr, portstr, &hint, &conn->addrlist); if (ret || !conn->addrlist) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not parse network address \"%s\": %s\n"), ch->hostaddr, gai_strerror(ret)); goto keep_going; } break; case CHT_UNIX_SOCKET: #ifdef HAVE_UNIX_SOCKETS conn->addrlist_family = hint.ai_family = AF_UNIX; UNIXSOCK_PATH(portstr, thisport, ch->host); if 
(strlen(portstr) >= UNIXSOCK_PATH_BUFLEN) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"), portstr, (int) (UNIXSOCK_PATH_BUFLEN - 1)); goto keep_going; } /* * NULL hostname tells pg_getaddrinfo_all to parse the service * name as a Unix-domain socket path. */ ret = pg_getaddrinfo_all(NULL, portstr, &hint, &conn->addrlist); if (ret || !conn->addrlist) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"), portstr, gai_strerror(ret)); goto keep_going; } #else Assert(false); #endif break; } /* OK, scan this addrlist for a working server address */ conn->addr_cur = conn->addrlist; reset_connection_state_machine = true; conn->try_next_host = false; } /* Reset connection state machine? */ if (reset_connection_state_machine) { /* * (Re) initialize our connection control variables for a set of * connection attempts to a single server address. These variables * must persist across individual connection attempts, but we must * reset them when we start to consider a new server. */ conn->pversion = PG_PROTOCOL(3, 0); conn->send_appname = true; #ifdef USE_SSL /* initialize these values based on SSL mode */ conn->allow_ssl_try = (conn->sslmode[0] != 'd'); /* "disable" */ conn->wait_ssl_try = (conn->sslmode[0] == 'a'); /* "allow" */ #endif #ifdef ENABLE_GSS conn->try_gss = (conn->gssencmode[0] != 'd'); /* "disable" */ #endif reset_connection_state_machine = false; need_new_connection = true; } /* Force a new connection (perhaps to the same server as before)? 
*/ if (need_new_connection) { /* Drop any existing connection */ pqDropConnection(conn, true); /* Reset all state obtained from old server */ pqDropServerData(conn); /* Drop any PGresult we might have, too */ conn->asyncStatus = PGASYNC_IDLE; conn->xactStatus = PQTRANS_IDLE; conn->pipelineStatus = PQ_PIPELINE_OFF; pqClearAsyncResult(conn); /* Reset conn->status to put the state machine in the right state */ conn->status = CONNECTION_NEEDED; need_new_connection = false; } /* Now try to advance the state machine for this connection */ switch (conn->status) { case CONNECTION_NEEDED: { /* * Try to initiate a connection to one of the addresses * returned by pg_getaddrinfo_all(). conn->addr_cur is the * next one to try. * * The extra level of braces here is historical. It's not * worth reindenting this whole switch case to remove 'em. */ { struct addrinfo *addr_cur = conn->addr_cur; char host_addr[NI_MAXHOST]; /* * Advance to next possible host, if we've tried all of * the addresses for the current host. */ if (addr_cur == NULL) { conn->try_next_host = true; goto keep_going; } /* Remember current address for possible use later */ memcpy(&conn->raddr.addr, addr_cur->ai_addr, addr_cur->ai_addrlen); conn->raddr.salen = addr_cur->ai_addrlen; /* * Set connip, too. Note we purposely ignore strdup * failure; not a big problem if it fails. */ if (conn->connip != NULL) { free(conn->connip); conn->connip = NULL; } getHostaddr(conn, host_addr, NI_MAXHOST); if (host_addr[0]) conn->connip = strdup(host_addr); /* Try to create the socket */ conn->sock = socket(addr_cur->ai_family, SOCK_STREAM, 0); if (conn->sock == PGINVALID_SOCKET) { int errorno = SOCK_ERRNO; /* * Silently ignore socket() failure if we have more * addresses to try; this reduces useless chatter in * cases where the address list includes both IPv4 and * IPv6 but kernel only accepts one family. 
*/ if (addr_cur->ai_next != NULL || conn->whichhost + 1 < conn->nconnhost) { conn->try_next_addr = true; goto keep_going; } emitHostIdentityInfo(conn, host_addr); appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not create socket: %s\n"), SOCK_STRERROR(errorno, sebuf, sizeof(sebuf))); goto error_return; } /* * Once we've identified a target address, all errors * except the preceding socket()-failure case should be * prefixed with host-identity information. (If the * connection succeeds, the contents of conn->errorMessage * won't matter, so this is harmless.) */ emitHostIdentityInfo(conn, host_addr); /* * Select socket options: no delay of outgoing data for * TCP sockets, nonblock mode, close-on-exec. Try the * next address if any of this fails. */ if (!IS_AF_UNIX(addr_cur->ai_family)) { if (!connectNoDelay(conn)) { /* error message already created */ conn->try_next_addr = true; goto keep_going; } } if (!pg_set_noblock(conn->sock)) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not set socket to nonblocking mode: %s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); conn->try_next_addr = true; goto keep_going; } #ifdef F_SETFD if (fcntl(conn->sock, F_SETFD, FD_CLOEXEC) == -1) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not set socket to close-on-exec mode: %s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); conn->try_next_addr = true; goto keep_going; } #endif /* F_SETFD */ if (!IS_AF_UNIX(addr_cur->ai_family)) { #ifndef WIN32 int on = 1; #endif int usekeepalives = useKeepalives(conn); int err = 0; if (usekeepalives < 0) { appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("keepalives parameter must be an integer\n")); err = 1; } else if (usekeepalives == 0) { /* Do nothing */ } #ifndef WIN32 else if (setsockopt(conn->sock, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("%s(%s) failed: %s\n"), "setsockopt", "SO_KEEPALIVE", 
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); err = 1; } else if (!setKeepalivesIdle(conn) || !setKeepalivesInterval(conn) || !setKeepalivesCount(conn)) err = 1; #else /* WIN32 */ #ifdef SIO_KEEPALIVE_VALS else if (!setKeepalivesWin32(conn)) err = 1; #endif /* SIO_KEEPALIVE_VALS */ #endif /* WIN32 */ else if (!setTCPUserTimeout(conn)) err = 1; if (err) { conn->try_next_addr = true; goto keep_going; } } /*---------- * We have three methods of blocking SIGPIPE during * send() calls to this socket: * * - setsockopt(sock, SO_NOSIGPIPE) * - send(sock, ..., MSG_NOSIGNAL) * - setting the signal mask to SIG_IGN during send() * * The third method requires three syscalls per send, * so we prefer either of the first two, but they are * less portable. The state is tracked in the following * members of PGconn: * * conn->sigpipe_so - we have set up SO_NOSIGPIPE * conn->sigpipe_flag - we're specifying MSG_NOSIGNAL * * If we can use SO_NOSIGPIPE, then set sigpipe_so here * and we're done. Otherwise, set sigpipe_flag so that * we will try MSG_NOSIGNAL on sends. If we get an error * with MSG_NOSIGNAL, we'll clear that flag and revert to * signal masking. *---------- */ conn->sigpipe_so = false; #ifdef MSG_NOSIGNAL conn->sigpipe_flag = true; #else conn->sigpipe_flag = false; #endif /* MSG_NOSIGNAL */ #ifdef SO_NOSIGPIPE optval = 1; if (setsockopt(conn->sock, SOL_SOCKET, SO_NOSIGPIPE, (char *) &optval, sizeof(optval)) == 0) { conn->sigpipe_so = true; conn->sigpipe_flag = false; } #endif /* SO_NOSIGPIPE */ /* * Start/make connection. This should not block, since we * are in nonblock mode. If it does, well, too bad. */ if (connect(conn->sock, addr_cur->ai_addr, addr_cur->ai_addrlen) < 0) { if (SOCK_ERRNO == EINPROGRESS || #ifdef WIN32 SOCK_ERRNO == EWOULDBLOCK || #endif SOCK_ERRNO == EINTR) { /* * This is fine - we're in non-blocking mode, and * the connection is in progress. Tell caller to * wait for write-ready on socket. 
*/ conn->status = CONNECTION_STARTED; return PGRES_POLLING_WRITING; } /* otherwise, trouble */ } else { /* * Hm, we're connected already --- seems the "nonblock * connection" wasn't. Advance the state machine and * go do the next stuff. */ conn->status = CONNECTION_STARTED; goto keep_going; } /* * This connection failed. Add the error report to * conn->errorMessage, then try the next address if any. */ connectFailureMessage(conn, SOCK_ERRNO); conn->try_next_addr = true; goto keep_going; } } case CONNECTION_STARTED: { ACCEPT_TYPE_ARG3 optlen = sizeof(optval); /* * Write ready, since we've made it here, so the connection * has been made ... or has failed. */ /* * Now check (using getsockopt) that there is not an error * state waiting for us on the socket. */ if (getsockopt(conn->sock, SOL_SOCKET, SO_ERROR, (char *) &optval, &optlen) == -1) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not get socket error status: %s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); goto error_return; } else if (optval != 0) { /* * When using a nonblocking connect, we will typically see * connect failures at this point, so provide a friendly * error message. */ connectFailureMessage(conn, optval); /* * Try the next address if any, just as in the case where * connect() returned failure immediately. */ conn->try_next_addr = true; goto keep_going; } /* Fill in the client address */ conn->laddr.salen = sizeof(conn->laddr.addr); if (getsockname(conn->sock, (struct sockaddr *) &conn->laddr.addr, &conn->laddr.salen) < 0) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not get client address from socket: %s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); goto error_return; } /* * Make sure we can write before advancing to next step. */ conn->status = CONNECTION_MADE; return PGRES_POLLING_WRITING; } case CONNECTION_MADE: { char *startpacket; int packetlen; /* * Implement requirepeer check, if requested and it's a * Unix-domain socket. 
*/ if (conn->requirepeer && conn->requirepeer[0] && IS_AF_UNIX(conn->raddr.addr.ss_family)) { #ifndef WIN32 char pwdbuf[BUFSIZ]; struct passwd pass_buf; struct passwd *pass; int passerr; #endif uid_t uid; gid_t gid; errno = 0; if (getpeereid(conn->sock, &uid, &gid) != 0) { /* * Provide special error message if getpeereid is a * stub */ if (errno == ENOSYS) appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("requirepeer parameter is not supported on this platform\n")); else appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not get peer credentials: %s\n"), strerror_r(errno, sebuf, sizeof(sebuf))); goto error_return; } #ifndef WIN32 passerr = pqGetpwuid(uid, &pass_buf, pwdbuf, sizeof(pwdbuf), &pass); if (pass == NULL) { if (passerr != 0) appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not look up local user ID %d: %s\n"), (int) uid, strerror_r(passerr, sebuf, sizeof(sebuf))); else appendPQExpBuffer(&conn->errorMessage, libpq_gettext("local user with ID %d does not exist\n"), (int) uid); goto error_return; } if (strcmp(pass->pw_name, conn->requirepeer) != 0) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n"), conn->requirepeer, pass->pw_name); goto error_return; } #else /* WIN32 */ /* should have failed with ENOSYS above */ Assert(false); #endif /* WIN32 */ } if (IS_AF_UNIX(conn->raddr.addr.ss_family)) { /* Don't request SSL or GSSAPI over Unix sockets */ #ifdef USE_SSL conn->allow_ssl_try = false; #endif #ifdef ENABLE_GSS conn->try_gss = false; #endif } #ifdef ENABLE_GSS /* * If GSSAPI encryption is enabled, then call * pg_GSS_have_cred_cache() which will return true if we can * acquire credentials (and give us a handle to use in * conn->gcred), and then send a packet to the server asking * for GSSAPI Encryption (and skip past SSL negotiation and * regular startup below). 
*/ if (conn->try_gss && !conn->gctx) conn->try_gss = pg_GSS_have_cred_cache(&conn->gcred); if (conn->try_gss && !conn->gctx) { ProtocolVersion pv = pg_hton32(NEGOTIATE_GSS_CODE); if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not send GSSAPI negotiation packet: %s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); goto error_return; } /* Ok, wait for response */ conn->status = CONNECTION_GSS_STARTUP; return PGRES_POLLING_READING; } else if (!conn->gctx && conn->gssencmode[0] == 'r') { appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("GSSAPI encryption required but was impossible (possibly no credential cache, no server support, or using a local socket)\n")); goto error_return; } #endif #ifdef USE_SSL /* * Enable the libcrypto callbacks before checking if SSL needs * to be done. This is done before sending the startup packet * as depending on the type of authentication done, like MD5 * or SCRAM that use cryptohashes, the callbacks would be * required even without a SSL connection */ if (pqsecure_initialize(conn, false, true) < 0) goto error_return; /* * If SSL is enabled and we haven't already got encryption of * some sort running, request SSL instead of sending the * startup message. */ if (conn->allow_ssl_try && !conn->wait_ssl_try && !conn->ssl_in_use #ifdef ENABLE_GSS && !conn->gssenc #endif ) { ProtocolVersion pv; /* * Send the SSL request packet. * * Theoretically, this could block, but it really * shouldn't since we only got here if the socket is * write-ready. */ pv = pg_hton32(NEGOTIATE_SSL_CODE); if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not send SSL negotiation packet: %s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); goto error_return; } /* Ok, wait for response */ conn->status = CONNECTION_SSL_STARTUP; return PGRES_POLLING_READING; } #endif /* USE_SSL */ /* * Build the startup packet. 
*/ startpacket = pqBuildStartupPacket3(conn, &packetlen, EnvironmentOptions); if (!startpacket) { appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("out of memory\n")); goto error_return; } /* * Send the startup packet. * * Theoretically, this could block, but it really shouldn't * since we only got here if the socket is write-ready. */ if (pqPacketSend(conn, 0, startpacket, packetlen) != STATUS_OK) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not send startup packet: %s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); free(startpacket); goto error_return; } free(startpacket); conn->status = CONNECTION_AWAITING_RESPONSE; return PGRES_POLLING_READING; } /* * Handle SSL negotiation: wait for postmaster messages and * respond as necessary. */ case CONNECTION_SSL_STARTUP: { #ifdef USE_SSL PostgresPollingStatusType pollres; /* * On first time through, get the postmaster's response to our * SSL negotiation packet. */ if (!conn->ssl_in_use) { /* * We use pqReadData here since it has the logic to * distinguish no-data-yet from connection closure. Since * conn->ssl isn't set, a plain recv() will occur. */ char SSLok; int rdresult; rdresult = pqReadData(conn); if (rdresult < 0) { /* errorMessage is already filled in */ goto error_return; } if (rdresult == 0) { /* caller failed to wait for data */ return PGRES_POLLING_READING; } if (pqGetc(&SSLok, conn) < 0) { /* should not happen really */ return PGRES_POLLING_READING; } if (SSLok == 'S') { /* mark byte consumed */ conn->inStart = conn->inCursor; /* * Set up global SSL state if required. The crypto * state has already been set if libpq took care of * doing that, so there is no need to make that happen * again. */ if (pqsecure_initialize(conn, true, false) != 0) goto error_return; } else if (SSLok == 'N') { /* mark byte consumed */ conn->inStart = conn->inCursor; /* OK to do without SSL? 
*/ if (conn->sslmode[0] == 'r' || /* "require" */ conn->sslmode[0] == 'v') /* "verify-ca" or * "verify-full" */ { /* Require SSL, but server does not want it */ appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("server does not support SSL, but SSL was required\n")); goto error_return; } /* Otherwise, proceed with normal startup */ conn->allow_ssl_try = false; /* We can proceed using this connection */ conn->status = CONNECTION_MADE; return PGRES_POLLING_WRITING; } else if (SSLok == 'E') { /* * Server failure of some sort, such as failure to * fork a backend process. We need to process and * report the error message, which might be formatted * according to either protocol 2 or protocol 3. * Rather than duplicate the code for that, we flip * into AWAITING_RESPONSE state and let the code there * deal with it. Note we have *not* consumed the "E" * byte here. */ conn->status = CONNECTION_AWAITING_RESPONSE; goto keep_going; } else { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("received invalid response to SSL negotiation: %c\n"), SSLok); goto error_return; } } /* * Begin or continue the SSL negotiation process. */ pollres = pqsecure_open_client(conn); if (pollres == PGRES_POLLING_OK) { /* * At this point we should have no data already buffered. * If we do, it was received before we performed the SSL * handshake, so it wasn't encrypted and indeed may have * been injected by a man-in-the-middle. */ if (conn->inCursor != conn->inEnd) { appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("received unencrypted data after SSL response\n")); goto error_return; } /* SSL handshake done, ready to send startup packet */ conn->status = CONNECTION_MADE; return PGRES_POLLING_WRITING; } if (pollres == PGRES_POLLING_FAILED) { /* * Failed ... if sslmode is "prefer" then do a non-SSL * retry */ if (conn->sslmode[0] == 'p' /* "prefer" */ && conn->allow_ssl_try /* redundant? */ && !conn->wait_ssl_try) /* redundant? 
*/ { /* only retry once */ conn->allow_ssl_try = false; need_new_connection = true; goto keep_going; } /* Else it's a hard failure */ goto error_return; } /* Else, return POLLING_READING or POLLING_WRITING status */ return pollres; #else /* !USE_SSL */ /* can't get here */ goto error_return; #endif /* USE_SSL */ } case CONNECTION_GSS_STARTUP: { #ifdef ENABLE_GSS PostgresPollingStatusType pollres; /* * If we haven't yet, get the postmaster's response to our * negotiation packet */ if (conn->try_gss && !conn->gctx) { char gss_ok; int rdresult = pqReadData(conn); if (rdresult < 0) /* pqReadData fills in error message */ goto error_return; else if (rdresult == 0) /* caller failed to wait for data */ return PGRES_POLLING_READING; if (pqGetc(&gss_ok, conn) < 0) /* shouldn't happen... */ return PGRES_POLLING_READING; if (gss_ok == 'E') { /* * Server failure of some sort. Assume it's a * protocol version support failure, and let's see if * we can't recover (if it's not, we'll get a better * error message on retry). Server gets fussy if we * don't hang up the socket, though. */ conn->try_gss = false; need_new_connection = true; goto keep_going; } /* mark byte consumed */ conn->inStart = conn->inCursor; if (gss_ok == 'N') { /* Server doesn't want GSSAPI; fall back if we can */ if (conn->gssencmode[0] == 'r') { appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("server doesn't support GSSAPI encryption, but it was required\n")); goto error_return; } conn->try_gss = false; /* We can proceed using this connection */ conn->status = CONNECTION_MADE; return PGRES_POLLING_WRITING; } else if (gss_ok != 'G') { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("received invalid response to GSSAPI negotiation: %c\n"), gss_ok); goto error_return; } } /* Begin or continue GSSAPI negotiation */ pollres = pqsecure_open_gss(conn); if (pollres == PGRES_POLLING_OK) { /* * At this point we should have no data already buffered. 
* If we do, it was received before we performed the GSS * handshake, so it wasn't encrypted and indeed may have * been injected by a man-in-the-middle. */ if (conn->inCursor != conn->inEnd) { appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("received unencrypted data after GSSAPI encryption response\n")); goto error_return; } /* All set for startup packet */ conn->status = CONNECTION_MADE; return PGRES_POLLING_WRITING; } else if (pollres == PGRES_POLLING_FAILED && conn->gssencmode[0] == 'p') { /* * We failed, but we can retry on "prefer". Have to drop * the current connection to do so, though. */ conn->try_gss = false; need_new_connection = true; goto keep_going; } return pollres; #else /* !ENABLE_GSS */ /* unreachable */ goto error_return; #endif /* ENABLE_GSS */ } /* * Handle authentication exchange: wait for postmaster messages * and respond as necessary. */ case CONNECTION_AWAITING_RESPONSE: { char beresp; int msgLength; int avail; AuthRequest areq; int res; /* * Scan the message from current point (note that if we find * the message is incomplete, we will return without advancing * inStart, and resume here next time). */ conn->inCursor = conn->inStart; /* Read type byte */ if (pqGetc(&beresp, conn)) { /* We'll come back when there is more data */ return PGRES_POLLING_READING; } /* * Validate message type: we expect only an authentication * request or an error here. Anything else probably means * it's not Postgres on the other end at all. */ if (!(beresp == 'R' || beresp == 'E')) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("expected authentication request from server, but received %c\n"), beresp); goto error_return; } /* Read message length word */ if (pqGetInt(&msgLength, 4, conn)) { /* We'll come back when there is more data */ return PGRES_POLLING_READING; } /* * Try to validate message length before using it. * Authentication requests can't be very large, although GSS * auth requests may not be that small. 
Errors can be a * little larger, but not huge. If we see a large apparent * length in an error, it means we're really talking to a * pre-3.0-protocol server; cope. (Before version 14, the * server also used the old protocol for errors that happened * before processing the startup packet.) */ if (beresp == 'R' && (msgLength < 8 || msgLength > 2000)) { appendPQExpBuffer(&conn->errorMessage, libpq_gettext("expected authentication request from server, but received %c\n"), beresp); goto error_return; } if (beresp == 'E' && (msgLength < 8 || msgLength > 30000)) { /* Handle error from a pre-3.0 server */ conn->inCursor = conn->inStart + 1; /* reread data */ if (pqGets_append(&conn->errorMessage, conn)) { /* We'll come back when there is more data */ return PGRES_POLLING_READING; } /* OK, we read the message; mark data consumed */ conn->inStart = conn->inCursor; /* * Before 7.2, the postmaster didn't always end its * messages with a newline, so add one if needed to * conform to libpq conventions. */ if (conn->errorMessage.len == 0 || conn->errorMessage.data[conn->errorMessage.len - 1] != '\n') { appendPQExpBufferChar(&conn->errorMessage, '\n'); } goto error_return; } /* * Can't process if message body isn't all here yet. */ msgLength -= 4; avail = conn->inEnd - conn->inCursor; if (avail < msgLength) { /* * Before returning, try to enlarge the input buffer if * needed to hold the whole message; see notes in * pqParseInput3. */ if (pqCheckInBufferSpace(conn->inCursor + (size_t) msgLength, conn)) goto error_return; /* We'll come back when there is more data */ return PGRES_POLLING_READING; } /* Handle errors. 
*/ if (beresp == 'E') { if (pqGetErrorNotice3(conn, true)) { /* We'll come back when there is more data */ return PGRES_POLLING_READING; } /* OK, we read the message; mark data consumed */ conn->inStart = conn->inCursor; /* * If error is "cannot connect now", try the next host if * any (but we don't want to consider additional addresses * for this host, nor is there much point in changing SSL * or GSS mode). This is helpful when dealing with * standby servers that might not be in hot-standby state. */ if (strcmp(conn->last_sqlstate, ERRCODE_CANNOT_CONNECT_NOW) == 0) { conn->try_next_host = true; goto keep_going; } /* Check to see if we should mention pgpassfile */ pgpassfileWarning(conn); #ifdef ENABLE_GSS /* * If gssencmode is "prefer" and we're using GSSAPI, retry * without it. */ if (conn->gssenc && conn->gssencmode[0] == 'p') { /* only retry once */ conn->try_gss = false; need_new_connection = true; goto keep_going; } #endif #ifdef USE_SSL /* * if sslmode is "allow" and we haven't tried an SSL * connection already, then retry with an SSL connection */ if (conn->sslmode[0] == 'a' /* "allow" */ && !conn->ssl_in_use && conn->allow_ssl_try && conn->wait_ssl_try) { /* only retry once */ conn->wait_ssl_try = false; need_new_connection = true; goto keep_going; } /* * if sslmode is "prefer" and we're in an SSL connection, * then do a non-SSL retry */ if (conn->sslmode[0] == 'p' /* "prefer" */ && conn->ssl_in_use && conn->allow_ssl_try /* redundant? */ && !conn->wait_ssl_try) /* redundant? */ { /* only retry once */ conn->allow_ssl_try = false; need_new_connection = true; goto keep_going; } #endif goto error_return; } /* It is an authentication request. */ conn->auth_req_received = true; /* Get the type of request. */ if (pqGetInt((int *) &areq, 4, conn)) { /* We'll come back when there are more data */ return PGRES_POLLING_READING; } msgLength -= 4; /* * Process the rest of the authentication request message, and * respond to it if necessary. 
* * Note that conn->pghost must be non-NULL if we are going to * avoid the Kerberos code doing a hostname look-up. */ res = pg_fe_sendauth(areq, msgLength, conn); /* OK, we have processed the message; mark data consumed */ conn->inStart = conn->inCursor; if (res != STATUS_OK) goto error_return; /* * Just make sure that any data sent by pg_fe_sendauth is * flushed out. Although this theoretically could block, it * really shouldn't since we don't send large auth responses. */ if (pqFlush(conn)) goto error_return; if (areq == AUTH_REQ_OK) { /* We are done with authentication exchange */ conn->status = CONNECTION_AUTH_OK; /* * Set asyncStatus so that PQgetResult will think that * what comes back next is the result of a query. See * below. */ conn->asyncStatus = PGASYNC_BUSY; } /* Look to see if we have more data yet. */ goto keep_going; } case CONNECTION_AUTH_OK: { /* * Now we expect to hear from the backend. A ReadyForQuery * message indicates that startup is successful, but we might * also get an Error message indicating failure. (Notice * messages indicating nonfatal warnings are also allowed by * the protocol, as are ParameterStatus and BackendKeyData * messages.) Easiest way to handle this is to let * PQgetResult() read the messages. We just have to fake it * out about the state of the connection, by setting * asyncStatus = PGASYNC_BUSY (done above). */ if (PQisBusy(conn)) return PGRES_POLLING_READING; res = PQgetResult(conn); /* * NULL return indicating we have gone to IDLE state is * expected */ if (res) { if (res->resultStatus != PGRES_FATAL_ERROR) appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("unexpected message from server during startup\n")); else if (conn->send_appname && (conn->appname || conn->fbappname)) { /* * If we tried to send application_name, check to see * if the error is about that --- pre-9.0 servers will * reject it at this stage of the process. If so, * close the connection and retry without sending * application_name. 
We could possibly get a false * SQLSTATE match here and retry uselessly, but there * seems no great harm in that; we'll just get the * same error again if it's unrelated. */ const char *sqlstate; sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE); if (sqlstate && strcmp(sqlstate, ERRCODE_APPNAME_UNKNOWN) == 0) { PQclear(res); conn->send_appname = false; need_new_connection = true; goto keep_going; } } /* * if the resultStatus is FATAL, then conn->errorMessage * already has a copy of the error; needn't copy it back. * But add a newline if it's not there already, since * postmaster error messages may not have one. */ if (conn->errorMessage.len <= 0 || conn->errorMessage.data[conn->errorMessage.len - 1] != '\n') appendPQExpBufferChar(&conn->errorMessage, '\n'); PQclear(res); goto error_return; } /* Almost there now ... */ conn->status = CONNECTION_CHECK_TARGET; goto keep_going; } case CONNECTION_CHECK_TARGET: { /* * If a read-write, read-only, primary, or standby connection * is required, see if we have one. */ if (conn->target_server_type == SERVER_TYPE_READ_WRITE || conn->target_server_type == SERVER_TYPE_READ_ONLY) { bool read_only_server; /* * If the server didn't report * "default_transaction_read_only" or "in_hot_standby" at * startup, we must determine its state by sending the * query "SHOW transaction_read_only". This GUC exists in * all server versions that support 3.0 protocol. */ if (conn->default_transaction_read_only == PG_BOOL_UNKNOWN || conn->in_hot_standby == PG_BOOL_UNKNOWN) { /* * We use PQsendQueryContinue so that * conn->errorMessage does not get cleared. We need * to preserve any error messages related to previous * hosts we have tried and failed to connect to. 
*/ conn->status = CONNECTION_OK; if (!PQsendQueryContinue(conn, "SHOW transaction_read_only")) goto error_return; /* We'll return to this state when we have the answer */ conn->status = CONNECTION_CHECK_WRITABLE; return PGRES_POLLING_READING; } /* OK, we can make the test */ read_only_server = (conn->default_transaction_read_only == PG_BOOL_YES || conn->in_hot_standby == PG_BOOL_YES); if ((conn->target_server_type == SERVER_TYPE_READ_WRITE) ? read_only_server : !read_only_server) { /* Wrong server state, reject and try the next host */ if (conn->target_server_type == SERVER_TYPE_READ_WRITE) appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("session is read-only\n")); else appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("session is not read-only\n")); /* Close connection politely. */ conn->status = CONNECTION_OK; sendTerminateConn(conn); /* * Try next host if any, but we don't want to consider * additional addresses for this host. */ conn->try_next_host = true; goto keep_going; } } else if (conn->target_server_type == SERVER_TYPE_PRIMARY || conn->target_server_type == SERVER_TYPE_STANDBY || conn->target_server_type == SERVER_TYPE_PREFER_STANDBY) { /* * If the server didn't report "in_hot_standby" at * startup, we must determine its state by sending the * query "SELECT pg_catalog.pg_is_in_recovery()". Servers * before 9.0 don't have that function, but by the same * token they don't have any standby mode, so we may just * assume the result. */ if (conn->sversion < 90000) conn->in_hot_standby = PG_BOOL_NO; if (conn->in_hot_standby == PG_BOOL_UNKNOWN) { /* * We use PQsendQueryContinue so that * conn->errorMessage does not get cleared. We need * to preserve any error messages related to previous * hosts we have tried and failed to connect to. 
*/ conn->status = CONNECTION_OK; if (!PQsendQueryContinue(conn, "SELECT pg_catalog.pg_is_in_recovery()")) goto error_return; /* We'll return to this state when we have the answer */ conn->status = CONNECTION_CHECK_STANDBY; return PGRES_POLLING_READING; } /* OK, we can make the test */ if ((conn->target_server_type == SERVER_TYPE_PRIMARY) ? (conn->in_hot_standby == PG_BOOL_YES) : (conn->in_hot_standby == PG_BOOL_NO)) { /* Wrong server state, reject and try the next host */ if (conn->target_server_type == SERVER_TYPE_PRIMARY) appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("server is in hot standby mode\n")); else appendPQExpBufferStr(&conn->errorMessage, libpq_gettext("server is not in hot standby mode\n")); /* Close connection politely. */ conn->status = CONNECTION_OK; sendTerminateConn(conn); /* * Try next host if any, but we don't want to consider * additional addresses for this host. */ conn->try_next_host = true; goto keep_going; } } /* We can release the address list now. */ release_conn_addrinfo(conn); /* * Contents of conn->errorMessage are no longer interesting * (and it seems some clients expect it to be empty after a * successful connection). */ resetPQExpBuffer(&conn->errorMessage); /* We are open for business! */ conn->status = CONNECTION_OK; return PGRES_POLLING_OK; } case CONNECTION_CONSUME: { /* * This state just makes sure the connection is idle after * we've obtained the result of a SHOW or SELECT query. Once * we're clear, return to CONNECTION_CHECK_TARGET state to * decide what to do next. We must transiently set status = * CONNECTION_OK in order to use the result-consuming * subroutines. 
*/ conn->status = CONNECTION_OK; if (!PQconsumeInput(conn)) goto error_return; if (PQisBusy(conn)) { conn->status = CONNECTION_CONSUME; return PGRES_POLLING_READING; } /* Call PQgetResult() again until we get a NULL result */ res = PQgetResult(conn); if (res != NULL) { PQclear(res); conn->status = CONNECTION_CONSUME; return PGRES_POLLING_READING; } conn->status = CONNECTION_CHECK_TARGET; goto keep_going; } case CONNECTION_CHECK_WRITABLE: { /* * Waiting for result of "SHOW transaction_read_only". We * must transiently set status = CONNECTION_OK in order to use * the result-consuming subroutines. */ conn->status = CONNECTION_OK; if (!PQconsumeInput(conn)) goto error_return; if (PQisBusy(conn)) { conn->status = CONNECTION_CHECK_WRITABLE; return PGRES_POLLING_READING; } res = PQgetResult(conn); if (res && PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) == 1) { char *val = PQgetvalue(res, 0, 0); /* * "transaction_read_only = on" proves that at least one * of default_transaction_read_only and in_hot_standby is * on, but we don't actually know which. We don't care * though for the purpose of identifying a read-only * session, so satisfy the CONNECTION_CHECK_TARGET code by * claiming they are both on. On the other hand, if it's * a read-write session, they are certainly both off. */ if (strncmp(val, "on", 2) == 0) { conn->default_transaction_read_only = PG_BOOL_YES; conn->in_hot_standby = PG_BOOL_YES; } else { conn->default_transaction_read_only = PG_BOOL_NO; conn->in_hot_standby = PG_BOOL_NO; } PQclear(res); /* Finish reading messages before continuing */ conn->status = CONNECTION_CONSUME; goto keep_going; } /* Something went wrong with "SHOW transaction_read_only". */ if (res) PQclear(res); /* Append error report to conn->errorMessage. */ appendPQExpBuffer(&conn->errorMessage, libpq_gettext("\"%s\" failed\n"), "SHOW transaction_read_only"); /* Close connection politely. */ conn->status = CONNECTION_OK; sendTerminateConn(conn); /* Try next host. 
*/ conn->try_next_host = true; goto keep_going; } case CONNECTION_CHECK_STANDBY: { /* * Waiting for result of "SELECT pg_is_in_recovery()". We * must transiently set status = CONNECTION_OK in order to use * the result-consuming subroutines. */ conn->status = CONNECTION_OK; if (!PQconsumeInput(conn)) goto error_return; if (PQisBusy(conn)) { conn->status = CONNECTION_CHECK_STANDBY; return PGRES_POLLING_READING; } res = PQgetResult(conn); if (res && PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) == 1) { char *val = PQgetvalue(res, 0, 0); if (strncmp(val, "t", 1) == 0) conn->in_hot_standby = PG_BOOL_YES; else conn->in_hot_standby = PG_BOOL_NO; PQclear(res); /* Finish reading messages before continuing */ conn->status = CONNECTION_CONSUME; goto keep_going; } /* Something went wrong with "SELECT pg_is_in_recovery()". */ if (res) PQclear(res); /* Append error report to conn->errorMessage. */ appendPQExpBuffer(&conn->errorMessage, libpq_gettext("\"%s\" failed\n"), "SELECT pg_is_in_recovery()"); /* Close connection politely. */ conn->status = CONNECTION_OK; sendTerminateConn(conn); /* Try next host. */ conn->try_next_host = true; goto keep_going; } default: appendPQExpBuffer(&conn->errorMessage, libpq_gettext("invalid connection state %d, " "probably indicative of memory corruption\n"), conn->status); goto error_return; } /* Unreachable */ error_return: /* * We used to close the socket at this point, but that makes it awkward * for those above us if they wish to remove this socket from their own * records (an fd_set for example). We'll just have this socket closed * when PQfinish is called (which is compulsory even after an error, since * the connection structure must be freed). */ conn->status = CONNECTION_BAD; return PGRES_POLLING_FAILED; }
0
234,833
/*
 * Convert the currently mounted seed filesystem into a sprout: split the
 * seed devices off into their own fs_devices, then give the mounted
 * filesystem a brand-new fsid and clear its seeding state so new writable
 * devices can be added to it.
 *
 * Returns 0 on success, -EINVAL if the filesystem is not a seed, or a
 * negative errno from the allocation/clone helpers.
 */
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	/* Caller must hold uuid_mutex; we mutate fs_uuids and fsid below. */
	lockdep_assert_held(&uuid_mutex);
	/* Sprouting only makes sense on a filesystem marked as a seed. */
	if (!fs_devices->seeding)
		return -EINVAL;

	/*
	 * Private copy of the seed devices, anchored at
	 * fs_info->fs_devices->seed_list
	 */
	seed_devices = alloc_fs_devices(NULL, NULL);
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	/*
	 * It's necessary to retain a copy of the original seed fs_devices in
	 * fs_uuids so that filesystems which have been seeded can successfully
	 * reference the seed device from open_seed_devices. This also supports
	 * multiple fs seed.
	 */
	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->fs_list, &fs_uuids);

	/*
	 * Seed the new private copy from the live fs_devices, then reset its
	 * list heads/mutex so it owns its own (initially empty) device lists.
	 */
	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	/* Move every device over to the seed copy (RCU-safe for readers). */
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	/* The mounted fs_devices now starts fresh: no devices, not a seed. */
	fs_devices->seeding = false;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = false;
	list_add(&seed_devices->seed_list, &fs_devices->seed_list);

	/* Give the sprout its own identity, distinct from the seed's fsid. */
	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&fs_devices->device_list_mutex);

	/* Clear the seeding flag in the superblock copy as well. */
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
0
220,461
static bool Run(OpKernelContext* ctx, const Tensor& input, const Tensor& filter, int batch, int input_rows, int input_cols, int in_depth, int filter_rows, int filter_cols, int pad_rows, int pad_cols, int out_rows, int out_cols, int out_depth, int dilation_rows, int dilation_cols, int stride_rows, int stride_cols, Tensor* output, TensorFormat data_format) { auto num_threads = ctx->device()->tensorflow_cpu_worker_threads()->num_threads; // See libxsmm_dnn.h for this struct definition. libxsmm_dnn_conv_desc desc; desc.N = batch; desc.C = in_depth; desc.H = input_rows; desc.W = input_cols; desc.K = out_depth; desc.R = filter_rows; desc.S = filter_cols; desc.u = stride_rows; desc.v = stride_cols; desc.pad_h = pad_rows; desc.pad_w = pad_cols; desc.pad_h_in = 0; desc.pad_w_in = 0; desc.pad_h_out = 0; desc.pad_w_out = 0; desc.threads = num_threads; desc.algo = LIBXSMM_DNN_CONV_ALGO_DIRECT; desc.buffer_format = LIBXSMM_DNN_TENSOR_FORMAT_NHWC; desc.filter_format = LIBXSMM_DNN_TENSOR_FORMAT_LIBXSMM; desc.fuse_ops = LIBXSMM_DNN_CONV_FUSE_NONE; desc.options = LIBXSMM_DNN_CONV_OPTION_OVERWRITE; desc.datatype_out = LIBXSMM_DNN_DATATYPE_F32; desc.datatype_in = LIBXSMM_DNN_DATATYPE_F32; if (dilation_rows != 1 || dilation_cols != 1 || !CanUseXsmmConv2D(desc, data_format)) { return false; } auto input_ptr = input.template flat<float>().data(); auto filter_ptr = filter.template flat<float>().data(); auto output_ptr = output->template flat<float>().data(); bool success = functor::XsmmFwdConv2D<CPUDevice, float>()( ctx, desc, input_ptr, filter_ptr, output_ptr); return success; }
0
508,877
// Accessor for this SELECT's item (projection) list. Returns a pointer to
// the member itself, so callers can inspect or modify the list in place.
List<Item>* st_select_lex::get_item_list()
{
  return &item_list;
}
0
253,524
smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
	       __u64 length, __u32 type, int lock, int unlock, bool wait)
{
	/*
	 * A request that only unlocks (no lock flag set) maps onto the
	 * SMB2 unlock flag; otherwise pass the caller's lock type through.
	 */
	__u32 flags = (unlock && !lock) ? SMB2_LOCKFLAG_UNLOCK : type;

	/* Delegate the actual byte-range lock to the SMB2 request builder. */
	return SMB2_lock(xid, tlink_tcon(cfile->tlink),
			 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
			 current->tgid, length, offset, flags, wait);
}
0
359,547
/* CLI handler: "no neighbor <peer> allowas-in" — clear the allowas-in
 * setting for the peer (or peer-group) in the current AFI/SAFI node. */
DEFUN (no_neighbor_allowas_in,
       no_neighbor_allowas_in_cmd,
       NO_NEIGHBOR_CMD2 "allowas-in",
       NO_STR
       NEIGHBOR_STR
       NEIGHBOR_ADDR_STR2
       "allow local ASN appears in aspath attribute\n")
{
  struct peer *peer;

  /* Resolve the argument to a peer or peer-group; errors are reported
   * to the vty by the lookup helper. */
  peer = peer_and_group_lookup_vty (vty, argv[0]);
  if (peer == NULL)
    return CMD_WARNING;

  /* Unset and translate the library result into a CLI return code. */
  return bgp_vty_return (vty,
                         peer_allowas_in_unset (peer, bgp_node_afi (vty),
                                                bgp_node_safi (vty)));
}
0