max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
|---|---|---|
372
|
<filename>clients/google-api-services-firebasedynamiclinks/v1/1.30.1/com/google/api/services/firebasedynamiclinks/v1/model/DeviceInfo.java<gh_stars>100-1000
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.firebasedynamiclinks.v1.model;
/**
* Signals associated with the device making the request.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Firebase Dynamic Links API. For a detailed
* explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
@SuppressWarnings("javadoc")
public final class DeviceInfo extends com.google.api.client.json.GenericJson {

  /** Model name of the requesting device; {@code null} when unknown. */
  @com.google.api.client.util.Key
  private String deviceModelName;

  /** Language code configured on the device; {@code null} when unknown. */
  @com.google.api.client.util.Key
  private String languageCode;

  /**
   * Language code as reported by JavaScript executed inside a WebView;
   * {@code null} when unknown.
   */
  @com.google.api.client.util.Key
  private String languageCodeFromWebview;

  /**
   * Raw language code as reported by iOS itself (e.g. "en-US"), which may differ in format from
   * the value the WebView reports (e.g. "en_US"); {@code null} when unknown.
   */
  @com.google.api.client.util.Key
  private String languageCodeRaw;

  /**
   * Display resolution height. Annotated with {@code @JsonString} so the 64-bit value is
   * serialized as a JSON string; {@code null} when unknown.
   */
  @com.google.api.client.util.Key @com.google.api.client.json.JsonString
  private Long screenResolutionHeight;

  /**
   * Display resolution width. Annotated with {@code @JsonString} so the 64-bit value is
   * serialized as a JSON string; {@code null} when unknown.
   */
  @com.google.api.client.util.Key @com.google.api.client.json.JsonString
  private Long screenResolutionWidth;

  /** Timezone setting of the device; {@code null} when unknown. */
  @com.google.api.client.util.Key
  private String timezone;

  /** @return the device model name, or {@code null} for none */
  public String getDeviceModelName() {
    return deviceModelName;
  }

  /** Sets the device model name ({@code null} for none); returns {@code this} for chaining. */
  public DeviceInfo setDeviceModelName(String deviceModelName) {
    this.deviceModelName = deviceModelName;
    return this;
  }

  /** @return the device language code setting, or {@code null} for none */
  public String getLanguageCode() {
    return languageCode;
  }

  /** Sets the device language code ({@code null} for none); returns {@code this} for chaining. */
  public DeviceInfo setLanguageCode(String languageCode) {
    this.languageCode = languageCode;
    return this;
  }

  /** @return the language code obtained from the WebView, or {@code null} for none */
  public String getLanguageCodeFromWebview() {
    return languageCodeFromWebview;
  }

  /**
   * Sets the language code obtained from the WebView ({@code null} for none); returns
   * {@code this} for chaining.
   */
  public DeviceInfo setLanguageCodeFromWebview(String languageCodeFromWebview) {
    this.languageCodeFromWebview = languageCodeFromWebview;
    return this;
  }

  /** @return the raw iOS language code, or {@code null} for none */
  public String getLanguageCodeRaw() {
    return languageCodeRaw;
  }

  /** Sets the raw iOS language code ({@code null} for none); returns {@code this} for chaining. */
  public DeviceInfo setLanguageCodeRaw(String languageCodeRaw) {
    this.languageCodeRaw = languageCodeRaw;
    return this;
  }

  /** @return the display resolution height, or {@code null} for none */
  public Long getScreenResolutionHeight() {
    return screenResolutionHeight;
  }

  /**
   * Sets the display resolution height ({@code null} for none); returns {@code this} for
   * chaining.
   */
  public DeviceInfo setScreenResolutionHeight(Long screenResolutionHeight) {
    this.screenResolutionHeight = screenResolutionHeight;
    return this;
  }

  /** @return the display resolution width, or {@code null} for none */
  public Long getScreenResolutionWidth() {
    return screenResolutionWidth;
  }

  /**
   * Sets the display resolution width ({@code null} for none); returns {@code this} for
   * chaining.
   */
  public DeviceInfo setScreenResolutionWidth(Long screenResolutionWidth) {
    this.screenResolutionWidth = screenResolutionWidth;
    return this;
  }

  /** @return the device timezone setting, or {@code null} for none */
  public String getTimezone() {
    return timezone;
  }

  /** Sets the device timezone setting ({@code null} for none); returns {@code this} for chaining. */
  public DeviceInfo setTimezone(String timezone) {
    this.timezone = timezone;
    return this;
  }

  @Override
  public DeviceInfo set(String fieldName, Object value) {
    return (DeviceInfo) super.set(fieldName, value);
  }

  @Override
  public DeviceInfo clone() {
    return (DeviceInfo) super.clone();
  }
}
| 2,058
|
1,144
|
<gh_stars>1000+
package de.metas.tourplanning.api.impl;
import static org.adempiere.model.InterfaceWrapperHelper.save;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.LocalDate;
import de.metas.bpartner.BPartnerId;
import de.metas.invoicecandidate.modelvalidator.C_BPartner;
import org.adempiere.util.lang.IContextAware;
import org.compiere.model.I_C_BPartner;
import org.junit.jupiter.api.Test;
import de.metas.bpartner.BPartnerLocationId;
import de.metas.tourplanning.TourPlanningTestBase;
import de.metas.tourplanning.api.IDeliveryDayQueryParams;
import de.metas.tourplanning.api.PlainDeliveryDayQueryParams;
import de.metas.tourplanning.model.I_M_DeliveryDay;
/**
 * Tests for {@link DeliveryDayDAO#retrieveDeliveryDay(IContextAware, IDeliveryDayQueryParams)}.
 *
 * @author tsa
 */
public class DeliveryDayDAO_retrieveDeliveryDay_Test extends TourPlanningTestBase
{
	@Override
	public void afterInit()
	{
		tour = createTour("tour01");
		tourVersion = createTourVersion(tour, LocalDate.of(2014, 1, 1));

		bpartner = createBPartner("bp1");
		bpLocation = createBPLocation(bpartner);
	}

	@Test
	public void test_StandardUseCase()
	{
		final I_M_DeliveryDay dd1 = createDeliveryDay("07.09.2014 15:00:00.000", 5, bpartner.getC_BPartner_ID(), bpLocation.getC_BPartner_Location_ID());
		final I_M_DeliveryDay dd2 = createDeliveryDay("08.09.2014 15:00:00.000", 5, bpartner.getC_BPartner_ID(), bpLocation.getC_BPartner_Location_ID());
		final I_M_DeliveryDay dd3 = createDeliveryDay("09.09.2014 15:00:00.000", 5, bpartner.getC_BPartner_ID(), bpLocation.getC_BPartner_Location_ID());

		// Delivery day of another partner (and without a location): shall be ignored
		// because the queries below filter on bpLocation.
		final I_C_BPartner otherBPartner = createBPartner("bp2");
		createDeliveryDay("10.09.2014 15:00:00.000", 5, otherBPartner.getC_BPartner_ID(), -1);

		testRetrieveDeliveryDay(null, "06.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd1, "07.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd2, "08.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd3, "09.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd3, "10.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd3, "11.09.2014 23:59:59.999");
	}

	@Test
	public void test_M_DeliveryDay_DeliveryDate_Plus_Buffer_ExceedingTheDay()
	{
		final I_M_DeliveryDay dd1 = createDeliveryDay("07.09.2014 19:00:00.000", 5, bpartner.getC_BPartner_ID(), bpLocation.getC_BPartner_Location_ID());
		final I_M_DeliveryDay dd2 = createDeliveryDay("08.09.2014 19:00:00.000", 5, bpartner.getC_BPartner_ID(), bpLocation.getC_BPartner_Location_ID());
		final I_M_DeliveryDay dd3 = createDeliveryDay("09.09.2014 19:00:00.000", 5, bpartner.getC_BPartner_ID(), bpLocation.getC_BPartner_Location_ID());

		testRetrieveDeliveryDay(null, "07.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd1, "08.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd2, "09.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd3, "10.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd3, "11.09.2014 23:59:59.999");
	}

	@Test
	public void test_M_DeliveryDay_SkipProcessed()
	{
		final I_M_DeliveryDay dd1 = createDeliveryDay("07.09.2014 15:00:00.000", 5, bpartner.getC_BPartner_ID(), bpLocation.getC_BPartner_Location_ID());
		final I_M_DeliveryDay dd2 = createDeliveryDay("08.09.2014 15:00:00.000", 5, bpartner.getC_BPartner_ID(), bpLocation.getC_BPartner_Location_ID());
		final I_M_DeliveryDay dd3 = createDeliveryDay("09.09.2014 15:00:00.000", 5, bpartner.getC_BPartner_ID(), bpLocation.getC_BPartner_Location_ID());

		testRetrieveDeliveryDay(null, "06.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd1, "07.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd2, "08.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd3, "09.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd3, "10.09.2014 23:59:59.999");
		testRetrieveDeliveryDay(dd3, "11.09.2014 23:59:59.999");
	}

	/**
	 * Builds query params for {@link #bpLocation}, not-yet-fetched, not-processed,
	 * with the given delivery date.
	 */
	private IDeliveryDayQueryParams createDeliveryDayQueryParams(final String deliveryDateStr)
	{
		final PlainDeliveryDayQueryParams params = new PlainDeliveryDayQueryParams();
		params.setBPartnerLocationId(BPartnerLocationId.ofRepoId(bpLocation.getC_BPartner_ID(), bpLocation.getC_BPartner_Location_ID()));
		params.setToBeFetched(false);
		params.setDeliveryDate(toZonedDateTime(deliveryDateStr));
		params.setProcessed(false);
		return params;
	}

	/**
	 * Convenient method for calling {@link DeliveryDayDAO#retrieveDeliveryDay(IContextAware, IDeliveryDayQueryParams)}.
	 */
	private I_M_DeliveryDay retrieveDeliveryDay(final String deliveryDateStr)
	{
		final IDeliveryDayQueryParams params = createDeliveryDayQueryParams(deliveryDateStr);
		return deliveryDayDAO.retrieveDeliveryDay(contextProvider, params);
	}

	private void testRetrieveDeliveryDay(final I_M_DeliveryDay deliveryDayExpected, final String deliveryDateStr)
	{
		final I_M_DeliveryDay deliveryDayActual = retrieveDeliveryDay(deliveryDateStr);

		// NOTE: withFailMessage() has to be called *before* isEqualTo(); calling it
		// afterwards has no effect because the assertion has already executed.
		// Also use a %s placeholder so the delivery date actually shows up in the message.
		assertThat(deliveryDayActual)
				.withFailMessage("Invalid M_DeliveryDay for: %s", deliveryDateStr)
				.isEqualTo(deliveryDayExpected);
	}
}
| 2,156
|
334
|
<reponame>NuAngel/PoS-DogeCoin_PUPS
// Auto generated code, do not modify
package nxt.http.callers;
import nxt.http.APICall;
/**
 * Fluent builder for the {@code searchCurrencies} API request. Obtain an instance via
 * {@link #create()} and chain parameter setters before executing the call.
 */
public class SearchCurrenciesCall extends APICall.Builder<SearchCurrenciesCall> {

    private SearchCurrenciesCall() {
        super(ApiSpec.searchCurrencies);
    }

    /** Creates a new, empty {@code searchCurrencies} request builder. */
    public static SearchCurrenciesCall create() {
        return new SearchCurrenciesCall();
    }

    /** Sets the {@code query} request parameter (the search query string). */
    public SearchCurrenciesCall query(String query) {
        return param("query", query);
    }

    /** Sets the {@code firstIndex} request parameter (start of the result range). */
    public SearchCurrenciesCall firstIndex(int firstIndex) {
        return param("firstIndex", firstIndex);
    }

    /** Sets the {@code lastIndex} request parameter (end of the result range). */
    public SearchCurrenciesCall lastIndex(int lastIndex) {
        return param("lastIndex", lastIndex);
    }

    /** Sets the {@code includeCounts} request parameter. */
    public SearchCurrenciesCall includeCounts(boolean includeCounts) {
        return param("includeCounts", includeCounts);
    }

    /** Sets the {@code requireBlock} request parameter. */
    public SearchCurrenciesCall requireBlock(String requireBlock) {
        return param("requireBlock", requireBlock);
    }

    /** Sets the {@code requireLastBlock} request parameter. */
    public SearchCurrenciesCall requireLastBlock(String requireLastBlock) {
        return param("requireLastBlock", requireLastBlock);
    }
}
| 379
|
482
|
package io.cattle.platform.servicediscovery.api.service;
import io.cattle.platform.core.addon.ServiceLink;
import io.cattle.platform.core.model.Service;
import io.cattle.platform.core.model.Stack;
import java.util.List;
import java.util.Map;
/**
 * API-level service-discovery operations: managing links between services and
 * building compose-format configuration for the services of a stack.
 */
public interface ServiceDiscoveryApiService {

    /** Adds {@code serviceLink} to {@code service}. */
    void addServiceLink(Service service, ServiceLink serviceLink);

    /** Removes {@code serviceLink} from {@code service}. */
    void removeServiceLink(Service service, ServiceLink serviceLink);

    /** Returns the services belonging to the stack with the given id. */
    List<? extends Service> listStackServices(long stackId);

    /**
     * Builds compose configuration for the given services.
     * NOTE(review): the entry presumably pairs the docker-compose and rancher-compose
     * configs (cf. the two build*ComposeConfig methods below) — confirm with the implementation.
     */
    Map.Entry<String, String> buildComposeConfig(List<? extends Service> services, Stack stack);

    /** Builds the docker-compose configuration for the given services. */
    String buildDockerComposeConfig(List<? extends Service> services, Stack stack);

    /** Builds the rancher-compose configuration for the given services. */
    String buildRancherComposeConfig(List<? extends Service> services);

    /** Returns the certificate associated with the given service. */
    String getServiceCertificate(Service service);

    /** Returns whether the given service is a V1 load balancer. */
    boolean isV1LB(Service service);
}
| 240
|
1,968
|
//////////////////////////////////////////////////////////////////////////////
//
// This file is part of the Corona game engine.
// For overview and more information on licensing please refer to README.md
// Home page: https://github.com/coronalabs/corona
// Contact: <EMAIL>
//
//////////////////////////////////////////////////////////////////////////////
#include "Core/Rtt_Build.h"
#include "Display/Rtt_ShapeAdapterPolygon.h"
#include "Core/Rtt_StringHash.h"
#include "Display/Rtt_DisplayTypes.h"
#include "Display/Rtt_ShapePath.h"
#include "Display/Rtt_TesselatorPolygon.h"
#include "Rtt_Lua.h"
#include "Rtt_LuaContext.h"
// ----------------------------------------------------------------------------
namespace Rtt
{
// ----------------------------------------------------------------------------
// Returns the shared, immutable adapter instance (function-local static,
// created on first use). Polygon adapters hold no per-shape state, so one
// instance serves all polygon paths.
const ShapeAdapterPolygon&
ShapeAdapterPolygon::Constant()
{
    static const ShapeAdapterPolygon sAdapter;
    return sAdapter;
}
// Reads a flat Lua array of coordinates {x1, y1, x2, y2, ...} at `index` into
// the tesselator's contour, then recenters the vertices around the center of
// the contour's bounding box. Returns false (leaving the contour untouched)
// when the value at `index` is not a table.
bool
ShapeAdapterPolygon::InitializeContour(
    lua_State *L, int index, TesselatorPolygon& tesselator )
{
    bool result = false;

    // Convert a possibly relative (negative) stack index into an absolute one,
    // so it stays valid while values are pushed onto the stack below.
    index = Lua::Normalize( L, index );
    if ( lua_istable( L, index ) )
    {
        ArrayVertex2& contour = tesselator.GetContour();
        Rtt_ASSERT( contour.Length() == 0 );

        // This is used to find the center of the body.
        Rect bounds;

        // Each vertex occupies two consecutive array slots (x, then y).
        int numVertices = (int) lua_objlen( L, index ) >> 1;
        for ( int i = 0; i < numVertices; i++ )
        {
            // Lua is one-based, so the first element must be at index 1.
            lua_rawgeti( L, index, ( ( i * 2 ) + 1 ) );

            // Lua is one-based, so the second element must be at index 2.
            lua_rawgeti( L, index, ( ( i * 2 ) + 2 ) );

            // The x value is now at stack slot -2 and the y value at -1.
            Vertex2 v = { luaL_toreal( L, -2 ),
                            luaL_toreal( L, -1 ) };
            lua_pop( L, 2 );

            contour.Append( v );
            bounds.Union( v );
        }

        Vertex2 center_offset;
        bounds.GetCenter( center_offset );

        // Offset the contour so the bounding-box center (used here as an
        // approximation of the body's center of mass) sits at the origin.
        for ( int i = 0; i < numVertices; i++ )
        {
            contour[ i ].x -= center_offset.x;
            contour[ i ].y -= center_offset.y;
        }

        // Mark cached tesselation results stale so they get rebuilt.
        tesselator.Invalidate();

        result = true;
    }

    return result;
}
// ----------------------------------------------------------------------------
// Construction only tags the base adapter with the polygon shape type;
// the adapter itself is stateless.
ShapeAdapterPolygon::ShapeAdapterPolygon()
    : Super( kPolygonType )
{
}
// Returns the lazily-constructed, function-static property-name hash for this
// adapter. The key list contains only the empty string because the polygon
// adapter currently exposes no properties beyond the inherited ones.
StringHash *
ShapeAdapterPolygon::GetHash( lua_State *L ) const
{
    static const char *keys[] =
    {
        "",
    };
    static StringHash sHash( *LuaContext::GetAllocator( L ), keys, sizeof( keys ) / sizeof( const char * ), 1, 0, 0, __FILE__, __LINE__ );
    return &sHash;
}
// No properties (except inherited ones), so disabling for now.
// NOTE(review): this ValueForKey/SetValueForKey pair is compiled out; it is a
// skeleton to re-enable once the polygon adapter grows its own properties.
#if 0
int
ShapeAdapterPolygon::ValueForKey(
    const LuaUserdataProxy& sender,
    lua_State *L,
    const char *key ) const
{
    int result = 0;

    Rtt_ASSERT( key ); // Caller should check at the top-most level

    const ShapePath *path = (const ShapePath *)sender.GetUserdata();
    if ( ! path ) { return result; }

    const TesselatorPolygon *tesselator =
        static_cast< const TesselatorPolygon * >( path->GetTesselator() );
    if ( ! tesselator ) { return result; }

    result = 1; // Assume 1 Lua value will be pushed on the stack

    int index = GetHash( L )->Lookup( key );
    switch ( index )
    {
        default:
            result = 0; // No Lua values pushed
            break;
    }

    return result;
}

bool
ShapeAdapterPolygon::SetValueForKey(
    LuaUserdataProxy& sender,
    lua_State *L,
    const char *key,
    int valueIndex ) const
{
    bool result = false;

    Rtt_ASSERT( key ); // Caller should check at the top-most level

    ShapePath *path = (ShapePath *)sender.GetUserdata();
    if ( ! path ) { return result; }

    TesselatorPolygon *tesselator =
        static_cast< TesselatorPolygon * >( path->GetTesselator() );
    if ( ! tesselator ) { return result; }

    result = true; // Assume value will be set

    int index = GetHash( L )->Lookup( key );
    switch ( index )
    {
        default:
            result = false; // No value set
            break;
    }

    return result;
}
#endif
// ----------------------------------------------------------------------------
} // namespace Rtt
// ----------------------------------------------------------------------------
| 1,341
|
1,821
|
<filename>src/carnot/exec/exec_node.h
/*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "src/carnot/exec/exec_state.h"
#include "src/carnot/plan/operators.h"
#include "src/common/base/base.h"
#include "src/common/perf/perf.h"
#include "src/table_store/table_store.h"
namespace px {
namespace carnot {
namespace exec {
// Role of an exec node in the query plan: produces row batches (source),
// consumes them terminally (sink), or transforms them (processing).
enum class ExecNodeType : int8_t {
  kSourceNode = 0,
  kSinkNode = 1,
  kProcessingNode = 2,
};
// Per-node execution statistics: byte/row/batch counts plus timers. Every
// mutator is a no-op unless `collect_exec_stats` was true at construction, so
// the instrumentation can remain in the hot path at negligible cost when
// disabled.
struct ExecNodeStats {
  explicit ExecNodeStats(bool collect_stats) : collect_exec_stats(collect_stats) {}

  // Records one output batch: bumps the batch count and accumulates its
  // byte and row sizes.
  void AddOutputStats(const table_store::schema::RowBatch& rb) {
    if (!collect_exec_stats) {
      return;
    }
    ++batches_output;
    bytes_output += rb.NumBytes();
    rows_output += rb.num_rows();
  }

  // Records one input batch: bumps the batch count and accumulates its
  // byte and row sizes.
  void AddInputStats(const table_store::schema::RowBatch& rb) {
    if (!collect_exec_stats) {
      return;
    }
    ++batches_input;
    bytes_input += rb.NumBytes();
    rows_input += rb.num_rows();
  }

  // Resumes/stops the timer covering time spent executing child nodes.
  void ResumeChildTimer() {
    if (!collect_exec_stats) {
      return;
    }
    children_timer.Resume();
  }
  void StopChildTimer() {
    if (!collect_exec_stats) {
      return;
    }
    children_timer.Stop();
  }

  // Resumes/stops the timer covering this node's total execution
  // (self time + children time).
  void ResumeTotalTimer() {
    if (!collect_exec_stats) {
      return;
    }
    total_timer.Resume();
  }
  void StopTotalTimer() {
    if (!collect_exec_stats) {
      return;
    }
    total_timer.Stop();
  }

  // Stores an arbitrary named numeric metric.
  void AddExtraMetric(std::string_view key, double value) {
    if (!collect_exec_stats) {
      return;
    }
    extra_metrics[key] = value;
  }

  // Stores an arbitrary named string annotation.
  void AddExtraInfo(std::string_view key, std::string_view value) {
    if (!collect_exec_stats) {
      return;
    }
    extra_info[key] = value;
  }

  // Elapsed times, converted from microseconds to nanoseconds.
  int64_t ChildExecTime() const { return children_timer.ElapsedTime_us() * 1000; }
  int64_t TotalExecTime() const { return total_timer.ElapsedTime_us() * 1000; }
  int64_t SelfExecTime() const { return TotalExecTime() - ChildExecTime(); }

  // Total bytes input to this exec node.
  int64_t bytes_input = 0;
  // Total rows input to this exec node.
  int64_t rows_input = 0;
  // Total batches input to this exec node.
  int64_t batches_input = 0;
  // Total bytes output by this exec node.
  int64_t bytes_output = 0;
  // Total rows output by this exec node.
  int64_t rows_output = 0;
  // Total batches output by this exec node.
  int64_t batches_output = 0;
  // Total timer for the node = children_time + self_time.
  ElapsedTimer total_timer;
  // Total timer for the children of the node.
  ElapsedTimer children_timer;
  // Flag to determine whether to collect stats or not.
  bool collect_exec_stats;
  // Extra metrics to store.
  absl::flat_hash_map<std::string, double> extra_metrics;
  absl::flat_hash_map<std::string, std::string> extra_info;
};
/**
 * This is the base class for the execution nodes in Carnot.
 *
 * The public entry points (Init/Prepare/Open/Close/GenerateNext/ConsumeNext)
 * are non-virtual and delegate to the corresponding *Impl overrides
 * (non-virtual interface pattern), wrapping them with stats collection.
 */
class ExecNode {
 public:
  ExecNode() = delete;
  virtual ~ExecNode() = default;

  /**
   * Init is called with plan & schema information.
   * @param plan_node the plan class of the node.
   * @param output_descriptor The output column schema of row batches.
   * @param input_descriptors The input column schema of row batches.
   * @param collect_exec_stats Whether to record timing/row/byte statistics.
   * @return The status of the init.
   */
  Status Init(const plan::Operator& plan_node,
              const table_store::schema::RowDescriptor& output_descriptor,
              std::vector<table_store::schema::RowDescriptor> input_descriptors,
              bool collect_exec_stats = false) {
    is_initialized_ = true;
    output_descriptor_ = std::make_unique<table_store::schema::RowDescriptor>(output_descriptor);
    input_descriptors_ = input_descriptors;
    stats_ = std::make_unique<ExecNodeStats>(collect_exec_stats);
    return InitImpl(plan_node);
  }

  /**
   * Setup internal data structures, perform validation, etc.
   * @param exec_state The execution state.
   * @return The status of the prepare.
   */
  Status Prepare(ExecState* exec_state) {
    DCHECK(is_initialized_);
    return PrepareImpl(exec_state);
  }

  /**
   * Acquire memory resources, etc.
   * @param exec_state The execution state.
   * @return The status of the open.
   */
  Status Open(ExecState* exec_state) {
    DCHECK(is_initialized_);
    return OpenImpl(exec_state);
  }

  /**
   * Close is where cleanup should take place. This includes cleaning up objects.
   * It is highly recommended that a default destructor be used and cleanup performed here,
   * since at the end of a query the data is batch deleted and ordering is not guaranteed.
   * @param exec_state The execution state.
   * @return The status of the close.
   */
  Status Close(ExecState* exec_state) {
    DCHECK(is_initialized_);
    return CloseImpl(exec_state);
  }

  /**
   * GenerateNext is called to produce the next row batch. This is only valid
   * on source nodes (and will result in an error on other nodes).
   * @param exec_state The execution state.
   * @return The status of the execution.
   */
  Status GenerateNext(ExecState* exec_state) {
    DCHECK(is_initialized_);
    DCHECK(type() == ExecNodeType::kSourceNode);
    stats_->ResumeTotalTimer();
    PL_RETURN_IF_ERROR(GenerateNextImpl(exec_state));
    stats_->StopTotalTimer();
    return Status::OK();
  }

  /**
   * Consume the next row batch. This function is only valid for Sink and Processing
   * Nodes.
   *
   * This needs to be careful to forward the output batch to all children.
   *
   * @param exec_state The execution state.
   * @param rb The input row batch.
   * @param parent_index Which of this node's parents sent the batch.
   * @return The Status of consumption.
   */
  Status ConsumeNext(ExecState* exec_state, const table_store::schema::RowBatch& rb,
                     size_t parent_index) {
    DCHECK(is_initialized_);
    DCHECK(type() == ExecNodeType::kSinkNode || type() == ExecNodeType::kProcessingNode);
    // End-of-stream implies the final window has also ended, so eos without
    // eow is a malformed batch.
    if (rb.eos() && !rb.eow()) {
      return error::Internal(
          "ConsumeNext received row batch with end of stream set but not end of window.");
    }
    stats_->AddInputStats(rb);
    stats_->ResumeTotalTimer();
    PL_RETURN_IF_ERROR(ConsumeNextImpl(exec_state, rb, parent_index));
    stats_->StopTotalTimer();
    return Status::OK();
  }

  /**
   * Check if it's a source node.
   */
  bool IsSource() { return type() == ExecNodeType::kSourceNode; }

  /**
   * Check if it's a sink node.
   */
  bool IsSink() { return type() == ExecNodeType::kSinkNode; }

  /**
   * Check if it's a processing node.
   */
  bool IsProcessing() { return type() == ExecNodeType::kProcessingNode; }

  /**
   * Get a debug string for the node.
   * @return the debug string.
   */
  std::string DebugString() { return DebugStringImpl(); }

  /**
   * Add a new child node where data is forwarded.
   * This node will not own the child. The lifetime of the child should
   * exceed the lifetime of this node.
   * The node also needs to know which parent index it is for its child.
   * A node that is the 2nd parent of a child needs to pass that information
   * down when it sends that child row batches so the child can differentiate
   * between the row batches of its various parents.
   *
   * @param child Another execution node.
   * @param parent_index The index this node occupies among the child's parents.
   */
  void AddChild(ExecNode* child, size_t parent_index) {
    children_.emplace_back(child);
    parent_ids_for_children_.emplace_back(parent_index);
  }

  /**
   * Get the type of the execution node.
   * @return the ExecNodeType.
   */
  ExecNodeType type() { return type_; }

  /**
   * @return the children of the execution node.
   */
  std::vector<ExecNode*> children() { return children_; }

  ExecNodeStats* stats() const { return stats_.get(); }

 protected:
  /**
   * Send data to children row batches.
   * @param exec_state The exec state.
   * @param rb The row batch to send.
   * @return Status of children execution.
   */
  Status SendRowBatchToChildren(ExecState* exec_state, const table_store::schema::RowBatch& rb) {
    stats_->ResumeChildTimer();
    for (size_t i = 0; i < children_.size(); ++i) {
      PL_RETURN_IF_ERROR(children_[i]->ConsumeNext(exec_state, rb, parent_ids_for_children_[i]));
    }
    stats_->StopChildTimer();
    stats_->AddOutputStats(rb);
    if (rb.eos()) {
      // End-of-stream must be sent at most once per node.
      DCHECK(!sent_eos_);
      sent_eos_ = true;
    }
    return Status::OK();
  }

  explicit ExecNode(ExecNodeType type) : type_(type) {}

  // Defines the protected implementations of the non-virtual interface functions
  // defined above.
  virtual std::string DebugStringImpl() = 0;
  virtual Status InitImpl(const plan::Operator& plan_node) = 0;
  virtual Status PrepareImpl(ExecState* exec_state) = 0;
  virtual Status OpenImpl(ExecState* exec_state) = 0;
  virtual Status CloseImpl(ExecState* exec_state) = 0;
  virtual Status GenerateNextImpl(ExecState*) {
    return error::Unimplemented("Implement in derived class (if source)");
  }
  virtual Status ConsumeNextImpl(ExecState*, const table_store::schema::RowBatch&, size_t) {
    return error::Unimplemented("Implement in derived class (if sink or processing)");
  }

  bool is_closed() { return is_closed_; }

  std::unique_ptr<table_store::schema::RowDescriptor> output_descriptor_;
  std::vector<table_store::schema::RowDescriptor> input_descriptors_;
  // Whether or not the node sent EOS to its children.
  bool sent_eos_ = false;

 private:
  // The stats of this exec node.
  std::unique_ptr<ExecNodeStats> stats_;
  // Unowned reference to the children. Must remain valid for the duration of query.
  std::vector<ExecNode*> children_;
  // For each of the children (which may have multiple parents) which parent is this node?
  // Parents 0, 1, and 2 would exist for a node with 3 parents.
  std::vector<size_t> parent_ids_for_children_;
  // Whether Close() has been called on this ExecNode.
  bool is_closed_ = false;
  // The type of execution node.
  ExecNodeType type_;
  // Whether this node has been initialized.
  bool is_initialized_ = false;
};
/**
* Processing node is the base class for anything that computes
* producing 1:1 or N:M records. For example: Agg, Map, etc.
*/
class ProcessingNode : public ExecNode {
public:
ProcessingNode() : ExecNode(ExecNodeType::kProcessingNode) {}
virtual ~ProcessingNode() = default;
};
/**
 * Source node is the base class for anything that produces records from some source.
 * For example: MemorySource.
 */
class SourceNode : public ExecNode {
 public:
  SourceNode() : ExecNode(ExecNodeType::kSourceNode) {}
  virtual ~SourceNode() = default;

  // A source still has batches to emit until it has sent its end-of-stream batch.
  bool HasBatchesRemaining() { return !sent_eos_; }

  // Implemented by concrete sources; presumably indicates whether the next
  // batch can be generated immediately — confirm against implementations.
  virtual bool NextBatchReady() = 0;

  int64_t BytesProcessed() const { return bytes_processed_; }
  int64_t RowsProcessed() const { return rows_processed_; }

  // Emits a zero-row batch with eow/eos set, signaling end-of-stream to children.
  Status SendEndOfStream(ExecState* exec_state) {
    // TODO(philkuz) this part is not tracked w/ the timer. Need to include this in NVI or cut
    // losses.
    PL_ASSIGN_OR_RETURN(auto rb, table_store::schema::RowBatch::WithZeroRows(
                                     *output_descriptor_, /*eow*/ true, /*eos*/ true));
    return SendRowBatchToChildren(exec_state, *rb);
  }

 protected:
  // Counters maintained by derived sources.
  int64_t rows_processed_ = 0;
  int64_t bytes_processed_ = 0;
};
/**
* Sink node is the base class for anything that consumes records and writes to some sink.
* For example: MemorySink.
*/
class SinkNode : public ExecNode {
public:
SinkNode() : ExecNode(ExecNodeType::kSinkNode) {}
virtual ~SinkNode() = default;
};
} // namespace exec
} // namespace carnot
} // namespace px
| 4,105
|
4,081
|
/*
* The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
* (the "License"). You may not use this work except in compliance with the License, which is
* available at www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied, as more fully set forth in the License.
*
* See the NOTICE file distributed with this work for information regarding copyright ownership.
*/
package alluxio.master.table;
import alluxio.table.common.BaseProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A property name with a description and a default value for the catalog.
 * Well-known catalog properties are exposed as constants on this class.
 */
public class CatalogProperty extends BaseProperty {
  private static final Logger LOG = LoggerFactory.getLogger(CatalogProperty.class);

  /** Default number of threads used to sync tables from the UDB in parallel. */
  public static final int DEFAULT_DB_SYNC_THREADS = 4;

  /** UDB table names (comma-separated) that the catalog should skip. */
  public static final CatalogProperty DB_IGNORE_TABLES =
      new CatalogProperty("catalog.db.ignore.udb.tables",
          "The comma-separated list of table names to ignore from the UDB.", "");

  /** Maximum parallelism when syncing all tables from the under database. */
  public static final CatalogProperty DB_SYNC_THREADS =
      new CatalogProperty("catalog.db.sync.threads",
          "The maximum number of threads to use when parallel syncing all the tables from the "
              + "under database (UDB) to the catalog. If this is set too large, the threads may "
              + "overload the UDB, and if set too low, syncing a database with many tables may "
              + "take a long time.",
          Integer.toString(DEFAULT_DB_SYNC_THREADS));

  /** Location of the UDB configuration file. */
  public static final CatalogProperty DB_CONFIG_FILE =
      new CatalogProperty("catalog.db.config.file",
          "The config file for the UDB.", "<catalog.db.config.file>");

  private CatalogProperty(String name, String description, String defaultValue) {
    super(name, description, defaultValue);
  }
}
| 601
|
556
|
"""
Worlds are used to manage simulations consisting of multiple states.
A world uses a StateCollection with CollectivePhysics to resolve state dependencies.
Worlds also facilitate referencing states when performing a forward simulation.
A default World, called `world` is provided for convenience.
"""
import inspect
import warnings
from typing import TypeVar
from phi import geom
from phi.field import GeometryMask
from phi.physics._effect import Gravity
from ._physics import Physics, State, struct, _as_physics, Static
class StateProxy(object):
    """
    Mirrors a single named state of an associated :class:`World`.

    While ``State`` objects are generally immutable, a ``StateProxy`` also
    supports *setting* any attribute of the state: when an attribute is
    assigned, a copy of the state carrying the new value replaces the old state
    in the world, and the proxy then mirrors the new state. After
    ``world.step()`` is invoked, all proxies of that world mirror the
    post-step state.
    """

    def __init__(self, enclosing_world, state_name):
        # NOTE: these two names are whitelisted in __setattr__ below; any new
        # plain attribute on the proxy must be added to that whitelist.
        self.world = enclosing_world
        self.state_name = state_name

    @property
    def state(self):
        """The State in the referenced world matching this proxy's ``state_name``."""
        state = self.world.state[self.state_name]
        assert state is not None
        return state

    @state.setter
    def state(self, state):
        """Replaces the matching State in the referenced world with ``state``."""
        assert state.name == self.state_name
        self.world.state = self.world.state.state_replaced(state)

    @property
    def physics(self):
        """
        The Physics object the referenced world uses for this system.
        If none was set manually, the world's default physics is returned.
        """
        physics = self.world.physics.for_(self.state)
        assert physics is not None
        return physics

    @physics.setter
    def physics(self, physics):
        """Registers ``physics`` for this system in the referenced world."""
        assert isinstance(physics, Physics)
        self.world.physics.add(self.state_name, physics)

    def step(self, dt=1.0, physics=None):
        """
        Steps this system in the referenced world. Other states are not affected.

        Args:
            dt: time increment (default 1.0)
            physics: specific Physics object to use; defaults to ``self.physics``
        """
        self.world.step(self, dt=dt, physics=physics)

    def __getattr__(self, item):
        """Forwards attribute reads to the referenced State."""
        # The proxy's own attributes never reach __getattr__; the assert
        # guards against accidental recursion if that invariant breaks.
        assert item not in ('world', 'state_name', 'physics', 'state')
        return getattr(self.state, item)

    def __setattr__(self, key, value):
        """
        Changes the referenced state by replacing it in the referenced world —
        except for the proxy's own bookkeeping attributes, which are set
        directly on the proxy object.
        """
        if key in ('world', 'state_name', 'physics', 'state'):
            object.__setattr__(self, key, value)
        else:
            self.state = self.state.copied_with(**{key: value})
# pylint: disable-msg = invalid-name
S = TypeVar('S', bound=State)
class World(object):
    """
    A world object defines a global state as well as a set of rules (Physics objects)
    that define how the state evolves.

    The world manages dependencies among the contained simulations and provides
    convenience methods for creating proxies for specific simulations.

    The method world.step() evolves the whole state or optionally a specific state in time.
    """

    def __init__(self, batch_size=None, add_default_objects=True):
        """
        Args:
            batch_size: int or None, batch dimension shared by contained states
            add_default_objects: if True, adds default global states such as Gravity
        """
        # Initialize all attributes before reset() so that __getattr__ (which reads
        # self.state) is never consulted for a missing attribute during construction.
        self._state = self.physics = self.observers = self.batch_size = None
        self.reset(batch_size, add_default_objects)

    def reset(self, batch_size=None, add_default_objects=True):
        """
        Resets the world to the default configuration.
        This removes all States and observers.

        Args:
            batch_size: int or None (Default value = None)
            add_default_objects: if True, adds defaults like Gravity
        """
        self._state = StateCollection()
        self.physics = self._state.default_physics()
        self.observers = set()
        self.batch_size = batch_size
        if add_default_objects:
            self.add(Gravity())

    @property
    def state(self):
        """Current state of the world as a StateCollection."""
        return self._state

    @property
    def age(self):
        """Alias for world.state.age"""
        return self._state.age

    @state.setter
    def state(self, state):
        """
        Sets the current state of the world and notifies all observers.

        Args:
            state: StateCollection
        """
        assert state is not None
        assert isinstance(state, StateCollection)
        self._state = state
        for observer in self.observers:
            observer(self)

    def step(self, state=None, dt=1.0, physics=None):
        """
        Evolves the current world state by a time increment dt.
        If state is provided, only that state is evolved, leaving the others untouched.
        The optional physics parameter can then be used to override the default physics.
        Otherwise, all states are evolved.

        Calling World.step resolves all dependencies among simulations and then calls
        Physics.step on each simulation to evolve the states.

        Invoking this method alters the world state. To obtain the stepped state
        without modifying the world, use :func:`World.stepped <~world.World.stepped>` instead.

        Args:
            state: State, StateProxy or None (Default value = None)
            dt: time increment (Default value = 1.0)
            physics: Physics object for the state or None for default

        Returns:
            The stepped StateCollection if state is None, otherwise the evolved State.
        """
        if state is None:
            if physics is None:
                physics = self.physics
            self.state = physics.step(self._state, dt)
            return self.state
        else:
            if isinstance(state, StateProxy):
                state = state.state
            s = self.physics.substep(state, self._state, dt, override_physics=physics)
            self.state = self._state.state_replaced(s)
            return s

    def stepped(self, state=None, dt=1.0, physics=None):
        """
        Similar to step() but does not change the state of the world.
        Instead, the new state is returned.

        Args:
            state: State, StateProxy or None (Default value = None)
            dt: time increment (Default value = 1.0)
            physics: Physics object for the state or None for default

        Returns:
            The stepped StateCollection if state is None, otherwise the evolved State.
        """
        if state is None:
            if physics is None:
                physics = self.physics
            # Bug fix: this previously called physics.step(self._state, None, dt),
            # which passed dt=None plus a surplus positional argument and raised a
            # TypeError. The call now mirrors the one used by step().
            return physics.step(self._state, dt)
        else:
            if isinstance(state, StateProxy):
                state = state.state
            return self.physics.substep(state, self._state, dt, override_physics=physics)

    def add(self, state, physics=None):
        # type: (S, Physics) -> S
        """
        Adds a State to the world that will be stepped forward in time each time
        world.step() is invoked.

        Args:
            state: State or list of States
            physics: (optional) Physics to use during world.step(). If a list was
                provided for `state`, a matching list must be given for `physics`.

        Returns:
            StateProxy referencing the current state of the added system. If
            world.state is updated (e.g. because world.step() was called), the
            StateProxy will refer to the updated values.
        """
        if isinstance(state, dict):
            raise ValueError('Cannot add dict to world. Maybe you meant world.add(**dict)?')
        if isinstance(state, (tuple, list)):
            assert isinstance(physics, (tuple, list))
            assert len(state) == len(physics)
            return [self.add(s, p) for s, p in zip(state, physics)]
        else:
            if physics is not None:
                self.physics.add(state.name, physics)
            elif state.default_physics() is not None and not isinstance(state.default_physics(), Static):
                warnings.warn('No physics provided to world.add(%s). In the future this will default to static physics' % state)
            self.state = self.state.state_added(state)
            return StateProxy(self, state.name)

    def add_all(self, *states):
        """
        Deprecated: add a collection of states via world.add(state) for each.

        Args:
            *states: States to add
        """
        warnings.warn('World.add_all() is deprecated. Use World.add(list_of_states) instead.', DeprecationWarning)
        for state in states:
            self.add(state)

    def remove(self, obj):
        """
        Removes a system or collection of systems from the world.

        Args:
            obj: one of the following: State, state name, subclass of State, tuple or list thereof
        """
        if inspect.isclass(obj):
            states = self.state.all_instances(obj)
            self.remove(states)
        elif isinstance(obj, (tuple, list)):
            for state in obj:
                self.remove(state)
        else:
            key = obj if isinstance(obj, str) else obj.name
            self.state = self.state.state_removed(key)
            self.physics.remove(key)

    def get_physics(self, state):
        """
        Looks up the Physics object associated with a given State or StateProxy.
        If no Physics object was registered manually, the state.default_physics()
        object is used.

        Args:
            state: State or StateProxy contained in this world

        Returns:
            Physics
        """
        if isinstance(state, StateProxy):
            state = state.state
        return self.physics.for_(state)

    def __getattr__(self, item):
        # Unknown attribute names resolve to a StateProxy when a state with that
        # name exists in the current StateCollection.
        if item in self.state:
            return StateProxy(self, item)
        else:
            return object.__getattribute__(self, item)
def obstacle_mask(world_or_proxy):
    """
    Builds a binary Field masking all obstacles in the world.

    Args:
        world_or_proxy: World or StateProxy object

    Returns:
        GeometryMask covering the union of all obstacle geometries
    """
    if isinstance(world_or_proxy, StateProxy):
        world = world_or_proxy.world
    else:
        world = world_or_proxy
    assert isinstance(world, World)
    shapes = [obstacle.geometry for obstacle in world.state.all_with_tag('obstacle')]
    return GeometryMask(geom.union(*shapes))
class StateCollection(dict):
    """
    An immutable dict mapping state names to State objects.
    All mutation goes through state_added / state_replaced / state_removed, each of
    which returns a new StateCollection instead of modifying this one.
    """
    def __init__(self, states=None):
        """
        Create a state collection from a dictionary of states.

        Args:
            states (dict or list or tuple or None): dict mapping from state names to states
        """
        if states is None:
            states = {}
        elif not isinstance(states, dict):
            states = {state.name: state for state in states}
        dict.__init__(self, states)
    def __setitem__(self, key, val):
        # Direct item assignment is forbidden; use state_added()/state_replaced().
        raise AttributeError('StateCollections are immutable')
    def all_with_tag(self, tag):
        """Returns all contained states that carry the given tag."""
        return [s for s in self.values() if tag in s.tags]
    def all_instances(self, cls):
        """Returns all contained states that are instances of cls."""
        return [s for s in self.values() if isinstance(s, cls)]
    def state_added(self, state):
        """Returns a new StateCollection with state added; its name must be new."""
        assert state.name not in self, 'A state with name "%s" is already present. Use state_replaced() to replace it.' % state.name
        new_states = self.copy()
        new_states[state.name] = state
        return StateCollection(new_states)
    def state_replaced(self, new_state):
        """Returns a new StateCollection with the same-named state replaced."""
        assert new_state.name in self, 'No state found with name "%s"' % new_state.name
        new_states = dict(self)
        new_states[new_state.name] = new_state
        return StateCollection(new_states)
    def state_removed(self, state):
        """Returns a new StateCollection without the given state (State or name)."""
        name = state if isinstance(state, str) else state.name
        new_states = dict(self)
        del new_states[name]
        return StateCollection(new_states)
    def find(self, name):
        """Deprecated lookup by name; use statecollection[name] instead."""
        warnings.warn("StateCollection.find is deprecated. Use statecollection[name] instead.", DeprecationWarning)
        return dict.__getitem__(self, name)
    def __getitem__(self, item):
        # Lookup accepts a State (resolved by its name), a plain name string,
        # a struct of keys (mapped element-wise), or any object with a .name.
        if isinstance(item, State):
            return self[item.name]
        if isinstance(item, str):
            return dict.__getitem__(self, item)
        if struct.isstruct(item):
            return struct.map(lambda x: self[x], item, content_type=struct.INVALID)
        try:
            return self[item.name]
        except AttributeError as e:
            pass
        raise ValueError('Illegal argument: %s' % item)
    def __getattr__(self, item):
        # Attribute access falls back to item lookup: collection.fluid == collection['fluid']
        return self[item]
    def default_physics(self):
        """Returns a CollectivePhysics stepping all contained states. Deprecated."""
        warnings.warn("StateCollection will be removed in the future.", DeprecationWarning)
        return CollectivePhysics()
    def __repr__(self):
        return '[' + ', '.join((str(s) for s in self)) + ']'
    def __contains__(self, item):
        if isinstance(item, State):
            return item.name in self
        if isinstance(item, str):
            return dict.__contains__(self, item)
        raise ValueError('Illegal type: %s' % type(item))
    def __hash__(self):
        # Constant hash: collections are hashable despite being dict subclasses.
        return 0
    @property
    def states(self):
        return self
    def copied_with(self, **kwargs):
        # Struct-style copy; only the 'states' attribute may be replaced.
        if len(kwargs) == 0:
            return self
        assert len(kwargs) == 1
        name, value = next(iter(kwargs.items()))
        assert name == 'states'
        return StateCollection(value)
    @property
    def shape(self):
        return StateCollection({name: state.shape for name, state in self.items()})
    @property
    def staticshape(self):
        return StateCollection({name: state.staticshape for name, state in self.items()})
    @property
    def dtype(self):
        return StateCollection({name: state.dtype for name, state in self.items()})
CollectiveState = StateCollection  # Backwards-compatible alias.
class CollectivePhysics(Physics):
    """
    Physics implementation for a StateCollection.
    Steps every contained state, resolving blocking dependencies between states by
    sweeping repeatedly: in each sweep, all states whose blocking dependencies were
    already stepped are stepped themselves. Unresolvable (cyclic) dependencies
    raise an AssertionError.
    """
    def __init__(self):
        Physics.__init__(self, {})
        self.physics = {}  # map from state name to Physics
    def step(self, state_collection, dt=1.0, **dependent_states):
        """
        Steps all states in the collection and returns a new StateCollection
        preserving the original order.
        """
        assert len(dependent_states) == 0
        if len(state_collection) == 0:
            return state_collection
        unhandled_states = list(state_collection.values())
        next_states = {}
        partial_next_state_collection = StateCollection(next_states)
        # Each sweep must step at least one state; otherwise there is a cycle,
        # so len(state_collection) sweeps are always sufficient.
        for sweep in range(len(state_collection)):
            for state in tuple(unhandled_states):
                physics = self.for_(state)
                if self._all_dependencies_fulfilled(physics.blocking_dependencies, state_collection, partial_next_state_collection):
                    next_state = self.substep(state, state_collection, dt, partial_next_state_collection=partial_next_state_collection)
                    assert next_state is not None, "step() called on %s returned None for state '%s'" % (type(physics).__name__, state)
                    assert isinstance(next_state, State), "step() called on %s dit not return a State but '%s' for state '%s'" % (type(physics).__name__, next_state, state)
                    assert next_state.name == state.name, "The state name must remain constant during step(). Caused by '%s' on state '%s'." % (type(physics).__name__, state)
                    next_states[next_state.name] = next_state
                    unhandled_states.remove(state)
            partial_next_state_collection = StateCollection(next_states)
            if len(unhandled_states) == 0:
                # Re-emit in the original collection order.
                ordered_states = [partial_next_state_collection[state] for state in state_collection]
                return StateCollection(ordered_states)
        # Error: remaining states could not be stepped -> cyclic blocking dependencies.
        errstr = 'Cyclic blocking_dependencies in simulation: %s' % unhandled_states
        for state in tuple(unhandled_states):
            physics = self.for_(state)
            state_dict = self._gather_dependencies(physics.blocking_dependencies, state_collection, {})
            errstr += '\nState "%s" with physics "%s" depends on %s' % (state, physics, state_dict)
        raise AssertionError(errstr)
    def substep(self, state, state_collection, dt, override_physics=None, partial_next_state_collection=None):
        """
        Steps a single state. Regular dependencies are gathered from
        state_collection; blocking dependencies are gathered from
        partial_next_state_collection when provided.
        """
        physics = self.for_(state) if override_physics is None else override_physics
        # --- gather dependencies ---
        dependent_states = {}
        self._gather_dependencies(physics.dependencies, state_collection, dependent_states)
        if partial_next_state_collection is not None:
            self._gather_dependencies(physics.blocking_dependencies, partial_next_state_collection, dependent_states)
        # --- execute step ---
        next_state = physics.step(state, dt, **dependent_states)
        return next_state
    def _gather_dependencies(self, dependencies, state_collection, result_dict):
        # Fills result_dict with {parameter_name: state or tuple of states} and
        # returns it. Lookup is by explicit state name or by tag.
        for statedependency in dependencies:
            if statedependency.state_name is not None:
                matching_states = state_collection.find(statedependency.state_name)
            else:
                matching_states = state_collection.all_with_tag(statedependency.tag)
            if statedependency.single_state:
                assert len(matching_states) == 1, 'Dependency %s requires 1 state but found %d' % (statedependency, len(matching_states))
                value = matching_states[0]
            else:
                value = tuple(matching_states)
            result_dict[statedependency.parameter_name] = value
        return result_dict
    def _all_dependencies_fulfilled(self, dependencies, all_states, computed_states):
        # True when every dependency of a state has already been stepped
        # (i.e. is present in computed_states).
        state_dict = self._gather_dependencies(dependencies, all_states, {})
        for name, states in state_dict.items():
            if isinstance(states, tuple):
                for state in states:
                    if state.name not in computed_states:
                        return False
            else:  # single state
                if states.name not in computed_states:
                    return False
        return True
    def for_(self, state):
        """Returns the registered Physics for the state, or its default physics."""
        return self.physics[state.name] if state.name in self.physics else state.default_physics()
    def add(self, name, physics):
        """Registers a Physics object for the state with the given name."""
        self.physics[name] = _as_physics(physics)
    def remove(self, name):
        """Unregisters the Physics for the given state name, if any."""
        if name in self.physics:
            del self.physics[name]
world = World()  # Default module-level World instance, provided for convenience.
| 7,651
|
1,808
|
//
// Simple byte wise SPI driver
// Demo how to set up memmap and access SPI registers.
// Code seems to be working but has not been much tested.
// <NAME> 15-Jan-2012
//
// Access from ARM Running Linux
#define BCM2708_PERI_BASE 0x20000000
#define UART0_BASE (BCM2708_PERI_BASE + 0x201000) /* Uart 0 */
#define UART1_BASE (BCM2708_PERI_BASE + 0x215000) /* Uart 1 */
#define MCORE_BASE (BCM2708_PERI_BASE + 0x0000) /* Fake frame buffer device */
#define GPIO_BASE (BCM2708_PERI_BASE + 0x200000) /* GPIO controller */
#define SPI0_BASE (BCM2708_PERI_BASE + 0x204000) /* SPI0 controller */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <dirent.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#define PAGE_SIZE (4*1024)
#define BLOCK_SIZE (4*1024)
int mem_fd;
char *gpio_mem, *gpio_map;
char *spi0_mem, *spi0_map;
// I/O access
volatile unsigned *gpio;
volatile unsigned *spi0;
// SPI operation
// GPIO setup macros. Always use INP_GPIO(x) before using OUT_GPIO(x) or SET_GPIO_ALT(x,y)
#define INP_GPIO(g) *(gpio+((g)/10)) &= ~(7<<(((g)%10)*3))
#define OUT_GPIO(g) *(gpio+((g)/10)) |= (1<<(((g)%10)*3))
#define SET_GPIO_ALT(g,a) *(gpio+(((g)/10))) |= (((a)<=3?(a)+4:(a)==4?3:2)<<(((g)%10)*3))
//
#define SPI0_CNTLSTAT *(spi0 + 0)
#define SPI0_FIFO *(spi0 + 1)
#define SPI0_CLKSPEED *(spi0 + 2)
// SPI0_CNTLSTAT register bits
#define SPI0_CS_CS2ACTHIGH 0x00800000 // CS2 active high
#define SPI0_CS_CS1ACTHIGH 0x00400000 // CS1 active high
#define SPI0_CS_CS0ACTHIGH 0x00200000 // CS0 active high
#define SPI0_CS_RXFIFOFULL 0x00100000 // Receive FIFO full
#define SPI0_CS_RXFIFO3_4 0x00080000 // Receive FIFO 3/4 full
#define SPI0_CS_TXFIFOSPCE 0x00040000 // Transmit FIFO has space
#define SPI0_CS_RXFIFODATA 0x00020000 // Receive FIFO has data
#define SPI0_CS_DONE 0x00010000 // SPI transfer done. WRT to CLR!
#define SPI0_CS_MOSI_INPUT 0x00001000 // MOSI is input, read from MOSI (BI-dir mode)
#define SPI0_CS_DEASRT_CS 0x00000800 // De-assert CS at end
#define SPI0_CS_RX_IRQ 0x00000400 // Receive irq enable
#define SPI0_CS_DONE_IRQ 0x00000200 // irq when done
#define SPI0_CS_DMA_ENABLE 0x00000100 // Run in DMA mode
#define SPI0_CS_ACTIVATE 0x00000080 // Activate: be high before starting
#define SPI0_CS_CS_POLARIT 0x00000040 // Chip selects active high
#define SPI0_CS_CLRTXFIFO 0x00000020 // Clear TX FIFO (auto clear bit)
#define SPI0_CS_CLRRXFIFO 0x00000010 // Clear RX FIFO (auto clear bit)
#define SPI0_CS_CLRFIFOS 0x00000030 // Clear BOTH FIFOs (auto clear bit)
#define SPI0_CS_CLK_IDLHI 0x00000008 // Clock pin is high when idle
#define SPI0_CS_CLKTRANS 0x00000004 // 0=first clock in middle of data bit
// 1=first clock at begin of data bit
#define SPI0_CS_CHIPSEL0 0x00000000 // Use chip select 0
#define SPI0_CS_CHIPSEL1 0x00000001 // Use chip select 1
#define SPI0_CS_CHIPSEL2 0x00000002 // Use chip select 2
#define SPI0_CS_CHIPSELN 0x00000003 // No chip select (e.g. use GPIO pin)
#define SPI0_CS_CLRALL (SPI0_CS_CLRFIFOS|SPI0_CS_DONE)
#define ISASC(x) ((x)>=0x20 && (x)<=0x7F)
void setup_io();
// Demo entry point: map the peripheral registers, then switch GPIO pins 7..11
// to their SPI0 alternate function (ALT0). Requires root (opens /dev/mem).
int main(int argc, char **argv)
{ int g;
  setup_io(); // Set up direct access to I/O for GPIO and SPI
  // Switch GPIO 7..11 to SPI mode (ALT function 0)
  /************************************************************************\
   * You are about to change the GPIO settings of your computer.          *
   * Mess this up and it will stop working!                               *
   * It might be a good idea to 'sync' before running this program        *
   * so at least you still have your code changes written to the SD-card! *
  \************************************************************************/
  for (g=7; g<=11; g++)
  {
    INP_GPIO(g);        // clear the 3 function bits first (= input)
    SET_GPIO_ALT(g,0);  // then select alternate function 0 (SPI0)
  }
  return 0;
} // main
//
// Set up memory-mapped access to the GPIO and SPI0 peripheral registers.
//
// Strategy (historical Raspberry Pi idiom): allocate a page-sized-plus-slack
// heap buffer, round it up to a page boundary, then MAP_FIXED-map the
// peripheral physical address over it. The original malloc pointer is
// intentionally never freed (process-lifetime mapping).
//
void setup_io()
{
   /* open /dev/mem (requires root) */
   if ((mem_fd = open("/dev/mem", O_RDWR|O_SYNC) ) < 0) {
      printf("can't open /dev/mem \n");
      exit (-1);
   }

   /* mmap GPIO */
   if ((gpio_mem = malloc(BLOCK_SIZE + (PAGE_SIZE-1))) == NULL) {
      printf("allocation error \n");
      exit (-1);
   }
   /* round the buffer up to the next page boundary */
   if ((unsigned long)gpio_mem % PAGE_SIZE)
      gpio_mem += PAGE_SIZE - ((unsigned long)gpio_mem % PAGE_SIZE);

   gpio_map = (char *)mmap(            /* BUGFIX: cast now matches char* target */
      (caddr_t)gpio_mem,
      BLOCK_SIZE,
      PROT_READ|PROT_WRITE,
      MAP_SHARED|MAP_FIXED,
      mem_fd,
      GPIO_BASE
   );
   if (gpio_map == MAP_FAILED) {       /* BUGFIX: compare against MAP_FAILED, not a <0 cast */
      printf("mmap error %ld\n", (long)gpio_map);
      exit (-1);
   }
   gpio = (volatile unsigned *)gpio_map;

   /* mmap SPI0 */
   if ((spi0_mem = malloc(BLOCK_SIZE + (PAGE_SIZE-1))) == NULL) {
      printf("allocation error \n");
      exit (-1);
   }
   if ((unsigned long)spi0_mem % PAGE_SIZE)
      spi0_mem += PAGE_SIZE - ((unsigned long)spi0_mem % PAGE_SIZE);

   spi0_map = (char *)mmap(
      (caddr_t)spi0_mem,
      BLOCK_SIZE,
      PROT_READ|PROT_WRITE,
      MAP_SHARED|MAP_FIXED,
      mem_fd,
      SPI0_BASE
   );
   if (spi0_map == MAP_FAILED) {
      printf("mmap error %ld\n", (long)spi0_map);
      exit (-1);
   }
   /* BUGFIX: "0x%d" printed the base address in decimal; use %08x. Also print
      only after the mapping has been verified. */
   printf("SPI mapped from 0x%08x to %p\n", SPI0_BASE, (void *)spi0_map);
   spi0 = (volatile unsigned *)spi0_map;
} // setup_io
| 2,509
|
348
|
<gh_stars>100-1000
{"nom":"Murinais","circ":"9ème circonscription","dpt":"Isère","inscrits":309,"abs":181,"votants":128,"blancs":17,"nuls":6,"exp":105,"res":[{"nuance":"LR","nom":"<NAME>","voix":55},{"nuance":"MDM","nom":"<NAME>","voix":50}]}
| 98
|
5,169
|
{
"name": "SKYKitChat",
"version": "0.0.1",
"summary": "Chat extension for SKYKit",
"description": "This is the client library for the Skygear Chat extension.",
"homepage": "https://github.com/SkygearIO/chat-SDK-iOS",
"license": "Apache License, Version 2.0",
"authors": {
"Oursky Ltd.": "<EMAIL>"
},
"source": {
"git": "https://github.com/SkygearIO/chat-SDK-iOS.git",
"tag": "0.0.1"
},
"platforms": {
"ios": "8.0"
},
"source_files": "SKYKitChat/Classes/**/*",
"dependencies": {
"SKYKit": [
"~> 0.19.0"
]
}
}
| 251
|
453
|
<filename>tests/sfko/sfko/util/schema.py
from ..error import RequiredKeyError
from .log import Log
_log = Log('assert')
def assert_key(mapping, key, target='mapping'):
    """Ensure that ``key`` is present in ``mapping``.

    Args:
        mapping: any container supporting the ``in`` operator.
        key: the key that must be present.
        target: human-readable name of the container, used in the error.

    Returns:
        True when the key is present.

    Raises:
        RequiredKeyError: if ``key`` is missing from ``mapping``.
    """
    missing = key not in mapping
    if missing:
        raise RequiredKeyError(key, target)
    return True
def assert_wmsg(condition, msg='Assertion Failed!'):
    """Assert ``condition``; on failure, log ``msg`` before re-raising.

    Args:
        condition: truthy value expected to hold.
        msg: message logged via the module logger when the assertion fails.

    Raises:
        AssertionError: if ``condition`` is falsy (after logging ``msg``).
    """
    try:
        assert condition
    except AssertionError as failure:
        _log.error(msg)
        raise failure
| 167
|
546
|
<filename>include/Msnhnet/cv/MsnhCVVector.h
#ifndef MSNHCVVECTOR_H
#define MSNHCVVECTOR_H
#include <Msnhnet/config/MsnhnetCfg.h>
#include <Msnhnet/cv/MsnhCVType.h>
#include <iostream>
#include <sstream>
#include <iomanip>
namespace Msnhnet
{
template<typename T>
/// Dynamically sized numeric vector of element type T.
/// Owns a heap buffer of _n elements; copy/move semantics are provided.
class VectorX
{
public:
    /// Creates an n-dimensional vector, zero-initialized.
    VectorX(const int &n)
    {
        _n = n;
        _value = new T[n]();
    }

    /// Creates an empty vector with no storage.
    VectorX(){}

    ~VectorX()
    {
        release();
    }

    /// Creates a vector from a std::vector (must not be empty).
    VectorX(const std::vector<T> &value)
    {
        if(value.empty())
        {
            throw Exception(1,"[VectorX]: input vector should not be empty\n", __FILE__, __LINE__, __FUNCTION__);
        }
        _n = (int)value.size();
        _value = new T[_n]();
        memcpy(_value, value.data(), _n*sizeof(T));
    }

    /// Copy constructor.
    /// BUGFIX: the previous version used `new T(vec._n)`, which allocates a
    /// SINGLE element (initialized to _n) and then memcpy'd _n elements into
    /// it — a heap buffer overflow. An array is now allocated.
    VectorX(const VectorX& vec)
    {
        this->_n = vec._n;
        this->_value = new T[vec._n]();
        if(vec._value!=nullptr)
        {
            memcpy(this->_value, vec._value, sizeof(T)*vec._n);
        }
    }

    /// Move constructor: steals the buffer and nulls the source.
    VectorX(VectorX&& vec)
    {
        this->_n = vec._n;
        this->_value = vec._value;
        vec.setDataNull();
    }

    /// Copy assignment.
    /// BUGFIX: the previous version called release() and then memcpy'd into the
    /// (now null) buffer; storage is reallocated before copying.
    VectorX &operator= (const VectorX& vec)
    {
        if(this!=&vec)
        {
            release();
            this->_n = vec._n;
            this->_value = new T[vec._n]();
            if(vec._value!=nullptr)
            {
                memcpy(this->_value, vec._value, sizeof(T)*vec._n);
            }
        }
        return *this;
    }

    /// Move assignment: releases the old buffer, steals the new one.
    VectorX &operator= (VectorX&& vec)
    {
        if(this!=&vec)
        {
            release();
            this->_n = vec._n;
            this->_value = vec._value;
            vec.setDataNull();
        }
        return *this;
    }

    /// Assignment from std::vector.
    /// BUGFIX: same null-buffer memcpy as the copy assignment; now reallocates.
    VectorX &operator= (const std::vector<T> &vec)
    {
        release();
        this->_n = (int)vec.size();
        this->_value = new T[this->_n]();
        memcpy(this->_value, vec.data(), sizeof(T)*_n);
        return *this;
    }

    /// Frees the buffer and resets the size to 0. Safe to call repeatedly.
    void release()
    {
        if(this->_value!=nullptr)
        {
            delete[] this->_value;
            this->_value = nullptr;
        }
        this->_n = 0;
    }

    /// Detaches the buffer without freeing it (used after moves).
    void setDataNull()
    {
        this->_n = 0;
        this->_value = nullptr;
    }

    /// Sets every component to the given value.
    inline void fill(const T &value)
    {
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i] = value;
        }
    }

    /// Prints the vector to stdout (6 digits for float, 12 for double).
    inline void print()
    {
        std::cout<<"{ VectorX: "<<_n<<std::endl;
        if(isF32Vec())
        {
            for (int i = 0; i < _n; ++i)
            {
                std::cout<<std::setiosflags(std::ios::left)<<std::setprecision(6)<<std::setw(6)<<_value[i]<<" ";
            }
        }
        else if(isF64Vec())
        {
            for (int i = 0; i < _n; ++i)
            {
                std::cout<<std::setiosflags(std::ios::left)<<std::setprecision(12)<<std::setw(12)<<_value[i]<<" ";
            }
        }
        else
        {
            for (int i = 0; i < _n; ++i)
            {
                std::cout<<_value[i]<<" ";
            }
        }
        std::cout<<";\n}"<<std::endl;
    }

    /// Returns a plain-text rendering, same layout as print().
    inline std::string toString() const
    {
        std::stringstream buf;
        buf<<"{ VectorX: "<<_n<<std::endl;
        if(isF32Vec())
        {
            for (int i = 0; i < _n; ++i)
            {
                buf<<std::setiosflags(std::ios::left)<<std::setprecision(6)<<std::setw(6)<<_value[i]<<" ";
            }
        }
        else if(isF64Vec())
        {
            for (int i = 0; i < _n; ++i)
            {
                buf<<std::setiosflags(std::ios::left)<<std::setprecision(12)<<std::setw(12)<<_value[i]<<" ";
            }
        }
        else
        {
            for (int i = 0; i < _n; ++i)
            {
                buf<<_value[i]<<" ";
            }
        }
        buf<<";\n}"<<std::endl;
        return buf.str();
    }

    /// Returns an HTML rendering (uses <br/> as the line separator).
    inline std::string toHtmlString() const
    {
        std::stringstream buf;
        buf<<"{ VectorX: "<<_n<<"<br/>";
        if(isF32Vec())
        {
            for (int i = 0; i < _n; ++i)
            {
                buf<<std::setiosflags(std::ios::left)<<std::setprecision(6)<<std::setw(6)<<_value[i]<<" ";
            }
        }
        else if(isF64Vec())
        {
            for (int i = 0; i < _n; ++i)
            {
                buf<<std::setiosflags(std::ios::left)<<std::setprecision(12)<<std::setw(12)<<_value[i]<<" ";
            }
        }
        else
        {
            for (int i = 0; i < _n; ++i)
            {
                buf<<_value[i]<<" ";
            }
        }
        buf<<";\n}"<<"<br/>";
        return buf.str();
    }

    /// Replaces all components; val.size() must equal the vector size.
    void setVal(const std::vector<T> &val)
    {
        if(val.size() != static_cast<size_t>(_n))
        {
            throw Exception(1,"[VectorX]: set val num must equal data num! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        memcpy(this->_value, val.data(), sizeof(T)*_n);
    }

    /// Sets a single component; index must be in [0, n).
    void setVal(const int &index, const T &val)
    {
        if(index>(_n-1))
        {
            throw Exception(1,"[VectorX]: index out of memory! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        this->_value[index] = val;
    }

    /// Sets every component to zero.
    inline void zero()
    {
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i] = 0;
        }
    }

    /// Negates every component in place.
    inline void reverseSign()
    {
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i] = 0 - this->_value[i];
        }
    }

    /// True when all components are (approximately) zero.
    bool isFuzzyNull() const
    {
        if(isF32Vec())
        {
            for (int i = 0; i < _n; ++i)
            {
                if(fabsf(this->_value[i])>MSNH_F32_EPS)
                {
                    return false;
                }
            }
            return true;
        }
        else if(isF64Vec())
        {
            for (int i = 0; i < _n; ++i)
            {
                if(fabs(this->_value[i])>MSNH_F64_EPS)
                {
                    return false;
                }
            }
            return true;
        }
        else
        {
            for (int i = 0; i < _n; ++i)
            {
                // BUGFIX: was `if(_value[i]>0)`, which treated negative
                // integer entries as null. Any non-zero entry now counts.
                if(this->_value[i] != 0)
                {
                    return false;
                }
            }
            return true;
        }
    }

    /// True when any component is NaN.
    inline bool isNan() const
    {
        for (int i = 0; i < _n; ++i)
        {
            if(std::isnan(static_cast<double>(this->_value[i])))
            {
                return true;
            }
        }
        return false;
    }

    /// True when T is float.
    inline bool isF32Vec() const
    {
        return std::is_same<T,float>::value;
    }

    /// True when T is double.
    inline bool isF64Vec() const
    {
        return std::is_same<T,double>::value;
    }

    /// Returns a unit-length copy. Returns *this when already unit length,
    /// and a zero vector when the length is (near) zero.
    VectorX normalized() const
    {
        if(!(isF32Vec() || isF64Vec()))
        {
            throw Exception(1, "[VectorX] normalize only f32 and f64 is supported!", __FILE__, __LINE__,__FUNCTION__);
        }
        T len = 0;
        VectorX vec(_n);
        for (int i = 0; i < _n; ++i)
        {
            len += this->_value[i]*this->_value[i];
        }
        if(isF32Vec())
        {
            if(fabsf(len - 1.0f) < MSNH_F32_EPS)
            {
                return *this;
            }
            if(fabsf(len) < MSNH_F32_EPS)
            {
                return vec;
            }
            len = sqrtf(len);
        }
        else if(isF64Vec())
        {
            if(fabs(len - 1.0) < MSNH_F64_EPS)
            {
                return *this;
            }
            if(fabs(len) < MSNH_F64_EPS)
            {
                return vec;
            }
            len = sqrt(len);
        }
        for (int i = 0; i < _n; ++i)
        {
            vec[i] = this->_value[i] / len;
        }
        return vec;
    }

    /// Normalizes in place; no-op when already unit length or (near) zero.
    void normalize()
    {
        if(!(isF32Vec() || isF64Vec()))
        {
            throw Exception(1, "[VectorX] normalize only f32 and f64 is supported!", __FILE__, __LINE__,__FUNCTION__);
        }
        T len = 0;
        for (int i = 0; i < _n; ++i)
        {
            len += this->_value[i]*this->_value[i];
        }
        if(this->isF32Vec())
        {
            if(fabsf(len - 1.0f) < MSNH_F32_EPS || fabsf(len) < MSNH_F32_EPS)
            {
                return;
            }
            len = sqrtf(len);
        }
        else
        {
            if(fabs(len - 1.0) < MSNH_F64_EPS || fabs(len) < MSNH_F64_EPS)
            {
                return;
            }
            len = sqrt(len);
        }
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i] = this->_value[i] / len;
        }
    }

    /// Euclidean length.
    inline double length() const
    {
        double len = 0;
        for (int i = 0; i < _n; ++i)
        {
            len += this->_value[i]*this->_value[i];
        }
        return sqrt(len);
    }

    /// Squared Euclidean length (no sqrt).
    inline double lengthSquared() const
    {
        double len = 0;
        for (int i = 0; i < _n; ++i)
        {
            len += this->_value[i]*this->_value[i];
        }
        return len;
    }

    /// Distance from this point to another point.
    inline double distanceToPoint(const VectorX &point) const
    {
        return (*this - point).length();
    }

    /// Distance from this point to the line (point + t*direction).
    /// `direction` is expected to be normalized; a null direction degrades
    /// to the point-to-point distance.
    inline double distanceToLine(const VectorX &point, const VectorX &direction) const
    {
        if(point.getN() != direction.getN())
        {
            throw Exception(1,"[VectorX]: data num of A and B not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        if(direction.getN()<2)
        {
            throw Exception(1,"[VectorX] only 2 dims+ is supported!",__FILE__,__LINE__,__FUNCTION__);
        }
        if(direction.isFuzzyNull())
        {
            return (*this - point).length();
        }
        // NOTE(review): this adds the scalar projection to every component of
        // `point`, rather than point + t*direction. Preserved as-is; verify
        // against the intended projection formula.
        VectorX p = point + VectorX::dotProduct((*this-point)*direction,direction);
        return (*this - p).length();
    }

    /// Signed distance from this point to the plane through `plane` with
    /// normal `normal` (normal expected to be normalized).
    inline double distanceToPlane(const VectorX& plane, const VectorX& normal) const
    {
        if(plane.getN() != normal.getN())
        {
            throw Exception(1,"[VectorX]: data num of A and B not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        if(plane.getN()<3)
        {
            throw Exception(1,"[VectorX] only 3 dims+ is supported!",__FILE__,__LINE__,__FUNCTION__);
        }
        return dotProduct((*this-plane),normal);
    }

    /// 3D cross product (both operands must be 3-dimensional).
    inline static VectorX crossProduct(const VectorX &v1, const VectorX &v2)
    {
        if(v1.getN() != v2.getN())
        {
            throw Exception(1,"[VectorX]: data num of A and B not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        if(v1.getN()!=3)
        {
            throw Exception(1,"[VectorX] only 3 dims is supported!",__FILE__,__LINE__,__FUNCTION__);
        }
        return VectorX({ v1[1]*v2[2] - v1[2]*v2[1],
                         v1[2]*v2[0] - v1[0]*v2[2],
                         v1[0]*v2[1] - v1[1]*v2[0]});
    }

    /// Unit normal of the plane spanned by v1 and v2.
    inline static VectorX normal(const VectorX &v1, const VectorX &v2)
    {
        return crossProduct(v1,v2).normalized();
    }

    /// Unit normal of the plane through the three points v1, v2, v3.
    inline static VectorX normal(const VectorX &v1, const VectorX &v2, const VectorX &v3)
    {
        return crossProduct((v2-v1),(v3-v1)).normalized();
    }

    /// Dot product of two equally sized vectors.
    inline static T dotProduct(const VectorX &A, const VectorX &B)
    {
        if(A.getN() != B.getN())
        {
            throw Exception(1,"[VectorX]: data num of A and B not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        T finalVal = 0;
        for (int i = 0; i < A.getN(); ++i)
        {
            finalVal += A[i]*B[i];
        }
        return finalVal;
    }

    /// Bounds-checked component read.
    inline T operator [](const int &index) const
    {
        if(index > (_n-1))
        {
            throw Exception(1,"[VectorX]: index out of memory! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        return _value[index];
    }

    /// Bounds-checked component reference.
    inline T &operator [](const int &index)
    {
        if(index > (_n-1))
        {
            throw Exception(1,"[VectorX]: index out of memory! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        return _value[index];
    }

    inline friend VectorX operator+ (const VectorX &A, const VectorX &B)
    {
        if(A.getN() != B.getN())
        {
            throw Exception(1,"[VectorX]: data num of A and B not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        VectorX tmp(A.getN());
        for (int i = 0; i < A.getN(); ++i)
        {
            tmp[i] = A[i] + B[i];
        }
        return tmp;
    }

    inline friend VectorX operator+ (T A, const VectorX &B)
    {
        VectorX tmp(B.getN());
        for (int i = 0; i < B.getN(); ++i)
        {
            tmp[i] = A + B[i];
        }
        return tmp;
    }

    inline friend VectorX operator+ (const VectorX &A, T B)
    {
        VectorX tmp(A.getN());
        for (int i = 0; i < A.getN(); ++i)
        {
            tmp[i] = A[i] + B;
        }
        return tmp;
    }

    inline friend VectorX operator- (const VectorX &A, const VectorX &B)
    {
        if(A.getN() != B.getN())
        {
            throw Exception(1,"[VectorX]: data num of A and B not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        VectorX tmp(A.getN());
        for (int i = 0; i < A.getN(); ++i)
        {
            tmp[i] = A[i] - B[i];
        }
        return tmp;
    }

    inline friend VectorX operator- (T A, const VectorX &B)
    {
        VectorX tmp(B.getN());
        for (int i = 0; i < B.getN(); ++i)
        {
            tmp[i] = A - B[i];
        }
        return tmp;
    }

    inline friend VectorX operator- (const VectorX &A, T B)
    {
        VectorX tmp(A.getN());
        for (int i = 0; i < A.getN(); ++i)
        {
            tmp[i] = A[i] - B;
        }
        return tmp;
    }

    /// Unary minus: componentwise negation.
    inline friend VectorX operator- (const VectorX &A)
    {
        VectorX tmp(A.getN());
        for (int i = 0; i < A.getN(); ++i)
        {
            tmp[i] = 0 - A[i];
        }
        return tmp;
    }

    /// Componentwise (Hadamard) product.
    inline friend VectorX operator* (const VectorX &A, const VectorX &B)
    {
        if(A.getN() != B.getN())
        {
            throw Exception(1,"[VectorX]: data num of A and B not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        VectorX tmp(A.getN());
        for (int i = 0; i < A.getN(); ++i)
        {
            tmp[i] = A[i] * B[i];
        }
        return tmp;
    }

    inline friend VectorX operator* (T A, const VectorX &B)
    {
        VectorX tmp(B.getN());
        for (int i = 0; i < B.getN(); ++i)
        {
            tmp[i] = A * B[i];
        }
        return tmp;
    }

    inline friend VectorX operator* (const VectorX &A, T B)
    {
        VectorX tmp(A.getN());
        for (int i = 0; i < A.getN(); ++i)
        {
            tmp[i] = A[i] * B;
        }
        return tmp;
    }

    inline friend VectorX operator/ (const VectorX &A, T B)
    {
        VectorX tmp(A.getN());
        for (int i = 0; i < A.getN(); ++i)
        {
            tmp[i] = A[i] / B;
        }
        return tmp;
    }

    /// Componentwise division.
    inline friend VectorX operator/ (const VectorX &A, const VectorX &B)
    {
        if(A.getN() != B.getN())
        {
            throw Exception(1,"[VectorX]: data num of A and B not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        VectorX tmp(A.getN());
        for (int i = 0; i < A.getN(); ++i)
        {
            tmp[i] = A[i] / B[i];
        }
        return tmp;
    }

    /// Epsilon-tolerant equality for float/double, exact for other types.
    inline friend bool operator== (const VectorX &A, const VectorX &B)
    {
        if(A.getN() != B.getN())
        {
            return false;
        }
        if(A.isF32Vec())
        {
            for (int i = 0; i < A.getN(); ++i)
            {
                if(fabsf(A[i] - B[i])>MSNH_F32_EPS)
                {
                    return false;
                }
            }
        }
        else if(A.isF64Vec())
        {
            for (int i = 0; i < A.getN(); ++i)
            {
                // BUGFIX: fabsf truncated the double operands to float; use fabs.
                if(fabs(A[i] - B[i])>MSNH_F64_EPS)
                {
                    return false;
                }
            }
        }
        else
        {
            for (int i = 0; i < A.getN(); ++i)
            {
                if(A[i] != B[i])
                {
                    return false;
                }
            }
        }
        return true;
    }

    /// Negation of operator== with the same tolerance rules.
    inline friend bool operator!= (const VectorX &A, const VectorX &B)
    {
        if(A.getN() != B.getN())
        {
            return true;
        }
        if(std::is_same<T,float>::value)
        {
            for (int i = 0; i < A.getN(); ++i)
            {
                if(fabsf(A[i] - B[i])>MSNH_F32_EPS)
                {
                    return true;
                }
            }
        }
        else if(std::is_same<T,double>::value)
        {
            for (int i = 0; i < A.getN(); ++i)
            {
                // BUGFIX: fabsf truncated the double operands to float; use fabs.
                if(fabs(A[i] - B[i])>MSNH_F64_EPS)
                {
                    return true;
                }
            }
        }
        else
        {
            for (int i = 0; i < A.getN(); ++i)
            {
                if(A[i] != B[i])
                {
                    return true;
                }
            }
        }
        return false;
    }

    // Compound assignment operators. NOTE(review): like the original, these do
    // not check that A has the same size as *this — confirm callers guarantee it.
    inline VectorX &operator +=(const VectorX &A)
    {
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i]+=A[i];
        }
        return *this;
    }

    inline VectorX &operator +=(T A)
    {
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i]+=A;
        }
        return *this;
    }

    inline VectorX &operator -=(const VectorX &A)
    {
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i]-=A[i];
        }
        return *this;
    }

    inline VectorX &operator -=(T A)
    {
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i]-=A;
        }
        return *this;
    }

    inline VectorX &operator *=(const VectorX &A)
    {
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i]*=A[i];
        }
        return *this;
    }

    inline VectorX &operator *=(T A)
    {
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i]*=A;
        }
        return *this;
    }

    inline VectorX &operator /=(T A)
    {
        for (int i = 0; i < _n; ++i)
        {
            this->_value[i]/=A;
        }
        return *this;
    }

    /// Raw buffer pointer (owned by this object).
    inline T* getValue() const
    {
        return this->_value;
    }

    /// Number of components.
    inline int getN() const
    {
        return this->_n;
    }

protected:
    int _n = 0;            // number of components
    T* _value = nullptr;   // owned heap buffer of _n elements
};
template<int N,typename T>
class Vector:public VectorX<T>
{
public:
    // Fixed-size (compile-time N) vector layered on the dynamically
    // sized VectorX<T>. Conversions from VectorX<T> throw when the
    // source length differs from N.
    Vector():VectorX<T>(N){}

    Vector(const std::vector<T> &value):VectorX<T>(N)
    {
        this->setVal(value);
    }

    Vector(const Vector& vec):VectorX<T>(N)
    {
        memcpy(this->_value,vec._value,sizeof(T)*N);
    }

    Vector(Vector&& vec)
    {
        // Steal the source's buffer and leave it empty.
        this->_n = N;
        this->_value = vec._value;
        vec.setDataNull();
    }

    Vector(const VectorX<T>& vec)
    {
        if(N!=vec.getN())
        {
            throw Exception(1,"[Vector]: data num not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        this->_n = N;
        this->_value = new T[N]();
        // Use the public accessors: reaching into the base's protected
        // members through a VectorX<T> reference (vec._value) is
        // ill-formed from a derived class in standard C++.
        memcpy(this->_value,vec.getValue(),sizeof(T)*N);
    }

    Vector(VectorX<T>&& vec)
    {
        if(N!=vec.getN())
        {
            throw Exception(1,"[Vector]: data num not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        this->_n = N;
        this->_value = vec.getValue();
        vec.setDataNull();
    }

    Vector& operator =(const Vector &vec)
    {
        if(this!=&vec)
        {
            this->_n = N;
            // Reuse the existing buffer when possible; the original
            // unconditionally allocated a fresh one and leaked the old.
            // (_value can only be null after this object was moved from.)
            if(this->_value == nullptr)
            {
                this->_value = new T[N]();
            }
            memcpy(this->_value,vec._value,sizeof(T)*N);
        }
        return *this;
    }

    Vector& operator =(Vector &&vec)
    {
        if(this!=&vec)
        {
            delete [] this->_value;   // free the old buffer (leaked before)
            this->_n = N;
            this->_value = vec._value;
            vec.setDataNull();
        }
        return *this;
    }

    Vector& operator =(const VectorX<T> &vec)
    {
        if(N!=vec.getN())
        {
            throw Exception(1,"[Vector]: data num not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        this->_n = N;
        // Reuse the existing buffer; the original leaked it on every call.
        if(this->_value == nullptr)
        {
            this->_value = new T[N]();
        }
        memcpy(this->_value,vec.getValue(),sizeof(T)*N);
        return *this;
    }

    Vector& operator =(VectorX<T> &&vec)
    {
        if(N!=vec.getN())
        {
            throw Exception(1,"[Vector]: data num not equal! \n", __FILE__, __LINE__, __FUNCTION__);
        }
        delete [] this->_value;   // free the old buffer (leaked before)
        this->_n = N;
        this->_value = vec.getValue();
        vec.setDataNull();
        return *this;
    }
};
typedef VectorX<double> VectorXD;
typedef VectorX<float> VectorXF;
typedef Vector<3,double> EulerD;
typedef Vector<3,double> TranslationD;
typedef Vector<3,double> RotationVecD;
typedef Vector<3,double> LinearVelD;
typedef Vector<3,double> AngularVelD;
typedef Vector<2,double> Vector2D;
typedef Vector<3,double> Vector3D;
typedef Vector<5,double> Vector5D;
typedef Vector<4,double> Vector4D;
typedef Vector<6,double> Vector6D;
typedef Vector<7,double> Vector7D;
typedef Vector<3,float> EulerF;
typedef Vector<3,float> TranslationF;
typedef Vector<3,float> RotationVecF;
typedef Vector<3,float> LinearVelF;
typedef Vector<3,float> AngularVelF;
typedef Vector<2,float> Vector2F;
typedef Vector<3,float> Vector3F;
typedef Vector<4,float> Vector4F;
typedef Vector<5,float> Vector5F;
typedef Vector<6,float> Vector6F;
typedef Vector<7,float> Vector7F;
}
#endif
| 12,975
|
379
|
# -*- coding: utf-8 -*-
"""Sphinx configuration for the George documentation.

Before the normal Sphinx settings, this renders docs/user/kernels.rst
from its template by injecting one autoclass directive per kernel spec
found in kernels/*.yml.
"""
import os
import glob
import yaml
import george

# Inject the kernel docs
d = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
with open(os.path.join(d, "docs", "user", "kernels.rst.template"), "r") as f:
    TEMPLATE = f.read()

fns = glob.glob(os.path.join(d, "kernels", "*.yml"))
if len(fns):
    specs = []
    for fn in fns:
        with open(fn, "r") as f:
            # safe_load: the specs are plain data; yaml.load() without an
            # explicit Loader is deprecated and can execute arbitrary code.
            specs.append(yaml.safe_load(f.read()))

    # Stationary kernels.
    tokens = [
        ".. autoclass:: george.kernels.{0}".format(spec["name"])
        for spec in specs
        if spec["stationary"]
    ]
    TEMPLATE = TEMPLATE.replace("STATIONARYKERNELS", "\n".join(tokens))

    # All remaining (non-stationary) kernels.
    tokens = [
        ".. autoclass:: george.kernels.{0}".format(spec["name"])
        for spec in specs
        if not spec["stationary"]
    ]
    TEMPLATE = TEMPLATE.replace("OTHERKERNELS", "\n".join(tokens))

    with open(os.path.join(d, "docs", "user", "kernels.rst"), "w") as f:
        f.write(TEMPLATE)

extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
    "sphinx.ext.mathjax",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"

# General information about the project.
project = u"George"
copyright = u"2012-2021 <NAME>"
version = george.__version__
release = george.__version__
exclude_patterns = ["_build"]
pygments_style = "sphinx"

# Readthedocs.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Fixed typo: the original spelled this "htmp_theme_options", so Sphinx
# silently ignored the theme options.
html_theme_options = dict(
    analytics_id="analytics_id",
)
html_context = dict(
    display_github=True,
    github_user="dfm",
    github_repo="george",
    github_version="main",
    conf_py_path="/docs/",
)
html_static_path = ["_static"]
html_show_sourcelink = False
| 883
|
1,682
|
/*
Copyright (c) 2019 LinkedIn Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.linkedin.restli.tools.errors;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.UpdateResponse;
import com.linkedin.restli.server.annotations.Action;
import com.linkedin.restli.server.annotations.RestLiSimpleResource;
import com.linkedin.restli.server.annotations.RestMethod;
import com.linkedin.restli.server.annotations.ServiceErrorDef;
import com.linkedin.restli.server.annotations.ServiceErrors;
import com.linkedin.restli.server.annotations.SuccessResponse;
import com.linkedin.restli.server.resources.SimpleResourceTemplate;
import com.linkedin.restli.tools.DummyRecord;
import static com.linkedin.restli.tools.errors.DummyServiceError.Codes.*;
/**
* Simple resource to test IDL generation with defined service errors.
*
* @author <NAME>
*/
@RestLiSimpleResource(name = "simple")
@ServiceErrorDef(DummyServiceError.class)
@ServiceErrors(RESOURCE_LEVEL_ERROR) // resource-level error applies to all methods
public class ServiceErrorSimpleResource extends SimpleResourceTemplate<DummyRecord>
{
  /**
   * This ensures that annotation-specified CRUD methods can specify a method-level service error.
   */
  @RestMethod.Get
  @ServiceErrors(METHOD_LEVEL_ERROR)
  public DummyRecord get()
  {
    // Return value is irrelevant; the IDL generation test only inspects annotations.
    return null;
  }
  /**
   * This ensures that template CRUD methods can specify a method-level service error in conjunction with
   * success statuses. Also uses an error code with a unique error detail type.
   */
  @Override
  @ServiceErrors(ILLEGAL_ACTION)
  @SuccessResponse(statuses = { HttpStatus.S_204_NO_CONTENT })
  public UpdateResponse update(DummyRecord dummyRecord)
  {
    return new UpdateResponse(HttpStatus.S_204_NO_CONTENT);
  }
  /**
   * Ensures that action methods can specify a method-level service error.
   * Also ensures that service errors without messages can be used.
   */
  @Action(name = "doAction")
  @ServiceErrors({ METHOD_LEVEL_ERROR, NO_MESSAGE_ERROR })
  public int doAction()
  {
    // Arbitrary constant; only the method's annotations matter for this test fixture.
    return 2147;
  }
}
| 759
|
1,845
|
<filename>rajawali/src/main/java/org/rajawali3d/WorldParameters.java
package org.rajawali3d;
import org.rajawali3d.math.vector.Vector3;
/**
* Collection of world global parameters. These parameters are constant across all scenes. This class is intended to be
* read only after setup, and so does not include any thread safety mechanisms in the interest of speed. Extreme care
* must be taken if you desire to modify anything in this class while other threads are actively using it.
*
* @author <NAME> (<EMAIL>)
*/
public final class WorldParameters {
    // Scratch vector for the orthogonality check below; shared static state,
    // which is why setWorldAxes is not thread-safe (see class javadoc).
    private static final Vector3 TEMP_VECTOR = new Vector3();
    /**
     * Global Right Axis. Defaults to OpenGL +X axis. It is not safe to modify this {@link Vector3} directly.
     * Instead, use {@link #setWorldAxes(Vector3, Vector3, Vector3)}.
     */
    public static final Vector3 RIGHT_AXIS = Vector3.X.clone();
    /**
     * Global Negative Right Axis. Defaults to OpenGL -X axis. It is not safe to modify this {@link Vector3} directly.
     * Instead, use {@link #setWorldAxes(Vector3, Vector3, Vector3)}.
     */
    public static final Vector3 NEG_RIGHT_AXIS = Vector3.NEG_X.clone();
    /**
     * Global Up Axis. Defaults to OpenGL +Y axis. It is not safe to modify this {@link Vector3} directly.
     * Instead, use {@link #setWorldAxes(Vector3, Vector3, Vector3)}.
     */
    public static final Vector3 UP_AXIS = Vector3.Y.clone();
    /**
     * Global Negative Up Axis. Defaults to OpenGL -Y axis. It is not safe to modify this {@link Vector3} directly.
     * Instead, use {@link #setWorldAxes(Vector3, Vector3, Vector3)}.
     */
    public static final Vector3 NEG_UP_AXIS = Vector3.NEG_Y.clone();
    /**
     * Global Forward Axis. Defaults to OpenGL +Z axis. It is not safe to modify this {@link Vector3} directly.
     * Instead, use {@link #setWorldAxes(Vector3, Vector3, Vector3)}.
     */
    public static final Vector3 FORWARD_AXIS = Vector3.Z.clone();
    /**
     * Global Negative Forward Axis. Defaults to OpenGL -Z axis. It is not safe to modify this {@link Vector3} directly.
     * Instead, use {@link #setWorldAxes(Vector3, Vector3, Vector3)}.
     */
    public static final Vector3 NEG_FORWARD_AXIS = Vector3.NEG_Z.clone();
    /**
     * Sets the world axis values after checking that they are all orthogonal to each other. The check performed
     * is to verify that the cross product between {@code right} and {@code up} is equivalent to {@code forward}
     * within 1ppm error on each component.
     *
     * @param right {@link Vector3} The desired right vector. Must be normalized.
     * @param up {@link Vector3} The desired up vector. Must be normalized.
     * @param forward {@link Vector3} The desired forward vector. Must be normalized.
     *
     * @throws IllegalArgumentException if {@code right x up} does not match {@code forward}.
     */
    public static void setWorldAxes(Vector3 right, Vector3 up, Vector3 forward) {
        // right x up must equal forward (tolerance 1e-6 per component, the
        // "1ppm" of the javadoc) for an orthogonal right-handed basis.
        TEMP_VECTOR.crossAndSet(right, up);
        if (!TEMP_VECTOR.equals(forward, 1e-6)) {
            throw new IllegalArgumentException("World axes must be orthogonal.");
        }
        // Update each axis and keep its negated counterpart in sync.
        RIGHT_AXIS.setAll(right);
        NEG_RIGHT_AXIS.setAll(RIGHT_AXIS).inverse();
        UP_AXIS.setAll(up);
        NEG_UP_AXIS.setAll(UP_AXIS).inverse();
        FORWARD_AXIS.setAll(forward);
        NEG_FORWARD_AXIS.setAll(FORWARD_AXIS).inverse();
    }
}
| 1,180
|
1,283
|
package com.pengrad.telegrambot.model.request;
import java.io.Serializable;
/**
* stas
* 1/12/16.
*/
public class InlineQueryResultVideo extends InlineQueryResult<InlineQueryResultVideo> implements Serializable {
    private final static long serialVersionUID = 0L;
    // Supported MIME types for the video URL.
    public static final String MIME_TEXT_HTML = "text/html";
    public static final String MIME_VIDEO_MP4 = "video/mp4";
    // NOTE(review): fields are snake_case — presumably serialized as-is to
    // match the Telegram Bot API JSON keys; confirm against the serializer
    // configuration before renaming any of them.
    private String video_url;
    private String mime_type;
    private String thumb_url;
    private String title;
    private String caption;
    private String parse_mode;
    private Integer video_width;
    private Integer video_height;
    private Integer video_duration;
    private String description;
    // Convenience constructor: wraps the plain message text in an
    // InputTextMessageContent before delegating.
    public InlineQueryResultVideo(String id, String videoUrl, String mimeType, String messageText, String thumbUrl, String title) {
        this(id, videoUrl, mimeType, new InputTextMessageContent(messageText), thumbUrl, title);
    }
    public InlineQueryResultVideo(String id, String videoUrl, String mimeType, InputMessageContent inputMessageContent, String thumbUrl, String title) {
        super("video", id);
        this.video_url = videoUrl;
        this.mime_type = mimeType;
        this.thumb_url = thumbUrl;
        this.title = title;
        inputMessageContent(inputMessageContent);
    }
    // Fluent setters below return this for call chaining.
    public InlineQueryResultVideo caption(String caption) {
        this.caption = caption;
        return this;
    }
    public InlineQueryResultVideo parseMode(ParseMode parseMode) {
        this.parse_mode = parseMode.name();
        return this;
    }
    public InlineQueryResultVideo videoWidth(Integer videoWidth) {
        this.video_width = videoWidth;
        return this;
    }
    public InlineQueryResultVideo videoHeight(Integer videoHeight) {
        this.video_height = videoHeight;
        return this;
    }
    public InlineQueryResultVideo videoDuration(Integer videoDuration) {
        this.video_duration = videoDuration;
        return this;
    }
    public InlineQueryResultVideo description(String description) {
        this.description = description;
        return this;
    }
}
| 736
|
764
|
<gh_stars>100-1000
{"symbol": "QPY","address": "0x6911270D4BC1915744AEdd785d41D44f47245BD0","overview":{"en": "QPay POWERED BY QUANTAEX | PAYMENT GATEWAY | WALLET"},"email": "<EMAIL>","website": "https://project.quantaex.com/","state": "NORMAL","links": {"blog": "https://medium.com/qpay/","twitter": "https://twitter.com/QuantaEx","telegram": "https://t.me/quantaex","github": "https://github.com/QuantaEx"}}
| 156
|
372
|
/* Editor Settings: expandtabs and use 4 spaces for indentation
* ex: set softtabstop=4 tabstop=8 expandtab shiftwidth=4: *
* -*- mode: c, c-basic-offset: 4 -*- */
/*
* Copyright © BeyondTrust Software 2004 - 2019
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* BEYONDTRUST MAKES THIS SOFTWARE AVAILABLE UNDER OTHER LICENSING TERMS AS
* WELL. IF YOU HAVE ENTERED INTO A SEPARATE LICENSE AGREEMENT WITH
* BEYONDTRUST, THEN YOU MAY ELECT TO USE THE SOFTWARE UNDER THE TERMS OF THAT
* SOFTWARE LICENSE AGREEMENT INSTEAD OF THE TERMS OF THE APACHE LICENSE,
* NOTWITHSTANDING THE ABOVE NOTICE. IF YOU HAVE QUESTIONS, OR WISH TO REQUEST
* A COPY OF THE ALTERNATE LICENSING TERMS OFFERED BY BEYONDTRUST, PLEASE CONTACT
* BEYONDTRUST AT beyondtrust.com/contact
*/
/*
* Copyright (C) BeyondTrust Software. All rights reserved.
*
* Module Name:
*
* provider-main.c
*
* Abstract:
*
* BeyondTrust Security and Authentication Subsystem (LSASS)
*
* Active Directory Authentication Provider
*
* Authors: <NAME> (<EMAIL>)
* <NAME> (<EMAIL>)
* <NAME> (<EMAIL>)
* <NAME> (<EMAIL>)
* <NAME> (<EMAIL>)
*/
#include "adprovider.h"
#include "lsaum_p.h"
static LSA_UM_STATE_HANDLE gLsaUmState = NULL;
/* Create the global user-manager state for the given AD provider.
   Fails with LW_ERROR_INTERNAL if the state already exists. */
DWORD
LsaUmInitialize(
    IN PLSA_AD_PROVIDER_STATE pProviderState
    )
{
    DWORD dwError = 0;
    LSA_UM_STATE_HANDLE hState = NULL;
    dwError = LsaUmpStateCreate(pProviderState, &hState);
    BAIL_ON_LSA_ERROR(dwError);
    /* Only one global state is allowed at a time. */
    if (gLsaUmState)
    {
        dwError = LW_ERROR_INTERNAL;
        BAIL_ON_LSA_ERROR(dwError);
    }
    /* Transfer ownership to the global; clearing hState prevents the
       cleanup path from destroying it. */
    gLsaUmState = hState;
    hState = NULL;
    dwError = 0;
cleanup:
    /* hState is only non-NULL here if ownership was not transferred. */
    if (hState)
    {
        LsaUmpStateDestroy(hState);
    }
    return dwError;
error:
    goto cleanup;
}
/* Destroy the global user-manager state, if any. Safe to call when
   LsaUmInitialize was never called or already cleaned up. */
VOID
LsaUmCleanup(
    VOID
    )
{
    if (gLsaUmState)
    {
        LsaUmpStateDestroy(gLsaUmState);
        gLsaUmState = NULL;
    }
}
/* Thin wrapper: register a user with the global user-manager state. */
DWORD
LsaUmAddUser(
    IN uid_t Uid,
    IN PCSTR pszUserName,
    IN PCSTR pszPassword,
    IN DWORD dwEndTime
    )
{
    return LsaUmpAddUser(gLsaUmState, Uid, pszUserName, pszPassword, dwEndTime);
}
/* Thin wrapper: update the stored password for Uid. */
DWORD
LsaUmModifyUserPassword(
    IN uid_t Uid,
    IN PCSTR pszPassword
    )
{
    return LsaUmpModifyUserPassword(gLsaUmState, Uid, pszPassword);
}
/* Thin wrapper: update the mounted directory recorded for Uid. */
DWORD
LsaUmModifyUserMountedDirectory(
    IN uid_t Uid,
    IN PCSTR pszMountedDirectory
    )
{
    return LsaUmpModifyUserMountedDirectory(gLsaUmState, Uid, pszMountedDirectory);
}
/* Thin wrapper: remove the user identified by Uid from the state. */
DWORD
LsaUmRemoveUser(
    IN uid_t Uid
    )
{
    return LsaUmpRemoveUser(gLsaUmState, Uid);
}
/* Thin wrapper: wake the background thread that re-checks users. */
VOID
LsaUmTriggerCheckUsersThread(
    VOID
    )
{
    LsaUmpTriggerCheckUsersThread(gLsaUmState);
}
| 1,292
|
528
|
#ifndef GEN_SMOOTH_RAND_INTS_H
#define GEN_SMOOTH_RAND_INTS_H
#include "include/zfp/types.h"
// used to compute (square) array sizes
size_t
intPow(size_t base, int exponent);
// a double pointer is passed because memory allocation
// is taken care of within the functions
// generate randomly correlated integers in range:
// [-(2^amplitudeExp - 1), 2^amplitudeExp - 1] (64 bit)
void
generateSmoothRandInts64(size_t minTotalElements, int numDims, int amplitudeExp, int64** outputArr, size_t* outputSideLen, size_t* outputTotalLen);
// generate randomly correlated integers in range:
// [-(2^amplitudeExp - 1), 2^amplitudeExp - 1] (32 bit)
void
generateSmoothRandInts32(size_t minTotalElements, int numDims, int amplitudeExp, int32** outputArr32Ptr, size_t* outputSideLen, size_t* outputTotalLen);
// generate randomly correlated floats in range:
// [-(2^11), 2^11 - 2^(-12)]
void
generateSmoothRandFloats(size_t minTotalElements, int numDims, float** outputArrPtr, size_t* outputSideLen, size_t* outputTotalLen);
// generate randomly correlated doubles in range:
// [-(2^26), 2^26 - 2^(-26)]
void
generateSmoothRandDoubles(size_t minTotalElements, int numDims, double** outputArrPtr, size_t* outputSideLen, size_t* outputTotalLen);
#endif
| 426
|
675
|
<gh_stars>100-1000
#include "OpenMPTaskMgr.h"
#include "engine/core/log/Log.h"
namespace Echo
{
OpenMPTaskMgr::OpenMPTaskMgr()
{
    // Create a blocking CPU thread pool with one worker per hardware thread.
    Echo::CpuThreadPool::Cinfo info;
    info.m_numThreads = std::thread::hardware_concurrency();
    info.m_isBlocking = true;
    m_threadPool = EchoNew(Echo::CpuThreadPool(info));
}
OpenMPTaskMgr::~OpenMPTaskMgr()
{
    // Release any tasks that were queued but never executed. Use the
    // container helper for both lists; the original deleted the
    // animation tasks with a hand-written index loop while already using
    // EchoSafeDeleteContainer for the effect-system tasks.
    EchoSafeDeleteContainer(m_animationUpdateTasks, Job);
    EchoSafeDeleteContainer(m_effectSystemUpdateTasks, Job);
    // Stop the workers before destroying the pool itself.
    m_threadPool->stop();
    EchoSafeDelete(m_threadPool, CpuThreadPool);
}
OpenMPTaskMgr* OpenMPTaskMgr::instance()
{
    // Lazily constructed singleton; the pointer is never deleted, so the
    // instance lives for the remainder of the process.
    static OpenMPTaskMgr* inst = EchoNew(OpenMPTaskMgr);
    return inst;
}
void OpenMPTaskMgr::addTask(TaskType type, CpuThreadPool::Job* task)
{
    // Queue a job for the next execTasks() call of the matching type.
    // Ownership of the job transfers to this manager (it is deleted in
    // the wait*Complete() methods or in the destructor).
    switch (type)
    {
    case TT_AnimationUpdate: m_animationUpdateTasks.emplace_back(task); break;
    case TT_EffectSystem: m_effectSystemUpdateTasks.emplace_back(task); break;
    default: EchoLogError("OpenMPTaskMgr::Unknown task type"); break;
    }
}
void OpenMPTaskMgr::execTasks(TaskType type)
{
    // Move the pending jobs of the given type into the corresponding
    // "...Finished" list (retaining ownership so they can be deleted
    // after completion) and submit them to the thread pool.
    switch (type)
    {
    case TT_AnimationUpdate:
    {
        if (!m_animationUpdateTasks.empty())
        {
            // Snapshot and clear so new jobs can be queued while these run.
            m_animationUpdateTasksFinished = m_animationUpdateTasks;
            m_animationUpdateTasks.clear();
            m_threadPool->processJobs(m_animationUpdateTasksFinished.data(), int(m_animationUpdateTasksFinished.size()));
        }
    }
    break;
    case TT_EffectSystem:
    {
        if (!m_effectSystemUpdateTasks.empty())
        {
            m_effectSystemUpdateTasksFinished = m_effectSystemUpdateTasks;
            m_effectSystemUpdateTasks.clear();
            m_threadPool->processJobs(m_effectSystemUpdateTasksFinished.data(), int(m_effectSystemUpdateTasksFinished.size()));
        }
    }
    break;
    }
}
void OpenMPTaskMgr::waitForAnimationUpdateComplete()
{
    // Block until all submitted animation jobs have run, then free them.
    // NOTE(review): unlike the effect-system path below, onFinished() is
    // not invoked for animation jobs — confirm this asymmetry is intended.
    m_threadPool->waitForComplete(TT_AnimationUpdate);
    EchoSafeDeleteContainer(m_animationUpdateTasksFinished, Job);
}
void OpenMPTaskMgr::waitForEffectSystemUpdateComplete()
{
    // Block until all submitted effect-system jobs have run, notify each
    // job via onFinished(), then free them.
    m_threadPool->waitForComplete(TT_EffectSystem);
    for (CpuThreadPool::Job* job : m_effectSystemUpdateTasksFinished)
    {
        job->onFinished();
    }
    EchoSafeDeleteContainer(m_effectSystemUpdateTasksFinished, Job);
}
}
| 927
|
333
|
<reponame>ydiller/BalancedGroupSoftmax<filename>mmdet/models/bbox_heads/DCM_bbox_head.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
from mmdet.core import (delta2bbox, force_fp32,
multiclass_nms)
from .convfc_bbox_head import SharedFCBBoxHead
from ..builder import build_loss
from ..registry import HEADS
from ..losses import accuracy
@HEADS.register_module
class DCMBBoxHead(SharedFCBBoxHead):
    """Shared-FC bbox head variant for DCM that also returns the
    pre-ReLU features of the last shared FC layer.

    Differences from the base head:
      * ``forward`` returns ``(cls_score, bbox_pred, before_relu)``.
      * ``get_det_bboxes`` uses ``cls_score`` directly as the final
        scores (no softmax), per the intentionally disabled code in the
        original implementation.
    """

    def __init__(self,
                 num_fcs=2,
                 fc_out_channels=1024,
                 *args,
                 **kwargs):
        super(DCMBBoxHead, self).__init__(num_fcs=num_fcs,
                                          fc_out_channels=fc_out_channels,
                                          *args, **kwargs)

    def forward(self, x):
        # shared part
        if self.num_shared_convs > 0:
            for conv in self.shared_convs:
                x = conv(x)
        # Fallback so ``before_relu`` is always bound: with
        # num_shared_fcs == 0 the original code raised NameError at the
        # return statement.
        before_relu = x
        if self.num_shared_fcs > 0:
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.view(x.size(0), -1)
            for fc in self.shared_fcs:
                before_relu = fc(x)
                x = self.relu(before_relu)
        # separate branches (both operate on the same shared features)
        x_cls = x
        x_reg = x
        cls_score = self.fc_cls(x_cls) if self.with_cls else None
        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
        return cls_score, bbox_pred, before_relu

    @force_fp32(apply_to=('cls_score', 'bbox_pred'))
    def get_det_bboxes(self,
                       rois,
                       cls_score,
                       bbox_pred,
                       img_shape,
                       scale_factor,
                       rescale=False,
                       cfg=None):
        # Deliberately no softmax here: DCM consumes the raw classifier
        # outputs as scores.
        scores = cls_score
        if bbox_pred is not None:
            bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,
                                self.target_stds, img_shape)
        else:
            # No regression branch: fall back to the proposal boxes.
            bboxes = rois[:, 1:].clone()
            if img_shape is not None:
                bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
                bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
        if rescale:
            # Map boxes back to the original image scale.
            if isinstance(scale_factor, float):
                bboxes /= scale_factor
            else:
                bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)
        if cfg is None:
            return bboxes, scores
        else:
            det_bboxes, det_labels = multiclass_nms(bboxes, scores,
                                                    cfg.score_thr, cfg.nms,
                                                    cfg.max_per_img)
            return det_bboxes, det_labels
| 1,717
|
5,169
|
{
"name": "MadSqliteSwift",
"version": "0.2.1",
"summary": "A simple Sqlite Abstraction",
"description": "A simple Sqlite Abstraction with FTS5 and R*Tree enabled",
"homepage": "https://manimaul.github.io/madsqlite/",
"license": {
"type": "BSD",
"file": "LICENSE.md"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"documentation_url": "https://manimaul.github.io/madsqlite/ios",
"source": {
"git": "https://github.com/manimaul/madsqlite-ios-swift.git",
"tag": "0.2.1"
},
"platforms": {
"ios": "10.1"
},
"source_files": "MadSqliteSwift/**/*.{swift}",
"requires_arc": true,
"dependencies": {
"MadSqlite": [
"0.2.1"
]
},
"pushed_with_swift_version": "3.0"
}
| 334
|
2,072
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import re
from typing import Dict
from apache_atlas.client.base_client import AtlasClient
from apache_atlas.exceptions import AtlasServiceException
from apache_atlas.model.misc import SearchFilter
from apache_atlas.model.typedef import AtlasTypesDef
from requests import Timeout
from databuilder.types.atlas.types_def import (
application_schema, bookmark_schema, cluster_schema, column_schema, column_table_relation, dashboard_chart_schema,
dashboard_execution_schema, dashboard_group_schema, dashboard_query_schema, dashboard_schema, data_owner_schema,
database_cluster_relation, database_schema, hive_table_partition, lineage_schema, reader_referenceable_relation,
reader_schema, report_schema, schema_cluster_relation, schema_schema, source_schema, table_partition_schema,
table_schema, table_schema_relation, table_source_relation, user_reader_relation, user_schema,
)
LOGGER = logging.getLogger(__name__)
# noinspection PyMethodMayBeStatic
class AtlasEntityInitializer:
    """Creates or updates the Apache Atlas type definitions required by
    Amundsen's databuilder.

    Each ``create_*`` helper applies one JSON schema from
    ``databuilder.types.atlas.types_def`` through :meth:`create_or_update`.
    Use :meth:`create_required_entities` to apply them all in dependency
    order.
    """

    def __init__(self, client: AtlasClient) -> None:
        # Despite the name, this is the Atlas REST client, not a DB driver.
        self.driver = client

    def assign_subtypes(self, regex: str, super_type: str) -> None:
        """Append ``super_type`` to every existing entity definition whose
        name matches ``regex`` (e.g. make every ``*_table`` type a subtype
        of ``Table``)."""
        # Fixed truncated log message (originally ended "...with postfix ").
        LOGGER.info(f'\nAssigning super type {super_type} to all entity definitions matching "{regex}"')
        entities_to_update = []
        entity_defs = self.driver.typedef.get_all_typedefs(search_filter=SearchFilter()).get('entityDefs', [])
        for e in entity_defs:
            if re.compile(regex).match(e.name) is not None:
                LOGGER.info(f'Assigning {e.name} as a subtype of {super_type}')
                # NOTE(review): attribute access (e.name) and item access
                # (e["superTypes"]) are mixed here — the client's typedef
                # objects apparently support both; confirm before changing.
                e["superTypes"].append(super_type)
                entities_to_update.append(e)
        typedef_dict = {
            "entityDefs": entities_to_update
        }
        self.driver.typedef.update_atlas_typedefs(AtlasTypesDef(attrs=typedef_dict))
        LOGGER.info(f'Assignment of "{super_type}" Entity to existing "{regex}" entities Completed.\n')

    def create_or_update(self, typedef_dict: Dict, info: str, attempt: int = 1) -> None:
        """Create the given typedef; if it already exists, update it.
        Retries up to 3 times on read timeouts."""
        try:
            LOGGER.info(f"Trying to create {info} Entity")
            self.driver.typedef.create_atlas_typedefs(AtlasTypesDef(attrs=typedef_dict))
        except AtlasServiceException:
            LOGGER.info(f"Already Exists, updating {info} Entity")
            try:
                self.driver.typedef.update_atlas_typedefs(AtlasTypesDef(attrs=typedef_dict))
            except Exception:
                # This is a corner case, for Atlas Sample Data.
                # Logger.warn is a deprecated alias of Logger.warning.
                LOGGER.warning(f"Error updating {info} Entity.", exc_info=True)
        except Timeout:
            # Sometimes on local atlas instance you do get ReadTimeout a lot.
            # This will try to apply definition 3 times and then cancel.
            if attempt < 4:
                LOGGER.info("ReadTimeout - Another Try.")
                self.create_or_update(typedef_dict, info, attempt + 1)
            else:
                LOGGER.info(f"ReadTimeout Exception - Cancelling Operation: {attempt}", exc_info=True)
        except Exception:
            LOGGER.info(f"Error creating/updating {info} Entity Definition", exc_info=True)
        finally:
            LOGGER.info(f"Applied {info} Entity Definition")

    def get_schema_dict(self, schema: str) -> Dict:
        """Parse a JSON schema string into a dict."""
        return json.loads(schema)

    # One helper per type definition. The order in which they are invoked
    # (see create_required_entities) matters: dependencies come first.
    def create_table_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(table_schema), "Table")

    def create_column_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(column_schema), "Column")

    def create_column_table_relation(self) -> None:
        self.create_or_update(self.get_schema_dict(column_table_relation), "Column <-> Table")

    def create_cluster_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(cluster_schema), "Cluster")

    def create_database_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(database_schema), "Database")

    def create_database_cluster_relation(self) -> None:
        self.create_or_update(self.get_schema_dict(database_cluster_relation), "Database <-> Cluster")

    def create_schema_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(schema_schema), "Schema")

    def create_schema_cluster_relation(self) -> None:
        self.create_or_update(self.get_schema_dict(schema_cluster_relation), "Schema <-> Database")

    def create_table_schema_relation(self) -> None:
        self.create_or_update(self.get_schema_dict(table_schema_relation), "Table <-> Schema")

    def create_user_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(user_schema), "User")

    def create_reader_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(reader_schema), "Reader")

    def create_bookmark_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(bookmark_schema), "Bookmark")

    def create_report_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(report_schema), "Report")

    def create_user_reader_relation(self) -> None:
        self.create_or_update(self.get_schema_dict(user_reader_relation), "User <-> Reader")

    def create_reader_referenceable_relation(self) -> None:
        self.create_or_update(self.get_schema_dict(reader_referenceable_relation), "Reader <-> Referenceable")

    def create_table_partition_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(table_partition_schema), "Partition")

    def create_hive_table_partition(self) -> None:
        self.create_or_update(self.get_schema_dict(hive_table_partition), "Hive Table Partition")

    def create_data_owner_relation(self) -> None:
        self.create_or_update(self.get_schema_dict(data_owner_schema), "Data Owner Relation")

    def create_application_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(application_schema), "Application")

    def create_source_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(source_schema), "Source")

    def create_table_source_relation(self) -> None:
        self.create_or_update(self.get_schema_dict(table_source_relation), "Table <-> Source")

    def create_lineage_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(lineage_schema), "LineageProcess")

    def create_dashboard_group_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(dashboard_group_schema), "Dashboard Group")

    def create_dashboard_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(dashboard_schema), "Dashboard")

    def create_dashboard_chart_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(dashboard_chart_schema), "Dashboard Chart")

    def create_dashboard_query_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(dashboard_query_schema), "Dashboard Query")

    def create_dashboard_execution_schema(self) -> None:
        self.create_or_update(self.get_schema_dict(dashboard_execution_schema), "Dashboard Execution")

    def create_dashboard_cluster_relation(self) -> None:
        self.create_or_update(self.get_schema_dict(database_cluster_relation), "Dashboard <-> Cluster")

    def create_required_entities(self, fix_existing_data: bool = False) -> None:
        """
        IMPORTANT: The order of the entity definition matters.
        Please keep this order.
        :return: Creates or Updates the entity definition in Apache Atlas
        """
        self.create_cluster_schema()
        self.create_column_schema()
        self.create_reader_schema()
        self.create_user_schema()
        self.create_bookmark_schema()
        self.create_report_schema()
        self.create_database_schema()
        self.create_database_cluster_relation()
        self.create_schema_schema()
        self.create_schema_cluster_relation()
        self.create_table_schema()
        self.create_column_table_relation()
        self.create_table_schema_relation()
        self.create_source_schema()
        self.create_table_source_relation()
        self.create_application_schema()
        self.create_lineage_schema()
        self.assign_subtypes(regex="(.*)_table$", super_type="Table")
        self.assign_subtypes(regex="(.*)_column$", super_type="Column")
        self.create_user_reader_relation()
        self.create_reader_referenceable_relation()
        self.create_table_partition_schema()
        self.create_hive_table_partition()
        self.create_data_owner_relation()
        self.create_dashboard_group_schema()
        self.create_dashboard_schema()
        self.create_dashboard_query_schema()
        self.create_dashboard_chart_schema()
        self.create_dashboard_execution_schema()
        self.create_dashboard_cluster_relation()
| 3,592
|
1,006
|
/****************************************************************************
* arch/arm/src/str71x/str71x_rtc.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM_SRC_STR71X_STR71X_RTC_H
#define __ARCH_ARM_SRC_STR71X_STR71X_RTC_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include "str71x_map.h"

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* RTC Registers ************************************************************/

/* Register addresses are byte offsets from the RTC peripheral base address
 * (STR71X_RTC_BASE, provided by str71x_map.h).  Each register is 16-bits
 * wide even though consecutive registers are spaced 4 bytes apart.
 */

#define STR71X_RTC_CRH  (STR71X_RTC_BASE + 0x0000)  /* 16-bits wide */
#define STR71X_RTC_CRL  (STR71X_RTC_BASE + 0x0004)  /* 16-bits wide */
#define STR71X_RTC_PRLH (STR71X_RTC_BASE + 0x0008)  /* 16-bits wide */
#define STR71X_RTC_PRLL (STR71X_RTC_BASE + 0x000c)  /* 16-bits wide */
#define STR71X_RTC_DIVH (STR71X_RTC_BASE + 0x0010)  /* 16-bits wide */
#define STR71X_RTC_DIVL (STR71X_RTC_BASE + 0x0014)  /* 16-bits wide */
#define STR71X_RTC_CNTH (STR71X_RTC_BASE + 0x0018)  /* 16-bits wide */
#define STR71X_RTC_CNTL (STR71X_RTC_BASE + 0x001c)  /* 16-bits wide */
#define STR71X_RTC_ALRH (STR71X_RTC_BASE + 0x0020)  /* 16-bits wide */
#define STR71X_RTC_ALRL (STR71X_RTC_BASE + 0x0024)  /* 16-bits wide */

/* Register bit settings ****************************************************/

/* RTC control register.  CRH holds the interrupt enable bits; CRL holds the
 * corresponding interrupt request (pending) flags plus configuration/status
 * control bits.
 */

#define STR71X_RTCCRH_SEN   (0x0001) /* Bit 0: Second interrupt enable */
#define STR71X_RTCCRH_AEN   (0x0002) /* Bit 1: Alarm interrupt enable */
#define STR71X_RTCCRH_OWEN  (0x0004) /* Bit 2: Overflow interrupt enable */
#define STR71X_RTCCRH_GEN   (0x0008) /* Bit 3: Global interrupt enable */

#define STR71X_RTCCRL_SIR   (0x0001) /* Bit 0: Second interrupt request */
#define STR71X_RTCCRL_AIR   (0x0002) /* Bit 1: Alarm interrupt request */
#define STR71X_RTCCRL_OWIR  (0x0004) /* Bit 2: Overflow interrupt request */
#define STR71X_RTCCRL_GIR   (0x0008) /* Bit 3: Global interrupt request */
#define STR71X_RTCCRL_CNF   (0x0010) /* Bit 4: Enter configuration mode */
#define STR71X_RTCCRL_RTOFF (0x0020) /* Bit 5: RTC Operation Off */

/****************************************************************************
 * Public Types
 ****************************************************************************/

/****************************************************************************
 * Public Data
 ****************************************************************************/

/****************************************************************************
 * Public Functions Prototypes
 ****************************************************************************/

#endif /* __ARCH_ARM_SRC_STR71X_STR71X_RTC_H */
| 1,100
|
345
|
import unittest
from programy.clients.events.console.config import ConsoleConfiguration
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.triggers.config import TriggerConfiguration
class TriggersConfigurationTests(unittest.TestCase):
    """Tests for loading TriggerConfiguration from YAML.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced with ``assertEqual`` throughout.
    """

    def test_with_data(self):
        # A fully specified triggers section should load the given manager.
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        console:
            triggers:
                manager: programy.triggers.rest.RestTriggerManager
        """, ConsoleConfiguration(), ".")

        console_config = yaml.get_section("console")

        triggers_config = TriggerConfiguration()
        triggers_config.load_config_section(yaml, console_config, ".")

        self.assertEqual("programy.triggers.rest.RestTriggerManager", triggers_config.manager)

    def test_without_data(self):
        # No "console" section at all (only "bot") -> defaults should apply.
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        bot:
            triggers:
        """, ConsoleConfiguration(), ".")

        console_config = yaml.get_section("console")

        triggers_config = TriggerConfiguration()
        triggers_config.load_config_section(yaml, console_config, ".")

        self.assertEqual("programy.triggers.local.LocalTriggerManager", triggers_config.manager)

    def test_with_no_data(self):
        # A "bot" section with no triggers entry at all -> defaults again.
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        bot:
        """, ConsoleConfiguration(), ".")

        console_config = yaml.get_section("console")

        triggers_config = TriggerConfiguration()
        triggers_config.load_config_section(yaml, console_config, ".")

        self.assertEqual("programy.triggers.local.LocalTriggerManager", triggers_config.manager)

    def test_with_additional_data(self):
        # Extra keys beyond "manager" are retained and exposed via value().
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        console:
            triggers:
                manager: programy.triggers.rest.RestTriggerManager
                url: http://localhost:8989/api/v1.0/trigger
                method: POST
                token: <PASSWORD>
        """, ConsoleConfiguration(), ".")

        console_config = yaml.get_section("console")

        triggers_config = TriggerConfiguration()
        triggers_config.load_config_section(yaml, console_config, ".")

        self.assertEqual("programy.triggers.rest.RestTriggerManager", triggers_config.manager)
        self.assertEqual(triggers_config.value("url"), "http://localhost:8989/api/v1.0/trigger")
        self.assertEqual(triggers_config.value("method"), "POST")
        self.assertEqual(triggers_config.value("token"), "<PASSWORD>")

    def test_to_yaml_no_defaults(self):
        # to_yaml(defaults=False) serialises the currently configured manager.
        triggers_config = TriggerConfiguration()
        triggers_config._manager = "programy.triggers.local.LocalTriggerManager2"

        data = {}
        triggers_config.to_yaml(data, defaults=False)
        self.assertEqual({'manager': 'programy.triggers.local.LocalTriggerManager2'}, data)

    def test_to_yaml_with_defaults(self):
        # to_yaml(defaults=True) always serialises the default local manager.
        triggers_config = TriggerConfiguration()
        triggers_config._manager = TriggerConfiguration.LOCAL_MANAGER

        data = {}
        triggers_config.to_yaml(data, defaults=True)
        self.assertEqual({'manager': 'programy.triggers.local.LocalTriggerManager'}, data)

    def test_defaults(self):
        triggers_config = TriggerConfiguration()
        data = {}
        triggers_config.to_yaml(data, True)

        TriggersConfigurationTests.assert_defaults(self, data)

    @staticmethod
    def assert_defaults(test, data):
        # Shared with other test modules that need to verify default output.
        test.assertEqual(data['manager'], TriggerConfiguration.LOCAL_MANAGER)
| 1,484
|
14,668
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/service/common_decoder.h"
#include "gpu/command_buffer/service/mocks.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
// A freshly constructed bucket is empty and yields no data pointer.
TEST(CommonDecoderBucket, Basic) {
  CommonDecoder::Bucket bucket;
  EXPECT_EQ(0u, bucket.size());
  EXPECT_TRUE(nullptr == bucket.GetData(0, 0));
}

// SetSize() can both grow and shrink the bucket.
TEST(CommonDecoderBucket, Size) {
  CommonDecoder::Bucket bucket;
  bucket.SetSize(24);
  EXPECT_EQ(24u, bucket.size());
  bucket.SetSize(12);
  EXPECT_EQ(12u, bucket.size());
}

// GetData() succeeds only when offset + size stays within the bucket.
TEST(CommonDecoderBucket, GetData) {
  CommonDecoder::Bucket bucket;
  bucket.SetSize(24);
  EXPECT_TRUE(nullptr != bucket.GetData(0, 0));
  EXPECT_TRUE(nullptr != bucket.GetData(24, 0));   // zero-length read at end is OK
  EXPECT_TRUE(nullptr == bucket.GetData(25, 0));   // offset past end fails
  EXPECT_TRUE(nullptr != bucket.GetData(0, 24));
  EXPECT_TRUE(nullptr == bucket.GetData(0, 25));   // size past end fails
  bucket.SetSize(23);
  EXPECT_TRUE(nullptr == bucket.GetData(0, 24));   // old range invalid after shrink
}

// SetData() copies into the bucket and rejects out-of-range writes.
TEST(CommonDecoderBucket, SetData) {
  CommonDecoder::Bucket bucket;
  static const char data[] = "testing";  // 8 bytes including the NUL
  bucket.SetSize(10);
  EXPECT_TRUE(bucket.SetData(data, 0, sizeof(data)));
  EXPECT_EQ(0, memcmp(data, bucket.GetData(0, sizeof(data)), sizeof(data)));
  EXPECT_TRUE(bucket.SetData(data, 2, sizeof(data)));  // 2 + 8 == 10, still fits
  EXPECT_EQ(0, memcmp(data, bucket.GetData(2, sizeof(data)), sizeof(data)));
  EXPECT_FALSE(bucket.SetData(data, 0, sizeof(data) * 2));  // too large
  EXPECT_FALSE(bucket.SetData(data, 5, sizeof(data)));      // 5 + 8 > 10
}
// Test decoder that widens access to the protected DoCommonCommand() and
// GetBucket() members of CommonDecoder so the tests can drive them directly.
class TestCommonDecoder : public CommonDecoder {
 public:
  explicit TestCommonDecoder(DecoderClient* client,
                             CommandBufferServiceBase* command_buffer_service)
      : CommonDecoder(client, command_buffer_service) {}

  // Forwards to the protected CommonDecoder::DoCommonCommand().
  error::Error DoCommand(unsigned int command,
                         unsigned int arg_count,
                         const volatile void* cmd_data) {
    return DoCommonCommand(command, arg_count, cmd_data);
  }

  // Re-exports the protected CommonDecoder::GetBucket() as public.
  CommonDecoder::Bucket* GetBucket(uint32_t id) const {
    return CommonDecoder::GetBucket(id);
  }
};
// Fixture wiring a TestCommonDecoder to a fake command buffer service with a
// single valid transfer buffer of kBufferSize bytes.
class CommonDecoderTest : public testing::Test {
 protected:
  static const size_t kBufferSize = 1024;
  // A shared memory id that is guaranteed not to exist.
  static const uint32_t kInvalidShmId = UINT32_MAX;

  CommonDecoderTest() : decoder_(&client_, &command_buffer_service_) {}

  void SetUp() override {
    // Registers a transfer buffer and stores its id in valid_shm_id_.
    command_buffer_service_.CreateTransferBufferHelper(kBufferSize,
                                                       &valid_shm_id_);
  }
  void TearDown() override {}

  // Executes a fixed-size command through the decoder.
  template <typename T>
  error::Error ExecuteCmd(const T& cmd) {
    static_assert(T::kArgFlags == cmd::kFixed,
                  "T::kArgFlags should equal cmd::kFixed");
    return decoder_.DoCommand(cmd.header.command, cmd.header.size - 1, &cmd);
  }

  // Executes a variable-size ("immediate") command.  data_size is unused
  // here because cmd.header.size already encodes the total entry count.
  template <typename T>
  error::Error ExecuteImmediateCmd(const T& cmd, size_t data_size) {
    static_assert(T::kArgFlags == cmd::kAtLeastN,
                  "T::kArgFlags should equal cmd::kAtLeastN");
    return decoder_.DoCommand(cmd.header.command, cmd.header.size - 1, &cmd);
  }

  // Returns a pointer of type T into the valid transfer buffer at offset.
  template <typename T>
  T GetSharedMemoryAs(size_t offset) {
    void* memory =
        command_buffer_service_.GetTransferBuffer(valid_shm_id_)->memory();
    return reinterpret_cast<T>(static_cast<uint8_t*>(memory) + offset);
  }

  FakeCommandBufferServiceBase command_buffer_service_;
  FakeDecoderClient client_;
  TestCommonDecoder decoder_;
  int32_t valid_shm_id_ = 0;  // filled in by SetUp()
};

// Out-of-class definitions for the in-class initialized static constants
// (required prior to C++17 when the constants are odr-used).
const size_t CommonDecoderTest::kBufferSize;
const uint32_t CommonDecoderTest::kInvalidShmId;
// An unknown command id is rejected with kUnknownCommand.
TEST_F(CommonDecoderTest, DoCommonCommandInvalidCommand) {
  EXPECT_EQ(error::kUnknownCommand, decoder_.DoCommand(999999, 0, nullptr));
}

// Noop commands of any skip size succeed and do nothing.
TEST_F(CommonDecoderTest, HandleNoop) {
  cmd::Noop cmd;
  const uint32_t kSkipCount = 5;
  cmd.Init(kSkipCount);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(
                cmd, kSkipCount * kCommandBufferEntrySize));
  const uint32_t kSkipCount2 = 1;
  cmd.Init(kSkipCount2);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(
                cmd, kSkipCount2 * kCommandBufferEntrySize));
}

// SetToken updates the command buffer's current token value.
TEST_F(CommonDecoderTest, SetToken) {
  cmd::SetToken cmd;
  const int32_t kTokenId = 123;
  command_buffer_service_.SetToken(0);
  cmd.Init(kTokenId);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(kTokenId, command_buffer_service_.GetState().token);
}
// SetBucketSize creates, resizes, and empties buckets on demand.
TEST_F(CommonDecoderTest, SetBucketSize) {
  cmd::SetBucketSize cmd;
  const uint32_t kBucketId = 123;
  const uint32_t kBucketLength1 = 1234;
  const uint32_t kBucketLength2 = 78;
  // Check the bucket does not exist.
  EXPECT_TRUE(nullptr == decoder_.GetBucket(kBucketId));
  // Check we can create one.
  cmd.Init(kBucketId, kBucketLength1);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  CommonDecoder::Bucket* bucket;
  bucket = decoder_.GetBucket(kBucketId);
  EXPECT_TRUE(nullptr != bucket);
  EXPECT_EQ(kBucketLength1, bucket->size());
  // Check we can change it.
  cmd.Init(kBucketId, kBucketLength2);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  bucket = decoder_.GetBucket(kBucketId);
  EXPECT_TRUE(nullptr != bucket);
  EXPECT_EQ(kBucketLength2, bucket->size());
  // Check we can delete it.  NOTE: a zero size empties the bucket but the
  // bucket object itself still exists (GetBucket is dereferenced below).
  cmd.Init(kBucketId, 0);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  bucket = decoder_.GetBucket(kBucketId);
  EXPECT_EQ(0u, bucket->size());
}
// SetBucketData copies bytes from shared memory into a bucket, honoring the
// destination offset and rejecting invalid buckets/offsets/sizes.
TEST_F(CommonDecoderTest, SetBucketData) {
  cmd::SetBucketSize size_cmd;
  cmd::SetBucketData cmd;

  static const char kData[] = "1234567890123456789";

  const uint32_t kBucketId = 123;
  const uint32_t kInvalidBucketId = 124;

  size_cmd.Init(kBucketId, sizeof(kData));
  EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
  CommonDecoder::Bucket* bucket = decoder_.GetBucket(kBucketId);
  // Check the data is not there.
  EXPECT_NE(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));

  // Check we can set it.
  const uint32_t kSomeOffsetInSharedMemory = 50;
  void* memory = GetSharedMemoryAs<void*>(kSomeOffsetInSharedMemory);
  memcpy(memory, kData, sizeof(kData));
  cmd.Init(kBucketId, 0, sizeof(kData), valid_shm_id_,
           kSomeOffsetInSharedMemory);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));

  // Check we can set it partially.
  static const char kData2[] = "ABCEDFG";
  const uint32_t kSomeOffsetInBucket = 5;
  memcpy(memory, kData2, sizeof(kData2));
  cmd.Init(kBucketId, kSomeOffsetInBucket, sizeof(kData2), valid_shm_id_,
           kSomeOffsetInSharedMemory);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(0, memcmp(bucket->GetData(kSomeOffsetInBucket, sizeof(kData2)),
                      kData2, sizeof(kData2)));
  const char* bucket_data = bucket->GetDataAs<const char*>(0, sizeof(kData));
  // Check that nothing was affected outside of updated area.
  EXPECT_EQ(kData[kSomeOffsetInBucket - 1],
            bucket_data[kSomeOffsetInBucket - 1]);
  EXPECT_EQ(kData[kSomeOffsetInBucket + sizeof(kData2)],
            bucket_data[kSomeOffsetInBucket + sizeof(kData2)]);

  // Check that it fails if the bucket_id is invalid
  cmd.Init(kInvalidBucketId, kSomeOffsetInBucket, sizeof(kData2), valid_shm_id_,
           kSomeOffsetInSharedMemory);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));

  // Check that it fails if the offset is out of range.
  cmd.Init(kBucketId, bucket->size(), 1, valid_shm_id_,
           kSomeOffsetInSharedMemory);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));

  // Check that it fails if the size is out of range.
  cmd.Init(kBucketId, 0, bucket->size() + 1, valid_shm_id_,
           kSomeOffsetInSharedMemory);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
// Same as SetBucketData but with the payload inlined directly after the
// command header instead of passed via shared memory.
TEST_F(CommonDecoderTest, SetBucketDataImmediate) {
  cmd::SetBucketSize size_cmd;
  // Build the command in a local buffer so payload bytes can follow it.
  int8_t buffer[1024];
  cmd::SetBucketDataImmediate& cmd =
      *reinterpret_cast<cmd::SetBucketDataImmediate*>(&buffer);

  static const char kData[] = "1234567890123456789";

  const uint32_t kBucketId = 123;
  const uint32_t kInvalidBucketId = 124;

  size_cmd.Init(kBucketId, sizeof(kData));
  EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
  CommonDecoder::Bucket* bucket = decoder_.GetBucket(kBucketId);
  // Check the data is not there.
  EXPECT_NE(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));

  // Check we can set it.  The payload lives immediately after the command.
  void* memory = &buffer[0] + sizeof(cmd);
  memcpy(memory, kData, sizeof(kData));
  cmd.Init(kBucketId, 0, sizeof(kData));
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(kData)));
  EXPECT_EQ(0, memcmp(bucket->GetData(0, sizeof(kData)), kData, sizeof(kData)));

  // Check we can set it partially.
  static const char kData2[] = "ABCEDFG";
  const uint32_t kSomeOffsetInBucket = 5;
  memcpy(memory, kData2, sizeof(kData2));
  cmd.Init(kBucketId, kSomeOffsetInBucket, sizeof(kData2));
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(kData2)));
  EXPECT_EQ(0, memcmp(bucket->GetData(kSomeOffsetInBucket, sizeof(kData2)),
                      kData2, sizeof(kData2)));
  const char* bucket_data = bucket->GetDataAs<const char*>(0, sizeof(kData));
  // Check that nothing was affected outside of updated area.
  EXPECT_EQ(kData[kSomeOffsetInBucket - 1],
            bucket_data[kSomeOffsetInBucket - 1]);
  EXPECT_EQ(kData[kSomeOffsetInBucket + sizeof(kData2)],
            bucket_data[kSomeOffsetInBucket + sizeof(kData2)]);

  // Check that it fails if the bucket_id is invalid
  cmd.Init(kInvalidBucketId, kSomeOffsetInBucket, sizeof(kData2));
  EXPECT_NE(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(kData2)));

  // Check that it fails if the offset is out of range.
  cmd.Init(kBucketId, bucket->size(), 1);
  EXPECT_NE(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(kData2)));

  // Check that it fails if the size is out of range.
  size_cmd.Init(kBucketId, sizeof(kData2));
  EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
  cmd.Init(kBucketId, 0, bucket->size() + 1);
  EXPECT_NE(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(kData)));
}
// GetBucketStart writes the bucket size into a result slot and optionally
// copies the first chunk of bucket data into a caller-provided buffer.
TEST_F(CommonDecoderTest, GetBucketStart) {
  cmd::SetBucketSize size_cmd;
  cmd::SetBucketData set_cmd;
  cmd::GetBucketStart cmd;

  static const char kData[] = "1234567890123456789";
  static const char zero[sizeof(kData)] = { 0, };

  const uint32_t kBucketSize = sizeof(kData);
  const uint32_t kBucketId = 123;
  const uint32_t kInvalidBucketId = 124;

  // Put data in the bucket.
  size_cmd.Init(kBucketId, sizeof(kData));
  EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
  const uint32_t kSomeOffsetInSharedMemory = 50;
  uint8_t* start = GetSharedMemoryAs<uint8_t*>(kSomeOffsetInSharedMemory);
  memcpy(start, kData, sizeof(kData));
  set_cmd.Init(kBucketId, 0, sizeof(kData), valid_shm_id_,
               kSomeOffsetInSharedMemory);
  EXPECT_EQ(error::kNoError, ExecuteCmd(set_cmd));

  // Check that the size is correct with no data buffer.
  uint32_t* memory = GetSharedMemoryAs<uint32_t*>(kSomeOffsetInSharedMemory);
  *memory = 0x0;  // result slot must be zeroed before the call
  cmd.Init(kBucketId, valid_shm_id_, kSomeOffsetInSharedMemory, 0, 0, 0);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(kBucketSize, *memory);

  // Check that the data is copied with data buffer.
  const uint32_t kDataOffsetInSharedMemory = 54;
  uint8_t* data = GetSharedMemoryAs<uint8_t*>(kDataOffsetInSharedMemory);
  *memory = 0x0;
  memset(data, 0, sizeof(kData));
  cmd.Init(kBucketId, valid_shm_id_, kSomeOffsetInSharedMemory, kBucketSize,
           valid_shm_id_, kDataOffsetInSharedMemory);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(kBucketSize, *memory);
  EXPECT_EQ(0, memcmp(data, kData, kBucketSize));

  // Check that we can get a piece.
  *memory = 0x0;
  memset(data, 0, sizeof(kData));
  const uint32_t kPieceSize = kBucketSize / 2;
  cmd.Init(kBucketId, valid_shm_id_, kSomeOffsetInSharedMemory, kPieceSize,
           valid_shm_id_, kDataOffsetInSharedMemory);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(kBucketSize, *memory);
  EXPECT_EQ(0, memcmp(data, kData, kPieceSize));
  EXPECT_EQ(0, memcmp(data + kPieceSize, zero, sizeof(kData) - kPieceSize));

  // Check that it fails if the result_id is invalid
  cmd.Init(kInvalidBucketId, valid_shm_id_, kSomeOffsetInSharedMemory, 0, 0, 0);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));

  // Check that it fails if the data_id is invalid
  cmd.Init(kBucketId, valid_shm_id_, kSomeOffsetInSharedMemory, 1,
           CommonDecoderTest::kInvalidShmId, 0);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));

  // Check that it fails if the data_size is invalid
  cmd.Init(kBucketId, valid_shm_id_, kSomeOffsetInSharedMemory, 1, 0, 0);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
  cmd.Init(kBucketId, valid_shm_id_, kSomeOffsetInSharedMemory,
           CommonDecoderTest::kBufferSize + 1, valid_shm_id_, 0);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));

  // Check that it fails if the data_offset is invalid
  cmd.Init(kBucketId, valid_shm_id_, kSomeOffsetInSharedMemory, 0, 0, 1);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
  cmd.Init(kBucketId, valid_shm_id_, kSomeOffsetInSharedMemory,
           CommonDecoderTest::kBufferSize, valid_shm_id_, 1);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));

  // Check that it fails if the result size is not set to zero
  *memory = 0x1;
  cmd.Init(kBucketId, valid_shm_id_, kSomeOffsetInSharedMemory, 0, 0, 0);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
// GetBucketData copies a range of bucket bytes into shared memory and
// rejects out-of-range buckets, offsets, and sizes.
TEST_F(CommonDecoderTest, GetBucketData) {
  cmd::SetBucketSize size_cmd;
  cmd::SetBucketData set_cmd;
  cmd::GetBucketData cmd;

  static const char kData[] = "1234567890123456789";
  static const char zero[sizeof(kData)] = { 0, };

  const uint32_t kBucketId = 123;
  const uint32_t kInvalidBucketId = 124;

  size_cmd.Init(kBucketId, sizeof(kData));
  EXPECT_EQ(error::kNoError, ExecuteCmd(size_cmd));
  const uint32_t kSomeOffsetInSharedMemory = 50;
  uint8_t* memory = GetSharedMemoryAs<uint8_t*>(kSomeOffsetInSharedMemory);
  memcpy(memory, kData, sizeof(kData));
  set_cmd.Init(kBucketId, 0, sizeof(kData), valid_shm_id_,
               kSomeOffsetInSharedMemory);
  EXPECT_EQ(error::kNoError, ExecuteCmd(set_cmd));

  // Check we can get the whole thing.
  memset(memory, 0, sizeof(kData));
  cmd.Init(kBucketId, 0, sizeof(kData), valid_shm_id_,
           kSomeOffsetInSharedMemory);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(0, memcmp(memory, kData, sizeof(kData)));

  // Check we can get a piece.
  const uint32_t kSomeOffsetInBucket = 5;
  const uint32_t kLengthOfPiece = 6;
  const uint8_t kSentinel = 0xff;
  memset(memory, 0, sizeof(kData));
  // Sentinel byte just before the destination (offset 49, still inside the
  // transfer buffer) to detect writes before the target range.
  memory[-1] = kSentinel;
  cmd.Init(kBucketId, kSomeOffsetInBucket, kLengthOfPiece, valid_shm_id_,
           kSomeOffsetInSharedMemory);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(0, memcmp(memory, kData + kSomeOffsetInBucket, kLengthOfPiece));
  EXPECT_EQ(0, memcmp(memory + kLengthOfPiece, zero,
                      sizeof(kData) - kLengthOfPiece));
  EXPECT_EQ(kSentinel, memory[-1]);

  // Check that it fails if the bucket_id is invalid
  cmd.Init(kInvalidBucketId, kSomeOffsetInBucket, sizeof(kData), valid_shm_id_,
           kSomeOffsetInSharedMemory);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));

  // Check that it fails if the offset is invalid
  cmd.Init(kBucketId, sizeof(kData) + 1, 1, valid_shm_id_,
           kSomeOffsetInSharedMemory);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));

  // Check that it fails if the size is invalid
  cmd.Init(kBucketId, 0, sizeof(kData) + 1, valid_shm_id_,
           kSomeOffsetInSharedMemory);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
} // namespace gpu
| 6,324
|
2,023
|
def fit(X, Y):
    """Fit a simple least-squares line to paired samples.

    Computes the Pearson correlation of X and Y, scales it by the ratio of
    the sample standard deviations to get the slope, and derives the
    intercept from the means.

    Args:
        X: sequence of predictor values.
        Y: sequence of response values.

    Returns:
        A callable mapping a single x value to its predicted y.
    """
    x_mean = sum(X) / len(X)
    y_mean = sum(Y) / len(Y)

    # Sample standard deviation (Bessel-corrected, n - 1 in the denominator).
    def sample_sd(values, center):
        spread = sum((pow(v - center, 2) for v in values))
        return math.sqrt(spread / (len(values) - 1))

    # Single pass over the pairs accumulating the pieces of Pearson's r.
    cross = 0
    sq_dev_x = 0
    sq_dev_y = 0
    for x_i, y_i in zip(X, Y):
        dx = x_i - x_mean
        dy = y_i - y_mean
        cross += dx * dy
        sq_dev_x += pow(dx, 2)
        sq_dev_y += pow(dy, 2)
    r = cross / math.sqrt(sq_dev_x * sq_dev_y)

    slope = r * (sample_sd(Y, y_mean) / sample_sd(X, x_mean))
    intercept = y_mean - slope * x_mean

    def predictor(value):
        return slope * value + intercept
    return predictor
| 523
|
583
|
package ru.yandex.clickhouse;
import ru.yandex.clickhouse.util.ClickHouseValueFormatter;
import java.util.TimeZone;
/**
 * Immutable holder for a single prepared-statement parameter: the formatted
 * string value plus a flag saying whether it must be single-quoted when
 * rendered into SQL.
 */
public final class ClickHousePreparedStatementParameter {

    private static final ClickHousePreparedStatementParameter NULL_PARAM =
        new ClickHousePreparedStatementParameter(null, false);
    private static final ClickHousePreparedStatementParameter TRUE_PARAM =
        new ClickHousePreparedStatementParameter("1", false);
    private static final ClickHousePreparedStatementParameter FALSE_PARAM =
        new ClickHousePreparedStatementParameter("0", false);

    private final String stringValue;
    private final boolean quoteNeeded;

    /**
     * Formats an arbitrary Java object into a parameter, using the shared
     * NULL_PARAM instance for {@code null} inputs.
     */
    public static ClickHousePreparedStatementParameter fromObject(Object x,
        TimeZone dateTimeZone, TimeZone dateTimeTimeZone)
    {
        return x == null
            ? NULL_PARAM
            : new ClickHousePreparedStatementParameter(
                  ClickHouseValueFormatter.formatObject(x, dateTimeZone, dateTimeTimeZone),
                  ClickHouseValueFormatter.needsQuoting(x));
    }

    /** Shared parameter representing SQL NULL. */
    public static ClickHousePreparedStatementParameter nullParameter() {
        return NULL_PARAM;
    }

    /** Shared parameters for booleans; ClickHouse encodes them as 1 / 0. */
    public static ClickHousePreparedStatementParameter boolParameter(boolean value) {
        if (value) {
            return TRUE_PARAM;
        }
        return FALSE_PARAM;
    }

    public ClickHousePreparedStatementParameter(String stringValue,
        boolean quoteNeeded)
    {
        // A null value is stored as the formatter's NULL marker string.
        if (stringValue == null) {
            this.stringValue = ClickHouseValueFormatter.NULL_MARKER;
        } else {
            this.stringValue = stringValue;
        }
        this.quoteNeeded = quoteNeeded;
    }

    /** Renders the value for inline SQL: quoted if needed, "null" for NULL. */
    String getRegularValue() {
        if (ClickHouseValueFormatter.NULL_MARKER.equals(stringValue)) {
            return "null";
        }
        if (quoteNeeded) {
            return "'" + stringValue + "'";
        }
        return stringValue;
    }

    /** Raw value used for batch (tab-separated) inserts; never quoted. */
    String getBatchValue() {
        return stringValue;
    }

    @Override
    public String toString() {
        return stringValue;
    }
}
| 738
|
1,143
|
<filename>pinball/workflow/log_saver.py
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic handling log read/write."""
import abc
import os
import time
from pinball.common import s3_utils
from pinball.config.pinball_config import PinballConfig
from pinball.config.utils import get_log
LOG = get_log('pinball.workflow.log_saver')
__author__ = '<NAME>'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class LogSaver(object):
    """Interface of a component reading and writing job execution logs.

    Concrete subclasses (e.g. FileLogSaver, S3FileLogSaver) decide where the
    log bytes actually live.
    """
    # Python 2 style abstract base class declaration.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def write(self, content_str):
        """Write content_str to the file.

        Args:
            content_str: The string to be written to the file.
        """
        return

    @abc.abstractmethod
    def read(self):
        """Return the file content as string.

        Returns:
            content_str: The string of the file content.
        """
        return
class FileLogSaver(LogSaver):
    """FileLogSaver class provides basic methods interacting with a local file.

    A list of methods are open, write, read and close.  Callers must open()
    before read()/write() and close() afterwards.

    Attributes:
        _file_path: A string of the local file path.
        _file_descriptor: A file descriptor of the file in _file_path, or
            None when the saver is closed.
    """
    def __init__(self, file_path):
        self._file_path = file_path
        self._file_descriptor = None

    def __str__(self):
        return self._file_path

    def open(self, mode='a+'):
        """Open the local file.

        Args:
            mode: The default mode is 'a+', i.e., append/read.
                NOTE(review): with 'a+' the initial read position is
                platform-dependent; callers here appear to rely on reads
                starting at the beginning of the file -- confirm on the
                target platform.
        """
        self._file_descriptor = open(self._file_path, mode)

    def write(self, content_str):
        """Write to the local file and flush immediately.

        Args:
            content_str: The string to be written to the local file.
        """
        self._file_descriptor.write(content_str)
        # Flush so readers (and the S3 uploader) see the bytes right away.
        self._file_descriptor.flush()

    def read(self):
        """Read content from the local file.

        Returns:
            String content of the local file from the current position.
        """
        return self._file_descriptor.read()

    def close(self):
        """Close the local file."""
        self._file_descriptor.close()
        self._file_descriptor = None

    @staticmethod
    def from_path(file_path):
        """A factory method which returns the right LogSaver class for the given file path.

        A list of supported file path includes:
            1) local file, which is supported by FileLogSaver;
            2) s3 file, which is supported by S3FileLogSaver.

        Args:
            file_path: A string presentation of the file path.

        Returns:
            A LogSaver class instance (not yet opened).
        """
        if file_path.startswith('s3n://'):
            return S3FileLogSaver(file_path)
        else:
            return FileLogSaver(file_path)
class S3FileLogSaver(FileLogSaver):
"""S3FileLogSaver class provides basic methods interacting with remote s3 file.
A list of methods are open, close, read, write.
Note that there is no appending operation in s3 file system. In order to implement
write to s3, we write the content to local file first, then upload the content from
local file to s3. Considering the write performance, we only upload the content from
local file to s3, when either of the following two conditions is satisfied.
Condition-1: it has been a long time since the last upload to s3
Condition-2: there is a lot new content flushed into the local file, and waiting
to be uploaded to s3
Attributes:
_S3_UPLOAD_INTERVAL_IN_SEC: A class level setting for Condition-1.
_S3_UPLOAD_BATCH_IN_BYTE: A class level setting for Condition-2.
_file_path: The s3 file path to read from or write to.
_s3_key: A Boto refernce to an s3 key for the _file_path
_local_file_log_saver: This is a class instance of FileLogSaver,
which helps to read and write content to a local file.
_last_remote_upload_time: The last time (in second) when we uploaded the file to s3.
_pending_bytes: The size of the data that is pending to be written to s3 file.
"""
_S3_UPLOAD_BATCH_IN_BYTE = 1000
_S3_UPLOAD_INTERVAL_IN_SEC = 3*60
def __init__(self, file_path):
super(S3FileLogSaver, self).__init__(file_path)
local_file_path = self._file_path.replace(
PinballConfig.S3_LOGS_DIR_PREFIX,
PinballConfig.LOCAL_LOGS_DIR_PREFIX)
self._local_file_log_saver = FileLogSaver(local_file_path)
self._last_remote_upload_time = time.time()
self._pending_bytes = 0L
self._s3_key = None
def open(self, mode=None):
"""Open S3FileLogSaver to make it ready to read/write.
More specifically, we need a s3 key for ready/write to remote s3 file,
and open the LogSaver for local file as well.
"""
# TODO(Mao): With "a+" mode, we need to warn if local file is missing
# while there is a file in s3.
self._s3_key = self._get_or_create_s3_key(self._file_path)
def close(self):
"""Close S3FileLogSaver. No further operation on the saver are permitted.
Note that, we need to make sure all the content which is stored in
the local file is uploaded to s3.
"""
self._sync_to_s3()
self._s3_key = None
LOG.info("deleting local file: %s as all content is uploaded.",
self._local_file_log_saver._file_path)
if os.path.exists(self._local_file_log_saver._file_path):
try:
os.remove(self._local_file_log_saver._file_path)
except OSError, e:
LOG.warn('deletion failed due to: %s', e)
def _check_s3_upload_condition(self):
"""Check whether to upload local log file to remote s3 storage.
There are two conditions which are related to the class level attributes:
_S3_UPLOAD_BATCH_IN_BYTE and _S3_UPLOAD_INTERVAL_IN_SEC.
Returns:
True: If either of the two conditions is satisfied.
"""
if self._pending_bytes >= self._S3_UPLOAD_BATCH_IN_BYTE:
return True
elif time.time() - self._last_remote_upload_time >= self._S3_UPLOAD_INTERVAL_IN_SEC:
return True
def write(self, content_str):
"""Write the content_str to remote s3 storage.
Since there is no appending operation in s3 storage,
we write the content_str to local file, and then upload
the local file to the remote s3 storage.
Args:
content_str: The string to be written to s3.
"""
# First write the content_str to local file
self._local_file_log_saver.open()
self._write_to_local_file(content_str)
self._local_file_log_saver.close()
# Check if we need to upload the local file to remote s3 storage.
self._pending_bytes += len(content_str)
if self._check_s3_upload_condition():
self._sync_to_s3()
self._last_remote_upload_time = time.time()
self._pending_bytes = 0L
def read(self):
"""Read from a s3 file."""
return self._s3_key.get_contents_as_string()
def _sync_to_s3(self):
"""Upload data from local file to remote s3 storage."""
self._local_file_log_saver.open()
content = self._local_file_log_saver.read()
self._local_file_log_saver.close()
self._s3_key.set_contents_from_string(content)
LOG.info("%d bytes of data has been uploaded to s3 path %s",
len(content),
self._file_path)
def _write_to_local_file(self, content_str):
"""Write content_str to a local file."""
self._local_file_log_saver.write(content_str)
@staticmethod
def _get_or_create_s3_key(s3_location):
"""Get or create a Boto reference to an s3 key of the s3_location."""
bucket_name, path = s3_utils.parse_s3_location(s3_location)
bucket = s3_utils.get_s3_bucket(bucket_name)
key = bucket.get_key(path)
if not key:
key = bucket.new_key(path)
return key
| 3,580
|
2,113
|
//-----------------------------------------------------------------------------
// Copyright (c) 2012 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------
#ifndef CORE_INTERFACES_H
#define CORE_INTERFACES_H
#ifndef _SCENERENDERSTATE_H_
#include "scene/sceneRenderState.h"
#endif
// CRTP registry base: every live instance of T derived from Interface<T> is
// tracked in the static 'all' vector so systems can iterate all implementors.
template<typename T>
class Interface
{
public:
   // Registry of all live instances of T.  NOTE(review): appears to assume
   // single-threaded construction/destruction -- confirm before using from
   // multiple threads.
   static Vector<T*> all;

   Interface()
   {
      // Register on construction.
      all.push_back((T*)this);
   }

   virtual ~Interface()
   {
      // Unregister on destruction: linear scan for this instance.
      for (U32 i = 0; i < all.size(); i++)
      {
         if (all[i] == (T*)this)
         {
            all.erase(i);
            return;
         }
      }
   }
};

// Definition of the per-instantiation static registry (initial size 0).
template<typename T> Vector<T*> Interface<T>::all(0);
// Generic interfaces that many behaviors may make use of.

class SetTransformInterface// : public Interface<SetTransformInterface>
{
public:
   // NOTE(review): declared virtual but neither pure nor defined here;
   // definitions presumably live in a .cpp elsewhere -- confirm, otherwise
   // this will fail at link time when instantiated.
   virtual void setTransform( MatrixF transform );
   virtual void setTransform( Point3F pos, EulerF rot );
};

// Tick/update hooks; registered instances are iterated via Interface<T>::all.
class UpdateInterface : public Interface<UpdateInterface>
{
public:
   virtual void processTick(){}
   virtual void interpolateTick(F32 dt){}
   virtual void advanceTime(F32 dt){}
};

class BehaviorFieldInterface// : public Interface<BehaviorFieldInterface>
{
public:
   // Default implementation ignores field changes.
   virtual void onFieldChange(const char* fieldName, const char* newValue){};
};

// Camera contract: transform, scoping, frustum, and field-of-view control.
class CameraInterface// : public Interface<CameraInterface>
{
public:
   virtual bool getCameraTransform(F32* pos,MatrixF* mat)=0;
   virtual void onCameraScopeQuery(NetConnection *cr, CameraScopeQuery * query)=0;
   virtual Frustum getFrustum()=0;
   virtual F32 getCameraFov()=0;
   virtual void setCameraFov(F32 fov)=0;
   virtual bool isValidCameraFov(F32 fov)=0;
};

class CastRayInterface// : public Interface<CastRayInterface>
{
public:
   // Returns true on hit and fills 'info'.
   virtual bool castRay(const Point3F &start, const Point3F &end, RayInfo* info)=0;
};

// Hooks invoked when the object is selected/deselected in the editor.
class EditorInspectInterface// : public Interface<EditorInspectInterface>
{
public:
   virtual void onInspect()=0;
   virtual void onEndInspect()=0;
};
#endif
| 957
|
559
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph to visualise job/evidence relationships."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import graphviz
import sys
from turbinia.jobs import manager as jobs_manager
try:
unicode
except NameError:
unicode = str # pylint: disable=redefined-builtin
def create_graph():
  """Build a directed graph linking Turbinia jobs to their evidence types.

  Jobs become plain nodes; evidence types become box-shaped nodes, with an
  edge from each input evidence type into the job and from the job out to
  each output evidence type.

  Returns:
    Instance of graphviz.dot.Digraph
  """
  graph = graphviz.Digraph(comment='Turbinia Evidence graph', format='png')
  for _, job in jobs_manager.JobsManager.GetJobs():
    graph.node(job.NAME)
    # Evidence consumed by the job: evidence -> job.
    for evidence_type in job.evidence_input:
      graph.node(evidence_type.__name__, shape='box')
      graph.edge(evidence_type.__name__, job.NAME)
    # Evidence produced by the job: job -> evidence.
    for evidence_type in job.evidence_output:
      graph.node(evidence_type.__name__, shape='box')
      graph.edge(job.NAME, evidence_type.__name__)
  return graph
if __name__ == '__main__':
  parser = argparse.ArgumentParser(
      description='Create Turbinia evidence graph.')
  parser.add_argument(
      '-f', '--format', default='png',
      help='The format of the output file you wish to generate. Specify '
      '"list" to list out the available output types. More info is here: '
      'http://www.graphviz.org/doc/info/output.html')
  parser.add_argument(
      '-e', '--engine', default='dot',
      help='The graphviz engine used to generate the graph layout. Specify '
      '"list" to list out the available engines.')
  parser.add_argument('filename', type=unicode, help='where to save the file')
  args = parser.parse_args()

  if args.format == 'list':
    formats = ' '.join(graphviz.FORMATS)
    print('Available format types: {0:s}'.format(formats))
    sys.exit(0)

  if args.format not in graphviz.FORMATS:
    print('Format type {0:s} is not supported'.format(args.format))
    sys.exit(1)

  if args.engine == 'list':
    engines = ' '.join(graphviz.ENGINES)
    print('Available graph layout engines: {0:s}'.format(engines))
    sys.exit(0)

  if args.engine not in graphviz.ENGINES:
    print('Layout engine type {0:s} is not supported'.format(args.engine))
    sys.exit(1)

  graph = create_graph()
  graph.engine = args.engine

  # graphviz.render() appends '.<format>' to the filename it is given, so
  # strip a matching extension from the end of the user-supplied name to
  # avoid doubled extensions (e.g. 'out.png.png' or 'out.svg.svg').
  # Note: only a *trailing* suffix is removed; the previous
  # str.replace('.png', '') deleted the substring anywhere in the path and
  # only ever handled the png case.
  output_file = args.filename
  suffix = '.{0:s}'.format(args.format)
  if output_file.endswith(suffix):
    output_file = output_file[:-len(suffix)]

  try:
    rendered_graph = graph.render(
        filename=output_file, format=args.format, cleanup=True)
    print('Graph generated and saved to: {0}'.format(rendered_graph))
  except graphviz.ExecutableNotFound:
    print('Graphviz is not installed - Run: apt-get install graphviz')
    # Exit non-zero so callers/scripts can detect the failure.
    sys.exit(1)
| 1,058
|
2,151
|
<filename>src/trusted/validator_arm/dgen_decoder_output.py
#!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Responsible for generating the decoder based on parsed
table representations.
"""
import dgen_opt
import dgen_output
import dgen_actuals
# This file generates the class decoder Decoder as defined by the
# decoder tables. The code is specifically written to minimize the
# number of decoder classes needed to parse valid ARM
# instructions. Many rows in the table use the same decoder class. In
# addition, we optimize tables by merging, so long as the same decoder
# class is built.
#
# The following files are generated:
#
# decoder.h
# decoder.cc
#
# decoder.h declares the generated decoder parser class while
# decoder.cc contains the implementation of that decoder class.
#
# For testing purposes (see dgen_test_output.py) different rules are
# applied. Note: It may be worth reading dgen_test_output.py preamble
# to get a better understanding of decoder actions, and why we need
# the "action_filter" methods.
"""The current command line arguments to use"""
_cl_args = {}
NEWLINE_STR="""
"""
COMMENTED_NEWLINE_STR="""
//"""
# Defines the header for decoder.h
H_HEADER="""%(FILE_HEADER)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/decode.h"
#include "%(FILENAME_BASE)s_actuals.h"
namespace nacl_arm_dec {
"""
DECODER_DECLARE_HEADER="""
// Defines a decoder class selector for instructions.
class %(decoder_name)s : DecoderState {
public:
explicit %(decoder_name)s();
// Parses the given instruction, returning the decoder to use.
virtual const ClassDecoder& decode(const Instruction) const;
// Returns the class decoder to use to process the fictitious instruction
// that is inserted before the first instruction in the code block by
// the validator.
const ClassDecoder &fictitious_decoder() const {
return %(fictitious_decoder)s_instance_;
}
private:
"""
DECODER_DECLARE_METHOD_COMMENTS="""
// The following list of methods correspond to each decoder table,
// and implements the pattern matching of the corresponding bit
// patterns. After matching the corresponding bit patterns, they
// either call other methods in this list (corresponding to another
// decoder table), or they return the instance field that implements
// the class decoder that should be used to decode the particular
// instruction.
"""
DECODER_DECLARE_METHOD="""
inline const ClassDecoder& decode_%(table_name)s(
const Instruction inst) const;
"""
DECODER_DECLARE_FIELD_COMMENTS="""
// The following fields define the set of class decoders
// that can be returned by the API function "decode". They
// are created once as instance fields, and then returned
// by the table methods above. This speeds up the code since
// the class decoders need to only be built once (and reused
// for each call to "decode")."""
DECODER_DECLARE_FIELD="""
const %(decoder)s %(decoder)s_instance_;"""
DECODER_DECLARE_FOOTER="""
};
"""
H_FOOTER="""
} // namespace nacl_arm_dec
#endif // %(IFDEF_NAME)s
"""
def generate_h(decoder, decoder_name, filename, out, cl_args):
  """Entry point to the decoder for .h file.

  Args:
    decoder: The decoder defined by the list of Table objects to
        process.
    decoder_name: The name of the decoder state to build.
    filename: The (localized) name for the .h file.
    out: a COutput object to write to.
    cl_args: A dictionary of additional command line arguments.
  """
  global _cl_args
  assert filename.endswith('.h')
  _cl_args = cl_args

  # Before starting, remove all testing information from the parsed tables.
  decoder = decoder.action_filter(['actual'])

  values = {
      'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
      'IFDEF_NAME': dgen_output.ifdef_name(filename),
      'FILENAME_BASE': filename[:-len('.h')],
      'decoder_name': decoder_name,
      }
  out.write(H_HEADER % values)
  # The fictitious decoder handles the pseudo-instruction the validator
  # inserts before the first real instruction in a code block.
  values['fictitious_decoder'] = (
      decoder.get_value('FictitiousFirst').actual())
  out.write(DECODER_DECLARE_HEADER % values)
  out.write(DECODER_DECLARE_METHOD_COMMENTS)
  # Declare one dispatch method per decoder table.
  for table in decoder.tables():
    values['table_name'] = table.name
    out.write(DECODER_DECLARE_METHOD % values)
  out.write(DECODER_DECLARE_FIELD_COMMENTS)
  # Declare one instance field per distinct class decoder.
  # NOTE(review): decoder was already action-filtered above, so this
  # second action_filter(['actual']) is presumably redundant -- confirm.
  for action in decoder.action_filter(['actual']).decoders():
    values['decoder'] = action.actual()
    out.write(DECODER_DECLARE_FIELD % values)
  out.write(DECODER_DECLARE_FOOTER % values)
  out.write(H_FOOTER % values)
# Defines the header for DECODER.h
CC_HEADER="""%(FILE_HEADER)s
#include "%(header_filename)s"
namespace nacl_arm_dec {
"""
CONSTRUCTOR_HEADER="""
%(decoder_name)s::%(decoder_name)s() : DecoderState()"""
CONSTRUCTOR_FIELD_INIT="""
, %(decoder)s_instance_()"""
CONSTRUCTOR_FOOTER="""
{}
"""
METHOD_HEADER="""
// Implementation of table: %(table_name)s.
// Specified by: %(citation)s
const ClassDecoder& %(decoder_name)s::decode_%(table_name)s(
const Instruction inst) const
{"""
METHOD_HEADER_TRACE="""
fprintf(stderr, "decode %(table_name)s\\n");
"""
METHOD_DISPATCH_BEGIN="""
if (%s"""
METHOD_DISPATCH_CONTINUE=""" &&
%s"""
METHOD_DISPATCH_END=") {"""
METHOD_DISPATCH_TRACE="""
fprintf(stderr, "count = %s\\n");"""
METHOD_DISPATCH_CLASS_DECODER="""
return %(decoder)s_instance_;"""
METHOD_DISPATCH_SUBMETHOD="""
return decode_%(subtable_name)s(inst);"""
METHOD_DISPATCH_CLOSE="""
}
"""
METHOD_FOOTER="""
// Catch any attempt to fall though ...
return %(not_implemented)s_instance_;
}
"""
DECODER_METHOD_HEADER="""
const ClassDecoder& %(decoder_name)s::decode(const Instruction inst) const {"""
DECODER_METHOD_TRACE="""
fprintf(stderr, "Parsing %%08x\\n", inst.Bits());"""
DECODER_METHOD_FOOTER="""
return decode_%(entry_table_name)s(inst);
}
"""
CC_FOOTER="""
} // namespace nacl_arm_dec
"""
def generate_cc(decoder, decoder_name, filename, out, cl_args):
  """Implementation of the decoder in .cc file.

  Args:
    decoder: The decoder defined by the list of Table objects to
        process.
    decoder_name: The name of the decoder state to build.
    filename: The (localized) name for the .cc file.
    out: a COutput object to write to.
    cl_args: A dictionary of additional command line arguments.
  """
  global _cl_args
  assert filename.endswith('.cc')
  _cl_args = cl_args

  # Before starting, remove all testing information from the parsed
  # tables.
  decoder = decoder.action_filter(['actual'])

  values = {
      'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
      'header_filename': filename[:-2] + 'h',
      'decoder_name': decoder_name,
      'entry_table_name': decoder.primary.name,
      }
  out.write(CC_HEADER % values)
  # Emit the constructor (with one field initializer per class decoder),
  # then the per-table dispatch methods, then the public decode() entry
  # point that forwards to the primary table's method.
  _generate_constructors(decoder, values, out)
  _generate_methods(decoder, values, out)
  out.write(DECODER_METHOD_HEADER % values)
  if _cl_args.get('trace') == 'True':
    out.write(DECODER_METHOD_TRACE % values)
  out.write(DECODER_METHOD_FOOTER % values)
  out.write(CC_FOOTER % values)
def _generate_constructors(decoder, values, out):
  """Generates the decoder state constructor, with one member-initializer
  per class decoder instance field.

  Args:
    decoder: The decoder defined by the list of Table objects to process.
    values: Dictionary of template substitution values (mutated in place).
    out: a COutput object to write to.
  """
  out.write(CONSTRUCTOR_HEADER % values)
  # Fix: the loop variable previously reused the name 'decoder', shadowing
  # the parameter. That only worked because the iterable is evaluated
  # before the first rebinding; rename to keep the two roles distinct.
  for action in decoder.action_filter(['actual']).decoders():
    values['decoder'] = action.actual()
    out.write(CONSTRUCTOR_FIELD_INIT % values)
  out.write(CONSTRUCTOR_FOOTER % values)
def _generate_methods(decoder, values, out):
  """Generates one dispatch method per decoder table.

  Each method matches the optimized row patterns in order and either
  returns a class decoder instance field or recurses into a subtable
  method.

  Args:
    decoder: The decoder defined by the list of Table objects to process.
    values: Dictionary of template substitution values (mutated in place).
    out: a COutput object to write to.
  """
  global _cl_args
  for table in decoder.tables():
    # Add the default row as the last in the optimized row, so that
    # it is applied if all other rows do not.
    opt_rows = sorted(dgen_opt.optimize_rows(table.rows(False)))
    if table.default_row:
      opt_rows.append(table.default_row)
    opt_rows = table.add_column_to_rows(opt_rows)
    print ("Table %s: %d rows minimized to %d"
           % (table.name, len(table.rows()), len(opt_rows)))
    values['table_name'] = table.name
    values['citation'] = table.citation
    out.write(METHOD_HEADER % values)
    if _cl_args.get('trace') == 'True':
      out.write(METHOD_HEADER_TRACE % values)
    # Add message to stop compilation warnings if this table
    # doesn't require subtables to select a class decoder.
    if not table.methods():
      out.write("\n  UNREFERENCED_PARAMETER(inst);")
    count = 0
    for row in opt_rows:
      count = count + 1
      # Each row consists of a set of bit patterns defining if the row
      # is applicable. Convert this into a sequence of anded C test
      # expressions. For example, convert the following pair of bit
      # patterns:
      #
      #   xxxx1010xxxxxxxxxxxxxxxxxxxxxxxx
      #   xxxxxxxxxxxxxxxxxxxxxxxxxxxx0101
      #
      # Each instruction is masked to get the bits, and then
      # tested against the corresponding expected bits. Hence, the
      # above example is converted to:
      #
      #    ((inst & 0x0F000000) != 0x0C000000) &&
      #    ((inst & 0x0000000F) != 0x00000005)
      out.write(METHOD_DISPATCH_BEGIN %
                row.patterns[0].to_commented_bool())
      for p in row.patterns[1:]:
        out.write(METHOD_DISPATCH_CONTINUE % p.to_commented_bool())
      out.write(METHOD_DISPATCH_END)
      if _cl_args.get('trace') == 'True':
        out.write(METHOD_DISPATCH_TRACE % count)
      # A row's action is either a class decoder (return the instance
      # field) or a subtable (recurse into its dispatch method).
      if row.action.__class__.__name__ == 'DecoderAction':
        values['decoder'] = row.action.actual()
        out.write(METHOD_DISPATCH_CLASS_DECODER % values)
      elif row.action.__class__.__name__ == 'DecoderMethod':
        values['subtable_name'] = row.action.name
        out.write(METHOD_DISPATCH_SUBMETHOD % values)
      else:
        raise Exception('Bad table action: %s' % repr(row.action))
      out.write(METHOD_DISPATCH_CLOSE % values)
    # Fall-through safety net: return the NotImplemented class decoder.
    values['not_implemented'] = decoder.get_value('NotImplemented').actual()
    out.write(METHOD_FOOTER % values)
| 3,874
|
571
|
<reponame>1123852253/mexopencv
/**
* @file DISOpticalFlow_.cpp
* @brief mex interface for cv::optflow::DISOpticalFlow
* @ingroup optflow
* @author Amro
* @date 2017
*/
#include "mexopencv.hpp"
#include "opencv2/optflow.hpp"
using namespace std;
using namespace cv;
using namespace cv::optflow;
namespace {
// Persistent objects: these survive across mexFunction calls (the MEX
// file is kept loaded via mexLock while instances exist).

/// Last object id to allocate
int last_id = 0;
/// Object container, keyed by the integer handle returned to MATLAB
map<int,Ptr<DISOpticalFlow> > obj_;

/// DIS preset types (option string -> OpenCV preset constant)
const ConstMap<string,int> DISPresetMap = ConstMap<string,int>
    ("UltraFast", cv::optflow::DISOpticalFlow::PRESET_ULTRAFAST)
    ("Fast",      cv::optflow::DISOpticalFlow::PRESET_FAST)
    ("Medium",    cv::optflow::DISOpticalFlow::PRESET_MEDIUM);
}
/**
 * Main entry called from Matlab
 * @param nlhs number of left-hand-side arguments
 * @param plhs pointers to mxArrays in the left-hand-side
 * @param nrhs number of right-hand-side arguments
 * @param prhs pointers to mxArrays in the right-hand-side
 *
 * Dispatches on prhs[1] (the method name). "new" constructs a persistent
 * DISOpticalFlow instance and returns its integer handle; every other
 * method looks up the instance for the handle in prhs[0] and forwards
 * the call to it.
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    // Check the number of arguments
    nargchk(nrhs>=2 && nlhs<=1);

    // Argument vector
    vector<MxArray> rhs(prhs, prhs+nrhs);
    int id = rhs[0].toInt();
    string method(rhs[1].toString());

    // constructor call
    if (method == "new") {
        nargchk(nrhs>=2 && (nrhs%2)==0 && nlhs<=1);
        int preset = cv::optflow::DISOpticalFlow::PRESET_FAST;
        // Remaining arguments are name/value option pairs.
        for (int i=2; i<nrhs; i+=2) {
            string key(rhs[i].toString());
            if (key == "Preset")
                preset = DISPresetMap[rhs[i+1].toString()];
            else
                mexErrMsgIdAndTxt("mexopencv:error",
                    "Unrecognized option %s", key.c_str());
        }
        obj_[++last_id] = createOptFlow_DIS(preset);
        plhs[0] = MxArray(last_id);
        // Keep the MEX file loaded so the persistent object map survives.
        mexLock();
        return;
    }

    // Big operation switch
    // Note: map::operator[] default-constructs an empty Ptr for unknown
    // ids, which the emptiness check below turns into an error.
    Ptr<DISOpticalFlow> obj = obj_[id];
    if (obj.empty())
        mexErrMsgIdAndTxt("mexopencv:error", "Object not found id=%d", id);
    if (method == "delete") {
        nargchk(nrhs==2 && nlhs==0);
        obj_.erase(id);
        // Balances the mexLock() taken in the constructor.
        mexUnlock();
    }
    else if (method == "clear") {
        nargchk(nrhs==2 && nlhs==0);
        obj->clear();
    }
    else if (method == "save") {
        nargchk(nrhs==3 && nlhs==0);
        obj->save(rhs[2].toString());
    }
    else if (method == "load") {
        nargchk(nrhs>=3 && (nrhs%2)!=0 && nlhs==0);
        string objname;
        bool loadFromString = false;
        for (int i=3; i<nrhs; i+=2) {
            string key(rhs[i].toString());
            if (key == "ObjName")
                objname = rhs[i+1].toString();
            else if (key == "FromString")
                loadFromString = rhs[i+1].toBool();
            else
                mexErrMsgIdAndTxt("mexopencv:error",
                    "Unrecognized option %s", key.c_str());
        }
        /*
        obj_[id] = (loadFromString ?
            Algorithm::loadFromString<DISOpticalFlow>(rhs[2].toString(), objname) :
            Algorithm::load<DISOpticalFlow>(rhs[2].toString(), objname));
        */
        ///*
        // HACK: workaround for missing DISOpticalFlow::create()
        // Read settings into the existing instance instead of creating a
        // new one via Algorithm::load (see commented-out code above).
        FileStorage fs(rhs[2].toString(), FileStorage::READ +
            (loadFromString ? FileStorage::MEMORY : 0));
        if (!fs.isOpened())
            mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file");
        FileNode fn(objname.empty() ? fs.getFirstTopLevelNode() : fs[objname]);
        if (fn.empty())
            mexErrMsgIdAndTxt("mexopencv:error", "Failed to get node");
        obj->read(fn);
        //*/
    }
    else if (method == "empty") {
        nargchk(nrhs==2 && nlhs<=1);
        plhs[0] = MxArray(obj->empty());
    }
    else if (method == "getDefaultName") {
        nargchk(nrhs==2 && nlhs<=1);
        plhs[0] = MxArray(obj->getDefaultName());
    }
    else if (method == "calc") {
        nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=1);
        Mat flow;
        // Optional InitialFlow seeds the flow field passed to calc().
        for (int i=4; i<nrhs; i+=2) {
            string key(rhs[i].toString());
            if (key == "InitialFlow")
                flow = rhs[i+1].toMat(CV_32F);
            else
                mexErrMsgIdAndTxt("mexopencv:error",
                    "Unrecognized option %s", key.c_str());
        }
        Mat I0(rhs[2].toMat(CV_8U)),
            I1(rhs[3].toMat(CV_8U));
        obj->calc(I0, I1, flow);
        plhs[0] = MxArray(flow);
    }
    else if (method == "collectGarbage") {
        nargchk(nrhs==2 && nlhs==0);
        obj->collectGarbage();
    }
    else if (method == "get") {
        nargchk(nrhs==3 && nlhs<=1);
        string prop(rhs[2].toString());
        if (prop == "FinestScale")
            plhs[0] = MxArray(obj->getFinestScale());
        else if (prop == "PatchSize")
            plhs[0] = MxArray(obj->getPatchSize());
        else if (prop == "PatchStride")
            plhs[0] = MxArray(obj->getPatchStride());
        else if (prop == "GradientDescentIterations")
            plhs[0] = MxArray(obj->getGradientDescentIterations());
        else if (prop == "VariationalRefinementIterations")
            plhs[0] = MxArray(obj->getVariationalRefinementIterations());
        else if (prop == "VariationalRefinementAlpha")
            plhs[0] = MxArray(obj->getVariationalRefinementAlpha());
        else if (prop == "VariationalRefinementDelta")
            plhs[0] = MxArray(obj->getVariationalRefinementDelta());
        else if (prop == "VariationalRefinementGamma")
            plhs[0] = MxArray(obj->getVariationalRefinementGamma());
        else if (prop == "UseMeanNormalization")
            plhs[0] = MxArray(obj->getUseMeanNormalization());
        else if (prop == "UseSpatialPropagation")
            plhs[0] = MxArray(obj->getUseSpatialPropagation());
        else
            mexErrMsgIdAndTxt("mexopencv:error",
                "Unrecognized property %s", prop.c_str());
    }
    else if (method == "set") {
        nargchk(nrhs==4 && nlhs==0);
        string prop(rhs[2].toString());
        if (prop == "FinestScale")
            obj->setFinestScale(rhs[3].toInt());
        else if (prop == "PatchSize")
            obj->setPatchSize(rhs[3].toInt());
        else if (prop == "PatchStride")
            obj->setPatchStride(rhs[3].toInt());
        else if (prop == "GradientDescentIterations")
            obj->setGradientDescentIterations(rhs[3].toInt());
        else if (prop == "VariationalRefinementIterations")
            obj->setVariationalRefinementIterations(rhs[3].toInt());
        else if (prop == "VariationalRefinementAlpha")
            obj->setVariationalRefinementAlpha(rhs[3].toFloat());
        else if (prop == "VariationalRefinementDelta")
            obj->setVariationalRefinementDelta(rhs[3].toFloat());
        else if (prop == "VariationalRefinementGamma")
            obj->setVariationalRefinementGamma(rhs[3].toFloat());
        else if (prop == "UseMeanNormalization")
            obj->setUseMeanNormalization(rhs[3].toBool());
        else if (prop == "UseSpatialPropagation")
            obj->setUseSpatialPropagation(rhs[3].toBool());
        else
            mexErrMsgIdAndTxt("mexopencv:error",
                "Unrecognized property %s", prop.c_str());
    }
    else
        mexErrMsgIdAndTxt("mexopencv:error",
            "Unrecognized operation %s", method.c_str());
}
| 3,592
|
430
|
/* pp.c
*
* Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
* 2000, 2001, 2002, 2003, 2004, 2005, by <NAME> and others
*
* You may distribute under the terms of either the GNU General Public
* License or the Artistic License, as specified in the README file.
*
*/
/*
* "It's a big house this, and very peculiar. Always a bit more to discover,
* and no knowing what you'll find around a corner. And Elves, sir!" --Samwise
*/
/* This file contains general pp ("push/pop") functions that execute the
* opcodes that make up a perl program. A typical pp function expects to
* find its arguments on the stack, and usually pushes its results onto
* the stack, hence the 'pp' terminology. Each OP structure contains
* a pointer to the relevant pp_foo() function.
*/
#include "EXTERN.h"
#define PERL_IN_PP_C
#include "perl.h"
#include "keywords.h"
#include "reentr.h"
#if defined(SPEC_CPU)
# include "specrand.h"
#endif /* SPEC_CPU */
/* XXX I can't imagine anyone who doesn't have this actually _needs_
it, since pid_t is an integral type.
--AD 2/20/1998
*/
#ifdef NEED_GETPID_PROTO
extern Pid_t getpid (void);
#endif
/* variations on pp_null */
/* pp_stub - placeholder op: yields undef in scalar context, pushes
 * nothing otherwise. */
PP(pp_stub)
{
    dSP;
    if (GIMME_V == G_SCALAR)
        XPUSHs(&PL_sv_undef);
    RETURN;
}
/* pp_scalar - scalar() is a runtime no-op; its effect (context
 * propagation) happens at compile time. */
PP(pp_scalar)
{
    return NORMAL;
}
/* Pushy stuff. */
PP(pp_padav)
{
dSP; dTARGET;
I32 gimme;
if (PL_op->op_private & OPpLVAL_INTRO)
SAVECLEARSV(PAD_SVl(PL_op->op_targ));
EXTEND(SP, 1);
if (PL_op->op_flags & OPf_REF) {
PUSHs(TARG);
RETURN;
} else if (LVRET) {
if (GIMME == G_SCALAR)
Perl_croak(aTHX_ "Can't return array to lvalue scalar context");
PUSHs(TARG);
RETURN;
}
gimme = GIMME_V;
if (gimme == G_ARRAY) {
I32 maxarg = AvFILL((AV*)TARG) + 1;
EXTEND(SP, maxarg);
if (SvMAGICAL(TARG)) {
U32 i;
for (i=0; i < (U32)maxarg; i++) {
SV **svp = av_fetch((AV*)TARG, i, FALSE);
SP[i+1] = (svp) ? *svp : &PL_sv_undef;
}
}
else {
Copy(AvARRAY((AV*)TARG), SP+1, maxarg, SV*);
}
SP += maxarg;
}
else if (gimme == G_SCALAR) {
SV* sv = sv_newmortal();
I32 maxarg = AvFILL((AV*)TARG) + 1;
sv_setiv(sv, maxarg);
PUSHs(sv);
}
RETURN;
}
PP(pp_padhv)
{
dSP; dTARGET;
I32 gimme;
XPUSHs(TARG);
if (PL_op->op_private & OPpLVAL_INTRO)
SAVECLEARSV(PAD_SVl(PL_op->op_targ));
if (PL_op->op_flags & OPf_REF)
RETURN;
else if (LVRET) {
if (GIMME == G_SCALAR)
Perl_croak(aTHX_ "Can't return hash to lvalue scalar context");
RETURN;
}
gimme = GIMME_V;
if (gimme == G_ARRAY) {
RETURNOP(do_kv());
}
else if (gimme == G_SCALAR) {
SV* sv = Perl_hv_scalar(aTHX_ (HV*)TARG);
SETs(sv);
}
RETURN;
}
PP(pp_padany)
{
DIE(aTHX_ "NOT IMPL LINE %d",__LINE__);
}
/* Translations. */
PP(pp_rv2gv)
{
dSP; dTOPss;
if (SvROK(sv)) {
wasref:
tryAMAGICunDEREF(to_gv);
sv = SvRV(sv);
if (SvTYPE(sv) == SVt_PVIO) {
GV *gv = (GV*) sv_newmortal();
gv_init(gv, 0, "", 0, 0);
GvIOp(gv) = (IO *)sv;
(void)SvREFCNT_inc(sv);
sv = (SV*) gv;
}
else if (SvTYPE(sv) != SVt_PVGV)
DIE(aTHX_ "Not a GLOB reference");
}
else {
if (SvTYPE(sv) != SVt_PVGV) {
char *sym;
STRLEN len;
if (SvGMAGICAL(sv)) {
mg_get(sv);
if (SvROK(sv))
goto wasref;
}
if (!SvOK(sv) && sv != &PL_sv_undef) {
/* If this is a 'my' scalar and flag is set then vivify
* NI-S 1999/05/07
*/
if (SvREADONLY(sv))
Perl_croak(aTHX_ PL_no_modify);
if (PL_op->op_private & OPpDEREF) {
char *name;
GV *gv;
if (cUNOP->op_targ) {
STRLEN len;
SV *namesv = PAD_SV(cUNOP->op_targ);
name = SvPV(namesv, len);
gv = (GV*)NEWSV(0,0);
gv_init(gv, CopSTASH(PL_curcop), name, len, 0);
}
else {
name = CopSTASHPV(PL_curcop);
gv = newGVgen(name);
}
if (SvTYPE(sv) < SVt_RV)
sv_upgrade(sv, SVt_RV);
if (SvPVX(sv)) {
SvOOK_off(sv); /* backoff */
if (SvLEN(sv))
Safefree(SvPVX(sv));
SvLEN(sv)=SvCUR(sv)=0;
}
SvRV(sv) = (SV*)gv;
SvROK_on(sv);
SvSETMAGIC(sv);
goto wasref;
}
if (PL_op->op_flags & OPf_REF ||
PL_op->op_private & HINT_STRICT_REFS)
DIE(aTHX_ PL_no_usym, "a symbol");
if (ckWARN(WARN_UNINITIALIZED))
report_uninit();
RETSETUNDEF;
}
sym = SvPV(sv,len);
if ((PL_op->op_flags & OPf_SPECIAL) &&
!(PL_op->op_flags & OPf_MOD))
{
sv = (SV*)gv_fetchpv(sym, FALSE, SVt_PVGV);
if (!sv
&& (!is_gv_magical(sym,len,0)
|| !(sv = (SV*)gv_fetchpv(sym, TRUE, SVt_PVGV))))
{
RETSETUNDEF;
}
}
else {
if (PL_op->op_private & HINT_STRICT_REFS)
DIE(aTHX_ PL_no_symref, sym, "a symbol");
sv = (SV*)gv_fetchpv(sym, TRUE, SVt_PVGV);
}
}
}
if (PL_op->op_private & OPpLVAL_INTRO)
save_gp((GV*)sv, !(PL_op->op_flags & OPf_SPECIAL));
SETs(sv);
RETURN;
}
PP(pp_rv2sv)
{
GV *gv = Nullgv;
dSP; dTOPss;
if (SvROK(sv)) {
wasref:
tryAMAGICunDEREF(to_sv);
sv = SvRV(sv);
switch (SvTYPE(sv)) {
case SVt_PVAV:
case SVt_PVHV:
case SVt_PVCV:
DIE(aTHX_ "Not a SCALAR reference");
}
}
else {
char *sym;
STRLEN len;
gv = (GV*)sv;
if (SvTYPE(gv) != SVt_PVGV) {
if (SvGMAGICAL(sv)) {
mg_get(sv);
if (SvROK(sv))
goto wasref;
}
if (!SvOK(sv)) {
if (PL_op->op_flags & OPf_REF ||
PL_op->op_private & HINT_STRICT_REFS)
DIE(aTHX_ PL_no_usym, "a SCALAR");
if (ckWARN(WARN_UNINITIALIZED))
report_uninit();
RETSETUNDEF;
}
sym = SvPV(sv, len);
if ((PL_op->op_flags & OPf_SPECIAL) &&
!(PL_op->op_flags & OPf_MOD))
{
gv = (GV*)gv_fetchpv(sym, FALSE, SVt_PV);
if (!gv
&& (!is_gv_magical(sym,len,0)
|| !(gv = (GV*)gv_fetchpv(sym, TRUE, SVt_PV))))
{
RETSETUNDEF;
}
}
else {
if (PL_op->op_private & HINT_STRICT_REFS)
DIE(aTHX_ PL_no_symref, sym, "a SCALAR");
gv = (GV*)gv_fetchpv(sym, TRUE, SVt_PV);
}
}
sv = GvSV(gv);
}
if (PL_op->op_flags & OPf_MOD) {
if (PL_op->op_private & OPpLVAL_INTRO) {
if (cUNOP->op_first->op_type == OP_NULL)
sv = save_scalar((GV*)TOPs);
else if (gv)
sv = save_scalar(gv);
else
Perl_croak(aTHX_ PL_no_localize_ref);
}
else if (PL_op->op_private & OPpDEREF)
vivify_ref(sv, PL_op->op_private & OPpDEREF);
}
SETs(sv);
RETURN;
}
PP(pp_av2arylen)
{
dSP;
AV *av = (AV*)TOPs;
SV *sv = AvARYLEN(av);
if (!sv) {
AvARYLEN(av) = sv = NEWSV(0,0);
sv_upgrade(sv, SVt_IV);
sv_magic(sv, (SV*)av, PERL_MAGIC_arylen, Nullch, 0);
}
SETs(sv);
RETURN;
}
PP(pp_pos)
{
dSP; dTARGET; dPOPss;
if (PL_op->op_flags & OPf_MOD || LVRET) {
if (SvTYPE(TARG) < SVt_PVLV) {
sv_upgrade(TARG, SVt_PVLV);
sv_magic(TARG, Nullsv, PERL_MAGIC_pos, Nullch, 0);
}
LvTYPE(TARG) = '.';
if (LvTARG(TARG) != sv) {
if (LvTARG(TARG))
SvREFCNT_dec(LvTARG(TARG));
LvTARG(TARG) = SvREFCNT_inc(sv);
}
PUSHs(TARG); /* no SvSETMAGIC */
RETURN;
}
else {
MAGIC* mg;
if (SvTYPE(sv) >= SVt_PVMG && SvMAGIC(sv)) {
mg = mg_find(sv, PERL_MAGIC_regex_global);
if (mg && mg->mg_len >= 0) {
I32 i = mg->mg_len;
if (DO_UTF8(sv))
sv_pos_b2u(sv, &i);
PUSHi(i + PL_curcop->cop_arybase);
RETURN;
}
}
RETPUSHUNDEF;
}
}
PP(pp_rv2cv)
{
dSP;
GV *gv;
HV *stash;
/* We usually try to add a non-existent subroutine in case of AUTOLOAD. */
/* (But not in defined().) */
CV *cv = sv_2cv(TOPs, &stash, &gv, !(PL_op->op_flags & OPf_SPECIAL));
if (cv) {
if (CvCLONE(cv))
cv = (CV*)sv_2mortal((SV*)cv_clone(cv));
if ((PL_op->op_private & OPpLVAL_INTRO)) {
if (gv && GvCV(gv) == cv && (gv = gv_autoload4(GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv), FALSE)))
cv = GvCV(gv);
if (!CvLVALUE(cv))
DIE(aTHX_ "Can't modify non-lvalue subroutine call");
}
}
else
cv = (CV*)&PL_sv_undef;
SETs((SV*)cv);
RETURN;
}
PP(pp_prototype)
{
dSP;
CV *cv;
HV *stash;
GV *gv;
SV *ret;
ret = &PL_sv_undef;
if (SvPOK(TOPs) && SvCUR(TOPs) >= 7) {
char *s = SvPVX(TOPs);
if (strnEQ(s, "CORE::", 6)) {
int code;
code = keyword(s + 6, SvCUR(TOPs) - 6);
if (code < 0) { /* Overridable. */
#define MAX_ARGS_OP ((sizeof(I32) - 1) * 2)
int i = 0, n = 0, seen_question = 0;
I32 oa;
char str[ MAX_ARGS_OP * 2 + 2 ]; /* One ';', one '\0' */
if (code == -KEY_chop || code == -KEY_chomp)
goto set;
while (i < MAXO) { /* The slow way. */
if (strEQ(s + 6, PL_op_name[i])
|| strEQ(s + 6, PL_op_desc[i]))
{
goto found;
}
i++;
}
goto nonesuch; /* Should not happen... */
found:
oa = PL_opargs[i] >> OASHIFT;
while (oa) {
if (oa & OA_OPTIONAL && !seen_question) {
seen_question = 1;
str[n++] = ';';
}
else if (n && str[0] == ';' && seen_question)
goto set; /* XXXX system, exec */
if ((oa & (OA_OPTIONAL - 1)) >= OA_AVREF
&& (oa & (OA_OPTIONAL - 1)) <= OA_SCALARREF
/* But globs are already references (kinda) */
&& (oa & (OA_OPTIONAL - 1)) != OA_FILEREF
) {
str[n++] = '\\';
}
str[n++] = ("?$@@%&*$")[oa & (OA_OPTIONAL - 1)];
oa = oa >> 4;
}
str[n++] = '\0';
ret = sv_2mortal(newSVpvn(str, n - 1));
}
else if (code) /* Non-Overridable */
goto set;
else { /* None such */
nonesuch:
DIE(aTHX_ "Can't find an opnumber for \"%s\"", s+6);
}
}
}
cv = sv_2cv(TOPs, &stash, &gv, FALSE);
if (cv && SvPOK(cv))
ret = sv_2mortal(newSVpvn(SvPVX(cv), SvCUR(cv)));
set:
SETs(ret);
RETURN;
}
PP(pp_anoncode)
{
dSP;
CV* cv = (CV*)PAD_SV(PL_op->op_targ);
if (CvCLONE(cv))
cv = (CV*)sv_2mortal((SV*)cv_clone(cv));
EXTEND(SP,1);
PUSHs((SV*)cv);
RETURN;
}
/* pp_srefgen - single-argument \ operator: replace the top of stack
 * with a reference to it. */
PP(pp_srefgen)
{
    dSP;
    *SP = refto(*SP);
    RETURN;
}
/* pp_refgen - the \ operator. In scalar context, take a reference to
 * the last stack item (or to undef if there is none); in list context,
 * replace every item from MARK upward with a reference to it. */
PP(pp_refgen)
{
    dSP; dMARK;
    if (GIMME != G_ARRAY) {
        if (++MARK <= SP)
            *MARK = *SP;
        else
            *MARK = &PL_sv_undef;
        *MARK = refto(*MARK);
        SP = MARK;
        RETURN;
    }
    EXTEND_MORTAL(SP - MARK);
    while (++MARK <= SP)
        *MARK = refto(*MARK);
    RETURN;
}
/* S_refto - build a new mortal reference (RV) pointing at sv.
 *
 * Special cases handled before the refcount bump:
 *  - 'y'-magic PVLVs (deferred element lvalues) are vivified and
 *    dereferenced to their target;
 *  - non-real AVs flagged REIFY are reified so the reference is safe;
 *  - pad temporaries (that are not pad GVs) are copied, so the ref does
 *    not alias a reused pad slot.
 */
STATIC SV*
S_refto(pTHX_ SV *sv)
{
    SV* rv;

    if (SvTYPE(sv) == SVt_PVLV && LvTYPE(sv) == 'y') {
        if (LvTARGLEN(sv))
            vivify_defelem(sv);
        if (!(sv = LvTARG(sv)))
            sv = &PL_sv_undef;
        else
            (void)SvREFCNT_inc(sv);
    }
    else if (SvTYPE(sv) == SVt_PVAV) {
        if (!AvREAL((AV*)sv) && AvREIFY((AV*)sv))
            av_reify((AV*)sv);
        SvTEMP_off(sv);
        (void)SvREFCNT_inc(sv);
    }
    else if (SvPADTMP(sv) && !IS_PADGV(sv))
        sv = newSVsv(sv);
    else {
        SvTEMP_off(sv);
        (void)SvREFCNT_inc(sv);
    }
    rv = sv_newmortal();
    sv_upgrade(rv, SVt_RV);
    SvRV(rv) = sv;
    SvROK_on(rv);
    return rv;
}
/* pp_ref - implements ref(): pushes the reference type string of the
 * argument (via sv_reftype, e.g. "ARRAY", "HASH", or a class name), or
 * a false value if the argument is not a reference. */
PP(pp_ref)
{
    dSP; dTARGET;
    SV *sv;
    char *pv;

    sv = POPs;
    /* Resolve get-magic (ties, etc.) before the ROK check. */
    if (sv && SvGMAGICAL(sv))
        mg_get(sv);
    if (!sv || !SvROK(sv))
        RETPUSHNO;
    sv = SvRV(sv);
    pv = sv_reftype(sv,TRUE);
    PUSHp(pv, strlen(pv));
    RETURN;
}
PP(pp_bless)
{
dSP;
HV *stash;
if (MAXARG == 1)
stash = CopSTASH(PL_curcop);
else {
SV *ssv = POPs;
STRLEN len;
char *ptr;
if (ssv && !SvGMAGICAL(ssv) && !SvAMAGIC(ssv) && SvROK(ssv))
Perl_croak(aTHX_ "Attempt to bless into a reference");
ptr = SvPV(ssv,len);
if (ckWARN(WARN_MISC) && len == 0)
Perl_warner(aTHX_ packWARN(WARN_MISC),
"Explicit blessing to '' (assuming package main)");
stash = gv_stashpvn(ptr, len, TRUE);
}
(void)sv_bless(TOPs, stash);
RETURN;
}
PP(pp_gelem)
{
GV *gv;
SV *sv;
SV *tmpRef;
char *elem;
dSP;
STRLEN n_a;
sv = POPs;
elem = SvPV(sv, n_a);
gv = (GV*)POPs;
tmpRef = Nullsv;
sv = Nullsv;
if (elem) {
/* elem will always be NUL terminated. */
const char *elem2 = elem + 1;
switch (*elem) {
case 'A':
if (strEQ(elem2, "RRAY"))
tmpRef = (SV*)GvAV(gv);
break;
case 'C':
if (strEQ(elem2, "ODE"))
tmpRef = (SV*)GvCVu(gv);
break;
case 'F':
if (strEQ(elem2, "ILEHANDLE")) {
/* finally deprecated in 5.8.0 */
deprecate("*glob{FILEHANDLE}");
tmpRef = (SV*)GvIOp(gv);
}
else
if (strEQ(elem2, "ORMAT"))
tmpRef = (SV*)GvFORM(gv);
break;
case 'G':
if (strEQ(elem2, "LOB"))
tmpRef = (SV*)gv;
break;
case 'H':
if (strEQ(elem2, "ASH"))
tmpRef = (SV*)GvHV(gv);
break;
case 'I':
if (*elem2 == 'O' && !elem[2])
tmpRef = (SV*)GvIOp(gv);
break;
case 'N':
if (strEQ(elem2, "AME"))
sv = newSVpvn(GvNAME(gv), GvNAMELEN(gv));
break;
case 'P':
if (strEQ(elem2, "ACKAGE")) {
char *name = HvNAME(GvSTASH(gv));
sv = newSVpv(name ? name : "__ANON__", 0);
}
break;
case 'S':
if (strEQ(elem2, "CALAR"))
tmpRef = GvSV(gv);
break;
}
}
if (tmpRef)
sv = newRV(tmpRef);
if (sv)
sv_2mortal(sv);
else
sv = &PL_sv_undef;
XPUSHs(sv);
RETURN;
}
/* Pattern matching */
PP(pp_study)
{
dSP; dPOPss;
register unsigned char *s;
register I32 pos;
register I32 ch;
register I32 *sfirst;
register I32 *snext;
STRLEN len;
if (sv == PL_lastscream) {
if (SvSCREAM(sv))
RETPUSHYES;
}
else {
if (PL_lastscream) {
SvSCREAM_off(PL_lastscream);
SvREFCNT_dec(PL_lastscream);
}
PL_lastscream = SvREFCNT_inc(sv);
}
s = (unsigned char*)(SvPV(sv, len));
pos = len;
if (pos <= 0)
RETPUSHNO;
if (pos > PL_maxscream) {
if (PL_maxscream < 0) {
PL_maxscream = pos + 80;
New(301, PL_screamfirst, 256, I32);
New(302, PL_screamnext, PL_maxscream, I32);
}
else {
PL_maxscream = pos + pos / 4;
Renew(PL_screamnext, PL_maxscream, I32);
}
}
sfirst = PL_screamfirst;
snext = PL_screamnext;
if (!sfirst || !snext)
DIE(aTHX_ "do_study: out of memory");
for (ch = 256; ch; --ch)
*sfirst++ = -1;
sfirst -= 256;
while (--pos >= 0) {
ch = s[pos];
if (sfirst[ch] >= 0)
snext[pos] = sfirst[ch] - pos;
else
snext[pos] = -pos;
sfirst[ch] = pos;
}
SvSCREAM_on(sv);
/* piggyback on m//g magic */
sv_magic(sv, Nullsv, PERL_MAGIC_regex_global, Nullch, 0);
RETPUSHYES;
}
PP(pp_trans)
{
dSP; dTARG;
SV *sv;
if (PL_op->op_flags & OPf_STACKED)
sv = POPs;
else {
sv = DEFSV;
EXTEND(SP,1);
}
TARG = sv_newmortal();
PUSHi(do_trans(sv));
RETURN;
}
/* Lvalue operators. */
/* pp_schop - chop() on a single scalar; TARG receives the character
 * removed. */
PP(pp_schop)
{
    dSP; dTARGET;
    do_chop(TARG, TOPs);
    SETTARG;
    RETURN;
}

/* pp_chop - chop() on a list of scalars; TARG ends up holding the last
 * character removed. */
PP(pp_chop)
{
    dSP; dMARK; dTARGET; dORIGMARK;
    while (MARK < SP)
        do_chop(TARG, *++MARK);
    SP = ORIGMARK;
    PUSHTARG;
    RETURN;
}

/* pp_schomp - chomp() on a single scalar; yields the number of
 * characters removed. */
PP(pp_schomp)
{
    dSP; dTARGET;
    SETi(do_chomp(TOPs));
    RETURN;
}

/* pp_chomp - chomp() on a list; yields the total number of characters
 * removed. */
PP(pp_chomp)
{
    dSP; dMARK; dTARGET;
    register I32 count = 0;

    while (SP > MARK)
        count += do_chomp(POPs);
    PUSHi(count);
    RETURN;
}
/* pp_defined - implements defined(). Arrays and hashes count as
 * defined when they have storage or are (tied-)magical; code values
 * when they have a compiled body or an XSUB; everything else via SvOK
 * after resolving get-magic. */
PP(pp_defined)
{
    dSP;
    register SV* sv;

    sv = POPs;
    if (!sv || !SvANY(sv))
        RETPUSHNO;
    switch (SvTYPE(sv)) {
    case SVt_PVAV:
        if (AvMAX(sv) >= 0 || SvGMAGICAL(sv)
            || (SvRMAGICAL(sv) && mg_find(sv, PERL_MAGIC_tied)))
            RETPUSHYES;
        break;
    case SVt_PVHV:
        if (HvARRAY(sv) || SvGMAGICAL(sv)
            || (SvRMAGICAL(sv) && mg_find(sv, PERL_MAGIC_tied)))
            RETPUSHYES;
        break;
    case SVt_PVCV:
        if (CvROOT(sv) || CvXSUB(sv))
            RETPUSHYES;
        break;
    default:
        if (SvGMAGICAL(sv))
            mg_get(sv);
        if (SvOK(sv))
            RETPUSHYES;
    }
    RETPUSHNO;
}
PP(pp_undef)
{
dSP;
SV *sv;
if (!PL_op->op_private) {
EXTEND(SP, 1);
RETPUSHUNDEF;
}
sv = POPs;
if (!sv)
RETPUSHUNDEF;
if (SvTHINKFIRST(sv))
sv_force_normal(sv);
switch (SvTYPE(sv)) {
case SVt_NULL:
break;
case SVt_PVAV:
av_undef((AV*)sv);
break;
case SVt_PVHV:
hv_undef((HV*)sv);
break;
case SVt_PVCV:
if (ckWARN(WARN_MISC) && cv_const_sv((CV*)sv))
Perl_warner(aTHX_ packWARN(WARN_MISC), "Constant subroutine %s undefined",
CvANON((CV*)sv) ? "(anonymous)" : GvENAME(CvGV((CV*)sv)));
/* FALL THROUGH */
case SVt_PVFM:
{
/* let user-undef'd sub keep its identity */
GV* gv = CvGV((CV*)sv);
cv_undef((CV*)sv);
CvGV((CV*)sv) = gv;
}
break;
case SVt_PVGV:
if (SvFAKE(sv))
SvSetMagicSV(sv, &PL_sv_undef);
else {
GP *gp;
gp_free((GV*)sv);
Newz(602, gp, 1, GP);
GvGP(sv) = gp_ref(gp);
GvSV(sv) = NEWSV(72,0);
GvLINE(sv) = CopLINE(PL_curcop);
GvEGV(sv) = (GV*)sv;
GvMULTI_on(sv);
}
break;
default:
if (SvTYPE(sv) >= SVt_PV && SvPVX(sv) && SvLEN(sv)) {
SvOOK_off(sv);
Safefree(SvPVX(sv));
SvPV_set(sv, Nullch);
SvLEN_set(sv, 0);
}
SvOK_off(sv);
SvSETMAGIC(sv);
}
RETPUSHUNDEF;
}
PP(pp_predec)
{
dSP;
if (SvTYPE(TOPs) > SVt_PVLV)
DIE(aTHX_ PL_no_modify);
if (!SvREADONLY(TOPs) && SvIOK_notUV(TOPs) && !SvNOK(TOPs) && !SvPOK(TOPs)
&& SvIVX(TOPs) != IV_MIN)
{
--SvIVX(TOPs);
SvFLAGS(TOPs) &= ~(SVp_NOK|SVp_POK);
}
else
sv_dec(TOPs);
SvSETMAGIC(TOPs);
return NORMAL;
}
PP(pp_postinc)
{
dSP; dTARGET;
if (SvTYPE(TOPs) > SVt_PVLV)
DIE(aTHX_ PL_no_modify);
sv_setsv(TARG, TOPs);
if (!SvREADONLY(TOPs) && SvIOK_notUV(TOPs) && !SvNOK(TOPs) && !SvPOK(TOPs)
&& SvIVX(TOPs) != IV_MAX)
{
++SvIVX(TOPs);
SvFLAGS(TOPs) &= ~(SVp_NOK|SVp_POK);
}
else
sv_inc(TOPs);
SvSETMAGIC(TOPs);
/* special case for undef: see thread at 2003-03/msg00536.html in archive */
if (!SvOK(TARG))
sv_setiv(TARG, 0);
SETs(TARG);
return NORMAL;
}
PP(pp_postdec)
{
dSP; dTARGET;
if (SvTYPE(TOPs) > SVt_PVLV)
DIE(aTHX_ PL_no_modify);
sv_setsv(TARG, TOPs);
if (!SvREADONLY(TOPs) && SvIOK_notUV(TOPs) && !SvNOK(TOPs) && !SvPOK(TOPs)
&& SvIVX(TOPs) != IV_MIN)
{
--SvIVX(TOPs);
SvFLAGS(TOPs) &= ~(SVp_NOK|SVp_POK);
}
else
sv_dec(TOPs);
SvSETMAGIC(TOPs);
SETs(TARG);
return NORMAL;
}
/* Ordinary operators. */
/* pp_pow: exponentiation (**).
 * With PERL_PRESERVE_IVUV, integer ** non-negative-integer is computed by
 * repeated squaring whenever the result provably fits, so that integer
 * results survive exactly; all other cases fall through to Perl_pow() on
 * NVs (with an attempted integer downgrade afterwards if both inputs
 * were integers).
 * BUGFIX: the highest-set-bit bisection below shifted a plain int
 * ("1 << (lowbit + diff)"); with a 64-bit UV the shift count reaches 32+
 * on the first iteration, which is undefined behaviour for a 32-bit int
 * and yields a wrong mask where it happens to "work".  The shift is now
 * done in UV width. */
PP(pp_pow)
{
    dSP; dATARGET;
#ifdef PERL_PRESERVE_IVUV
    bool is_int = 0;
#endif
    tryAMAGICbin(pow,opASSIGN);
#ifdef PERL_PRESERVE_IVUV
    /* For integer to integer power, we do the calculation by hand wherever
       we're sure it is safe; otherwise we call pow() and try to convert to
       integer afterwards. */
    {
        SvIV_please(TOPm1s);
        if (SvIOK(TOPm1s)) {
            bool baseuok = SvUOK(TOPm1s);
            UV baseuv;

            if (baseuok) {
                baseuv = SvUVX(TOPm1s);
            } else {
                IV iv = SvIVX(TOPm1s);
                if (iv >= 0) {
                    baseuv = iv;
                    baseuok = TRUE; /* effectively it's a UV now */
                } else {
                    baseuv = -iv; /* abs, baseuok == false records sign */
                }
            }
            SvIV_please(TOPs);
            if (SvIOK(TOPs)) {
                UV power;

                if (SvUOK(TOPs)) {
                    power = SvUVX(TOPs);
                } else {
                    IV iv = SvIVX(TOPs);
                    if (iv >= 0) {
                        power = iv;
                    } else {
                        goto float_it; /* Can't do negative powers this way. */
                    }
                }
                /* now we have integer ** positive integer. */
                is_int = 1;

                /* foo & (foo - 1) is zero only for a power of 2. */
                if (!(baseuv & (baseuv - 1))) {
                    /* We are raising power-of-2 to a positive integer.
                       The logic here will work for any base (even non-integer
                       bases) but it can be less accurate than
                       pow (base,power) or exp (power * log (base)) when the
                       intermediate values start to spill out of the mantissa.
                       With powers of 2 we know this can't happen.
                       And powers of 2 are the favourite thing for perl
                       programmers to notice ** not doing what they mean. */
                    NV result = 1.0;
                    NV base = baseuok ? baseuv : -(NV)baseuv;
                    int n = 0;

                    /* binary exponentiation: multiply in base^(2^n) for
                       each set bit n of power */
                    for (; power; base *= base, n++) {
                        /* Do I look like I trust gcc with long longs here?
                           Do I hell. */
                        UV bit = (UV)1 << (UV)n;
                        if (power & bit) {
                            result *= base;
                            /* Only bother to clear the bit if it is set. */
                            power -= bit;
                            /* Avoid squaring base again if we're done. */
                            if (power == 0) break;
                        }
                    }
                    SP--;
                    SETn( result );
                    SvIV_please(TOPs);  /* downgrade to IV/UV if exact */
                    RETURN;
                } else {
                    register unsigned int highbit = 8 * sizeof(UV);
                    register unsigned int lowbit = 0;
                    register unsigned int diff;
                    bool odd_power = (bool)(power & 1);
                    /* Bisect for the bit-length of baseuv.  The mask shift
                       must be performed in UV width (see BUGFIX note in the
                       function header). */
                    while ((diff = (highbit - lowbit) >> 1)) {
                        if (baseuv & ~(((UV)1 << (lowbit + diff)) - 1))
                            lowbit += diff;
                        else
                            highbit -= diff;
                    }
                    /* we now have baseuv < 2 ** highbit */
                    /* NOTE(review): power * highbit is UV arithmetic and can
                       wrap for enormous power, falsely passing this test for
                       contrived inputs — confirm whether an explicit
                       power <= (8 * sizeof(UV)) / highbit guard is wanted. */
                    if (power * highbit <= 8 * sizeof(UV)) {
                        /* result will definitely fit in UV, so use UV math
                           on same algorithm as above */
                        register UV result = 1;
                        register UV base = baseuv;
                        register int n = 0;

                        for (; power; base *= base, n++) {
                            register UV bit = (UV)1 << (UV)n;
                            if (power & bit) {
                                result *= base;
                                power -= bit;
                                if (power == 0) break;
                            }
                        }
                        SP--;
                        if (baseuok || !odd_power)
                            /* answer is positive */
                            SETu( result );
                        else if (result <= (UV)IV_MAX)
                            /* answer negative, fits in IV */
                            SETi( -(IV)result );
                        else if (result == (UV)IV_MIN)
                            /* 2's complement assumption: special case IV_MIN */
                            SETi( IV_MIN );
                        else
                            /* answer negative, doesn't fit */
                            SETn( -(NV)result );
                        RETURN;
                    }
                }
            }
        }
    }
  float_it:
#endif
    {
        dPOPTOPnnrl;
        SETn( Perl_pow( left, right) );
#ifdef PERL_PRESERVE_IVUV
        if (is_int)
            SvIV_please(TOPs);  /* both args were ints: prefer exact result */
#endif
        RETURN;
    }
}
/* pp_multiply: numeric multiplication (*).
 * With PERL_PRESERVE_IVUV both operands are reduced to sign flag plus
 * magnitude, each magnitude split into half-UV-width "digits"; when the
 * high halves guarantee no overflow the product is formed in UV
 * arithmetic and the sign reapplied, otherwise we drop to NV multiply. */
PP(pp_multiply)
{
    dSP; dATARGET; tryAMAGICbin(mult,opASSIGN);
#ifdef PERL_PRESERVE_IVUV
    SvIV_please(TOPs);
    if (SvIOK(TOPs)) {
        /* Unless the left argument is integer in range we are going to have to
           use NV maths. Hence only attempt to coerce the right argument if
           we know the left is integer. */
        /* Left operand is defined, so is it IV? */
        SvIV_please(TOPm1s);
        if (SvIOK(TOPm1s)) {
            bool auvok = SvUOK(TOPm1s);
            bool buvok = SvUOK(TOPs);
            /* topmask/botmask select the high/low half of a UV */
            const UV topmask = (~ (UV)0) << (4 * sizeof (UV));
            const UV botmask = ~((~ (UV)0) << (4 * sizeof (UV)));
            UV alow;
            UV ahigh;
            UV blow;
            UV bhigh;

            if (auvok) {
                alow = SvUVX(TOPm1s);
            } else {
                IV aiv = SvIVX(TOPm1s);
                if (aiv >= 0) {
                    alow = aiv;
                    auvok = TRUE; /* effectively it's a UV now */
                } else {
                    alow = -aiv; /* abs, auvok == false records sign */
                }
            }
            if (buvok) {
                blow = SvUVX(TOPs);
            } else {
                IV biv = SvIVX(TOPs);
                if (biv >= 0) {
                    blow = biv;
                    buvok = TRUE; /* effectively it's a UV now */
                } else {
                    blow = -biv; /* abs, buvok == false records sign */
                }
            }

            /* If this does sign extension on unsigned it's time for plan B */
            ahigh = alow >> (4 * sizeof (UV));
            alow &= botmask;
            bhigh = blow >> (4 * sizeof (UV));
            blow &= botmask;
            if (ahigh && bhigh) {
                /* eg 32 bit is at least 0x10000 * 0x10000 == 0x100000000
                   which is overflow. Drop to NVs below. */
            } else if (!ahigh && !bhigh) {
                /* eg 32 bit is at most 0xFFFF * 0xFFFF == 0xFFFE0001
                   so the unsigned multiply cannot overflow. */
                UV product = alow * blow;
                if (auvok == buvok) {
                    /* -ve * -ve or +ve * +ve gives a +ve result. */
                    SP--;
                    SETu( product );
                    RETURN;
                } else if (product <= (UV)IV_MIN) {
                    /* 2s complement assumption that (UV)-IV_MIN is correct. */
                    /* -ve result, which could overflow an IV */
                    SP--;
                    SETi( -(IV)product );
                    RETURN;
                } /* else drop to NVs below. */
            } else {
                /* One operand is large, 1 small */
                UV product_middle;
                if (bhigh) {
                    /* swap the operands so a is always the large one */
                    ahigh = bhigh;
                    bhigh = blow; /* bhigh now the temp var for the swap */
                    blow = alow;
                    alow = bhigh;
                }
                /* now, ((ahigh * blow) << half_UV_len) + (alow * blow)
                   multiplies can't overflow. shift can, add can, -ve can. */
                product_middle = ahigh * blow;
                if (!(product_middle & topmask)) {
                    /* OK, (ahigh * blow) won't lose bits when we shift it. */
                    UV product_low;
                    product_middle <<= (4 * sizeof (UV));
                    product_low = alow * blow;

                    /* as for pp_add, UV + something mustn't get smaller.
                       IIRC ANSI mandates this wrapping *behaviour* for
                       unsigned whatever the actual representation*/
                    product_low += product_middle;
                    if (product_low >= product_middle) {
                        /* didn't overflow */
                        if (auvok == buvok) {
                            /* -ve * -ve or +ve * +ve gives a +ve result. */
                            SP--;
                            SETu( product_low );
                            RETURN;
                        } else if (product_low <= (UV)IV_MIN) {
                            /* 2s complement assumption again */
                            /* -ve result, which could overflow an IV */
                            SP--;
                            SETi( -(IV)product_low );
                            RETURN;
                        } /* else drop to NVs below. */
                    }
                } /* product_middle too large */
            } /* ahigh && bhigh */
        } /* SvIOK(TOPm1s) */
    } /* SvIOK(TOPs) */
#endif
    {
        dPOPTOPnnrl;
        SETn( left * right );
        RETURN;
    }
}
/* pp_divide: numeric division (/).
 * Attempts an exact UV division first only under SLOPPYDIVIDE, or when
 * IV/UV preservation is on and NVs cannot hold every UV; otherwise goes
 * straight to NV division.  Division by zero dies either way. */
PP(pp_divide)
{
    dSP; dATARGET; tryAMAGICbin(div,opASSIGN);
    /* Only try to do UV divide first
       if ((SLOPPYDIVIDE is true) or
           (PERL_PRESERVE_IVUV is true and one or both SV is a UV too large
            to preserve))
       The assumption is that it is better to use floating point divide
       whenever possible, only doing integer divide first if we can't be sure.
       If NV_PRESERVES_UV is true then we know at compile time that no UV
       can be too large to preserve, so don't need to compile the code to
       test the size of UVs.  */

#ifdef SLOPPYDIVIDE
#  define PERL_TRY_UV_DIVIDE
    /* ensure that 20./5. == 4. */
#else
#  ifdef PERL_PRESERVE_IVUV
#    ifndef NV_PRESERVES_UV
#      define PERL_TRY_UV_DIVIDE
#    endif
#  endif
#endif

#ifdef PERL_TRY_UV_DIVIDE
    SvIV_please(TOPs);
    if (SvIOK(TOPs)) {
        SvIV_please(TOPm1s);
        if (SvIOK(TOPm1s)) {
            bool left_non_neg = SvUOK(TOPm1s);
            bool right_non_neg = SvUOK(TOPs);
            UV left;
            UV right;

            /* reduce right operand to sign flag + magnitude */
            if (right_non_neg) {
                right = SvUVX(TOPs);
            }
            else {
                IV biv = SvIVX(TOPs);
                if (biv >= 0) {
                    right = biv;
                    right_non_neg = TRUE; /* effectively it's a UV now */
                }
                else {
                    right = -biv;
                }
            }
            /* historically undef()/0 gives a "Use of uninitialized value"
               warning before dieing, hence this test goes here.
               If it were immediately before the second SvIV_please, then
               DIE() would be invoked before left was even inspected, so
               no inpsection would give no warning.  */
            if (right == 0)
                DIE(aTHX_ "Illegal division by zero");

            /* reduce left operand the same way */
            if (left_non_neg) {
                left = SvUVX(TOPm1s);
            }
            else {
                IV aiv = SvIVX(TOPm1s);
                if (aiv >= 0) {
                    left = aiv;
                    left_non_neg = TRUE; /* effectively it's a UV now */
                }
                else {
                    left = -aiv;
                }
            }

            if (left >= right
#ifdef SLOPPYDIVIDE
                /* For sloppy divide we always attempt integer division.  */
#else
                /* Otherwise we only attempt it if either or both operands
                   would not be preserved by an NV.  If both fit in NVs
                   we fall through to the NV divide code below.  However,
                   as left >= right to ensure integer result here, we know that
                   we can skip the test on the right operand - right big
                   enough not to be preserved can't get here unless left is
                   also too big.  */

                && (left > ((UV)1 << NV_PRESERVES_UV_BITS))
#endif
                ) {
                /* Integer division can't overflow, but it can be imprecise. */
                UV result = left / right;
                if (result * right == left) {  /* only keep exact quotients */
                    SP--; /* result is valid */
                    if (left_non_neg == right_non_neg) {
                        /* signs identical, result is positive.  */
                        SETu( result );
                        RETURN;
                    }
                    /* 2s complement assumption */
                    if (result <= (UV)IV_MIN)
                        SETi( -(IV)result );
                    else {
                        /* It's exact but too negative for IV. */
                        SETn( -(NV)result );
                    }
                    RETURN;
                } /* tried integer divide but it was not an integer result */
            } /* else (PERL_ABS(result) < 1.0) or (both UVs in range for NV) */
        } /* left wasn't SvIOK */
    } /* right wasn't SvIOK */
#endif /* PERL_TRY_UV_DIVIDE */
    {
        dPOPPOPnnrl;
        if (right == 0.0)
            DIE(aTHX_ "Illegal division by zero");
        PUSHn( left / right );
        RETURN;
    }
}
/* pp_modulo: modulus (%) with Perl semantics: for mixed signs a non-zero
 * remainder is adjusted so the result takes the sign of the right operand.
 * Each operand is captured either as a UV magnitude + sign flag, or — when
 * the right operand is an NV too large for a UV — both are promoted to NV
 * and Perl_fmod() is used.
 * CLEANUP: removed a redundant nested "if (SvIOK(TOPs))" that duplicated
 * the condition already established by the enclosing
 * "if (!use_double && SvIOK(TOPs))" — behaviour is unchanged. */
PP(pp_modulo)
{
    dSP; dATARGET; tryAMAGICbin(modulo,opASSIGN);
    {
        UV left  = 0;
        UV right = 0;
        bool left_neg = FALSE;
        bool right_neg = FALSE;
        bool use_double = FALSE;
        bool dright_valid = FALSE;
        NV dright = 0.0;
        NV dleft  = 0.0;

        /* --- capture the right operand --- */
        SvIV_please(TOPs);
        if (SvIOK(TOPs)) {
            right_neg = !SvUOK(TOPs);
            if (!right_neg) {
                right = SvUVX(POPs);
            } else {
                IV biv = SvIVX(POPs);
                if (biv >= 0) {
                    right = biv;
                    right_neg = FALSE; /* effectively it's a UV now */
                } else {
                    right = -biv;
                }
            }
        }
        else {
            dright = POPn;
            right_neg = dright < 0;
            if (right_neg)
                dright = -dright;
            if (dright < UV_MAX_P1) {
                right = U_V(dright);
                dright_valid = TRUE; /* In case we need to use double below. */
            } else {
                use_double = TRUE;
            }
        }

        /* At this point use_double is only true if right is out of range for
           a UV.  In range NV has been rounded down to nearest UV and
           use_double false.  */
        SvIV_please(TOPs);
        if (!use_double && SvIOK(TOPs)) {
            left_neg = !SvUOK(TOPs);
            if (!left_neg) {
                left = SvUVX(POPs);
            } else {
                IV aiv = SvIVX(POPs);
                if (aiv >= 0) {
                    left = aiv;
                    left_neg = FALSE; /* effectively it's a UV now */
                } else {
                    left = -aiv;
                }
            }
        }
        else {
            dleft = POPn;
            left_neg = dleft < 0;
            if (left_neg)
                dleft = -dleft;

            /* This should be exactly the 5.6 behaviour - if left and right are
               both in range for UV then use U_V() rather than floor.  */
            if (!use_double) {
                if (dleft < UV_MAX_P1) {
                    /* right was in range, so is dleft, so use UVs not double.
                     */
                    left = U_V(dleft);
                }
                /* left is out of range for UV, right was in range, so promote
                   right (back) to double.  */
                else {
                    /* The +0.5 is used in 5.6 even though it is not strictly
                       consistent with the implicit +0 floor in the U_V()
                       inside the #if 1. */
                    dleft = Perl_floor(dleft + 0.5);
                    use_double = TRUE;
                    if (dright_valid)
                        dright = Perl_floor(dright + 0.5);
                    else
                        dright = right;
                }
            }
        }

        if (use_double) {
            NV dans;

            if (!dright)
                DIE(aTHX_ "Illegal modulus zero");

            dans = Perl_fmod(dleft, dright);
            /* mixed signs: fold the remainder onto the right operand's side */
            if ((left_neg != right_neg) && dans)
                dans = dright - dans;
            if (right_neg)
                dans = -dans;
            sv_setnv(TARG, dans);
        }
        else {
            UV ans;

            if (!right)
                DIE(aTHX_ "Illegal modulus zero");

            ans = left % right;
            if ((left_neg != right_neg) && ans)
                ans = right - ans;
            if (right_neg) {
                /* XXX may warn: unary minus operator applied to unsigned type */
                /* could change -foo to be (~foo)+1 instead     */
                if (ans <= ~((UV)IV_MAX)+1)
                    sv_setiv(TARG, ~ans+1);  /* 2s complement negation */
                else
                    sv_setnv(TARG, -(NV)ans);
            }
            else
                sv_setuv(TARG, ans);
        }
        PUSHTARG;
        RETURN;
    }
}
/* pp_repeat: the x repetition operator, in both forms:
 *   - list repetition ((LIST) x N) when in list context with
 *     OPpREPEAT_DOLIST: the stack items between MARK and SP are
 *     replicated count-1 times with repeatcpy();
 *   - string repetition (STR x N): TARG's buffer is grown and the bytes
 *     copied count-1 times.
 * The repeat count is clamped: negative counts become 0, UV counts above
 * IV_MAX are clamped to IV_MAX. */
PP(pp_repeat)
{
  dSP; dATARGET; tryAMAGICbin(repeat,opASSIGN);
  {
    register IV count;
    dPOPss;
    if (SvGMAGICAL(sv))
        mg_get(sv);
    /* derive a sane IV repeat count from IV, UV, NV or stringish SV */
    if (SvIOKp(sv)) {
        if (SvUOK(sv)) {
            UV uv = SvUV(sv);
            if (uv > IV_MAX)
                count = IV_MAX; /* The best we can do? */
            else
                count = uv;
        } else {
            IV iv = SvIV(sv);
            if (iv < 0)
                count = 0;
            else
                count = iv;
        }
    }
    else if (SvNOKp(sv)) {
        NV nv = SvNV(sv);
        if (nv < 0.0)
            count = 0;
        else
            count = (IV)nv;
    }
    else
        count = SvIVx(sv);
    if (GIMME == G_ARRAY && PL_op->op_private & OPpREPEAT_DOLIST) {
        dMARK;
        I32 items = SP - MARK;
        I32 max;
        static const char oom_list_extend[] =
          "Out of memory during list extend";

        max = items * count;
        MEM_WRAP_CHECK_1(max, SV*, oom_list_extend);
        /* Did the max computation overflow? */
        if (items > 0 && max > 0 && (max < items || max < count))
           Perl_croak(aTHX_ oom_list_extend);
        MEXTEND(MARK, max);
        if (count > 1) {
            while (SP > MARK) {
#if 0
              /* This code was intended to fix 20010809.028:

                 $x = 'abcd';
                 for (($x =~ /./g) x 2) {
                     print chop; # "abcdabcd" expected as output.
                 }

               * but that change (#11635) broke this code:

                 $x = [("foo")x2]; # only one "foo" ended up in the anonlist.

               * I can't think of a better fix that doesn't introduce
               * an efficiency hit by copying the SVs. The stack isn't
               * refcounted, and mortalisation obviously doesn't
               * Do The Right Thing when the stack has more than
               * one pointer to the same mortal value.
               * .robin.
               */
                if (*SP) {
                    *SP = sv_2mortal(newSVsv(*SP));
                    SvREADONLY_on(*SP);
                }
#else
               if (*SP)
                   SvTEMP_off((*SP));
#endif
                SP--;
            }
            MARK++;
            /* replicate the original items items*(count-1) more times */
            repeatcpy((char*)(MARK + items), (char*)MARK,
                items * sizeof(SV*), count - 1);
            SP += max;
        }
        else if (count <= 0)
            SP -= items;
    }
    else {      /* Note: mark already snarfed by pp_list */
        SV *tmpstr = POPs;
        STRLEN len;
        bool isutf;
        static const char oom_string_extend[] =
          "Out of memory during string extend";

        SvSetSV(TARG, tmpstr);
        SvPV_force(TARG, len);
        isutf = DO_UTF8(TARG);
        if (count != 1) {
            if (count < 1)
                SvCUR_set(TARG, 0);
            else {
                STRLEN max = (UV)count * len;
                /* guard the count*len multiplication against wrapping */
                if (len > ((MEM_SIZE)~0)/count)
                     Perl_croak(aTHX_ oom_string_extend);
                MEM_WRAP_CHECK_1(max, char, oom_string_extend);
                SvGROW(TARG, max + 1);
                repeatcpy(SvPVX(TARG) + len, SvPVX(TARG), len, count - 1);
                SvCUR(TARG) *= count;
            }
            *SvEND(TARG) = '\0';
        }
        if (isutf)
            (void)SvPOK_only_UTF8(TARG);
        else
            (void)SvPOK_only(TARG);

        if (PL_op->op_private & OPpREPEAT_DOLIST) {
            /* The parser saw this as a list repeat, and there
               are probably several items on the stack. But we're
               in scalar context, and there's no pp_list to save us
               now. So drop the rest of the items -- <EMAIL>
             */
            dMARK;
            SP = MARK;
        }
        PUSHTARG;
    }
    RETURN;
  }
}
/* pp_subtract: numeric subtraction (-).
 * With PERL_PRESERVE_IVUV both operands are reduced to sign flag plus UV
 * magnitude; the subtraction is done in unsigned arithmetic (add when
 * signs differ, subtract magnitudes when they match) and the result's
 * sign reapplied, falling back to NVs on overflow.  An undef left operand
 * (USE_LEFT false) is treated as zero.
 * CLEANUP: removed a redundant second "useleft = USE_LEFT(TOPm1s)" before
 * the NV fallback — the SvIV_please() calls in between cannot change the
 * operand's SvOK status, so the value computed at entry is still valid. */
PP(pp_subtract)
{
    dSP; dATARGET; bool useleft; tryAMAGICbin(subtr,opASSIGN);
    useleft = USE_LEFT(TOPm1s);
#ifdef PERL_PRESERVE_IVUV
    /* See comments in pp_add (in pp_hot.c) about Overflow, and how
       "bad things" happen if you rely on signed integers wrapping.  */
    SvIV_please(TOPs);
    if (SvIOK(TOPs)) {
        /* Unless the left argument is integer in range we are going to have to
           use NV maths. Hence only attempt to coerce the right argument if
           we know the left is integer.  */
        register UV auv = 0;
        bool auvok = FALSE;
        bool a_valid = 0;

        if (!useleft) {
            auv = 0;
            a_valid = auvok = 1;
            /* left operand is undef, treat as zero.  */
        } else {
            /* Left operand is defined, so is it IV? */
            SvIV_please(TOPm1s);
            if (SvIOK(TOPm1s)) {
                if ((auvok = SvUOK(TOPm1s)))
                    auv = SvUVX(TOPm1s);
                else {
                    register IV aiv = SvIVX(TOPm1s);
                    if (aiv >= 0) {
                        auv = aiv;
                        auvok = 1;      /* Now acting as a sign flag.  */
                    } else { /* 2s complement assumption for IV_MIN */
                        auv = (UV)-aiv;
                    }
                }
                a_valid = 1;
            }
        }
        if (a_valid) {
            bool result_good = 0;
            UV result;
            register UV buv;
            bool buvok = SvUOK(TOPs);

            if (buvok)
                buv = SvUVX(TOPs);
            else {
                register IV biv = SvIVX(TOPs);
                if (biv >= 0) {
                    buv = biv;
                    buvok = 1;
                } else
                    buv = (UV)-biv;
            }
            /* ?uvok if value is >= 0. basically, flagged as UV if it's +ve,
               else "IV" now, independent of how it came in.
               if a, b represents positive, A, B negative, a maps to -A etc
               a - b =>  (a - b)
               A - b => -(a + b)
               a - B =>  (a + b)
               A - B => -(a - b)
               all UV maths. negate result if A negative.
               subtract if signs same, add if signs differ. */

            if (auvok ^ buvok) {
                /* Signs differ.  */
                result = auv + buv;
                if (result >= auv)
                    result_good = 1;
            } else {
                /* Signs same */
                if (auv >= buv) {
                    result = auv - buv;
                    /* Must get smaller */
                    if (result <= auv)
                        result_good = 1;
                } else {
                    result = buv - auv;
                    if (result <= buv) {
                        /* result really should be -(auv-buv). as its negation
                           of true value, need to swap our result flag  */
                        auvok = !auvok;
                        result_good = 1;
                    }
                }
            }
            if (result_good) {
                SP--;
                if (auvok)
                    SETu( result );
                else {
                    /* Negate result */
                    if (result <= (UV)IV_MIN)
                        SETi( -(IV)result );
                    else {
                        /* result valid, but out of range for IV.  */
                        SETn( -(NV)result );
                    }
                }
                RETURN;
            } /* Overflow, drop through to NVs.  */
        }
    }
#endif
    {
        dPOPnv;
        if (!useleft) {
            /* left operand is undef, treat as zero - value */
            SETn(-value);
            RETURN;
        }
        SETn( TOPn - value );
        RETURN;
    }
}
/* pp_left_shift: << operator.  Under "use integer" (HINT_INTEGER) the
 * shift is done on a signed IV, otherwise on an unsigned UV. */
PP(pp_left_shift)
{
    dSP; dATARGET; tryAMAGICbin(lshift,opASSIGN);
    {
        IV shift = POPi;
        if (PL_op->op_private & HINT_INTEGER) {
            IV i = TOPi;
            SETi(i << shift);
        }
        else {
            UV u = TOPu;
            SETu(u << shift);
        }
        RETURN;
    }
}
/* pp_right_shift: >> operator.  Signed (IV) shift under "use integer",
 * unsigned (UV) shift otherwise. */
PP(pp_right_shift)
{
    dSP; dATARGET; tryAMAGICbin(rshift,opASSIGN);
    {
        IV shift = POPi;
        if (PL_op->op_private & HINT_INTEGER) {
            IV i = TOPi;
            SETi(i >> shift);
        }
        else {
            UV u = TOPu;
            SETu(u >> shift);
        }
        RETURN;
    }
}
/* pp_lt: numeric < comparison.
 * With PERL_PRESERVE_IVUV, integer operands are compared exactly in the
 * four sign combinations (IV/IV, UV/UV, UV/IV, IV/UV) without going
 * through NVs; otherwise (and for non-integers) an NV comparison is used.
 * When NVs can't preserve UVs, two non-overloaded references are compared
 * by address. */
PP(pp_lt)
{
    dSP; tryAMAGICbinSET(lt,0);
#ifdef PERL_PRESERVE_IVUV
    SvIV_please(TOPs);
    if (SvIOK(TOPs)) {
        SvIV_please(TOPm1s);
        if (SvIOK(TOPm1s)) {
            bool auvok = SvUOK(TOPm1s);
            bool buvok = SvUOK(TOPs);

            if (!auvok && !buvok) { /* ## IV < IV ## */
                IV aiv = SvIVX(TOPm1s);
                IV biv = SvIVX(TOPs);

                SP--;
                SETs(boolSV(aiv < biv));
                RETURN;
            }
            if (auvok && buvok) { /* ## UV < UV ## */
                UV auv = SvUVX(TOPm1s);
                UV buv = SvUVX(TOPs);

                SP--;
                SETs(boolSV(auv < buv));
                RETURN;
            }
            if (auvok) { /* ## UV < IV ## */
                UV auv;
                IV biv;

                biv = SvIVX(TOPs);
                SP--;
                if (biv < 0) {
                    /* As (a) is a UV, it's >=0, so it cannot be < */
                    SETs(&PL_sv_no);
                    RETURN;
                }
                /* after SP-- the former TOPm1s (the UV operand) is TOPs */
                auv = SvUVX(TOPs);
                SETs(boolSV(auv < (UV)biv));
                RETURN;
            }
            { /* ## IV < UV ## */
                IV aiv;
                UV buv;

                aiv = SvIVX(TOPm1s);
                if (aiv < 0) {
                    /* As (b) is a UV, it's >=0, so it must be < */
                    SP--;
                    SETs(&PL_sv_yes);
                    RETURN;
                }
                buv = SvUVX(TOPs);
                SP--;
                SETs(boolSV((UV)aiv < buv));
                RETURN;
            }
        }
    }
#endif
#ifndef NV_PRESERVES_UV
#ifdef PERL_PRESERVE_IVUV
    else
#endif
    if (SvROK(TOPs) && !SvAMAGIC(TOPs) && SvROK(TOPm1s) && !SvAMAGIC(TOPm1s)) {
        SP--;
        SETs(boolSV(SvRV(TOPs) < SvRV(TOPp1s)));
        RETURN;
    }
#endif
    {
        dPOPnv;
        SETs(boolSV(TOPn < value));
        RETURN;
    }
}
/* pp_gt: numeric > comparison.  Same structure as pp_lt with the
 * comparison direction reversed: exact integer paths for the four IV/UV
 * sign combinations, reference-address comparison when NVs can't
 * preserve UVs, NV comparison as the fallback. */
PP(pp_gt)
{
    dSP; tryAMAGICbinSET(gt,0);
#ifdef PERL_PRESERVE_IVUV
    SvIV_please(TOPs);
    if (SvIOK(TOPs)) {
        SvIV_please(TOPm1s);
        if (SvIOK(TOPm1s)) {
            bool auvok = SvUOK(TOPm1s);
            bool buvok = SvUOK(TOPs);

            if (!auvok && !buvok) { /* ## IV > IV ## */
                IV aiv = SvIVX(TOPm1s);
                IV biv = SvIVX(TOPs);

                SP--;
                SETs(boolSV(aiv > biv));
                RETURN;
            }
            if (auvok && buvok) { /* ## UV > UV ## */
                UV auv = SvUVX(TOPm1s);
                UV buv = SvUVX(TOPs);

                SP--;
                SETs(boolSV(auv > buv));
                RETURN;
            }
            if (auvok) { /* ## UV > IV ## */
                UV auv;
                IV biv;

                biv = SvIVX(TOPs);
                SP--;
                if (biv < 0) {
                    /* As (a) is a UV, it's >=0, so it must be > */
                    SETs(&PL_sv_yes);
                    RETURN;
                }
                /* after SP-- the former TOPm1s (the UV operand) is TOPs */
                auv = SvUVX(TOPs);
                SETs(boolSV(auv > (UV)biv));
                RETURN;
            }
            { /* ## IV > UV ## */
                IV aiv;
                UV buv;

                aiv = SvIVX(TOPm1s);
                if (aiv < 0) {
                    /* As (b) is a UV, it's >=0, so it cannot be > */
                    SP--;
                    SETs(&PL_sv_no);
                    RETURN;
                }
                buv = SvUVX(TOPs);
                SP--;
                SETs(boolSV((UV)aiv > buv));
                RETURN;
            }
        }
    }
#endif
#ifndef NV_PRESERVES_UV
#ifdef PERL_PRESERVE_IVUV
    else
#endif
    if (SvROK(TOPs) && !SvAMAGIC(TOPs) && SvROK(TOPm1s) && !SvAMAGIC(TOPm1s)) {
        SP--;
        SETs(boolSV(SvRV(TOPs) > SvRV(TOPp1s)));
        RETURN;
    }
#endif
    {
        dPOPnv;
        SETs(boolSV(TOPn > value));
        RETURN;
    }
}
/* pp_le: numeric <= comparison.  Same structure as pp_lt/pp_gt: exact
 * integer paths for the four IV/UV sign combinations, then reference
 * address comparison (when NVs can't preserve UVs), then NV fallback. */
PP(pp_le)
{
    dSP; tryAMAGICbinSET(le,0);
#ifdef PERL_PRESERVE_IVUV
    SvIV_please(TOPs);
    if (SvIOK(TOPs)) {
        SvIV_please(TOPm1s);
        if (SvIOK(TOPm1s)) {
            bool auvok = SvUOK(TOPm1s);
            bool buvok = SvUOK(TOPs);

            if (!auvok && !buvok) { /* ## IV <= IV ## */
                IV aiv = SvIVX(TOPm1s);
                IV biv = SvIVX(TOPs);

                SP--;
                SETs(boolSV(aiv <= biv));
                RETURN;
            }
            if (auvok && buvok) { /* ## UV <= UV ## */
                UV auv = SvUVX(TOPm1s);
                UV buv = SvUVX(TOPs);

                SP--;
                SETs(boolSV(auv <= buv));
                RETURN;
            }
            if (auvok) { /* ## UV <= IV ## */
                UV auv;
                IV biv;

                biv = SvIVX(TOPs);
                SP--;
                if (biv < 0) {
                    /* As (a) is a UV, it's >=0, so a cannot be <= */
                    SETs(&PL_sv_no);
                    RETURN;
                }
                /* after SP-- the former TOPm1s (the UV operand) is TOPs */
                auv = SvUVX(TOPs);
                SETs(boolSV(auv <= (UV)biv));
                RETURN;
            }
            { /* ## IV <= UV ## */
                IV aiv;
                UV buv;

                aiv = SvIVX(TOPm1s);
                if (aiv < 0) {
                    /* As (b) is a UV, it's >=0, so a must be <= */
                    SP--;
                    SETs(&PL_sv_yes);
                    RETURN;
                }
                buv = SvUVX(TOPs);
                SP--;
                SETs(boolSV((UV)aiv <= buv));
                RETURN;
            }
        }
    }
#endif
#ifndef NV_PRESERVES_UV
#ifdef PERL_PRESERVE_IVUV
    else
#endif
    if (SvROK(TOPs) && !SvAMAGIC(TOPs) && SvROK(TOPm1s) && !SvAMAGIC(TOPm1s)) {
        SP--;
        SETs(boolSV(SvRV(TOPs) <= SvRV(TOPp1s)));
        RETURN;
    }
#endif
    {
        dPOPnv;
        SETs(boolSV(TOPn <= value));
        RETURN;
    }
}
/* pp_ge: numeric >= comparison.  Same structure as the other numeric
 * comparisons: exact integer paths for the four IV/UV sign combinations,
 * reference-address comparison when NVs can't preserve UVs, NV fallback. */
PP(pp_ge)
{
    dSP; tryAMAGICbinSET(ge,0);
#ifdef PERL_PRESERVE_IVUV
    SvIV_please(TOPs);
    if (SvIOK(TOPs)) {
        SvIV_please(TOPm1s);
        if (SvIOK(TOPm1s)) {
            bool auvok = SvUOK(TOPm1s);
            bool buvok = SvUOK(TOPs);

            if (!auvok && !buvok) { /* ## IV >= IV ## */
                IV aiv = SvIVX(TOPm1s);
                IV biv = SvIVX(TOPs);

                SP--;
                SETs(boolSV(aiv >= biv));
                RETURN;
            }
            if (auvok && buvok) { /* ## UV >= UV ## */
                UV auv = SvUVX(TOPm1s);
                UV buv = SvUVX(TOPs);

                SP--;
                SETs(boolSV(auv >= buv));
                RETURN;
            }
            if (auvok) { /* ## UV >= IV ## */
                UV auv;
                IV biv;

                biv = SvIVX(TOPs);
                SP--;
                if (biv < 0) {
                    /* As (a) is a UV, it's >=0, so it must be >= */
                    SETs(&PL_sv_yes);
                    RETURN;
                }
                /* after SP-- the former TOPm1s (the UV operand) is TOPs */
                auv = SvUVX(TOPs);
                SETs(boolSV(auv >= (UV)biv));
                RETURN;
            }
            { /* ## IV >= UV ## */
                IV aiv;
                UV buv;

                aiv = SvIVX(TOPm1s);
                if (aiv < 0) {
                    /* As (b) is a UV, it's >=0, so a cannot be >= */
                    SP--;
                    SETs(&PL_sv_no);
                    RETURN;
                }
                buv = SvUVX(TOPs);
                SP--;
                SETs(boolSV((UV)aiv >= buv));
                RETURN;
            }
        }
    }
#endif
#ifndef NV_PRESERVES_UV
#ifdef PERL_PRESERVE_IVUV
    else
#endif
    if (SvROK(TOPs) && !SvAMAGIC(TOPs) && SvROK(TOPm1s) && !SvAMAGIC(TOPm1s)) {
        SP--;
        SETs(boolSV(SvRV(TOPs) >= SvRV(TOPp1s)));
        RETURN;
    }
#endif
    {
        dPOPnv;
        SETs(boolSV(TOPn >= value));
        RETURN;
    }
}
/* pp_ne: numeric != comparison.
 * References are compared by address first (only compiled when NVs can't
 * preserve UVs); integer operands are compared exactly, exploiting that
 * != is commutative to share the mixed IV/UV code; otherwise NVs. */
PP(pp_ne)
{
    dSP; tryAMAGICbinSET(ne,0);
#ifndef NV_PRESERVES_UV
    if (SvROK(TOPs) && !SvAMAGIC(TOPs) && SvROK(TOPm1s) && !SvAMAGIC(TOPm1s)) {
        SP--;
        SETs(boolSV(SvRV(TOPs) != SvRV(TOPp1s)));
        RETURN;
    }
#endif
#ifdef PERL_PRESERVE_IVUV
    SvIV_please(TOPs);
    if (SvIOK(TOPs)) {
        SvIV_please(TOPm1s);
        if (SvIOK(TOPm1s)) {
            bool auvok = SvUOK(TOPm1s);
            bool buvok = SvUOK(TOPs);

            if (auvok == buvok) { /* ## IV == IV or UV == UV ## */
                /* Casting IV to UV before comparison isn't going to matter
                   on 2s complement. On 1s complement or sign&magnitude
                   (if we have any of them) it could make negative zero
                   differ from normal zero. As I understand it. (Need to
                   check - is negative zero implementation defined behaviour
                   anyway?). NWC  */
                UV buv = SvUVX(POPs);
                UV auv = SvUVX(TOPs);

                SETs(boolSV(auv != buv));
                RETURN;
            }
            { /* ## Mixed IV,UV ## */
                IV iv;
                UV uv;

                /* != is commutative so swap if needed (save code) */
                if (auvok) {
                    /* swap. top of stack (b) is the iv */
                    iv = SvIVX(TOPs);
                    SP--;
                    if (iv < 0) {
                        /* As (a) is a UV, it's >=0, so it cannot be == */
                        SETs(&PL_sv_yes);
                        RETURN;
                    }
                    uv = SvUVX(TOPs);
                } else {
                    iv = SvIVX(TOPm1s);
                    SP--;
                    if (iv < 0) {
                        /* As (b) is a UV, it's >=0, so it cannot be == */
                        SETs(&PL_sv_yes);
                        RETURN;
                    }
                    uv = SvUVX(*(SP+1)); /* Do I want TOPp1s() ? */
                }
                SETs(boolSV((UV)iv != uv));
                RETURN;
            }
        }
    }
#endif
    {
        dPOPnv;
        SETs(boolSV(TOPn != value));
        RETURN;
    }
}
/* pp_ncmp: the <=> operator.  Returns -1/0/1, or undef when either NV
 * operand is NaN.  Exact integer paths cover the four IV/UV sign
 * combinations; references compare by address when NVs can't preserve
 * UVs; otherwise compare as NVs. */
PP(pp_ncmp)
{
    dSP; dTARGET; tryAMAGICbin(ncmp,0);
#ifndef NV_PRESERVES_UV
    if (SvROK(TOPs) && !SvAMAGIC(TOPs) && SvROK(TOPm1s) && !SvAMAGIC(TOPm1s)) {
        UV right = PTR2UV(SvRV(POPs));
        UV left = PTR2UV(SvRV(TOPs));
        SETi((left > right) - (left < right));
        RETURN;
    }
#endif
#ifdef PERL_PRESERVE_IVUV
    /* Fortunately it seems NaN isn't IOK */
    SvIV_please(TOPs);
    if (SvIOK(TOPs)) {
        SvIV_please(TOPm1s);
        if (SvIOK(TOPm1s)) {
            bool leftuvok = SvUOK(TOPm1s);
            bool rightuvok = SvUOK(TOPs);
            I32 value;

            if (!leftuvok && !rightuvok) { /* ## IV <=> IV ## */
                IV leftiv = SvIVX(TOPm1s);
                IV rightiv = SvIVX(TOPs);

                if (leftiv > rightiv)
                    value = 1;
                else if (leftiv < rightiv)
                    value = -1;
                else
                    value = 0;
            } else if (leftuvok && rightuvok) { /* ## UV <=> UV ## */
                UV leftuv = SvUVX(TOPm1s);
                UV rightuv = SvUVX(TOPs);

                if (leftuv > rightuv)
                    value = 1;
                else if (leftuv < rightuv)
                    value = -1;
                else
                    value = 0;
            } else if (leftuvok) { /* ## UV <=> IV ## */
                UV leftuv;
                IV rightiv;

                rightiv = SvIVX(TOPs);
                if (rightiv < 0) {
                    /* As (a) is a UV, it's >=0, so it cannot be < */
                    value = 1;
                } else {
                    leftuv = SvUVX(TOPm1s);
                    if (leftuv > (UV)rightiv) {
                        value = 1;
                    } else if (leftuv < (UV)rightiv) {
                        value = -1;
                    } else {
                        value = 0;
                    }
                }
            } else { /* ## IV <=> UV ## */
                IV leftiv;
                UV rightuv;

                leftiv = SvIVX(TOPm1s);
                if (leftiv < 0) {
                    /* As (b) is a UV, it's >=0, so it must be < */
                    value = -1;
                } else {
                    rightuv = SvUVX(TOPs);
                    if ((UV)leftiv > rightuv) {
                        value = 1;
                    } else if ((UV)leftiv < rightuv) {
                        value = -1;
                    } else {
                        value = 0;
                    }
                }
            }
            SP--;
            SETi(value);
            RETURN;
        }
    }
#endif
    {
        dPOPTOPnnrl;
        I32 value;

#ifdef Perl_isnan
        if (Perl_isnan(left) || Perl_isnan(right)) {
            SETs(&PL_sv_undef);
            RETURN;
        }
        value = (left > right) - (left < right);
#else
        if (left == right)
            value = 0;
        else if (left < right)
            value = -1;
        else if (left > right)
            value = 1;
        else {
            /* neither <, > nor ==: a NaN is involved */
            SETs(&PL_sv_undef);
            RETURN;
        }
#endif
        SETi(value);
        RETURN;
    }
}
/* pp_slt: string lt.  Uses the locale-aware collation when a locale is
 * in effect at runtime, plain byte/UTF-8 comparison otherwise. */
PP(pp_slt)
{
    dSP; tryAMAGICbinSET(slt,0);
    {
        dPOPTOPssrl;
        int cmp = (IN_LOCALE_RUNTIME
                   ? sv_cmp_locale(left, right)
                   : sv_cmp(left, right));
        SETs(boolSV(cmp < 0));
        RETURN;
    }
}
/* pp_sgt: string gt.  Locale-aware collation when applicable, else
 * plain sv_cmp(). */
PP(pp_sgt)
{
    dSP; tryAMAGICbinSET(sgt,0);
    {
        dPOPTOPssrl;
        int cmp = (IN_LOCALE_RUNTIME
                   ? sv_cmp_locale(left, right)
                   : sv_cmp(left, right));
        SETs(boolSV(cmp > 0));
        RETURN;
    }
}
/* pp_sle: string le.  Locale-aware collation when applicable, else
 * plain sv_cmp(). */
PP(pp_sle)
{
    dSP; tryAMAGICbinSET(sle,0);
    {
        dPOPTOPssrl;
        int cmp = (IN_LOCALE_RUNTIME
                   ? sv_cmp_locale(left, right)
                   : sv_cmp(left, right));
        SETs(boolSV(cmp <= 0));
        RETURN;
    }
}
/* pp_sge: string ge.  Locale-aware collation when applicable, else
 * plain sv_cmp(). */
PP(pp_sge)
{
    dSP; tryAMAGICbinSET(sge,0);
    {
        dPOPTOPssrl;
        int cmp = (IN_LOCALE_RUNTIME
                   ? sv_cmp_locale(left, right)
                   : sv_cmp(left, right));
        SETs(boolSV(cmp >= 0));
        RETURN;
    }
}
/* pp_seq: string eq — equality via sv_eq() (not collation). */
PP(pp_seq)
{
    dSP; tryAMAGICbinSET(seq,0);
    {
        dPOPTOPssrl;
        SETs(boolSV(sv_eq(left, right)));
        RETURN;
    }
}
/* pp_sne: string ne — negated sv_eq(). */
PP(pp_sne)
{
    dSP; tryAMAGICbinSET(sne,0);
    {
        dPOPTOPssrl;
        SETs(boolSV(!sv_eq(left, right)));
        RETURN;
    }
}
/* pp_scmp: the cmp operator — pushes sv_cmp()'s -1/0/1 result, using
 * locale collation when a locale is in effect at runtime. */
PP(pp_scmp)
{
    dSP; dTARGET; tryAMAGICbin(scmp,0);
    {
        dPOPTOPssrl;
        int cmp = (IN_LOCALE_RUNTIME
                   ? sv_cmp_locale(left, right)
                   : sv_cmp(left, right));
        SETi( cmp );
        RETURN;
    }
}
/* pp_bit_and: & operator.  Numeric operands (either side has NIOKp set)
 * are combined as IVs under "use integer", UVs otherwise; two strings get
 * the bitwise string-AND via do_vop().
 * NOTE(review): unlike pp_bit_xor/pp_bit_or below, this does not wrap the
 * left operand in USE_LEFT — confirm whether the asymmetry is intended
 * (for &, an undef side yields 0 either way, but the warning behaviour
 * may differ). */
PP(pp_bit_and)
{
    dSP; dATARGET; tryAMAGICbin(band,opASSIGN);
    {
        dPOPTOPssrl;
        if (SvNIOKp(left) || SvNIOKp(right)) {
            if (PL_op->op_private & HINT_INTEGER) {
                IV i = SvIV(left) & SvIV(right);
                SETi(i);
            }
            else {
                UV u = SvUV(left) & SvUV(right);
                SETu(u);
            }
        }
        else {
            do_vop(PL_op->op_type, TARG, left, right);
            SETTARG;
        }
        RETURN;
    }
}
/* pp_bit_xor: ^ operator.  Numeric operands are combined as IVs under
 * "use integer", UVs otherwise — an undef left operand (USE_LEFT false)
 * is treated as 0; two strings get the bitwise string-XOR via do_vop(). */
PP(pp_bit_xor)
{
    dSP; dATARGET; tryAMAGICbin(bxor,opASSIGN);
    {
        dPOPTOPssrl;
        if (SvNIOKp(left) || SvNIOKp(right)) {
            if (PL_op->op_private & HINT_INTEGER) {
                IV i = (USE_LEFT(left) ? SvIV(left) : 0) ^ SvIV(right);
                SETi(i);
            }
            else {
                UV u = (USE_LEFT(left) ? SvUV(left) : 0) ^ SvUV(right);
                SETu(u);
            }
        }
        else {
            do_vop(PL_op->op_type, TARG, left, right);
            SETTARG;
        }
        RETURN;
    }
}
/* pp_bit_or: | operator.  Numeric operands are combined as IVs under
 * "use integer", UVs otherwise — an undef left operand (USE_LEFT false)
 * is treated as 0; two strings get the bitwise string-OR via do_vop(). */
PP(pp_bit_or)
{
    dSP; dATARGET; tryAMAGICbin(bor,opASSIGN);
    {
        dPOPTOPssrl;
        if (SvNIOKp(left) || SvNIOKp(right)) {
            if (PL_op->op_private & HINT_INTEGER) {
                IV i = (USE_LEFT(left) ? SvIV(left) : 0) | SvIV(right);
                SETi(i);
            }
            else {
                UV u = (USE_LEFT(left) ? SvUV(left) : 0) | SvUV(right);
                SETu(u);
            }
        }
        else {
            do_vop(PL_op->op_type, TARG, left, right);
            SETTARG;
        }
        RETURN;
    }
}
/* pp_negate: unary minus.
 * Integers are negated exactly where possible (with 2s-complement special
 * cases at the IV_MIN / IV_MAX+1 boundary); NVs are negated as NVs.
 * Strings follow Perl's symbolic rules: -"foo" gives "-foo", and a
 * leading '+' or '-' is flipped; UTF-8 and look-like-a-number strings are
 * first given a chance to be treated numerically. */
PP(pp_negate)
{
    dSP; dTARGET; tryAMAGICun(neg);
    {
        dTOPss;
        /* snapshot flags before mg_get can change them */
        int flags = SvFLAGS(sv);
        if (SvGMAGICAL(sv))
            mg_get(sv);
        if ((flags & SVf_IOK) || ((flags & (SVp_IOK | SVp_NOK)) == SVp_IOK)) {
            /* It's publicly an integer, or privately an integer-not-float */
        oops_its_an_int:
            if (SvIsUV(sv)) {
                if (SvIVX(sv) == IV_MIN) {
                    /* 2s complement assumption. */
                    SETi(SvIVX(sv));    /* special case: -((UV)IV_MAX+1) == IV_MIN */
                    RETURN;
                }
                else if (SvUVX(sv) <= IV_MAX) {
                    SETi(-SvIVX(sv));
                    RETURN;
                }
            }
            else if (SvIVX(sv) != IV_MIN) {
                SETi(-SvIVX(sv));
                RETURN;
            }
#ifdef PERL_PRESERVE_IVUV
            else {
                /* -IV_MIN doesn't fit in an IV; it does fit in a UV */
                SETu((UV)IV_MIN);
                RETURN;
            }
#endif
        }
        if (SvNIOKp(sv))
            SETn(-SvNV(sv));
        else if (SvPOKp(sv)) {
            STRLEN len;
            char *s = SvPV(sv, len);
            if (isIDFIRST(*s)) {
                /* -bareword-ish string: prepend a minus sign */
                sv_setpvn(TARG, "-", 1);
                sv_catsv(TARG, sv);
            }
            else if (*s == '+' || *s == '-') {
                /* flip an existing explicit sign */
                sv_setsv(TARG, sv);
                *SvPV_force(TARG, len) = *s == '-' ? '+' : '-';
            }
            else if (DO_UTF8(sv)) {
                SvIV_please(sv);
                if (SvIOK(sv))
                    goto oops_its_an_int;
                if (SvNOK(sv))
                    sv_setnv(TARG, -SvNV(sv));
                else {
                    sv_setpvn(TARG, "-", 1);
                    sv_catsv(TARG, sv);
                }
            }
            else {
                SvIV_please(sv);
                if (SvIOK(sv))
                    goto oops_its_an_int;
                sv_setnv(TARG, -SvNV(sv));
            }
            SETTARG;
        }
        else
            SETn(-SvNV(sv));
    }
    RETURN;
}
/* pp_not: logical negation (!) — replaces the stack top with the boolean
 * negation of its truth value, without popping/pushing. */
PP(pp_not)
{
    dSP; tryAMAGICunSET(not);
    *PL_stack_sp = boolSV(!SvTRUE(*PL_stack_sp));
    return NORMAL;
}
/* pp_complement: bitwise ~ operator.
 * Numeric operands: IV complement under "use integer", UV complement
 * otherwise.  String operands: complement every byte/character of a copy
 * in TARG.  For UTF-8 strings a first pass measures the exact output
 * length (complemented code points can change encoded width); if any
 * complemented character exceeds 0xff the result stays UTF-8, otherwise
 * it is rebuilt as bytes. */
PP(pp_complement)
{
    dSP; dTARGET; tryAMAGICun(compl);
    {
        dTOPss;
        if (SvNIOKp(sv)) {
            if (PL_op->op_private & HINT_INTEGER) {
                IV i = ~SvIV(sv);
                SETi(i);
            }
            else {
                UV u = ~SvUV(sv);
                SETu(u);
            }
        }
        else {
            register U8 *tmps;
            register I32 anum;
            STRLEN len;

            (void)SvPV_nomg(sv,len); /* force check for uninit var */
            SvSetSV(TARG, sv);
            tmps = (U8*)SvPV_force(TARG, len);
            anum = len;
            if (SvUTF8(TARG)) {
                /* Calculate exact length, let's not estimate. */
                STRLEN targlen = 0;
                U8 *result;
                U8 *send;
                STRLEN l;
                UV nchar = 0;
                UV nwide = 0;

                send = tmps + len;
                while (tmps < send) {
                    UV c = utf8n_to_uvchr(tmps, send-tmps, &l, UTF8_ALLOW_ANYUV);
                    tmps += UTF8SKIP(tmps);
                    targlen += UNISKIP(~c);
                    nchar++;
                    if (c > 0xff)
                        nwide++;
                }

                /* Now rewind strings and write them. */
                tmps -= len;

                if (nwide) {
                    /* at least one complemented char is > 0xff: emit UTF-8 */
                    Newz(0, result, targlen + 1, U8);
                    while (tmps < send) {
                        UV c = utf8n_to_uvchr(tmps, send-tmps, &l, UTF8_ALLOW_ANYUV);
                        tmps += UTF8SKIP(tmps);
                        result = uvchr_to_utf8_flags(result, ~c, UNICODE_ALLOW_ANY);
                    }
                    *result = '\0';
                    result -= targlen;
                    sv_setpvn(TARG, (char*)result, targlen);
                    SvUTF8_on(TARG);
                }
                else {
                    /* every complemented char fits a byte: emit bytes */
                    Newz(0, result, nchar + 1, U8);
                    while (tmps < send) {
                        /* NOTE(review): this pass gives utf8n_to_uvchr a
                           curlen of 0, unlike the send-tmps used above —
                           presumably safe under UTF8_ALLOW_ANY, but confirm. */
                        U8 c = (U8)utf8n_to_uvchr(tmps, 0, &l, UTF8_ALLOW_ANY);
                        tmps += UTF8SKIP(tmps);
                        *result++ = ~c;
                    }
                    *result = '\0';
                    result -= nchar;
                    sv_setpvn(TARG, (char*)result, nchar);
                    SvUTF8_off(TARG);
                }
                Safefree(result);  /* sv_setpvn copied; drop the temp buffer */
                SETs(TARG);
                RETURN;
            }
#ifdef LIBERAL
            {
                /* word-at-a-time complement once tmps is long-aligned */
                register long *tmpl;
                for ( ; anum && (unsigned long)tmps % sizeof(long); anum--, tmps++)
                    *tmps = ~*tmps;
                tmpl = (long*)tmps;
                for ( ; anum >= sizeof(long); anum -= sizeof(long), tmpl++)
                    *tmpl = ~*tmpl;
                tmps = (U8*)tmpl;
            }
#endif
            for ( ; anum > 0; anum--, tmps++)
                *tmps = ~*tmps;

            SETs(TARG);
        }
        RETURN;
    }
}
/* integer versions of some of the above */
/* pp_i_multiply: "use integer" multiplication — plain IV multiply,
 * overflow wraps. */
PP(pp_i_multiply)
{
    dSP; dATARGET; tryAMAGICbin(mult,opASSIGN);
    {
        dPOPTOPiirl;
        SETi( left * right );
        RETURN;
    }
}
/* pp_i_divide: "use integer" division — truncating IV division, dying
 * on a zero divisor. */
PP(pp_i_divide)
{
    dSP; dATARGET; tryAMAGICbin(div,opASSIGN);
    {
        dPOPiv;
        if (value == 0)
            DIE(aTHX_ "Illegal division by zero");
        value = POPi / value;   /* dividend is next on the stack */
        PUSHi( value );
        RETURN;
    }
}
/* pp_i_modulo_0: the plain "use integer" modulus (C's % on IVs).
 * Installed as the OP_I_MODULO implementation by pp_i_modulo once the
 * runtime glibc check below has been made. */
STATIC
PP(pp_i_modulo_0)
{
     /* This is the vanilla old i_modulo. */
     dSP; dATARGET; tryAMAGICbin(modulo,opASSIGN);
     {
          dPOPTOPiirl;
          if (!right)
               DIE(aTHX_ "Illegal modulus zero");
          SETi( left % right );
          RETURN;
     }
}
#if defined(__GLIBC__) && IVSIZE == 8
/* pp_i_modulo_1: "use integer" modulus with a workaround for glibc's
 * broken 64-bit _moddi3 (negative right operand): taking PERL_ABS of the
 * divisor gives the same C-semantics result while avoiding the bug.
 * Only installed when pp_i_modulo detects the bug at runtime. */
STATIC
PP(pp_i_modulo_1)
{
     /* This is the i_modulo with the workaround for the _moddi3 bug
      * in (at least) glibc 2.2.5 (the PERL_ABS() the workaround).
      * See below for pp_i_modulo. */
     dSP; dATARGET; tryAMAGICbin(modulo,opASSIGN);
     {
          dPOPTOPiirl;
          if (!right)
               DIE(aTHX_ "Illegal modulus zero");
          SETi( left % PERL_ABS(right) );
          RETURN;
     }
}
#endif
/* pp_i_modulo: first-call dispatcher for "use integer" modulus.
 * On its first execution it probes for the glibc 64-bit _moddi3 bug and
 * permanently repatches the opcode dispatch table to either the vanilla
 * pp_i_modulo_0 or the PERL_ABS workaround pp_i_modulo_1, then computes
 * this call's result itself (applying the workaround if needed). */
PP(pp_i_modulo)
{
     /* This is the "old school" operator, which I call so because
      * it's the vanilla one with the glibc bug check bolted on. */
     dSP; dATARGET; tryAMAGICbin(modulo,opASSIGN);
     {
          dPOPTOPiirl;
          if (!right)
               DIE(aTHX_ "Illegal modulus zero");
          /* The assumption is to use hereafter the old vanilla version... */
          PL_op->op_ppaddr =
               PL_ppaddr[OP_I_MODULO] =
                   &Perl_pp_i_modulo_0;
          /* .. but if we have glibc, we might have a buggy _moddi3
           * (at least glibc 2.2.5 is known to have this bug), in other
           * words our integer modulus with negative quad as the second
           * argument might be broken.  Test for this and re-patch the
           * opcode dispatch table if that is the case, remembering to
           * also apply the workaround so that this first round works
           * right, too.  See [perl #9402] for more information. */
#if defined(__GLIBC__) && IVSIZE == 8
          {
               IV l =   3;
               IV r = -10;
               /* Cannot do this check with inlined IV constants since
                * that seems to work correctly even with the buggy glibc. */
               if (l % r == -3) {
                    /* Yikes, we have the bug.
                     * Patch in the workaround version. */
                    PL_op->op_ppaddr =
                         PL_ppaddr[OP_I_MODULO] =
                             &Perl_pp_i_modulo_1;
                    /* Make certain we work right this time, too. */
                    right = PERL_ABS(right);
               }
          }
#endif
          SETi( left % right );
          RETURN;
     }
}
/* pp_i_add: integer '+' under "use integer".  Wraps on overflow. */
PP(pp_i_add)
{
    dSP; dATARGET; tryAMAGICbin(add,opASSIGN);
    {
      dPOPTOPiirl_ul;           /* _ul variant: left may be an lvalue target */
      SETi( left + right );
      RETURN;
    }
}

/* pp_i_subtract: integer '-' under "use integer".  Wraps on overflow. */
PP(pp_i_subtract)
{
    dSP; dATARGET; tryAMAGICbin(subtr,opASSIGN);
    {
      dPOPTOPiirl_ul;
      SETi( left - right );
      RETURN;
    }
}
/* Integer comparison opcodes (<, >, <=, >=, ==, != under "use integer").
 * Each pops two IVs and pushes a boolean SV. */
PP(pp_i_lt)
{
    dSP; tryAMAGICbinSET(lt,0);
    {
      dPOPTOPiirl;
      SETs(boolSV(left < right));
      RETURN;
    }
}

PP(pp_i_gt)
{
    dSP; tryAMAGICbinSET(gt,0);
    {
      dPOPTOPiirl;
      SETs(boolSV(left > right));
      RETURN;
    }
}

PP(pp_i_le)
{
    dSP; tryAMAGICbinSET(le,0);
    {
      dPOPTOPiirl;
      SETs(boolSV(left <= right));
      RETURN;
    }
}

PP(pp_i_ge)
{
    dSP; tryAMAGICbinSET(ge,0);
    {
      dPOPTOPiirl;
      SETs(boolSV(left >= right));
      RETURN;
    }
}

PP(pp_i_eq)
{
    dSP; tryAMAGICbinSET(eq,0);
    {
      dPOPTOPiirl;
      SETs(boolSV(left == right));
      RETURN;
    }
}

PP(pp_i_ne)
{
    dSP; tryAMAGICbinSET(ne,0);
    {
      dPOPTOPiirl;
      SETs(boolSV(left != right));
      RETURN;
    }
}
/* pp_i_ncmp: integer '<=>'.  Pushes -1, 0 or 1. */
PP(pp_i_ncmp)
{
    dSP; dTARGET; tryAMAGICbin(ncmp,0);
    {
      dPOPTOPiirl;
      I32 value;

      if (left > right)
	value = 1;
      else if (left < right)
	value = -1;
      else
	value = 0;
      SETi(value);
      RETURN;
    }
}
/* pp_i_negate: integer unary minus.  Note: IV_MIN negates to itself
 * (two's-complement wraparound), as with C. */
PP(pp_i_negate)
{
    dSP; dTARGET; tryAMAGICun(neg);
    SETi(-TOPi);
    RETURN;
}
/* High falutin' math. */
/* pp_atan2: two-argument arctangent, atan2($y, $x). */
PP(pp_atan2)
{
    dSP; dTARGET; tryAMAGICbin(atan2,0);
    {
      dPOPTOPnnrl;              /* left, right as NVs */
      SETn(Perl_atan2(left, right));
      RETURN;
    }
}

/* pp_sin: sine of the topmost NV (radians). */
PP(pp_sin)
{
    dSP; dTARGET; tryAMAGICun(sin);
    {
      NV value;
      value = POPn;
      value = Perl_sin(value);
      XPUSHn(value);
      RETURN;
    }
}

/* pp_cos: cosine of the topmost NV (radians). */
PP(pp_cos)
{
    dSP; dTARGET; tryAMAGICun(cos);
    {
      NV value;
      value = POPn;
      value = Perl_cos(value);
      XPUSHn(value);
      RETURN;
    }
}
/* Support Configure command-line overrides for rand() functions.
After 5.005, perhaps we should replace this by Configure support
for drand48(), random(), or rand(). For 5.005, though, maintain
compatibility by calling rand() but allow the user to override it.
See INSTALL for details. --<NAME> 15 July 1998
*/
/* Now it's after 5.005, and Configure supports drand48() and random(),
in addition to rand(). So the overrides should not be needed any more.
--<NAME> 27 September 1998
*/
#ifndef HAS_DRAND48_PROTO
extern double drand48 (void);
#endif
/* pp_rand: rand([$max]).  Defaults max to 1.0 when absent or zero;
 * auto-seeds the generator on first use. */
PP(pp_rand)
{
    dSP; dTARGET;
    NV value;
    if (MAXARG < 1)
	value = 1.0;
    else
	value = POPn;
    if (value == 0.0)
	value = 1.0;            /* rand(0) behaves as rand(1) */
    if (!PL_srand_called) {
	(void)seedDrand01((Rand_seed_t)seed());
	PL_srand_called = TRUE;
    }
    value *= Drand01();         /* uniform in [0, value) */
    XPUSHn(value);
    RETURN;
}
/* pp_srand: srand([$seed]).  Uses an internally generated seed when no
 * argument is given; always returns true. */
PP(pp_srand)
{
    dSP;
    UV anum;
    if (MAXARG < 1)
	anum = seed();
    else
	anum = POPu;
    (void)seedDrand01((Rand_seed_t)anum);
    PL_srand_called = TRUE;
    EXTEND(SP, 1);
    RETPUSHYES;
}
/* pp_exp: e**$x for the topmost NV. */
PP(pp_exp)
{
    dSP; dTARGET; tryAMAGICun(exp);
    {
      NV value;
      value = POPn;
      value = Perl_exp(value);
      XPUSHn(value);
      RETURN;
    }
}

/* pp_log: natural logarithm; dies for arguments <= 0. */
PP(pp_log)
{
    dSP; dTARGET; tryAMAGICun(log);
    {
      NV value;
      value = POPn;
      if (value <= 0.0) {
	SET_NUMERIC_STANDARD();   /* print the value with a C locale decimal point */
	DIE(aTHX_ "Can't take log of %"NVgf, value);
      }
      value = Perl_log(value);
      XPUSHn(value);
      RETURN;
    }
}

/* pp_sqrt: square root; dies for negative arguments. */
PP(pp_sqrt)
{
    dSP; dTARGET; tryAMAGICun(sqrt);
    {
      NV value;
      value = POPn;
      if (value < 0.0) {
	SET_NUMERIC_STANDARD();
	DIE(aTHX_ "Can't take sqrt of %"NVgf, value);
      }
      value = Perl_sqrt(value);
      XPUSHn(value);
      RETURN;
    }
}
/* pp_int: int($x) -- truncate toward zero.
 * Prefers exact integer (IV/UV) representations when the SV already
 * holds one; otherwise truncates the NV with floor/ceil as appropriate. */
PP(pp_int)
{
    dSP; dTARGET; tryAMAGICun(int);
    {
      NV value;
      IV iv = TOPi; /* attempt to convert to IV if possible. */
      /* XXX it's arguable that compiler casting to IV might be subtly
	 different from modf (for numbers inside (IV_MIN,UV_MAX)) in which
	 case preferring IV has introduced a subtle behaviour change bug. OTOH
	 relying on floating point to be accurate is a bug.  */

      if (!SvOK(TOPs))
        SETu(0);                /* undef int()s to 0 */
      else if (SvIOK(TOPs)) {
	if (SvIsUV(TOPs)) {
	    UV uv = TOPu;
	    SETu(uv);
	} else
	    SETi(iv);
      } else {
	  value = TOPn;
	  if (value >= 0.0) {
	      if (value < (NV)UV_MAX + 0.5) {
		  SETu(U_V(value));     /* fits in a UV: exact */
	      } else {
		  SETn(Perl_floor(value));
	      }
	  }
	  else {
	      if (value > (NV)IV_MIN - 0.5) {
		  SETi(I_V(value));     /* fits in an IV: exact */
	      } else {
		  SETn(Perl_ceil(value)); /* negative: ceil truncates toward 0 */
	      }
	  }
      }
    }
    RETURN;
}
/* pp_abs: abs($x).  Uses exact IV/UV arithmetic when the SV already
 * holds an integer; note the special-casing of IV_MIN, whose magnitude
 * only fits in a UV. */
PP(pp_abs)
{
    dSP; dTARGET; tryAMAGICun(abs);
    {
      /* This will cache the NV value if string isn't actually integer  */
      IV iv = TOPi;

      if (!SvOK(TOPs))
        SETu(0);                /* abs(undef) is 0 */
      else if (SvIOK(TOPs)) {
	/* IVX is precise  */
	if (SvIsUV(TOPs)) {
	  SETu(TOPu);		/* force it to be numeric only */
	} else {
	  if (iv >= 0) {
	    SETi(iv);
	  } else {
	    if (iv != IV_MIN) {
	      SETi(-iv);
	    } else {
	      /* 2s complement assumption. Also, not really needed as
		 IV_MIN and -IV_MIN should both be %100...00 and NV-able  */
	      SETu(IV_MIN);
	    }
	  }
	}
      } else{
	NV value = TOPn;
	if (value < 0.0)
	  value = -value;
	SETn(value);
      }
    }
    RETURN;
}
/* pp_hex: hex($str) -- parse a hexadecimal string to a number.
 * UTF-8 strings are downgraded to bytes first (croaks if impossible).
 * Results too large for a UV are returned as an NV. */
PP(pp_hex)
{
    dSP; dTARGET;
    char *tmps;
    I32 flags = PERL_SCAN_ALLOW_UNDERSCORES;
    STRLEN len;
    NV result_nv;
    UV result_uv;
    SV* sv = POPs;

    tmps = (SvPVx(sv, len));
    if (DO_UTF8(sv)) {
	 /* If Unicode, try to downgrade
	  * If not possible, croak. */
	 SV* tsv = sv_2mortal(newSVsv(sv));

	 SvUTF8_on(tsv);
	 sv_utf8_downgrade(tsv, FALSE);
	 tmps = SvPVX(tsv);
    }
    result_uv = grok_hex (tmps, &len, &flags, &result_nv);
    if (flags & PERL_SCAN_GREATER_THAN_UV_MAX) {
        XPUSHn(result_nv);
    }
    else {
        XPUSHu(result_uv);
    }
    RETURN;
}
/* pp_oct: oct($str) -- parse an octal string, or hex/binary when the
 * string (after optional whitespace and a leading '0') starts with
 * 'x' or 'b'.  Overflowing results are returned as an NV. */
PP(pp_oct)
{
    dSP; dTARGET;
    char *tmps;
    I32 flags = PERL_SCAN_ALLOW_UNDERSCORES;
    STRLEN len;
    NV result_nv;
    UV result_uv;
    SV* sv = POPs;

    tmps = (SvPVx(sv, len));
    if (DO_UTF8(sv)) {
	 /* If Unicode, try to downgrade
	  * If not possible, croak. */
	 SV* tsv = sv_2mortal(newSVsv(sv));

	 SvUTF8_on(tsv);
	 sv_utf8_downgrade(tsv, FALSE);
	 tmps = SvPVX(tsv);
    }
    while (*tmps && len && isSPACE(*tmps))
        tmps++, len--;
    if (*tmps == '0')
        tmps++, len--;
    if (*tmps == 'x')
        result_uv = grok_hex (tmps, &len, &flags, &result_nv);
    else if (*tmps == 'b')
        result_uv = grok_bin (tmps, &len, &flags, &result_nv);
    else
        result_uv = grok_oct (tmps, &len, &flags, &result_nv);

    if (flags & PERL_SCAN_GREATER_THAN_UV_MAX) {
        XPUSHn(result_nv);
    }
    else {
        XPUSHu(result_uv);
    }
    RETURN;
}
/* String stuff. */
/* pp_length: length($str) -- character count (not bytes) for UTF-8 SVs. */
PP(pp_length)
{
    dSP; dTARGET;
    SV *sv = TOPs;

    if (DO_UTF8(sv))
	SETi(sv_len_utf8(sv));
    else
	SETi(sv_len(sv));
    RETURN;
}
PP(pp_substr)
{
dSP; dTARGET;
SV *sv;
I32 len = 0;
STRLEN curlen;
STRLEN utf8_curlen;
I32 pos;
I32 rem;
I32 fail;
I32 lvalue = PL_op->op_flags & OPf_MOD || LVRET;
char *tmps;
I32 arybase = PL_curcop->cop_arybase;
SV *repl_sv = NULL;
char *repl = 0;
STRLEN repl_len;
int num_args = PL_op->op_private & 7;
bool repl_need_utf8_upgrade = FALSE;
bool repl_is_utf8 = FALSE;
SvTAINTED_off(TARG); /* decontaminate */
SvUTF8_off(TARG); /* decontaminate */
if (num_args > 2) {
if (num_args > 3) {
repl_sv = POPs;
repl = SvPV(repl_sv, repl_len);
repl_is_utf8 = DO_UTF8(repl_sv) && SvCUR(repl_sv);
}
len = POPi;
}
pos = POPi;
sv = POPs;
PUTBACK;
if (repl_sv) {
if (repl_is_utf8) {
if (!DO_UTF8(sv))
sv_utf8_upgrade(sv);
}
else if (DO_UTF8(sv))
repl_need_utf8_upgrade = TRUE;
}
tmps = SvPV(sv, curlen);
if (DO_UTF8(sv)) {
utf8_curlen = sv_len_utf8(sv);
if (utf8_curlen == curlen)
utf8_curlen = 0;
else
curlen = utf8_curlen;
}
else
utf8_curlen = 0;
if (pos >= arybase) {
pos -= arybase;
rem = curlen-pos;
fail = rem;
if (num_args > 2) {
if (len < 0) {
rem += len;
if (rem < 0)
rem = 0;
}
else if (rem > len)
rem = len;
}
}
else {
pos += curlen;
if (num_args < 3)
rem = curlen;
else if (len >= 0) {
rem = pos+len;
if (rem > (I32)curlen)
rem = curlen;
}
else {
rem = curlen+len;
if (rem < pos)
rem = pos;
}
if (pos < 0)
pos = 0;
fail = rem;
rem -= pos;
}
if (fail < 0) {
if (lvalue || repl)
Perl_croak(aTHX_ "substr outside of string");
if (ckWARN(WARN_SUBSTR))
Perl_warner(aTHX_ packWARN(WARN_SUBSTR), "substr outside of string");
RETPUSHUNDEF;
}
else {
I32 upos = pos;
I32 urem = rem;
if (utf8_curlen)
sv_pos_u2b(sv, &pos, &rem);
tmps += pos;
/* we either return a PV or an LV. If the TARG hasn't been used
* before, or is of that type, reuse it; otherwise use a mortal
* instead. Note that LVs can have an extended lifetime, so also
* dont reuse if refcount > 1 (bug #20933) */
if (SvTYPE(TARG) > SVt_NULL) {
if ( (SvTYPE(TARG) == SVt_PVLV)
? (!lvalue || SvREFCNT(TARG) > 1)
: lvalue)
{
TARG = sv_newmortal();
}
}
sv_setpvn(TARG, tmps, rem);
#ifdef USE_LOCALE_COLLATE
sv_unmagic(TARG, PERL_MAGIC_collxfrm);
#endif
if (utf8_curlen)
SvUTF8_on(TARG);
if (repl) {
SV* repl_sv_copy = NULL;
if (repl_need_utf8_upgrade) {
repl_sv_copy = newSVsv(repl_sv);
sv_utf8_upgrade(repl_sv_copy);
repl = SvPV(repl_sv_copy, repl_len);
repl_is_utf8 = DO_UTF8(repl_sv_copy) && SvCUR(sv);
}
sv_insert(sv, pos, rem, repl, repl_len);
if (repl_is_utf8)
SvUTF8_on(sv);
if (repl_sv_copy)
SvREFCNT_dec(repl_sv_copy);
}
else if (lvalue) { /* it's an lvalue! */
if (!SvGMAGICAL(sv)) {
if (SvROK(sv)) {
STRLEN n_a;
SvPV_force(sv,n_a);
if (ckWARN(WARN_SUBSTR))
Perl_warner(aTHX_ packWARN(WARN_SUBSTR),
"Attempt to use reference as lvalue in substr");
}
if (SvOK(sv)) /* is it defined ? */
(void)SvPOK_only_UTF8(sv);
else
sv_setpvn(sv,"",0); /* avoid lexical reincarnation */
}
if (SvTYPE(TARG) < SVt_PVLV) {
sv_upgrade(TARG, SVt_PVLV);
sv_magic(TARG, Nullsv, PERL_MAGIC_substr, Nullch, 0);
}
else
SvOK_off(TARG);
LvTYPE(TARG) = 'x';
if (LvTARG(TARG) != sv) {
if (LvTARG(TARG))
SvREFCNT_dec(LvTARG(TARG));
LvTARG(TARG) = SvREFCNT_inc(sv);
}
LvTARGOFF(TARG) = upos;
LvTARGLEN(TARG) = urem;
}
}
SPAGAIN;
PUSHs(TARG); /* avoid SvSETMAGIC here */
RETURN;
}
/* pp_vec: vec($str, $offset, $bits) -- bit-field accessor.
 * In lvalue context returns a PVLV magic scalar remembering the source,
 * offset and field width; otherwise just pushes the fetched value. */
PP(pp_vec)
{
    dSP; dTARGET;
    register IV size   = POPi;
    register IV offset = POPi;
    register SV *src = POPs;
    I32 lvalue = PL_op->op_flags & OPf_MOD || LVRET;

    SvTAINTED_off(TARG);		/* decontaminate */
    if (lvalue) {			/* it's an lvalue! */
	if (SvREFCNT(TARG) > 1)		/* don't share the TARG (#20933) */
	    TARG = sv_newmortal();
	if (SvTYPE(TARG) < SVt_PVLV) {
	    sv_upgrade(TARG, SVt_PVLV);
	    sv_magic(TARG, Nullsv, PERL_MAGIC_vec, Nullch, 0);
	}
	LvTYPE(TARG) = 'v';
	if (LvTARG(TARG) != src) {
	    if (LvTARG(TARG))
		SvREFCNT_dec(LvTARG(TARG));
	    LvTARG(TARG) = SvREFCNT_inc(src);
	}
	LvTARGOFF(TARG) = offset;
	LvTARGLEN(TARG) = size;
    }

    sv_setuv(TARG, do_vecget(src, offset, size));
    PUSHs(TARG);
    RETURN;
}
/* pp_index: index(BIG, LITTLE [, POSITION]).
 * When exactly one operand is UTF-8, the byte-string one is upgraded to
 * a temporary UTF-8 copy so both are in the same encoding.  Positions
 * are converted between characters and bytes for UTF-8 strings.
 * Returns -1 + $[ when not found. */
PP(pp_index)
{
    dSP; dTARGET;
    SV *big;
    SV *little;
    SV *temp = Nullsv;
    I32 offset;
    I32 retval;
    char *tmps;
    char *tmps2;
    STRLEN biglen;
    I32 arybase = PL_curcop->cop_arybase;
    int big_utf8;
    int little_utf8;

    if (MAXARG < 3)
	offset = 0;
    else
	offset = POPi - arybase;
    little = POPs;
    big = POPs;
    big_utf8 = DO_UTF8(big);
    little_utf8 = DO_UTF8(little);
    if (big_utf8 ^ little_utf8) {
	/* One needs to be upgraded.  */
	SV *bytes = little_utf8 ? big : little;
	STRLEN len;
	char *p = SvPV(bytes, len);

	temp = newSVpvn(p, len);

	if (PL_encoding) {
	    sv_recode_to_utf8(temp, PL_encoding);
	} else {
	    sv_utf8_upgrade(temp);
	}
	if (little_utf8) {
	    big = temp;
	    big_utf8 = TRUE;
	} else {
	    little = temp;
	}
    }
    if (big_utf8 && offset > 0)
	sv_pos_u2b(big, &offset, 0);	/* chars -> bytes */
    tmps = SvPV(big, biglen);
    if (offset < 0)
	offset = 0;
    else if (offset > (I32)biglen)
	offset = biglen;
    if (!(tmps2 = fbm_instr((unsigned char*)tmps + offset,
      (unsigned char*)tmps + biglen, little, 0)))
	retval = -1;
    else
	retval = tmps2 - tmps;
    if (retval > 0 && big_utf8)
	sv_pos_b2u(big, &retval);	/* bytes -> chars for the result */
    if (temp)
	SvREFCNT_dec(temp);
    PUSHi(retval + arybase);
    RETURN;
}
/* pp_rindex: rindex(BIG, LITTLE [, POSITION]) -- rightmost occurrence.
 * Same encoding-normalization dance as pp_index; without POSITION the
 * search starts from the end of BIG. */
PP(pp_rindex)
{
    dSP; dTARGET;
    SV *big;
    SV *little;
    SV *temp = Nullsv;
    STRLEN blen;
    STRLEN llen;
    I32 offset;
    I32 retval;
    char *tmps;
    char *tmps2;
    I32 arybase = PL_curcop->cop_arybase;
    int big_utf8;
    int little_utf8;

    if (MAXARG >= 3)
	offset = POPi;		/* else set after lengths are known, below */
    little = POPs;
    big = POPs;
    big_utf8 = DO_UTF8(big);
    little_utf8 = DO_UTF8(little);
    if (big_utf8 ^ little_utf8) {
	/* One needs to be upgraded.  */
	SV *bytes = little_utf8 ? big : little;
	STRLEN len;
	char *p = SvPV(bytes, len);

	temp = newSVpvn(p, len);

	if (PL_encoding) {
	    sv_recode_to_utf8(temp, PL_encoding);
	} else {
	    sv_utf8_upgrade(temp);
	}
	if (little_utf8) {
	    big = temp;
	    big_utf8 = TRUE;
	} else {
	    little = temp;
	}
    }
    tmps2 = SvPV(little, llen);
    tmps = SvPV(big, blen);

    if (MAXARG < 3)
	offset = blen;
    else {
	if (offset > 0 && big_utf8)
	    sv_pos_u2b(big, &offset, 0);
	offset = offset - arybase + llen;
    }
    if (offset < 0)
	offset = 0;
    else if (offset > (I32)blen)
	offset = blen;
    if (!(tmps2 = rninstr(tmps,  tmps  + offset,
			  tmps2, tmps2 + llen)))
	retval = -1;
    else
	retval = tmps2 - tmps;
    if (retval > 0 && big_utf8)
	sv_pos_b2u(big, &retval);
    if (temp)
	SvREFCNT_dec(temp);
    PUSHi(retval + arybase);
    RETURN;
}
/* pp_sprintf: sprintf(FORMAT, LIST) -- delegates to do_sprintf and
 * propagates taint and the format's UTF-8 flag to the result. */
PP(pp_sprintf)
{
    dSP; dMARK; dORIGMARK; dTARGET;
    do_sprintf(TARG, SP-MARK, MARK+1);
    TAINT_IF(SvTAINTED(TARG));
    if (DO_UTF8(*(MARK+1)))
	SvUTF8_on(TARG);	/* format was UTF-8, so is the result */
    SP = ORIGMARK;
    PUSHTARG;
    RETURN;
}
/* pp_ord: ord($str) -- code point of the first character.
 * A byte string under "use encoding" is first recoded to UTF-8 so the
 * result is the Unicode code point rather than the raw byte. */
PP(pp_ord)
{
    dSP; dTARGET;
    SV *argsv = POPs;
    STRLEN len;
    U8 *s = (U8*)SvPVx(argsv, len);
    SV *tmpsv;

    if (PL_encoding && SvPOK(argsv) && !DO_UTF8(argsv)) {
        tmpsv = sv_2mortal(newSVsv(argsv));
        s = (U8*)sv_recode_to_utf8(tmpsv, PL_encoding);
        argsv = tmpsv;
    }

    XPUSHu(DO_UTF8(argsv) ?
	   utf8n_to_uvchr(s, UTF8_MAXBYTES, 0, UTF8_ALLOW_ANYUV) :
	   (*s & 0xff));

    RETURN;
}
/* pp_chr: chr($num) -- build a one-character string.
 * Values > 255 (outside "use bytes") produce a UTF-8 string.  Under
 * "use encoding", single bytes are recoded; if the recode fails or
 * yields the replacement character, the raw two-byte UTF-8 form of the
 * original value is substituted instead. */
PP(pp_chr)
{
    dSP; dTARGET;
    char *tmps;
    UV value = POPu;

    (void)SvUPGRADE(TARG,SVt_PV);

    if (value > 255 && !IN_BYTES) {
	SvGROW(TARG, (STRLEN)UNISKIP(value)+1);
	tmps = (char*)uvchr_to_utf8_flags((U8*)SvPVX(TARG), value, 0);
	SvCUR_set(TARG, tmps - SvPVX(TARG));
	*tmps = '\0';
	(void)SvPOK_only(TARG);
	SvUTF8_on(TARG);
	XPUSHs(TARG);
	RETURN;
    }

    SvGROW(TARG,2);
    SvCUR_set(TARG, 1);
    tmps = SvPVX(TARG);
    *tmps++ = (char)value;
    *tmps = '\0';
    (void)SvPOK_only(TARG);
    if (PL_encoding && !IN_BYTES) {
        sv_recode_to_utf8(TARG, PL_encoding);
	tmps = SvPVX(TARG);
	if (SvCUR(TARG) == 0 || !is_utf8_string((U8*)tmps, SvCUR(TARG)) ||
	    memEQ(tmps, "\xef\xbf\xbd\0", 4)) {
	    /* recode failed or produced U+FFFD: fall back to raw UTF-8 */
	    SvGROW(TARG, 3);
	    tmps = SvPVX(TARG);
	    SvCUR_set(TARG, 2);
	    *tmps++ = (U8)UTF8_EIGHT_BIT_HI(value);
	    *tmps++ = (U8)UTF8_EIGHT_BIT_LO(value);
	    *tmps = '\0';
	    SvUTF8_on(TARG);
	}
    }
    XPUSHs(TARG);
    RETURN;
}
/* pp_crypt: crypt(PLAINTEXT, SALT).
 * UTF-8 plaintext is downgraded to bytes (croaks if impossible).  Under
 * ithreads with crypt_r, lazily allocates the per-interpreter crypt
 * state buffer (with a glibc-2.2.5 initialization workaround).
 * Dies at runtime when the platform has no crypt(). */
PP(pp_crypt)
{
    dSP; dTARGET;
#ifdef HAS_CRYPT
    dPOPTOPssrl;		/* left = plaintext, right = salt */
    STRLEN n_a;
    STRLEN len;
    char *tmps = SvPV(left, len);

    if (DO_UTF8(left)) {
         /* If Unicode, try to downgrade.
	  * If not possible, croak.
	  * Yes, we made this up.  */
	 SV* tsv = sv_2mortal(newSVsv(left));

	 SvUTF8_on(tsv);
	 sv_utf8_downgrade(tsv, FALSE);
	 tmps = SvPVX(tsv);
    }
#   ifdef USE_ITHREADS
#     ifdef HAS_CRYPT_R
    if (!PL_reentrant_buffer->_crypt_struct_buffer) {
      /* This should be threadsafe because in ithreads there is only
       * one thread per interpreter.  If this would not be true,
       * we would need a mutex to protect this malloc. */
        PL_reentrant_buffer->_crypt_struct_buffer =
	  (struct crypt_data *)safemalloc(sizeof(struct crypt_data));
#if defined(__GLIBC__) || defined(__EMX__)
	if (PL_reentrant_buffer->_crypt_struct_buffer) {
	    PL_reentrant_buffer->_crypt_struct_buffer->initialized = 0;
	    /* work around glibc-2.2.5 bug */
	    PL_reentrant_buffer->_crypt_struct_buffer->current_saltbits = 0;
	}
#endif
    }
#     endif /* HAS_CRYPT_R */
#   endif /* USE_ITHREADS */
#   ifdef FCRYPT
    sv_setpv(TARG, fcrypt(tmps, SvPV(right, n_a)));
#   else
    sv_setpv(TARG, PerlProc_crypt(tmps, SvPV(right, n_a)));
#   endif
    SETs(TARG);
    RETURN;
#else
    DIE(aTHX_
      "The crypt() function is unimplemented due to excessive paranoia.");
#endif
}
/* pp_ucfirst: ucfirst($str) -- titlecase the first character.
 * UTF-8 path titlecases the first code point (which may change byte
 * length, hence the copy-into-TARG branch); byte path uppercases the
 * first byte, honoring the runtime locale.  In-place modification is
 * used only for writable PADTMPs whose length does not change. */
PP(pp_ucfirst)
{
    dSP;
    SV *sv = TOPs;
    register U8 *s;
    STRLEN slen;

    SvGETMAGIC(sv);
    if (DO_UTF8(sv) &&
	(s = (U8*)SvPV_nomg(sv, slen)) && slen &&
	UTF8_IS_START(*s)) {
	U8 tmpbuf[UTF8_MAXBYTES_CASE+1];
	STRLEN ulen;
	STRLEN tculen;

	utf8_to_uvchr(s, &ulen);
	toTITLE_utf8(s, tmpbuf, &tculen);
	utf8_to_uvchr(tmpbuf, 0);

	if (!SvPADTMP(sv) || SvREADONLY(sv)) {
	    dTARGET;
	    /* slen is the byte length of the whole SV.
	     * ulen is the byte length of the original Unicode character
	     * stored as UTF-8 at s.
	     * tculen is the byte length of the freshly titlecased
	     * Unicode character stored as UTF-8 at tmpbuf.
	     * We first set the result to be the titlecased character,
	     * and then append the rest of the SV data. */
	    sv_setpvn(TARG, (char*)tmpbuf, tculen);
	    if (slen > ulen)
	        sv_catpvn(TARG, (char*)(s + ulen), slen - ulen);
	    SvUTF8_on(TARG);
	    SETs(TARG);
	}
	else {
	    s = (U8*)SvPV_force_nomg(sv, slen);
	    Copy(tmpbuf, s, tculen, U8);	/* safe: same byte length */
	}
    }
    else {
	if (!SvPADTMP(sv) || SvREADONLY(sv)) {
	    dTARGET;
	    SvUTF8_off(TARG);				/* decontaminate */
	    sv_setsv_nomg(TARG, sv);
	    sv = TARG;
	    SETs(sv);
	}
	s = (U8*)SvPV_force_nomg(sv, slen);
	if (*s) {
	    if (IN_LOCALE_RUNTIME) {
		TAINT;				/* locale results are tainted */
		SvTAINTED_on(sv);
		*s = toUPPER_LC(*s);
	    }
	    else
		*s = toUPPER(*s);
	}
    }
    SvSETMAGIC(sv);
    RETURN;
}
/* pp_lcfirst: lcfirst($str) -- lowercase the first character.
 * Mirrors pp_ucfirst: UTF-8 path lowercases the first code point,
 * byte path lowercases the first byte (locale-aware). */
PP(pp_lcfirst)
{
    dSP;
    SV *sv = TOPs;
    register U8 *s;
    STRLEN slen;

    SvGETMAGIC(sv);
    if (DO_UTF8(sv) &&
	(s = (U8*)SvPV_nomg(sv, slen)) && slen &&
	UTF8_IS_START(*s)) {
	STRLEN ulen;
	U8 tmpbuf[UTF8_MAXBYTES_CASE+1];
	U8 *tend;
	UV uv;

	toLOWER_utf8(s, tmpbuf, &ulen);
	uv = utf8_to_uvchr(tmpbuf, 0);
	tend = uvchr_to_utf8(tmpbuf, uv);

	/* in-place only when writable and the byte length is unchanged */
	if (!SvPADTMP(sv) || (STRLEN)(tend - tmpbuf) != ulen || SvREADONLY(sv)) {
	    dTARGET;
	    sv_setpvn(TARG, (char*)tmpbuf, tend - tmpbuf);
	    if (slen > ulen)
		sv_catpvn(TARG, (char*)(s + ulen), slen - ulen);
	    SvUTF8_on(TARG);
	    SETs(TARG);
	}
	else {
	    s = (U8*)SvPV_force_nomg(sv, slen);
	    Copy(tmpbuf, s, ulen, U8);
	}
    }
    else {
	if (!SvPADTMP(sv) || SvREADONLY(sv)) {
	    dTARGET;
	    SvUTF8_off(TARG);				/* decontaminate */
	    sv_setsv_nomg(TARG, sv);
	    sv = TARG;
	    SETs(sv);
	}
	s = (U8*)SvPV_force_nomg(sv, slen);
	if (*s) {
	    if (IN_LOCALE_RUNTIME) {
		TAINT;
		SvTAINTED_on(sv);
		*s = toLOWER_LC(*s);
	    }
	    else
		*s = toLOWER(*s);
	}
    }
    SvSETMAGIC(sv);
    RETURN;
}
/* pp_uc: uc($str) -- uppercase the whole string.
 * UTF-8 path builds the result in TARG character by character, growing
 * the buffer when an uppercase mapping is longer than its source.
 * Byte path uppercases in place (locale-aware). */
PP(pp_uc)
{
    dSP;
    SV *sv = TOPs;
    register U8 *s;
    STRLEN len;

    SvGETMAGIC(sv);
    if (DO_UTF8(sv)) {
	dTARGET;
	STRLEN ulen;
	register U8 *d;
	U8 *send;
	U8 tmpbuf[UTF8_MAXBYTES+1];

	s = (U8*)SvPV_nomg(sv,len);
	if (!len) {
	    SvUTF8_off(TARG);				/* decontaminate */
	    sv_setpvn(TARG, "", 0);
	    SETs(TARG);
	}
	else {
	    STRLEN min = len + 1;	/* running lower bound on result size */

	    (void)SvUPGRADE(TARG, SVt_PV);
	    SvGROW(TARG, min);
	    (void)SvPOK_only(TARG);
	    d = (U8*)SvPVX(TARG);
	    send = s + len;
	    while (s < send) {
		STRLEN u = UTF8SKIP(s);

		toUPPER_utf8(s, tmpbuf, &ulen);
		if (ulen > u && (SvLEN(TARG) < (min += ulen - u))) {
		    /* If the eventually required minimum size outgrows
		     * the available space, we need to grow. */
		    UV o = d - (U8*)SvPVX(TARG);

		    /* If someone uppercases one million U+03B0s we
		     * SvGROW() one million times.  Or we could try
		     * guessing how much to allocate without allocating
		     * too much. Such is life. */
		    SvGROW(TARG, min);
		    d = (U8*)SvPVX(TARG) + o;	/* re-anchor after realloc */
		}
		Copy(tmpbuf, d, ulen, U8);
		d += ulen;
		s += u;
	    }
	    *d = '\0';
	    SvUTF8_on(TARG);
	    SvCUR_set(TARG, d - (U8*)SvPVX(TARG));
	    SETs(TARG);
	}
    }
    else {
	if (!SvPADTMP(sv) || SvREADONLY(sv)) {
	    dTARGET;
	    SvUTF8_off(TARG);				/* decontaminate */
	    sv_setsv_nomg(TARG, sv);
	    sv = TARG;
	    SETs(sv);
	}
	s = (U8*)SvPV_force_nomg(sv, len);
	if (len) {
	    register U8 *send = s + len;

	    if (IN_LOCALE_RUNTIME) {
		TAINT;
		SvTAINTED_on(sv);
		for (; s < send; s++)
		    *s = toUPPER_LC(*s);
	    }
	    else {
		for (; s < send; s++)
		    *s = toUPPER(*s);
	    }
	}
    }
    SvSETMAGIC(sv);
    RETURN;
}
/* pp_lc: lc($str) -- lowercase the whole string.
 * Structured like pp_uc.  Contains a placeholder (not yet implemented)
 * for the Greek final-sigma special case from Unicode SpecialCasing. */
PP(pp_lc)
{
    dSP;
    SV *sv = TOPs;
    register U8 *s;
    STRLEN len;

    SvGETMAGIC(sv);
    if (DO_UTF8(sv)) {
	dTARGET;
	STRLEN ulen;
	register U8 *d;
	U8 *send;
	U8 tmpbuf[UTF8_MAXBYTES_CASE+1];

	s = (U8*)SvPV_nomg(sv,len);
	if (!len) {
	    SvUTF8_off(TARG);				/* decontaminate */
	    sv_setpvn(TARG, "", 0);
	    SETs(TARG);
	}
	else {
	    STRLEN min = len + 1;

	    (void)SvUPGRADE(TARG, SVt_PV);
	    SvGROW(TARG, min);
	    (void)SvPOK_only(TARG);
	    d = (U8*)SvPVX(TARG);
	    send = s + len;
	    while (s < send) {
		STRLEN u = UTF8SKIP(s);
		UV uv = toLOWER_utf8(s, tmpbuf, &ulen);

#define GREEK_CAPITAL_LETTER_SIGMA 0x03A3 /* Unicode U+03A3 */
		if (uv == GREEK_CAPITAL_LETTER_SIGMA) {
		     /*
		      * Now if the sigma is NOT followed by
		      * /$ignorable_sequence$cased_letter/;
		      * and it IS preceded by
		      * /$cased_letter$ignorable_sequence/;
		      * where $ignorable_sequence is
		      * [\x{2010}\x{AD}\p{Mn}]*
		      * and $cased_letter is
		      * [\p{Ll}\p{Lo}\p{Lt}]
		      * then it should be mapped to 0x03C2,
		      * (GREEK SMALL LETTER FINAL SIGMA),
		      * instead of staying 0x03A3.
		      * "should be": in other words,
		      * this is not implemented yet.
		      * See lib/unicore/SpecialCasing.txt.
		      */
		}
		if (ulen > u && (SvLEN(TARG) < (min += ulen - u))) {
		    /* If the eventually required minimum size outgrows
		     * the available space, we need to grow. */
		    UV o = d - (U8*)SvPVX(TARG);

		    /* If someone lowercases one million U+0130s we
		     * SvGROW() one million times.  Or we could try
		     * guessing how much to allocate without allocating.
		     * too much.  Such is life. */
		    SvGROW(TARG, min);
		    d = (U8*)SvPVX(TARG) + o;	/* re-anchor after realloc */
		}
		Copy(tmpbuf, d, ulen, U8);
		d += ulen;
		s += u;
	    }
	    *d = '\0';
	    SvUTF8_on(TARG);
	    SvCUR_set(TARG, d - (U8*)SvPVX(TARG));
	    SETs(TARG);
	}
    }
    else {
	if (!SvPADTMP(sv) || SvREADONLY(sv)) {
	    dTARGET;
	    SvUTF8_off(TARG);				/* decontaminate */
	    sv_setsv_nomg(TARG, sv);
	    sv = TARG;
	    SETs(sv);
	}

	s = (U8*)SvPV_force_nomg(sv, len);
	if (len) {
	    register U8 *send = s + len;

	    if (IN_LOCALE_RUNTIME) {
		TAINT;
		SvTAINTED_on(sv);
		for (; s < send; s++)
		    *s = toLOWER_LC(*s);
	    }
	    else {
		for (; s < send; s++)
		    *s = toLOWER(*s);
	    }
	}
    }
    SvSETMAGIC(sv);
    RETURN;
}
/* pp_quotemeta: quotemeta($str) -- backslash-escape every non-word byte.
 * UTF-8 multi-byte sequences are copied through unescaped; the result
 * buffer is pre-grown to 2*len+1, the worst case (every byte escaped). */
PP(pp_quotemeta)
{
    dSP; dTARGET;
    SV *sv = TOPs;
    STRLEN len;
    register char *s = SvPV(sv,len);
    register char *d;

    SvUTF8_off(TARG);				/* decontaminate */
    if (len) {
	(void)SvUPGRADE(TARG, SVt_PV);
	SvGROW(TARG, (len * 2) + 1);
	d = SvPVX(TARG);
	if (DO_UTF8(sv)) {
	    while (len) {
		if (UTF8_IS_CONTINUED(*s)) {
		    STRLEN ulen = UTF8SKIP(s);
		    if (ulen > len)
			ulen = len;	/* truncated sequence: copy what's there */
		    len -= ulen;
		    while (ulen--)
			*d++ = *s++;
		}
		else {
		    if (!isALNUM(*s))
			*d++ = '\\';
		    *d++ = *s++;
		    len--;
		}
	    }
	    SvUTF8_on(TARG);
	}
	else {
	    while (len--) {
		if (!isALNUM(*s))
		    *d++ = '\\';
		*d++ = *s++;
	    }
	}
	*d = '\0';
	SvCUR_set(TARG, d - SvPVX(TARG));
	(void)SvPOK_only_UTF8(TARG);
    }
    else
	sv_setpvn(TARG, s, len);
    SETs(TARG);
    if (SvSMAGICAL(TARG))
	mg_set(TARG);
    RETURN;
}
/* Arrays. */
/* pp_aslice: array slice @ary[LIST].
 * For a localized lvalue slice (local @a[...]) the array is pre-extended
 * to the maximum index so the save/restore machinery has stable slots.
 * In scalar context only the last element of the slice is returned. */
PP(pp_aslice)
{
    dSP; dMARK; dORIGMARK;
    register SV** svp;
    register AV* av = (AV*)POPs;
    register I32 lval = (PL_op->op_flags & OPf_MOD || LVRET);
    I32 arybase = PL_curcop->cop_arybase;
    I32 elem;

    if (SvTYPE(av) == SVt_PVAV) {
	if (lval && PL_op->op_private & OPpLVAL_INTRO) {
	    I32 max = -1;
	    for (svp = MARK + 1; svp <= SP; svp++) {
		elem = SvIVx(*svp);
		if (elem > max)
		    max = elem;
	    }
	    if (max > AvMAX(av))
		av_extend(av, max);
	}
	while (++MARK <= SP) {
	    elem = SvIVx(*MARK);

	    if (elem > 0)
		elem -= arybase;	/* adjust for $[; negative indices pass through */
	    svp = av_fetch(av, elem, lval);
	    if (lval) {
		if (!svp || *svp == &PL_sv_undef)
		    DIE(aTHX_ PL_no_aelem, elem);
		if (PL_op->op_private & OPpLVAL_INTRO)
		    save_aelem(av, elem, svp);
	    }
	    *MARK = svp ? *svp : &PL_sv_undef;
	}
    }
    if (GIMME != G_ARRAY) {
	MARK = ORIGMARK;
	*++MARK = SP > ORIGMARK ? *SP : &PL_sv_undef;
	SP = MARK;
    }
    RETURN;
}
/* Associative arrays. */
/* pp_each: each %hash -- advance the hash iterator.
 * Also supports pseudo-hashes (avhv_*).  Pushes (key) in scalar context,
 * (key, value) in list context, empty/undef at end of iteration.
 * PUTBACK/SPAGAIN bracket the iternext/iterval calls because they can
 * reallocate the stack. */
PP(pp_each)
{
    dSP;
    HV *hash = (HV*)POPs;
    HE *entry;
    I32 gimme = GIMME_V;
    I32 realhv = (SvTYPE(hash) == SVt_PVHV);

    PUTBACK;
    /* might clobber stack_sp */
    entry = realhv ? hv_iternext(hash) : avhv_iternext((AV*)hash);
    SPAGAIN;

    EXTEND(SP, 2);
    if (entry) {
	SV* sv = hv_iterkeysv(entry);
	PUSHs(sv);	/* won't clobber stack_sp */
	if (gimme == G_ARRAY) {
	    SV *val;
	    PUTBACK;
	    /* might clobber stack_sp */
	    val = realhv ?
		  hv_iterval(hash, entry) : avhv_iterval((AV*)hash, entry);
	    SPAGAIN;
	    PUSHs(val);
	}
    }
    else if (gimme == G_SCALAR)
	RETPUSHUNDEF;

    RETURN;
}
/* pp_values / pp_keys: thin wrappers; do_kv() inspects PL_op to decide
 * whether to return keys, values, or both. */
PP(pp_values)
{
    return do_kv();
}

PP(pp_keys)
{
    return do_kv();
}
/* pp_delete: delete on a hash element, array element (OPf_SPECIAL),
 * pseudo-hash element, or a slice of any of these (OPpSLICE).
 * In void context the deleted values are discarded (G_DISCARD); in
 * scalar context a slice returns only its last deleted value. */
PP(pp_delete)
{
    dSP;
    I32 gimme = GIMME_V;
    I32 discard = (gimme == G_VOID) ? G_DISCARD : 0;
    SV *sv;
    HV *hv;

    if (PL_op->op_private & OPpSLICE) {
	dMARK; dORIGMARK;
	U32 hvtype;
	hv = (HV*)POPs;
	hvtype = SvTYPE(hv);
	if (hvtype == SVt_PVHV) {			/* hash element */
	    while (++MARK <= SP) {
		sv = hv_delete_ent(hv, *MARK, discard, 0);
		*MARK = sv ? sv : &PL_sv_undef;
	    }
	}
	else if (hvtype == SVt_PVAV) {
	    if (PL_op->op_flags & OPf_SPECIAL) {	/* array element */
		while (++MARK <= SP) {
		    sv = av_delete((AV*)hv, SvIV(*MARK), discard);
		    *MARK = sv ? sv : &PL_sv_undef;
		}
	    }
	    else {					/* pseudo-hash element */
		while (++MARK <= SP) {
		    sv = avhv_delete_ent((AV*)hv, *MARK, discard, 0);
		    *MARK = sv ? sv : &PL_sv_undef;
		}
	    }
	}
	else
	    DIE(aTHX_ "Not a HASH reference");
	if (discard)
	    SP = ORIGMARK;
	else if (gimme == G_SCALAR) {
	    MARK = ORIGMARK;
	    if (SP > MARK)
		*++MARK = *SP;
	    else
		*++MARK = &PL_sv_undef;
	    SP = MARK;
	}
    }
    else {
	SV *keysv = POPs;
	hv = (HV*)POPs;
	if (SvTYPE(hv) == SVt_PVHV)
	    sv = hv_delete_ent(hv, keysv, discard, 0);
	else if (SvTYPE(hv) == SVt_PVAV) {
	    if (PL_op->op_flags & OPf_SPECIAL)
		sv = av_delete((AV*)hv, SvIV(keysv), discard);
	    else
		sv = avhv_delete_ent((AV*)hv, keysv, discard, 0);
	}
	else
	    DIE(aTHX_ "Not a HASH reference");
	if (!sv)
	    sv = &PL_sv_undef;
	if (!discard)
	    PUSHs(sv);
    }
    RETURN;
}
/* pp_exists: exists on a hash element, array element (OPf_SPECIAL),
 * pseudo-hash element, or a subroutine (OPpEXISTS_SUB, exists &sub). */
PP(pp_exists)
{
    dSP;
    SV *tmpsv;
    HV *hv;

    if (PL_op->op_private & OPpEXISTS_SUB) {
	GV *gv;
	CV *cv;
	SV *sv = POPs;
	cv = sv_2cv(sv, &hv, &gv, FALSE);
	if (cv)
	    RETPUSHYES;
	/* stub without a body still counts, unless it's a cached method */
	if (gv && isGV(gv) && GvCV(gv) && !GvCVGEN(gv))
	    RETPUSHYES;
	RETPUSHNO;
    }
    tmpsv = POPs;
    hv = (HV*)POPs;
    if (SvTYPE(hv) == SVt_PVHV) {
	if (hv_exists_ent(hv, tmpsv, 0))
	    RETPUSHYES;
    }
    else if (SvTYPE(hv) == SVt_PVAV) {
	if (PL_op->op_flags & OPf_SPECIAL) {		/* array element */
	    if (av_exists((AV*)hv, SvIV(tmpsv)))
		RETPUSHYES;
	}
	else if (avhv_exists_ent((AV*)hv, tmpsv, 0))	/* pseudo-hash element */
	    RETPUSHYES;
    }
    else {
	DIE(aTHX_ "Not a HASH reference");
    }
    RETPUSHNO;
}
/* pp_hslice: hash slice @hash{LIST} (also pseudo-hashes).
 * For "local @h{...}" it tries to preserve each element's existence
 * state: pre-existing elements are saved with save_helem, previously
 * absent ones are scheduled for deletion at scope exit (SAVEDELETE).
 * Tied hashes only get this treatment when EXISTS/DELETE methods are
 * available (checked up front). */
PP(pp_hslice)
{
    dSP; dMARK; dORIGMARK;
    register HV *hv = (HV*)POPs;
    register I32 lval = (PL_op->op_flags & OPf_MOD || LVRET);
    I32 realhv = (SvTYPE(hv) == SVt_PVHV);
    bool localizing = PL_op->op_private & OPpLVAL_INTRO ? TRUE : FALSE;
    bool other_magic = FALSE;

    if (localizing) {
        MAGIC *mg;
        HV *stash;

        other_magic = mg_find((SV*)hv, PERL_MAGIC_env) ||
            ((mg = mg_find((SV*)hv, PERL_MAGIC_tied))
             /* Try to preserve the existenceness of a tied hash
              * element by using EXISTS and DELETE if possible.
              * Fallback to FETCH and STORE otherwise */
             && (stash = SvSTASH(SvRV(SvTIED_obj((SV*)hv, mg))))
             && gv_fetchmethod_autoload(stash, "EXISTS", TRUE)
             && gv_fetchmethod_autoload(stash, "DELETE", TRUE));
    }

    if (!realhv && localizing)
	DIE(aTHX_ "Can't localize pseudo-hash element");

    if (realhv || SvTYPE(hv) == SVt_PVAV) {
	while (++MARK <= SP) {
	    SV *keysv = *MARK;
	    SV **svp;
	    bool preeminent = FALSE;

            if (localizing) {
                preeminent = SvRMAGICAL(hv) && !other_magic ? 1 :
                    realhv ? hv_exists_ent(hv, keysv, 0)
                    : avhv_exists_ent((AV*)hv, keysv, 0);
            }

	    if (realhv) {
		HE *he = hv_fetch_ent(hv, keysv, lval, 0);
		svp = he ? &HeVAL(he) : 0;
	    }
	    else {
		svp = avhv_fetch_ent((AV*)hv, keysv, lval, 0);
	    }
	    if (lval) {
		if (!svp || *svp == &PL_sv_undef) {
		    STRLEN n_a;
		    DIE(aTHX_ PL_no_helem, SvPV(keysv, n_a));
		}
		if (localizing) {
		    if (preeminent)
		        save_helem(hv, keysv, svp);
		    else {
			STRLEN keylen;
			char *key = SvPV(keysv, keylen);
			SAVEDELETE(hv, savepvn(key,keylen), keylen);
		    }
                }
	    }
	    *MARK = svp ? *svp : &PL_sv_undef;
	}
    }
    if (GIMME != G_ARRAY) {
	MARK = ORIGMARK;
	*++MARK = SP > ORIGMARK ? *SP : &PL_sv_undef;
	SP = MARK;
    }
    RETURN;
}
/* List operators. */
/* pp_list: the list operator.  In list context a no-op; in scalar
 * context collapses the list to its last element (C comma semantics),
 * or undef for an empty list. */
PP(pp_list)
{
    dSP; dMARK;
    if (GIMME != G_ARRAY) {
	if (++MARK <= SP)
	    *MARK = *SP;		/* unwanted list, return last item */
	else
	    *MARK = &PL_sv_undef;
	SP = MARK;
    }
    RETURN;
}
/* pp_lslice: list slice (LIST)[INDICES].
 * The stack holds the index list (between the two marks) followed by
 * the source list.  Out-of-range indices yield undef.  In scalar
 * context only the last index is applied.  An all-undef result slice
 * collapses to the empty list unless in lvalue context. */
PP(pp_lslice)
{
    dSP;
    SV **lastrelem = PL_stack_sp;
    SV **lastlelem = PL_stack_base + POPMARK;
    SV **firstlelem = PL_stack_base + POPMARK + 1;
    register SV **firstrelem = lastlelem + 1;
    I32 arybase = PL_curcop->cop_arybase;
    I32 lval = PL_op->op_flags & OPf_MOD;
    I32 is_something_there = lval;

    register I32 max = lastrelem - lastlelem;
    register SV **lelem;
    register I32 ix;

    if (GIMME != G_ARRAY) {
	ix = SvIVx(*lastlelem);
	if (ix < 0)
	    ix += max;			/* negative index counts from the end */
	else
	    ix -= arybase;
	if (ix < 0 || ix >= max)
	    *firstlelem = &PL_sv_undef;
	else
	    *firstlelem = firstrelem[ix];
	SP = firstlelem;
	RETURN;
    }

    if (max == 0) {
	SP = firstlelem - 1;
	RETURN;
    }

    for (lelem = firstlelem; lelem <= lastlelem; lelem++) {
	ix = SvIVx(*lelem);
	if (ix < 0)
	    ix += max;
	else
	    ix -= arybase;
	if (ix < 0 || ix >= max)
	    *lelem = &PL_sv_undef;
	else {
	    is_something_there = TRUE;
	    if (!(*lelem = firstrelem[ix]))
		*lelem = &PL_sv_undef;
	}
    }
    if (is_something_there)
	SP = lastlelem;
    else
	SP = firstlelem - 1;
    RETURN;
}
/* pp_anonlist: [ LIST ] -- build a mortal anonymous array from the
 * stacked items.  (The enclosing srefgen op takes the reference.) */
PP(pp_anonlist)
{
    dSP; dMARK; dORIGMARK;
    I32 items = SP - MARK;
    SV *av = sv_2mortal((SV*)av_make(items, MARK+1));
    SP = ORIGMARK;		/* av_make() might realloc stack_sp */
    XPUSHs(av);
    RETURN;
}

/* pp_anonhash: { LIST } -- build a mortal anonymous hash from stacked
 * key/value pairs; warns on an odd number of elements (trailing key
 * gets an undef value). */
PP(pp_anonhash)
{
    dSP; dMARK; dORIGMARK;
    HV* hv = (HV*)sv_2mortal((SV*)newHV());

    while (MARK < SP) {
	SV* key = *++MARK;
	SV *val = NEWSV(46, 0);
	if (MARK < SP)
	    sv_setsv(val, *++MARK);
	else if (ckWARN(WARN_MISC))
	    Perl_warner(aTHX_ packWARN(WARN_MISC), "Odd number of elements in anonymous hash");
	(void)hv_store_ent(hv,key,val,0);
    }
    SP = ORIGMARK;
    XPUSHs((SV*)hv);
    RETURN;
}
/* pp_splice: splice(@ary, OFFSET, LENGTH, LIST).
 * Tied arrays are delegated to the SPLICE method.  Otherwise:
 * normalizes OFFSET/LENGTH (negatives count from the end; both default
 * as documented), then either shrinks the array (new list shorter than
 * the removed span) or expands it, moving the head or tail of the
 * array -- whichever is cheaper -- to open or close the gap.  Removed
 * elements are returned (all of them in list context, the last one in
 * scalar context) and mortalized when the array owns its elements. */
PP(pp_splice)
{
    dSP; dMARK; dORIGMARK;
    register AV *ary = (AV*)*++MARK;
    register SV **src;
    register SV **dst;
    register I32 i;
    register I32 offset;
    register I32 length;
    I32 newlen;
    I32 after;
    I32 diff;
    SV **tmparyval = 0;
    MAGIC *mg;

    if ((mg = SvTIED_mg((SV*)ary, PERL_MAGIC_tied))) {
	*MARK-- = SvTIED_obj((SV*)ary, mg);
	PUSHMARK(MARK);
	PUTBACK;
	ENTER;
	call_method("SPLICE",GIMME_V);
	LEAVE;
	SPAGAIN;
	RETURN;
    }

    SP++;

    if (++MARK < SP) {
	offset = i = SvIVx(*MARK);
	if (offset < 0)
	    offset += AvFILLp(ary) + 1;		/* negative: from the end */
	else
	    offset -= PL_curcop->cop_arybase;
	if (offset < 0)
	    DIE(aTHX_ PL_no_aelem, i);
	if (++MARK < SP) {
	    length = SvIVx(*MARK++);
	    if (length < 0) {
		length += AvFILLp(ary) - offset + 1;
		if (length < 0)
		    length = 0;
	    }
	}
	else
	    length = AvMAX(ary) + 1;		/* close enough to infinity */
    }
    else {
	offset = 0;
	length = AvMAX(ary) + 1;
    }
    if (offset > AvFILLp(ary) + 1) {
	if (ckWARN(WARN_MISC))
	    Perl_warner(aTHX_ packWARN(WARN_MISC), "splice() offset past end of array" );
	offset = AvFILLp(ary) + 1;
    }
    after = AvFILLp(ary) + 1 - (offset + length);
    if (after < 0) {				/* not that much array */
	length += after;			/* offset+length now in array */
	after = 0;
	if (!AvALLOC(ary))
	    av_extend(ary, 0);
    }

    /* At this point, MARK .. SP-1 is our new LIST */

    newlen = SP - MARK;
    diff = newlen - length;			/* net change in array size */
    if (newlen && !AvREAL(ary) && AvREIFY(ary))
	av_reify(ary);

    /* make new elements SVs now: avoid problems if they're from the array */
    for (dst = MARK, i = newlen; i; i--) {
        SV *h = *dst;
	*dst++ = newSVsv(h);
    }

    if (diff < 0) {				/* shrinking the area */
	if (newlen) {
	    New(451, tmparyval, newlen, SV*);	/* so remember insertion */
	    Copy(MARK, tmparyval, newlen, SV*);
	}

	MARK = ORIGMARK + 1;
	if (GIMME == G_ARRAY) {			/* copy return vals to stack */
	    MEXTEND(MARK, length);
	    Copy(AvARRAY(ary)+offset, MARK, length, SV*);
	    if (AvREAL(ary)) {
		EXTEND_MORTAL(length);
		for (i = length, dst = MARK; i; i--) {
		    sv_2mortal(*dst);	/* free them eventualy */
		    dst++;
		}
	    }
	    MARK += length - 1;
	}
	else {
	    *MARK = AvARRAY(ary)[offset+length-1];
	    if (AvREAL(ary)) {
		sv_2mortal(*MARK);
		for (i = length - 1, dst = &AvARRAY(ary)[offset]; i > 0; i--)
		    SvREFCNT_dec(*dst++);	/* free them now */
	    }
	}
	AvFILLp(ary) += diff;

	/* pull up or down? */

	if (offset < after) {			/* easier to pull up */
	    if (offset) {			/* esp. if nothing to pull */
		src = &AvARRAY(ary)[offset-1];
		dst = src - diff;		/* diff is negative */
		for (i = offset; i > 0; i--)	/* can't trust Copy */
		    *dst-- = *src--;
	    }
	    dst = AvARRAY(ary);
	    SvPVX(ary) = (char*)(AvARRAY(ary) - diff); /* diff is negative */
	    AvMAX(ary) += diff;
	}
	else {
	    if (after) {			/* anything to pull down? */
		src = AvARRAY(ary) + offset + length;
		dst = src + diff;		/* diff is negative */
		Move(src, dst, after, SV*);
	    }
	    dst = &AvARRAY(ary)[AvFILLp(ary)+1];
						/* avoid later double free */
	}
	i = -diff;
	while (i)
	    dst[--i] = &PL_sv_undef;

	if (newlen) {
	    Copy( tmparyval, AvARRAY(ary) + offset, newlen, SV* );
	    Safefree(tmparyval);
	}
    }
    else {					/* no, expanding (or same) */
	if (length) {
	    New(452, tmparyval, length, SV*);	/* so remember deletion */
	    Copy(AvARRAY(ary)+offset, tmparyval, length, SV*);
	}

	if (diff > 0) {				/* expanding */

	    /* push up or down? */

	    if (offset < after && diff <= AvARRAY(ary) - AvALLOC(ary)) {
		if (offset) {
		    src = AvARRAY(ary);
		    dst = src - diff;
		    Move(src, dst, offset, SV*);
		}
		SvPVX(ary) = (char*)(AvARRAY(ary) - diff);/* diff is positive */
		AvMAX(ary) += diff;
		AvFILLp(ary) += diff;
	    }
	    else {
		if (AvFILLp(ary) + diff >= AvMAX(ary))	/* oh, well */
		    av_extend(ary, AvFILLp(ary) + diff);
		AvFILLp(ary) += diff;

		if (after) {
		    dst = AvARRAY(ary) + AvFILLp(ary);
		    src = dst - diff;
		    for (i = after; i; i--) {
			*dst-- = *src--;
		    }
		}
	    }
	}

	if (newlen) {
	    Copy( MARK, AvARRAY(ary) + offset, newlen, SV* );
	}

	MARK = ORIGMARK + 1;
	if (GIMME == G_ARRAY) {			/* copy return vals to stack */
	    if (length) {
		Copy(tmparyval, MARK, length, SV*);
		if (AvREAL(ary)) {
		    EXTEND_MORTAL(length);
		    for (i = length, dst = MARK; i; i--) {
			sv_2mortal(*dst);	/* free them eventualy */
			dst++;
		    }
		}
		Safefree(tmparyval);
	    }
	    MARK += length - 1;
	}
	else if (length--) {
	    *MARK = tmparyval[length];
	    if (AvREAL(ary)) {
		sv_2mortal(*MARK);
		while (length-- > 0)
		    SvREFCNT_dec(tmparyval[length]);
	    }
	    Safefree(tmparyval);
	}
	else
	    *MARK = &PL_sv_undef;
    }
    SP = MARK;
    RETURN;
}
/* pp_push: push @ary, LIST.  Tied arrays delegate to the PUSH method.
 * Each pushed element is a fresh copy.  Returns the new array length. */
PP(pp_push)
{
    dSP; dMARK; dORIGMARK; dTARGET;
    register AV *ary = (AV*)*++MARK;
    register SV *sv = &PL_sv_undef;
    MAGIC *mg;

    if ((mg = SvTIED_mg((SV*)ary, PERL_MAGIC_tied))) {
	*MARK-- = SvTIED_obj((SV*)ary, mg);
	PUSHMARK(MARK);
	PUTBACK;
	ENTER;
	call_method("PUSH",G_SCALAR|G_DISCARD);
	LEAVE;
	SPAGAIN;
    }
    else {
	/* Why no pre-extend of ary here ? */
	for (++MARK; MARK <= SP; MARK++) {
	    sv = NEWSV(51, 0);
	    if (*MARK)
		sv_setsv(sv, *MARK);
	    av_push(ary, sv);
	}
    }
    SP = ORIGMARK;
    PUSHi( AvFILL(ary) + 1 );
    RETURN;
}
/* pp_pop: pop @ary -- remove and return the last element, mortalizing
 * it when the array owns its elements. */
PP(pp_pop)
{
    dSP;
    AV *av = (AV*)POPs;
    SV *sv = av_pop(av);
    if (AvREAL(av))
	(void)sv_2mortal(sv);
    PUSHs(sv);
    RETURN;
}

/* pp_shift: shift @ary -- remove and return the first element;
 * pushes undef for an empty array. */
PP(pp_shift)
{
    dSP;
    AV *av = (AV*)POPs;
    SV *sv = av_shift(av);
    EXTEND(SP, 1);
    if (!sv)
	RETPUSHUNDEF;
    if (AvREAL(av))
	(void)sv_2mortal(sv);
    PUSHs(sv);
    RETURN;
}
/* pp_unshift: unshift @ary, LIST.  Tied arrays delegate to the UNSHIFT
 * method.  Makes room first, then stores fresh copies of the list.
 * Returns the new array length. */
PP(pp_unshift)
{
    dSP; dMARK; dORIGMARK; dTARGET;
    register AV *ary = (AV*)*++MARK;
    register SV *sv;
    register I32 i = 0;
    MAGIC *mg;

    if ((mg = SvTIED_mg((SV*)ary, PERL_MAGIC_tied))) {
	*MARK-- = SvTIED_obj((SV*)ary, mg);
	PUSHMARK(MARK);
	PUTBACK;
	ENTER;
	call_method("UNSHIFT",G_SCALAR|G_DISCARD);
	LEAVE;
	SPAGAIN;
    }
    else {
	av_unshift(ary, SP - MARK);
	while (MARK < SP) {
	    sv = newSVsv(*++MARK);
	    (void)av_store(ary, i++, sv);
	}
    }
    SP = ORIGMARK;
    PUSHi( AvFILL(ary) + 1 );
    RETURN;
}
/* pp_reverse: reverse LIST.
 * List context: reverse the items on the stack in place.
 * Scalar context: join the arguments (or use $_ with no args) and
 * reverse the string.  For UTF-8 strings this is done in two passes:
 * first each multi-byte character's bytes are reversed individually,
 * then the whole byte string is reversed -- the double reversal leaves
 * every character's bytes in correct order but the characters in
 * reverse sequence. */
PP(pp_reverse)
{
    dSP; dMARK;
    register SV *tmp;
    SV **oldsp = SP;

    if (GIMME == G_ARRAY) {
	MARK++;
	while (MARK < SP) {
	    tmp = *MARK;
	    *MARK++ = *SP;
	    *SP-- = tmp;
	}
	/* safe as long as stack cannot get extended in the above */
	SP = oldsp;
    }
    else {
	register char *up;
	register char *down;
	register I32 tmp;
	dTARGET;
	STRLEN len;

	SvUTF8_off(TARG);				/* decontaminate */
	if (SP - MARK > 1)
	    do_join(TARG, &PL_sv_no, MARK, SP);
	else
	    sv_setsv(TARG, (SP > MARK) ? *SP : DEFSV);
	up = SvPV_force(TARG, len);
	if (len > 1) {
	    if (DO_UTF8(TARG)) {	/* first reverse each character */
		U8* s = (U8*)SvPVX(TARG);
		U8* send = (U8*)(s + len);
		while (s < send) {
		    if (UTF8_IS_INVARIANT(*s)) {
			s++;
			continue;
		    }
		    else {
			if (!utf8_to_uvchr(s, 0))
			    break;	/* malformed: stop reversing bytes */
			up = (char*)s;
			s += UTF8SKIP(s);
			down = (char*)(s - 1);
			/* reverse this character */
			while (down > up) {
			    tmp = *up;
			    *up++ = *down;
			    *down-- = (char)tmp;
			}
		    }
		}
		up = SvPVX(TARG);
	    }
	    down = SvPVX(TARG) + len - 1;
	    while (down > up) {
		tmp = *up;
		*up++ = *down;
		*down-- = (char)tmp;
	    }
	    (void)SvPOK_only_UTF8(TARG);
	}
	SP = MARK + 1;
	SETTARG;
    }
    RETURN;
}
/* pp_split - implements split /PATTERN/, EXPR, LIMIT.
 * Stack in: the limit, the string to split, and (implicitly) the PMOP.
 * Results are pushed onto the stack, or stored into a real array when
 * splitting directly into one (e.g. @x = split ... compiled that way),
 * in which case the stack is temporarily switched to the AV itself.
 * Fast paths avoid the full regex engine for:
 *   - PMf_WHITE:  awk-style split ' ' on whitespace runs
 *   - /^/:        split on newline boundaries
 *   - constant strings discovered by the regex intuit optimizer */
PP(pp_split)
{
    dSP; dTARG;
    AV *ary;
    register IV limit = POPi;           /* note, negative is forever */
    SV *sv = POPs;
    STRLEN len;
    register char *s = SvPV(sv, len);
    bool do_utf8 = DO_UTF8(sv);
    char *strend = s + len;
    register PMOP *pm;
    register REGEXP *rx;
    register SV *dstr;
    register char *m;
    I32 iters = 0;
    STRLEN slen = do_utf8 ? utf8_length((U8*)s, (U8*)strend) : (strend - s);
    I32 maxiters = slen + 10;           /* loop-runaway guard, grown below for captures */
    I32 i;
    char *orig;
    I32 origlimit = limit;
    I32 realarray = 0;
    I32 base;
    I32 gimme = GIMME_V;
    I32 oldsave = PL_savestack_ix;
    I32 make_mortal = 1;
    MAGIC *mg = (MAGIC *) NULL;

#ifdef DEBUGGING
    Copy(&LvTARGOFF(POPs), &pm, 1, PMOP*);
#else
    pm = (PMOP*)POPs;
#endif
    if (!pm || !s)
        DIE(aTHX_ "panic: pp_split");
    rx = PM_GETRE(pm);

    TAINT_IF((pm->op_pmflags & PMf_LOCALE) &&
             (pm->op_pmflags & (PMf_WHITE | PMf_SKIPWHITE)));

    RX_MATCH_UTF8_set(rx, do_utf8);

    /* Work out the destination array, if any: an explicit target array
     * compiled into the op, or @_ / thread pad 0 in scalar context. */
    if (pm->op_pmreplroot) {
#ifdef USE_ITHREADS
        ary = GvAVn((GV*)PAD_SVl(INT2PTR(PADOFFSET, pm->op_pmreplroot)));
#else
        ary = GvAVn((GV*)pm->op_pmreplroot);
#endif
    }
    else if (gimme != G_ARRAY)
#ifdef USE_5005THREADS
        ary = (AV*)PAD_SVl(0);
#else
        ary = GvAVn(PL_defgv);
#endif /* USE_5005THREADS */
    else
        ary = Nullav;
    if (ary && (gimme != G_ARRAY || (pm->op_pmflags & PMf_ONCE))) {
        realarray = 1;
        PUTBACK;
        av_extend(ary,0);
        av_clear(ary);
        SPAGAIN;
        if ((mg = SvTIED_mg((SV*)ary, PERL_MAGIC_tied))) {
            /* tied target: results will be PUSHed via the tie interface */
            PUSHMARK(SP);
            XPUSHs(SvTIED_obj((SV*)ary, mg));
        }
        else {
            if (!AvREAL(ary)) {
                AvREAL_on(ary);
                AvREIFY_off(ary);
                for (i = AvFILLp(ary); i >= 0; i--)
                    AvARRAY(ary)[i] = &PL_sv_undef;     /* don't free mere refs */
            }
            /* temporarily switch stacks so results land directly in ary */
            SAVESWITCHSTACK(PL_curstack, ary);
            make_mortal = 0;
        }
    }
    base = SP - PL_stack_base;
    orig = s;
    if (pm->op_pmflags & PMf_SKIPWHITE) {
        /* split ' ' skips leading whitespace before the first field */
        if (pm->op_pmflags & PMf_LOCALE) {
            while (isSPACE_LC(*s))
                s++;
        }
        else {
            while (isSPACE(*s))
                s++;
        }
    }
    if (pm->op_pmflags & (PMf_MULTILINE|PMf_SINGLELINE)) {
        SAVEINT(PL_multiline);
        PL_multiline = pm->op_pmflags & PMf_MULTILINE;
    }

    if (!limit)
        limit = maxiters + 2;   /* LIMIT of 0 means effectively unlimited */
    if (pm->op_pmflags & PMf_WHITE) {
        /* fast path: awk-style whitespace splitting */
        while (--limit) {
            m = s;
            while (m < strend &&
                   !((pm->op_pmflags & PMf_LOCALE)
                     ? isSPACE_LC(*m) : isSPACE(*m)))
                ++m;
            if (m >= strend)
                break;

            dstr = newSVpvn(s, m-s);
            if (make_mortal)
                sv_2mortal(dstr);
            if (do_utf8)
                (void)SvUTF8_on(dstr);
            XPUSHs(dstr);

            s = m + 1;
            while (s < strend &&
                   ((pm->op_pmflags & PMf_LOCALE)
                    ? isSPACE_LC(*s) : isSPACE(*s)))
                ++s;
        }
    }
    else if (rx->precomp[0] == '^' && rx->precomp[1] == '\0') {
        /* fast path: /^/ splits at line boundaries */
        while (--limit) {
            /*SUPPRESS 530*/
            for (m = s; m < strend && *m != '\n'; m++) ;
            m++;
            if (m >= strend)
                break;
            dstr = newSVpvn(s, m-s);
            if (make_mortal)
                sv_2mortal(dstr);
            if (do_utf8)
                (void)SvUTF8_on(dstr);
            XPUSHs(dstr);
            s = m;
        }
    }
    else if (do_utf8 == ((rx->reganch & ROPT_UTF8) != 0) &&
             (rx->reganch & RE_USE_INTUIT) && !rx->nparens
             && (rx->reganch & ROPT_CHECK_ALL)
             && !(rx->reganch & ROPT_ANCH)) {
        /* fast path: the pattern is equivalent to a constant substring
         * the intuit optimizer can find without running the engine */
        int tail = (rx->reganch & RE_INTUIT_TAIL);
        SV *csv = CALLREG_INTUIT_STRING(aTHX_ rx);

        len = rx->minlen;
        if (len == 1 && !(rx->reganch & ROPT_UTF8) && !tail) {
            /* single-byte delimiter: plain memchr-style scan */
            STRLEN n_a;
            char c = *SvPV(csv, n_a);
            while (--limit) {
                /*SUPPRESS 530*/
                for (m = s; m < strend && *m != c; m++) ;
                if (m >= strend)
                    break;
                dstr = newSVpvn(s, m-s);
                if (make_mortal)
                    sv_2mortal(dstr);
                if (do_utf8)
                    (void)SvUTF8_on(dstr);
                XPUSHs(dstr);
                /* The rx->minlen is in characters but we want to step
                 * s ahead by bytes. */
                if (do_utf8)
                    s = (char*)utf8_hop((U8*)m, len);
                else
                    s = m + len; /* Fake \n at the end */
            }
        }
        else {
            /* multi-byte delimiter: Boyer-Moore search */
#ifndef lint
            while (s < strend && --limit &&
                   (m = fbm_instr((unsigned char*)s, (unsigned char*)strend,
                                  csv, PL_multiline ? FBMrf_MULTILINE : 0)) )
#endif
            {
                dstr = newSVpvn(s, m-s);
                if (make_mortal)
                    sv_2mortal(dstr);
                if (do_utf8)
                    (void)SvUTF8_on(dstr);
                XPUSHs(dstr);
                /* The rx->minlen is in characters but we want to step
                 * s ahead by bytes. */
                if (do_utf8)
                    s = (char*)utf8_hop((U8*)m, len);
                else
                    s = m + len; /* Fake \n at the end */
            }
        }
    }
    else {
        /* general case: run the regex engine for each delimiter */
        maxiters += slen * rx->nparens;
        while (s < strend && --limit)
        {
            PUTBACK;
            i = CALLREGEXEC(aTHX_ rx, s, strend, orig, 1 , sv, NULL, 0);
            SPAGAIN;
            if (i == 0)
                break;
            TAINT_IF(RX_MATCH_TAINTED(rx));
            if (RX_MATCH_COPIED(rx) && rx->subbeg != orig) {
                /* the engine copied the subject string; rebase our
                 * pointers onto the copy */
                m = s;
                s = orig;
                orig = rx->subbeg;
                s = orig + (m - s);
                strend = s + (strend - m);
            }
            m = rx->startp[0] + orig;
            dstr = newSVpvn(s, m-s);
            if (make_mortal)
                sv_2mortal(dstr);
            if (do_utf8)
                (void)SvUTF8_on(dstr);
            XPUSHs(dstr);
            if (rx->nparens) {
                /* push capture-group contents after each field */
                for (i = 1; i <= (I32)rx->nparens; i++) {
                    s = rx->startp[i] + orig;
                    m = rx->endp[i] + orig;

                    /* japhy (07/27/01) -- the (m && s) test doesn't catch
                       parens that didn't match -- they should be set to
                       undef, not the empty string */
                    if (m >= orig && s >= orig) {
                        dstr = newSVpvn(s, m-s);
                    }
                    else
                        dstr = &PL_sv_undef;  /* undef, not "" */
                    if (make_mortal)
                        sv_2mortal(dstr);
                    if (do_utf8)
                        (void)SvUTF8_on(dstr);
                    XPUSHs(dstr);
                }
            }
            s = rx->endp[0] + orig;
        }
    }

    iters = (SP - PL_stack_base) - base;
    if (iters > maxiters)
        DIE(aTHX_ "Split loop");

    /* keep field after final delim? */
    if (s < strend || (iters && origlimit)) {
        STRLEN l = strend - s;
        dstr = newSVpvn(s, l);
        if (make_mortal)
            sv_2mortal(dstr);
        if (do_utf8)
            (void)SvUTF8_on(dstr);
        XPUSHs(dstr);
        iters++;
    }
    else if (!origlimit) {
        /* no explicit LIMIT: strip trailing empty fields */
        while (iters > 0 && (!TOPs || !SvANY(TOPs) || SvCUR(TOPs) == 0)) {
            if (TOPs && !make_mortal)
                sv_2mortal(TOPs);
            iters--;
            *SP-- = &PL_sv_undef;
        }
    }

    PUTBACK;
    LEAVE_SCOPE(oldsave); /* may undo an earlier SWITCHSTACK */
    SPAGAIN;
    if (realarray) {
        if (!mg) {
            if (SvSMAGICAL(ary)) {
                PUTBACK;
                mg_set((SV*)ary);
                SPAGAIN;
            }
            if (gimme == G_ARRAY) {
                /* copy the array's contents back onto the stack */
                EXTEND(SP, iters);
                Copy(AvARRAY(ary), SP + 1, iters, SV*);
                SP += iters;
                RETURN;
            }
        }
        else {
            /* tied array: deliver the fields through ->PUSH */
            PUTBACK;
            ENTER;
            call_method("PUSH",G_SCALAR|G_DISCARD);
            LEAVE;
            SPAGAIN;
            if (gimme == G_ARRAY) {
                /* EXTEND should not be needed - we just popped them */
                EXTEND(SP, iters);
                for (i=0; i < iters; i++) {
                    SV **svp = av_fetch(ary, i, FALSE);
                    PUSHs((svp) ? *svp : &PL_sv_undef);
                }
                RETURN;
            }
        }
    }
    else {
        if (gimme == G_ARRAY)
            RETURN;
    }
    /* scalar context: return the number of fields */
    GETTARGET;
    PUSHi(iters);
    RETURN;
}
#ifdef USE_5005THREADS
/* Perl_unlock_condpair - release a condpair mutex attached to an SV
 * (5.005-threads build only).  Croaks if the SV carries no mutex magic
 * or if the calling thread is not the current owner; otherwise clears
 * ownership and signals one waiter before dropping the internal lock. */
void
Perl_unlock_condpair(pTHX_ void *svv)
{
    MAGIC *mg = mg_find((SV*)svv, PERL_MAGIC_mutex);

    if (!mg)
        Perl_croak(aTHX_ "panic: unlock_condpair unlocking non-mutex");
    MUTEX_LOCK(MgMUTEXP(mg));          /* guard the owner field */
    if (MgOWNER(mg) != thr)
        Perl_croak(aTHX_ "panic: unlock_condpair unlocking mutex that we don't own");
    MgOWNER(mg) = 0;                   /* release ownership */
    COND_SIGNAL(MgOWNERCONDP(mg));     /* wake one thread waiting to own it */
    DEBUG_S(PerlIO_printf(Perl_debug_log, "0x%"UVxf": unlock 0x%"UVxf"\n",
                          PTR2UV(thr), PTR2UV(svv)));
    MUTEX_UNLOCK(MgMUTEXP(mg));
}
#endif /* USE_5005THREADS */
/* pp_lock - implements Perl's lock KEYWORD.
 * Locks the SV on top of the stack via SvLOCK and returns it; when the
 * operand is an aggregate (array, hash, or code), the return value is a
 * reference to it rather than the aggregate itself. */
PP(pp_lock)
{
    dSP;
    dTOPss;
    SV *retsv = sv;
    SvLOCK(sv);
    if (SvTYPE(retsv) == SVt_PVAV || SvTYPE(retsv) == SVt_PVHV
        || SvTYPE(retsv) == SVt_PVCV) {
        retsv = refto(retsv);   /* aggregates are returned by reference */
    }
    SETs(retsv);
    RETURN;
}
/* pp_threadsv - push a per-thread SV (5.005-threads builds only).
 * With OPpLVAL_INTRO the current value is saved first so 'local' works;
 * otherwise the existing per-thread SV is pushed as-is.  Dies in
 * non-threaded builds, where per-thread data does not exist. */
PP(pp_threadsv)
{
#ifdef USE_5005THREADS
    dSP;
    EXTEND(SP, 1);
    if (PL_op->op_private & OPpLVAL_INTRO)
        PUSHs(*save_threadsv(PL_op->op_targ));   /* localized: save old value */
    else
        PUSHs(THREADSV(PL_op->op_targ));
    RETURN;
#else
    DIE(aTHX_ "tried to access per-thread data in non-threaded perl");
#endif /* USE_5005THREADS */
}
/*
* Local variables:
* c-indentation-style: bsd
* c-basic-offset: 4
* indent-tabs-mode: t
* End:
*
* vim: shiftwidth=4:
*/
| 57,427
|
3,428
|
{"id":"00758","group":"easy-ham-1","checksum":{"type":"MD5","value":"af02ef2952273218fb9d6c7ddbec7910"},"text":"From <EMAIL> Tue Sep 24 10:49:36 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: y<EMAIL>assassin.taint.org\nReceived: from localhost (jalapeno [1172.16.58.3])\n\tby jmason.org (Postfix) with ESMTP id 19C8B16F03\n\tfor <jm@localhost>; Tue, 24 Sep 2002 10:49:36 +0100 (IST)\nReceived: from jalapeno [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Tue, 24 Sep 2002 10:49:36 +0100 (IST)\nReceived: from xent.com ([64.161.22.236]) by dogma.slashnull.org\n (8.11.6/8.11.6) with ESMTP id g8O41BC20122 for <<EMAIL>>;\n Tue, 24 Sep 2002 05:01:11 +0100\nReceived: from lair.xent.com (localhost [127.0.0.1]) by xent.com (Postfix)\n with ESMTP id 2A305294248; Mon, 23 Sep 2002 20:53:09 -0700 (PDT)\nDelivered-To: <EMAIL>\nReceived: from mail.evergo.net (unknown [206.191.151.2]) by xent.com\n (Postfix) with SMTP id AC7FB294245 for <<EMAIL>>; Mon,\n 23 Sep 2002 20:53:00 -0700 (PDT)\nReceived: (qmail 4847 invoked from network); 24 Sep 2002 03:56:41 -0000\nReceived: from dsl.172.16.31.102.evergo.net (HELO JMHALL)\n (172.16.58.3) by mail.evergo.net with SMTP; 24 Sep 2002 03:56:41 -0000\nReply-To: <<EMAIL>>\nFrom: \"<NAME>\" <<EMAIL>>\nTo: \"FoRK\" <<EMAIL>>\nSubject: liberal defnitions\nMessage-Id: <001b01c2637e$643836f0$0200a8c0@JMHALL>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=\"us-ascii\"\nContent-Transfer-Encoding: 7bit\nX-Priority: 3 (Normal)\nX-Msmail-Priority: Normal\nX-Mailer: Microsoft Outlook, Build 10.0.2627\nImportance: Normal\nIn-Reply-To: <<EMAIL>>\nX-Mimeole: Produced By Microsoft MimeOLE V6.00.2600.0000\nSender: fork-admin@xent.com\nErrors-To: fork-admin@xent.com\nX-Beenthere: <EMAIL>\nX-Mailman-Version: 2.0.11\nPrecedence: bulk\nList-Help: <mailto:<EMAIL>?subject=help>\nList-Post: <mailto:<EMAIL>>\nList-Subscribe: <http://xent.com/mailman/listinfo/fork>, <mailto:<EMAIL>?subject=subscribe>\nList-Id: Friends of 
<NAME> <fork.xent.com>\nList-Unsubscribe: <http://xent.com/mailman/listinfo/fork>,\n <mailto:<EMAIL>?subject=unsubscribe>\nList-Archive: <http://xent.com/pipermail/fork/>\nDate: Mon, 23 Sep 2002 20:56:41 -0700\n\nDepends on how much over spending vs. how much (and what type) over\nregulation.\n\nThe biggest problem with over regulation is the costs can be invisible.\nIt also has the ability to single out particular people, while over\nspending spreads the damage more evenly. Rent control would be an\nexample of a regulation solution that is in general worse than spending\ntons of money on public housing.\n\nAs for the definition of a liberal being someone who seeks to impose\nboth, I find no fault in that definition whatsoever. The opinion that\nEITHER we are spending too much OR we have too much regulation is pretty\nmuch anathema to liberal politics.\n\nFinally, those who argue that there are private replacements for much\ngovernment regulation are not saying that a state of nature (no private\nreplacements, no government regulation) is better than government\nregulation itself.\n\nAnd in my experience people who label themselves 'Green' (which does not\ninclude everyone who loves trees and thinks smokestacks are ugly) is a\nwatermelon.\n\n\n\n\n> -----Original Message-----\n> From: <EMAIL> [mailto:<EMAIL>] On Behalf Of\nGeege\n> Schuman\n> \n> funny. i read it as green = red, as in accounting, as in fiscally\n> irresponsible. which do you think is the worse indictment -\n> overregulation\n> or overspending? there are many (dickheads) who buy into the\n> neo-conservative media's (fox's) definiton of \"liberal\" as \"one who\nseeks\n> to\n> impose both.\"\n\n\n"}
| 1,372
|
653
|
<reponame>mkinsner/llvm
//==-------------- h_item.hpp - SYCL standard header file ------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#pragma once
#include <CL/sycl/detail/helpers.hpp>
#include <CL/sycl/id.hpp>
#include <CL/sycl/item.hpp>
#include <CL/sycl/range.hpp>
__SYCL_INLINE_NAMESPACE(cl) {
namespace sycl {
namespace detail {
class Builder;
}
/// Identifies an instance of a group::parallel_for_work_item function object
/// executing at each point in a local range passed to a parallel_for_work_item
/// call or to the corresponding parallel_for_work_group call.
///
/// \ingroup sycl_api
template <int dimensions> class h_item {
public:
  h_item() = delete;
  h_item(const h_item &hi) = default;
  h_item &operator=(const h_item &hi) = default;

  /* -- public interface members -- */

  /// Item describing the global range and this item's position in it.
  item<dimensions, false> get_global() const { return globalItem; }

  /// Alias for get_logical_local(): the "local" view seen by user code.
  item<dimensions, false> get_local() const { return get_logical_local(); }

  /// Item describing the logical (flexible) local range.
  item<dimensions, false> get_logical_local() const { return logicalLocalItem; }

  /// Item describing the physical work-group local range.
  item<dimensions, false> get_physical_local() const { return localItem; }

  range<dimensions> get_global_range() const {
    return get_global().get_range();
  }

  size_t get_global_range(int dimension) const {
    return get_global().get_range(dimension);
  }

  id<dimensions> get_global_id() const { return get_global().get_id(); }

  size_t get_global_id(int dimension) const {
    return get_global().get_id(dimension);
  }

  range<dimensions> get_local_range() const { return get_local().get_range(); }

  size_t get_local_range(int dimension) const {
    return get_local().get_range(dimension);
  }

  id<dimensions> get_local_id() const { return get_local().get_id(); }

  size_t get_local_id(int dimension) const {
    return get_local().get_id(dimension);
  }

  range<dimensions> get_logical_local_range() const {
    return get_logical_local().get_range();
  }

  size_t get_logical_local_range(int dimension) const {
    return get_logical_local().get_range(dimension);
  }

  id<dimensions> get_logical_local_id() const {
    return get_logical_local().get_id();
  }

  size_t get_logical_local_id(int dimension) const {
    return get_logical_local().get_id(dimension);
  }

  range<dimensions> get_physical_local_range() const {
    return get_physical_local().get_range();
  }

  size_t get_physical_local_range(int dimension) const {
    return get_physical_local().get_range(dimension);
  }

  id<dimensions> get_physical_local_id() const {
    return get_physical_local().get_id();
  }

  size_t get_physical_local_id(int dimension) const {
    return get_physical_local().get_id(dimension);
  }

  /// Two h_items are equal when all three constituent items are equal.
  bool operator==(const h_item &rhs) const {
    return (rhs.localItem == localItem) && (rhs.globalItem == globalItem) &&
           (rhs.logicalLocalItem == logicalLocalItem);
  }

  bool operator!=(const h_item &rhs) const { return !((*this) == rhs); }

protected:
  friend class detail::Builder;
  friend class group<dimensions>;

  /// Constructs an h_item whose logical local range differs from the
  /// physical one (parallel_for_work_item with an explicit flexible range).
  h_item(const item<dimensions, false> &GL, const item<dimensions, false> &L,
         const range<dimensions> &flexLocalRange)
      : globalItem(GL), localItem(L),
        logicalLocalItem(detail::Builder::createItem<dimensions, false>(
            flexLocalRange, L.get_id())) {}

  /// Constructs an h_item whose logical local item mirrors the physical one.
  h_item(const item<dimensions, false> &GL, const item<dimensions, false> &L)
      : globalItem(GL), localItem(L),
        logicalLocalItem(detail::Builder::createItem<dimensions, false>(
            localItem.get_range(), localItem.get_id())) {}

  /// Updates the logical-local index (used by the library, not user code).
  void setLogicalLocalID(const id<dimensions> &ID) {
    detail::Builder::updateItemIndex(logicalLocalItem, ID);
  }

private:
  /// Describes physical workgroup range and current \c h_item position in it.
  item<dimensions, false> localItem;
  /// Describes global range and current \c h_item position in it.
  item<dimensions, false> globalItem;
  /// Describes logical flexible range and current \c h_item position in it.
  item<dimensions, false> logicalLocalItem;
};
} // namespace sycl
} // __SYCL_INLINE_NAMESPACE(cl)
| 1,471
|
606
|
#ifndef _MODULE_DEBUG_H
#define _MODULE_DEBUG_H

/* Uncomment to strip all driver printf output from the build. */
//#define MINI_DRIVER

/* M_PRINTF: always-on driver message, prefixed with "VFAT: ".
 * Compiles to nothing in MINI_DRIVER builds. */
#ifndef MINI_DRIVER
#define M_PRINTF(format, args...) printf("VFAT: " format, ##args)
#else
#define M_PRINTF(format, args...)
#endif

/* M_DEBUG: verbose tracing, emitted only when DEBUG is defined. */
#ifdef DEBUG
#define M_DEBUG M_PRINTF
#else
#define M_DEBUG(format, args...)
#endif

#endif
| 125
|
1,338
|
<filename>src/tests/kits/support/blocker/ConcurrencyTest1.cpp<gh_stars>1000+
/*
$Id: ConcurrencyTest1.cpp 301 2002-07-18 05:32:00Z tylerdauwalder $
This file implements a test class for testing BLocker functionality.
It tests use cases "Locking 1", "Locking 2", "Unlocking", "Is Locked",
"Locking Thread" and "Count Locks".
*/
#include <ThreadedTestCaller.h>
#include "ConcurrencyTest1.h"
#include <cppunit/TestSuite.h>
#include <Locker.h>
// This constant indicates the number of times the thread should test the
// acquisition and release of the BLocker.
const int32 MAXLOOP = 10000;
/*
* Method: ConcurrencyTest1::ConcurrencyTest1()
* Descr: This is the only constructor for this test case. It takes a
* test name and a flag to indicate whether to test a benaphore
* or semaphore type BLocker.
*/
ConcurrencyTest1::ConcurrencyTest1(std::string name, bool benaphoreFlag) :
	LockerTestCase(name, benaphoreFlag), lockTestValue(false)
{
	// Base class builds the benaphore- or semaphore-style BLocker;
	// lockTestValue starts false (no thread inside the critical section).
}
/*
 * Method: ConcurrencyTest1::~ConcurrencyTest1()
 * Descr: This is the destructor for this test case.
 */
ConcurrencyTest1::~ConcurrencyTest1()
{
	// Nothing to release; the base class owns theLocker.
}
/*
* Method: ConcurrencyTest1::setUp()
* Descr: This member is called before starting the actual test threads
* and is used to ensure that the class is initialized for the
* testing. It just sets the "lockTestValue" flag to false. This
* flag is used to show that there is mutual exclusion between the
* threads.
*/
// Reset the mutual-exclusion flag before the test threads start.
void
ConcurrencyTest1::setUp(void)
{
	lockTestValue = false;
}
/*
* Method: ConcurrencyTest1::suite()
* Descr: This static member function returns a test suite for performing
* all combinations of "ConcurrencyTest1". The test suite contains
* two instances of the test. One is performed on a benaphore,
* the other on a semaphore based BLocker. Each individual test
* is created as a ThreadedTestCase (typedef'd as
* ConcurrencyTest1Caller) with three independent threads.
*/
// Builds the suite: one three-thread test on a benaphore-based BLocker
// and one on a semaphore-based BLocker (see class comment above).
CppUnit::Test *ConcurrencyTest1::suite(void)
{
	typedef BThreadedTestCaller<ConcurrencyTest1>
		ConcurrencyTest1Caller;
	CppUnit::TestSuite *testSuite = new CppUnit::TestSuite("ConcurrencyTest1");

	// Make a benaphore based test object, create a ThreadedTestCase for it and add
	// three threads to it.
	ConcurrencyTest1 *theTest = new ConcurrencyTest1("Benaphore", true);
	ConcurrencyTest1Caller *threadedTest1 = new ConcurrencyTest1Caller("BLocker::Concurrency Test #1 (benaphore)", theTest);
	threadedTest1->addThread("A", &ConcurrencyTest1::TestThread);
	threadedTest1->addThread("B", &ConcurrencyTest1::TestThread);
	threadedTest1->addThread("C", &ConcurrencyTest1::TestThread);

	// Make a semaphore based test object, create a ThreadedTestCase for it and add
	// three threads to it.
	theTest = new ConcurrencyTest1("Semaphore", false);
	ConcurrencyTest1Caller *threadedTest2 = new ConcurrencyTest1Caller("BLocker::Concurrency Test #1 (semaphore)", theTest);
	threadedTest2->addThread("A", &ConcurrencyTest1::TestThread);
	threadedTest2->addThread("B", &ConcurrencyTest1::TestThread);
	threadedTest2->addThread("C", &ConcurrencyTest1::TestThread);

	testSuite->addTest(threadedTest1);
	testSuite->addTest(threadedTest2);
	return(testSuite);
}
/*
* Method: ConcurrencyTest1::AcquireLock()
* Descr: This member function is passed the number of times through the
* acquisition loop (lockAttempt) and whether or not this is
* the first acquisition of the lock within this iteration.
* Based on these values, it may do a LockWithTimeout() or just
* a plain Lock() on theLocker. This is done to get coverage of
* both lock acquisition methods on the BLocker.
*/
/*
 * Method: ConcurrencyTest1::AcquireLock()
 * Descr: Acquires theLocker, alternating between Lock() and
 *        LockWithTimeout() so both acquisition paths get coverage.
 *        Which API is used depends on the iteration number: the first
 *        acquisition of an iteration keys off bit 0 of lockAttempt,
 *        the second acquisition off bit 1.  Returns true on success.
 */
bool ConcurrencyTest1::AcquireLock(int lockAttempt,
                                   bool firstAcquisition)
{
	// Select which bit of the attempt counter drives the choice.
	const int selector = firstAcquisition ? lockAttempt : lockAttempt / 2;
	const bool useTimeout = ((selector % 2) == 1);

	if (useTimeout)
		return (theLocker->LockWithTimeout(1000000) == B_OK);
	return theLocker->Lock();
}
/*
* Method: ConcurrencyTest1::TestThread()
* Descr: This method is the core of the test. Each of the three threads
* run this method to perform the concurrency test. First, the
* SafetyLock class (see LockerTestCase.h) is used to make sure that
* the lock is released if an assertion happens. Then, each thread
* iterates MAXLOOP times through the main loop where the following
* actions are performed:
* - CheckLock() is used to show that the thread does not have
* the lock.
* - The thread acquires the lock.
* - The thread confirms that mutual exclusion is OK by testing
* lockTestValue.
* - The thread confirms the lock is held once by the thread.
* - The thread acquires the lock again.
* - The thread confirms the lock is held twice now by the thread.
* - The thread releases the lock once.
* - The thread confirms the lock is held once now.
* - The thread confirms that mutual exclusion is still OK by
* testing lockTestValue.
* - The thread releases the lock again.
* - The thread confirms that the lock is no longer held.
*/
// Core of the test; run concurrently by three threads (see the long
// comment above for the step-by-step protocol each iteration follows).
void ConcurrencyTest1::TestThread(void)
{
	int i;
	SafetyLock theSafetyLock(theLocker);	// releases the lock if an assert throws

	for (i = 0; i < MAXLOOP; i++) {
		// Print out 10 sub test markers per thread
		if (i % (MAXLOOP / 10) == 0)
			NextSubTest();

		CheckLock(0);				// we must not hold the lock yet
		CPPUNIT_ASSERT(AcquireLock(i, true));

		// lockTestValue false proves no other thread is inside the
		// critical section; set it to claim the section ourselves.
		CPPUNIT_ASSERT(!lockTestValue);
		lockTestValue = true;
		CheckLock(1);

		CPPUNIT_ASSERT(AcquireLock(i, false));	// recursive acquisition
		CheckLock(2);

		theLocker->Unlock();
		CheckLock(1);

		// Still inside the critical section: flag must be unchanged.
		CPPUNIT_ASSERT(lockTestValue);
		lockTestValue = false;

		theLocker->Unlock();
		CheckLock(0);
	}
}
| 2,310
|
348
|
{"nom":"Battenheim","circ":"6ème circonscription","dpt":"Haut-Rhin","inscrits":1231,"abs":602,"votants":629,"blancs":5,"nuls":5,"exp":619,"res":[{"nuance":"REM","nom":"<NAME>","voix":202},{"nuance":"UDI","nom":"<NAME>","voix":136},{"nuance":"FN","nom":"M. <NAME>","voix":124},{"nuance":"DVD","nom":"Mme <NAME>","voix":39},{"nuance":"FI","nom":"M. <NAME>","voix":35},{"nuance":"REG","nom":"<NAME>","voix":35},{"nuance":"SOC","nom":"<NAME>","voix":14},{"nuance":"ECO","nom":"M. <NAME>","voix":11},{"nuance":"DIV","nom":"Mme <NAME>","voix":7},{"nuance":"DLF","nom":"Mme <NAME>","voix":7},{"nuance":"ECO","nom":"Mme <NAME>","voix":3},{"nuance":"COM","nom":"M. <NAME>","voix":2},{"nuance":"EXD","nom":"Mme <NAME>","voix":2},{"nuance":"DIV","nom":"<NAME>","voix":2},{"nuance":"DIV","nom":"<NAME>","voix":0},{"nuance":"EXG","nom":"Mme <NAME>","voix":0}]}
| 344
|
6,181
|
<filename>renderdoc/3rdparty/compressonator/BC1_Encode_kernel.cpp<gh_stars>1000+
//=====================================================================
// Copyright (c) 2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files(the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
//=====================================================================
#include "BC1_Encode_kernel.h"
//============================================== BC1 INTERFACES =======================================================
/* CompressBlockBC1_Fast
 * Encodes a 4x4 block of RGBA pixels (srcBlockTemp, alpha ignored) into a
 * 64-bit BC1/DXT1 block: compressedBlock[0] holds the two RGB565 endpoints
 * (c0 | c1<<16), compressedBlock[1] the sixteen 2-bit selector indices.
 * Method: fit a principal axis through the colours, project every pixel
 * onto it, derive the two endpoints from the extremes, then cluster each
 * pixel to one of the four points along the axis.  Always produces an
 * opaque 4-colour block (c0 > c1 is enforced). */
void CompressBlockBC1_Fast(
    CMP_Vec4uc srcBlockTemp[16],
    CMP_GLOBAL CGU_UINT32 compressedBlock[2])
{
    int i, k;
    CMP_Vec3f rgb;
    CMP_Vec3f average_rgb;              // The centrepoint of the axis
    CMP_Vec3f v_rgb;                    // The axis
    CMP_Vec3f uniques[16];              // The list of unique colours
    int unique_pixels;                  // The number of unique pixels
    CGU_FLOAT unique_recip;             // Reciprocal of the above for fast multiplication
    int index_map[16];                  // The map of source pixels to unique indices
    CGU_FLOAT pos_on_axis[16];          // The distance each unique falls along the compression axis
    CGU_FLOAT dist_from_axis[16];       // The distance each unique falls from the compression axis
    CGU_FLOAT left = 0, right = 0, centre = 0; // The extremities and centre (average of left/right) of uniques along the compression axis
    CGU_FLOAT axis_mapping_error = 0;   // The total computed error in mapping pixels to the axis
    int swap;                           // Indicator if the RGB values need swapping to generate an opaque result

    // -------------------------------------------------------------------------------------
    // (3) Find the array of unique pixel values and sum them to find their average position
    // -------------------------------------------------------------------------------------
    {
        // Find the array of unique pixel values and sum them to find their average position
        int current_pixel, firstdiff;
        current_pixel = unique_pixels = 0;
        average_rgb = 0.0f;
        firstdiff = -1;
        for (i = 0; i<16; i++)
        {
            // compare at 565 precision: masks drop the bits BC1 cannot store
            for (k = 0; k<i; k++)
                if ((((srcBlockTemp[k].x ^ srcBlockTemp[i].x) & 0xf8) == 0) && (((srcBlockTemp[k].y ^ srcBlockTemp[i].y) & 0xfc) == 0) && (((srcBlockTemp[k].z ^ srcBlockTemp[i].z) & 0xf8) == 0))
                    break;
            index_map[i] = current_pixel++;
            //pixel_count[i] = 1;
            CMP_Vec3f trgb;
            rgb.x = (CGU_FLOAT)((srcBlockTemp[i].x) & 0xff);
            rgb.y = (CGU_FLOAT)((srcBlockTemp[i].y) & 0xff);
            rgb.z = (CGU_FLOAT)((srcBlockTemp[i].z) & 0xff);
            // transform into the weighted colour space used for the fit
            trgb.x = CS_RED(rgb.x, rgb.y, rgb.z);
            trgb.y = CS_GREEN(rgb.x, rgb.y, rgb.z);
            trgb.z = CS_BLUE(rgb.x, rgb.y, rgb.z);
            uniques[i] = trgb;
            if (k == i)
            {
                unique_pixels++;
                if ((i != 0) && (firstdiff < 0)) firstdiff = i;
            }
            average_rgb = average_rgb + trgb;
        }
        // NOTE(review): unique_pixels is overwritten to 16 here, so all
        // pixels (including duplicates) participate in the average.
        unique_pixels = 16;
        // Compute average of the uniques
        unique_recip = 1.0f / (CGU_FLOAT)unique_pixels;
        average_rgb = average_rgb * unique_recip;
    }

    // -------------------------------------------------------------------------------------
    // (4) For each component, reflect points about the average so all lie on the same side
    // of the average, and compute the new average - this gives a second point that defines the axis
    // To compute the sign of the axis sum the positive differences of G for each of R and B (the
    // G axis is always positive in this implementation
    // -------------------------------------------------------------------------------------
    // An interesting situation occurs if the G axis contains no information, in which case the RB
    // axis is also compared. I am not entirely sure if this is the correct implementation - should
    // the priority axis be determined by magnitude?
    {
        CGU_FLOAT rg_pos, bg_pos, rb_pos;
        v_rgb = 0.0f;
        rg_pos = bg_pos = rb_pos = 0;

        for (i = 0; i < unique_pixels; i++)
        {
            rgb = uniques[i] - average_rgb;
#ifndef ASPM_GPU
            v_rgb.x += (CGU_FLOAT)fabs(rgb.x);
            v_rgb.y += (CGU_FLOAT)fabs(rgb.y);
            v_rgb.z += (CGU_FLOAT)fabs(rgb.z);
#else
            v_rgb = v_rgb + fabs(rgb);
#endif
            if (rgb.x > 0) { rg_pos += rgb.y; rb_pos += rgb.z; }
            if (rgb.z > 0) bg_pos += rgb.y;
        }
        v_rgb = v_rgb*unique_recip;
        if (rg_pos < 0) v_rgb.x = -v_rgb.x;
        if (bg_pos < 0) v_rgb.z = -v_rgb.z;
        if ((rg_pos == bg_pos) && (rg_pos == 0))
            if (rb_pos < 0) v_rgb.z = -v_rgb.z;
    }

    // -------------------------------------------------------------------------------------
    // (5) Axis projection and remapping
    // -------------------------------------------------------------------------------------
    {
        CGU_FLOAT v2_recip;
        // Normalise the axis for simplicity of future calculation
        v2_recip = (v_rgb.x*v_rgb.x + v_rgb.y*v_rgb.y + v_rgb.z*v_rgb.z);
        if (v2_recip > 0)
            v2_recip = 1.0f / (CGU_FLOAT)sqrt(v2_recip);
        else
            v2_recip = 1.0f;    // degenerate (single-colour) block: keep a unit-safe axis
        v_rgb = v_rgb*v2_recip;
    }

    // -------------------------------------------------------------------------------------
    // (6) Map the axis
    // -------------------------------------------------------------------------------------
    // the line joining (and extended on either side of) average and axis
    // defines the axis onto which the points will be projected
    // Project all the points onto the axis, calculate the distance along
    // the axis from the centre of the axis (average)
    // From Foley & <NAME>: Closest point of approach of a line (P + v) to a point (R) is
    //                                P + ((R-P).v) / (v.v))v
    // The distance along v is therefore (R-P).v / (v.v)
    // (v.v) is 1 if v is a unit vector.
    //
    // Calculate the extremities at the same time - these need to be reasonably accurately
    // represented in all cases
    //
    // In this first calculation, also find the error of mapping the points to the axis - this
    // is our major indicator of whether or not the block has compressed well - if the points
    // map well onto the axis then most of the noise introduced is high-frequency noise
    {
        left = 10000.0f;
        right = -10000.0f;
        axis_mapping_error = 0;
        for (i = 0; i < unique_pixels; i++)
        {
            // Compute the distance along the axis of the point of closest approach
            CMP_Vec3f temp = (uniques[i] - average_rgb);
            pos_on_axis[i] = (temp.x * v_rgb.x) + (temp.y * v_rgb.y) + (temp.z * v_rgb.z);

            // Compute the actual point and thence the mapping error
            rgb = uniques[i] - (average_rgb + (v_rgb * pos_on_axis[i]));
            dist_from_axis[i] = rgb.x*rgb.x + rgb.y*rgb.y + rgb.z*rgb.z;
            axis_mapping_error += dist_from_axis[i];

            // Work out the extremities
            if (pos_on_axis[i] < left)
                left = pos_on_axis[i];
            if (pos_on_axis[i] > right)
                right = pos_on_axis[i];
        }
    }

    // -------------------------------------------------------------------------------------
    // (7) Now we have a good axis and the basic information about how the points are mapped
    //     to it
    //     Our initial guess is to represent the endpoints accurately, by moving the average
    //     to the centre and recalculating the point positions along the line
    // -------------------------------------------------------------------------------------
    {
        centre = (left + right) / 2;
        average_rgb = average_rgb + (v_rgb*centre);
        for (i = 0; i<unique_pixels; i++)
            pos_on_axis[i] -= centre;
        right -= centre;
        left -= centre;

        // Accumulate our final resultant error
        axis_mapping_error *= unique_recip * (1 / 255.0f);
    }

    // -------------------------------------------------------------------------------------
    // (8) Calculate the high and low output colour values
    //     Involved in this is a rounding procedure which is undoubtedly slightly twitchy. A
    //     straight rounded average is not correct, as the decompressor 'unrounds' by replicating
    //     the top bits to the bottom.
    //     In order to take account of this process, we don't just apply a straight rounding correction,
    //     but base our rounding on the input value (a straight rounding is actually pretty good in terms of
    //     error measure, but creates a visual colour and/or brightness shift relative to the original image)
    //     The method used here is to apply a centre-biased rounding dependent on the input value, which was
    //     (mostly by experiment) found to give minimum MSE while preserving the visual characteristics of
    //     the image.
    //     rgb = (average_rgb + (left|right)*v_rgb);
    // -------------------------------------------------------------------------------------
    {
        CGU_UINT32 c0, c1, t;
        int rd, gd, bd;

        // low endpoint: project "left" back to RGB and quantize to 565
        rgb = (average_rgb + (v_rgb * left));
        rd = ( CGU_INT32)DCS_RED(rgb.x, rgb.y, rgb.z);
        gd = ( CGU_INT32)DCS_GREEN(rgb.x, rgb.y, rgb.z);
        bd = ( CGU_INT32)DCS_BLUE(rgb.x, rgb.y, rgb.z);
        ROUND_AND_CLAMP(rd, 5);
        ROUND_AND_CLAMP(gd, 6);
        ROUND_AND_CLAMP(bd, 5);
        c0 = ((rd & 0xf8) << 8) + ((gd & 0xfc) << 3) + ((bd & 0xf8) >> 3);

        // high endpoint: same for "right"
        rgb = average_rgb + (v_rgb * right);
        rd = ( CGU_INT32)DCS_RED(rgb.x, rgb.y, rgb.z);
        gd = ( CGU_INT32)DCS_GREEN(rgb.x, rgb.y, rgb.z);
        bd = ( CGU_INT32)DCS_BLUE(rgb.x, rgb.y, rgb.z);
        ROUND_AND_CLAMP(rd, 5);
        ROUND_AND_CLAMP(gd, 6);
        ROUND_AND_CLAMP(bd, 5);
        c1 = (((rd & 0xf8) << 8) + ((gd & 0xfc) << 3) + ((bd & 0xf8) >> 3));

        // Force to be a 4-colour opaque block - in which case, c0 is greater than c1
        // blocktype == 4
        {
            if (c0 < c1)
            {
                t = c0;
                c0 = c1;
                c1 = t;
                swap = 1;   // selectors must be flipped to compensate
            }
            else if (c0 == c1)
            {
                // This block will always be encoded in 3-colour mode
                // Need to ensure that only one of the two points gets used,
                // avoiding accidentally setting some transparent pixels into the block
                for (i = 0; i<unique_pixels; i++)
                    pos_on_axis[i] = left;
                swap = 0;
            }
            else
                swap = 0;
        }
        compressedBlock[0] = c0 | (c1 << 16);
    }

    // -------------------------------------------------------------------------------------
    // (9) Final clustering, creating the 2-bit values that define the output
    // -------------------------------------------------------------------------------------
    {
        CGU_UINT32 bit;
        CGU_FLOAT division;
        CGU_FLOAT cluster_x[4];
        CGU_FLOAT cluster_y[4];
        int cluster_count[4];

        // (blocktype == 4)
        {
            compressedBlock[1] = 0;
            division = right*2.0f / 3.0f;
            centre = (left + right) / 2;    // Actually, this code only works if centre is 0 or approximately so

            for (i = 0; i<4; i++)
            {
                cluster_x[i] = cluster_y[i] = 0.0f;
                cluster_count[i] = 0;
            }

            for (i = 0; i<16; i++)
            {
                rgb.z = pos_on_axis[index_map[i]];
                // Endpoints (indicated by block > average) are 0 and 1, while
                // interpolants are 2 and 3
                if (fabs(rgb.z) >= division)
                    bit = 0;
                else
                    bit = 2;
                // Positive is in the latter half of the block
                if (rgb.z >= centre)
                    bit += 1;

                // Set the output, taking swapping into account
                compressedBlock[1] |= ((bit^swap) << (2 * i));

                // Average the X and Y locations for each cluster
                cluster_x[bit] += (CGU_FLOAT)(i & 3);
                cluster_y[bit] += (CGU_FLOAT)(i >> 2);
                cluster_count[bit]++;
            }

            for (i = 0; i<4; i++)
            {
                CGU_FLOAT cr;
                if (cluster_count[i])
                {
                    cr = 1.0f / cluster_count[i];
                    cluster_x[i] *= cr;
                    cluster_y[i] *= cr;
                }
                else
                {
                    cluster_x[i] = cluster_y[i] = -1;   // empty cluster marker
                }
            }

            // patterns in axis position detection
            // (same algorithm as used in the SSE version)
            if ((compressedBlock[0] & 0xffff) != (compressedBlock[0] >> 16))
            {
                CGU_UINT32 i1, k1;
                CGU_UINT32 x = 0, y = 0;
                int xstep = 0, ystep = 0;

                // Find a corner to search from
                for (k1 = 0; k1<4; k1++)
                {
                    switch (k1)
                    {
                    case 0:
                        x = 0; y = 0; xstep = 1; ystep = 1;
                        break;
                    case 1:
                        x = 0; y = 3; xstep = 1; ystep = -1;
                        break;
                    case 2:
                        x = 3; y = 0; xstep = -1; ystep = 1;
                        break;
                    case 3:
                        x = 3; y = 3; xstep = -1; ystep = -1;
                        break;
                    }

                    for (i1 = 0; i1<4; i1++)
                    {
                        // bail out of this corner as soon as monotonicity
                        // is violated along either a row or a column
                        if ((POS(x, y + ystep*i1) < POS(x + xstep, y + ystep*i1)) ||
                            (POS(x + xstep, y + ystep*i1) < POS(x + 2 * xstep, y + ystep*i1)) ||
                            (POS(x + 2 * xstep, y + ystep*i1) < POS(x + 3 * xstep, y + ystep*i1))
                            )
                            break;
                        if ((POS(x + xstep*i1, y) < POS(x + xstep*i1, y + ystep)) ||
                            (POS(x + xstep*i1, y + ystep) < POS(x + xstep*i1, y + 2 * ystep)) ||
                            (POS(x + xstep*i1, y + 2 * ystep) < POS(x + xstep*i1, y + 3 * ystep))
                            )
                            break;
                    }
                    if (i1 == 4)
                        break;  // found a fully monotonic corner
                }
            }
        }
    }

    // done
}
INLINE void store_uint8(CMP_GLOBAL CGU_UINT8 u_dstptr[8], CGU_UINT32 data[2])
{
    // Serialize the two 32-bit words of a compressed BC1 block into eight
    // bytes, little-endian: bytes 0-3 come from data[0], bytes 4-7 from
    // data[1].
    for (CGU_INT i = 0; i < 8; i++)
    {
        const CGU_UINT32 word = data[i >> 2];
        u_dstptr[i] = (CGU_UINT8)((word >> ((i & 3) * 8)) & 0xFF);
    }
}
void CompressBlockBC1_Internal(
const CMP_Vec4uc srcBlockTemp[16],
CMP_GLOBAL CGU_UINT32 compressedBlock[2],
CMP_GLOBAL const CMP_BC15Options *BC15options)
{
CGU_UINT8 blkindex = 0;
CGU_UINT8 srcindex = 0;
CGU_UINT8 rgbBlock[64];
for ( CGU_INT32 j = 0; j < 4; j++) {
for ( CGU_INT32 i = 0; i < 4; i++) {
rgbBlock[blkindex++] = (CGU_UINT8)srcBlockTemp[srcindex].z; // B
rgbBlock[blkindex++] = (CGU_UINT8)srcBlockTemp[srcindex].y; // G
rgbBlock[blkindex++] = (CGU_UINT8)srcBlockTemp[srcindex].x; // R
rgbBlock[blkindex++] = (CGU_UINT8)srcBlockTemp[srcindex].w; // A
srcindex++;
}
}
CMP_BC15Options internalOptions = *BC15options;
CalculateColourWeightings(rgbBlock, &internalOptions);
CompressRGBBlock(rgbBlock,
compressedBlock,
&internalOptions,
TRUE,
FALSE,
internalOptions.m_nAlphaThreshold);
}
//============================================== USER INTERFACES ========================================================
#ifndef ASPM_GPU
// Allocate a BC1 options block with default settings and hand ownership to
// the caller via *options. Returns CGU_CORE_OK on success,
// CGU_CORE_ERR_INVALIDPTR for a null out-pointer, CGU_CORE_ERR_NEWMEM on
// allocation failure. Pair with DestroyOptionsBC1().
int CMP_CDECL CreateOptionsBC1(void **options)
{
    // Guard the out-pointer before writing through it, matching the
    // validation every other *OptionsBC1 entry point performs.
    if (!options) return CGU_CORE_ERR_INVALIDPTR;
    CMP_BC15Options *BC15optionsDefault = new CMP_BC15Options;
    if (BC15optionsDefault) {
        SetDefaultBC15Options(BC15optionsDefault);
        (*options) = BC15optionsDefault;
    }
    else {
        // NOTE(review): plain operator new throws std::bad_alloc rather than
        // returning NULL, so this branch is only reachable when the project
        // is built with exceptions disabled — confirm build flags.
        (*options) = NULL;
        return CGU_CORE_ERR_NEWMEM;
    }
    return CGU_CORE_OK;
}
// Release an options block previously allocated by CreateOptionsBC1().
int CMP_CDECL DestroyOptionsBC1(void *options)
{
    if (options == NULL)
        return CGU_CORE_ERR_INVALIDPTR;
    delete reinterpret_cast<CMP_BC15Options *>(options);
    return CGU_CORE_OK;
}
// Set the encoder quality (clamped to [0,1]) on an options block.
// Returns CGU_CORE_ERR_INVALIDPTR for a null options pointer.
int CMP_CDECL SetQualityBC1(void *options,
                            CGU_FLOAT fquality)
{
    // Bug fix: the original returned CGU_CORE_ERR_NEWMEM here — a
    // copy/paste slip, since every sibling Set* entry point reports
    // CGU_CORE_ERR_INVALIDPTR for a null options pointer.
    if (!options) return CGU_CORE_ERR_INVALIDPTR;
    CMP_BC15Options *BC15optionsDefault = reinterpret_cast<CMP_BC15Options *>(options);
    // Clamp quality into the supported [0,1] range rather than rejecting.
    if (fquality < 0.0f) fquality = 0.0f;
    else
        if (fquality > 1.0f) fquality = 1.0f;
    BC15optionsDefault->m_fquality = fquality;
    return CGU_CORE_OK;
}
// Set the alpha cut-off used to classify pixels as transparent (BC1a).
int CMP_CDECL SetAlphaThresholdBC1(void *options,
                                   CGU_UINT8 alphaThreshold)
{
    if (options == NULL)
        return CGU_CORE_ERR_INVALIDPTR;
    CMP_BC15Options *opts = reinterpret_cast<CMP_BC15Options *>(options);
    opts->m_nAlphaThreshold = alphaThreshold;
    return CGU_CORE_OK;
}
// Select whether decoded output is written in RGBA channel order.
int CMP_CDECL SetDecodeChannelMapping(void *options,
                                      CGU_BOOL mapRGBA)
{
    if (options == NULL)
        return CGU_CORE_ERR_INVALIDPTR;
    CMP_BC15Options *opts = reinterpret_cast<CMP_BC15Options *>(options);
    opts->m_mapDecodeRGBA = mapRGBA;
    return CGU_CORE_OK;
}
// Enable perceptual channel weighting with per-channel weights in [0,1].
// Each weight is validated individually so the caller learns which
// channel was out of range.
int CMP_CDECL SetChannelWeightsBC1(void *options,
                                   CGU_FLOAT WeightRed,
                                   CGU_FLOAT WeightGreen,
                                   CGU_FLOAT WeightBlue) {
    if (!options)
        return CGU_CORE_ERR_INVALIDPTR;
    CMP_BC15Options *opts = (CMP_BC15Options *)options;
    if (WeightRed < 0.0f || WeightRed > 1.0f)
        return CGU_CORE_ERR_RANGERED;
    if (WeightGreen < 0.0f || WeightGreen > 1.0f)
        return CGU_CORE_ERR_RANGEGREEN;
    if (WeightBlue < 0.0f || WeightBlue > 1.0f)
        return CGU_CORE_ERR_RANGEBLUE;
    opts->m_bUseChannelWeighting = true;
    opts->m_fChannelWeights[0] = WeightRed;
    opts->m_fChannelWeights[1] = WeightGreen;
    opts->m_fChannelWeights[2] = WeightBlue;
    return CGU_CORE_OK;
}
// Compress one 4x4 RGBA tile (rows separated by srcStrideInBytes) into an
// 8-byte BC1 block. When options is NULL, library defaults are used.
int CMP_CDECL CompressBlockBC1(const unsigned char *srcBlock,
                               unsigned int srcStrideInBytes,
                               CMP_GLOBAL unsigned char cmpBlock[8],
                               const void *options = NULL) {
    //------------------------------------------------------
    // Gather the strided 4x4 tile into a contiguous block
    //------------------------------------------------------
    CMP_Vec4uc inBlock[16];
    CGU_INT pixel = 0;
    for (CGU_UINT8 row = 0; row < 4; row++)
    {
        CGU_INT srcpos = row * srcStrideInBytes;
        for (CGU_UINT8 col = 0; col < 4; col++)
        {
            inBlock[pixel].x = CGU_UINT8(srcBlock[srcpos++]);
            inBlock[pixel].y = CGU_UINT8(srcBlock[srcpos++]);
            inBlock[pixel].z = CGU_UINT8(srcBlock[srcpos++]);
            inBlock[pixel].w = CGU_UINT8(srcBlock[srcpos++]);
            pixel++;
        }
    }

    // Fall back to library defaults when the caller passed no options.
    CMP_BC15Options BC15optionsDefault;
    CMP_BC15Options *BC15options = (CMP_BC15Options *)options;
    if (BC15options == NULL)
    {
        SetDefaultBC15Options(&BC15optionsDefault);
        BC15options = &BC15optionsDefault;
    }

    CompressBlockBC1_Internal(inBlock, (CMP_GLOBAL CGU_UINT32 *)cmpBlock, BC15options);
    return CGU_CORE_OK;
}
// Decompress one 8-byte BC1 block into a 4x4 RGBA tile (64 bytes).
// When options is NULL, library defaults are used.
int CMP_CDECL DecompressBlockBC1(const unsigned char cmpBlock[8],
                                 CMP_GLOBAL unsigned char srcBlock[64],
                                 const void *options = NULL) {
    CMP_BC15Options BC15optionsDefault;
    CMP_BC15Options *BC15options = (CMP_BC15Options *)options;
    if (BC15options == NULL)
    {
        SetDefaultBC15Options(&BC15optionsDefault);
        BC15options = &BC15optionsDefault;
    }
    DecompressDXTRGB_Internal(srcBlock, ( CGU_UINT32 *)cmpBlock, BC15options);
    return CGU_CORE_OK;
}
#endif
//============================================== OpenCL USER INTERFACE ========================================================
#ifdef ASPM_GPU
// Per-work-item BC1 encoder entry point: each invocation compresses one
// 4x4 pixel block of the source image into 8 bytes of output.
CMP_STATIC CMP_KERNEL void CMP_GPUEncoder(
    CMP_GLOBAL const CMP_Vec4uc* ImageSource,
    CMP_GLOBAL CGU_UINT8* ImageDestination,
    CMP_GLOBAL Source_Info* SourceInfo,
    CMP_GLOBAL CMP_BC15Options* BC15options
)
{
    CGU_UINT32 xID;
    CGU_UINT32 yID;

    //printf("SourceInfo: (H:%d,W:%d) Quality %1.2f \n", SourceInfo->m_src_height, SourceInfo->m_src_width, SourceInfo->m_fquality);
#ifdef ASPM_GPU
    // Block coordinates come from the dispatch grid on GPU builds.
    xID = get_global_id(0);
    yID = get_global_id(1);
#else
    xID = 0;
    yID = 0;
#endif

    // Discard work items outside the whole-block region of the image.
    // NOTE(review): the height bound also divides by BlockX; correct for a
    // square 4x4 BC1 block, but worth confirming against a BlockY constant.
    if (xID >= (SourceInfo->m_src_width / BlockX)) return;
    if (yID >= (SourceInfo->m_src_height / BlockX)) return;

    int srcWidth = SourceInfo->m_src_width;

    // Byte offset of this block's 8 compressed bytes in the destination.
    CGU_UINT32 destI = (xID*BC1CompBlockSize) + (yID*(srcWidth / BlockX)*BC1CompBlockSize);
    // Index of the block's top-left pixel in the linear source image.
    int srcindex = 4 * (yID * srcWidth + xID);
    int blkindex = 0;
    CMP_Vec4uc srcData[16];
    // After copying 4 pixels of a row, advance by (srcWidth - 4) to reach
    // the start of the block's next row.
    srcWidth = srcWidth - 4;

    for ( CGU_INT32 j = 0; j < 4; j++) {
        for ( CGU_INT32 i = 0; i < 4; i++) {
            srcData[blkindex++] = ImageSource[srcindex++];
        }
        srcindex += srcWidth;
    }

    // fast low quality mode that matches v3.1 code
    if (SourceInfo->m_fquality <= 0.04f)
        CompressBlockBC1_Fast(srcData, (CMP_GLOBAL CGU_UINT32 *)&ImageDestination[destI]);
    else
        CompressBlockBC1_Internal(srcData, (CMP_GLOBAL CGU_UINT32 *)&ImageDestination[destI], BC15options);
}
#endif
| 11,097
|
408
|
package io.apigee.trireme.apptests;
import io.apigee.trireme.container.netty.NettyHttpContainer;
import io.apigee.trireme.core.NodeEnvironment;
import io.apigee.trireme.core.NodeException;
import io.apigee.trireme.core.NodeScript;
import io.apigee.trireme.core.ScriptFuture;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
/**
 * End-to-end test that boots the "dogs" Express sample app inside a Trireme
 * Node environment (optionally fronted by the Netty HTTP container) and
 * exercises its HTTP API. One server is started before and stopped after
 * each test method.
 */
@RunWith(Parameterized.class)
public class ExpressAppNettyTest
{
    private static final int PORT = 33333;

    private static ScriptFuture scriptFuture;

    private final boolean useNetty;
    private final String version;

    /** Single configuration: Netty HTTP container with the 0.10 runtime. */
    @Parameterized.Parameters
    public static Collection<Object[]> getParameters()
    {
        Object[][] configs = {{true, "0.10"}};
        return Arrays.asList(configs);
    }

    public ExpressAppNettyTest(boolean useNetty, String version)
    {
        this.useNetty = useNetty;
        this.version = version;
    }

    /**
     * Derive a port that is unique per (container, version) combination so
     * parameterized runs never try to bind the same socket.
     */
    private int getPort()
    {
        int port = PORT;
        if (useNetty) {
            port += 1;
        }
        if ("0.12".equals(version)) {
            port += 10;
        }
        return port;
    }

    @Before
    public void start()
        throws NodeException, IOException, InterruptedException
    {
        System.out.println("Starting with useNetty = " + useNetty + " version = " + version);
        NodeEnvironment env = new NodeEnvironment();
        if (useNetty) {
            env.setHttpContainer(new NettyHttpContainer());
        }
        final int port = getPort();
        final File serverJs = new File("./target/test-classes/dogs/server.js");
        NodeScript script = env.createScript("server.js", serverJs,
                                             new String[] { String.valueOf(port) });
        scriptFuture = script.execute();
        // Block until the HTTP server is actually accepting connections.
        Utils.awaitPortOpen(port);
    }

    @After
    public void stop()
        throws ExecutionException, InterruptedException
    {
        System.out.println("Stopping");
        scriptFuture.cancel(true);
        try {
            scriptFuture.get();
        } catch (CancellationException ignored) {
            // Cancellation is the expected way for the script to terminate.
        }
        System.out.println("Server has stopped");
    }

    // Just one test because we want to start up and tear down only once
    // ignored because it is flaky and because we have netty tests elsewhere.
    @Ignore
    @Test
    public void testAPIs()
        throws IOException
    {
        final String baseUrl = "http://localhost:" + getPort();

        // Plain GET.
        String response = Utils.getString(baseUrl + "/dogs", 200);
        assertEquals("I like dogs", response);
        assertTrue(response.length() > 0);

        // GET again with compression enabled.
        response = Utils.getString(baseUrl + "/dogs", 200, true);
        System.out.println("Compressed response: " + response);
        assertEquals("I like dogs", response);
        assertTrue(response.length() > 0);

        // JSON POST is echoed back as canonical JSON.
        final String body = "{ \"name\": \"Bo\" }";
        response = Utils.postString(baseUrl + "/dogs", body, "application/json", 200);
        System.out.println("Set response: " + response);
        assertEquals("{\"name\":\"Bo\"}", response);
        assertTrue(response.length() > 0);

        // Plain-text POST to the second endpoint is just acknowledged.
        response = Utils.postString(baseUrl + "/dogs2", body, "text/plain", 200);
        assertTrue(response.length() > 0);
        assertEquals("ok", response);
    }
}
| 1,365
|
308
|
import pytest
from fabric.api import run
from fabtools.utils import run_as_root
pytestmark = pytest.mark.network
@pytest.fixture(scope='module', autouse=True)
def check_for_debian_family():
    """Skip the whole module unless the remote host is Debian-family.

    apt-key only exists on Debian/Ubuntu, so these tests are meaningless
    elsewhere. Runs automatically once per module.
    """
    from fabtools.system import distrib_family
    if distrib_family() != 'debian':
        pytest.skip("Skipping apt-key test on non-Debian distrib")
def test_add_apt_key_with_key_id_from_url():
    """Import the Varnish repository key by explicit key id from a URL."""
    from fabtools.deb import add_apt_key
    try:
        add_apt_key(keyid='C4DEFFEB', url='http://repo.varnish-cache.org/debian/GPG-key.txt')
        # Verify the key fingerprint is now known to apt.
        run_as_root('apt-key finger | grep -q C4DEFFEB')
    finally:
        # Always remove the key so later tests start from a clean keyring.
        run_as_root('apt-key del C4DEFFEB', quiet=True)
def test_add_apt_key_with_key_id_from_specific_key_server():
    """Import a key by id from an explicit keyserver.

    NOTE(review): the '<KEY>' tokens are redaction placeholders from the
    original source; substitute a real key id before running.
    """
    from fabtools.deb import add_apt_key
    try:
        add_apt_key(keyid='<KEY>', keyserver='keyserver.ubuntu.com')
        # Verify the key fingerprint is now known to apt.
        run_as_root('apt-key finger | grep -q <KEY>')
    finally:
        # Clean up the keyring regardless of the outcome.
        run_as_root('apt-key del <KEY>', quiet=True)
def test_add_apt_key_with_key_id_from_file():
    """Import a key by id from a key file downloaded onto the remote host."""
    from fabtools.deb import add_apt_key
    try:
        # Fetch the key material to a temp file on the remote host first.
        run('wget http://repo.varnish-cache.org/debian/GPG-key.txt -O /tmp/tmp.fabtools.test.key')
        add_apt_key(keyid='C4DEFFEB', filename='/tmp/tmp.fabtools.test.key')
        # Verify the key fingerprint is now known to apt.
        run_as_root('apt-key finger | grep -q C4DEFFEB')
    finally:
        # Always remove the key so later tests start from a clean keyring.
        run_as_root('apt-key del C4DEFFEB', quiet=True)
def test_add_apt_key_without_key_id_from_url():
    """Import a key from a URL without specifying the key id up front."""
    from fabtools.deb import add_apt_key
    try:
        add_apt_key(url='http://repo.varnish-cache.org/debian/GPG-key.txt')
        # The Varnish key id is known, so we can still check the fingerprint.
        run_as_root('apt-key finger | grep -q C4DEFFEB')
    finally:
        # Always remove the key so later tests start from a clean keyring.
        run_as_root('apt-key del C4DEFFEB', quiet=True)
def test_add_apt_key_without_key_id_from_file():
    """Import a key from a local file without specifying the key id."""
    from fabtools.deb import add_apt_key
    try:
        # Fetch the key material to a temp file on the remote host first.
        run('wget http://repo.varnish-cache.org/debian/GPG-key.txt -O /tmp/tmp.fabtools.test.key')
        add_apt_key(filename='/tmp/tmp.fabtools.test.key')
        # Verify the key fingerprint is now known to apt.
        run_as_root('apt-key finger | grep -q C4DEFFEB')
    finally:
        # Always remove the key so later tests start from a clean keyring.
        run_as_root('apt-key del C4DEFFEB', quiet=True)
def test_require_deb_key_from_url():
    """The declarative require API should install a key from a URL."""
    from fabtools.require.deb import key as require_key
    try:
        require_key(keyid='C4DEFFEB', url='http://repo.varnish-cache.org/debian/GPG-key.txt')
        # Verify the key fingerprint is now known to apt.
        run_as_root('apt-key finger | grep -q C4DEFFEB')
    finally:
        # Always remove the key so later tests start from a clean keyring.
        run_as_root('apt-key del C4DEFFEB', quiet=True)
def test_require_deb_key_from_specific_keyserver():
    """The declarative require API should fetch a key from a keyserver.

    NOTE(review): the '<KEY>' tokens are redaction placeholders from the
    original source; substitute a real key id before running.
    """
    from fabtools.require.deb import key as require_key
    try:
        require_key(keyid='<KEY>', keyserver='keyserver.ubuntu.com')
        # Verify the key fingerprint is now known to apt.
        run_as_root('apt-key finger | grep -q <KEY>')
    finally:
        # Clean up the keyring regardless of the outcome.
        run_as_root('apt-key del <KEY>', quiet=True)
def test_require_deb_key_from_file():
    """The declarative require API should install a key from a local file."""
    from fabtools.require.deb import key as require_key
    try:
        # Fetch the key material to a temp file on the remote host first.
        run('wget http://repo.varnish-cache.org/debian/GPG-key.txt -O /tmp/tmp.fabtools.test.key')
        require_key(keyid='C4DEFFEB', filename='/tmp/tmp.fabtools.test.key')
        # Verify the key fingerprint is now known to apt.
        run_as_root('apt-key finger | grep -q C4DEFFEB')
    finally:
        # Always remove the key so later tests start from a clean keyring.
        run_as_root('apt-key del C4DEFFEB', quiet=True)
| 1,371
|
429
|
/**
* (C) Copyright 2016-2021 Intel Corporation.
*
* SPDX-License-Identifier: BSD-2-Clause-Patent
*/
#include "dfuse_common.h"
#include "dfuse.h"
/* FUSE getattr callback: reply with current attributes for inode @ie.
 * For unlinked files the cached stat is returned; otherwise the backing
 * DFS object is stat'ed and the cached size is widened if needed.
 */
void
dfuse_cb_getattr(fuse_req_t req, struct dfuse_inode_entry *ie)
{
	struct stat attr = {};
	int rc;

	/* An unlinked file has no backing object to stat; answer from the
	 * most recent cached attributes instead.
	 */
	if (ie->ie_unlinked) {
		DFUSE_TRA_DEBUG(ie, "File is unlinked, returning most recent data");
		DFUSE_REPLY_ATTR(ie, req, &ie->ie_stat);
		return;
	}

	rc = dfs_ostat(ie->ie_dfs->dfs_ns, ie->ie_obj, &attr);
	if (rc != 0)
		D_GOTO(err, rc);

	/* dfs_ostat() does not know the fuse-level inode number; restore it
	 * from the cached stat before replying.
	 */
	attr.st_ino = ie->ie_stat.st_ino;

	/* Update the size as dfuse knows about it for future use.
	 *
	 * This size is used for detecting reads of zerod data for files
	 * so do not shrink the filesize here, potentially this getattr
	 * can race with a write, where the write would set the size, this
	 * getattr can fetch the stale size and then the write callback
	 * can complete, leaving dfuse thinking the filesize is smaller
	 * than it is. As such do not shrink the filesize here to avoid
	 * DAOS-8333
	 */
	if (attr.st_size > ie->ie_stat.st_size)
		ie->ie_stat.st_size = attr.st_size;

	DFUSE_REPLY_ATTR(ie, req, &attr);
	return;
err:
	DFUSE_REPLY_ERR_RAW(ie, req, rc);
}
| 464
|
2,360
|
<reponame>LiamBindle/spack<gh_stars>1000+
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class ROligoclasses(RPackage):
    """Classes for high-throughput arrays supported by oligo and crlmm

    This package contains class definitions, validity checks, and
    initialization methods for classes used by the oligo and crlmm
    packages."""

    homepage = "https://bioconductor.org/packages/oligoClasses"
    git = "https://git.bioconductor.org/packages/oligoClasses.git"

    # Versions pinned to Bioconductor release commits.
    # NOTE(review): the 1.52.0 commit hash was replaced by a '<PASSWORD>'
    # redaction placeholder in this copy; restore the real commit id from
    # the Bioconductor git repository before use.
    version('1.52.0', commit='<PASSWORD>')
    version('1.46.0', commit='325684f66fc92f778098f24bcfbef0ce3da9717c')
    version('1.44.0', commit='d3e1134cdbea5f95b83215dc66e5f7b6a1cd0638')
    version('1.42.0', commit='ef125700d487b470281a9c1e985390633c4dd2bd')
    version('1.40.0', commit='32f40617e62d05c457baaebc7e27585b852848ed')
    version('1.38.0', commit='fe2bb7f02c7ed3cbd338254c27ceba6ff829a962')

    # Runtime/build dependencies with minimum versions from the DESCRIPTION
    # file; 'when' constraints track dependency changes across releases.
    depends_on('r@2.14:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.3.2:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.27.1:', when='@1.44.0:', type=('build', 'run'))
    depends_on('r-biobase@2.17.8:', type=('build', 'run'))
    depends_on('r-iranges@2.5.17:', type=('build', 'run'))
    depends_on('r-genomicranges@1.23.7:', type=('build', 'run'))
    depends_on('r-summarizedexperiment', type=('build', 'run'))
    depends_on('r-biostrings@2.23.6:', type=('build', 'run'))
    depends_on('r-affyio@1.23.2:', type=('build', 'run'))
    depends_on('r-foreach', type=('build', 'run'))
    depends_on('r-biocmanager', when='@1.44.0:', type=('build', 'run'))
    depends_on('r-s4vectors@0.9.25:', type=('build', 'run'))
    depends_on('r-rsqlite', type=('build', 'run'))
    depends_on('r-dbi', when='@1.40.0:', type=('build', 'run'))
    depends_on('r-ff', type=('build', 'run'))
    depends_on('r-biocinstaller', when='@:1.42.0', type=('build', 'run'))
| 900
|
945
|
# -*- coding: utf-8 -*-
"""AWS operations by cli should be simpler."""
import os
import re
from setuptools import find_packages, setup
# Directory containing this setup.py; used to resolve packaged data files.
base_dir = os.path.dirname(os.path.abspath(__file__))

# Taken from "kennethreitz/requests": http://git.io/vcuY8
# Parse __version__ out of the package source so the version number is
# defined in exactly one place.
version = ''
with open('jungle/__init__.py', 'r') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError('Cannot find version information')
def read_file(filename):
    """Return the contents of *filename* (relative to this setup.py).

    Returns an empty string when the file does not exist, so optional
    files like README.md never break packaging.

    Fix: the original used ``open(filepath).read()``, leaking the file
    handle until garbage collection; a ``with`` block closes it
    deterministically.
    """
    filepath = os.path.join(base_dir, filename)
    if not os.path.exists(filepath):
        return ''
    with open(filepath) as f:
        return f.read()
# Declarative package metadata; `version` is parsed from jungle/__init__.py
# and the long description is loaded lazily from README.md above.
setup(
    name='jungle',
    version=version,
    url='https://github.com/achiku/jungle',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='AWS operations by cli should be simpler',
    long_description=read_file('README.md'),
    long_description_content_type="text/markdown",
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=[
        'boto3',
        'botocore',
        'click'
    ],
    # Installs the `jungle` console command, dispatching to the click CLI.
    entry_points={
        'console_scripts': [
            'jungle = jungle.cli:cli',
        ],
    },
    classifiers=[
        # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        # 'Development Status :: 1 - Planning',
        # 'Development Status :: 2 - Pre-Alpha',
        # 'Development Status :: 3 - Alpha',
        'Development Status :: 4 - Beta',
        # 'Development Status :: 5 - Production/Stable',
        # 'Development Status :: 6 - Mature',
        # 'Development Status :: 7 - Inactive',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Operating System :: Unix',
        # 'Operating System :: Windows',
        # 'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    test_suite='tests',
    tests_require=[
        'httpretty',
        'tox',
        'isort',
        'flake8',
        'pep257',
        'py',
        'pytest',
        'pytest-cov',
        'pytest-mock',
        'pytest-isort',
        'pytest-pep8',
        'moto'
    ]
)
| 1,138
|
310
|
{
"name": "Tread",
"description": "A treadmill.",
"url": "https://www.onepeloton.com/tread"
}
| 41
|
665
|
// This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
/** python_procedure_test.cc -*- C++ -*-
<NAME>, 9 mars 2015
Copyright (c) 2015 mldb.ai inc. All rights reserved.
*/
#include "mldb/server/mldb_server.h"
#include "mldb/http/http_rest_proxy.h"
#include "mldb/builtin/plugin_resource.h"
#include "mldb/core/procedure.h"
#include "mldb/types/value_description.h"
#define BOOST_TEST_MAIN
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
using namespace std;
using namespace MLDB;
// End-to-end test: boot an in-process MLDB server, register a Python plugin
// that defines a custom procedure type, then create and run an instance of
// that procedure over REST.
BOOST_AUTO_TEST_CASE( test_two_members )
{
    MldbServer server;

    server.init();

    // Bind on any free port in the range so parallel test runs don't clash.
    string httpBoundAddress = server.bindTcp(PortRange(17000,18000), "127.0.0.1");

    cerr << "http listening on " << httpBoundAddress << endl;

    server.start();

    HttpRestProxy proxy(httpBoundAddress);

    PolyConfig pluginConfig;
    pluginConfig.type = "python";

    // Plugin source registering the "my_procedure" procedure type.
    // NOTE(review): this embedded script is Python 2 ("print" statement);
    // indentation below is reconstructed — the whitespace of this copy of
    // the file was mangled.
    PluginResource plugRes;
    plugRes.source.main = R"foo(
def doTrain(mldb, trainingConfig):
    mldb.log(str(trainingConfig))
    print "wahou"
    return {"status": "OK"}

mldb.create_procedure("my_procedure", "description of my procedure", doTrain)

print "pwet";
)foo";
    pluginConfig.params = plugRes;

    auto putResult = proxy.put("/v1/plugins/myplugin",
                               jsonEncode(pluginConfig));
    cerr << putResult << endl;
    BOOST_CHECK_EQUAL(putResult.code(), 201);

    // Check procedure was added successfully
    auto getResult = proxy.get("/v1/types/procedures");
    cerr << "getResult = " << getResult << endl;
    BOOST_REQUIRE(getResult.body().find("my_procedure") != string::npos);
    // BOOST_CHECK_EQUAL(getResult.jsonBody()["how"].asString(), "are you");

    // Instantiate the custom procedure type with an arbitrary parameter.
    Json::Value training;
    training["id"] = "my_procedure_train";
    training["type"] = "my_procedure";

    Json::Value customConf;
    customConf["param"] = 5;
    training["params"] = customConf;

    putResult = proxy.put("/v1/procedures/my_procedure_train",
                          jsonEncode(training));
    cerr << putResult << endl;
    BOOST_CHECK_EQUAL(putResult.code(), 201);

    // Kick off a run of the procedure and expect it to be created (201).
    ProcedureRunConfig trainConf;
    trainConf.id = "1";
    putResult = proxy.put("/v1/procedures/my_procedure_train/runs/1", jsonEncode(trainConf));
    cerr << putResult << endl;
    BOOST_CHECK_EQUAL(putResult.code(), 201);
}
| 1,002
|
485
|
<reponame>708yamaguchi/MaixPy_scripts
from micropython import const
from time import sleep_ms
import ustruct
import image
# Display resolution
SPEINK_WIDTH = const(200)
SPEINK_HEIGHT = const(200)
SPEINK_ROTATION = const(180) # 0, 90, 180, 270
BUSY = const(1) # 1=busy, 0=idle
class SPEINK:
    """Driver for a 200x200 tri-colour (black/white/red) SPI e-paper panel.

    The controller keeps two frame buffers in SRAM: command 0x10 receives
    the black/white plane and command 0x13 the red plane; command 0x12
    triggers a display refresh. Pin objects are MicroPython GPIOs.
    """

    def __init__(self, spi, cs, dc, rst, busy, width, height, rotation):
        # spi:  configured machine.SPI bus (master, mode 0)
        # cs:   chip-select GPIO (active low)
        # dc:   data/command select GPIO (0 = command, 1 = data)
        # rst:  reset GPIO (active low)
        # busy: busy-status input GPIO (1 = controller busy)
        self.spi = spi
        self.cs = cs
        self.dc = dc
        self.rst = rst
        self.busy = busy
        self.cs.value(0)
        self.dc.value(0)
        self.rst.value(1)
        self.width = width
        self.height = height
        # Rotation (degrees) applied to the B/W plane before transfer.
        self.rotation = rotation

    # Waveform look-up tables shared by all instances (class attributes),
    # uploaded to the controller by lut_bw() / lut_red().
    lut_vcom0 = bytearray(
        b'\x0E\x14\x01\x0A\x06\x04\x0A\x0A\x0F\x03\x03\x0C\x06\x0A\x00')
    lut_w = bytearray(
        b'\x0E\x14\x01\x0A\x46\x04\x8A\x4A\x0F\x83\x43\x0C\x86\x0A\x04')
    lut_b = bytearray(
        b'\x0E\x14\x01\x8A\x06\x04\x8A\x4A\x0F\x83\x43\x0C\x06\x4A\x04')
    lut_g1 = bytearray(
        b'\x8E\x94\x01\x8A\x06\x04\x8A\x4A\x0F\x83\x43\x0C\x06\x0A\x04')
    lut_g2 = bytearray(
        b'\x8E\x94\x01\x8A\x06\x04\x8A\x4A\x0F\x83\x43\x0C\x06\x0A\x04')
    lut_vcom1 = bytearray(
        b'\x03\x1D\x01\x01\x08\x23\x37\x37\x01\x00\x00\x00\x00\x00\x00')
    lut_red0 = bytearray(
        b'\x83\x5D\x01\x81\x48\x23\x77\x77\x01\x00\x00\x00\x00\x00\x00')
    lut_red1 = bytearray(
        b'\x03\x1D\x01\x01\x08\x23\x37\x37\x01\x00\x00\x00\x00\x00\x00')

    def _command(self, command, data=None):
        # Send one command byte (DC low), optionally followed by data bytes.
        self.dc(0)
        self.cs(0)
        self.spi.write(bytearray([command]))
        self.cs(1)
        if data is not None:
            self._data(data)
        self.dc(1)

    def _data(self, data):
        # Send data bytes with DC held high and CS asserted for the burst.
        self.dc(1)
        self.cs(0)
        self.spi.write(data)
        self.cs(1)

    def reset(self):
        # Hardware reset pulse on RST.
        # NOTE(review): the leading dc(0)/dc(1) toggle looks like it was
        # meant for rst — confirm against the panel datasheet.
        self.dc(0)
        sleep_ms(200)
        self.dc(1)
        self.rst(0)
        sleep_ms(100)
        self.rst(1)
        sleep_ms(200)

    def init(self):
        """Reset the panel and program power, resolution, VCOM and LUTs."""
        self.reset()
        self._command(0x01)
        self._data(0x07)  # power setting: high/low voltage levels
        self._data(0x00)
        self._data(0x0f)  # red drive voltage; larger value = deeper red
        self._data(0x00)
        self._command(0x06)  # booster soft-start
        self._data(0x07)
        self._data(0x07)
        self._data(0x07)
        self._command(0x04)  # power on
        if self.wait_until_idle() == False:
            pass  # best effort: continue even if the panel stays busy
        self._command(0X00)
        self._data(0xcf)  # panel setting: select maximum resolution
        self._command(0X50)
        self._data(0x37)
        self._command(0x30)
        self._data(0x39)  # PLL setting
        self._command(0x61)  # resolution setting
        self._data(0xC8)  # 200 pixels
        self._data(0x00)  # 200 pixels (high byte)
        self._data(0xC8)
        self._command(0x82)  # VCOM setting
        self._data(0x18)
        self.lut_bw()
        self.lut_red()

    # brief: display image on eink
    # img_r: red image
    # img_bw: b/w image (optional; panel area is cleared to white if None)
    def display(self, img_r, img_bw = None):
        img1 = image.Image()  # scratch image used to serialize each plane
        img1 = img1.resize(self.width, self.height)
        if(img_bw == None):
            # No B/W plane supplied: fill the B/W SRAM with white (0xff).
            self._command(0x10)  # write "B/W" data to SRAM. 0x00:black
            for i in range(10000):
                self._data(0xff)
        else:
            img1.draw_image(img_bw, 0, 0)
            # Parameter 'fov' is to solve data loss issues
            img1.rotation_corr(x_rotation=self.rotation, fov=2)
            img_bytes = img1.to_bytes()  # That's "self.width*self.height*2" bytes
            self._command(0x10)  # write "B/W" data to SRAM 0x00:black,0xff:white
            # Each iteration consumes 8 pixels (16 bytes of RGB565) and
            # emits two bytes of 2-bit-per-pixel B/W data.
            for i in range(0, self.width*self.height*2, 16):
                b = 0
                for j in range(0, 8, 2):
                    if img_bytes[i+j] or img_bytes[i+j+1]:
                        b = b | (0xc0 >> j)
                self._data(~b)
                b = 0
                for j in range(8, 16, 2):
                    if img_bytes[i+j] or img_bytes[i+j+1]:
                        b = b | (0xc0 >> j-8)
                self._data(~b)
        img1.draw_image(img_r, 0, 0)
        # Parameter 'fov' is to solve data loss issues
        # NOTE(review): the red plane uses a hardcoded 180 rotation while
        # the B/W plane uses self.rotation — confirm this asymmetry is
        # intentional.
        img1.rotation_corr(x_rotation=180, fov=2)
        img_bytes = img1.to_bytes()  # That's "self.width*self.height*2" bytes
        self._command(0x13)  # write "RED" data to SRAM 0x00:red,0xff:white
        # 8 pixels (16 bytes) fold into one 1-bit-per-pixel byte.
        for i in range(0, self.width*self.height*2, 16):
            b = 0
            for j in range(0, 16, 2):
                if img_bytes[i+j] or img_bytes[i+j+1]:
                    b = b | (0x80 >> j//2)
            self._data(~b)
        self._command(0x12)  # display refresh
        self.wait_until_idle()

    def wait_until_idle(self):
        # Poll the BUSY pin for up to ~1 second; True when the panel is idle.
        for i in range(10):
            sleep_ms(100)
            if self.busy.value() != BUSY:
                return True
            print('self.busy', self.busy.value())
        return False

    def lut_bw(self):
        # Upload the black/white waveform tables.
        self._command(0x20, SPEINK.lut_vcom0)
        self._command(0x21, SPEINK.lut_w)
        self._command(0x22, SPEINK.lut_b)
        self._command(0x23, SPEINK.lut_g1)
        self._command(0x24, SPEINK.lut_g2)

    def lut_red(self):
        # Upload the red waveform tables.
        self._command(0x25, SPEINK.lut_vcom1)
        self._command(0x26, SPEINK.lut_red0)
        self._command(0x27, SPEINK.lut_red1)

    # enter deep sleep A0=1, A0=0 power on
    def sleep(self):
        self._command(0x50)
        self._data(0xf7)
        self._command(0x02)  # power off
        self.wait_until_idle()
        self._data(0x07)
        self._command(0xa5)  # deep-sleep check code
if __name__ == "__main__":
    # Demo: drive the e-ink panel over SPI from a MaixCube's SPMOD header.
    from Maix import GPIO
    from fpioa_manager import fm
    from machine import SPI

    # MaixCube | SPMOD
    # [7 |VCC] [RST|3V3]
    # [15 | 21] [D/C|SCK]
    # [20 | 8] [CS |SI ]
    # [GND| 6] [GND|BL ]
    ################### config ###################
    SPI_EINK_NUM = SPI.SPI1
    SPI_EINK_DC_PIN_NUM = const(15)
    SPI_EINK_BUSY_PIN_NUM = const(6)
    SPI_EINK_RST_PIN_NUM = const(7)
    SPI_EINK_CS_PIN_NUM = const(20)
    SPI_EINK_SCK_PIN_NUM = const(21)
    SPI_EINK_MOSI_PIN_NUM = const(8)
    SPI_EINK_FREQ_KHZ = const(600)
    ##############################################

    spi1 = SPI(SPI_EINK_NUM, mode=SPI.MODE_MASTER, baudrate=SPI_EINK_FREQ_KHZ * 1000,
               polarity=0, phase=0, bits=8, firstbit=SPI.MSB, sck=SPI_EINK_SCK_PIN_NUM, mosi=SPI_EINK_MOSI_PIN_NUM)

    # Route the chosen pads to GPIOHS functions, then wrap them as GPIOs.
    fm.register(SPI_EINK_CS_PIN_NUM, fm.fpioa.GPIOHS20, force=True)
    fm.register(SPI_EINK_DC_PIN_NUM, fm.fpioa.GPIOHS15, force=True)
    fm.register(SPI_EINK_BUSY_PIN_NUM, fm.fpioa.GPIOHS6, force=True)
    fm.register(SPI_EINK_RST_PIN_NUM, fm.fpioa.GPIOHS7, force=True)
    cs = GPIO(GPIO.GPIOHS20, GPIO.OUT)
    dc = GPIO(GPIO.GPIOHS15, GPIO.OUT)
    busy = GPIO(GPIO.GPIOHS6, GPIO.IN, GPIO.PULL_DOWN)
    rst = GPIO(GPIO.GPIOHS7, GPIO.OUT)

    epd = SPEINK(spi1, cs, dc, rst, busy, SPEINK_WIDTH, SPEINK_HEIGHT, SPEINK_ROTATION)
    epd.init()

    # red image
    img_r = image.Image()
    img_r = img_r.resize(SPEINK_WIDTH, SPEINK_HEIGHT)
    img_r.draw_line(0, 0, 100, 100)
    img_r.draw_circle(50, 50, 20)
    img_r.draw_rectangle(80, 80, 30, 30)

    # bw image
    img_bw = image.Image()
    img_bw = img_bw.resize(SPEINK_WIDTH, SPEINK_HEIGHT)
    img_bw.draw_line(100, 50, 200, 100)
    img_bw.draw_circle(80, 80, 30)
    img_bw.draw_rectangle(10, 10, 60, 60)

    # Push both planes to the panel, then enter deep sleep to protect it.
    epd.display(img_r, img_bw)
    epd.sleep()
| 4,528
|
10,125
|
<gh_stars>1000+
/** @file
Dummy library for dependency ordering.
Copyright (c) 2020, vit9696. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
| 63
|
3,428
|
<reponame>ghalimi/stdlib
{"id":"01594","group":"easy-ham-1","checksum":{"type":"MD5","value":"9e0ea187c7d58d78b9c2a00ecbb57e70"},"text":"From <EMAIL> Thu Sep 19 17:47:42 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: y<EMAIL>.spamassassin.taint.org\nReceived: from localhost (jalapeno [127.0.0.1])\n\tby jmason.org (Postfix) with ESMTP id 6F21E16F03\n\tfor <jm@localhost>; Thu, 19 Sep 2002 17:47:37 +0100 (IST)\nReceived: from jalapeno [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Thu, 19 Sep 2002 17:47:37 +0100 (IST)\nReceived: from usw-sf-list2.sourceforge.net (usw-sf-fw2.sourceforge.net\n [216.136.171.252]) by dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id\n g8JD2vC20239 for <<EMAIL>>; Thu, 19 Sep 2002 14:02:57 +0100\nReceived: from usw-sf-list1-b.sourceforge.net ([10.3.1.13]\n helo=usw-sf-list1.sourceforge.net) by usw-sf-list2.sourceforge.net with\n esmtp (Exim 3.31-VA-mm2 #1 (Debian)) id 17s0j1-00069D-00; Thu,\n 19 Sep 2002 05:48:07 -0700\nReceived: from dhcp024-208-195-177.indy.rr.com ([24.208.195.177]\n helo=burgers.bubbanfriends.org) by usw-sf-list1.sourceforge.net with esmtp\n (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian)) id\n 17s0iN-0004u7-00 for <<EMAIL>>; Thu,\n 19 Sep 2002 05:47:28 -0700\nReceived: from localhost (localhost.localdomain [1172.16.58.3]) by\n burgers.bubbanfriends.org (Postfix) with ESMTP id 5CEF34B7E7C;\n Thu, 19 Sep 2002 07:47:17 -0500 (EST)\nReceived: by burgers.bubbanfriends.org (Postfix, from userid 500) id\n 1BB0D4B7E7B; Thu, 19 Sep 2002 07:47:16 -0500 (EST)\nReceived: from localhost (localhost [127.0.0.1]) by\n burgers.bubbanfriends.org (Postfix) with ESMTP id 092D7C026A6;\n Thu, 19 Sep 2002 07:47:15 -0500 (EST)\nFrom: <NAME> <<EMAIL>>\nTo: <NAME> <<EMAIL>>\nCc: <EMAIL>\nSubject: Re: [Razor-users] Razor: shall I use it with AMaViS or with\n SpamAssassin?\nIn-Reply-To: <<EMAIL>1c25fb5<EMAIL>>\nMessage-Id: <<EMAIL>.<EMAIL>>\nMIME-Version: 1.0\nContent-Type: TEXT/PLAIN; 
charset=US-ASCII\nX-Virus-Scanned: by AMaViS new-20020517\nSender: razor-users-admin<EMAIL>.sourceforge.net\nErrors-To: <EMAIL>-admin<EMAIL>.net\nX-Beenthere: <EMAIL>.<EMAIL>.net\nX-Mailman-Version: 2.0.9-sf.net\nPrecedence: bulk\nList-Help: <mailto:<EMAIL>?subject=help>\nList-Post: <mailto:<EMAIL>>\nList-Subscribe: <https://example.sourceforge.net/lists/listinfo/razor-users>,\n <mailto:<EMAIL>?subject=subscribe>\nList-Id: <razor-users.example.sourceforge.net>\nList-Unsubscribe: <https://example.sourceforge.net/lists/listinfo/razor-users>,\n <mailto:<EMAIL>?subject=unsubscribe>\nList-Archive: <http://sourceforge.net/mailarchives/forum.php?forum=razor-users>\nX-Original-Date: Thu, 19 Sep 2002 07:47:15 -0500 (EST)\nDate: Thu, 19 Sep 2002 07:47:15 -0500 (EST)\n\nDepends on how you want to use it.\n\nThe default setup of running it from procmail works just fine, as long as \nyou remember to go into your postfix/main.cf file, and tell it to use \nprocmail instead of the internal delivery agent.\n\nOn Thu, 19 Sep 2002, <NAME> wrote:\n\n> How do I intergrate razor into my postfix setup? Will it have to\n> interact with AMaViS or with SpamAssassin?\n> \n> Thank you\n> \n> <NAME>\n> Informa Srl\n> Via 42 Martiri, 165\n> 28924 Verbania (VB)\n> Tel +39 0323 586216\n> Fax +39 0323 586672\n> http://www.co-ver.it/informa\n> \n> \n> \n> \n> -------------------------------------------------------\n> This sf.net email is sponsored by:ThinkGeek\n> Welcome to geek heaven.\n> http://thinkgeek.com/sf\n> _______________________________________________\n> Razor-users mailing list\n> <EMAIL>\n> https://lists.sourceforge.net/lists/listinfo/razor-users\n> \n\n\n\n-------------------------------------------------------\nThis sf.net email is sponsored by:ThinkGeek\nWelcome to geek heaven.\nhttp://thinkgeek.com/sf\n_______________________________________________\nRazor-users mailing list\n<EMAIL>.net\nhttps://lists.sourceforge.net/lists/listinfo/razor-users\n\n\n"}
| 1,645
|
634
|
<filename>modules/base/platform-impl/src/main/java/consulo/start/WelcomeFrameManager.java<gh_stars>100-1000
/*
* Copyright 2013-2017 consulo.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package consulo.start;
import com.intellij.openapi.actionSystem.ActionPlaces;
import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.application.Application;
import com.intellij.openapi.application.ModalityState;
import com.intellij.openapi.components.ServiceManager;
import com.intellij.openapi.project.DumbAwareRunnable;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.project.ProjectManager;
import com.intellij.openapi.project.ProjectManagerListener;
import com.intellij.openapi.wm.IdeFrame;
import com.intellij.openapi.wm.WindowManager;
import com.intellij.openapi.wm.ex.WindowManagerEx;
import consulo.ui.annotation.RequiredUIAccess;
import consulo.ui.UIAccess;
import consulo.ui.Size;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
/**
* @author VISTALL
* @since 23-Sep-17
*/
/**
 * Application service that owns the lifecycle of the "welcome" frame shown when no
 * project is open. Concrete subclasses supply the actual frame via {@link #createFrame()}.
 * All frame operations must run on the UI thread.
 */
public abstract class WelcomeFrameManager {
  @Nonnull
  public static WelcomeFrameManager getInstance() {
    return ServiceManager.getService(WelcomeFrameManager.class);
  }
  /** Dimension-service key used to persist the welcome window bounds. */
  public static final String DIMENSION_KEY = "WELCOME_SCREEN";
  /** Default size of the welcome window in pixels. */
  @Nonnull
  public static Size getDefaultWindowSize() {
    return new Size(800, 460);
  }
  /** Returns {@code true} when the given action event originated from the welcome screen. */
  public static boolean isFromWelcomeFrame(@Nonnull AnActionEvent e) {
    return e.getPlace().equals(ActionPlaces.WELCOME_SCREEN);
  }
  // Currently shown welcome frame, or null when it is not visible.
  private IdeFrame myFrameInstance;
  private final Application myApplication;
  protected WelcomeFrameManager(Application application) {
    myApplication = application;
    // Close the welcome frame as soon as any project is opened.
    application.getMessageBus().connect().subscribe(ProjectManager.TOPIC, new ProjectManagerListener() {
      @Override
      public void projectOpened(Project project, UIAccess uiAccess) {
        uiAccess.give(() -> closeFrame());
      }
    });
  }
  /** @return the currently shown frame, or {@code null}. UI thread only. */
  @Nullable
  @RequiredUIAccess
  public IdeFrame getCurrentFrame() {
    UIAccess.assertIsUIThread();
    return myFrameInstance;
  }
  /** Callback for subclasses: clears the cached instance once the frame window is gone. */
  protected void frameClosed() {
    myFrameInstance = null;
  }
  /** Creates and shows the welcome frame unless it is already visible. UI thread only. */
  @RequiredUIAccess
  public void showFrame() {
    UIAccess.assertIsUIThread();
    if (myFrameInstance == null) {
      myFrameInstance = createFrame();
      myFrameInstance.getWindow().show();
    }
  }
  /** Closes the welcome frame if it is currently shown. UI thread only. */
  @RequiredUIAccess
  public void closeFrame() {
    UIAccess.assertIsUIThread();
    IdeFrame frameInstance = myFrameInstance;
    if (frameInstance == null) {
      return;
    }
    // NOTE(review): myFrameInstance is expected to be reset via frameClosed() when the
    // window actually closes — confirm subclasses invoke it on window close.
    frameInstance.getWindow().close();
  }
  /** Asynchronously shows the welcome frame when no project frame remains open. */
  public void showIfNoProjectOpened() {
    myApplication.invokeLater((DumbAwareRunnable)() -> {
      WindowManagerEx windowManager = (WindowManagerEx)WindowManager.getInstance();
      // Dispose the reusable root frame first so it is not counted below.
      windowManager.disposeRootFrame();
      IdeFrame[] frames = windowManager.getAllProjectFrames();
      if (frames.length == 0) {
        showFrame();
      }
    }, ModalityState.NON_MODAL);
  }
  /** Builds the platform-specific welcome frame. Called at most once per showing. */
  @Nonnull
  @RequiredUIAccess
  protected abstract IdeFrame createFrame();
}
| 1,147
|
3,074
|
<gh_stars>1000+
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.

import time

# Fixed pause used to simulate a long-running step.
WAIT_SECONDS = 10

print("Wait for 10 seconds..")
time.sleep(WAIT_SECONDS)
print("Done waiting")
| 55
|
9,680
|
import torch
import nni.retiarii.nn.pytorch as nn
from nni.retiarii import model_wrapper
@model_wrapper
class Model(nn.Module):
    """Search space: a fixed conv stem followed by two LayerChoice mutation points.

    ``conv2`` chooses between a 3x3 conv and max-pooling; ``conv3`` chooses
    between identity and a 1x1 conv. Produces one output value per sample.
    """

    def __init__(self, in_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, 10, 3)
        self.conv2 = nn.LayerChoice([
            nn.Conv2d(10, 10, 3),
            nn.MaxPool2d(3)
        ])
        self.conv3 = nn.LayerChoice([
            nn.Identity(),
            nn.Conv2d(10, 10, 1)
        ])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(10, 1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        # Global average pool, then flatten to (batch, 10) for the linear head.
        x = self.avgpool(x).view(x.size(0), -1)
        x = self.fc(x)
        return x
@model_wrapper
class ModelInner(nn.Module):
    """Small search space: two LayerChoice points over 10->10 linear layers."""

    def __init__(self):
        super().__init__()
        self.net1 = nn.LayerChoice([
            nn.Linear(10, 10),
            nn.Linear(10, 10, bias=False)
        ])
        self.net2 = nn.LayerChoice([
            nn.Linear(10, 10),
            nn.Linear(10, 10, bias=False)
        ])

    def forward(self, x):
        x = self.net1(x)
        x = self.net2(x)
        return x
@model_wrapper
class ModelNested(nn.Module):
    """Nested search space: two ModelInner spaces around a LayerChoice of its own."""

    def __init__(self):
        super().__init__()
        self.fc1 = ModelInner()
        self.fc2 = nn.LayerChoice([
            nn.Linear(10, 10),
            nn.Linear(10, 10, bias=False)
        ])
        self.fc3 = ModelInner()

    def forward(self, x):
        return self.fc3(self.fc2(self.fc1(x)))
def test_model_wrapper():
    """Wrapping preserves the original class/ctor args and labels choices stably."""
    model = Model(3)
    # The wrapper records the undecorated class and the constructor kwargs.
    assert model.trace_symbol == Model.__wrapped__
    assert model.trace_kwargs == {'in_channels': 3}
    # LayerChoice labels are assigned in definition order.
    assert model.conv2.label == 'model_1'
    assert model.conv3.label == 'model_2'
    assert model(torch.randn(1, 3, 5, 5)).size() == torch.Size([1, 1])
    # Re-instantiating must not shift the labels.
    model = Model(4)
    assert model.trace_symbol == Model.__wrapped__
    assert model.conv2.label == 'model_1'  # not changed
def test_model_wrapper_nested():
    """Nested mutables get hierarchical labels: <outer index>_<inner index>."""
    model = ModelNested()
    assert model.fc1.net1.label == 'model_1_1'
    assert model.fc1.net2.label == 'model_1_2'
    assert model.fc2.label == 'model_2'
    assert model.fc3.net1.label == 'model_3_1'
    assert model.fc3.net2.label == 'model_3_2'
if __name__ == '__main__':
    # Run both test cases when executed directly (previously only the
    # nested-model test ran, leaving test_model_wrapper unexercised).
    test_model_wrapper()
    test_model_wrapper_nested()
| 1,184
|
7,482
|
/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2015-01-07     Grissiom     add comment
 */
#include <rtthread.h>

#ifdef RT_USING_VBUS

#include <rtdevice.h>
#include <vbus.h>
#include <board.h>

/* Two VBus ring buffers (one per direction), placed in the dedicated
 * "vbus_ring" linker section so both cores agree on their location. */
struct rt_vbus_ring rt_vbus_rings[2] RT_SECTION("vbus_ring");

/* Initialize VBus with rings[1] outgoing and rings[0] incoming; registered
 * as a component init so it runs automatically during system startup. */
int rt_vbus_do_init(void)
{
    return rt_vbus_init(&rt_vbus_rings[1], &rt_vbus_rings[0]);
}
INIT_COMPONENT_EXPORT(rt_vbus_do_init);

/* Enable the inter-core interrupt used by the peer (M0) core to signal us. */
int rt_vbus_hw_init(void)
{
    NVIC_ClearPendingIRQ(M0_M4CORE_IRQn);
    NVIC_EnableIRQ(M0_M4CORE_IRQn);
    return 0;
}

/* IRQ handler for inter-core notifications: acknowledge the hardware event,
 * then hand off to the generic VBus ISR. */
void M4CORE_IRQHandler(void)
{
    LPC_CREG->M4TXEVENT = 0;
    rt_vbus_isr(M0_M4CORE_IRQn, RT_NULL);
}

/* End-of-interrupt hook required by the VBus core. */
int rt_vbus_hw_eoi(int irqnr, void *param)
{
    /* Nothing to do here as we cleared the interrupt in IRQHandler. */
    return 0;
}

/* Static channel table ("vecho" demo channel); terminated by an entry
 * whose name is RT_NULL. Watermarks are set at 1/3 and 2/3 of the ring. */
struct rt_vbus_dev rt_vbus_chn_devx[] = {
    {
        .req =
        {
            .prio = 30,
            .name = "vecho",
            .is_server = 0,
            .recv_wm.low = RT_VMM_RB_BLK_NR / 3,
            .recv_wm.high = RT_VMM_RB_BLK_NR * 2 / 3,
            .post_wm.low = RT_VMM_RB_BLK_NR / 3,
            .post_wm.high = RT_VMM_RB_BLK_NR * 2 / 3,
        }
    },
    {
        .req =
        {
            .name = RT_NULL,
        }
    },
};

#endif /* RT_USING_VBUS */
|
21,684
|
<filename>test/common/parsePolyglot.py
#!/usr/bin/env python
from __future__ import print_function
import os, re, sys
# == globals
printDebug = False
try:
unicode
except NameError:
unicode = str
# ==
class yamlValue(unicode):
    """A text value that remembers the source line it was parsed from."""

    # Default when no line number was recorded.
    linenumber = None

    def __new__(cls, value, linenumber=None):
        # Already-text values pass straight through; raw bytes are decoded as UTF-8.
        if isinstance(value, unicode):
            instance = unicode.__new__(cls, value)
        else:
            instance = unicode.__new__(cls, value, "utf-8")
        if linenumber is not None:
            instance.linenumber = int(linenumber)
        return instance

    def __repr__(self):
        # Strip the Python 2 u'' prefix so output is stable across versions.
        return super(yamlValue, self).__repr__().lstrip('u')
def parseYAML(source):
    """Parse a limited YAML subset (dicts, lists, scalar text) from ``source``.

    ``source`` may be a file path, a YAML string, or an open file-like object.
    Scalars are returned as ``yamlValue`` instances carrying the line number
    they started on. Plain keys, ``-`` list items, and multi-line text blocks
    are supported; quoting, anchors, and flow style are not.
    """
    def debug(message):
        # Print trace output only when the module-level printDebug flag is set.
        if printDebug and message:
            message = str(message).rstrip()
            if message:
                print(message)
                sys.stdout.flush()
    commentLineRegex = re.compile('^\s*#')
    # One regex classifies every line: indent, optional "- " item marker with
    # its content, or an optional "key:" prefix followed by content.
    yamlLineRegex = re.compile('^(?P<indent> *)((?P<itemMarker>- +)(?P<itemContent>.*)|((?P<key>[\w\.]+)(?P<keyExtra>: *))?(?P<content>.*))\s*$')
    def parseYAML_inner(source, indent):
        # Recursive-descent over a generator of (linenumber, line) pairs.
        # The generator also accepts pushed-back lines via send(), which is how
        # we re-process a line at a deeper nesting level or after over-reading.
        returnItem = None
        for linenumber, line in source:
            if line == '': # no newline, so EOF
                break
            debug('line %d (%d):%s' % (linenumber, indent, line))
            if line.strip() == '' or commentLineRegex.match(line): # empty or comment line, ignore
                debug('\tempty/comment line')
                continue
            # - parse line
            parsedLine = yamlLineRegex.match(line)
            if not parsedLine:
                raise Exception('Unparseable YAML line %d: %s' % (linenumber, line.rstrip()))
            lineIndent = len(parsedLine.group('indent'))
            lineItemMarker = parsedLine.group('itemMarker')
            lineKey = parsedLine.group('key') or ''
            lineKeyExtra = parsedLine.group('keyExtra') or ''
            lineContent = (parsedLine.group('content') or parsedLine.group('itemContent') or '').strip()
            # - handle end-of-sections
            if lineIndent < indent:
                # we have dropped out of this item, push back the line and return what we have
                source.send((linenumber, line))
                debug('\tout one level')
                return returnItem
            # - array item
            if lineItemMarker:
                debug('\tarray item')
                # item in an array
                if returnItem is None:
                    debug('\tnew array, indent is %d' % lineIndent)
                    returnItem = []
                    indent = lineIndent
                elif not isinstance(returnItem, list):
                    raise Exception('Bad YAML, got a list item while working on a %s on line %d: %s' % (returnItem.__class__.__name__, linenumber, line.rstrip()))
                # Re-feed the item content at its effective column so the
                # recursive call parses it as the element's own first line.
                indentLevel = lineIndent + len(lineItemMarker)
                source.send((linenumber, (' ' * (indentLevel) )+ lineContent))
                returnItem += [parseYAML_inner(source=source, indent=indent + 1)]
            # - dict item
            elif lineKey:
                debug('\tdict item')
                if returnItem is None:
                    debug('\tnew dict, indent is %d' % lineIndent)
                    # new dict
                    returnItem = {}
                    indent = lineIndent
                elif not isinstance(returnItem, dict):
                    raise Exception('Bad YAML, got a dict value while working on a %s on line %d: %s' % (returnItem.__class__.__name__, linenumber, line.rstrip()))
                # Same push-back trick as for list items, offset past "key: ".
                indentLevel = lineIndent + len(lineKey) + len(lineKeyExtra)
                source.send((linenumber, (' ' * indentLevel) + lineContent))
                returnItem[lineKey] = parseYAML_inner(source=source, indent=indent + 1)
            # - data - one or more lines of text
            else:
                debug('\tvalue')
                if returnItem is None:
                    returnItem = yamlValue('', linenumber)
                if lineContent.strip() in ('|', '|-', '>'):
                    continue # yaml multiline marker
                elif not isinstance(returnItem, yamlValue):
                    raise Exception('Bad YAML, got a value while working on a %s on line %d: %s' % (returnItem.__class__.__name__, linenumber, line.rstrip()))
                if returnItem:
                    # Continuation line: append, keeping the first line's number.
                    returnItem = yamlValue(returnItem + "\n" + lineContent, returnItem.linenumber) # str subclasses are not fun
                else:
                    returnItem = yamlValue(lineContent, linenumber)
        return returnItem
    def parseYAML_generator(source):
        # Normalize ``source`` into an iterable of lines, then yield
        # (linenumber, line) pairs while honoring pushed-back lines from send().
        if hasattr(source, 'capitalize'):
            # String input: either a path to an existing file or raw YAML text.
            if os.path.isfile(source):
                source = open(source, 'r')
            else:
                source = source.splitlines(True)
        elif hasattr(source, 'readlines'):
            pass # the for loop will already work
        backlines = []
        for linenumber, line in enumerate(source):
            backline = None
            usedLine = False
            # Drain any pushed-back lines before consuming the next real line.
            while usedLine is False or backlines:
                if backlines:
                    backline = yield backlines.pop()
                else:
                    usedLine = True
                    backline = yield (linenumber + 1, line)
                while backline: # loops returning None for every send()
                    assert isinstance(backline, tuple)
                    assert isinstance(backline[0], int)
                    backlines.append(backline)
                    backline = yield None
    return parseYAML_inner(parseYAML_generator(source), indent=0)
if __name__ == '__main__':
    import optparse, pprint
    # Minimal CLI: parse each file argument and pretty-print the result.
    parser = optparse.OptionParser()
    parser.add_option("-d", "--debug", dest="debug", action="store_true", default=False, help="print debug information")
    (options, args) = parser.parse_args()
    printDebug = options.debug
    if len(args) < 1:
        parser.error('%s needs files to process' % os.path.basename(__file__))
    # Validate all paths up-front before parsing any of them.
    for filePath in args:
        if not os.path.isfile(filePath):
            # Bug fix: report the offending path, not this script's own name.
            sys.exit('target is not an existing file: %s' % os.path.basename(filePath))
    for filePath in args:
        print('=== %s' % filePath)
        pprint.pprint(parseYAML(filePath))
| 3,261
|
2,143
|
<reponame>hascode/ArchUnit
package com.tngtech.archunit.testutil;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.HashSet;
import java.util.Set;
/**
 * Reflection helpers for tests: member lookups that convert checked reflection
 * exceptions into unchecked ones, plus a type-hierarchy walker.
 */
public class ReflectionTestUtils {

    /** Returns the declared field of {@code clazz} with the given name. */
    public static Field field(Class<?> clazz, String fieldName) {
        try {
            return clazz.getDeclaredField(fieldName);
        } catch (NoSuchFieldException notFound) {
            throw new RuntimeException(notFound);
        }
    }

    /** Returns the declared constructor of {@code clazz} with the given parameter types. */
    public static Constructor<?> constructor(Class<?> clazz, Class<?>... parameterTypes) {
        try {
            return clazz.getDeclaredConstructor(parameterTypes);
        } catch (NoSuchMethodException notFound) {
            throw new RuntimeException(notFound);
        }
    }

    /** Returns the declared method of {@code clazz} with the given name and parameter types. */
    public static Method method(Class<?> clazz, String name, Class<?>... parameterTypes) {
        try {
            return clazz.getDeclaredMethod(name, parameterTypes);
        } catch (NoSuchMethodException notFound) {
            throw new RuntimeException(notFound);
        }
    }

    /**
     * Returns {@code clazz} together with all superclasses and all (transitively)
     * implemented interfaces.
     */
    public static Set<Class<?>> getHierarchy(Class<?> clazz) {
        Set<Class<?>> hierarchy = new HashSet<>();
        collectHierarchy(clazz, hierarchy);
        return hierarchy;
    }

    /** Recursively accumulates {@code type}, its superclasses and interfaces into {@code target}. */
    private static void collectHierarchy(Class<?> type, Set<Class<?>> target) {
        if (type == null) {
            return;
        }
        target.add(type);
        collectHierarchy(type.getSuperclass(), target);
        for (Class<?> implemented : type.getInterfaces()) {
            collectHierarchy(implemented, target);
        }
    }
}
| 609
|
713
|
<filename>core/src/test/java/org/infinispan/util/TestOperation.java
package org.infinispan.util;
import java.util.Objects;
import org.infinispan.Cache;
/**
 * Cache operations used by tests. Each constant performs its operation and
 * returns the value it expects the cache to hold for {@code key} afterwards
 * ({@code null} after a successful removal).
 *
 * @author <NAME>
 * @since 12.0
 */
public enum TestOperation {
   /** Unconditional put; the new value always wins. */
   PUT {
      @Override
      public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
         cache.put(key, newValue);
         return newValue;
      }
   },
   /** Puts only when absent; returns the pre-existing value when one was present. */
   PUT_IF_ABSENT {
      @Override
      public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
         V result = cache.putIfAbsent(key, newValue);
         return result == null ? newValue : result;
      }
   },
   /** Unconditional replace; needs the previous value to judge the outcome. */
   REPLACE {
      @Override
      public boolean requiresPreviousValue() {
         return true;
      }
      @Override
      public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
         V result = cache.replace(key, newValue);
         return Objects.equals(result, prevValue) ? newValue : result;
      }
   },
   /** Compare-and-replace: succeeds only when the cache still holds prevValue. */
   REPLACE_CONDITIONAL {
      @Override
      public boolean requiresPreviousValue() {
         return true;
      }
      @Override
      public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
         boolean result = cache.replace(key, prevValue, newValue);
         return result ? newValue : prevValue;
      }
   },
   /** Unconditional remove; afterwards no value should be associated. */
   REMOVE {
      @Override
      public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
         cache.remove(key);
         return null;
      }
   },
   /** Compare-and-remove: removes only when the cache still holds prevValue. */
   REMOVE_CONDITIONAL {
      @Override
      public boolean requiresPreviousValue() {
         return true;
      }
      @Override
      public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
         boolean result = cache.remove(key, prevValue);
         return result ? null : prevValue;
      }
   };

   /** Whether this operation's semantics depend on knowing the previous value. */
   public boolean requiresPreviousValue() {
      return false;
   }

   /** Executes the operation and returns the value expected in the cache afterwards. */
   public abstract <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue);
}
| 863
|
353
|
<reponame>weisongf/ceph-deploy<filename>ceph_deploy/tests/unit/hosts/test_util.py
from ceph_deploy.hosts import util
from mock import Mock
class TestInstallYumPriorities(object):
    # Verifies the yum-priorities package name chosen per distro/release:
    # the plugin was renamed between CentOS 5 and 6, and Fedora uses the new name.

    def setup(self):
        # Fresh mocks per test; _yum is injected so no real yum command runs.
        self.distro = Mock()
        self.patch_path = 'ceph_deploy.hosts.centos.install.pkg_managers.yum'
        self.yum = Mock()

    def test_centos_six(self):
        self.distro.release = ('6', '0')
        self.distro.normalized_name = 'centos'
        util.install_yum_priorities(self.distro, _yum=self.yum)
        # Second positional argument to yum() is the package name.
        assert self.yum.call_args[0][1] == 'yum-plugin-priorities'

    def test_centos_five(self):
        self.distro.release = ('5', '0')
        self.distro.normalized_name = 'centos'
        util.install_yum_priorities(self.distro, _yum=self.yum)
        assert self.yum.call_args[0][1] == 'yum-priorities'

    def test_fedora(self):
        self.distro.release = ('20', '0')
        self.distro.normalized_name = 'fedora'
        util.install_yum_priorities(self.distro, _yum=self.yum)
        assert self.yum.call_args[0][1] == 'yum-plugin-priorities'
| 507
|
948
|
/*
* Copyright (c) 2015-2019, Texas Instruments Incorporated
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of Texas Instruments Incorporated nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** ============================================================================
* @file CC2650_LAUNCHXL.h
*
* @brief CC2650 LaunchPad Board Specific header file.
*
* The CC2650_LAUNCHXL header file should be included in an application as
* follows:
* @code
* #include "CC2650_LAUNCHXL.h"
* @endcode
*
* ============================================================================
*/
#ifndef __CC2650_LAUNCHXL_BOARD_H__
#define __CC2650_LAUNCHXL_BOARD_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "contiki-conf.h"
/* Includes */
#include <ti/drivers/PIN.h>
#include <ti/devices/DeviceFamily.h>
#include DeviceFamily_constructPath(driverlib/ioc.h)
/* Externs */
extern const PIN_Config BoardGpioInitTable[];
/* Defines */
#define CC2650_LAUNCHXL
/* Mapping of pins to board signals using general board aliases
* <board signal alias> <pin mapping> <comments>
*/
/* Analog Capable DIOs */
#define CC2650_LAUNCHXL_DIO23_ANALOG IOID_23
#define CC2650_LAUNCHXL_DIO24_ANALOG IOID_24
#define CC2650_LAUNCHXL_DIO25_ANALOG IOID_25
#define CC2650_LAUNCHXL_DIO26_ANALOG IOID_26
#define CC2650_LAUNCHXL_DIO27_ANALOG IOID_27
#define CC2650_LAUNCHXL_DIO28_ANALOG IOID_28
#define CC2650_LAUNCHXL_DIO29_ANALOG IOID_29
#define CC2650_LAUNCHXL_DIO30_ANALOG IOID_30
/* Digital IOs */
#define CC2650_LAUNCHXL_DIO0 IOID_0
#define CC2650_LAUNCHXL_DIO1 IOID_1
#define CC2650_LAUNCHXL_DIO12 IOID_12
#define CC2650_LAUNCHXL_DIO15 IOID_15
#define CC2650_LAUNCHXL_DIO16_TDO IOID_16
#define CC2650_LAUNCHXL_DIO17_TDI IOID_17
#define CC2650_LAUNCHXL_DIO21 IOID_21
#define CC2650_LAUNCHXL_DIO22 IOID_22
/* Discrete Inputs */
#define CC2650_LAUNCHXL_PIN_BTN1 IOID_13
#define CC2650_LAUNCHXL_PIN_BTN2 IOID_14
/* GPIO */
#define CC2650_LAUNCHXL_GPIO_LED_ON 1
#define CC2650_LAUNCHXL_GPIO_LED_OFF 0
/* I2C */
#define CC2650_LAUNCHXL_I2C0_SCL0 IOID_4
#define CC2650_LAUNCHXL_I2C0_SDA0 IOID_5
/* I2S */
#define CC2650_LAUNCHXL_I2S_ADO IOID_25
#define CC2650_LAUNCHXL_I2S_ADI IOID_26
#define CC2650_LAUNCHXL_I2S_BCLK IOID_27
#define CC2650_LAUNCHXL_I2S_MCLK PIN_UNASSIGNED
#define CC2650_LAUNCHXL_I2S_WCLK IOID_28
/* LEDs */
#define CC2650_LAUNCHXL_PIN_LED_ON 1
#define CC2650_LAUNCHXL_PIN_LED_OFF 0
#define CC2650_LAUNCHXL_PIN_RLED IOID_6
#define CC2650_LAUNCHXL_PIN_GLED IOID_7
/* PWM Outputs */
#define CC2650_LAUNCHXL_PWMPIN0 CC2650_LAUNCHXL_PIN_RLED
#define CC2650_LAUNCHXL_PWMPIN1 CC2650_LAUNCHXL_PIN_GLED
#define CC2650_LAUNCHXL_PWMPIN2 PIN_UNASSIGNED
#define CC2650_LAUNCHXL_PWMPIN3 PIN_UNASSIGNED
#define CC2650_LAUNCHXL_PWMPIN4 PIN_UNASSIGNED
#define CC2650_LAUNCHXL_PWMPIN5 PIN_UNASSIGNED
#define CC2650_LAUNCHXL_PWMPIN6 PIN_UNASSIGNED
#define CC2650_LAUNCHXL_PWMPIN7 PIN_UNASSIGNED
/* SPI */
#define CC2650_LAUNCHXL_SPI_FLASH_CS IOID_20
#define CC2650_LAUNCHXL_FLASH_CS_ON 0
#define CC2650_LAUNCHXL_FLASH_CS_OFF 1
/* SPI Board */
#define CC2650_LAUNCHXL_SPI0_MISO IOID_8 /* RF1.20 */
#define CC2650_LAUNCHXL_SPI0_MOSI IOID_9 /* RF1.18 */
#define CC2650_LAUNCHXL_SPI0_CLK IOID_10 /* RF1.16 */
#define CC2650_LAUNCHXL_SPI0_CSN IOID_11
#define CC2650_LAUNCHXL_SPI1_MISO PIN_UNASSIGNED
#define CC2650_LAUNCHXL_SPI1_MOSI PIN_UNASSIGNED
#define CC2650_LAUNCHXL_SPI1_CLK PIN_UNASSIGNED
#define CC2650_LAUNCHXL_SPI1_CSN PIN_UNASSIGNED
/* UART Board */
#define CC2650_LAUNCHXL_UART_RX IOID_2 /* RXD */
#define CC2650_LAUNCHXL_UART_TX IOID_3 /* TXD */
#define CC2650_LAUNCHXL_UART_CTS IOID_19 /* CTS */
#define CC2650_LAUNCHXL_UART_RTS IOID_18 /* RTS */
/*!
* @brief Initialize the general board specific settings
*
* This function initializes the general board specific settings.
*/
void CC2650_LAUNCHXL_initGeneral(void);
/*!
* @brief Turn off the external flash on LaunchPads
*
*/
void CC2650_LAUNCHXL_shutDownExtFlash(void);
/*!
* @brief Wake up the external flash present on the board files
*
* This function toggles the chip select for the amount of time needed
* to wake the chip up.
*/
void CC2650_LAUNCHXL_wakeUpExtFlash(void);
/*!
* @def CC2650_LAUNCHXL_ADCBufName
* @brief Enum of ADCBufs
*/
typedef enum CC2650_LAUNCHXL_ADCBufName {
CC2650_LAUNCHXL_ADCBUF0 = 0,
CC2650_LAUNCHXL_ADCBUFCOUNT
} CC2650_LAUNCHXL_ADCBufName;
/*!
* @def CC2650_LAUNCHXL_ADCBuf0ChannelName
* @brief Enum of ADCBuf channels
*/
typedef enum CC2650_LAUNCHXL_ADCBuf0ChannelName {
CC2650_LAUNCHXL_ADCBUF0CHANNEL0 = 0,
CC2650_LAUNCHXL_ADCBUF0CHANNEL1,
CC2650_LAUNCHXL_ADCBUF0CHANNEL2,
CC2650_LAUNCHXL_ADCBUF0CHANNEL3,
CC2650_LAUNCHXL_ADCBUF0CHANNEL4,
CC2650_LAUNCHXL_ADCBUF0CHANNEL5,
CC2650_LAUNCHXL_ADCBUF0CHANNEL6,
CC2650_LAUNCHXL_ADCBUF0CHANNEL7,
CC2650_LAUNCHXL_ADCBUF0CHANNELVDDS,
CC2650_LAUNCHXL_ADCBUF0CHANNELDCOUPL,
CC2650_LAUNCHXL_ADCBUF0CHANNELVSS,
CC2650_LAUNCHXL_ADCBUF0CHANNELCOUNT
} CC2650_LAUNCHXL_ADCBuf0ChannelName;
/*!
* @def CC2650_LAUNCHXL_ADCName
* @brief Enum of ADCs
*/
typedef enum CC2650_LAUNCHXL_ADCName {
CC2650_LAUNCHXL_ADC0 = 0,
CC2650_LAUNCHXL_ADC1,
CC2650_LAUNCHXL_ADC2,
CC2650_LAUNCHXL_ADC3,
CC2650_LAUNCHXL_ADC4,
CC2650_LAUNCHXL_ADC5,
CC2650_LAUNCHXL_ADC6,
CC2650_LAUNCHXL_ADC7,
CC2650_LAUNCHXL_ADCDCOUPL,
CC2650_LAUNCHXL_ADCVSS,
CC2650_LAUNCHXL_ADCVDDS,
CC2650_LAUNCHXL_ADCCOUNT
} CC2650_LAUNCHXL_ADCName;
/*!
* @def CC2650_LAUNCHXL_CryptoName
* @brief Enum of Crypto names
*/
typedef enum CC2650_LAUNCHXL_CryptoName {
CC2650_LAUNCHXL_CRYPTO0 = 0,
CC2650_LAUNCHXL_CRYPTOCOUNT
} CC2650_LAUNCHXL_CryptoName;
/*!
* @def CC2650_LAUNCHXL_AESCCMName
* @brief Enum of AESCCM names
*/
typedef enum CC2650_LAUNCHXL_AESCCMName {
CC2650_LAUNCHXL_AESCCM0 = 0,
CC2650_LAUNCHXL_AESCCMCOUNT
} CC2650_LAUNCHXL_AESCCMName;
/*!
* @def CC2650_LAUNCHXL_AESGCMName
* @brief Enum of AESGCM names
*/
typedef enum CC2650_LAUNCHXL_AESGCMName {
CC2650_LAUNCHXL_AESGCM0 = 0,
CC2650_LAUNCHXL_AESGCMCOUNT
} CC2650_LAUNCHXL_AESGCMName;
/*!
* @def CC2650_LAUNCHXL_AESCBCName
* @brief Enum of AESCBC names
*/
typedef enum CC2650_LAUNCHXL_AESCBCName {
CC2650_LAUNCHXL_AESCBC0 = 0,
CC2650_LAUNCHXL_AESCBCCOUNT
} CC2650_LAUNCHXL_AESCBCName;
/*!
* @def CC2650_LAUNCHXL_AESCTRName
* @brief Enum of AESCTR names
*/
typedef enum CC2650_LAUNCHXL_AESCTRName {
CC2650_LAUNCHXL_AESCTR0 = 0,
CC2650_LAUNCHXL_AESCTRCOUNT
} CC2650_LAUNCHXL_AESCTRName;
/*!
* @def CC2650_LAUNCHXL_AESECBName
* @brief Enum of AESECB names
*/
typedef enum CC2650_LAUNCHXL_AESECBName {
CC2650_LAUNCHXL_AESECB0 = 0,
CC2650_LAUNCHXL_AESECBCOUNT
} CC2650_LAUNCHXL_AESECBName;
/*!
* @def CC2650_LAUNCHXL_AESCTRDRBGName
* @brief Enum of AESCTRDRBG names
*/
typedef enum CC2650_LAUNCHXL_AESCTRDRBGName {
CC2650_LAUNCHXL_AESCTRDRBG0 = 0,
CC2650_LAUNCHXL_AESCTRDRBGCOUNT
} CC2650_LAUNCHXL_AESCTRDRBGName;
/*!
* @def CC2650_LAUNCHXL_TRNGName
* @brief Enum of TRNG names
*/
typedef enum CC2650_LAUNCHXL_TRNGName {
CC2650_LAUNCHXL_TRNG0 = 0,
CC2650_LAUNCHXL_TRNGCOUNT
} CC2650_LAUNCHXL_TRNGName;
/*!
* @def CC2650_LAUNCHXL_GPIOName
* @brief Enum of GPIO names
*/
typedef enum CC2650_LAUNCHXL_GPIOName {
CC2650_LAUNCHXL_GPIO_S1 = 0,
CC2650_LAUNCHXL_GPIO_S2,
CC2650_LAUNCHXL_SPI_MASTER_READY,
CC2650_LAUNCHXL_SPI_SLAVE_READY,
CC2650_LAUNCHXL_GPIO_LED_GREEN,
CC2650_LAUNCHXL_GPIO_LED_RED,
CC2650_LAUNCHXL_GPIO_SPI_FLASH_CS,
CC2650_LAUNCHXL_SDSPI_CS,
CC2650_LAUNCHXL_GPIOCOUNT
} CC2650_LAUNCHXL_GPIOName;
/*!
* @def CC2650_LAUNCHXL_GPTimerName
* @brief Enum of GPTimer parts
*/
typedef enum CC2650_LAUNCHXL_GPTimerName {
CC2650_LAUNCHXL_GPTIMER0A = 0,
CC2650_LAUNCHXL_GPTIMER0B,
CC2650_LAUNCHXL_GPTIMER1A,
CC2650_LAUNCHXL_GPTIMER1B,
CC2650_LAUNCHXL_GPTIMER2A,
CC2650_LAUNCHXL_GPTIMER2B,
CC2650_LAUNCHXL_GPTIMER3A,
CC2650_LAUNCHXL_GPTIMER3B,
CC2650_LAUNCHXL_GPTIMERPARTSCOUNT
} CC2650_LAUNCHXL_GPTimerName;
/*!
* @def CC2650_LAUNCHXL_GPTimers
* @brief Enum of GPTimers
*/
typedef enum CC2650_LAUNCHXL_GPTimers {
CC2650_LAUNCHXL_GPTIMER0 = 0,
CC2650_LAUNCHXL_GPTIMER1,
CC2650_LAUNCHXL_GPTIMER2,
CC2650_LAUNCHXL_GPTIMER3,
CC2650_LAUNCHXL_GPTIMERCOUNT
} CC2650_LAUNCHXL_GPTimers;
/*!
* @def CC2650_LAUNCHXL_I2CName
* @brief Enum of I2C names
*/
typedef enum CC2650_LAUNCHXL_I2CName {
#if TI_I2C_CONF_I2C0_ENABLE
CC2650_LAUNCHXL_I2C0 = 0,
#endif
CC2650_LAUNCHXL_I2CCOUNT
} CC2650_LAUNCHXL_I2CName;
/*!
* @def CC2650_LAUNCHXL_I2SName
* @brief Enum of I2S names
*/
typedef enum CC2650_LAUNCHXL_I2SName {
CC2650_LAUNCHXL_I2S0 = 0,
CC2650_LAUNCHXL_I2SCOUNT
} CC2650_LAUNCHXL_I2SName;
/*!
* @def CC2650_LAUNCHXL_NVSName
* @brief Enum of NVS names
*/
typedef enum CC2650_LAUNCHXL_NVSName {
#if TI_NVS_CONF_NVS_INTERNAL_ENABLE
CC2650_LAUNCHXL_NVSCC26XX0 = 0,
#endif
#if TI_NVS_CONF_NVS_EXTERNAL_ENABLE
CC2650_LAUNCHXL_NVSSPI25X0,
#endif
CC2650_LAUNCHXL_NVSCOUNT
} CC2650_LAUNCHXL_NVSName;
/*!
* @def CC2650_LAUNCHXL_PWMName
* @brief Enum of PWM outputs
*/
typedef enum CC2650_LAUNCHXL_PWMName {
CC2650_LAUNCHXL_PWM0 = 0,
CC2650_LAUNCHXL_PWM1,
CC2650_LAUNCHXL_PWM2,
CC2650_LAUNCHXL_PWM3,
CC2650_LAUNCHXL_PWM4,
CC2650_LAUNCHXL_PWM5,
CC2650_LAUNCHXL_PWM6,
CC2650_LAUNCHXL_PWM7,
CC2650_LAUNCHXL_PWMCOUNT
} CC2650_LAUNCHXL_PWMName;
/*!
* @def CC2650_LAUNCHXL_SDName
* @brief Enum of SD names
*/
typedef enum CC2650_LAUNCHXL_SDName {
CC2650_LAUNCHXL_SDSPI0 = 0,
CC2650_LAUNCHXL_SDCOUNT
} CC2650_LAUNCHXL_SDName;
/*!
* @def CC2650_LAUNCHXL_SPIName
* @brief Enum of SPI names
*/
typedef enum CC2650_LAUNCHXL_SPIName {
#if TI_SPI_CONF_SPI0_ENABLE
CC2650_LAUNCHXL_SPI0 = 0,
#endif
#if TI_SPI_CONF_SPI1_ENABLE
CC2650_LAUNCHXL_SPI1,
#endif
CC2650_LAUNCHXL_SPICOUNT
} CC2650_LAUNCHXL_SPIName;
/*!
* @def CC2650_LAUNCHXL_UARTName
* @brief Enum of UARTs
*/
typedef enum CC2650_LAUNCHXL_UARTName {
#if TI_UART_CONF_UART0_ENABLE
CC2650_LAUNCHXL_UART0 = 0,
#endif
CC2650_LAUNCHXL_UARTCOUNT
} CC2650_LAUNCHXL_UARTName;
/*!
* @def CC2650_LAUNCHXL_UDMAName
* @brief Enum of DMA buffers
*/
typedef enum CC2650_LAUNCHXL_UDMAName {
CC2650_LAUNCHXL_UDMA0 = 0,
CC2650_LAUNCHXL_UDMACOUNT
} CC2650_LAUNCHXL_UDMAName;
/*!
* @def CC2650_LAUNCHXL_WatchdogName
* @brief Enum of Watchdogs
*/
typedef enum CC2650_LAUNCHXL_WatchdogName {
CC2650_LAUNCHXL_WATCHDOG0 = 0,
CC2650_LAUNCHXL_WATCHDOGCOUNT
} CC2650_LAUNCHXL_WatchdogName;
#ifdef __cplusplus
}
#endif
#endif /* __CC2650_LAUNCHXL_BOARD_H__ */
| 6,396
|
8,844
|
/* This is
a multi-line
comment */
| 12
|
667
|
# Emits C-style { "canonical", "label" } alias pairs for the WHATWG encodings
# table. Fix: the original used Python-2-only print statements; with the
# __future__ import the output is byte-identical on Python 2 and the script
# also runs on Python 3.
from __future__ import print_function

import json

# https://raw.githubusercontent.com/whatwg/encoding/master/encodings.json
with open("encodings.json") as f:
    s = json.loads(f.read())

print("// autogenerated by /tools/encodings/gen-encoding-aliases.py\n")

for gr in s:
    # Group heading, e.g. "Legacy single-byte encodings".
    print("// {}".format(gr["heading"]))
    for dst in gr["encodings"]:
        dst_enc = dst["name"]
        # The canonical name maps to itself...
        print('{{ "{}", "{}" }},'.format(dst_enc, dst_enc))
        # ...and every label (alias) maps to the canonical name.
        for src_enc in dst["labels"]:
            print('{{ "{}", "{}" }},'.format(dst_enc, src_enc))
    print("")
| 227
|
542
|
#ifndef GUARD_le_imgui_H
#define GUARD_le_imgui_H

#include "le_core.h"

/* Opaque module instance. */
struct le_imgui_o;
struct le_renderpass_o;    // declared in le_renderer.h
struct le_render_module_o; // declared in le_renderer.h
struct LeUiEvent;          // declared in le_ui_event.h

// clang-format off
// Function-pointer API table for the hot-reloadable le_imgui module.
struct le_imgui_api {
	struct le_imgui_interface_t {
		le_imgui_o * ( * create ) ( );
		void ( * destroy ) ( le_imgui_o* self );
		// Frame bracket: call begin_frame before building UI, end_frame after.
		void ( * begin_frame ) ( le_imgui_o* self);
		void ( * end_frame ) ( le_imgui_o* self);
		// Registers the module's resources with the render module; display size in pixels.
		void ( * setup_resources )( le_imgui_o *self, le_render_module_o *p_render_module, float display_width, float display_height );
		// Records draw commands for the current UI into the given renderpass.
		void ( * draw )( le_imgui_o* self, le_renderpass_o* renderpass);
		// Forwards numEvents UI events (input, window) to the module.
		void ( * process_events ) ( le_imgui_o* self, LeUiEvent const * events, size_t numEvents);
	};

	le_imgui_interface_t le_imgui_i;
};
// clang-format on

LE_MODULE( le_imgui );
LE_MODULE_LOAD_DEFAULT( le_imgui );

#ifdef __cplusplus
// Convenience accessors mirroring the module's naming convention.
namespace le_imgui {
static const auto &api = le_imgui_api_i;
static const auto &le_imgui_i = api -> le_imgui_i;
} // namespace le_imgui
#endif // __cplusplus

#endif
|
3,212
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.security.util.crypto;
import at.favre.lib.crypto.bcrypt.BCrypt;
import at.favre.lib.crypto.bcrypt.Radix64Encoder;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.TimeUnit;
import org.apache.commons.codec.binary.Base64;
import org.bouncycastle.util.encoders.Hex;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Provides an implementation of {@code Bcrypt} for secure password hashing.
* <p>
* One <strong>critical</strong> difference is that this implementation uses a
* <strong>static universal</strong> salt unless instructed otherwise, which provides
* strict determinism across nodes in a cluster. The purpose for this is to allow for
* blind equality comparison of sensitive values hashed on different nodes (with
* potentially different {@code nifi.sensitive.props.key} values) during flow inheritance
* (see {@code FingerprintFactory}).
* <p>
* The resulting output is referred to as a <em>hash</em> to be consistent with {@link SecureHasher} terminology.
*/
public class BcryptSecureHasher extends AbstractSecureHasher {
private static final Logger logger = LoggerFactory.getLogger(BcryptSecureHasher.class);
/**
* These values can be calculated automatically using the code {@see BcryptCipherProviderGroovyTest#calculateMinimumParameters} or manually updated by a maintainer
*/
private static final int DEFAULT_COST = 12;
private static final int DEFAULT_SALT_LENGTH = 16;
private static final int MIN_COST = 4;
private static final int MAX_COST = 31;
private static final int MIN_SALT_LENGTH = 16;
private final int cost;
/**
* Instantiates a Bcrypt secure hasher using the default cost parameter
* ({@code cost = }{@link #DEFAULT_COST}
*/
public BcryptSecureHasher() {
this(DEFAULT_COST, 0);
}
/**
* Instantiates a Bcrypt secure hasher using the provided cost parameters. A static
* {@link #DEFAULT_SALT_LENGTH} byte salt will be generated on every hash request.
*
* @param cost the (log) number of key expansion rounds [4..31]
*/
public BcryptSecureHasher(int cost) {
this(cost, 0);
}
/**
* Instantiates an Bcrypt secure hasher using the provided cost parameters. A unique
* salt of the specified length will be generated on every hash request.
*
* @param cost the (log) number of key expansion rounds [4..31]
* @param saltLength the salt length in bytes {@code >= 8})
*/
public BcryptSecureHasher(int cost, int saltLength) {
validateParameters(cost, saltLength);
this.cost = cost;
this.saltLength = saltLength;
}
/**
* Enforces valid Scrypt secure hasher cost parameters are provided.
*
* @param cost the (log) number of key expansion rounds [4..31]
* @param saltLength the salt length in bytes {@code >= 16})
*/
private void validateParameters(Integer cost, Integer saltLength) {
if (!isCostValid(cost)) {
logger.error("The provided cost factor {} is outside the boundary of 4 to 31.", cost);
throw new IllegalArgumentException("Invalid cost is not within the cost factor boundary.");
}
initializeSalt(saltLength);
}
/**
* Returns true if the provided cost factor is within boundaries. The lower bound >= 4 and the
* upper bound <= 31.
*
* @param cost the (log) number of key expansion rounds [4..31]
* @return true if cost factor is within boundaries
*/
public static boolean isCostValid(Integer cost) {
if (cost < DEFAULT_COST) {
logger.warn("The provided cost factor {} is below the recommended minimum {}.", cost, DEFAULT_COST);
}
return cost >= MIN_COST && cost <= MAX_COST;
}
public static String convertBcryptRadix64ToMimeBase64(String radix64) {
return CipherUtility.encodeBase64NoPadding(new Radix64Encoder.Default().decode(radix64.getBytes(StandardCharsets.UTF_8)));
}
public static String convertMimeBase64ToBcryptRadix64(String base64) {
return new String(new Radix64Encoder.Default().encode(Base64.decodeBase64(base64)), StandardCharsets.UTF_8);
}
/**
* Returns the algorithm-specific default salt length in bytes.
*
* @return the default salt length
*/
@Override
int getDefaultSaltLength() {
return DEFAULT_SALT_LENGTH;
}
/**
 * Returns the algorithm-specific minimum salt length in bytes.
 *
 * @return the minimum salt length ({@code MIN_SALT_LENGTH})
 */
@Override
int getMinSaltLength() {
    return MIN_SALT_LENGTH;
}
/**
 * Returns the algorithm-specific maximum salt length in bytes. For this hasher the
 * value is effectively unbounded ({@code Integer.MAX_VALUE}).
 *
 * @return the max salt length
 */
@Override
int getMaxSaltLength() {
    return Integer.MAX_VALUE;
}
/**
 * Returns the algorithm-specific name for logging and messages.
 *
 * @return the algorithm name, {@code "Bcrypt"}
 */
@Override
String getAlgorithmName() {
    return "Bcrypt";
}
/**
 * Returns {@code true} if the algorithm can accept empty (non-{@code null}) inputs.
 * This hasher does not: {@code ""} is rejected.
 *
 * @return true if {@code ""} is allowable input; always {@code false} here
 */
@Override
boolean acceptsEmptyInput() {
    return false;
}
/**
 * Internal method to hash the raw bytes using a freshly generated salt.
 *
 * @param input the raw bytes to hash (can be length 0)
 * @return the generated hash
 */
byte[] hash(byte[] input) {
    // getSalt() yields only the raw salt bytes; delegate to the salted variant.
    return hash(input, getSalt());
}
/**
 * Internal method to hash the raw bytes with an explicit salt.
 *
 * @param input the raw bytes to hash (can be length 0)
 * @param rawSalt the raw salt bytes to apply
 * @return the generated hash
 * @throws IllegalArgumentException if the salt length is outside the allowed range
 */
byte[] hash(byte[] input, byte[] rawSalt) {
    logger.debug("Creating Bcrypt hash with salt [{}] ({} bytes)", Hex.toHexString(rawSalt), rawSalt.length);
    if (!isSaltLengthValid(rawSalt.length)) {
        throw new IllegalArgumentException("The salt length (" + rawSalt.length + " bytes) is invalid");
    }
    // Time the expensive key-expansion step for diagnostics.
    final long started = System.nanoTime();
    final byte[] digest = BCrypt.withDefaults().hash(cost, rawSalt, input);
    final long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - started);
    logger.debug("Generated Bcrypt hash in {} ms", elapsedMillis);
    return digest;
}
}
| 2,548
|
2,151
|
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_MAIN_THREAD_DEADLINE_TASK_RUNNER_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_MAIN_THREAD_DEADLINE_TASK_RUNNER_H_
#include "base/callback.h"
#include "base/macros.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/scheduler/child/cancelable_closure_holder.h"
namespace blink {
namespace scheduler {
// Runs a posted task at latest by a given deadline, but possibly sooner.
class PLATFORM_EXPORT DeadlineTaskRunner {
 public:
  // |callback| is invoked on |task_runner| when the deadline task fires.
  DeadlineTaskRunner(const base::RepeatingClosure& callback,
                     scoped_refptr<base::SingleThreadTaskRunner> task_runner);
  ~DeadlineTaskRunner();
  // If there is no outstanding task then a task is posted to run after |delay|.
  // If there is an outstanding task which is scheduled to run:
  //   a) sooner - then this is a NOP.
  //   b) later - then the outstanding task is cancelled and a new task is
  //      posted to run after |delay|.
  //
  // Once the deadline task has run, we reset.
  void SetDeadline(const base::Location& from_here,
                   base::TimeDelta delay,
                   base::TimeTicks now);
 private:
  void RunInternal();
  // Cancelable wrapper around the currently posted RunInternal task.
  CancelableClosureHolder cancelable_run_internal_;
  // User-supplied callback to run when the deadline expires.
  base::RepeatingClosure callback_;
  // Absolute time of the currently scheduled deadline (see SetDeadline).
  base::TimeTicks deadline_;
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  DISALLOW_COPY_AND_ASSIGN(DeadlineTaskRunner);
};
} // namespace scheduler
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_MAIN_THREAD_DEADLINE_TASK_RUNNER_H_
| 677
|
510
|
package com.didi.drouter.remote;
import android.os.Parcel;
import android.os.Parcelable;
/**
 * Created by gaowei on 2018/10/23
 *
 * Parcelable carrier for the outcome of a remote DRouter call. The payload is
 * passed through RemoteStream so it can cross the process boundary.
 */
class RemoteResult implements Parcelable {

    static final String EXECUTING = "executing";
    static final String SUCCESS = "success";
    static final String FAIL = "fail";

    public static final Creator<RemoteResult> CREATOR = new Creator<RemoteResult>() {
        @Override
        public RemoteResult createFromParcel(Parcel source) {
            return new RemoteResult(source);
        }

        @Override
        public RemoteResult[] newArray(int size) {
            return new RemoteResult[size];
        }
    };

    String state;
    Object result;

    RemoteResult(String state) {
        this.state = state;
    }

    RemoteResult(Parcel source) {
        // Read order mirrors writeToParcel: state first, then the payload.
        state = source.readString();
        result = RemoteStream.reverse(source.readValue(getClass().getClassLoader()));
    }

    @Override
    public void writeToParcel(Parcel out, int flags) {
        out.writeString(state);
        out.writeValue(RemoteStream.transform(result));
    }

    @Override
    public int describeContents() {
        // No file descriptors are marshalled.
        return 0;
    }
}
| 431
|
2,921
|
{
"name": "Hard",
"symbol": "HARD",
"type": "KAVA",
"decimals": 6,
"description": "HARD Protocol is a decentralized money market built on Kava, enabling the lending and borrowing of cross-chain assets. HARD Protocol supports supply-side deposits for BTC, XRP, BNB, BUSD, and USDX. The platform will allow overcollateralized borrowing for supported assets.",
"website": "https://hard.kava.io/",
"explorer": "https://www.mintscan.io/kava",
"status": "active",
"id": "hard"
}
| 177
|
573
|
#!/usr/bin/env python3
#
# Install dylib dependencies, strip debugging symbols, and deal with the shitfucked digital abortion that is the macOS dynamic linker.
#
import re
import sys
import subprocess
import itertools
import shutil
from os import chmod, environ, pathsep
from pathlib import Path
# Meson invokes this script with the build/source/install roots in the environment.
build_root = Path(environ['MESON_BUILD_ROOT'])
source_root = Path(environ['MESON_SOURCE_ROOT'])
install_prefix = Path(environ['MESON_INSTALL_DESTDIR_PREFIX'])

args = sys.argv[1:]

# BUG FIX: the original kept these as `filter(...)` objects. A filter object is
# always truthy, so the `if macos_tool_paths:` guard below fired even for an
# empty PATH list; worse, `filter(None, (Path(x) ...))` never dropped empty
# entries because Path('') normalizes to the truthy Path('.'). Filter the raw
# strings first and materialize real lists.
macos_lib_paths = [Path(x) for x in args.pop(0).split(pathsep) if x]
macos_tool_paths = [Path(x) for x in args.pop(0).split(pathsep) if x]
macos_tool_prefix = args.pop(0)

if macos_tool_paths:
    environ['PATH'] = pathsep.join(itertools.chain((str(p) for p in macos_tool_paths), [environ['PATH']]))

# Bundle layout: the executable plus a sibling 'dylibs' directory for deps.
exe_path = install_prefix / 'Taisei.app' / 'Contents' / 'MacOS' / 'Taisei'
dylib_dir_path = exe_path.parent / 'dylibs'
dylib_dir_path.mkdir(mode=0o755, parents=True, exist_ok=True)
def tool(name):
    """Return a callable that runs the prefixed macOS binary *name*.

    The returned function echoes the command line (quoting arguments that
    contain spaces) and returns the tool's captured stdout as text.
    Uses the module-level ``macos_tool_prefix``.
    """
    def run(*args):
        cmd = [macos_tool_prefix + name, *args]
        shown = ' '.join(repr(str(part)) if ' ' in str(part) else str(part) for part in cmd)
        print(shown)
        return subprocess.check_output(cmd, universal_newlines=True)
    return run
# Prefixed wrappers for the Mach-O tooling used below.
otool = tool('otool')
install_name_tool = tool('install_name_tool')
strip = tool('strip')
def fix_libs(opath):
    """Recursively bundle the non-system dylib dependencies of *opath*.

    Every linked dylib outside /usr/lib (and not already @-relative) is copied
    into ``dylib_dir_path``, the binary's load command is rewritten to point at
    ``@executable_path/dylibs/<name>``, and the copy is processed recursively.
    Each processed binary also gets its install name reset and its debugging
    symbols stripped. Returns the set of installed dylib destination paths.

    Relies on module globals: otool, install_name_tool, strip, dylib_dir_path.
    """
    handled = set()
    # Matches the library path in an `otool -L` output line, e.g.
    # "\t/usr/local/lib/libfoo.dylib (compatibility version ...)".
    regex = re.compile(r'\s*(.*?\.dylib) \(')
    def install(src, dst):
        src = str(src)
        dst = str(dst)
        print('Installing {0} as {1}'.format(src, dst))
        shutil.copy(str(src), str(dst))
        chmod(str(dst), 0o755)
    def fix(path):
        for lib in regex.findall(otool('-L', path)):
            # Skip system libraries and references that are already relative.
            if lib.startswith('/usr/lib') or lib.startswith('@'):
                continue
            src_lib_path = Path(lib)
            dst_lib_path = dylib_dir_path / src_lib_path.name
            # Point this binary's load command at the bundled copy.
            install_name_tool(path, '-change', lib, '@executable_path/dylibs/{0}'.format(dst_lib_path.name))
            if dst_lib_path in handled:
                continue
            install(src_lib_path, dst_lib_path)
            handled.add(dst_lib_path)
            # The copied dylib may itself link further dylibs; recurse.
            fix(dst_lib_path)
        install_name_tool(path, '-id', path.name)
        # Strip debugging symbols only (-S).
        strip('-S', path)
    fix(opath)
    return handled
# Fix up the installed executable; append any dylibs we copied to Meson's
# install log so they are removed on uninstall.
new_files = fix_libs(exe_path)
if new_files:
    with (build_root / 'meson-logs' / 'install-log.txt').open('a') as f:
        f.write('# ...except when it does :^)\n')
        for i in new_files:
            f.write('{0}\n'.format(str(i)))
| 1,148
|
322
|
import sys
sys.path.append('../../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generate_gif import generate_from_flame_sequence
from my_utils.generic_utils import save_set_of_images
from my_utils import compute_fid
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
    """Pick the generator input from the enabled conditioning signals.

    Both conditions on  -> textured render and normal map concatenated on the
    channel axis; only one on -> that tensor alone; neither -> the raw FLAME
    parameter vector.
    """
    if normal_map_cond:
        return torch.cat((textured_rndr, norm_map), dim=1) if texture_cond else norm_map
    return textured_rndr if texture_cond else flm_params
# General settings
save_images = True
# Total packed FLAME conditioning vector length (shape + exp + pose + cam + tex + lit).
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
# Highest progressive-growing step for this resolution: log2(256) - 2 = 6.
step_max = int(np.log2(resolution) - 2)
root_out_dir = f'{cnst.output_root}sample/'
num_smpl_to_eval_on = 1000
use_styled_conv_stylegan2 = True
# Camera used to render FLAME meshes at 512x512.
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
run_ids_1 = [29, ]  # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
# Per-run-id experiment config: checkpoint id plus conditioning flags.
settings_for_runs = \
    {24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
          'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
     29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
          'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
     7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
         'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
     3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
         'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
     8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
         'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}

overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)

# Pack the first num_smpl_to_eval_on saved FLAME fits into one float32 matrix.
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
for i, key in enumerate(fl_param_dict):
    flame_param = fl_param_dict[key]
    flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'],
                             flame_param['tex'], flame_param['lit'].flatten()))
    # tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
    # flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
    # import ipdb; ipdb.set_trace()
    flm_params[i, :] = flame_param.astype('float32')
    if i == num_smpl_to_eval_on - 1:
        break

batch_size = 64
flame_decoder = overlay_visualizer.deca.flame.eval()
# For each configured run, load its generator checkpoint, sample images for all
# FLAME parameter batches, and save generator output plus rendered meshes.
for run_idx in run_ids_1:
    # import ipdb; ipdb.set_trace()
    generator_1 = torch.nn.DataParallel(
        StyledGenerator(embedding_vocab_size=69158,
                        rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
                        normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
                        core_tensor_res=core_tensor_res,
                        w_truncation_factor=1.0,
                        apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
                        n_mlp=8)).cuda()
    model_idx = settings_for_runs[run_idx]['model_idx']
    ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
    generator_1.load_state_dict(ckpt1['generator_running'])
    generator_1 = generator_1.eval()

    # images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
    pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
    pbar.set_description('Generating_images')
    flame_mesh_imgs = None
    # Output filename prefix: 'mdl1_' marks the full model, 'mdl2_' the rest.
    mdl_id = 'mdl2_'
    if settings_for_runs[run_idx]['name'] == 'full_model':
        mdl_id = 'mdl1_'
    for batch_idx in pbar:
        flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
        flm_batch = torch.from_numpy(flm_batch).cuda()
        # Re-position the face to a canonical eye location before rendering.
        flm_batch = position_to_given_location(flame_decoder, flm_batch)
        batch_size_true = flm_batch.shape[0]

        if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
                settings_for_runs[run_idx]['rendered_flame_as_condition']:
            # Slice the packed FLAME vector back into its components.
            cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
            shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
            exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
            pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
            # import ipdb; ipdb.set_trace()
            light_code = \
                flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
            texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
            norma_map_img, _, _, _, rend_flm = \
                overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
                                                     camera_params=cam)
            # Map renders from [0, 1] to [-1, 1] and resize to generator input.
            rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
            norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
            rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
            norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
        else:
            rend_flm = None
            norma_map_img = None

        gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
                             settings_for_runs[run_idx]['rendered_flame_as_condition'])

        # torch.manual_seed(2)
        # Random identity embedding per sample; 69158 presumably matches the
        # identity vocabulary used at training time -- TODO confirm.
        identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
                                            device='cuda')

        mdl_1_gen_images = generic_utils.get_images_from_flame_params(
            flame_params=gen_1_in.cpu().numpy(), pose=None,
            model=generator_1,
            step=step_max, alpha=alpha,
            input_indices=identity_embeddings.cpu().numpy())
        # import ipdb; ipdb.set_trace()
        # Generator output is in [-1, 1]; saving rescales to [0, 1] below.
        images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
        flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy()

        save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name'])
        save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}',
                           images=(images + 1) / 2, show_prog_bar=True)

        #save flam rndr
        save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison',
                                                     settings_for_runs[run_idx]['name'])
        save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}',
                           images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)

# save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy())
| 3,798
|
399
|
from openrec.tf1.utils.samplers.sampler import Sampler
from openrec.tf1.utils.samplers.random_pairwise_sampler import RandomPairwiseSampler
from openrec.tf1.utils.samplers.vbpr_pairwise_sampler import VBPRPairwiseSampler
from openrec.tf1.utils.samplers.random_pointwise_sampler import RandomPointwiseSampler
from openrec.tf1.utils.samplers.stratified_pointwise_sampler import StratifiedPointwiseSampler
from openrec.tf1.utils.samplers.evaluation_sampler import EvaluationSampler
from openrec.tf1.utils.samplers.vbpr_evaluation_sampler import VBPREvaluationSampler
from openrec.tf1.utils.samplers.temporal_sampler import TemporalSampler
from openrec.tf1.utils.samplers.temporal_evaluation_sampler import TemporalEvaluationSampler
from openrec.tf1.utils.samplers.youtube_sampler import YouTubeSampler
from openrec.tf1.utils.samplers.youtube_evaluation_sampler import YouTubeEvaluationSampler
| 294
|
2,517
|
// This file is part of CAF, the C++ Actor Framework. See the file LICENSE in
// the main distribution directory for license terms and copyright or visit
// https://github.com/actor-framework/actor-framework/blob/master/LICENSE.
#pragma once
#include <chrono>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iosfwd>
#include <iterator>
#include <map>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>
#include "caf/config_value_reader.hpp"
#include "caf/config_value_writer.hpp"
#include "caf/detail/bounds_checker.hpp"
#include "caf/detail/core_export.hpp"
#include "caf/detail/move_if_not_ptr.hpp"
#include "caf/detail/parse.hpp"
#include "caf/detail/type_traits.hpp"
#include "caf/dictionary.hpp"
#include "caf/expected.hpp"
#include "caf/fwd.hpp"
#include "caf/inspector_access.hpp"
#include "caf/inspector_access_type.hpp"
#include "caf/optional.hpp"
#include "caf/raise_error.hpp"
#include "caf/string_algorithms.hpp"
#include "caf/string_view.hpp"
#include "caf/sum_type.hpp"
#include "caf/sum_type_access.hpp"
#include "caf/sum_type_token.hpp"
#include "caf/timespan.hpp"
#include "caf/timestamp.hpp"
#include "caf/uri.hpp"
#include "caf/variant.hpp"
namespace caf::detail {

// Compile-time trait: true exactly for the types stored natively in
// config_value's variant; used to bypass inspection when assigning.
template <class T>
struct is_config_value_type : std::false_type {};

#define CAF_ADD_CONFIG_VALUE_TYPE(type_name)                                   \
  template <>                                                                  \
  struct is_config_value_type<type_name> : std::true_type {}

CAF_ADD_CONFIG_VALUE_TYPE(none_t);
CAF_ADD_CONFIG_VALUE_TYPE(int64_t);
CAF_ADD_CONFIG_VALUE_TYPE(bool);
CAF_ADD_CONFIG_VALUE_TYPE(double);
CAF_ADD_CONFIG_VALUE_TYPE(timespan);
CAF_ADD_CONFIG_VALUE_TYPE(uri);
CAF_ADD_CONFIG_VALUE_TYPE(std::string);
CAF_ADD_CONFIG_VALUE_TYPE(std::vector<config_value>);
CAF_ADD_CONFIG_VALUE_TYPE(dictionary<config_value>);

#undef CAF_ADD_CONFIG_VALUE_TYPE

// Convenience alias for is_config_value_type<T>::value.
template <class T>
constexpr bool is_config_value_type_v = is_config_value_type<T>::value;

} // namespace caf::detail
namespace caf {
/// A type for config parameters with similar interface to a `variant`. This
/// type is not implemented as a simple variant alias because variants cannot
/// contain lists of themselves.
class CAF_CORE_EXPORT config_value {
public:
  // -- member types -----------------------------------------------------------

  using integer = int64_t;
  using boolean = bool;
  using real = double;
  using string = std::string;
  using list = std::vector<config_value>;
  using dictionary = caf::dictionary<config_value>;
  // Alternative order fixes the variant index; index 0 (none_t) means "null".
  using types = detail::type_list<none_t, integer, boolean, real, timespan, uri,
                                  string, list, dictionary>;
  using variant_type = detail::tl_apply_t<types, variant>;

  // -- constructors, destructors, and assignment operators --------------------

  config_value() = default;
  config_value(config_value&& other) = default;
  config_value(const config_value& other) = default;

  // Converting constructor; enabled for non-config_value types and routed
  // through the private set() overloads.
  template <class T, class E = detail::enable_if_t<
                       !std::is_same<detail::decay_t<T>, config_value>::value>>
  explicit config_value(T&& x) {
    set(std::forward<T>(x));
  }

  config_value& operator=(config_value&& other) = default;
  config_value& operator=(const config_value& other) = default;

  template <class T, class E = detail::enable_if_t<
                       !std::is_same<detail::decay_t<T>, config_value>::value>>
  config_value& operator=(T&& x) {
    set(std::forward<T>(x));
    return *this;
  }

  ~config_value();

  // -- parsing ----------------------------------------------------------------

  /// Tries to parse a value from given characters.
  static expected<config_value> parse(string_view::iterator first,
                                      string_view::iterator last);

  /// Tries to parse a value from `str`.
  static expected<config_value> parse(string_view str);

  /// Tries to parse a config value (list) from `str` and to convert it to an
  /// allowed input message type for `Handle`.
  template <class Handle>
  static optional<message> parse_msg(string_view str, const Handle&) {
    auto allowed = Handle::allowed_inputs();
    return parse_msg_impl(str, allowed);
  }

  // -- properties -------------------------------------------------------------

  /// Converts the value to a list with one element (unless the config value
  /// holds `nullptr`). Does nothing if the value already is a list.
  void convert_to_list();

  /// Returns the value as a list, converting it to one if needed.
  list& as_list();

  /// Returns the value as a dictionary, converting it to one if needed. The
  /// only data structure that CAF can convert to a dictionary is a list of
  /// lists, where each nested list contains exactly two elements (key and
  /// value). In all other cases, the conversion results in an empty dictionary.
  dictionary& as_dictionary();

  /// Appends `x` to a list. Converts this config value to a list first by
  /// calling `convert_to_list` if needed.
  void append(config_value x);

  /// Returns a human-readable type name of the current value.
  const char* type_name() const noexcept;

  /// Returns the underlying variant.
  variant_type& get_data() {
    return data_;
  }

  /// Returns the underlying variant.
  const variant_type& get_data() const {
    return data_;
  }

  /// Returns a pointer to the underlying variant.
  variant_type* get_data_ptr() {
    return &data_;
  }

  /// Returns a pointer to the underlying variant.
  const variant_type* get_data_ptr() const {
    return &data_;
  }

  /// Checks whether this config value is not null.
  explicit operator bool() const noexcept {
    return data_.index() != 0;
  }

  /// Checks whether this config value is null.
  bool operator!() const noexcept {
    return data_.index() == 0;
  }

  /// @private
  ptrdiff_t signed_index() const noexcept;

  // -- utility ----------------------------------------------------------------

  /// @private
  type_id_t type_id() const noexcept;

  /// @private
  error_code<sec> default_construct(type_id_t);

  /// @private
  expected<bool> to_boolean() const;

  /// @private
  expected<integer> to_integer() const;

  /// @private
  expected<real> to_real() const;

  /// @private
  expected<timespan> to_timespan() const;

  /// @private
  expected<uri> to_uri() const;

  /// @private
  expected<list> to_list() const;

  /// @private
  expected<dictionary> to_dictionary() const;

  /// @private
  bool can_convert_to_dictionary() const;

  /// @private
  // Deserializes a T from this value through config_value_reader; `token`
  // selects the inspector-access overload at compile time.
  template <class T, class Token>
  expected<T> convert_to(Token token) const {
    auto tmp = T{};
    config_value_reader reader{this};
    if (detail::load(reader, tmp, token))
      return {std::move(tmp)};
    else
      return {reader.move_error()};
  }

  // Stores x directly when it is a native variant type; otherwise serializes
  // it into this value via config_value_writer.
  template <class T>
  error assign(const T& x) {
    if constexpr (detail::is_config_value_type_v<T>) {
      data_ = x;
      return {};
    } else {
      config_value_writer writer{this};
      if (writer.apply(x))
        return {};
      else
        return {writer.move_error()};
    }
  }

  // Human-readable name for T's mapped config type, used in messages.
  template <class T>
  static constexpr string_view mapped_type_name() {
    if constexpr (detail::is_complete<caf::type_name<T>>) {
      return caf::type_name<T>::value;
    } else if constexpr (detail::is_list_like_v<T>) {
      return "list";
    } else {
      return "dictionary";
    }
  }

private:
  // -- properties -------------------------------------------------------------

  static const char* type_name_at_index(size_t index) noexcept;

  static optional<message>
  parse_msg_impl(string_view str, span<const type_id_list> allowed_types);

  // -- auto conversion of related types ---------------------------------------

  // Range of key/value pairs -> dictionary.
  template <class T>
  void set_range(T& xs, std::true_type) {
    auto& dict = as_dictionary();
    dict.clear();
    for (auto& [key, val] : xs)
      dict.emplace(key, std::move(val));
  }

  // Any other iterable range -> list.
  template <class T>
  void set_range(T& xs, std::false_type) {
    auto& ls = as_list();
    ls.clear();
    ls.insert(ls.end(), std::make_move_iterator(xs.begin()),
              std::make_move_iterator(xs.end()));
  }

  // Normalizes x to a variant alternative: native types stored as-is,
  // integral types widened to int64_t, string-likes copied into std::string,
  // and iterable ranges dispatched to set_range().
  template <class T>
  void set(T x) {
    if constexpr (detail::is_config_value_type_v<T>) {
      data_ = std::move(x);
    } else if constexpr (std::is_integral<T>::value) {
      data_ = static_cast<int64_t>(x);
    } else if constexpr (std::is_convertible<T, const char*>::value) {
      data_ = std::string{x};
    } else {
      static_assert(detail::is_iterable<T>::value);
      using value_type = typename T::value_type;
      detail::bool_token<detail::is_pair<value_type>::value> is_map_type;
      set_range(x, is_map_type);
    }
  }

  void set(float x) {
    data_ = static_cast<double>(x);
  }

  void set(const char* x) {
    data_ = std::string{x};
  }

  void set(string_view x) {
    data_ = std::string{x.begin(), x.end()};
  }

  // -- member variables -------------------------------------------------------

  variant_type data_;
};
/// @relates config_value
CAF_CORE_EXPORT std::string to_string(const config_value& x);

// -- conversion via get_as ----------------------------------------------------

// Fallback: a type without inspect support cannot be converted; fail at
// compile time. NOTE(review): the diagnostic text reads "found no a suitable";
// fixing the wording would change the emitted compiler message, so it stays.
template <class T>
expected<T> get_as(const config_value&, inspector_access_type::none) {
  static_assert(detail::always_false_v<T>,
                "cannot convert to T: found no a suitable inspect overload");
}

// Fallback: types tagged unsafe are rejected at compile time.
template <class T>
expected<T> get_as(const config_value&, inspector_access_type::unsafe) {
  static_assert(detail::always_false_v<T>,
                "cannot convert types that are tagged as unsafe");
}

// The inspector-driven conversions all delegate to convert_to().
template <class T>
expected<T>
get_as(const config_value& x, inspector_access_type::specialization token) {
  return x.convert_to<T>(token);
}

template <class T>
expected<T>
get_as(const config_value& x, inspector_access_type::inspect token) {
  return x.convert_to<T>(token);
}

template <class T>
expected<T>
get_as(const config_value& x, inspector_access_type::builtin_inspect token) {
  return x.convert_to<T>(token);
}

// Conversion to builtin types (string, bool, integral, floating point) with
// explicit narrowing checks for integers and floats.
template <class T>
expected<T> get_as(const config_value& x, inspector_access_type::builtin) {
  if constexpr (std::is_same<T, std::string>::value) {
    return to_string(x);
  } else if constexpr (std::is_same<T, bool>::value) {
    return x.to_boolean();
  } else if constexpr (std::is_integral<T>::value) {
    if (auto result = x.to_integer()) {
      if (detail::bounds_checker<T>::check(*result))
        return static_cast<T>(*result);
      else
        return make_error(sec::conversion_failed, "narrowing error");
    } else {
      return std::move(result.error());
    }
  } else if constexpr (std::is_floating_point<T>::value) {
    if (auto result = x.to_real()) {
      if constexpr (sizeof(T) >= sizeof(config_value::real)) {
        return *result;
      } else {
        auto narrowed = static_cast<T>(*result);
        // Accept if the narrowed value stays finite, or if the source was
        // already non-finite (inf/NaN round-trip through the cast).
        if (!std::isfinite(*result) || std::isfinite(narrowed)) {
          return narrowed;
        } else {
          return make_error(sec::conversion_failed, "narrowing error");
        }
      }
    } else {
      return std::move(result.error());
    }
  } else {
    static_assert(detail::always_false_v<T>,
                  "sorry, this conversion is not implemented yet");
  }
}
// Conversion to an empty (stateless) type: succeeds only for dictionaries.
template <class T>
expected<T> get_as(const config_value& x, inspector_access_type::empty) {
  // Technically, we could always simply return T{} here. However,
  // *semantically* it only makes sense to converts dictionaries to objects. So
  // at least we check for this condition here.
  if (x.can_convert_to_dictionary())
    return T{};
  else
    return make_error(sec::conversion_failed,
                      "invalid element type: expected a dictionary");
}

// Helper: converts each list element to its tuple element type and assembles
// the tuple only if all element conversions succeeded.
template <class T, size_t... Is>
expected<T>
get_as_tuple(const config_value::list& x, std::index_sequence<Is...>) {
  auto boxed = std::make_tuple(get_as<std::tuple_element_t<Is, T>>(x[Is])...);
  if ((get<Is>(boxed) && ...))
    return T{std::move(*get<Is>(boxed))...};
  else
    return make_error(sec::conversion_failed, "invalid element types");
}

// Conversion to tuple-like types: requires a list of exactly matching size.
template <class T>
expected<T> get_as(const config_value& x, inspector_access_type::tuple) {
  static_assert(!std::is_array<T>::value,
                "cannot return an array from a function");
  if (auto wrapped_values = x.to_list()) {
    static constexpr size_t n = std::tuple_size<T>::value;
    if (wrapped_values->size() == n)
      return get_as_tuple<T>(*wrapped_values, std::make_index_sequence<n>{});
    else
      return make_error(sec::conversion_failed, "wrong number of arguments");
  } else {
    return {std::move(wrapped_values.error())};
  }
}

// Conversion to map-like types: converts the dictionary's string keys to
// key_type and values to mapped_type, rejecting keys that collide after
// conversion.
template <class T>
expected<T> get_as(const config_value& x, inspector_access_type::map) {
  using key_type = typename T::key_type;
  using mapped_type = typename T::mapped_type;
  T result;
  if (auto dict = x.to_dictionary()) {
    for (auto&& [string_key, wrapped_value] : *dict) {
      config_value wrapped_key{std::move(string_key)};
      if (auto key = get_as<key_type>(wrapped_key)) {
        if (auto val = get_as<mapped_type>(wrapped_value)) {
          if (!result.emplace(std::move(*key), std::move(*val)).second) {
            return make_error(sec::conversion_failed,
                              "ambiguous mapping of keys to key_type");
          }
        } else {
          return make_error(sec::conversion_failed,
                            "failed to convert values to mapped_type");
        }
      } else {
        return make_error(sec::conversion_failed,
                          "failed to convert keys to key_type");
      }
    }
    return {std::move(result)};
  } else {
    return {std::move(dict.error())};
  }
}

// Conversion to list-like types: presizes the container when it supports
// reserve() and aborts on the first failed element conversion.
template <class T>
expected<T> get_as(const config_value& x, inspector_access_type::list) {
  if (auto wrapped_values = x.to_list()) {
    using value_type = typename T::value_type;
    T result;
    if constexpr (detail::has_reserve_v<T>)
      result.reserve(wrapped_values->size());
    for (const auto& wrapped_value : *wrapped_values)
      if (auto maybe_value = get_as<value_type>(wrapped_value)) {
        if constexpr (detail::has_emplace_back_v<T>)
          result.emplace_back(std::move(*maybe_value));
        else
          result.insert(result.end(), std::move(*maybe_value));
      } else {
        return {std::move(maybe_value.error())};
      }
    return {std::move(result)};
  } else {
    return {std::move(wrapped_values.error())};
  }
}
/// Converts a @ref config_value to builtin types or user-defined types that
/// opted into the type inspection API.
/// @relates config_value
template <class T>
expected<T> get_as(const config_value& value) {
  // Fast paths for the variant's own member types; anything else dispatches on
  // the inspector-access category computed for T.
  if constexpr (std::is_same<T, timespan>::value) {
    return value.to_timespan();
  } else if constexpr (std::is_same<T, config_value::list>::value) {
    return value.to_list();
  } else if constexpr (std::is_same<T, config_value::dictionary>::value) {
    return value.to_dictionary();
  } else {
    auto token = inspect_access_type<config_value_reader, T>();
    return get_as<T>(value, token);
  }
}
// -- conversion via get_or ----------------------------------------------------

/// Customization point for configuring automatic mappings from default value
/// types to deduced types. For example, `get_or(value, "foo"sv)` must return a
/// `string` rather than a `string_view`. However, user-defined overloads *must
/// not* specialize this class for any type from the namespaces `std` or `caf`.
template <class T>
struct get_or_deduction_guide {
  using value_type = T;
  template <class V>
  static decltype(auto) convert(V&& x) {
    return std::forward<V>(x);
  }
};

// string_view fallbacks deduce to an owning std::string.
template <>
struct get_or_deduction_guide<string_view> {
  using value_type = std::string;
  static value_type convert(string_view str) {
    return {str.begin(), str.end()};
  }
};

// C-string fallbacks deduce to std::string as well.
template <>
struct get_or_deduction_guide<const char*> {
  using value_type = std::string;
  static value_type convert(const char* str) {
    return {str};
  }
};

// span fallbacks deduce to an owning std::vector.
template <class T>
struct get_or_deduction_guide<span<T>> {
  using value_type = std::vector<T>;
  static value_type convert(span<T> buf) {
    return {buf.begin(), buf.end()};
  }
};

/// Configures @ref get_or to use the @ref get_or_deduction_guide.
struct get_or_auto_deduce {};

/// Converts a @ref config_value to `To` or returns `fallback` if the conversion
/// fails.
/// @relates config_value
template <class To = get_or_auto_deduce, class Fallback>
auto get_or(const config_value& x, Fallback&& fallback) {
  if constexpr (std::is_same<To, get_or_auto_deduce>::value) {
    using guide = get_or_deduction_guide<std::decay_t<Fallback>>;
    using value_type = typename guide::value_type;
    if (auto val = get_as<value_type>(x))
      return std::move(*val);
    else
      return guide::convert(std::forward<Fallback>(fallback));
  } else {
    if (auto val = get_as<To>(x))
      return std::move(*val);
    else
      return To{std::forward<Fallback>(fallback)};
  }
}
// -- SumType-like access ------------------------------------------------------
template <class T, class = std::enable_if_t<detail::is_config_value_type_v<T>>>
auto get_if(const config_value* x) {
return get_if<T>(x->get_data_ptr());
}
template <class T, class = std::enable_if_t<detail::is_config_value_type_v<T>>>
auto get_if(config_value* x) {
return get_if<T>(x->get_data_ptr());
}
template <class T, class = std::enable_if_t<detail::is_config_value_type_v<T>>>
decltype(auto) get(const config_value& x) {
return get<T>(x.get_data());
}
template <class T, class = std::enable_if_t<detail::is_config_value_type_v<T>>>
decltype(auto) get(config_value& x) {
return get<T>(x.get_data());
}
template <class T, class = std::enable_if_t<detail::is_config_value_type_v<T>>>
auto holds_alternative(const config_value& x) {
return holds_alternative<T>(x.get_data());
}
// -- comparison operator overloads --------------------------------------------
/// @relates config_value
CAF_CORE_EXPORT bool operator<(const config_value& x, const config_value& y);
/// @relates config_value
CAF_CORE_EXPORT bool operator<=(const config_value& x, const config_value& y);
/// @relates config_value
CAF_CORE_EXPORT bool operator==(const config_value& x, const config_value& y);
/// @relates config_value
CAF_CORE_EXPORT bool operator>(const config_value& x, const config_value& y);
/// @relates config_value
CAF_CORE_EXPORT bool operator>=(const config_value& x, const config_value& y);
/// @relates config_value
inline bool operator!=(const config_value& x, const config_value& y) {
return !(x == y);
}
/// @relates config_value
CAF_CORE_EXPORT std::ostream& operator<<(std::ostream& out,
const config_value& x);
// -- convenience APIs ---------------------------------------------------------
/// Creates a `config_value` wrapping a list built from `xs...`, preserving
/// argument order.
template <class... Ts>
config_value make_config_value_list(Ts&&... xs) {
  std::vector<config_value> elements;
  elements.reserve(sizeof...(Ts));
  // Left-to-right fold keeps the same element order as the original
  // braced-init-list construction.
  (elements.emplace_back(config_value{std::forward<Ts>(xs)}), ...);
  return config_value{std::move(elements)};
}
// -- inspection API -----------------------------------------------------------
template <>
struct variant_inspector_traits<config_value> {
using value_type = config_value;
static constexpr type_id_t allowed_types[] = {
type_id_v<none_t>,
type_id_v<config_value::integer>,
type_id_v<config_value::boolean>,
type_id_v<config_value::real>,
type_id_v<timespan>,
type_id_v<uri>,
type_id_v<config_value::string>,
type_id_v<config_value::list>,
type_id_v<config_value::dictionary>,
};
static auto type_index(const config_value& x) {
return x.get_data().index();
}
template <class F, class Value>
static auto visit(F&& f, Value&& x) {
return caf::visit(std::forward<F>(f), x.get_data());
}
template <class U>
static void assign(value_type& x, U&& value) {
x = std::move(value);
}
template <class F>
static bool load(type_id_t type, F continuation) {
switch (type) {
default:
return false;
case type_id_v<none_t>: {
auto tmp = config_value{};
continuation(tmp);
return true;
}
case type_id_v<config_value::integer>: {
auto tmp = config_value::integer{};
continuation(tmp);
return true;
}
case type_id_v<config_value::boolean>: {
auto tmp = config_value::boolean{};
continuation(tmp);
return true;
}
case type_id_v<config_value::real>: {
auto tmp = config_value::real{};
continuation(tmp);
return true;
}
case type_id_v<timespan>: {
auto tmp = timespan{};
continuation(tmp);
return true;
}
case type_id_v<uri>: {
auto tmp = uri{};
continuation(tmp);
return true;
}
case type_id_v<config_value::string>: {
auto tmp = config_value::string{};
continuation(tmp);
return true;
}
case type_id_v<config_value::list>: {
auto tmp = config_value::list{};
continuation(tmp);
return true;
}
case type_id_v<config_value::dictionary>: {
auto tmp = config_value::dictionary{};
continuation(tmp);
return true;
}
}
}
};
/// Enables the generic variant-based inspection machinery for `config_value`.
template <>
struct inspector_access<config_value> : variant_inspector_access<config_value> {
  // nop
};
} // namespace caf
| 8,103
|
5,169
|
{
"name": "YBSlantedCollectionViewLayout",
"version": "2.1.0",
"summary": "UICollectionViewLayout allowing the display of slanted content on UICollectionView",
"description": "YBSlantedCollectionViewLayout is a subclass of UICollectionViewLayout allowing the display of slanted content on UICollectionView.",
"homepage": "https://github.com/yacir/YBSlantedCollectionViewLayout",
"license": "MIT",
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/yacir/YBSlantedCollectionViewLayout.git",
"tag": "2.1.0"
},
"social_media_url": "https://linkedin.com/in/yassir-barchi-318a7949",
"platforms": {
"ios": "8.0"
},
"requires_arc": true,
"source_files": "Source/*.{h,swift}",
"frameworks": "UIKit",
"deprecated_in_favor_of": "CollectionViewSlantedLayout"
}
| 309
|
348
|
{"nom":"Bligny-sur-Ouche","circ":"5ème circonscription","dpt":"Côte-d'Or","inscrits":642,"abs":401,"votants":241,"blancs":23,"nuls":5,"exp":213,"res":[{"nuance":"REM","nom":"<NAME>","voix":128},{"nuance":"LR","nom":"<NAME>","voix":85}]}
| 99
|
465
|
<filename>vs2015/realtime_server/lib/realtinet/third/muduo/base/ThreadLocalSingleton.h
// Use of this source code is governed by a BSD-style license
// that can be found in the License file.
//
// Author: <NAME> (chenshuo at chenshuo dot com)
#ifndef MUDUO_BASE_THREADLOCALSINGLETON_H
#define MUDUO_BASE_THREADLOCALSINGLETON_H
#include <muduo/base/noncopyable.h>
#include <assert.h>
#include <pthread.h>
namespace muduo
{
// Thread-local singleton: every thread gets its own lazily created T. The
// object is cached in a __thread raw pointer for fast access and destroyed at
// thread exit through a pthread TLS key destructor registered by Deleter.
template<typename T>
class ThreadLocalSingleton : noncopyable
{
public:
    // Returns the calling thread's instance, creating it on first use.
    static T& instance()
    {
        if ( !t_value_ )
        {
            t_value_ = new T();
            // Register the new object with the pthread key so destructor()
            // runs for it when this thread exits.
            deleter_.set( t_value_ );
        }
        return *t_value_;
    }

    // Returns the calling thread's instance without creating one; may be 0.
    static T* pointer()
    {
        return t_value_;
    }

private:
    // Never instantiated; all access goes through the static members.
    ThreadLocalSingleton();
    ~ThreadLocalSingleton();

    // pthread key destructor: invoked once per exiting thread that set a
    // value on the key.
    static void destructor( void* obj )
    {
        assert( obj == t_value_ );
        // Compile-time guard that T is a complete type here, so `delete`
        // calls the real destructor instead of being undefined behavior.
        typedef char T_must_be_complete_type[sizeof( T ) == 0 ? -1 : 1];
        T_must_be_complete_type dummy; ( void )dummy;
        delete t_value_;
        t_value_ = 0;
    }

    // Owns the single pthread TLS key shared by all threads for this T.
    class Deleter
    {
    public:
        Deleter()
        {
            pthread_key_create( &pkey_, &ThreadLocalSingleton::destructor );
        }
        ~Deleter()
        {
            pthread_key_delete( pkey_ );
        }
        // Attaches newObj to the key; asserts the slot was previously empty.
        void set( T* newObj )
        {
            assert( pthread_getspecific( pkey_ ) == NULL );
            pthread_setspecific( pkey_, newObj );
        }
        pthread_key_t pkey_;
    };

    static __thread T* t_value_;  // per-thread cached pointer (fast path)
    static Deleter deleter_;      // process-wide key holder
};

template<typename T>
__thread T* ThreadLocalSingleton<T>::t_value_ = 0;

template<typename T>
typename ThreadLocalSingleton<T>::Deleter ThreadLocalSingleton<T>::deleter_;
}
#endif
| 696
|
1,617
|
#include "caffe/util/nms.hpp"
using std::max;
using std::min;
namespace caffe {
// Computes intersection-over-union of two axis-aligned boxes given as
// [x1, y1, x2, y2]. Returns 0 when the boxes do not overlap. Extents are
// treated as inclusive pixel coordinates, hence the "+ 1" per dimension.
template <typename Dtype>
static
Dtype iou(const Dtype A[], const Dtype B[])
{
  // Fast reject when the boxes cannot overlap at all.
  const bool disjoint =
      A[0] > B[2] || A[1] > B[3] || A[2] < B[0] || A[3] < B[1];
  if (disjoint) {
    return 0;
  }
  // Corners of the intersection rectangle.
  const Dtype ix1 = std::max(A[0], B[0]);
  const Dtype iy1 = std::max(A[1], B[1]);
  const Dtype ix2 = std::min(A[2], B[2]);
  const Dtype iy2 = std::min(A[3], B[3]);
  // Intersection area (clamped to zero in each dimension).
  const Dtype inter = std::max((Dtype)0, ix2 - ix1 + (Dtype)1)
                    * std::max((Dtype)0, iy2 - iy1 + (Dtype)1);
  // Individual box areas.
  const Dtype area_a = (A[2] - A[0] + (Dtype)1) * (A[3] - A[1] + (Dtype)1);
  const Dtype area_b = (B[2] - B[0] + (Dtype)1) * (B[3] - B[1] + (Dtype)1);
  // IoU = intersection / union.
  return inter / (area_a + area_b - inter);
}

template float iou(const float A[], const float B[]);
template double iou(const double A[], const double B[]);
// Greedy non-maximum suppression. Each of the `num_boxes` boxes occupies 5
// consecutive values in `boxes` (x1, y1, x2, y2, score); boxes are assumed
// pre-sorted so earlier entries win ties. Indices of kept boxes, offset by
// `base_index`, are written to `index_out` and their count to `*num_out`.
// A box is suppressed when its IoU with an already-kept box exceeds
// `nms_thresh`; at most `max_num_out` boxes are kept.
template <typename Dtype>
void nms_cpu(const int num_boxes,
             const Dtype boxes[],
             int index_out[],
             int* const num_out,
             const int base_index,
             const Dtype nms_thresh, const int max_num_out)
{
  int count = 0;
  // std::vector<char> value-initializes to zero, so the original manual
  // zero-fill loop (and the trailing clear()) were redundant.
  std::vector<char> is_dead(num_boxes, 0);
  for (int i = 0; i < num_boxes; ++i) {
    if (is_dead[i]) {
      continue;
    }
    index_out[count++] = base_index + i;
    if (count == max_num_out) {
      break;
    }
    // Suppress every remaining box that overlaps box i too strongly.
    for (int j = i + 1; j < num_boxes; ++j) {
      if (!is_dead[j] && iou(&boxes[i * 5], &boxes[j * 5]) > nms_thresh) {
        is_dead[j] = 1;
      }
    }
  }
  *num_out = count;
}

template
void nms_cpu(const int num_boxes,
             const float boxes[],
             int index_out[],
             int* const num_out,
             const int base_index,
             const float nms_thresh, const int max_num_out);
template
void nms_cpu(const int num_boxes,
             const double boxes[],
             int index_out[],
             int* const num_out,
             const int base_index,
             const double nms_thresh, const int max_num_out);
| 1,081
|
892
|
<filename>advisories/unreviewed/2022/04/GHSA-6g32-5m95-62fv/GHSA-6g32-5m95-62fv.json
{
"schema_version": "1.2.0",
"id": "GHSA-6g32-5m95-62fv",
"modified": "2022-04-23T00:03:15Z",
"published": "2022-04-16T00:00:44Z",
"aliases": [
"CVE-2021-44493"
],
"details": "An issue was discovered in YottaDB through r1.32 and V7.0-000 and FIS GT.M through V7.0-000. Using crafted input, an attacker can cause a call to $Extract to force an signed integer holding the size of a buffer to take on a large negative number, which is then used as the length of a memcpy call that occurs on the stack, causing a buffer overflow.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2021-44493"
},
{
"type": "WEB",
"url": "https://gitlab.com/YottaDB/DB/YDB/-/issues/828"
},
{
"type": "WEB",
"url": "https://sourceforge.net/projects/fis-gtm/files/"
},
{
"type": "WEB",
"url": "http://tinco.pair.com/bhaskar/gtm/doc/articles/GTM_V7.0-002_Release_Notes.html"
}
],
"database_specific": {
"cwe_ids": [
"CWE-120"
],
"severity": "HIGH",
"github_reviewed": false
}
}
| 643
|
1,511
|
/* -----------------------------------------------------------------------
ffi.c - Copyright (c) 2004 <NAME>
Copyright (c) 2008 Red Hat, Inc.
M32R Foreign Function Interface
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
| 108
|
4,551
|
<filename>shadows/framework/src/main/java/org/robolectric/shadows/ShadowFontsContract.java
package org.robolectric.shadows;
import static android.os.Build.VERSION_CODES.O;
import static org.robolectric.util.reflector.Reflector.reflector;
import android.content.Context;
import android.graphics.Typeface;
import android.provider.FontRequest;
import android.provider.FontsContract;
import org.robolectric.annotation.Implementation;
import org.robolectric.annotation.Implements;
import org.robolectric.annotation.Resetter;
import org.robolectric.util.reflector.Accessor;
import org.robolectric.util.reflector.ForType;
import org.robolectric.util.reflector.Static;
/** Shadow for {@link FontsContract} that avoids real font-provider lookups in tests. */
@Implements(value = FontsContract.class, minSdk = O)
public class ShadowFontsContract {
  /** Returns a stub typeface immediately. */
  @Implementation
  public static Typeface getFontSync(FontRequest request) {
    // Build a typeface directly from the request's query string instead of
    // contacting a font provider.
    return Typeface.create(request.getQuery(), Typeface.NORMAL);
  }

  @Resetter
  public static void reset() {
    // Null out FontsContract's static context so state does not leak between
    // test runs.
    reflector(FontsContractReflector.class).setContext(null);
  }

  /** Reflector exposing FontsContract's private static {@code sContext} field. */
  @ForType(FontsContract.class)
  private interface FontsContractReflector {
    @Static
    @Accessor("sContext")
    void setContext(Context context);
  }
}
| 396
|
11,351
|
<reponame>LearnJavaByus/eureka<filename>eureka-client-jersey2/src/test/java/com/netflix/discovery/guice/Jersey2EurekaModuleTest.java
package com.netflix.discovery.guice;
import com.google.inject.AbstractModule;
import com.google.inject.Binding;
import com.google.inject.Key;
import com.google.inject.Scopes;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.EurekaInstanceConfig;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.providers.MyDataCenterInstanceConfigProvider;
import com.netflix.config.ConfigurationManager;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.DiscoveryManager;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.EurekaClientConfig;
import com.netflix.discovery.shared.transport.jersey.TransportClientFactories;
import com.netflix.discovery.shared.transport.jersey2.Jersey2TransportClientFactories;
import com.netflix.governator.InjectorBuilder;
import com.netflix.governator.LifecycleInjector;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* @author <NAME>
*/
public class Jersey2EurekaModuleTest {
    // Governator injector built per test; shut down in tearDown().
    private LifecycleInjector injector;

    @Before
    public void setUp() throws Exception {
        // Minimal eureka config: registry fetch and registration disabled so
        // the injector can be created without a running eureka server.
        ConfigurationManager.getConfigInstance().setProperty("eureka.region", "default");
        ConfigurationManager.getConfigInstance().setProperty("eureka.shouldFetchRegistry", "false");
        ConfigurationManager.getConfigInstance().setProperty("eureka.registration.enabled", "false");
        ConfigurationManager.getConfigInstance().setProperty("eureka.serviceUrl.default", "http://localhost:8080/eureka/v2");
        injector = InjectorBuilder
            .fromModule(new Jersey2EurekaModule())
            .overrideWith(new AbstractModule() {
                @Override
                protected void configure() {
                    // the default impl of EurekaInstanceConfig is CloudInstanceConfig, which we only want in an AWS
                    // environment. Here we override that by binding MyDataCenterInstanceConfig to EurekaInstanceConfig.
                    bind(EurekaInstanceConfig.class).toProvider(MyDataCenterInstanceConfigProvider.class).in(Scopes.SINGLETON);
                }
            })
            .createInjector();
    }

    @After
    public void tearDown() {
        if (injector != null) {
            injector.shutdown();
        }
        // Clear the global config so properties do not leak into other tests.
        ConfigurationManager.getConfigInstance().clear();
    }

    @SuppressWarnings("deprecation")
    @Test
    public void testDI() {
        // The Guice bindings must resolve to the same instances exposed
        // through the legacy DiscoveryManager singletons.
        InstanceInfo instanceInfo = injector.getInstance(InstanceInfo.class);
        Assert.assertEquals(ApplicationInfoManager.getInstance().getInfo(), instanceInfo);

        EurekaClient eurekaClient = injector.getInstance(EurekaClient.class);
        DiscoveryClient discoveryClient = injector.getInstance(DiscoveryClient.class);
        Assert.assertEquals(DiscoveryManager.getInstance().getEurekaClient(), eurekaClient);
        Assert.assertEquals(DiscoveryManager.getInstance().getDiscoveryClient(), discoveryClient);
        Assert.assertEquals(eurekaClient, discoveryClient);

        EurekaClientConfig eurekaClientConfig = injector.getInstance(EurekaClientConfig.class);
        Assert.assertEquals(DiscoveryManager.getInstance().getEurekaClientConfig(), eurekaClientConfig);

        EurekaInstanceConfig eurekaInstanceConfig = injector.getInstance(EurekaInstanceConfig.class);
        Assert.assertEquals(DiscoveryManager.getInstance().getEurekaInstanceConfig(), eurekaInstanceConfig);

        Binding<TransportClientFactories> binding = injector.getExistingBinding(Key.get(TransportClientFactories.class));
        Assert.assertNotNull(binding); // has a binding for jersey2

        TransportClientFactories transportClientFactories = injector.getInstance(TransportClientFactories.class);
        Assert.assertTrue(transportClientFactories instanceof Jersey2TransportClientFactories);
    }
}
| 1,441
|
7,482
|
<reponame>BreederBai/rt-thread
/**
******************************************************************************
* @file tae32f53xx_ll_def.h
* @author MCD Application Team
* @brief This file contains LL common defines, enumeration, macros and
* structures definitions.
*
******************************************************************************
* @attention
*
* <h2><center>© Copyright (c) 2020 Tai-Action.
* All rights reserved.</center></h2>
*
* This software is licensed by Tai-Action under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef _TAE32F53XX_LL_DEF_H_
#define _TAE32F53XX_LL_DEF_H_
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* Includes ------------------------------------------------------------------*/
#include <stddef.h>
#include "tae32f53xx.h"
/** @addtogroup TAE32F53xx_LL_Driver
* @{
*/
/** @defgroup DEFINE_LL DEFINE LL
* @brief DEFINE LL
* @{
*/
/* Exported constants --------------------------------------------------------*/
/** @defgroup DEFINE_LL_Exported_Constants DEFINE LL Exported Constants
* @brief DEFINE LL Exported Constants
* @{
*/
/**
* @brief LL wait forever time definition
*/
#define LL_WAIT_FOREVER 0xFFFFFFFFUL
/**
* @brief LL wait max delay time definition
*/
#define LL_MAX_DELAY (LL_WAIT_FOREVER - 1U)
/**
* @}
*/
/* Exported types ------------------------------------------------------------*/
/** @defgroup DEFINE_LL_Exported_Types DEFINE LL Exported Types
* @brief DEFINE LL Exported Types
* @{
*/
/**
* @brief LL Status type definition
*/
typedef enum {
    LL_OK      = 0x00U,     /*!< LL status OK      */
    LL_ERROR   = 0x01U,     /*!< LL status ERROR   */
    LL_BUSY    = 0x02U,     /*!< LL status BUSY    */
    LL_TIMEOUT = 0x03U,     /*!< LL status TIMEOUT */
    LL_FAILED  = 0x04U,     /*!< LL status FAILED  */
} LL_StatusETypeDef;

/**
 * @brief LL Flag status type definition
 */
typedef enum {
    RESET = 0,              /*!< LL flag status RESET */
    SET   = !RESET,         /*!< LL flag status SET   */
} LL_FlagStatusETypeDef;

/**
 * @brief LL Functional status type definition
 */
typedef enum {
    DISABLE = 0,            /*!< LL functional status DISABLE */
    ENABLE  = !DISABLE,     /*!< LL functional status ENABLE  */
} LL_FuncStatusETypeDef;

/**
 * @brief LL Error status type definition
 */
typedef enum {
    SUCCESS = 0U,           /*!< LL error status SUCCESS */
    ERROR   = !SUCCESS,     /*!< LL error status ERROR   */
} LL_ErrStatusETypeDef;
/**
* @}
*/
/* Exported macro ------------------------------------------------------------*/
/** @defgroup DEFINE_LL_Exported_Macros DEFINE LL Exported Macros
* @brief DEFINE LL Exported Macros
* @{
*/
/* Compiler ALIAS and WEAK attribute definition */
#if defined (__CC_ARM)  /*!< AC5 Compiler */
#define __ALIAS_FUNC(FUNC)                  __attribute__ ((weak, alias(#FUNC)))
#define __WEAK_ALIAS_FUNC(FUNC, FUNC_ALIAS) void FUNC(void) __attribute__ ((weak, alias(#FUNC_ALIAS)));
#elif defined (__ICCARM__)  /*!< IAR Compiler */
/* NOTE(review): unlike the other branches, the IAR branch defines no
 * __ALIAS_FUNC, and its helper expands to `WEAK_ALIAS_FUNC` (no leading
 * underscore) while relying on `_STRINGIFY`, neither of which is defined in
 * this header -- confirm these exist elsewhere for IAR builds. */
#define __WEAK_ALIAS_FUNC(FUNC, FUNC_ALIAS) void FUNC(void);_Pragma(_STRINGIFY(_WEAK_ALIAS_FUNC(FUNC, FUNC_ALIAS)))
#define _WEAK_ALIAS_FUNC(FUNC, FUNC_ALIAS)  weak WEAK_ALIAS_FUNC(FUNC, FUNC_ALIAS)
#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)  /*!< AC6 Compiler */
#define __ALIAS_FUNC(FUNC)                  __attribute__ ((weak, alias(#FUNC)))
#define __WEAK_ALIAS_FUNC(FUNC, FUNC_ALIAS) void FUNC(void) __attribute__ ((weak, alias(#FUNC_ALIAS)));
#elif defined (__GNUC__)  /*!< GCC Compiler */
#define __ALIAS_FUNC(FUNC)                  __attribute__ ((weak, alias(#FUNC)))
#define __WEAK_ALIAS_FUNC(FUNC, FUNC_ALIAS) void FUNC(void) __attribute__ ((weak, alias(#FUNC_ALIAS)));
#else
#error Not supported compiler type
#endif
/* Macro to get variable aligned on 4-bytes, for __ICCARM__ the directive "#pragma data_alignment=4" must be used instead */
/* Compiler aligned on 4-bytes attribute definition */
#if defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */
#ifndef __ALIGN_END
#define __ALIGN_END __attribute__ ((aligned (4)))
#endif
#ifndef __ALIGN_BEGIN
#define __ALIGN_BEGIN
#endif
#else
#ifndef __ALIGN_END
#define __ALIGN_END
#endif
#ifndef __ALIGN_BEGIN
#if defined (__CC_ARM) /* ARM Compiler */
#define __ALIGN_BEGIN __align(4)
#elif defined (__ICCARM__) /* IAR Compiler */
#define __ALIGN_BEGIN
#endif
#endif
#endif
/* Compiler __NOINLINE attribute definition */
#if defined (__CC_ARM) || defined (__GNUC__) /* ARM & GNUCompiler */
#define __NOINLINE __attribute__ ( (noinline) )
#elif defined (__ICCARM__) /* ICCARM Compiler */
#define __NOINLINE _Pragma("optimize = no_inline")
#endif
/* Compiler misc attribute definition */
#if defined (__CC_ARM) /*!< AC5 Compiler */
#define __NO_INIT __attribute__((zero_init))
#define __AT(n) __attribute__((at(n)))
#define __SECTION(SECT) __attribute__((section(#SECT)))
#elif defined (__ICCARM__) /*!< IAR Compiler */
#define __NO_INIT __no_init
#define __AT(n) @(n)
#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /*!< AC6 Compiler */
#define __NO_INIT
#define __AT(n) __attribute__ ((section(".ARM.__at_"#n)))
#define __SECTION(SECT) __attribute__((section(#SECT)))
#elif defined (__GNUC__) /*!< GCC Compiler */
#define __NO_INIT __attribute__((zero_init))
#define __AT(n)
#define __SECTION(SECT) __attribute__((section(#SECT)))
#endif
/**
* @brief Bit left shift definition
* @param pos left shift position
* @return Bit left shift value
*/
#define BIT(pos) (1U << (pos))
/**
* @brief Set bit definition
* @param REG register
* @param BIT Bit to set
* @return None
*/
#define SET_BIT(REG, BIT) ((REG) |= (BIT))
/**
* @brief Clear bit definition
* @param REG register
* @param BIT Bit to clear
* @return None
*/
#define CLEAR_BIT(REG, BIT) ((REG) &= ~(BIT))
/**
* @brief Read bit definition
* @param REG register
* @param BIT Bit to read
* @return None
*/
#define READ_BIT(REG, BIT) ((REG) & (BIT))
/**
* @brief Clear register definition
* @param REG register
* @return None
*/
#define CLEAR_REG(REG) ((REG) = (0x0))
/**
* @brief Write register definiton
* @param REG register
* @param VAL write value
* @return None
*/
#define WRITE_REG(REG, VAL) ((REG) = (VAL))
/**
* @brief Read register definition
* @param REG register
* @return None
*/
#define READ_REG(REG) ((REG))
/**
* @brief Modify register definition
* @param REG register
* @param CLEARMASK clear mask
* @param SETMASK set mask
* @return None
*/
#define MODIFY_REG(REG, CLEARMASK, SETMASK) WRITE_REG((REG), (((READ_REG(REG)) & (~(CLEARMASK))) | (SETMASK)))
/**
* @brief Position value definition
* @param VAL value
* @return None
*/
#define POSITION_VAL(VAL) (__CLZ(__RBIT(VAL)))
/**
* @brief To avoid gcc/g++ warnings
* @param X avoid warning param
* @return None
*/
#define LL_UNUSED(X) (void)X
/**
* @brief Macro for counting the element number of an array
* @param a Array to be Counted
* @return size of Array
*/
#define ARRAY_SIZE(a) (sizeof((a)) / sizeof((a)[0]))
/**
* @}
*/
/* Exported functions --------------------------------------------------------*/
/* Private types -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* Private constants ---------------------------------------------------------*/
/* Private macros ------------------------------------------------------------*/
/* Private functions ---------------------------------------------------------*/
/**
* @}
*/
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _TAE32F53XX_LL_DEF_H_ */
/************************* (C) COPYRIGHT Tai-Action *****END OF FILE***********/
| 3,946
|
410
|
<filename>defibus-broker/src/test/java/com/webank/defibus/broker/monitor/QueueListeningMonitorTest.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webank.defibus.broker.monitor;
import com.webank.defibus.broker.DeFiBrokerController;
import com.webank.defibus.common.DeFiBusBrokerConfig;
import java.lang.reflect.Field;
import org.apache.rocketmq.common.BrokerConfig;
import org.apache.rocketmq.remoting.netty.NettyClientConfig;
import org.apache.rocketmq.remoting.netty.NettyServerConfig;
import org.apache.rocketmq.store.config.MessageStoreConfig;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.verify;
/** Verifies that DeFiBrokerController starts its QueueListeningMonitor when the broker starts. */
@RunWith(MockitoJUnitRunner.class)
public class QueueListeningMonitorTest {
    private DeFiBrokerController deFiBrokerController;

    /** Mock injected into the controller via reflection in {@link #init()}. */
    @Mock
    QueueListeningMonitor queueListeningMonitor;

    @Before
    public void init() throws Exception {
        deFiBrokerController = new DeFiBrokerController(
            new BrokerConfig(),
            new NettyServerConfig(),
            new NettyClientConfig(),
            new MessageStoreConfig(),
            new DeFiBusBrokerConfig());
        assertThat(deFiBrokerController.initialize());
        // Swap the controller's private monitor for the mock so start() can be
        // verified without running a real monitor thread.
        Field field = DeFiBrokerController.class.getDeclaredField("queueListeningMonitor");
        field.setAccessible(true);
        field.set(deFiBrokerController, queueListeningMonitor);
    }

    @Test
    public void testQueueListeningMonitorStart() throws Exception {
        deFiBrokerController.start();
        verify(queueListeningMonitor).start();
    }

    @After
    public void shutdown() {
        deFiBrokerController.shutdown();
    }
}
| 856
|
6,071
|
"""
Mongodb-based Trials Object
===========================
Components involved:
- mongo
e.g. mongod ...
- driver
e.g. hyperopt-mongo-search mongo://address bandit_json bandit_algo_json
- worker
e.g. hyperopt-mongo-worker --loop mongo://address
Mongo
=====
Mongo (daemon process mongod) is used for IPC between the driver and worker.
Configure it as you like, so that hyperopt-mongo-search can communicate with it.
I think there is some support in this file for an ssh+mongo connection type.
The experiment uses the following collections for IPC:
* jobs - documents of a standard form used to store suggested trials and their
results. These documents have keys:
* spec : subdocument returned by bandit_algo.suggest
* exp_key: an identifier of which driver suggested this trial
* cmd: a tuple (protocol, ...) identifying bandit.evaluate
* state: 0, 1, 2, 3 for job state (new, running, ok, fail)
* owner: None for new jobs, (hostname, pid) for started jobs
* book_time: time a job was reserved
* refresh_time: last time the process running the job checked in
* result: the subdocument returned by bandit.evaluate
* error: for jobs of state 3, a reason for failure.
* logs: a dict of sequences of strings received by ctrl object
* info: info messages
* warn: warning messages
* error: error messages
* fs - a gridfs storage collection (used for pickling)
* drivers - documents describing drivers. These are used to prevent two drivers
from using the same exp_key simultaneously, and to attach saved states.
* exp_key
* workdir: [optional] path where workers should chdir to
Attachments:
* pkl: [optional] saved state of experiment class
* bandit_args_kwargs: [optional] pickled (clsname, args, kwargs) to
reconstruct bandit in worker processes
The MongoJobs and CtrlObj classes, as well as the main_worker
method, form the abstraction barrier around this database layout.
Worker
======
A worker looks up a job in a mongo database, maps that job document to a
runnable python object, calls that object, and writes the return value back to
the database.
A worker *reserves* a job by atomically identifying a document in the jobs
collection whose owner is None and whose state is 0, and setting the state to
1. If it fails to identify such a job, it loops with a random sleep interval
of a few seconds and polls the database.
If hyperopt-mongo-worker is called with a --loop argument then it goes back to
the database after finishing a job to identify and perform another one.
CtrlObj
-------
The worker allocates a CtrlObj and passes it to bandit.evaluate in addition to
the subdocument found at job['spec']. A bandit can use ctrl.info, ctrl.warn,
ctrl.error and so on like logger methods, and those messages will be written
to the mongo database (to job['logs']). They are not written synchronously
though, they are written when the bandit.evaluate function calls
ctrl.checkpoint().
Ctrl.checkpoint does several things:
* flushes logging messages to the database
* updates the refresh_time
* optionally updates the result subdocument
The main_worker routine calls Ctrl.checkpoint(rval) once after the
bandit.evaluate function has returned before setting the state to 2 or 3 to
finalize the job in the database.
"""
from future import standard_library
import copy
# import hashlib
import logging
import optparse
import os
# import shutil
import signal
import socket
import subprocess
import sys
import time
import urllib.parse
import warnings
import numpy
try:
import pymongo
import gridfs
from bson import SON
_has_mongo = True
except:
_has_mongo = False
from .base import JOB_STATES
from .base import JOB_STATE_NEW, JOB_STATE_RUNNING, JOB_STATE_DONE, JOB_STATE_ERROR
from .base import Trials
from .base import InvalidTrial
from .base import Ctrl
from .base import SONify
from .base import spec_from_misc
from .utils import coarse_utcnow
from .utils import fast_isin
from .utils import get_most_recent_inds
from .utils import json_call
from .utils import working_dir, temp_dir
import six
__authors__ = ["<NAME>", "<NAME>"]
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
standard_library.install_aliases()
logger = logging.getLogger(__name__)
try:
import cloudpickle as pickler
except Exception as e:
logger.info(
'Failed to load cloudpickle, try installing cloudpickle via "pip '
'install cloudpickle" for enhanced pickling support.'
)
import six.moves.cPickle as pickler
class OperationFailure(Exception):
    """Proxy that could be factored out if we also want to use CouchDB and
    JobmanDB classes with this interface
    """


class Shutdown(Exception):
    """
    Exception for telling mongo_worker loop to quit
    """


class WaitQuit(Exception):
    """
    Exception for telling mongo_worker loop to quit
    """
    # NOTE(review): docstring duplicates Shutdown's -- presumably this variant
    # waits for in-flight work before quitting; confirm against the worker loop.


class InvalidMongoTrial(InvalidTrial):
    """Mongo-specific variant of :class:`InvalidTrial`."""
    pass


class DomainSwapError(Exception):
    """Raised when the search program tries to change the bandit attached to
    an experiment.
    """


class ReserveTimeout(Exception):
    """No job was reserved in the allotted time"""
def read_pw():
    """Read the password stored in ``$HOME/.hyperopt``.

    The last character of the file is dropped unconditionally (the file is
    expected to end with a newline). The file handle is closed promptly via a
    context manager instead of being leaked.
    """
    with open(os.path.join(os.getenv("HOME"), ".hyperopt")) as handle:
        return handle.read()[:-1]
def parse_url(url, pwfile=None):
    """Unpacks a url of the form
        protocol://[username[:pw]]@hostname[:port]/db/collection

    :rtype: tuple of strings
    :returns: protocol, username, password, hostname, port, dbname,
        collection, authdbname

    :note:
        If the password is not given in the url but the username is, then
        this function will read the password from file by calling
        ``open(pwfile).read()[:-1]``
    """
    protocol = url[: url.find(":")]
    ftp_url = "ftp" + url[url.find(":") :]
    # -- parse the string as if it were an ftp address
    tmp = urllib.parse.urlparse(ftp_url)
    query_params = urllib.parse.parse_qs(tmp.query)
    logger.info("PROTOCOL %s" % protocol)
    logger.info("USERNAME %s" % tmp.username)
    logger.info("HOSTNAME %s" % tmp.hostname)
    logger.info("PORT %s" % tmp.port)
    logger.info("PATH %s" % tmp.path)
    authdbname = None
    if "authSource" in query_params and len(query_params["authSource"]):
        authdbname = query_params["authSource"][-1]
    logger.info("AUTH DB %s" % authdbname)
    try:
        _, dbname, collection = tmp.path.split("/")
    except ValueError:
        # Narrowed from a bare `except:`; a path with the wrong number of
        # segments raises ValueError from the tuple unpacking above.
        print("Failed to parse '%s'" % (str(tmp.path)), file=sys.stderr)
        raise
    logger.info("DB %s" % dbname)
    logger.info("COLLECTION %s" % collection)
    if tmp.password is None:
        if (tmp.username is not None) and pwfile:
            # Close the password file promptly instead of leaking the handle.
            with open(pwfile) as handle:
                password = handle.read()[:-1]
        else:
            password = None
    else:
        # Password supplied inline in the URL.
        password = tmp.password
    if password is not None:
        logger.info("PASS ***")
    port = int(float(tmp.port))  # port has to be casted explicitly here.
    return (
        protocol,
        tmp.username,
        password,
        tmp.hostname,
        port,
        dbname,
        collection,
        authdbname,
    )
def connection_with_tunnel(
    dbname,
    host="localhost",
    auth_dbname=None,
    port=27017,
    ssh=False,
    user="hyperopt",
    pw=None,
):
    # Opens a pymongo connection to `dbname`, optionally through a freshly
    # spawned SSH tunnel. Returns (connection, ssh_tunnel); ssh_tunnel is the
    # Popen handle when `ssh` is True and None otherwise.
    if ssh:
        local_port = numpy.random.randint(low=27500, high=28000)
        # -- forward from local to remote machine
        ssh_tunnel = subprocess.Popen(
            ["ssh", "-NTf", "-L", "%i:%s:%i" % (local_port, "127.0.0.1", port), host]
        )
        # -- give the subprocess time to set up
        time.sleep(0.5)
        connection = pymongo.MongoClient(
            "127.0.0.1", local_port, document_class=SON, w=1, j=True
        )
    else:
        connection = pymongo.MongoClient(host, port, document_class=SON, w=1, j=True)
        if user:
            if not pw:
                # Fall back to reading the password from ~/.hyperopt.
                pw = read_pw()
            if user == "hyperopt" and not auth_dbname:
                auth_dbname = "admin"
            connection[dbname].authenticate(user, pw, source=auth_dbname)
        # NOTE(review): the ssh branch above performs no authentication --
        # confirm whether that is intended for tunneled connections.
        ssh_tunnel = None
    # Note that the w=1 and j=True args to MongoClient above should:
    # -- Ensure that changes are written to at least one server.
    # -- Ensure that changes are written to the journal if there is one.
    return connection, ssh_tunnel
def connection_from_string(s):
    """Open a mongo connection described by connection string `s`.

    Returns a 4-tuple: (connection, ssh_tunnel, database, collection).
    """
    protocol, user, pw, host, port, db, collection, authdb = parse_url(s)
    if protocol == "mongo":
        use_ssh = False
    elif protocol in ("mongo+ssh", "ssh+mongo"):
        use_ssh = True
    else:
        raise ValueError("unrecognized protocol for MongoJobs", protocol)
    connection, tunnel = connection_with_tunnel(
        dbname=db, ssh=use_ssh, user=user, pw=pw, host=host, port=port, auth_dbname=authdb
    )
    database = connection[db]
    return connection, tunnel, database, database[collection]
class MongoJobs:
    """Interface to a Jobs database structured like this

    Collections:

    db.jobs - structured {config_name, 'cmd', 'owner', 'book_time',
        'refresh_time', 'state', 'exp_key', 'owner', 'result'}
        This is the collection that the worker nodes write to.

    db.gfs - file storage via gridFS for all collections.
    """

    def __init__(self, db, jobs, gfs, conn, tunnel, config_name="spec"):
        """
        Parameters
        ----------
        db - Mongo Database (e.g. `Connection()[dbname]`)
            database in which all job-related info is stored
        jobs - Mongo Collection handle
            collection within `db` to use for job arguments, return vals,
            and various bookkeeping stuff and meta-data. Typically this is
            `db['jobs']`
        gfs - Mongo GridFS handle
            GridFS is used to store attachments - binary blobs that don't fit
            or are awkward to store in the `jobs` collection directly.
        conn - Mongo Connection
            kept so callers can manage/close the client later
        tunnel - ssh tunnel subprocess or None
            See `connection_with_tunnel` for more info.
        config_name - string
            name of the field holding the job spec (default: "spec").
            A default is required because `alloc()` constructs instances
            without passing it.
        """
        if not _has_mongo:
            raise Exception(
                "MongoJobs cannot import pymongo classes. Make sure that pymongo "
                "is available in your environment. E.g., try running 'import pymongo'"
            )
        self.db = db
        self.jobs = jobs
        self.gfs = gfs
        self.conn = conn
        self.tunnel = tunnel
        self.config_name = config_name

    # Alias: the primary collection backing this object.
    collection = property(lambda s: s.jobs)

    @classmethod
    def alloc(
        cls,
        dbname,
        host="localhost",
        auth_dbname="admin",
        port=27017,
        jobs_coll="jobs",
        gfs_coll="fs",
        ssh=False,
        user=None,
        pw=None,
    ):
        """Connect to `dbname` on `host` and build a MongoJobs over it."""
        connection, tunnel = connection_with_tunnel(
            dbname, host, auth_dbname, port, ssh, user, pw
        )
        db = connection[dbname]
        gfs = gridfs.GridFS(db, collection=gfs_coll)
        # BUGFIX: this 5-arg call used to raise TypeError because __init__
        # had no default for config_name; the default now covers it.
        return cls(db, db[jobs_coll], gfs, connection, tunnel)

    @classmethod
    def new_from_connection_str(cls, conn_str, gfs_coll="fs", config_name="spec"):
        """Build a MongoJobs from a "mongo://..." connection string."""
        connection, tunnel, db, coll = connection_from_string(conn_str)
        gfs = gridfs.GridFS(db, collection=gfs_coll)
        return cls(db, coll, gfs, connection, tunnel, config_name)

    def __iter__(self):
        return self.jobs.find()

    def __len__(self):
        try:
            return self.jobs.count()
        except Exception:
            # e.g. the server is unreachable; report an empty collection
            # rather than crashing len().  (Was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            return 0

    def create_jobs_indexes(self):
        """Index the fields used by reservation and result queries."""
        jobs = self.db.jobs
        for k in ["exp_key", "result.loss", "book_time"]:
            jobs.create_index(k)

    def create_drivers_indexes(self):
        drivers = self.db.drivers
        drivers.create_index("exp_key", unique=True)

    def create_indexes(self):
        self.create_jobs_indexes()
        self.create_drivers_indexes()

    def jobs_complete(self, cursor=False):
        c = self.jobs.find(filter=dict(state=JOB_STATE_DONE))
        return c if cursor else list(c)

    def jobs_error(self, cursor=False):
        c = self.jobs.find(filter=dict(state=JOB_STATE_ERROR))
        return c if cursor else list(c)

    def jobs_running(self, cursor=False):
        """Running jobs that have not been flagged missing-in-action."""
        if cursor:
            raise NotImplementedError()
        rval = list(self.jobs.find(filter=dict(state=JOB_STATE_RUNNING)))
        # TODO: mark some as MIA
        rval = [r for r in rval if not r.get("MIA", False)]
        return rval

    def jobs_dead(self, cursor=False):
        """Running jobs that HAVE been flagged missing-in-action."""
        if cursor:
            raise NotImplementedError()
        rval = list(self.jobs.find(filter=dict(state=JOB_STATE_RUNNING)))
        # TODO: mark some as MIA
        rval = [r for r in rval if r.get("MIA", False)]
        return rval

    def jobs_queued(self, cursor=False):
        c = self.jobs.find(filter=dict(state=JOB_STATE_NEW))
        return c if cursor else list(c)

    def insert(self, job):
        """Return a job dictionary by inserting the job dict into the database"""
        try:
            cpy = copy.deepcopy(job)
            # -- this call adds an _id field to cpy
            _id = self.jobs.insert(cpy, check_keys=True)
            # -- so now we return the dict with the _id field
            assert _id == cpy["_id"]
            return cpy
        except pymongo.errors.OperationFailure as e:
            # -- translate pymongo error class into hyperopt error class
            #    This was meant to make it easier to catch insertion errors
            #    in a generic way even if different databases were used.
            #    ... but there's just MongoDB so far, so kinda goofy.
            raise OperationFailure(e)

    def delete(self, job):
        """Delete job[s]"""
        try:
            self.jobs.remove(job)
        except pymongo.errors.OperationFailure as e:
            # -- translate pymongo error class into hyperopt error class
            #    see insert() code for rationale.
            raise OperationFailure(e)

    def delete_all(self, cond=None):
        """Delete all jobs and attachments matching `cond` (default: all)."""
        if cond is None:
            cond = {}
        try:
            for d in self.jobs.find(filter=cond, projection=["_id", "_attachments"]):
                logger.info("deleting job %s" % d["_id"])
                for name, file_id in d.get("_attachments", []):
                    try:
                        self.gfs.delete(file_id)
                    except gridfs.errors.NoFile:
                        logger.error(f"failed to remove attachment {name}:{file_id}")
                self.jobs.remove(d)
        except pymongo.errors.OperationFailure as e:
            # -- translate pymongo error class into hyperopt error class
            #    see insert() code for rationale.
            raise OperationFailure(e)

    def delete_all_error_jobs(self):
        return self.delete_all(cond={"state": JOB_STATE_ERROR})

    def reserve(self, host_id, cond=None, exp_key=None):
        """Atomically claim one NEW job for `host_id`, or return None."""
        now = coarse_utcnow()
        if cond is None:
            cond = {}
        else:
            cond = copy.copy(
                cond
            )  # copy is important, will be modified, but only the top-level
        if exp_key is not None:
            cond["exp_key"] = exp_key
        # having an owner of None implies state==JOB_STATE_NEW, so this effectively
        # acts as a filter to make sure that only new jobs get reserved.
        if cond.get("owner") is not None:
            raise ValueError("refusing to reserve owned job")
        else:
            cond["owner"] = None
            cond[
                "state"
            ] = JOB_STATE_NEW  # theoretically this is redundant, theoretically
        try:
            rval = self.jobs.find_and_modify(
                cond,
                {
                    "$set": {
                        "owner": host_id,
                        "book_time": now,
                        "state": JOB_STATE_RUNNING,
                        "refresh_time": now,
                    }
                },
                new=True,
                upsert=False,
            )
        except pymongo.errors.OperationFailure as e:
            logger.error("Error during reserve_job: %s" % str(e))
            rval = None
        return rval

    def refresh(self, doc):
        """Touch `doc`'s refresh_time so it is not declared missing-in-action."""
        self.update(doc, dict(refresh_time=coarse_utcnow()))

    def update(self, doc, dct, collection=None, do_sanity_checks=True):
        """Return union of doc and dct, after making sure that dct has been
        added to doc in `collection`.

        This function does not modify either `doc` or `dct`.
        Optimistic concurrency: a "version" counter in the document is used
        to detect concurrent writers; the query only matches the version we
        last saw.
        """
        if collection is None:
            collection = self.collection
        dct = copy.deepcopy(dct)
        if "_id" not in doc:
            raise ValueError('doc must have an "_id" key to be updated')
        if "_id" in dct:
            if dct["_id"] != doc["_id"]:
                raise ValueError("cannot update the _id field")
            del dct["_id"]
        if "version" in dct:
            if dct["version"] != doc["version"]:
                warnings.warn('Ignoring "version" field in update dictionary')
        if "version" in doc:
            doc_query = dict(_id=doc["_id"], version=doc["version"])
            dct["version"] = doc["version"] + 1
        else:
            doc_query = dict(_id=doc["_id"])
            dct["version"] = 1
        try:
            # warning - if doc matches nothing then this function succeeds
            # N.B. this matches *at most* one entry, and possibly zero
            collection.update(doc_query, {"$set": dct}, upsert=False, multi=False)
        except pymongo.errors.OperationFailure as e:
            # -- translate pymongo error class into hyperopt error class
            #    see insert() code for rationale.
            raise OperationFailure(e)
        # update doc in-place to match what happened on the server side
        doc.update(dct)
        if do_sanity_checks:
            server_doc = collection.find_one(
                dict(_id=doc["_id"], version=doc["version"])
            )
            if server_doc is None:
                raise OperationFailure("updated doc not found : %s" % str(doc))
        return doc

    def attachment_names(self, doc):
        """Names of all attachments recorded on `doc`."""

        def as_str(name_id):
            assert isinstance(name_id[0], str), name_id
            return str(name_id[0])

        return list(map(as_str, doc.get("_attachments", [])))

    def set_attachment(self, doc, blob, name, collection=None):
        """Attach potentially large data string `blob` to `doc` by name `name`

        blob must be a string

        doc must have been saved in some collection (must have an _id), but not
        necessarily the jobs collection.

        name must be a string

        Returns None
        """
        # If there is already a file with the given name for this doc, then we will delete it
        # after writing the new file
        attachments = doc.get("_attachments", [])
        name_matches = [a for a in attachments if a[0] == name]
        # the filename is set to something so that fs.list() will display the file
        new_file_id = self.gfs.put(blob, filename="{}_{}".format(doc["_id"], name))
        logger.info(
            "stored blob of %i bytes with id=%s and filename %s_%s"
            % (len(blob), str(new_file_id), doc["_id"], name)
        )
        new_attachments = [a for a in attachments if a[0] != name] + [
            (name, new_file_id)
        ]
        try:
            ii = 0
            doc = self.update(
                doc, {"_attachments": new_attachments}, collection=collection
            )
            # there is a database leak until we actually delete the files that
            # are no longer pointed to by new_attachments
            while ii < len(name_matches):
                self.gfs.delete(name_matches[ii][1])
                ii += 1
        except Exception:
            # Report which old files were leaked, then re-raise.
            while ii < len(name_matches):
                logger.warning(
                    "Leak during set_attachment: old_file_id=%s" % (name_matches[ii][1])
                )
                ii += 1
            raise
        assert len([n for n in self.attachment_names(doc) if n == name]) == 1
        # return new_file_id

    def get_attachment(self, doc, name):
        """Retrieve data attached to `doc` by `attach_blob`.

        Raises OperationFailure if `name` does not correspond to an attached blob.

        Returns the blob as a string.
        """
        attachments = doc.get("_attachments", [])
        file_ids = [a[1] for a in attachments if a[0] == name]
        if not file_ids:
            raise OperationFailure("Attachment not found: %s" % name)
        if len(file_ids) > 1:
            raise OperationFailure("multiple name matches", (name, file_ids))
        return self.gfs.get(file_ids[0]).read()

    def delete_attachment(self, doc, name, collection=None):
        """Remove attachment `name` from `doc` and delete its GridFS file."""
        attachments = doc.get("_attachments", [])
        file_id = None
        for i, a in enumerate(attachments):
            if a[0] == name:
                file_id = a[1]
                break
        if file_id is None:
            raise OperationFailure("Attachment not found: %s" % name)
        del attachments[i]
        self.update(doc, {"_attachments": attachments}, collection=collection)
        self.gfs.delete(file_id)
class MongoTrials(Trials):
    """Trials maps on to an entire mongo collection. It's basically a wrapper
    around MongoJobs for now.

    As a concession to performance, this object permits trial filtering based
    on the exp_key, but I feel that's a hack. The case of `cmd` is similar--
    the exp_key and cmd are semantically coupled.

    WRITING TO THE DATABASE
    -----------------------
    The trials object is meant for *reading* a trials database. Writing
    to a database is different enough from writing to an in-memory
    collection that no attempt has been made to abstract away that
    difference. If you want to update the documents within
    a MongoTrials collection, then retrieve the `.handle` attribute (a
    MongoJobs instance) and use lower-level methods, or pymongo's
    interface directly. When you are done writing, call refresh() or
    refresh_tids() to bring the MongoTrials up to date.
    """

    asynchronous = True

    def __init__(self, arg, exp_key=None, cmd=None, workdir=None, refresh=True):
        """`arg` is either an existing MongoJobs handle or a connection string."""
        if not _has_mongo:
            raise Exception(
                "MongoTrials cannot import pymongo classes. Make sure that pymongo "
                "is available in your environment. E.g., try running 'import pymongo'"
            )
        if isinstance(arg, MongoJobs):
            self.handle = arg
        else:
            connection_string = arg
            self.handle = MongoJobs.new_from_connection_str(connection_string)
        self.handle.create_indexes()
        self._exp_key = exp_key
        self.cmd = cmd
        self.workdir = workdir
        if refresh:
            self.refresh()

    def view(self, exp_key=None, cmd=None, workdir=None, refresh=True):
        """Return a new MongoTrials over the same handle, with overrides."""
        rval = self.__class__(
            self.handle,
            exp_key=self._exp_key if exp_key is None else exp_key,
            cmd=self.cmd if cmd is None else cmd,
            workdir=self.workdir if workdir is None else workdir,
            refresh=refresh,
        )
        return rval

    def refresh_tids(self, tids):
        """Sync documents with `['tid']` in the list of `tids` from the
        database (not *to* the database).

        Local trial documents whose tid is not in `tids` are not
        affected by this call.  Local trial documents whose tid is in `tids` may
        be:

        * *deleted* (if db no longer has corresponding document), or
        * *updated* (if db has an updated document) or,
        * *left alone* (if db document matches local one).

        Additionally, if the db has a matching document, but there is no
        local trial with a matching tid, then the db document will be
        *inserted* into the local collection.
        """
        exp_key = self._exp_key
        query = {"exp_key": exp_key} if exp_key is not None else {}
        t0 = time.time()
        query["state"] = {"$ne": JOB_STATE_ERROR}
        if tids is not None:
            query["tid"] = {"$in": list(tids)}
        orig_trials = getattr(self, "_trials", [])
        _trials = orig_trials[:]  # copy to make sure it doesn't get screwed up
        if _trials:
            db_data = list(self.handle.jobs.find(query, projection=["_id", "version"]))
            # -- pull down a fresh list of ids from mongo
            if db_data:
                # make numpy data arrays
                db_data = numpy.rec.array(
                    [(x["_id"], int(x["version"])) for x in db_data],
                    names=["_id", "version"],
                )
                db_data.sort(order=["_id", "version"])
                db_data = db_data[get_most_recent_inds(db_data)]
                existing_data = numpy.rec.array(
                    [(x["_id"], int(x["version"])) for x in _trials],
                    names=["_id", "version"],
                )
                existing_data.sort(order=["_id", "version"])
                # which records are in db but not in existing, and vice versa
                db_in_existing = fast_isin(db_data["_id"], existing_data["_id"])
                existing_in_db = fast_isin(existing_data["_id"], db_data["_id"])
                # filtering out out-of-date records
                _trials = [_trials[_ind] for _ind in existing_in_db.nonzero()[0]]
                # new data is what's in db that's not in existing
                new_data = db_data[numpy.invert(db_in_existing)]
                # having removed the new and out of data data,
                # concentrating on data in db and existing for state changes
                db_data = db_data[db_in_existing]
                existing_data = existing_data[existing_in_db]
                try:
                    assert len(db_data) == len(existing_data)
                    assert (existing_data["_id"] == db_data["_id"]).all()
                    assert (existing_data["version"] <= db_data["version"]).all()
                except BaseException:
                    # Dump both sides to a crash-report pickle, then re-raise.
                    report_path = os.path.join(
                        os.getcwd(),
                        "hyperopt_refresh_crash_report_"
                        + str(numpy.random.randint(1e8))
                        + ".pkl",
                    )
                    logger.error(
                        "HYPEROPT REFRESH ERROR: writing error file to %s" % report_path
                    )
                    # BUGFIX: pickle requires a binary-mode file; text mode
                    # ("w") raised TypeError on the dump and lost the report.
                    with open(report_path, "wb") as _file:
                        pickler.dump(
                            {"db_data": db_data, "existing_data": existing_data}, _file
                        )
                    raise
                same_version = existing_data["version"] == db_data["version"]
                _trials = [_trials[_ind] for _ind in same_version.nonzero()[0]]
                version_changes = existing_data[numpy.invert(same_version)]
                # actually get the updated records
                update_ids = new_data["_id"].tolist() + version_changes["_id"].tolist()
                num_new = len(update_ids)
                update_query = copy.deepcopy(query)
                update_query["_id"] = {"$in": update_ids}
                updated_trials = list(self.handle.jobs.find(update_query))
                _trials.extend(updated_trials)
            else:
                num_new = 0
                _trials = []
        else:
            # this case is for performance, though should be able to be removed
            # without breaking correctness.
            _trials = list(self.handle.jobs.find(query))
            if _trials:
                _trials = [_trials[_i] for _i in get_most_recent_inds(_trials)]
            num_new = len(_trials)
        logger.debug(
            "Refresh data download took %f seconds for %d ids"
            % (time.time() - t0, num_new)
        )
        if tids is not None:
            # -- If tids were given, then _trials only contains
            #    documents with matching tids. Here we augment these
            #    fresh matching documents, with our current ones whose
            #    tids don't match.
            new_trials = _trials
            tids_set = set(tids)
            assert all(t["tid"] in tids_set for t in new_trials)
            old_trials = [t for t in orig_trials if t["tid"] not in tids_set]
            _trials = new_trials + old_trials
        # -- reassign new trials to self, in order of increasing tid
        jarray = numpy.array([j["_id"] for j in _trials])
        jobsort = jarray.argsort()
        self._trials = [_trials[_idx] for _idx in jobsort]
        self._specs = [_trials[_idx]["spec"] for _idx in jobsort]
        self._results = [_trials[_idx]["result"] for _idx in jobsort]
        self._miscs = [_trials[_idx]["misc"] for _idx in jobsort]

    def refresh(self):
        self.refresh_tids(None)

    def _insert_trial_docs(self, docs):
        rval = []
        for doc in docs:
            rval.append(self.handle.jobs.insert(doc))
        return rval

    def count_by_state_unsynced(self, arg):
        """Count jobs in state `arg` (an int state or iterable of states)."""
        exp_key = self._exp_key
        # TODO: consider searching by SON rather than dict
        if isinstance(arg, int):
            if arg not in JOB_STATES:
                raise ValueError("invalid state", arg)
            query = dict(state=arg)
        else:
            assert hasattr(arg, "__iter__")
            states = list(arg)
            assert all([x in JOB_STATES for x in states])
            query = dict(state={"$in": states})
        if exp_key is not None:
            query["exp_key"] = exp_key
        rval = self.handle.jobs.find(query).count()
        return rval

    def delete_all(self, cond=None):
        """Delete all matching jobs plus any GridFS files matching `cond`."""
        cond = {} if cond is None else dict(cond)
        if self._exp_key:
            cond["exp_key"] = self._exp_key
        # -- remove all documents matching condition
        self.handle.delete_all(cond)
        gfs = self.handle.gfs
        for filename in gfs.list():
            try:
                fdoc = gfs.get_last_version(filename=filename, **cond)
            except gridfs.errors.NoFile:
                continue
            gfs.delete(fdoc._id)
        self.refresh()

    def new_trial_ids(self, last_id):
        db = self.handle.db
        # N.B. that the exp key is *not* used here. It was once, but it caused
        # a nasty bug: tids were generated by a global experiment
        # with exp_key=None, running a suggest() that introduced sub-experiments
        # with exp_keys, which ran jobs that did result injection. The tids of
        # injected jobs were sometimes unique within an experiment, and
        # sometimes not. Hilarious!
        #
        # Solution: tids are generated to be unique across the db, not just
        # within an exp_key.
        #
        # -- mongo docs say you can't upsert an empty document
        query = {"a": 0}
        doc = None
        while doc is None:
            doc = db.job_ids.find_and_modify(
                query, {"$inc": {"last_id": last_id}}, upsert=True
            )
            if doc is None:
                logger.warning("no last_id found, re-trying")
                time.sleep(1.0)
        lid = doc.get("last_id", 0)
        return list(range(lid, lid + last_id))

    def trial_attachments(self, trial):
        """
        Attachments to a single trial (e.g. learned weights)

        Returns a dictionary interface to the attachments.
        """

        # don't offer more here than in MongoCtrl
        class Attachments:
            def __init__(self, handle: MongoJobs):
                self.handle = handle

            def __contains__(self, name):
                return name in self.handle.attachment_names(doc=trial)

            def __len__(self):
                return len(self.handle.attachment_names(doc=trial))

            def __iter__(self):
                return iter(self.handle.attachment_names(doc=trial))

            def __getitem__(self, name):
                try:
                    return self.handle.get_attachment(doc=trial, name=name)
                except OperationFailure:
                    raise KeyError(name)

            def __setitem__(self, name, value):
                self.handle.set_attachment(
                    doc=trial, blob=value, name=name, collection=self.handle.db.jobs
                )

            def __delitem__(self, name):
                raise NotImplementedError("delete trial_attachment")

            def keys(self):
                return [k for k in self]

            def values(self):
                return [self[k] for k in self]

            def items(self):
                return [(k, self[k]) for k in self]

        return Attachments(self.handle)

    @property
    def attachments(self):
        """
        Attachments to a Trials set (such as bandit args).

        Support syntax for load:  self.attachments[name]
        Support syntax for store: self.attachments[name] = value
        """
        gfs = self.handle.gfs
        query = {}
        if self._exp_key:
            query["exp_key"] = self._exp_key

        class Attachments:
            def __iter__(_self):
                if query:
                    # -- gfs.list does not accept query kwargs
                    #    (at least, as of pymongo 2.4)
                    filenames = [fname for fname in gfs.list() if fname in _self]
                else:
                    filenames = gfs.list()
                return iter(filenames)

            def __contains__(_self, name):
                return gfs.exists(filename=name, **query)

            def __getitem__(_self, name):
                try:
                    rval = gfs.get_version(filename=name, **query).read()
                    return rval
                except gridfs.NoFile:
                    raise KeyError(name)

            def __setitem__(_self, name, value):
                if gfs.exists(filename=name, **query):
                    gout = gfs.get_last_version(filename=name, **query)
                    gfs.delete(gout._id)
                gfs.put(value, filename=name, encoding="utf-8", **query)

            def __delitem__(_self, name):
                gout = gfs.get_last_version(filename=name, **query)
                gfs.delete(gout._id)

        return Attachments()
class MongoWorker:
    poll_interval = 3.0  # -- seconds
    workdir = None

    def __init__(
        self,
        mj,
        poll_interval=poll_interval,
        workdir=workdir,
        exp_key=None,
        logfilename="logfile.txt",
    ):
        """
        mj - MongoJobs interface to jobs collection
        poll_interval - seconds between reservation attempts
        workdir - string
        exp_key - restrict reservations to this key
        logfilename - per-job log file attached to the root logger while a
            job runs (set falsy to disable)
        """
        self.mj = mj
        self.poll_interval = poll_interval
        self.workdir = workdir
        self.exp_key = exp_key
        self.logfilename = logfilename

    def make_log_handler(self):
        """Create the FileHandler used to capture logs while a job runs."""
        self.log_handler = logging.FileHandler(self.logfilename)
        self.log_handler.setFormatter(
            logging.Formatter(fmt="%(levelname)s (%(name)s): %(message)s")
        )
        self.log_handler.setLevel(logging.INFO)

    def run_one(self, host_id=None, reserve_timeout=None, erase_created_workdir=False):
        """Reserve one job, evaluate it, and write the result back.

        Polls the jobs collection until a job is reserved (or
        `reserve_timeout` seconds elapse -> ReserveTimeout), runs the job's
        cmd protocol in `workdir`, and marks the job DONE or ERROR.
        """
        if host_id is None:
            # NOTE(review): the trailing comma makes host_id a 1-tuple, so the
            # "owner" field is stored as a list in mongo.  Preserved as-is for
            # compatibility with existing databases -- confirm before changing.
            host_id = ("%s:%i" % (socket.gethostname(), os.getpid()),)
        job = None
        start_time = time.time()
        mj = self.mj
        while job is None:
            if reserve_timeout and (time.time() - start_time) > reserve_timeout:
                raise ReserveTimeout()
            job = mj.reserve(host_id, exp_key=self.exp_key)
            if not job:
                interval = 1 + numpy.random.rand() * (float(self.poll_interval) - 1.0)
                logger.info("no job found, sleeping for %.1fs" % interval)
                time.sleep(interval)
        logger.debug("job found: %s" % str(job))
        # -- don't let the cmd mess up our trial object
        spec = spec_from_misc(job["misc"])
        ctrl = MongoCtrl(
            trials=MongoTrials(mj, exp_key=job["exp_key"], refresh=False),
            read_only=False,
            current_trial=job,
        )
        if self.workdir is None:
            workdir = job["misc"].get("workdir", os.getcwd())
            if workdir is None:
                workdir = ""
            workdir = os.path.join(workdir, str(job["_id"]))
        else:
            workdir = self.workdir
        workdir = os.path.abspath(os.path.expanduser(workdir))
        try:
            root_logger = logging.getLogger()
            if self.logfilename:
                self.make_log_handler()
                root_logger.addHandler(self.log_handler)
            cmd = job["misc"]["cmd"]
            cmd_protocol = cmd[0]
            try:
                # Resolve the objective callable according to the cmd protocol.
                if cmd_protocol == "cpickled fn":
                    worker_fn = pickler.loads(cmd[1])
                elif cmd_protocol == "call evaluate":
                    bandit = pickler.loads(cmd[1])
                    worker_fn = bandit.evaluate
                elif cmd_protocol == "token_load":
                    cmd_toks = cmd[1].split(".")
                    cmd_module = ".".join(cmd_toks[:-1])
                    worker_fn = exec_import(cmd_module, cmd[1])
                elif cmd_protocol == "bandit_json evaluate":
                    bandit = json_call(cmd[1])
                    worker_fn = bandit.evaluate
                elif cmd_protocol == "driver_attachment":
                    # name = 'driver_attachment_%s' % job['exp_key']
                    blob = ctrl.trials.attachments[cmd[1]]
                    bandit_name, bandit_args, bandit_kwargs = pickler.loads(blob)
                    worker_fn = json_call(
                        bandit_name, args=bandit_args, kwargs=bandit_kwargs
                    ).evaluate
                elif cmd_protocol == "domain_attachment":
                    blob = ctrl.trials.attachments[cmd[1]]
                    try:
                        domain = pickler.loads(blob)
                    except BaseException as e:
                        logger.info("Error while unpickling.")
                        raise
                    worker_fn = domain.evaluate
                else:
                    raise ValueError("Unrecognized cmd protocol", cmd_protocol)
                with temp_dir(workdir, erase_created_workdir), working_dir(workdir):
                    result = worker_fn(spec, ctrl)
                result = SONify(result)
            except BaseException as e:
                # XXX: save exception to database, but if this fails, then
                #      at least raise the original traceback properly
                logger.info("job exception: %s" % str(e))
                ctrl.checkpoint()
                mj.update(
                    job, {"state": JOB_STATE_ERROR, "error": (str(type(e)), str(e))}
                )
                raise
        finally:
            if self.logfilename:
                root_logger.removeHandler(self.log_handler)
        logger.info("job finished: %s" % str(job["_id"]))
        attachments = result.pop("attachments", {})
        for aname, aval in list(attachments.items()):
            logger.info(
                "mongoexp: saving attachment name=%s (%i bytes)" % (aname, len(aval))
            )
            ctrl.attachments[aname] = aval
        ctrl.checkpoint(result)
        mj.update(job, {"state": JOB_STATE_DONE})
class MongoCtrl(Ctrl):
    """
    Attributes:

    current_trial - current job document
    jobs - MongoJobs object in which current_trial resides
    read_only - True means don't change the db
    """

    def __init__(self, trials, current_trial, read_only):
        self.trials = trials
        self.current_trial = current_trial
        self.read_only = read_only

    def debug(self, *args, **kwargs):
        # XXX: This is supposed to log to db
        return logger.debug(*args, **kwargs)

    def info(self, *args, **kwargs):
        # XXX: This is supposed to log to db
        return logger.info(*args, **kwargs)

    def warn(self, *args, **kwargs):
        # XXX: This is supposed to log to db
        # BUGFIX: logger.warn is a deprecated alias (removed in Python 3.13);
        # use logger.warning instead.
        return logger.warning(*args, **kwargs)

    def error(self, *args, **kwargs):
        # XXX: This is supposed to log to db
        return logger.error(*args, **kwargs)

    def checkpoint(self, result=None):
        """Touch the current trial's refresh_time; optionally store `result`."""
        if not self.read_only:
            handle = self.trials.handle
            handle.refresh(self.current_trial)
            if result is not None:
                return handle.update(self.current_trial, dict(result=result))

    @property
    def attachments(self):
        """
        Support syntax for load:  self.attachments[name]
        Support syntax for store: self.attachments[name] = value
        """
        return self.trials.trial_attachments(trial=self.current_trial)

    @property
    def set_attachment(self):
        # XXX: Is there a better deprecation error?
        raise RuntimeError(
            "set_attachment deprecated. Use `self.attachments[name] = value`"
        )
def exec_import(cmd_module, cmd):
    """Import `cmd_module` and evaluate dotted path `cmd` within it.

    Returns the object named by `cmd` (e.g. "pkg.mod.func").

    BUGFIX: the previous implementation ran
    ``exec(f"import ...; worker_fn = {cmd}")`` and then returned the local
    `worker_fn` -- but in Python 3, assignments inside exec() do not rebind
    function locals, so it always returned None.  Executing in an explicit
    namespace dict fixes that.
    """
    namespace = {}
    exec(f"import {cmd_module}; worker_fn = {cmd}", namespace)
    return namespace["worker_fn"]
def as_mongo_str(s):
    """Return `s` with a "mongo://" scheme prefix, adding one if absent."""
    prefix = "mongo://"
    return s if s.startswith(prefix) else "mongo://%s" % s
def number_of_jobs_in_db(options):
    """Count documents in the 'jobs' collection of `options.mongo`."""
    handle = MongoJobs.new_from_connection_str(as_mongo_str(options.mongo) + "/jobs")
    return handle.jobs.find().count()
def main_worker_helper(options, args):
    """Run up to `options.max_jobs` MongoWorker evaluations.

    With subprocesses enabled (default), each job runs in a recursively
    spawned copy of this script with --max-jobs=1.  SIGHUP/SIGINT/SIGTERM
    trigger shutdown; SIGUSR1 waits for the current job then quits.
    """
    num_jobs = int(options.max_jobs)
    if options.last_job_timeout is not None:
        end_time = time.time() + float(options.last_job_timeout)
    else:
        end_time = None

    def sighandler_shutdown(signum, frame):
        logger.info("Caught signal %i, shutting down." % signum)
        raise Shutdown(signum)

    def sighandler_wait_quit(signum, frame):
        logger.info("Caught signal %i, shutting down." % signum)
        raise WaitQuit(signum)

    is_windows = os.name == "nt"
    if not is_windows:
        # These signals don't exist on Windows.
        signal.signal(signal.SIGHUP, sighandler_shutdown)
        signal.signal(signal.SIGUSR1, sighandler_wait_quit)
    signal.signal(signal.SIGINT, sighandler_shutdown)
    signal.signal(signal.SIGTERM, sighandler_shutdown)

    if num_jobs > 1:
        subproc = None
        consecutive_errs = 0
        while num_jobs and consecutive_errs < int(options.max_consecutive_failures):
            # exit due to time limit:
            if end_time and time.time() > end_time:
                logger.info("Exiting due to last_job_timeout")
                return
            # exit due to threshold on number of jobs:
            if (
                options.max_jobs_in_db is not None
                and options.max_jobs_in_db != sys.maxsize
            ):
                num_jobs_db = number_of_jobs_in_db(options)
                if int(num_jobs_db) >= int(options.max_jobs_in_db):
                    logger.info(
                        "Exiting because there are "
                        + str(num_jobs_db)
                        + " jobs in the database, but the limit is "
                        + str(options.max_jobs_in_db)
                    )
                    return
            # try to run one MongoWorker
            try:
                if options.use_subprocesses:
                    # recursive Popen, dropping N from the argv
                    # By using another process to run this job
                    # we protect ourselves from memory leaks, bad cleanup
                    # and other annoying details.
                    # The tradeoff is that a large dataset must be reloaded once for
                    # each subprocess.
                    sub_argv = [
                        sys.argv[0],
                        "--poll-interval=%s" % options.poll_interval,
                        "--max-jobs=1",
                        "--mongo=%s" % options.mongo,
                        "--reserve-timeout=%s" % options.reserve_timeout,
                    ]
                    if options.workdir is not None:
                        sub_argv.append("--workdir=%s" % options.workdir)
                    if options.exp_key is not None:
                        sub_argv.append("--exp-key=%s" % options.exp_key)
                    subproc = subprocess.Popen(sub_argv)
                    retcode = subproc.wait()
                    subproc = None
                else:
                    current_mongo_str = as_mongo_str(options.mongo)
                    # Remove this if not necessary:
                    if "/jobs" not in current_mongo_str:
                        current_mongo_str += "/jobs"
                    mj = MongoJobs.new_from_connection_str(current_mongo_str)
                    mworker = MongoWorker(
                        mj,
                        float(options.poll_interval),
                        workdir=options.workdir,
                        exp_key=options.exp_key,
                    )
                    mworker.run_one(reserve_timeout=float(options.reserve_timeout))
                    retcode = 0
            except Shutdown:
                # this is the normal way to stop the infinite loop (if originally N=-1)
                if subproc:
                    # proc.terminate() is only available as of 2.6
                    os.kill(
                        subproc.pid,
                        signal.CTRL_C_EVENT if is_windows else signal.SIGTERM,
                    )
                    return subproc.wait()
                return 0
            except WaitQuit:
                # -- sending SIGUSR1 to a looping process will cause it to
                #    break out of the loop after the current subprocess finishes
                #    normally.
                if subproc:
                    return subproc.wait()
                return 0
            if retcode != 0:
                consecutive_errs += 1
            else:
                consecutive_errs = 0
            num_jobs -= 1
        logger.info(
            "exiting with N=%i after %i consecutive exceptions"
            % (num_jobs, consecutive_errs)
        )
    elif num_jobs == 1:
        # XXX: the name of the jobs collection is a parameter elsewhere,
        #      so '/jobs' should not be hard-coded here
        mj = MongoJobs.new_from_connection_str(as_mongo_str(options.mongo) + "/jobs")
        mworker = MongoWorker(
            mj,
            float(options.poll_interval),
            workdir=options.workdir,
            exp_key=options.exp_key,
        )
        mworker.run_one(reserve_timeout=float(options.reserve_timeout))
    else:
        raise ValueError("N <= 0")
def main():
    """CLI entry point: configure stderr logging and run the worker loop."""
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    sys.exit(main_worker())
def main_worker():
    """Parse command-line options and dispatch to main_worker_helper.

    Returns -1 (after printing help) when positional arguments are given;
    otherwise returns whatever main_worker_helper returns.
    """
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option(
        "--exp-key",
        dest="exp_key",
        default=None,
        metavar="str",
        help="identifier for this workers's jobs",
    )
    parser.add_option(
        "--last-job-timeout",
        dest="last_job_timeout",
        metavar="T",
        default=None,
        help="Do not reserve a job after T seconds have passed",
    )
    parser.add_option(
        "--max-consecutive-failures",
        dest="max_consecutive_failures",
        metavar="N",
        default=4,
        help="stop if N consecutive jobs fail (default: 4)",
    )
    parser.add_option(
        "--max-jobs",
        dest="max_jobs",
        default=sys.maxsize,
        help="stop after running this many jobs (default: inf)",
    )
    parser.add_option(
        "--mongo",
        dest="mongo",
        default="localhost/hyperopt",
        help="<host>[:port]/<db> for IPC and job storage",
    )
    parser.add_option(
        "--poll-interval",
        dest="poll_interval",
        metavar="N",
        default=5,
        # BUGFIX: help text was missing the closing parenthesis.
        help="check work queue every 1 < T < N seconds (default: 5)",
    )
    parser.add_option(
        "--reserve-timeout",
        dest="reserve_timeout",
        metavar="T",
        default=120.0,
        help="poll database for up to T seconds to reserve a job",
    )
    parser.add_option(
        "--workdir",
        dest="workdir",
        default=None,
        help="root workdir (default: load from mongo)",
        metavar="DIR",
    )
    parser.add_option(
        "--no-subprocesses",
        dest="use_subprocesses",
        default=True,
        action="store_false",
        help="do not use sub-processes for each objective evaluation, the objective function will run in the same "
        "python process (useful to keep in memory large data across objective evals) but you have to pay "
        "attention to memory leaks (default: False)",
    )
    parser.add_option(
        "--max-jobs-in-db",
        dest="max_jobs_in_db",
        default=sys.maxsize,
        help="max jobs in db (default: " + str(sys.maxsize) + ")",
    )
    (options, args) = parser.parse_args()
    if args:
        # Positional arguments are not accepted.
        parser.print_help()
        return -1
    return main_worker_helper(options, args)
| 22,911
|
462
|
package com.ppdai.infrastructure.mq.biz;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.junit.runners.Suite.SuiteClasses;
import com.ppdai.infrastructure.mq.biz.service.impl.AuditLogServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.ConsumerCommitServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.ConsumerGroupCheckServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.ConsumerGroupConsumerCheckServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.ConsumerGroupServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.ConsumerGroupTopicCheckServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.ConsumerGroupTopicServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.ConsumerServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.DbNodeServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.EmailServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.LogServiceImplTest;
import com.ppdai.infrastructure.mq.biz.service.impl.Message01ServiceImplTest;
/**
 * Aggregate JUnit suite running all service-implementation tests in this module.
 *
 * <p>Note: the previous suite listed {@code ConsumerGroupTopicServiceImplTest} and
 * {@code DbNodeServiceImplTest} twice, executing those test classes twice per run;
 * the duplicates have been removed.
 */
@RunWith(Suite.class)
@SuiteClasses({ AuditLogServiceImplTest.class, ConsumerCommitServiceImplTest.class,
		ConsumerGroupCheckServiceImplTest.class, ConsumerGroupConsumerCheckServiceImplTest.class,
		ConsumerGroupServiceImplTest.class, ConsumerGroupTopicCheckServiceImplTest.class,
		ConsumerGroupTopicServiceImplTest.class, ConsumerServiceImplTest.class,
		DbNodeServiceImplTest.class, EmailServiceImplTest.class, LogServiceImplTest.class,
		Message01ServiceImplTest.class })
public class AllBizTests {
}
| 539
|
435
|
<filename>pycon-uk-2017/videos/pycon-uk-2017-from-future-import-truth.json<gh_stars>100-1000
{
"copyright_text": "Standard YouTube License",
"description": "Fake news, alternative facts, post-truth: this has definitely been the year of misinformation.\n\nWhat's more, the rise of social media is rising as the primary news source for many people means that traditional ways of ensuring our information is correct are no longer up to scratch. Since 2010, Full Fact \u2013 the UK's independent factchecking charity \u2013 has been uncovering the truth behind statements made by politicians and in the media. We\u2019ve been at the forefront of public debate in the country, having factchecked 3 elections and 2 referendums in the past 7 years. In 2013, we began thinking about what a system that could augment and automate the factchecking process might look like. In the past year we\u2019ve come a long way to making it reality.\n\nIn this talk, I\u2019ll take you through the unusual technical and philosophical difficulties that creating such a system entails, and show you how Python has helped us begin to tackle them.",
"duration": 1603,
"language": "eng",
"recorded": "2017-10-29T17:00:00+01:00",
"related_urls": [
{
"label": "event schedule",
"url": "http://2017.pyconuk.org/schedule/"
}
],
"speakers": [
"<NAME>"
],
"tags": [],
"thumbnail_url": "https://i.ytimg.com/vi/-TRRUsnmyV4/hqdefault.jpg",
"title": "from __future__ import Truth",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=-TRRUsnmyV4"
}
]
}
| 496
|
1,353
|
/*
* Copyright (c) 2014, <NAME>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.twelvemonkeys.imageio.plugins.pnm;
import javax.imageio.IIOException;
import javax.imageio.stream.ImageInputStream;
import java.io.IOException;
import static com.twelvemonkeys.lang.Validate.notNull;
/**
 * Base class for parsers of the PNM family of image headers (PBM/PGM/PPM,
 * PAM and PFM). The static {@link #parse(ImageInputStream)} entry point reads
 * the two-byte magic number and dispatches to the matching concrete parser.
 */
abstract class HeaderParser {
    protected final ImageInputStream input;

    protected HeaderParser(final ImageInputStream input) {
        this.input = notNull(input);
    }

    /** Parses the header for the concrete format, leaving the stream at the pixel data. */
    public abstract PNMHeader parse() throws IOException;

    /**
     * Reads the magic number from {@code input} and parses the header with the
     * parser for that format.
     *
     * @throws IIOException if the magic number is not a known PNM-family type
     */
    public static PNMHeader parse(ImageInputStream input) throws IOException {
        final short type = input.readShort();
        return createParser(input, type).parse();
    }

    private static HeaderParser createParser(final ImageInputStream input, final short type) throws IOException {
        // Plain and raw variants of PBM/PGM/PPM share one parser.
        final boolean isPNM = type == PNM.PBM_PLAIN || type == PNM.PBM
                || type == PNM.PGM_PLAIN || type == PNM.PGM
                || type == PNM.PPM_PLAIN || type == PNM.PPM;
        if (isPNM) {
            return new PNMHeaderParser(input, type);
        }
        if (type == PNM.PAM) {
            return new PAMHeaderParser(input);
        }
        if (type == PNM.PFM_GRAY || type == PNM.PFM_RGB) {
            return new PFMHeaderParser(input, type);
        }

        throw new IIOException("Unexpected type for PBM, PGM or PPM format: " + type);
    }
}
| 1,005
|
3,395
|
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RVL-CDIP (Ryerson Vision Lab Complex Document Information Processing) dataset"""
import os
import datasets
from datasets.tasks import ImageClassification
_CITATION = """\
@inproceedings{harley2015icdar,
title = {Evaluation of Deep Convolutional Nets for Document Image Classification and Retrieval},
author = {<NAME> and <NAME> and <NAME>},
booktitle = {International Conference on Document Analysis and Recognition ({ICDAR})}},
year = {2015}
}
"""
_DESCRIPTION = """\
The RVL-CDIP (Ryerson Vision Lab Complex Document Information Processing) dataset consists of 400,000 grayscale images in 16 classes, with 25,000 images per class. There are 320,000 training images, 40,000 validation images, and 40,000 test images.
"""
_HOMEPAGE = "https://www.cs.cmu.edu/~aharley/rvl-cdip/"
_LICENSE = "https://www.industrydocuments.ucsf.edu/help/copyright/"
_URLS = {
"rvl-cdip": "https://huggingface.co/datasets/rvl_cdip/resolve/main/data/rvl-cdip.tar.gz",
}
_METADATA_URLS = {
"train": "https://huggingface.co/datasets/rvl_cdip/resolve/main/data/train.txt",
"test": "https://huggingface.co/datasets/rvl_cdip/resolve/main/data/test.txt",
"val": "https://huggingface.co/datasets/rvl_cdip/resolve/main/data/val.txt",
}
_CLASSES = [
"letter",
"form",
"email",
"handwritten",
"advertisement",
"scientific report",
"scientific publication",
"specification",
"file folder",
"news article",
"budget",
"invoice",
"presentation",
"questionnaire",
"resume",
"memo",
]
_IMAGES_DIR = "images/"
class RvlCdip(datasets.GeneratorBasedBuilder):
    """Ryerson Vision Lab Complex Document Information Processing dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata (features, supervised keys, citation)."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "label": datasets.ClassLabel(names=_CLASSES),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            task_templates=[ImageClassification(image_column="image", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and label files, one generator per split."""
        archive_path = dl_manager.download(_URLS["rvl-cdip"])
        labels_path = dl_manager.download(_METADATA_URLS)
        # (split name, key into the downloaded label-file mapping)
        split_specs = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, "val"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "labels_filepath": labels_path[labels_key],
                },
            )
            for split_name, labels_key in split_specs
        ]

    @staticmethod
    def _get_image_to_class_map(data):
        """Map archive-relative image paths to integer class ids.

        Each element of ``data`` is a line of the form "<path> <class id>".
        """
        return {
            os.path.join(_IMAGES_DIR, image_path): int(class_id)
            for image_path, class_id in (line.split(" ") for line in data)
        }

    def _generate_examples(self, archive_iterator, labels_filepath):
        """Yield (key, example) pairs for every labelled image in the archive."""
        with open(labels_filepath, encoding="utf-8") as f:
            label_lines = f.read().splitlines()
        image_to_class_id = self._get_image_to_class_map(label_lines)
        for file_path, file_obj in archive_iterator:
            # Skip archive members outside the images directory and images
            # that are not part of this split's label file.
            if not file_path.startswith(_IMAGES_DIR):
                continue
            class_id = image_to_class_id.get(file_path)
            if class_id is None:
                continue
            yield file_path, {
                "image": {"path": file_path, "bytes": file_obj.read()},
                "label": _CLASSES[class_id],
            }
|
1,645
|
<reponame>smsahu/seldon-server
/*
* Seldon -- open source prediction engine
* =======================================
*
* Copyright 2011-2015 Seldon Technologies Ltd and Rummble Ltd (http://www.seldon.io/)
*
* ********************************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* ********************************************************************************************
*/
package io.seldon.recommendation.model;
import io.seldon.clustering.recommender.RecommendationContext;
import io.seldon.mf.PerClientExternalLocationListener;
import io.seldon.recommendation.ClientStrategy;
import io.seldon.resources.external.NewResourceNotifier;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import org.apache.log4j.Logger;
/**
* @author firemanphil
* Date: 28/04/15
* Time: 12:16
*/
/**
 * Manages per-client model stores loaded from external resource locations.
 * Subclasses implement {@link #loadModel(String, String)} to turn a resource
 * location into an in-memory model of type {@code T}. Loading happens
 * asynchronously on the supplied executor.
 *
 * @author firemanphil
 *         Date: 28/04/15
 *         Time: 12:16
 */
public abstract class ModelManager<T> implements PerClientExternalLocationListener {
    private static final String MODEL_PROPERTY_NAME = "io.seldon.algorithm.model.name";
    private static Logger logger = Logger.getLogger(ModelManager.class.getName());
    // Keyed by "<client>:<node base or pattern>"; the inner map is keyed by
    // model name, with "" holding the default (unnamed) model.
    private final ConcurrentMap<String, ConcurrentMap<String,T>> clientStores
            = new ConcurrentHashMap<>();
    private final Executor executor;
    private final Set<String> nodeBases;

    public ModelManager(NewResourceNotifier notifier, Set<String> nodePatterns) {
        this(notifier, nodePatterns, Executors.newFixedThreadPool(2));
    }

    /**
     * @param notifier source of new-resource notifications to subscribe to
     * @param nodePatterns node base patterns to listen on
     * @param executor executor used for asynchronous model loading
     */
    public ModelManager(NewResourceNotifier notifier, Set<String> nodePatterns, Executor executor) {
        this.nodeBases = nodePatterns;
        for (String pattern : nodePatterns) {
            notifier.addListener(pattern, this);
        }
        this.executor = executor;
    }

    /** Returns the configured node base contained in {@code nodePattern}, or null when none matches. */
    private String findMatchingBase(final String nodePattern) {
        for (String base : nodeBases) {
            if (nodePattern.contains(base)) {
                return base;
            }
        }
        return null;
    }

    @Override
    public void newClientLocation(final String client, final String location, final String nodePattern) {
        logger.info("New location "+client+" : "+location+ " : "+nodePattern);
        final String rightBase = findMatchingBase(nodePattern);
        if (rightBase == null) {
            // Previously this fell through to an uninformative NullPointerException.
            throw new IllegalStateException("No configured node base matches pattern " + nodePattern + " for client " + client);
        }
        final String finalPartOfNode = nodePattern.replace(rightBase,"").replaceFirst("/", "");
        executor.execute(new Runnable() {
            @Override
            public void run() {
                // NOTE(review): the store key is built from the full nodePattern here,
                // while getClientStore/clientLocationDeleted build keys from the node
                // base; this only lines up when the pattern equals the base — confirm.
                String key = getKey(client, nodePattern);
                logger.info("Loading with client:"+client+" location:"+location+" key:"+key+" finalPartOfNode:"+finalPartOfNode);
                T result = loadModel(location, client);
                // Fixed log message: separator between key and finalPartOfNode was missing.
                logger.info("Loaded with client:"+client+" location:"+location+" key:"+key+" finalPartOfNode:"+finalPartOfNode+" result:"+result);
                clientStores.putIfAbsent(key, new ConcurrentHashMap<String, T>());
                clientStores.get(key).put(finalPartOfNode, result);
                for (Map<String, T> store : clientStores.values()) {
                    for (String t : store.keySet()) {
                        logger.info(t + " " + store.get(t));
                    }
                }
            }
        });
    }

    public T getClientStore(String client, RecommendationContext.OptionsHolder options){
        String type = nodeBases.iterator().next();
        return getClientStore(client, type, options);
    }

    /**
     * Returns the model for the given client/type, honouring the model name
     * configured under {@code io.seldon.algorithm.model.name}; falls back to
     * the default (unnamed) model when no named model is configured or found.
     */
    public T getClientStore(String client, String type, RecommendationContext.OptionsHolder options){
        String modelName = options.getStringOption(MODEL_PROPERTY_NAME);
        String key = getKey(client, type);
        if (logger.isDebugEnabled())
            logger.debug("Get client store for client "+client+" type "+type+" modelName "+modelName+" key:"+key);
        if (!clientStores.containsKey(key))
        {
            if (logger.isDebugEnabled())
                logger.debug("Failed to find store with key:"+key+" for client "+client);
            return null;
        }
        // A missing model name previously caused a NullPointerException in the
        // switch statement; treat it the same as the default model.
        if (modelName == null || ClientStrategy.DEFAULT_NAME.equals(modelName)) {
            logger.debug("Returning default store for client "+modelName);
            return clientStores.get(key).get("");
        }
        T store = clientStores.get(key).get(modelName);
        if (store == null) {
            logger.warn("Couldn't find model under name " + modelName + " for client " + client);
            return clientStores.get(key).get("");
        }
        return store;
    }

    @Override
    public void clientLocationDeleted(String client, String nodePattern) {
        String rightBase = findMatchingBase(nodePattern);
        if (rightBase == null) {
            // Previously a non-matching pattern caused a NullPointerException;
            // there is nothing to remove in that case.
            logger.warn("No configured node base matches pattern " + nodePattern + " for client " + client);
            return;
        }
        String key = getKey(client, rightBase);
        final String finalPartOfNode = nodePattern.replace(rightBase, "").replaceFirst("/", "");
        if(clientStores.get(key)!=null){
            clientStores.get(key).remove(finalPartOfNode);
        }
    }

    /** Loads (or reloads) the model stored at {@code location} for {@code client}. */
    protected abstract T loadModel(String location,String client);

    /** Composite store key: "<client>:<key>". */
    private String getKey(String client,String key)
    {
        return client + ":" + key;
    }
}
| 2,365
|
1,538
|
// Copyright 2010 Google Inc. All Rights Reserved.
// Authors: <EMAIL> (<NAME>), <EMAIL> (<NAME>)
//
// This file provides a few functions for hashing strings. On x86-64
// hardware as of early 2010, CityHash64() is much faster than
// MurmurHash64(), and passes the quality-of-hash tests in
// ./hasheval/hasheval_test.cc, among others, with flying colors. The
// difference in speed can be a factor of two for strings of 50 to 64
// bytes, and sometimes even more for cache-resident longer strings.
//
// CityHash128() is optimized for relatively long strings and returns
// a 128-bit hash. For strings more than about 2000 bytes it can be
// faster than CityHash64().
//
// Functions in the CityHash family are not suitable for cryptography.
//
// By the way, for some hash functions, given strings a and b, the hash
// of a+b is easily derived from the hashes of a and b. This property
// doesn't hold for any hash functions in this file.
#ifndef UTIL_HASH_CITY_H_
#define UTIL_HASH_CITY_H_
#include <stddef.h> // for size_t.
#include "kudu/gutil/int128.h"
#include "kudu/gutil/integral_types.h"
namespace util_hash {
// Hash function for a byte array. The buffer is length-delimited; it need not
// be NUL-terminated. The mapping may change from time to time.
uint64 CityHash64(const char *buf, size_t len);
// Hash function for a byte array. For convenience, a 64-bit seed is also
// hashed into the result. The mapping may change from time to time.
uint64 CityHash64WithSeed(const char *buf, size_t len, uint64 seed);
// Hash function for a byte array. For convenience, two seeds are also
// hashed into the result. The mapping may change from time to time.
uint64 CityHash64WithSeeds(const char *buf, size_t len,
uint64 seed0, uint64 seed1);
// Hash function for a byte array, returning a 128-bit result.
// Unlike the 64-bit variants above, the mapping will never change.
uint128 CityHash128(const char *s, size_t len);
// Hash function for a byte array. For convenience, a 128-bit seed is also
// hashed into the result. The mapping will never change.
uint128 CityHash128WithSeed(const char *s, size_t len, uint128 seed);
} // namespace util_hash
#endif // UTIL_HASH_CITY_H_
| 658
|
644
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
from formalsystems.formalsystems import FormalSystem, Theorem
def main():
    """Parse CLI arguments, load a formal system from YAML, and run the
    requested mode: axiom-schema listing (-s), axiom check (-a), theorem
    derivation (-d), or plain theorem enumeration (default).

    Note: this is Python 2 source (print statements).
    """
    parser = argparse.ArgumentParser(description='Formal system processor.')
    parser.add_argument('yaml_file',
                        metavar='file',
                        type=str,
                        help='path to YAML definition of the formal system')
    parser.add_argument('-d', '--derivation',
                        dest='theorem',
                        type=Theorem,
                        default=None,
                        help='print theorem derivation')
    parser.add_argument('-s', '--schema',
                        action='store_true',
                        default=None,
                        help='iteration over axioms schema')
    parser.add_argument('-a', '--axiom',
                        dest='axiom',
                        type=Theorem,
                        default=None,
                        help='check axiom definition')
    parser.add_argument('-i', '--iteration',
                        type=int,
                        default=10,
                        help='define max iteration (default 10)')
    parser.add_argument('-q', '--quiet',
                        action='store_true',
                        help='quiet mode')
    args = parser.parse_args()
    fs = FormalSystem()
    fs.read_formal_system(args.yaml_file)
    # An axiom schema with wildcards generates infinitely many axioms; a rule
    # with several parent theorems requires the recursive/"full" algorithm.
    infinite_axioms = any(ax.wildcards for ax in fs.axioms)
    and_in_rule = any(len(r.oldts) > 1 for r in fs.rules)
    if args.schema is not None:
        # Mode: enumerate axioms generated by the schema, then exit.
        print '> Generating axioms schema'
        for ax in fs.iterate_over_schema(max_iter=args.iteration):
            print ax
        return
    if args.axiom is not None:
        # Mode: check whether the given formula is an axiom, then exit.
        fs.is_axiom(args.axiom, verbose=not(args.quiet))
        return
    if infinite_axioms:
        print '> Infinite number of axioms, using bucket algorithm'
    else:
        print '> Finite number of axioms, using step algorithm'
    if and_in_rule:
        print '> Rule with several parents, using recursivity'
    print
    # Main
    if args.theorem is None:
        # Mode: enumerate theorems of the system, bounded by --iteration.
        if infinite_axioms:
            fs.apply_rules_bucket_till(fs.iterate_over_schema(),
                                       min_len=None,  # wont apply
                                       max_turns=args.iteration,
                                       full=and_in_rule,
                                       verbose=not(args.quiet))
        else:
            fs.apply_rules_step(fs.iterate_over_schema(),
                                step=args.iteration,
                                verbose=not(args.quiet))
    else:
        # Mode: search for a derivation of the requested theorem.
        if infinite_axioms:
            fs.derivation_asc(fs.iterate_over_schema(),
                              args.theorem,
                              max_turns=args.iteration,
                              full=and_in_rule,
                              verbose=not(args.quiet))
        else:
            fs.derivation_step(fs.iterate_over_schema(),
                               args.theorem,
                               step=args.iteration,
                               verbose=not(args.quiet))
if __name__ == '__main__':
    main()
| 1,828
|
424
|
from . import urllib3
import sublime
# Public API of this module.
__all__ = ['ST2', 'ST3','Settings']
# Sublime Text major-version flags, derived from the runtime version string.
ST2 = sublime.version().startswith('2')
ST3 = not ST2
class Settings(object):
    """Convenience wrapper around ``sublime.Settings``.

    Settings(settings, none_erases=False)

    * settings (sublime.Settings)
        The settings object to wrap.
    * none_erases (bool, optional)
        When ``True``, assigning ``None`` to a key erases the key instead of
        storing ``None``. This only matters when the key is also defined in a
        parent Settings collection, which would then be consulted again.

    The standard sublime.Settings API is forwarded unchanged:

        get(key, default=None)
        set(key, value)
        erase(key)
        has(key)
        add_on_change(key, on_change)
        clear_on_change(key, on_change)

    http://www.sublimetext.com/docs/2/api_reference.html#sublime.Settings

    Reading a value:

        value = self.get('key', default)
        value = self['key']
        value = self.key_without_spaces

    Writing a value:

        self.set('key', value)
        self['key'] = value
        self.key_without_spaces = value

    Erasing a key:

        self.erase('key')
        self.set('key', None) or similar  # iff ``none_erases == True``
        del self.key_without_spaces

    ! Important:
        Attribute-style access only works for names that do not collide with
        anything already in ``dir(Settings)`` (methods, dunders and the
        private attributes below). Reading such a colliding name returns the
        class member itself, deleting it does nothing, and assigning the
        private leading-underscore names results in unpredictable behavior.
        Please avoid those names.
    """
    _none_erases = False
    _s = None
    # Only these names are stored on the instance itself; assignments to any
    # other attribute are forwarded to the wrapped settings object.
    _settable_attributes = ('_s', '_none_erases')

    def __init__(self, settings, none_erases=False):
        if not isinstance(settings, sublime.Settings):
            raise ValueError("Not an instance of sublime.Settings")
        self._s = settings
        self._none_erases = none_erases

    def get(self, key, default=None):
        """Return the named setting, or ``default`` if it is not defined."""
        return self._s.get(key, default)

    def set(self, key, value):
        """Store ``value`` under ``key`` (primitives, lists and dicts only).

        Erases the key instead iff ``value is None`` and ``none_erases`` was
        enabled at construction time.
        """
        if value is None and self._none_erases:
            self.erase(key)
            return
        self._s.set(key, value)

    def erase(self, key):
        """Remove the named setting; parent Settings are left untouched."""
        self._s.erase(key)

    def has(self, key):
        """Return whether the key exists here or in any parent Settings."""
        return self._s.has(key)

    def add_on_change(self, key, on_change):
        """Run ``on_change`` whenever the setting under ``key`` changes."""
        self._s.add_on_change(key, on_change)

    def clear_on_change(self, key, on_change):
        """Remove every callback registered for ``key``."""
        self._s.clear_on_change(key, on_change)

    def __getitem__(self, key):
        """self[key]"""
        return self.get(key)

    def __setitem__(self, key, value):
        """self[key] = value"""
        self.set(key, value)

    def __getattr__(self, key):
        """self.key_without_spaces"""
        return self.get(key)

    def __setattr__(self, key, value):
        """self.key_without_spaces = value"""
        if key not in self._settable_attributes:
            self.set(key, value)
        else:
            object.__setattr__(self, key, value)

    def __delattr__(self, key):
        """del self.key_without_spaces"""
        if key not in dir(self):
            self.erase(key)
|
3,212
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.toolkit.cli.impl.result.nifi;
import org.apache.commons.lang3.Validate;
import org.apache.nifi.toolkit.cli.api.ResultType;
import org.apache.nifi.toolkit.cli.impl.result.AbstractWritableResult;
import org.apache.nifi.toolkit.cli.impl.result.writer.DynamicTableWriter;
import org.apache.nifi.toolkit.cli.impl.result.writer.Table;
import org.apache.nifi.toolkit.cli.impl.result.writer.TableWriter;
import org.apache.nifi.web.api.dto.ReportingTaskDTO;
import org.apache.nifi.web.api.entity.ReportingTaskEntity;
import org.apache.nifi.web.api.entity.ReportingTasksEntity;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
/**
* Result for ReportingTasksEntity.
*/
/**
 * Result for ReportingTasksEntity: renders the contained reporting tasks as a
 * simple table (number, name, id, simple type name, run status), sorted by name.
 */
public class ReportingTasksResult extends AbstractWritableResult<ReportingTasksEntity> {
    private final ReportingTasksEntity reportingTasksEntity;

    public ReportingTasksResult(final ResultType resultType, final ReportingTasksEntity reportingTasksEntity) {
        super(resultType);
        this.reportingTasksEntity = reportingTasksEntity;
        Validate.notNull(this.reportingTasksEntity);
    }

    @Override
    protected void writeSimpleResult(final PrintStream output) throws IOException {
        final Set<ReportingTaskEntity> taskEntities = reportingTasksEntity.getReportingTasks();
        if (taskEntities == null) {
            return;
        }

        // Sort by task name for a stable, human-friendly listing.
        final List<ReportingTaskDTO> tasks = taskEntities.stream()
                .map(ReportingTaskEntity::getComponent)
                .sorted(Comparator.comparing(ReportingTaskDTO::getName))
                .collect(Collectors.toList());

        final Table table = new Table.Builder()
                .column("#", 3, 3, false)
                .column("Name", 5, 40, true)
                .column("ID", 36, 36, false)
                .column("Type", 5, 40, true)
                .column("Run Status", 10, 20, false)
                .build();

        int rowNumber = 0;
        for (final ReportingTaskDTO task : tasks) {
            rowNumber++;
            // Show only the last segment of the fully qualified type name.
            final String[] typeParts = task.getType().split("\\.", -1);
            final String simpleType = typeParts[typeParts.length - 1];
            table.addRow(
                    String.valueOf(rowNumber),
                    task.getName(),
                    task.getId(),
                    simpleType,
                    task.getState()
            );
        }

        new DynamicTableWriter().write(table, output);
    }

    @Override
    public ReportingTasksEntity getResult() {
        return reportingTasksEntity;
    }
}
| 1,314
|
4,335
|
/*----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Fat Fs HeadFile
* Author: <NAME>
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
/* Define to prevent recursive inclusion ------------------------------------ */
#ifndef _LOS_FATFS_H
#define _LOS_FATFS_H
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif
#endif /* __cplusplus */
/* Includes ----------------------------------------------------------------- */
#include "ff.h"
#include "diskio.h"
#include <stdint.h>
/* Defines ------------------------------------------------------------------ */
/* Value of disk_dev.state once the underlying drive has been initialized. */
#define DISK_STATE_INITIALIZED 1
/* Macros ------------------------------------------------------------------- */
/* Typedefs ----------------------------------------------------------------- */
/* Operation table a backing block device must provide to the FatFs glue layer. */
struct diskio_drv {
    DSTATUS (*initialize)(BYTE); /* !< Initialize Disk Drive */
    DSTATUS (*status)(BYTE); /* !< Get Disk Status */
    DRESULT (*read)(BYTE, BYTE *, DWORD, UINT); /* !< Read Sector(s) */
    DRESULT (*write)(BYTE, const BYTE *, DWORD, UINT); /* !< Write Sector(s) */
    DRESULT (*ioctl)(BYTE, BYTE, void *); /* !< I/O control operation */
};
/* Per-volume device bookkeeping. */
struct disk_dev {
    uint8_t state;                /* see DISK_STATE_INITIALIZED */
    const struct diskio_drv *drv; /* driver operations for this volume */
    uint8_t lun;                  /* logical unit number */
};
/* Registry of all mounted volumes (FF_VOLUMES comes from ff.h). */
struct disk_mnt {
    struct disk_dev dev[FF_VOLUMES];
    volatile uint8_t num; /* number of volumes in use */
};
/* Extern variables --------------------------------------------------------- */
/* Functions API ------------------------------------------------------------ */
/* Initialize the FatFs glue layer. */
int fatfs_init(void);
/* Mount a volume at `path` using `drv`; the assigned volume number is
 * presumably returned via *drive — confirm against the implementation. */
int fatfs_mount(const char *path, struct diskio_drv *drv, uint8_t *drive);
/* Unmount the volume previously mounted at `path` with number `drive`. */
int fatfs_unmount(const char *path, uint8_t drive);
#ifdef __cplusplus
#if __cplusplus
}
#endif
#endif /* __cplusplus */
#endif /* _LOS_FATFS_H */
| 1,026
|
317
|
<filename>assets/src/ba_data/python/bastd/ui/getremote.py
# Released under the MIT License. See LICENSE for details.
#
"""Provides a popup telling the user about the BSRemote app."""
from __future__ import annotations
from typing import TYPE_CHECKING
import ba
from bastd.ui import popup
if TYPE_CHECKING:
pass
class GetBSRemoteWindow(popup.PopupWindow):
    """Popup telling the user about BSRemote app."""

    def __init__(self) -> None:
        position = (0.0, 0.0)
        uiscale = ba.app.ui.uiscale
        # Scale the popup up on smaller UI scales so it stays readable.
        scale = (2.3 if uiscale is ba.UIScale.SMALL else
                 1.65 if uiscale is ba.UIScale.MEDIUM else 1.23)
        # Set once the close animation has started so it only runs once.
        self._transitioning_out = False
        self._width = 570
        self._height = 350
        bg_color = (0.5, 0.4, 0.6)
        popup.PopupWindow.__init__(self,
                                   position=position,
                                   size=(self._width, self._height),
                                   scale=scale,
                                   bg_color=bg_color)
        # Small 'X' button near the top-left corner for dismissing the popup.
        self._cancel_button = ba.buttonwidget(
            parent=self.root_widget,
            position=(50, self._height - 30),
            size=(50, 50),
            scale=0.5,
            label='',
            color=bg_color,
            on_activate_call=self._on_cancel_press,
            autoselect=True,
            icon=ba.gettexture('crossOut'),
            iconscale=1.2)
        # Illustration image, horizontally centered in the upper area.
        ba.imagewidget(parent=self.root_widget,
                       position=(self._width * 0.5 - 110,
                                 self._height * 0.67 - 110),
                       size=(220, 220),
                       texture=ba.gettexture('multiplayerExamples'))
        # Localized blurb about the remote app, centered near the bottom.
        ba.textwidget(parent=self.root_widget,
                      size=(0, 0),
                      h_align='center',
                      v_align='center',
                      maxwidth=self._width * 0.9,
                      position=(self._width * 0.5, 60),
                      text=ba.Lstr(
                          resource='remoteAppInfoShortText',
                          subs=[('${APP_NAME}', ba.Lstr(resource='titleText')),
                                ('${REMOTE_APP_NAME}',
                                 ba.Lstr(resource='remote_app.app_name'))]))

    def _on_cancel_press(self) -> None:
        # Close-button handler.
        self._transition_out()

    def _transition_out(self) -> None:
        # Start the scale-out close animation; guarded so it only runs once.
        if not self._transitioning_out:
            self._transitioning_out = True
            ba.containerwidget(edit=self.root_widget, transition='out_scale')

    def on_popup_cancel(self) -> None:
        # Cancel hook (presumably invoked by the popup.PopupWindow base class
        # on back-press/escape — confirm against its interface).
        ba.playsound(ba.getsound('swish'))
        self._transition_out()
| 1,434
|
1,252
|
<filename>tests/stlcontainer.cpp
/*{{{
Copyright (C) 2012-2015 <NAME> <<EMAIL>>
Permission to use, copy, modify, and distribute this software
and its documentation for any purpose and without fee is hereby
granted, provided that the above copyright notice appear in all
copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaim all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
}}}*/
#include "unittest.h"
#include <Vc/Allocator>
#include <vector>
#include <array>
#include <forward_list>
#include <list>
#include <deque>
#include "../Vc/common/macros.h"
// Returns a mask selecting the low address bits that must be zero for a
// correctly aligned object of vector type Vec (required alignment minus one).
template<typename Vec> size_t alignmentMask()
{
    if (Vec::Size == 1) {
        // on 32bit the maximal alignment is 4 Bytes, even for 8-Byte doubles.
        const size_t scalarAlignment =
            std::min(sizeof(void*), sizeof(typename Vec::EntryType));
        return scalarAlignment - 1;
    }
    // AVX::VectorAlignment is too large
    const size_t vectorAlignment = std::min<size_t>(sizeof(Vec), Vc::VectorAlignment);
    return vectorAlignment - 1;
}
// Helper aggregate: a leading char followed by T, used to exercise the
// allocator's handling of padding/alignment of T inside a struct.
template<typename T> struct SomeStruct { char a; T x; };
// Verifies that std::vector storage of SIMD vector types (and of structs
// containing them) is suitably aligned when Vc::Allocator is used.
TEST_TYPES(V, stdVectorAlignment, AllVectors)
{
    const size_t mask = alignmentMask<V>();
    const char *const null = 0;
    // Every element of a vector<V> must sit on an aligned address.
    std::vector<V> v(11);
    for (int i = 0; i < 11; ++i) {
        COMPARE((reinterpret_cast<char *>(&v[i]) - null) & mask, 0u) << "&v[i] = " << &v[i] << ", mask = " << mask << ", i = " << i;
    }
    // Same check for structs that embed a V, via the explicit Vc::Allocator.
    std::vector<SomeStruct<V>, Vc::Allocator<SomeStruct<V> > > v2(11);
    for (int i = 0; i < 11; ++i) {
        COMPARE((reinterpret_cast<char *>(&v2[i]) - null) & mask, 0u) << "&v2[i] = " << &v2[i] << ", mask = " << mask << ", i = " << i;
    }
    // Copy construction must compile/allocate correctly as well.
    std::vector<V> v3(v);
    std::vector<SomeStruct<V>, Vc::Allocator<SomeStruct<V> > > v4(v2);
    // Scalar element type with Vc::Allocator: data() must honor alignof(V)
    // regardless of the vector's length.
    typedef typename V::EntryType T;
    for (int i = 1; i < 100; ++i) {
        std::vector<T, Vc::Allocator<T>> v5(i);
        const size_t expectedAlignment = alignof(V);
        COMPARE(reinterpret_cast<std::uintptr_t>(v5.data()) & (expectedAlignment - 1), 0u)
            << "expectedAlignment: " << expectedAlignment;
    }
}
// Builds a container of vectors via Vc::makeContainer from a braced list of
// the values 1..N (N = sizeof...(Indexes)) and verifies its contents: each
// stored vector holds consecutive values, with entries past N zeroed.
template <typename V, typename Container, std::size_t... Indexes>
void listInitializationImpl(Vc::index_sequence<Indexes...>)
{
    typedef typename V::EntryType T;
    const auto data = Vc::makeContainer<Container>({T(Indexes + 1)...});
    V reference = V([](int n) { return n + 1; });
    for (const auto &v : data) {
        // Lanes beyond the initializer-list length must compare as zero.
        reference.setZero(reference > int(sizeof...(Indexes)));
        COMPARE(v, reference) << vir::typeToString<Container>() << " -> "
                              << vir::typeToString<decltype(data)>();
        reference += int(V::size());
    }
}
// Runs the list-initialization check over the supported container types,
// with initializer lists of 9 and 3 elements (longer and shorter than a
// typical vector width).
TEST_TYPES(V, listInitialization, AllVectors)
{
    listInitializationImpl<V, std::vector<V>>(Vc::make_index_sequence<9>());
    listInitializationImpl<V, std::vector<V>>(Vc::make_index_sequence<3>());
    listInitializationImpl<V, std::array<V, 9>>(Vc::make_index_sequence<9>());
    listInitializationImpl<V, std::array<V, 3>>(Vc::make_index_sequence<3>());
#ifndef Vc_MSVC
    listInitializationImpl<V, std::deque<V>>(Vc::make_index_sequence<9>());
    listInitializationImpl<V, std::deque<V>>(Vc::make_index_sequence<3>());
#endif
    // The following two crash (at least with AVX). Probably unaligned memory access.
    //listInitialization<V, std::forward_list<V>>();
    //listInitialization<V, std::list<V>>();
}
#ifdef Vc_CXX14
// Exercises Vc::simd_for_each / Vc::simd_for_each_n over a std::vector<T>.
// The callable is invoked either with full vectors (V) or with scalar
// vectors (Vc::Scalar::Vector<T>); by-reference arguments must be written
// back to the container, by-value arguments must not.
TEST_TYPES(V, simdForEach, AllVectors)
{
    typedef typename V::EntryType T;
    std::vector<T> data;
    data.resize(100);
    // variant 0 uses simd_for_each, variant 1 uses simd_for_each_n.
    for (int variant = 0; variant < 2; ++variant) {
        std::iota(data.begin(), data.end(), T(0));
        T reference = 1;
        int called_with_scalar = 0;
        int called_with_V = 0;
        int position = 1;
        // test1 takes its argument by reference: the x += 1 modification
        // must be written back to data by simd_for_each.
        auto &&test1 = [&](auto &x) {
            const auto ref = reference + x.IndexesFromZero();
            COMPARE(ref, x);
            reference += x.Size;
            x += 1;
            if (std::is_same<decltype(x), Vc::Scalar::Vector<T> &>::value) {
                ++called_with_scalar;
            }
            if (std::is_same<decltype(x), V &>::value) {
                ++called_with_V;
            }
            static_assert(std::is_same<decltype(x), Vc::Scalar::Vector<T> &>::value ||
                              std::is_same<decltype(x), V &>::value,
                          "wut?");
            for (std::size_t i = 0; i < x.Size; ++i) {
                data[position++] += T(2); // modify the container directly - if it is not
                                          // undone by simd_for_each we have a bug
            }
        };
        // test2 takes its argument by value: x += 1 must NOT be written back,
        // only the direct container modification below survives.
        auto &&test2 = [&](auto x) {
            const auto ref = reference + x.IndexesFromZero();
            COMPARE(ref, x);
            reference += x.Size;
            x += 1;
            for (std::size_t i = 0; i < x.Size; ++i) {
                data[position++] += T(2); // modify the container directly - if it is
                                          // undone by simd_for_each we have a bug
            }
        };
        // test3 only reads: verifies that the by-value closure argument of
        // test2 was not written back.
        auto &&test3 = [&reference](auto x) {
            const auto ref = reference + x.IndexesFromZero();
            COMPARE(ref, x) << "if ref == x + 2 then simd_for_each wrote back the "
                               "closure argument, even though it should not have";
            reference += x.Size;
        };
        // Start one element past begin() so the range is (typically)
        // unaligned at the front.
        auto &&for_each = [&](auto test) {
            auto b = std::next(data.begin());
            if (variant == 0) {
                Vc::simd_for_each(b, data.end(), test);
            } else {
                Vc::simd_for_each_n(b, data.size() - 1, test);
            }
        };
        for_each(test1);
        VERIFY(called_with_scalar > 0);
        VERIFY(called_with_V > 0);
        if (Vc::Scalar::is_vector<V>::value) {
            // in this case called_with_V and called_with_scalar will have been
            // incremented both on every call
            COMPARE(called_with_V, called_with_scalar);
            COMPARE(called_with_scalar, int(data.size() - 1));
        } else {
            COMPARE(called_with_V * V::Size + called_with_scalar, data.size() - 1);
        }
        reference = 2;
        position = 1;
        for_each(test2);
        reference = 4;
        for_each(test3);
    }
}
#endif
| 3,080
|
4,901
|
<filename>jre_emul/android/platform/libcore/harmony-tests/src/test/java/org/apache/harmony/tests/java/lang/reflect/ModifierTest.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.harmony.tests.java.lang.reflect;
import java.lang.reflect.Modifier;
public class ModifierTest extends junit.framework.TestCase {

    // Bitwise OR of all modifier constants from PUBLIC (0x001) up to
    // ABSTRACT (0x400); STRICT (0x800) is not included.
    private static final int ALL_FLAGS = 0x7FF;

    /**
     * java.lang.reflect.Modifier#Modifier()
     */
    public void test_Constructor() {
        // Test for method java.lang.reflect.Modifier()
        new Modifier();
    }

    /**
     * java.lang.reflect.Modifier#isAbstract(int)
     */
    public void test_isAbstractI() {
        // Test for method boolean java.lang.reflect.Modifier.isAbstract(int)
        assertTrue("ABSTRACT returned false", Modifier.isAbstract(ALL_FLAGS));
        assertTrue("ABSTRACT returned false", Modifier
                .isAbstract(Modifier.ABSTRACT));
        assertTrue("Non-ABSTRACT returned true", !Modifier
                .isAbstract(Modifier.TRANSIENT));
    }

    /**
     * java.lang.reflect.Modifier#isFinal(int)
     */
    public void test_isFinalI() {
        // Test for method boolean java.lang.reflect.Modifier.isFinal(int)
        assertTrue("FINAL returned false", Modifier.isFinal(ALL_FLAGS));
        assertTrue("FINAL returned false", Modifier.isFinal(Modifier.FINAL));
        assertTrue("Non-FINAL returned true", !Modifier
                .isFinal(Modifier.TRANSIENT));
    }

    /**
     * java.lang.reflect.Modifier#isInterface(int)
     */
    public void test_isInterfaceI() {
        // Test for method boolean java.lang.reflect.Modifier.isInterface(int)
        assertTrue("INTERFACE returned false", Modifier.isInterface(ALL_FLAGS));
        assertTrue("INTERFACE returned false", Modifier
                .isInterface(Modifier.INTERFACE));
        assertTrue("Non-INTERFACE returned true", !Modifier
                .isInterface(Modifier.TRANSIENT));
    }

    /**
     * java.lang.reflect.Modifier#isNative(int)
     */
    public void test_isNativeI() {
        // Test for method boolean java.lang.reflect.Modifier.isNative(int)
        assertTrue("NATIVE returned false", Modifier.isNative(ALL_FLAGS));
        assertTrue("NATIVE returned false", Modifier.isNative(Modifier.NATIVE));
        assertTrue("Non-NATIVE returned true", !Modifier
                .isNative(Modifier.TRANSIENT));
    }

    /**
     * java.lang.reflect.Modifier#isPrivate(int)
     */
    public void test_isPrivateI() {
        // Test for method boolean java.lang.reflect.Modifier.isPrivate(int)
        assertTrue("PRIVATE returned false", Modifier.isPrivate(ALL_FLAGS));
        assertTrue("PRIVATE returned false", Modifier
                .isPrivate(Modifier.PRIVATE));
        assertTrue("Non-PRIVATE returned true", !Modifier
                .isPrivate(Modifier.TRANSIENT));
    }

    /**
     * java.lang.reflect.Modifier#isProtected(int)
     */
    public void test_isProtectedI() {
        // Test for method boolean java.lang.reflect.Modifier.isProtected(int)
        assertTrue("PROTECTED returned false", Modifier.isProtected(ALL_FLAGS));
        assertTrue("PROTECTED returned false", Modifier
                .isProtected(Modifier.PROTECTED));
        assertTrue("Non-PROTECTED returned true", !Modifier
                .isProtected(Modifier.TRANSIENT));
    }

    /**
     * java.lang.reflect.Modifier#isPublic(int)
     */
    public void test_isPublicI() {
        // Test for method boolean java.lang.reflect.Modifier.isPublic(int)
        assertTrue("PUBLIC returned false", Modifier.isPublic(ALL_FLAGS));
        assertTrue("PUBLIC returned false", Modifier.isPublic(Modifier.PUBLIC));
        assertTrue("Non-PUBLIC returned true", !Modifier
                .isPublic(Modifier.TRANSIENT));
    }

    /**
     * java.lang.reflect.Modifier#isStatic(int)
     */
    public void test_isStaticI() {
        // Test for method boolean java.lang.reflect.Modifier.isStatic(int)
        assertTrue("STATIC returned false", Modifier.isStatic(ALL_FLAGS));
        assertTrue("STATIC returned false", Modifier.isStatic(Modifier.STATIC));
        assertTrue("Non-STATIC returned true", !Modifier
                .isStatic(Modifier.TRANSIENT));
    }

    /**
     * java.lang.reflect.Modifier#isStrict(int)
     */
    public void test_isStrictI() {
        // Test for method boolean java.lang.reflect.Modifier.isStrict(int)
        // Note: ALL_FLAGS (0x7FF) does not contain STRICT (0x800), so only
        // the individual constant is checked here.
        assertTrue("STRICT returned false", Modifier.isStrict(Modifier.STRICT));
        assertTrue("Non-STRICT returned true", !Modifier
                .isStrict(Modifier.TRANSIENT));
    }

    /**
     * java.lang.reflect.Modifier#isSynchronized(int)
     */
    public void test_isSynchronizedI() {
        // Test for method boolean
        // java.lang.reflect.Modifier.isSynchronized(int)
        assertTrue("Synchronized returned false", Modifier
                .isSynchronized(ALL_FLAGS));
        assertTrue("Non-Synchronized returned true", !Modifier
                .isSynchronized(Modifier.VOLATILE));
    }

    /**
     * java.lang.reflect.Modifier#isTransient(int)
     */
    public void test_isTransientI() {
        // Test for method boolean java.lang.reflect.Modifier.isTransient(int)
        assertTrue("Transient returned false", Modifier.isTransient(ALL_FLAGS));
        assertTrue("Transient returned false", Modifier
                .isTransient(Modifier.TRANSIENT));
        assertTrue("Non-Transient returned true", !Modifier
                .isTransient(Modifier.VOLATILE));
    }

    /**
     * java.lang.reflect.Modifier#isVolatile(int)
     */
    public void test_isVolatileI() {
        // Test for method boolean java.lang.reflect.Modifier.isVolatile(int)
        assertTrue("Volatile returned false", Modifier.isVolatile(ALL_FLAGS));
        assertTrue("Volatile returned false", Modifier
                .isVolatile(Modifier.VOLATILE));
        assertTrue("Non-Volatile returned true", !Modifier
                .isVolatile(Modifier.TRANSIENT));
    }

    /**
     * java.lang.reflect.Modifier#toString(int)
     */
    public void test_toStringI() {
        // Test for method java.lang.String
        // java.lang.reflect.Modifier.toString(int)
        assertTrue("Returned incorrect string value: "
                + Modifier.toString(java.lang.reflect.Modifier.PUBLIC
                        + java.lang.reflect.Modifier.ABSTRACT), Modifier
                .toString(
                        java.lang.reflect.Modifier.PUBLIC
                                + java.lang.reflect.Modifier.ABSTRACT).equals(
                        "public abstract"));
        // 0xFFF sets every bit handled by toString(), including STRICT and
        // INTERFACE; the expected output order is fixed by the spec.
        int i = 0xFFF;
        String modification = "public protected private abstract static final transient "
                + "volatile synchronized native strictfp interface";
        assertTrue("Returned incorrect string value", Modifier.toString(i)
                .equals(modification));
    }

    // Pins the exact numeric values of the modifier constants.
    public void test_Constants_Value() {
        assertEquals(1024, Modifier.ABSTRACT);
        assertEquals(16, Modifier.FINAL);
        assertEquals(512, Modifier.INTERFACE);
        assertEquals(256, Modifier.NATIVE);
        assertEquals(2, Modifier.PRIVATE);
        assertEquals(4, Modifier.PROTECTED);
        assertEquals(1, Modifier.PUBLIC);
        assertEquals(8, Modifier.STATIC);
        assertEquals(2048, Modifier.STRICT);
        assertEquals(32, Modifier.SYNCHRONIZED);
        assertEquals(128, Modifier.TRANSIENT);
        assertEquals(64, Modifier.VOLATILE);
    }

    // Fixture classes covering every combination of access level,
    // abstract/final/static, and class/interface used by test_Class_Modifier.
    abstract class AbstractClazz {
    }

    final class FinalClazz {
    }

    static class StaticClazz {
    }

    interface InterfaceClazz {
    }

    public class PublicClazz {
    }

    protected class ProtectedClazz {
    }

    private class PrivateClazz {
    }

    public abstract class PublicAbstractClazz {
    }

    protected abstract class ProtectedAbstractClazz {
    }

    private abstract class PrivateAbstractClazz {
    }

    public final class PublicFinalClazz {
    }

    protected final class ProtectedFinalClazz {
    }

    private final class PrivateFinalClazz {
    }

    public static class PublicStaticClazz {
    }

    protected static class ProtectedStaticClazz {
    }

    private static class PrivateStaticClazz {
    }

    public interface PublicInterface {
    }

    protected interface ProtectedInterface {
    }

    private interface PrivateInterface {
    }

    static abstract class StaticAbstractClazz {
    }

    public static abstract class PublicStaticAbstractClazz {
    }

    protected static abstract class ProtectedStaticAbstractClazz {
    }

    private static abstract class PrivateStaticAbstractClazz {
    }

    static final class StaticFinalClazz {
    }

    public static final class PublicStaticFinalClazz {
    }

    protected static final class ProtectedStaticFinalClazz {
    }

    private static final class PrivateStaticFinalClazz {
    }

    static interface StaticInterface {
    }

    public static interface PublicStaticInterface {
    }

    protected static interface ProtectedStaticInterface {
    }

    private static interface PrivateStaticInterface {
    }

    static abstract interface StaticAbstractInterface {
    }

    public static abstract interface PublicStaticAbstractInterface {
    }

    protected static abstract interface ProtectedStaticAbstractInterface {
    }

    private static abstract interface PrivateStaticAbstractInterface {
    }

    // Verifies Class.getModifiers() for the fixture classes above. Nested
    // interfaces are expected to report INTERFACE + STATIC + ABSTRACT even
    // when those keywords are not written explicitly.
    public void test_Class_Modifier() {
        assertEquals(Modifier.ABSTRACT, AbstractClazz.class.getModifiers());
        assertEquals(Modifier.FINAL, FinalClazz.class.getModifiers());
        assertEquals(Modifier.STATIC, StaticClazz.class.getModifiers());
        assertEquals(Modifier.INTERFACE + Modifier.STATIC + Modifier.ABSTRACT,
                InterfaceClazz.class.getModifiers());
        assertEquals(Modifier.PUBLIC, PublicClazz.class.getModifiers());
        assertEquals(Modifier.PROTECTED, ProtectedClazz.class.getModifiers());
        assertEquals(Modifier.PRIVATE, PrivateClazz.class.getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.ABSTRACT,
                PublicAbstractClazz.class.getModifiers());
        assertEquals(Modifier.PROTECTED + Modifier.ABSTRACT,
                ProtectedAbstractClazz.class.getModifiers());
        assertEquals(Modifier.PRIVATE + Modifier.ABSTRACT,
                PrivateAbstractClazz.class.getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.FINAL, PublicFinalClazz.class
                .getModifiers());
        assertEquals(Modifier.PROTECTED + Modifier.FINAL,
                ProtectedFinalClazz.class.getModifiers());
        assertEquals(Modifier.PRIVATE + Modifier.FINAL, PrivateFinalClazz.class
                .getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.STATIC, PublicStaticClazz.class
                .getModifiers());
        assertEquals(Modifier.PROTECTED + Modifier.STATIC,
                ProtectedStaticClazz.class.getModifiers());
        assertEquals(Modifier.PRIVATE + Modifier.STATIC,
                PrivateStaticClazz.class.getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.INTERFACE + Modifier.STATIC
                + Modifier.ABSTRACT, PublicInterface.class.getModifiers());
        assertEquals(Modifier.STATIC + Modifier.FINAL, StaticFinalClazz.class
                .getModifiers());
        assertEquals(Modifier.PRIVATE + Modifier.INTERFACE + Modifier.STATIC
                + Modifier.ABSTRACT, PrivateInterface.class.getModifiers());
        assertEquals(Modifier.STATIC + Modifier.ABSTRACT,
                StaticAbstractClazz.class.getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.STATIC + Modifier.ABSTRACT,
                PublicStaticAbstractClazz.class.getModifiers());
        assertEquals(Modifier.PROTECTED + Modifier.STATIC + Modifier.ABSTRACT,
                ProtectedStaticAbstractClazz.class.getModifiers());
        assertEquals(Modifier.PRIVATE + Modifier.STATIC + Modifier.ABSTRACT,
                PrivateStaticAbstractClazz.class.getModifiers());
        assertEquals(Modifier.STATIC + Modifier.FINAL, StaticFinalClazz.class
                .getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.STATIC + Modifier.FINAL,
                PublicStaticFinalClazz.class.getModifiers());
        assertEquals(Modifier.PROTECTED + Modifier.STATIC + Modifier.FINAL,
                ProtectedStaticFinalClazz.class.getModifiers());
        assertEquals(Modifier.PRIVATE + Modifier.STATIC + Modifier.FINAL,
                PrivateStaticFinalClazz.class.getModifiers());
        assertEquals(Modifier.INTERFACE + Modifier.STATIC + Modifier.ABSTRACT,
                StaticInterface.class.getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.INTERFACE + Modifier.STATIC
                + Modifier.ABSTRACT, PublicStaticInterface.class.getModifiers());
        assertEquals(Modifier.PROTECTED + Modifier.INTERFACE + Modifier.STATIC
                + Modifier.ABSTRACT, ProtectedStaticInterface.class
                .getModifiers());
        assertEquals(Modifier.PRIVATE + Modifier.INTERFACE + Modifier.STATIC
                + Modifier.ABSTRACT, PrivateStaticInterface.class
                .getModifiers());
        assertEquals(Modifier.INTERFACE + Modifier.STATIC + Modifier.ABSTRACT,
                StaticAbstractInterface.class.getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.INTERFACE + Modifier.STATIC
                + Modifier.ABSTRACT, PublicStaticAbstractInterface.class
                .getModifiers());
        assertEquals(Modifier.PROTECTED + Modifier.INTERFACE + Modifier.STATIC
                + Modifier.ABSTRACT, ProtectedStaticAbstractInterface.class
                .getModifiers());
        assertEquals(Modifier.PRIVATE + Modifier.INTERFACE + Modifier.STATIC
                + Modifier.ABSTRACT, PrivateStaticAbstractInterface.class
                .getModifiers());
    }

    // Fixture methods for test_Method_Modifier.
    static abstract class MethodClass {
        public abstract void publicAbstractMethod();

        public static void publicStaticMethod() {
        }

        public final void publicFinalMethod() {
        }

        public static final void publicStaticFinalMethod() {
        }
    }

    // Verifies Method.getModifiers() for the fixture methods above.
    public void test_Method_Modifier() throws Exception {
        assertEquals(Modifier.PUBLIC + Modifier.ABSTRACT, MethodClass.class
                .getMethod("publicAbstractMethod", new Class[0]).getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.STATIC, MethodClass.class
                .getMethod("publicStaticMethod", new Class[0]).getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.FINAL, MethodClass.class
                .getMethod("publicFinalMethod", new Class[0]).getModifiers());
        assertEquals(Modifier.PUBLIC + Modifier.STATIC + Modifier.FINAL,
                MethodClass.class.getMethod("publicStaticFinalMethod",
                        new Class[0]).getModifiers());
    }

    /**
     * Sets up the fixture, for example, open a network connection. This method
     * is called before a test is executed.
     */
    protected void setUp() {
    }

    /**
     * Tears down the fixture, for example, close a network connection. This
     * method is called after a test is executed.
     */
    protected void tearDown() {
    }
}
| 6,418
|
606
|
/*
This code is DEPRECATED!
It is kept here only because uninstrumenting a function may still be needed
in some unusual situation.
*/
/*******************************************************************************
Copyright (c) 2019-2022, <NAME>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "libqasan.h"
#include "map_macro.h"
#include <sys/types.h>
#include <pwd.h>
/* Helpers to unpack a (type, name) pair that is passed to the variadic hook
   macro as a single parenthesized argument. */
#define X_GET_FNPAR(type, name) name
#define GET_FNPAR(x) X_GET_FNPAR x
#define X_GET_FNTYPE(type, name) type
#define GET_FNTYPE(x) X_GET_FNTYPE x
#define X_GET_FNDECL(type, name) type name
#define GET_FNDECL(x) X_GET_FNDECL x
/*
 * HOOK_UNINSTRUMENT(rettype, name, (type1, arg1), ...) emits a wrapper for
 * the libc function `name`: it lazily resolves the real symbol with
 * ASSERT_DLSYM, disables QASAN instrumentation around the real call via
 * QASAN_SWAP(QASAN_DISABLED), restores the previous state, and returns the
 * result unchanged.
 */
#define HOOK_UNINSTRUMENT(rettype, name, ...) \
  rettype (*__lq_libc_##name)(MAP_LIST(GET_FNTYPE, __VA_ARGS__)); \
  rettype name(MAP_LIST(GET_FNDECL, __VA_ARGS__)) { \
    \
    if (!(__lq_libc_##name)) __lq_libc_##name = ASSERT_DLSYM(name); \
    int state = QASAN_SWAP(QASAN_DISABLED); \
    rettype r = __lq_libc_##name(MAP_LIST(GET_FNPAR, __VA_ARGS__)); \
    QASAN_SWAP(state); \
    \
    return r; \
    \
  }
/* getenv is the only hook still active; the remainder are kept commented out
   below for reference. */
HOOK_UNINSTRUMENT(char *, getenv, (const char *, name))
/*
HOOK_UNINSTRUMENT(char*, setlocale, (int, category), (const char *, locale))
HOOK_UNINSTRUMENT(int, setenv, (const char *, name), (const char *, value),
(int, overwrite)) HOOK_UNINSTRUMENT(char*, getenv, (const char *, name))
HOOK_UNINSTRUMENT(char*, bindtextdomain, (const char *, domainname), (const char
*, dirname)) HOOK_UNINSTRUMENT(char*, bind_textdomain_codeset, (const char *,
domainname), (const char *, codeset)) HOOK_UNINSTRUMENT(char*, gettext, (const
char *, msgid)) HOOK_UNINSTRUMENT(char*, dgettext, (const char *, domainname),
(const char *, msgid)) HOOK_UNINSTRUMENT(char*, dcgettext, (const char *,
domainname), (const char *, msgid), (int, category)) HOOK_UNINSTRUMENT(int,
__gen_tempname, (char, *tmpl), (int, suffixlen), (int, flags), (int, kind))
HOOK_UNINSTRUMENT(int, mkstemp, (char *, template))
HOOK_UNINSTRUMENT(int, mkostemp, (char *, template), (int, flags))
HOOK_UNINSTRUMENT(int, mkstemps, (char *, template), (int, suffixlen))
HOOK_UNINSTRUMENT(int, mkostemps, (char *, template), (int, suffixlen), (int,
flags)) HOOK_UNINSTRUMENT(struct passwd *, getpwnam, (const char *, name))
HOOK_UNINSTRUMENT(struct passwd *, getpwuid, (uid_t, uid))
HOOK_UNINSTRUMENT(int, getpwnam_r, (const char *, name), (struct passwd *, pwd),
(char *, buf), (size_t, buflen), (struct passwd **, result))
HOOK_UNINSTRUMENT(int, getpwuid_r, (uid_t, uid), (struct passwd *, pwd), (char
*, buf), (size_t, buflen), (struct passwd **, result))
*/
| 1,728
|
884
|
<filename>entity-view/testsuite/src/test/java/com/blazebit/persistence/view/testsuite/AbstractEntityViewTest.java
/*
* Copyright 2014 - 2021 Blazebit.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.blazebit.persistence.view.testsuite;
import com.blazebit.persistence.CriteriaBuilder;
import com.blazebit.persistence.CriteriaBuilderFactory;
import com.blazebit.persistence.spi.PackageOpener;
import com.blazebit.persistence.testsuite.AbstractCoreTest;
import com.blazebit.persistence.view.ConfigurationProperties;
import com.blazebit.persistence.view.EntityViewManager;
import com.blazebit.persistence.view.EntityViewSetting;
import com.blazebit.persistence.view.EntityViews;
import com.blazebit.persistence.view.FlushMode;
import com.blazebit.persistence.view.FlushStrategy;
import com.blazebit.persistence.view.impl.EntityViewManagerImpl;
import com.blazebit.persistence.view.impl.proxy.ProxyFactory;
import com.blazebit.persistence.view.metamodel.ManagedViewType;
import com.blazebit.persistence.view.spi.EntityViewAttributeMapping;
import com.blazebit.persistence.view.spi.EntityViewConfiguration;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
/**
*
* @author <NAME>
* @since 1.0.0
*/
public class AbstractEntityViewTest extends AbstractCoreTest {

    protected static EntityViewManager evm;
    // Test-wide caches so identical configurations reuse the same proxy
    // factory / EntityViewManager instead of rebuilding them per test.
    private static Map<ProxyFactoryCacheKey, ProxyFactory> proxyFactoryCache = new HashMap<>();
    private static Map<EntityViewManagerFactoryCacheKey, EntityViewManager> evmCache = new HashMap<>();

    public EntityViewManager build(Class<?>... classes) {
        return build(EntityViews.createDefaultConfiguration(), classes);
    }

    /**
     * Registers the given entity-view classes on the configuration and returns
     * a (possibly cached) EntityViewManager for it. On a cache hit the cached
     * ProxyFactory is injected into the manager via reflection and the view
     * implementations are (re)loaded into it.
     */
    public EntityViewManager build(EntityViewConfiguration cfg, Class<?>... classes) {
        for (Class<?> c : classes) {
            cfg.addEntityView(c);
        }
        EntityViewManagerFactoryCacheKey cacheKey = new EntityViewManagerFactoryCacheKey(cbf, cfg);
        EntityViewManager evm;
        if ((evm = evmCache.get(cacheKey)) == null) {
            evm = build0(cfg, classes);
            evmCache.put(cacheKey, evm);
        }
        AbstractEntityViewTest.evm = evm;
        PackageOpener packageOpener = cbf.getService(PackageOpener.class);
        boolean unsafeDisabled = !Boolean.valueOf(String.valueOf(cfg.getProperty(ConfigurationProperties.PROXY_UNSAFE_ALLOWED)));
        boolean strictCascadingCheck = Boolean.valueOf(String.valueOf(cfg.getProperty(ConfigurationProperties.UPDATER_STRICT_CASCADING_CHECK)));
        ProxyFactoryCacheKey proxyFactoryCacheKey = new ProxyFactoryCacheKey(unsafeDisabled, strictCascadingCheck, packageOpener);
        ProxyFactory proxyFactory;
        if ((proxyFactory = proxyFactoryCache.get(proxyFactoryCacheKey)) == null) {
            // First time for this key: remember the manager's own factory.
            proxyFactoryCache.put(proxyFactoryCacheKey, ((EntityViewManagerImpl) evm).getProxyFactory());
        } else {
            try {
                // Cache hit: replace the private proxyFactory field of the
                // manager with the cached instance.
                Field proxyFactoryField = EntityViewManagerImpl.class.getDeclaredField("proxyFactory");
                proxyFactoryField.setAccessible(true);
                proxyFactoryField.set(evm, proxyFactory);
                boolean scanStaticImplementations = !Boolean.valueOf(String.valueOf(cfg.getProperty(ConfigurationProperties.STATIC_IMPLEMENTATION_SCANNING_DISABLED)));
                for (ManagedViewType<?> managedView : evm.getMetamodel().getManagedViews()) {
                    Class<?> javaType = managedView.getJavaType();
                    if (!javaType.isInterface() && !Modifier.isAbstract(javaType.getModifiers())) {
                        // Concrete view classes are their own implementation.
                        proxyFactory.setImplementation(javaType);
                    } else if (scanStaticImplementations) {
                        proxyFactory.loadImplementation(new HashSet<>(), managedView, evm);
                    }
                }
            } catch (NoSuchFieldException | IllegalAccessException e) {
                throw new RuntimeException(e);
            }
        }
        return evm;
    }

    private EntityViewManager build0(EntityViewConfiguration cfg, Class<?>[] classes) {
        return cfg.createEntityViewManager(cbf);
    }

    protected <T> CriteriaBuilder<T> applySetting(EntityViewManager evm, Class<T> entityViewClass, CriteriaBuilder<?> criteriaBuilder) {
        EntityViewSetting<T, CriteriaBuilder<T>> setting = EntityViewSetting.create(entityViewClass);
        return evm.applySetting(setting, criteriaBuilder);
    }

    // Cache key for ProxyFactory reuse: identical unsafe/strict settings and
    // the same PackageOpener yield the same factory.
    private static class ProxyFactoryCacheKey {
        private final boolean unsafeDisabled;
        private final boolean strictCascadingCheck;
        private final PackageOpener packageOpener;

        private ProxyFactoryCacheKey(boolean unsafeDisabled, boolean strictCascadingCheck, PackageOpener packageOpener) {
            this.unsafeDisabled = unsafeDisabled;
            this.strictCascadingCheck = strictCascadingCheck;
            this.packageOpener = packageOpener;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            ProxyFactoryCacheKey that = (ProxyFactoryCacheKey) o;
            return unsafeDisabled == that.unsafeDisabled &&
                    strictCascadingCheck == that.strictCascadingCheck &&
                    packageOpener.equals(that.packageOpener);
        }

        @Override
        public int hashCode() {
            return Objects.hash(unsafeDisabled, strictCascadingCheck, packageOpener);
        }
    }

    // Cache key for EntityViewManager reuse: the CriteriaBuilderFactory plus a
    // value-based snapshot of the configuration.
    private static class EntityViewManagerFactoryCacheKey {
        private final CriteriaBuilderFactory cbf;
        private final EntityViewConfigurationEqualityWrapper entityViewConfiguration;

        private EntityViewManagerFactoryCacheKey(CriteriaBuilderFactory cbf, EntityViewConfiguration entityViewConfiguration) {
            this.cbf = cbf;
            this.entityViewConfiguration = new EntityViewConfigurationEqualityWrapper(entityViewConfiguration);
        }

        // EntityViewConfiguration has no value semantics itself, so this
        // wrapper captures the equality-relevant parts (properties, optional
        // parameters, converter/user-type classes, view mappings).
        private static class EntityViewConfigurationEqualityWrapper {
            private final Properties properties;
            private final Map<String, Object> optionalParameters;
            private final Map<Class<?>, Map<Class<?>, Class<?>>> typeConverters;
            private final Map<Class<?>, Class<?>> basicUserTypes;
            private final Set<EntityViewMappingEqualityWrapper> entityViewMappings;

            private EntityViewConfigurationEqualityWrapper(EntityViewConfiguration cfg) {
                this.properties = cfg.getProperties();
                this.optionalParameters = cfg.getOptionalParameters();
                // Compare converter/user-type *classes*, not instances, which
                // usually lack equals().
                this.typeConverters = cfg.getTypeConverters().entrySet().stream().collect(Collectors.toMap(
                        Map.Entry::getKey, entry -> entry.getValue().entrySet().stream().collect(Collectors.toMap(
                                Map.Entry::getKey, entry2 -> entry2.getValue().getClass()
                        ))
                ));
                this.basicUserTypes = cfg.getBasicUserTypes().entrySet().stream().collect(Collectors.toMap(
                        Map.Entry::getKey, entry -> entry.getValue().getClass()
                ));
                this.entityViewMappings = cfg.getEntityViewMappings().stream()
                        .map(mapping -> new EntityViewMappingEqualityWrapper(mapping.getEntityViewClass(), mapping.getFlushMode(), mapping.getFlushStrategy(), mapping.getVersionAttribute() != null))
                        .collect(Collectors.toSet());
            }

            @Override
            public boolean equals(Object o) {
                if (this == o) return true;
                if (o == null || getClass() != o.getClass()) return false;
                EntityViewConfigurationEqualityWrapper that = (EntityViewConfigurationEqualityWrapper) o;
                return properties.equals(that.properties) &&
                        optionalParameters.equals(that.optionalParameters) &&
                        typeConverters.equals(that.typeConverters) &&
                        basicUserTypes.equals(that.basicUserTypes) &&
                        entityViewMappings.equals(that.entityViewMappings);
            }

            @Override
            public int hashCode() {
                return Objects.hash(properties, optionalParameters, typeConverters, basicUserTypes, entityViewMappings);
            }
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            EntityViewManagerFactoryCacheKey that = (EntityViewManagerFactoryCacheKey) o;
            return cbf.equals(that.cbf) &&
                    entityViewConfiguration.equals(that.entityViewConfiguration);
        }

        @Override
        public int hashCode() {
            return Objects.hash(cbf, entityViewConfiguration);
        }

        // Value object capturing the equality-relevant parts of a single
        // entity-view mapping.
        private static class EntityViewMappingEqualityWrapper {
            private final Class<?> entityViewClass;
            private final FlushMode flushMode;
            private final FlushStrategy flushStrategy;
            private final boolean versionAttributeSet;

            public EntityViewMappingEqualityWrapper(Class<?> entityViewClass, FlushMode flushMode, FlushStrategy flushStrategy, boolean versionAttributeSet) {
                this.entityViewClass = entityViewClass;
                this.flushMode = flushMode;
                this.flushStrategy = flushStrategy;
                this.versionAttributeSet = versionAttributeSet;
            }

            @Override
            public boolean equals(Object o) {
                if (this == o) return true;
                if (o == null || getClass() != o.getClass()) return false;
                EntityViewMappingEqualityWrapper that = (EntityViewMappingEqualityWrapper) o;
                return versionAttributeSet == that.versionAttributeSet &&
                        entityViewClass.equals(that.entityViewClass) &&
                        flushMode == that.flushMode &&
                        flushStrategy == that.flushStrategy;
            }

            @Override
            public int hashCode() {
                return Objects.hash(entityViewClass, flushMode, flushStrategy, versionAttributeSet);
            }
        }
    }
}
| 4,419
|
810
|
from .inaturalist import INATURALIST2018
| 13
|
1,092
|
/*
* Copyright 2014-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.restdocs.mockmvc;
import java.util.HashMap;
import java.util.Map;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.mock.web.MockHttpServletResponse;
import org.springframework.restdocs.generate.RestDocumentationGenerator;
import org.springframework.restdocs.snippet.Snippet;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.test.web.servlet.ResultActions;
import org.springframework.test.web.servlet.ResultHandler;
import org.springframework.util.Assert;
/**
* A Spring MVC Test {@code ResultHandler} for documenting RESTful APIs.
*
* @author <NAME>
* @author <NAME>
* @see MockMvcRestDocumentation#document(String, Snippet...)
*/
public class RestDocumentationResultHandler implements ResultHandler {

    // Request attribute under which MockMvcRestDocumentationConfigurer stores
    // the REST Docs configuration map.
    static final String ATTRIBUTE_NAME_CONFIGURATION = "org.springframework.restdocs.configuration";

    private final RestDocumentationGenerator<MockHttpServletRequest, MockHttpServletResponse> delegate;

    RestDocumentationResultHandler(
            RestDocumentationGenerator<MockHttpServletRequest, MockHttpServletResponse> delegate) {
        Assert.notNull(delegate, "delegate must be non-null");
        this.delegate = delegate;
    }

    @Override
    public void handle(MvcResult result) throws Exception {
        this.delegate.handle(result.getRequest(), result.getResponse(), retrieveConfiguration(result));
    }

    /**
     * Creates a new {@link RestDocumentationResultHandler} to be passed into
     * {@link ResultActions#andDo(ResultHandler)} that will produce documentation using
     * the given {@code snippets}. For example:
     *
     * <pre>
     * this.mockMvc.perform(MockMvcRequestBuilders.get("/search"))
     *     .andExpect(status().isOk())
     *     .andDo(this.documentationHandler.document(responseFields(
     *             fieldWithPath("page").description("The requested Page")
     *     ));
     * </pre>
     * @param snippets the snippets
     * @return the new result handler
     */
    public RestDocumentationResultHandler document(Snippet... snippets) {
        return new RestDocumentationResultHandler(this.delegate.withSnippets(snippets)) {

            @Override
            public void handle(MvcResult result) throws Exception {
                // Remove the default snippets so only the explicitly given
                // snippets are produced; operate on a copy to leave the shared
                // configuration untouched.
                Map<String, Object> configuration = new HashMap<>(retrieveConfiguration(result));
                configuration.remove(RestDocumentationGenerator.ATTRIBUTE_NAME_DEFAULT_SNIPPETS);
                getDelegate().handle(result.getRequest(), result.getResponse(), configuration);
            }

        };
    }

    /**
     * Returns the {@link RestDocumentationGenerator} that is used as a delegate.
     * @return the delegate
     */
    protected final RestDocumentationGenerator<MockHttpServletRequest, MockHttpServletResponse> getDelegate() {
        return this.delegate;
    }

    private Map<String, Object> retrieveConfiguration(MvcResult result) {
        @SuppressWarnings("unchecked")
        Map<String, Object> configuration = (Map<String, Object>) result.getRequest()
                .getAttribute(ATTRIBUTE_NAME_CONFIGURATION);
        Assert.state(configuration != null, () -> "REST Docs configuration not found. Did you forget to apply a "
                + MockMvcRestDocumentationConfigurer.class.getSimpleName() + " when building the MockMvc instance?");
        return configuration;
    }

}
| 1,141
|
8,027
|
<reponame>Unknoob/buck
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.swift;
import com.facebook.buck.core.model.BuildTarget;
import com.facebook.buck.core.rules.impl.NoopBuildRule;
import com.facebook.buck.core.toolchain.tool.Tool;
import com.facebook.buck.io.filesystem.ProjectFilesystem;
import com.facebook.buck.swift.toolchain.SwiftPlatform;
import com.facebook.buck.swift.toolchain.SwiftTargetTriple;
import com.google.common.collect.ImmutableList;
import java.nio.file.Path;
import java.util.Optional;
/**
* This {@link BuildRule} holds tools and flags to create {@link SwiftPlatform}. It's a {@link
* NoopBuildRule} with no build steps or outputs.
*/
public class SwiftToolchainBuildRule extends NoopBuildRule {

  // Tools and path sets captured at construction time; this rule performs no
  // build steps itself and only serves as a factory for SwiftPlatform values.
  private final Tool swiftc;
  private final Optional<Tool> swiftStdlibTool;
  private final ImmutableList<Path> runtimePathsForBundling;
  private final ImmutableList<Path> runtimePathsForLinking;
  private final ImmutableList<Path> staticRuntimePaths;
  private final ImmutableList<Path> runtimeRunPaths;

  public SwiftToolchainBuildRule(
      BuildTarget buildTarget,
      ProjectFilesystem projectFilesystem,
      Tool swiftc,
      Optional<Tool> swiftStdlibTool,
      ImmutableList<Path> runtimePathsForBundling,
      ImmutableList<Path> runtimePathsForLinking,
      ImmutableList<Path> staticRuntimePaths,
      ImmutableList<Path> runtimeRunPaths) {
    super(buildTarget, projectFilesystem);
    this.swiftc = swiftc;
    this.swiftStdlibTool = swiftStdlibTool;
    this.runtimePathsForBundling = runtimePathsForBundling;
    this.runtimePathsForLinking = runtimePathsForLinking;
    this.staticRuntimePaths = staticRuntimePaths;
    this.runtimeRunPaths = runtimeRunPaths;
  }

  /** Provides SwiftPlatform for given Swift target triple */
  public SwiftPlatform getSwiftPlatform(SwiftTargetTriple swiftTarget) {
    // The builder's add* methods take varargs, so convert each list once up front.
    Path[] bundlingPaths = runtimePathsForBundling.toArray(new Path[0]);
    Path[] linkingPaths = runtimePathsForLinking.toArray(new Path[0]);
    Path[] staticPaths = staticRuntimePaths.toArray(new Path[0]);
    Path[] runPaths = runtimeRunPaths.toArray(new Path[0]);
    return SwiftPlatform.builder()
        .setSwiftc(swiftc)
        .setSwiftStdlibTool(swiftStdlibTool)
        .setSwiftSharedLibraryRunPaths(runtimeRunPaths)
        .setSwiftTarget(swiftTarget)
        .addSwiftRuntimePathsForBundling(bundlingPaths)
        .addSwiftRuntimePathsForLinking(linkingPaths)
        .addSwiftStaticRuntimePaths(staticPaths)
        .addSwiftSharedLibraryRunPaths(runPaths)
        .build();
  }

}
| 1,003
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.