text
stringlengths
2
97.5k
meta
dict
/*
 *      $Id: c_mpifnb.c,v 1.2 2008-07-23 16:16:53 haley Exp $
 */
/************************************************************************
*                                                                       *
*                Copyright (C)  2000                                    *
*        University Corporation for Atmospheric Research                *
*                All Rights Reserved                                    *
*                                                                       *
* The use of this Software is governed by a License Agreement.          *
*                                                                       *
************************************************************************/

#include <ncarg/ncargC.h>

/* Fortran entry point (MPIFNB) that this C binding forwards to. */
extern int NGCALLF(mpifnb,MPIFNB)(NGstring,int);

/*
 * c_mpifnb - C wrapper for the Fortran routine MPIFNB.
 *
 * chrs: NUL-terminated C string handed through to the Fortran routine.
 *
 * The string is converted to a Fortran-style string (NGCstrToFstr)
 * together with its length, and the Fortran routine's int result is
 * returned unchanged.  The old-style #ifdef keeps the wrapper
 * compilable with pre-ANSI (K&R) compilers.
 */
int c_mpifnb
#ifdef NeedFuncProto
(
    char *chrs
)
#else
(chrs)
    char *chrs;
#endif
{
    int len;
    NGstring chrs_f;
    len=NGSTRLEN(chrs);
    chrs_f=NGCstrToFstr(chrs,len);
    return(NGCALLF(mpifnb,MPIFNB)(chrs_f,len));
}
{ "pile_set_name": "Github" }
/* * Copyright 2015-2018 Igor Maznitsa. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.igormaznitsa.mindmap.plugins.processors; import com.igormaznitsa.mindmap.model.Extra; import com.igormaznitsa.mindmap.model.Topic; import com.igormaznitsa.mindmap.plugins.PopUpSection; import com.igormaznitsa.mindmap.plugins.api.AbstractFocusedTopicPlugin; import com.igormaznitsa.mindmap.plugins.api.ExternallyExecutedPlugin; import com.igormaznitsa.mindmap.plugins.api.PluginContext; import com.igormaznitsa.mindmap.swing.panel.Texts; import com.igormaznitsa.mindmap.swing.services.IconID; import com.igormaznitsa.mindmap.swing.services.ImageIconServiceProvider; import javax.annotation.Nonnull; import javax.annotation.Nullable; import javax.swing.Icon; public class ExtraJumpPlugin extends AbstractFocusedTopicPlugin implements ExternallyExecutedPlugin { private static final Icon ICO = ImageIconServiceProvider.findInstance().getIconForId(IconID.POPUP_EXTRAS_JUMP); @Override public int getOrder() { return 4; } @Override @Nullable protected Icon getIcon(@Nonnull final PluginContext contextl, @Nullable final Topic activeTopic) { return ICO; } @Override @Nonnull protected String getName(@Nonnull final PluginContext context, @Nullable final Topic activeTopic) { if (activeTopic == null) { return "..."; } return activeTopic.getExtras().containsKey(Extra.ExtraType.TOPIC) ? 
Texts.getString("MMDGraphEditor.makePopUp.miEditTransition") : Texts.getString("MMDGraphEditor.makePopUp.miAddTransition"); } @Override @Nonnull public PopUpSection getSection() { return PopUpSection.EXTRAS; } }
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: 40d85ef72aeb24b5da650efc13aa4a4a timeCreated: 1498693438 MonoImporter: serializedVersion: 2 defaultReferences: [] executionOrder: 0 icon: {instanceID: 0} userData: assetBundleName: assetBundleVariant:
{ "pile_set_name": "Github" }
1
{ "pile_set_name": "Github" }
/*------------------------------------------------------------------------
* (The MIT License)
*
* Copyright (c) 2008-2011 Rhomobile, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
* http://rhomobile.com
*------------------------------------------------------------------------*/

/*
 * JNI glue between the native Rhodes "Alert" API and the Java-side
 * com.rhomobile.rhodes.alert.Alert / PopupActivity classes.  Each
 * alert_* function looks up a static Java method and forwards its
 * arguments; on any failed lookup it silently returns (best-effort).
 */
#include "rhodes/JNIRhodes.h"
#include "rhodes/JNIRhoRuby.h"

#include "rhodes/jni/com_rhomobile_rhodes_alert_Alert.h"
#include "rhodes/jni/com_rhomobile_rhodes_alert_PopupActivity.h"

#include <common/rhoparams.h>
#include <common/RhodesApp.h>

#undef DEFAULT_LOGCATEGORY
#define DEFAULT_LOGCATEGORY "Alert"

/*
 * Called from Java when the popup activity fires its callback; converts
 * the three jstrings to std::string and forwards them to the native
 * popup-callback handler in RhodesApp.
 */
RHO_GLOBAL void JNICALL Java_com_rhomobile_rhodes_alert_PopupActivity_doCallback
  (JNIEnv *env, jclass, jstring url, jstring id, jstring title)
{
    rho_rhodesapp_callPopupCallback(rho_cast<std::string>(env, url).c_str(),
        rho_cast<std::string>(env, id).c_str(), rho_cast<std::string>(env, title).c_str());
}

/*
 * Shows the status popup via Alert.showStatusPopup(String,String,String).
 * All three C strings are converted to jstrings (jhstring holds a local
 * reference for the duration of the call).
 */
RHO_GLOBAL void alert_show_status(const char* szTitle, const char* szMessage, const char* szHide)
{
    JNIEnv *env = jnienv();
    jclass cls = getJNIClass(RHODES_JAVA_CLASS_ALERT);
    if (!cls) return;
    jmethodID mid = getJNIClassStaticMethod(env, cls, "showStatusPopup", "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V");
    if (!mid) return;
    RAWLOG_INFO("alert_show_status");
    jhstring jhTitle = rho_cast<jstring>(env, szTitle);
    jhstring jhMessage = rho_cast<jstring>(env, szMessage);
    jhstring jhHide = rho_cast<jstring>(env, szHide);
    env->CallStaticVoidMethod(cls, mid, jhTitle.get(), jhMessage.get(), jhHide.get());
}

/*
 * Shows a popup via Alert.showPopup(Object).  The rho_param must be a
 * String or a Hash; anything else is rejected with an error log before
 * conversion to a Java object.
 */
RHO_GLOBAL void alert_show_popup(rho_param *p)
{
    JNIEnv *env = jnienv();
    jclass cls = getJNIClass(RHODES_JAVA_CLASS_ALERT);
    if (!cls) return;
    jmethodID mid = getJNIClassStaticMethod(env, cls, "showPopup", "(Ljava/lang/Object;)V");
    if (!mid) return;

    if (p->type != RHO_PARAM_STRING && p->type != RHO_PARAM_HASH) {
        RAWLOG_ERROR("show_popup: wrong input parameter (expect String or Hash)");
        return;
    }

    jhobject paramsObj = RhoValueConverter(env).createObject(p);
    env->CallStaticVoidMethod(cls, mid, paramsObj.get());
}

/* Hides any visible popup via Alert.hidePopup(). */
RHO_GLOBAL void alert_hide_popup()
{
    JNIEnv *env = jnienv();
    jclass cls = getJNIClass(RHODES_JAVA_CLASS_ALERT);
    if (!cls) return;
    jmethodID mid = getJNIClassStaticMethod(env, cls, "hidePopup", "()V");
    if (!mid) return;
    env->CallStaticVoidMethod(cls, mid);
}

/* Vibrates the device for duration_ms milliseconds via Alert.vibrate(int). */
RHO_GLOBAL void alert_vibrate(int duration_ms)
{
    JNIEnv *env = jnienv();
    jclass cls = getJNIClass(RHODES_JAVA_CLASS_ALERT);
    if (!cls) return;
    jmethodID mid = getJNIClassStaticMethod(env, cls, "vibrate", "(I)V");
    if (!mid) return;
    env->CallStaticVoidMethod(cls, mid, duration_ms);
}

/*
 * Plays a media file via Alert.playFile(String,String).
 * file_name:  path of the file to play.
 * media_type: media type hint string (may be null -- rho_cast handles
 *             conversion; TODO confirm null handling on the Java side).
 */
RHO_GLOBAL void alert_play_file(char* file_name, char *media_type)
{
    JNIEnv *env = jnienv();
    jclass cls = getJNIClass(RHODES_JAVA_CLASS_ALERT);
    if (!cls) return;
    jmethodID mid = getJNIClassStaticMethod(env, cls, "playFile", "(Ljava/lang/String;Ljava/lang/String;)V");
    if (!mid) return;
    jhstring jhFileName = rho_cast<jstring>(env, file_name);
    jhstring jhMedia = rho_cast<jstring>(env, media_type);
    env->CallStaticVoidMethod(cls, mid, jhFileName.get(), jhMedia.get());
}
{ "pile_set_name": "Github" }
/*
 * Copyright (C) 2010 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef IDBFactoryBackendImpl_h
#define IDBFactoryBackendImpl_h

#include "IDBFactoryBackendInterface.h"
#include <wtf/HashMap.h>
#include <wtf/text/StringHash.h>

#if ENABLE(INDEXED_DATABASE)

namespace WebCore {

class DOMStringList;

class IDBBackingStore;
class IDBDatabaseBackendImpl;
class IDBTransactionCoordinator;

/*
 * Concrete backend factory for IndexedDB.  Tracks open database
 * backends and backing stores, keyed by a string "unique identifier"
 * (format determined by callers; not visible here).
 */
class IDBFactoryBackendImpl : public IDBFactoryBackendInterface {
public:
    static PassRefPtr<IDBFactoryBackendImpl> create()
    {
        return adoptRef(new IDBFactoryBackendImpl());
    }
    virtual ~IDBFactoryBackendImpl();

    // Notifications from weak pointers: the maps below hold raw
    // pointers, so the owned objects de-register themselves here
    // when they are destroyed.
    void removeIDBDatabaseBackend(const String& uniqueIdentifier);
    void addIDBBackingStore(const String& uniqueIdentifier, IDBBackingStore*);
    void removeIDBBackingStore(const String& uniqueIdentifier);

    // Opens (or creates) the named database and reports the result
    // through the IDBCallbacks object.
    virtual void open(const String& name, PassRefPtr<IDBCallbacks>, PassRefPtr<SecurityOrigin>, Frame*, const String& dataDir, int64_t maximumSize, BackingStoreType);

private:
    IDBFactoryBackendImpl();

    // Raw (non-owning) pointers; see the weak-pointer notifications above.
    typedef HashMap<String, IDBDatabaseBackendImpl*> IDBDatabaseBackendMap;
    IDBDatabaseBackendMap m_databaseBackendMap;

    typedef HashMap<String, IDBBackingStore*> IDBBackingStoreMap;
    IDBBackingStoreMap m_backingStoreMap;

    RefPtr<IDBTransactionCoordinator> m_transactionCoordinator;

    // Only one instance of the factory should exist at any given time.
    static IDBFactoryBackendImpl* idbFactoryBackendImpl;
};

} // namespace WebCore

#endif

#endif // IDBFactoryBackendImpl_h
{ "pile_set_name": "Github" }
/*
 * This file is part of the UCB release of Plan 9. It is subject to the license
 * terms in the LICENSE file found in the top-level directory of this
 * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
 * part of the UCB release of Plan 9, including this file, may be copied,
 * modified, propagated, or distributed except according to the terms contained
 * in the LICENSE file.
 */

#include <u.h>
#include <libc.h>
#include <fcall.h>
#include <bio.h>
#include <regexp.h>
#define Extern
#include "exportfs.h"

/* nil-terminated arrays of compiled include/exclude patterns. */
Reprog	**exclude, **include;
char	*patternfile;

/*
 * exclusions - load the pattern file into the include/exclude arrays.
 *
 * Each useful line is "+ <regexp>" (include) or "- <regexp>" (exclude);
 * anything else is logged and ignored.  Both arrays grow by doubling
 * and are kept nil-terminated after every append.  Fatal on open or
 * allocation failure.  No-op when patternfile is unset.
 */
void
exclusions(void)
{
	Biobuf *f;
	int ni, nmaxi, ne, nmaxe;
	char *line;

	if(patternfile == nil)
		return;

	f = Bopen(patternfile, OREAD);
	if(f == nil)
		fatal("cannot open patternfile");
	ni = 0;
	nmaxi = 100;
	include = malloc(nmaxi*sizeof(*include));
	if(include == nil)
		fatal("out of memory");
	include[0] = nil;
	ne = 0;
	nmaxe = 100;
	exclude = malloc(nmaxe*sizeof(*exclude));
	if(exclude == nil)
		fatal("out of memory");
	exclude[0] = nil;

	while((line = Brdline(f, '\n')) != nil){
		/* replace the trailing newline with a terminator */
		line[Blinelen(f) - 1] = 0;
		/* require at least "X " before the pattern text */
		if(strlen(line) < 2 || line[1] != ' ')
			continue;
		switch(line[0]){
		case '+':
			if(ni+1 >= nmaxi){
				nmaxi = 2*nmaxi;
				include = realloc(include, nmaxi*sizeof(*include));
				if(include == nil)
					fatal("out of memory");
			}
			DEBUG(DFD, "\tinclude %s\n", line+2);
			include[ni] = regcomp(line+2);
			include[++ni] = nil;
			break;
		case '-':
			if(ne+1 >= nmaxe){
				nmaxe = 2*nmaxe;
				exclude = realloc(exclude, nmaxe*sizeof(*exclude));
				if(exclude == nil)
					fatal("out of memory");
			}
			DEBUG(DFD, "\texclude %s\n", line+2);
			exclude[ne] = regcomp(line+2);
			exclude[++ne] = nil;
			break;
		default:
			DEBUG(DFD, "ignoring pattern %s\n", line);
			break;
		}
	}
	Bterm(f);
}

/*
 * excludefile - decide whether path is filtered out.
 *
 * The leading character of path is skipped (path+1); an empty remainder
 * is treated as "/".  Returns -1 (excluded) if the path fails to match
 * ANY include pattern, or matches ANY exclude pattern; 0 otherwise.
 * Note: with a non-empty include list a path must match every include
 * pattern to survive.
 */
int
excludefile(char *path)
{
	Reprog **re;
	char *p;

	if(*(path+1) == 0)
		p = "/";
	else
		p = path+1;

	DEBUG(DFD, "checking %s\n", path);
	for(re = include; *re != nil; re++){
		if(regexec(*re, p, nil, 0) != 1){
			DEBUG(DFD, "excluded+ %s\n", path);
			return -1;
		}
	}
	for(re = exclude; *re != nil; re++){
		if(regexec(*re, p, nil, 0) == 1){
			DEBUG(DFD, "excluded- %s\n", path);
			return -1;
		}
	}
	return 0;
}

/*
 * preaddir - read up to n bytes of marshalled directory entries into data.
 *
 * Supports only sequential reads: offset 0 rewinds the underlying fd and
 * resets the cached Dir batch; any other offset must equal the Fid's
 * current offset.  Entries matching the exclusion patterns are skipped.
 * Entries are marshalled with convD2M; an entry that no longer fits is
 * pushed back (f->cdir--) for the next call.  Returns the byte count
 * written, 0 at end of directory, or -1 on error.
 */
int
preaddir(Fid *f, uint8_t *data, int n, int64_t offset)
{
	int r = 0, m;
	Dir *d;

	DEBUG(DFD, "\tpreaddir n=%d wo=%lld fo=%lld\n", n, offset, f->offset);

	if(offset == 0 && f->offset != 0){
		/* rewind: drop the cached batch and start over */
		if(seek(f->fid, 0, 0) != 0)
			return -1;
		f->offset = f->cdir = f->ndir = 0;
		free(f->dir);
		f->dir = nil;
	}else if(offset != f->offset){
		werrstr("can't seek dir %lld to %lld", f->offset, offset);
		return -1;
	}

	while(n > 0){
		if(f->dir == nil){
			/* fetch the next batch of entries */
			f->ndir = dirread(f->fid, &f->dir);
			if(f->ndir < 0)
				return f->ndir;
			if(f->ndir == 0)
				return r;
		}
		d = &f->dir[f->cdir++];
		if(exclude){
			char *p = makepath(f->f, d->name);
			if(excludefile(p)){
				free(p);
				goto skipentry;
			}
			free(p);
		}
		m = convD2M(d, data, n);
		DEBUG(DFD, "\t\tconvD2M %d\n", m);
		if(m <= BIT16SZ){
			DEBUG(DFD, "\t\t\tneeded %d\n", GBIT16(data));
			/* not enough room for full entry; leave for next time */
			f->cdir--;
			return r;
		}else{
			data += m;
			n -= m;
			r += m;
			f->offset += m;
		}
	skipentry:
		/* batch exhausted: free it so the next loop refills */
		if(f->cdir >= f->ndir){
			f->cdir = f->ndir = 0;
			free(f->dir);
			f->dir = nil;
		}
	}
	return r;
}
{ "pile_set_name": "Github" }
// Re-export the local fib helper together with a module-level key string.
import fib from './fib';

let key = 'obj';

export { key, fib };
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <set xmlns:android="http://schemas.android.com/apk/res/android"> <alpha android:fromAlpha="1.0" android:toAlpha="0.0"/> <translate android:fromYDelta="0" android:toYDelta="50%"/> </set>
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <!-- Copyright (C) 2017 The Android Open Source Project Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <com.android.launcher3.views.WorkFooterContainer xmlns:android="http://schemas.android.com/apk/res/android" android:layout_width="match_parent" android:layout_height="wrap_content" android:focusable="true" android:paddingBottom="@dimen/all_apps_work_profile_tab_footer_bottom_padding" android:paddingLeft="@dimen/dynamic_grid_cell_padding_x" android:paddingRight="@dimen/dynamic_grid_cell_padding_x" android:paddingTop="@dimen/all_apps_work_profile_tab_footer_top_padding"> <ImageView android:id="@+id/work_footer_divider" android:layout_width="match_parent" android:layout_height="wrap_content" android:focusable="false" android:importantForAccessibility="no" android:paddingBottom="@dimen/all_apps_divider_margin_vertical" android:paddingTop="@dimen/all_apps_divider_margin_vertical" android:scaleType="fitXY" android:src="@drawable/all_apps_divider"/> <com.android.launcher3.allapps.WorkModeSwitch android:id="@+id/work_mode_toggle" android:layout_width="wrap_content" android:layout_height="wrap_content" android:layout_alignParentEnd="true" android:layout_below="@id/work_footer_divider"/> <TextView android:id="@android:id/title" android:layout_width="wrap_content" android:layout_height="wrap_content" android:layout_alignBaseline="@id/work_mode_toggle" android:layout_alignParentStart="true" android:ellipsize="end" android:lines="1" 
android:text="@string/work_profile_toggle_label" android:textColor="?android:attr/textColorTertiary" android:textSize="16sp"/> <ImageView android:id="@android:id/icon" android:layout_width="24dp" android:layout_height="24dp" android:layout_below="@android:id/title" android:layout_marginTop="8dp" android:src="@drawable/ic_corp"/> <TextView android:id="@+id/managed_by_label" android:layout_width="wrap_content" android:layout_height="wrap_content" android:layout_below="@android:id/title" android:layout_marginTop="8dp" android:layout_toEndOf="@android:id/icon" android:ellipsize="end" android:gravity="center_vertical" android:lines="1" android:minHeight="24dp" android:paddingStart="12dp" android:textColor="?android:attr/textColorHint" android:textSize="13sp"/> </com.android.launcher3.views.WorkFooterContainer>
{ "pile_set_name": "Github" }
#!/bin/sh export LC_ALL="en_US.UTF-8" # working directory of koreader KOREADER_DIR="${0%/*}" # we're always starting from our working directory cd "${KOREADER_DIR}" || exit # export load library path export LD_LIBRARY_PATH=${KOREADER_DIR}/libs:${LD_LIBRARY_PATH} RETURN_VALUE=85 if [ $# -eq 0 ]; then # no arguments if [ -n "${XDG_DOCUMENTS_DIR+x}" ]; then start_path=${XDG_DOCUMENTS_DIR} else start_path=$(pwd) fi else start_path="$*" fi while [ ${RETURN_VALUE} -eq 85 ]; do ./reader.lua "${start_path}" RETURN_VALUE=$? done exit ${RETURN_VALUE}
{ "pile_set_name": "Github" }
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2013, OpenCV Foundation, all rights reserved. // Copyright (C) 2015, Itseez Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #ifndef OPENCV_HAL_INTRIN_NEON_HPP #define OPENCV_HAL_INTRIN_NEON_HPP #include <algorithm> #include "opencv2/core/utility.hpp" namespace cv { //! @cond IGNORED #define CV_SIMD128 1 #if defined(__aarch64__) #define CV_SIMD128_64F 1 #else #define CV_SIMD128_64F 0 #endif #if CV_SIMD128_64F #define OPENCV_HAL_IMPL_NEON_REINTERPRET(_Tpv, suffix) \ template <typename T> static inline \ _Tpv vreinterpretq_##suffix##_f64(T a) { return (_Tpv) a; } \ template <typename T> static inline \ float64x2_t vreinterpretq_f64_##suffix(T a) { return (float64x2_t) a; } OPENCV_HAL_IMPL_NEON_REINTERPRET(uint8x16_t, u8) OPENCV_HAL_IMPL_NEON_REINTERPRET(int8x16_t, s8) OPENCV_HAL_IMPL_NEON_REINTERPRET(uint16x8_t, u16) OPENCV_HAL_IMPL_NEON_REINTERPRET(int16x8_t, s16) OPENCV_HAL_IMPL_NEON_REINTERPRET(uint32x4_t, u32) OPENCV_HAL_IMPL_NEON_REINTERPRET(int32x4_t, s32) OPENCV_HAL_IMPL_NEON_REINTERPRET(uint64x2_t, u64) OPENCV_HAL_IMPL_NEON_REINTERPRET(int64x2_t, s64) OPENCV_HAL_IMPL_NEON_REINTERPRET(float32x4_t, f32) #endif struct v_uint8x16 { typedef uchar lane_type; enum { nlanes = 16 }; v_uint8x16() {} explicit v_uint8x16(uint8x16_t v) : val(v) {} v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7, uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15) { uchar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15}; val = vld1q_u8(v); } uchar get0() const { return 
vgetq_lane_u8(val, 0); } uint8x16_t val; }; struct v_int8x16 { typedef schar lane_type; enum { nlanes = 16 }; v_int8x16() {} explicit v_int8x16(int8x16_t v) : val(v) {} v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7, schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15) { schar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15}; val = vld1q_s8(v); } schar get0() const { return vgetq_lane_s8(val, 0); } int8x16_t val; }; struct v_uint16x8 { typedef ushort lane_type; enum { nlanes = 8 }; v_uint16x8() {} explicit v_uint16x8(uint16x8_t v) : val(v) {} v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7) { ushort v[] = {v0, v1, v2, v3, v4, v5, v6, v7}; val = vld1q_u16(v); } ushort get0() const { return vgetq_lane_u16(val, 0); } uint16x8_t val; }; struct v_int16x8 { typedef short lane_type; enum { nlanes = 8 }; v_int16x8() {} explicit v_int16x8(int16x8_t v) : val(v) {} v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7) { short v[] = {v0, v1, v2, v3, v4, v5, v6, v7}; val = vld1q_s16(v); } short get0() const { return vgetq_lane_s16(val, 0); } int16x8_t val; }; struct v_uint32x4 { typedef unsigned lane_type; enum { nlanes = 4 }; v_uint32x4() {} explicit v_uint32x4(uint32x4_t v) : val(v) {} v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3) { unsigned v[] = {v0, v1, v2, v3}; val = vld1q_u32(v); } unsigned get0() const { return vgetq_lane_u32(val, 0); } uint32x4_t val; }; struct v_int32x4 { typedef int lane_type; enum { nlanes = 4 }; v_int32x4() {} explicit v_int32x4(int32x4_t v) : val(v) {} v_int32x4(int v0, int v1, int v2, int v3) { int v[] = {v0, v1, v2, v3}; val = vld1q_s32(v); } int get0() const { return vgetq_lane_s32(val, 0); } int32x4_t val; }; struct v_float32x4 { typedef float lane_type; enum { nlanes = 4 }; v_float32x4() {} explicit v_float32x4(float32x4_t v) : val(v) {} 
v_float32x4(float v0, float v1, float v2, float v3) { float v[] = {v0, v1, v2, v3}; val = vld1q_f32(v); } float get0() const { return vgetq_lane_f32(val, 0); } float32x4_t val; }; struct v_uint64x2 { typedef uint64 lane_type; enum { nlanes = 2 }; v_uint64x2() {} explicit v_uint64x2(uint64x2_t v) : val(v) {} v_uint64x2(unsigned v0, unsigned v1) { uint64 v[] = {v0, v1}; val = vld1q_u64(v); } uint64 get0() const { return vgetq_lane_u64(val, 0); } uint64x2_t val; }; struct v_int64x2 { typedef int64 lane_type; enum { nlanes = 2 }; v_int64x2() {} explicit v_int64x2(int64x2_t v) : val(v) {} v_int64x2(int v0, int v1) { int64 v[] = {v0, v1}; val = vld1q_s64(v); } int64 get0() const { return vgetq_lane_s64(val, 0); } int64x2_t val; }; #if CV_SIMD128_64F struct v_float64x2 { typedef double lane_type; enum { nlanes = 2 }; v_float64x2() {} explicit v_float64x2(float64x2_t v) : val(v) {} v_float64x2(double v0, double v1) { double v[] = {v0, v1}; val = vld1q_f64(v); } double get0() const { return vgetq_lane_f64(val, 0); } float64x2_t val; }; #endif #if defined (HAVE_FP16) // Workaround for old comiplers template <typename T> static inline int16x4_t vreinterpret_s16_f16(T a) { return (int16x4_t)a; } template <typename T> static inline float16x4_t vreinterpret_f16_s16(T a) { return (float16x4_t)a; } template <typename T> static inline float16x4_t vld1_f16(const T* ptr) { return vreinterpret_f16_s16(vld1_s16((const short*)ptr)); } template <typename T> static inline void vst1_f16(T* ptr, float16x4_t a) { vst1_s16((short*)ptr, vreinterpret_s16_f16(a)); } struct v_float16x4 { typedef short lane_type; enum { nlanes = 4 }; v_float16x4() {} explicit v_float16x4(float16x4_t v) : val(v) {} v_float16x4(short v0, short v1, short v2, short v3) { short v[] = {v0, v1, v2, v3}; val = vld1_f16(v); } short get0() const { return vget_lane_s16(vreinterpret_s16_f16(val), 0); } float16x4_t val; }; #endif #define OPENCV_HAL_IMPL_NEON_INIT(_Tpv, _Tp, suffix) \ inline v_##_Tpv v_setzero_##suffix() { 
return v_##_Tpv(vdupq_n_##suffix((_Tp)0)); } \ inline v_##_Tpv v_setall_##suffix(_Tp v) { return v_##_Tpv(vdupq_n_##suffix(v)); } \ inline _Tpv##_t vreinterpretq_##suffix##_##suffix(_Tpv##_t v) { return v; } \ inline v_uint8x16 v_reinterpret_as_u8(const v_##_Tpv& v) { return v_uint8x16(vreinterpretq_u8_##suffix(v.val)); } \ inline v_int8x16 v_reinterpret_as_s8(const v_##_Tpv& v) { return v_int8x16(vreinterpretq_s8_##suffix(v.val)); } \ inline v_uint16x8 v_reinterpret_as_u16(const v_##_Tpv& v) { return v_uint16x8(vreinterpretq_u16_##suffix(v.val)); } \ inline v_int16x8 v_reinterpret_as_s16(const v_##_Tpv& v) { return v_int16x8(vreinterpretq_s16_##suffix(v.val)); } \ inline v_uint32x4 v_reinterpret_as_u32(const v_##_Tpv& v) { return v_uint32x4(vreinterpretq_u32_##suffix(v.val)); } \ inline v_int32x4 v_reinterpret_as_s32(const v_##_Tpv& v) { return v_int32x4(vreinterpretq_s32_##suffix(v.val)); } \ inline v_uint64x2 v_reinterpret_as_u64(const v_##_Tpv& v) { return v_uint64x2(vreinterpretq_u64_##suffix(v.val)); } \ inline v_int64x2 v_reinterpret_as_s64(const v_##_Tpv& v) { return v_int64x2(vreinterpretq_s64_##suffix(v.val)); } \ inline v_float32x4 v_reinterpret_as_f32(const v_##_Tpv& v) { return v_float32x4(vreinterpretq_f32_##suffix(v.val)); } OPENCV_HAL_IMPL_NEON_INIT(uint8x16, uchar, u8) OPENCV_HAL_IMPL_NEON_INIT(int8x16, schar, s8) OPENCV_HAL_IMPL_NEON_INIT(uint16x8, ushort, u16) OPENCV_HAL_IMPL_NEON_INIT(int16x8, short, s16) OPENCV_HAL_IMPL_NEON_INIT(uint32x4, unsigned, u32) OPENCV_HAL_IMPL_NEON_INIT(int32x4, int, s32) OPENCV_HAL_IMPL_NEON_INIT(uint64x2, uint64, u64) OPENCV_HAL_IMPL_NEON_INIT(int64x2, int64, s64) OPENCV_HAL_IMPL_NEON_INIT(float32x4, float, f32) #if CV_SIMD128_64F #define OPENCV_HAL_IMPL_NEON_INIT_64(_Tpv, suffix) \ inline v_float64x2 v_reinterpret_as_f64(const v_##_Tpv& v) { return v_float64x2(vreinterpretq_f64_##suffix(v.val)); } OPENCV_HAL_IMPL_NEON_INIT(float64x2, double, f64) OPENCV_HAL_IMPL_NEON_INIT_64(uint8x16, u8) 
OPENCV_HAL_IMPL_NEON_INIT_64(int8x16, s8) OPENCV_HAL_IMPL_NEON_INIT_64(uint16x8, u16) OPENCV_HAL_IMPL_NEON_INIT_64(int16x8, s16) OPENCV_HAL_IMPL_NEON_INIT_64(uint32x4, u32) OPENCV_HAL_IMPL_NEON_INIT_64(int32x4, s32) OPENCV_HAL_IMPL_NEON_INIT_64(uint64x2, u64) OPENCV_HAL_IMPL_NEON_INIT_64(int64x2, s64) OPENCV_HAL_IMPL_NEON_INIT_64(float32x4, f32) OPENCV_HAL_IMPL_NEON_INIT_64(float64x2, f64) #endif #define OPENCV_HAL_IMPL_NEON_PACK(_Tpvec, _Tp, hreg, suffix, _Tpwvec, wsuffix, pack, op) \ inline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b) \ { \ hreg a1 = vqmov##op##_##wsuffix(a.val), b1 = vqmov##op##_##wsuffix(b.val); \ return _Tpvec(vcombine_##suffix(a1, b1)); \ } \ inline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a) \ { \ hreg a1 = vqmov##op##_##wsuffix(a.val); \ vst1_##suffix(ptr, a1); \ } \ template<int n> inline \ _Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b) \ { \ hreg a1 = vqrshr##op##_n_##wsuffix(a.val, n); \ hreg b1 = vqrshr##op##_n_##wsuffix(b.val, n); \ return _Tpvec(vcombine_##suffix(a1, b1)); \ } \ template<int n> inline \ void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \ { \ hreg a1 = vqrshr##op##_n_##wsuffix(a.val, n); \ vst1_##suffix(ptr, a1); \ } OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_uint16x8, u16, pack, n) OPENCV_HAL_IMPL_NEON_PACK(v_int8x16, schar, int8x8_t, s8, v_int16x8, s16, pack, n) OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_uint32x4, u32, pack, n) OPENCV_HAL_IMPL_NEON_PACK(v_int16x8, short, int16x4_t, s16, v_int32x4, s32, pack, n) OPENCV_HAL_IMPL_NEON_PACK(v_uint32x4, unsigned, uint32x2_t, u32, v_uint64x2, u64, pack, n) OPENCV_HAL_IMPL_NEON_PACK(v_int32x4, int, int32x2_t, s32, v_int64x2, s64, pack, n) OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_int16x8, s16, pack_u, un) OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_int32x4, s32, pack_u, un) inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0, const v_float32x4& 
m1, const v_float32x4& m2, const v_float32x4& m3) { float32x2_t vl = vget_low_f32(v.val), vh = vget_high_f32(v.val); float32x4_t res = vmulq_lane_f32(m0.val, vl, 0); res = vmlaq_lane_f32(res, m1.val, vl, 1); res = vmlaq_lane_f32(res, m2.val, vh, 0); res = vmlaq_lane_f32(res, m3.val, vh, 1); return v_float32x4(res); } #define OPENCV_HAL_IMPL_NEON_BIN_OP(bin_op, _Tpvec, intrin) \ inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ { \ return _Tpvec(intrin(a.val, b.val)); \ } \ inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \ { \ a.val = intrin(a.val, b.val); \ return a; \ } OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint8x16, vqaddq_u8) OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint8x16, vqsubq_u8) OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int8x16, vqaddq_s8) OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int8x16, vqsubq_s8) OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint16x8, vqaddq_u16) OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint16x8, vqsubq_u16) OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_uint16x8, vmulq_u16) OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int16x8, vqaddq_s16) OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int16x8, vqsubq_s16) OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_int16x8, vmulq_s16) OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int32x4, vaddq_s32) OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int32x4, vsubq_s32) OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_int32x4, vmulq_s32) OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint32x4, vaddq_u32) OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint32x4, vsubq_u32) OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_uint32x4, vmulq_u32) OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_float32x4, vaddq_f32) OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_float32x4, vsubq_f32) OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_float32x4, vmulq_f32) OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int64x2, vaddq_s64) OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int64x2, vsubq_s64) OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint64x2, vaddq_u64) OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint64x2, vsubq_u64) #if CV_SIMD128_64F OPENCV_HAL_IMPL_NEON_BIN_OP(/, v_float32x4, vdivq_f32) OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_float64x2, vaddq_f64) 
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_float64x2, vsubq_f64) OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_float64x2, vmulq_f64) OPENCV_HAL_IMPL_NEON_BIN_OP(/, v_float64x2, vdivq_f64) #else inline v_float32x4 operator / (const v_float32x4& a, const v_float32x4& b) { float32x4_t reciprocal = vrecpeq_f32(b.val); reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal); reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal); return v_float32x4(vmulq_f32(a.val, reciprocal)); } inline v_float32x4& operator /= (v_float32x4& a, const v_float32x4& b) { float32x4_t reciprocal = vrecpeq_f32(b.val); reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal); reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal); a.val = vmulq_f32(a.val, reciprocal); return a; } #endif inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b, v_int32x4& c, v_int32x4& d) { c.val = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val)); d.val = vmull_s16(vget_high_s16(a.val), vget_high_s16(b.val)); } inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b, v_uint32x4& c, v_uint32x4& d) { c.val = vmull_u16(vget_low_u16(a.val), vget_low_u16(b.val)); d.val = vmull_u16(vget_high_u16(a.val), vget_high_u16(b.val)); } inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b, v_uint64x2& c, v_uint64x2& d) { c.val = vmull_u32(vget_low_u32(a.val), vget_low_u32(b.val)); d.val = vmull_u32(vget_high_u32(a.val), vget_high_u32(b.val)); } inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b) { int32x4_t c = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val)); int32x4_t d = vmull_s16(vget_high_s16(a.val), vget_high_s16(b.val)); int32x4x2_t cd = vuzpq_s32(c, d); return v_int32x4(vaddq_s32(cd.val[0], cd.val[1])); } #define OPENCV_HAL_IMPL_NEON_LOGIC_OP(_Tpvec, suffix) \ OPENCV_HAL_IMPL_NEON_BIN_OP(&, _Tpvec, vandq_##suffix) \ OPENCV_HAL_IMPL_NEON_BIN_OP(|, _Tpvec, vorrq_##suffix) \ OPENCV_HAL_IMPL_NEON_BIN_OP(^, _Tpvec, veorq_##suffix) \ inline _Tpvec 
operator ~ (const _Tpvec& a) \ { \ return _Tpvec(vreinterpretq_##suffix##_u8(vmvnq_u8(vreinterpretq_u8_##suffix(a.val)))); \ } OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint8x16, u8) OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int8x16, s8) OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint16x8, u16) OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int16x8, s16) OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint32x4, u32) OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int32x4, s32) OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint64x2, u64) OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int64x2, s64) #define OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(bin_op, intrin) \ inline v_float32x4 operator bin_op (const v_float32x4& a, const v_float32x4& b) \ { \ return v_float32x4(vreinterpretq_f32_s32(intrin(vreinterpretq_s32_f32(a.val), vreinterpretq_s32_f32(b.val)))); \ } \ inline v_float32x4& operator bin_op##= (v_float32x4& a, const v_float32x4& b) \ { \ a.val = vreinterpretq_f32_s32(intrin(vreinterpretq_s32_f32(a.val), vreinterpretq_s32_f32(b.val))); \ return a; \ } OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(&, vandq_s32) OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(|, vorrq_s32) OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(^, veorq_s32) inline v_float32x4 operator ~ (const v_float32x4& a) { return v_float32x4(vreinterpretq_f32_s32(vmvnq_s32(vreinterpretq_s32_f32(a.val)))); } #if CV_SIMD128_64F inline v_float32x4 v_sqrt(const v_float32x4& x) { return v_float32x4(vsqrtq_f32(x.val)); } inline v_float32x4 v_invsqrt(const v_float32x4& x) { v_float32x4 one = v_setall_f32(1.0f); return one / v_sqrt(x); } #else inline v_float32x4 v_sqrt(const v_float32x4& x) { float32x4_t x1 = vmaxq_f32(x.val, vdupq_n_f32(FLT_MIN)); float32x4_t e = vrsqrteq_f32(x1); e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e); e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e); return v_float32x4(vmulq_f32(x.val, e)); } inline v_float32x4 v_invsqrt(const v_float32x4& x) { float32x4_t e = vrsqrteq_f32(x.val); e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x.val, e), e), e); e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x.val, e), e), e); return v_float32x4(e); } #endif 
#define OPENCV_HAL_IMPL_NEON_ABS(_Tpuvec, _Tpsvec, usuffix, ssuffix) \ inline _Tpuvec v_abs(const _Tpsvec& a) { return v_reinterpret_as_##usuffix(_Tpsvec(vabsq_##ssuffix(a.val))); } OPENCV_HAL_IMPL_NEON_ABS(v_uint8x16, v_int8x16, u8, s8) OPENCV_HAL_IMPL_NEON_ABS(v_uint16x8, v_int16x8, u16, s16) OPENCV_HAL_IMPL_NEON_ABS(v_uint32x4, v_int32x4, u32, s32) inline v_float32x4 v_abs(v_float32x4 x) { return v_float32x4(vabsq_f32(x.val)); } #if CV_SIMD128_64F #define OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(bin_op, intrin) \ inline v_float64x2 operator bin_op (const v_float64x2& a, const v_float64x2& b) \ { \ return v_float64x2(vreinterpretq_f64_s64(intrin(vreinterpretq_s64_f64(a.val), vreinterpretq_s64_f64(b.val)))); \ } \ inline v_float64x2& operator bin_op##= (v_float64x2& a, const v_float64x2& b) \ { \ a.val = vreinterpretq_f64_s64(intrin(vreinterpretq_s64_f64(a.val), vreinterpretq_s64_f64(b.val))); \ return a; \ } OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(&, vandq_s64) OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(|, vorrq_s64) OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(^, veorq_s64) inline v_float64x2 operator ~ (const v_float64x2& a) { return v_float64x2(vreinterpretq_f64_s32(vmvnq_s32(vreinterpretq_s32_f64(a.val)))); } inline v_float64x2 v_sqrt(const v_float64x2& x) { return v_float64x2(vsqrtq_f64(x.val)); } inline v_float64x2 v_invsqrt(const v_float64x2& x) { v_float64x2 one = v_setall_f64(1.0f); return one / v_sqrt(x); } inline v_float64x2 v_abs(v_float64x2 x) { return v_float64x2(vabsq_f64(x.val)); } #endif // TODO: exp, log, sin, cos #define OPENCV_HAL_IMPL_NEON_BIN_FUNC(_Tpvec, func, intrin) \ inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \ { \ return _Tpvec(intrin(a.val, b.val)); \ } OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_min, vminq_u8) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_max, vmaxq_u8) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_min, vminq_s8) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_max, vmaxq_s8) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_min, vminq_u16) 
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_max, vmaxq_u16) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_min, vminq_s16) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_max, vmaxq_s16) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_min, vminq_u32) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_max, vmaxq_u32) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int32x4, v_min, vminq_s32) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int32x4, v_max, vmaxq_s32) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_min, vminq_f32) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_max, vmaxq_f32) #if CV_SIMD128_64F OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_min, vminq_f64) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_max, vmaxq_f64) #endif #if CV_SIMD128_64F inline int64x2_t vmvnq_s64(int64x2_t a) { int64x2_t vx = vreinterpretq_s64_u32(vdupq_n_u32(0xFFFFFFFF)); return veorq_s64(a, vx); } inline uint64x2_t vmvnq_u64(uint64x2_t a) { uint64x2_t vx = vreinterpretq_u64_u32(vdupq_n_u32(0xFFFFFFFF)); return veorq_u64(a, vx); } #endif #define OPENCV_HAL_IMPL_NEON_INT_CMP_OP(_Tpvec, cast, suffix, not_suffix) \ inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \ { return _Tpvec(cast(vceqq_##suffix(a.val, b.val))); } \ inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ { return _Tpvec(cast(vmvnq_##not_suffix(vceqq_##suffix(a.val, b.val)))); } \ inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \ { return _Tpvec(cast(vcltq_##suffix(a.val, b.val))); } \ inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \ { return _Tpvec(cast(vcgtq_##suffix(a.val, b.val))); } \ inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \ { return _Tpvec(cast(vcleq_##suffix(a.val, b.val))); } \ inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \ { return _Tpvec(cast(vcgeq_##suffix(a.val, b.val))); } OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint8x16, OPENCV_HAL_NOP, u8, u8) OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int8x16, vreinterpretq_s8_u8, s8, u8) OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint16x8, OPENCV_HAL_NOP, u16, 
u16) OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int16x8, vreinterpretq_s16_u16, s16, u16) OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint32x4, OPENCV_HAL_NOP, u32, u32) OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int32x4, vreinterpretq_s32_u32, s32, u32) OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float32x4, vreinterpretq_f32_u32, f32, u32) #if CV_SIMD128_64F OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint64x2, OPENCV_HAL_NOP, u64, u64) OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int64x2, vreinterpretq_s64_u64, s64, u64) OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float64x2, vreinterpretq_f64_u64, f64, u64) #endif OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_add_wrap, vaddq_u8) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_add_wrap, vaddq_s8) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_add_wrap, vaddq_u16) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_add_wrap, vaddq_s16) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_sub_wrap, vsubq_u8) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_sub_wrap, vsubq_s8) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_sub_wrap, vsubq_u16) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_sub_wrap, vsubq_s16) // TODO: absdiff for signed integers OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_absdiff, vabdq_u8) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_absdiff, vabdq_u16) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_absdiff, vabdq_u32) OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_absdiff, vabdq_f32) #if CV_SIMD128_64F OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_absdiff, vabdq_f64) #endif #define OPENCV_HAL_IMPL_NEON_BIN_FUNC2(_Tpvec, _Tpvec2, cast, func, intrin) \ inline _Tpvec2 func(const _Tpvec& a, const _Tpvec& b) \ { \ return _Tpvec2(cast(intrin(a.val, b.val))); \ } OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int8x16, v_uint8x16, vreinterpretq_u8_s8, v_absdiff, vabdq_s8) OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int16x8, v_uint16x8, vreinterpretq_u16_s16, v_absdiff, vabdq_s16) OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int32x4, v_uint32x4, vreinterpretq_u32_s32, v_absdiff, vabdq_s32) inline v_float32x4 v_magnitude(const v_float32x4& a, const 
v_float32x4& b) { v_float32x4 x(vmlaq_f32(vmulq_f32(a.val, a.val), b.val, b.val)); return v_sqrt(x); } inline v_float32x4 v_sqr_magnitude(const v_float32x4& a, const v_float32x4& b) { return v_float32x4(vmlaq_f32(vmulq_f32(a.val, a.val), b.val, b.val)); } inline v_float32x4 v_muladd(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c) { return v_float32x4(vmlaq_f32(c.val, a.val, b.val)); } #if CV_SIMD128_64F inline v_float64x2 v_magnitude(const v_float64x2& a, const v_float64x2& b) { v_float64x2 x(vaddq_f64(vmulq_f64(a.val, a.val), vmulq_f64(b.val, b.val))); return v_sqrt(x); } inline v_float64x2 v_sqr_magnitude(const v_float64x2& a, const v_float64x2& b) { return v_float64x2(vaddq_f64(vmulq_f64(a.val, a.val), vmulq_f64(b.val, b.val))); } inline v_float64x2 v_muladd(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c) { return v_float64x2(vaddq_f64(c.val, vmulq_f64(a.val, b.val))); } #endif // trade efficiency for convenience #define OPENCV_HAL_IMPL_NEON_SHIFT_OP(_Tpvec, suffix, _Tps, ssuffix) \ inline _Tpvec operator << (const _Tpvec& a, int n) \ { return _Tpvec(vshlq_##suffix(a.val, vdupq_n_##ssuffix((_Tps)n))); } \ inline _Tpvec operator >> (const _Tpvec& a, int n) \ { return _Tpvec(vshlq_##suffix(a.val, vdupq_n_##ssuffix((_Tps)-n))); } \ template<int n> inline _Tpvec v_shl(const _Tpvec& a) \ { return _Tpvec(vshlq_n_##suffix(a.val, n)); } \ template<int n> inline _Tpvec v_shr(const _Tpvec& a) \ { return _Tpvec(vshrq_n_##suffix(a.val, n)); } \ template<int n> inline _Tpvec v_rshr(const _Tpvec& a) \ { return _Tpvec(vrshrq_n_##suffix(a.val, n)); } OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint8x16, u8, schar, s8) OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int8x16, s8, schar, s8) OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint16x8, u16, short, s16) OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int16x8, s16, short, s16) OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint32x4, u32, int, s32) OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int32x4, s32, int, s32) OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint64x2, u64, int64, 
s64) OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int64x2, s64, int64, s64) #define OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(_Tpvec, _Tp, suffix) \ inline _Tpvec v_load(const _Tp* ptr) \ { return _Tpvec(vld1q_##suffix(ptr)); } \ inline _Tpvec v_load_aligned(const _Tp* ptr) \ { return _Tpvec(vld1q_##suffix(ptr)); } \ inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \ { return _Tpvec(vcombine_##suffix(vld1_##suffix(ptr0), vld1_##suffix(ptr1))); } \ inline void v_store(_Tp* ptr, const _Tpvec& a) \ { vst1q_##suffix(ptr, a.val); } \ inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \ { vst1q_##suffix(ptr, a.val); } \ inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ { vst1_##suffix(ptr, vget_low_##suffix(a.val)); } \ inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ { vst1_##suffix(ptr, vget_high_##suffix(a.val)); } OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint8x16, uchar, u8) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int8x16, schar, s8) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint16x8, ushort, u16) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int16x8, short, s16) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint32x4, unsigned, u32) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int32x4, int, s32) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint64x2, uint64, u64) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int64x2, int64, s64) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float32x4, float, f32) #if CV_SIMD128_64F OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float64x2, double, f64) #endif #if defined (HAVE_FP16) // Workaround for old comiplers inline v_float16x4 v_load_f16(const short* ptr) { return v_float16x4(vld1_f16(ptr)); } inline void v_store_f16(short* ptr, v_float16x4& a) { vst1_f16(ptr, a.val); } #endif #define OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \ inline scalartype v_reduce_##func(const _Tpvec& a) \ { \ _Tpnvec##_t a0 = vp##vectorfunc##_##suffix(vget_low_##suffix(a.val), vget_high_##suffix(a.val)); \ a0 = vp##vectorfunc##_##suffix(a0, a0); \ return 
(scalartype)vget_lane_##suffix(vp##vectorfunc##_##suffix(a0, a0),0); \ } OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, unsigned short, sum, add, u16) OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, unsigned short, max, max, u16) OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, unsigned short, min, min, u16) OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, sum, add, s16) OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, max, max, s16) OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, min, min, s16) #define OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \ inline scalartype v_reduce_##func(const _Tpvec& a) \ { \ _Tpnvec##_t a0 = vp##vectorfunc##_##suffix(vget_low_##suffix(a.val), vget_high_##suffix(a.val)); \ return (scalartype)vget_lane_##suffix(vp##vectorfunc##_##suffix(a0, vget_high_##suffix(a.val)),0); \ } OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, sum, add, u32) OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, max, max, u32) OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, min, min, u32) OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, sum, add, s32) OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, max, max, s32) OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, min, min, s32) OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, sum, add, f32) OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, max, max, f32) OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, min, min, f32) inline int v_signmask(const v_uint8x16& a) { int8x8_t m0 = vcreate_s8(CV_BIG_UINT(0x0706050403020100)); uint8x16_t v0 = vshlq_u8(vshrq_n_u8(a.val, 7), vcombine_s8(m0, m0)); uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(v0))); return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 8); } inline int v_signmask(const v_int8x16& a) { return v_signmask(v_reinterpret_as_u8(a)); 
} inline int v_signmask(const v_uint16x8& a) { int16x4_t m0 = vcreate_s16(CV_BIG_UINT(0x0003000200010000)); uint16x8_t v0 = vshlq_u16(vshrq_n_u16(a.val, 15), vcombine_s16(m0, m0)); uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(v0)); return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 4); } inline int v_signmask(const v_int16x8& a) { return v_signmask(v_reinterpret_as_u16(a)); } inline int v_signmask(const v_uint32x4& a) { int32x2_t m0 = vcreate_s32(CV_BIG_UINT(0x0000000100000000)); uint32x4_t v0 = vshlq_u32(vshrq_n_u32(a.val, 31), vcombine_s32(m0, m0)); uint64x2_t v1 = vpaddlq_u32(v0); return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 2); } inline int v_signmask(const v_int32x4& a) { return v_signmask(v_reinterpret_as_u32(a)); } inline int v_signmask(const v_float32x4& a) { return v_signmask(v_reinterpret_as_u32(a)); } #if CV_SIMD128_64F inline int v_signmask(const v_uint64x2& a) { int64x1_t m0 = vdup_n_s64(0); uint64x2_t v0 = vshlq_u64(vshrq_n_u64(a.val, 63), vcombine_s64(m0, m0)); return (int)vgetq_lane_u64(v0, 0) + ((int)vgetq_lane_u64(v0, 1) << 1); } inline int v_signmask(const v_float64x2& a) { return v_signmask(v_reinterpret_as_u64(a)); } #endif #define OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(_Tpvec, suffix, shift) \ inline bool v_check_all(const v_##_Tpvec& a) \ { \ _Tpvec##_t v0 = vshrq_n_##suffix(vmvnq_##suffix(a.val), shift); \ uint64x2_t v1 = vreinterpretq_u64_##suffix(v0); \ return (vgetq_lane_u64(v1, 0) | vgetq_lane_u64(v1, 1)) == 0; \ } \ inline bool v_check_any(const v_##_Tpvec& a) \ { \ _Tpvec##_t v0 = vshrq_n_##suffix(a.val, shift); \ uint64x2_t v1 = vreinterpretq_u64_##suffix(v0); \ return (vgetq_lane_u64(v1, 0) | vgetq_lane_u64(v1, 1)) != 0; \ } OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint8x16, u8, 7) OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint16x8, u16, 15) OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint32x4, u32, 31) #if CV_SIMD128_64F OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint64x2, u64, 63) #endif inline bool v_check_all(const v_int8x16& a) { 
return v_check_all(v_reinterpret_as_u8(a)); } inline bool v_check_all(const v_int16x8& a) { return v_check_all(v_reinterpret_as_u16(a)); } inline bool v_check_all(const v_int32x4& a) { return v_check_all(v_reinterpret_as_u32(a)); } inline bool v_check_all(const v_float32x4& a) { return v_check_all(v_reinterpret_as_u32(a)); } inline bool v_check_any(const v_int8x16& a) { return v_check_any(v_reinterpret_as_u8(a)); } inline bool v_check_any(const v_int16x8& a) { return v_check_any(v_reinterpret_as_u16(a)); } inline bool v_check_any(const v_int32x4& a) { return v_check_any(v_reinterpret_as_u32(a)); } inline bool v_check_any(const v_float32x4& a) { return v_check_any(v_reinterpret_as_u32(a)); } #if CV_SIMD128_64F inline bool v_check_all(const v_int64x2& a) { return v_check_all(v_reinterpret_as_u64(a)); } inline bool v_check_all(const v_float64x2& a) { return v_check_all(v_reinterpret_as_u64(a)); } inline bool v_check_any(const v_int64x2& a) { return v_check_any(v_reinterpret_as_u64(a)); } inline bool v_check_any(const v_float64x2& a) { return v_check_any(v_reinterpret_as_u64(a)); } #endif #define OPENCV_HAL_IMPL_NEON_SELECT(_Tpvec, suffix, usuffix) \ inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \ { \ return _Tpvec(vbslq_##suffix(vreinterpretq_##usuffix##_##suffix(mask.val), a.val, b.val)); \ } OPENCV_HAL_IMPL_NEON_SELECT(v_uint8x16, u8, u8) OPENCV_HAL_IMPL_NEON_SELECT(v_int8x16, s8, u8) OPENCV_HAL_IMPL_NEON_SELECT(v_uint16x8, u16, u16) OPENCV_HAL_IMPL_NEON_SELECT(v_int16x8, s16, u16) OPENCV_HAL_IMPL_NEON_SELECT(v_uint32x4, u32, u32) OPENCV_HAL_IMPL_NEON_SELECT(v_int32x4, s32, u32) OPENCV_HAL_IMPL_NEON_SELECT(v_float32x4, f32, u32) #if CV_SIMD128_64F OPENCV_HAL_IMPL_NEON_SELECT(v_float64x2, f64, u64) #endif #define OPENCV_HAL_IMPL_NEON_EXPAND(_Tpvec, _Tpwvec, _Tp, suffix) \ inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \ { \ b0.val = vmovl_##suffix(vget_low_##suffix(a.val)); \ b1.val = 
vmovl_##suffix(vget_high_##suffix(a.val)); \ } \ inline _Tpwvec v_load_expand(const _Tp* ptr) \ { \ return _Tpwvec(vmovl_##suffix(vld1_##suffix(ptr))); \ } OPENCV_HAL_IMPL_NEON_EXPAND(v_uint8x16, v_uint16x8, uchar, u8) OPENCV_HAL_IMPL_NEON_EXPAND(v_int8x16, v_int16x8, schar, s8) OPENCV_HAL_IMPL_NEON_EXPAND(v_uint16x8, v_uint32x4, ushort, u16) OPENCV_HAL_IMPL_NEON_EXPAND(v_int16x8, v_int32x4, short, s16) OPENCV_HAL_IMPL_NEON_EXPAND(v_uint32x4, v_uint64x2, uint, u32) OPENCV_HAL_IMPL_NEON_EXPAND(v_int32x4, v_int64x2, int, s32) inline v_uint32x4 v_load_expand_q(const uchar* ptr) { uint8x8_t v0 = vcreate_u8(*(unsigned*)ptr); uint16x4_t v1 = vget_low_u16(vmovl_u8(v0)); return v_uint32x4(vmovl_u16(v1)); } inline v_int32x4 v_load_expand_q(const schar* ptr) { int8x8_t v0 = vcreate_s8(*(unsigned*)ptr); int16x4_t v1 = vget_low_s16(vmovl_s8(v0)); return v_int32x4(vmovl_s16(v1)); } #if defined(__aarch64__) #define OPENCV_HAL_IMPL_NEON_UNPACKS(_Tpvec, suffix) \ inline void v_zip(const v_##_Tpvec& a0, const v_##_Tpvec& a1, v_##_Tpvec& b0, v_##_Tpvec& b1) \ { \ b0.val = vzip1q_##suffix(a0.val, a1.val); \ b1.val = vzip2q_##suffix(a0.val, a1.val); \ } \ inline v_##_Tpvec v_combine_low(const v_##_Tpvec& a, const v_##_Tpvec& b) \ { \ return v_##_Tpvec(vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val))); \ } \ inline v_##_Tpvec v_combine_high(const v_##_Tpvec& a, const v_##_Tpvec& b) \ { \ return v_##_Tpvec(vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val))); \ } \ inline void v_recombine(const v_##_Tpvec& a, const v_##_Tpvec& b, v_##_Tpvec& c, v_##_Tpvec& d) \ { \ c.val = vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val)); \ d.val = vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val)); \ } #else #define OPENCV_HAL_IMPL_NEON_UNPACKS(_Tpvec, suffix) \ inline void v_zip(const v_##_Tpvec& a0, const v_##_Tpvec& a1, v_##_Tpvec& b0, v_##_Tpvec& b1) \ { \ _Tpvec##x2_t p = vzipq_##suffix(a0.val, a1.val); \ b0.val = 
p.val[0]; \ b1.val = p.val[1]; \ } \ inline v_##_Tpvec v_combine_low(const v_##_Tpvec& a, const v_##_Tpvec& b) \ { \ return v_##_Tpvec(vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val))); \ } \ inline v_##_Tpvec v_combine_high(const v_##_Tpvec& a, const v_##_Tpvec& b) \ { \ return v_##_Tpvec(vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val))); \ } \ inline void v_recombine(const v_##_Tpvec& a, const v_##_Tpvec& b, v_##_Tpvec& c, v_##_Tpvec& d) \ { \ c.val = vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val)); \ d.val = vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val)); \ } #endif OPENCV_HAL_IMPL_NEON_UNPACKS(uint8x16, u8) OPENCV_HAL_IMPL_NEON_UNPACKS(int8x16, s8) OPENCV_HAL_IMPL_NEON_UNPACKS(uint16x8, u16) OPENCV_HAL_IMPL_NEON_UNPACKS(int16x8, s16) OPENCV_HAL_IMPL_NEON_UNPACKS(uint32x4, u32) OPENCV_HAL_IMPL_NEON_UNPACKS(int32x4, s32) OPENCV_HAL_IMPL_NEON_UNPACKS(float32x4, f32) #if CV_SIMD128_64F OPENCV_HAL_IMPL_NEON_UNPACKS(float64x2, f64) #endif #define OPENCV_HAL_IMPL_NEON_EXTRACT(_Tpvec, suffix) \ template <int s> \ inline v_##_Tpvec v_extract(const v_##_Tpvec& a, const v_##_Tpvec& b) \ { \ return v_##_Tpvec(vextq_##suffix(a.val, b.val, s)); \ } OPENCV_HAL_IMPL_NEON_EXTRACT(uint8x16, u8) OPENCV_HAL_IMPL_NEON_EXTRACT(int8x16, s8) OPENCV_HAL_IMPL_NEON_EXTRACT(uint16x8, u16) OPENCV_HAL_IMPL_NEON_EXTRACT(int16x8, s16) OPENCV_HAL_IMPL_NEON_EXTRACT(uint32x4, u32) OPENCV_HAL_IMPL_NEON_EXTRACT(int32x4, s32) OPENCV_HAL_IMPL_NEON_EXTRACT(uint64x2, u64) OPENCV_HAL_IMPL_NEON_EXTRACT(int64x2, s64) OPENCV_HAL_IMPL_NEON_EXTRACT(float32x4, f32) #if CV_SIMD128_64F OPENCV_HAL_IMPL_NEON_EXTRACT(float64x2, f64) #endif inline v_int32x4 v_round(const v_float32x4& a) { static const int32x4_t v_sign = vdupq_n_s32(1 << 31), v_05 = vreinterpretq_s32_f32(vdupq_n_f32(0.5f)); int32x4_t v_addition = vorrq_s32(v_05, vandq_s32(v_sign, vreinterpretq_s32_f32(a.val))); return v_int32x4(vcvtq_s32_f32(vaddq_f32(a.val, 
vreinterpretq_f32_s32(v_addition)))); } inline v_int32x4 v_floor(const v_float32x4& a) { int32x4_t a1 = vcvtq_s32_f32(a.val); uint32x4_t mask = vcgtq_f32(vcvtq_f32_s32(a1), a.val); return v_int32x4(vaddq_s32(a1, vreinterpretq_s32_u32(mask))); } inline v_int32x4 v_ceil(const v_float32x4& a) { int32x4_t a1 = vcvtq_s32_f32(a.val); uint32x4_t mask = vcgtq_f32(a.val, vcvtq_f32_s32(a1)); return v_int32x4(vsubq_s32(a1, vreinterpretq_s32_u32(mask))); } inline v_int32x4 v_trunc(const v_float32x4& a) { return v_int32x4(vcvtq_s32_f32(a.val)); } #if CV_SIMD128_64F inline v_int32x4 v_round(const v_float64x2& a) { static const int32x2_t zero = vdup_n_s32(0); return v_int32x4(vcombine_s32(vmovn_s64(vcvtaq_s64_f64(a.val)), zero)); } inline v_int32x4 v_floor(const v_float64x2& a) { static const int32x2_t zero = vdup_n_s32(0); int64x2_t a1 = vcvtq_s64_f64(a.val); uint64x2_t mask = vcgtq_f64(vcvtq_f64_s64(a1), a.val); a1 = vaddq_s64(a1, vreinterpretq_s64_u64(mask)); return v_int32x4(vcombine_s32(vmovn_s64(a1), zero)); } inline v_int32x4 v_ceil(const v_float64x2& a) { static const int32x2_t zero = vdup_n_s32(0); int64x2_t a1 = vcvtq_s64_f64(a.val); uint64x2_t mask = vcgtq_f64(a.val, vcvtq_f64_s64(a1)); a1 = vsubq_s64(a1, vreinterpretq_s64_u64(mask)); return v_int32x4(vcombine_s32(vmovn_s64(a1), zero)); } inline v_int32x4 v_trunc(const v_float64x2& a) { static const int32x2_t zero = vdup_n_s32(0); return v_int32x4(vcombine_s32(vmovn_s64(vcvtaq_s64_f64(a.val)), zero)); } #endif #define OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(_Tpvec, suffix) \ inline void v_transpose4x4(const v_##_Tpvec& a0, const v_##_Tpvec& a1, \ const v_##_Tpvec& a2, const v_##_Tpvec& a3, \ v_##_Tpvec& b0, v_##_Tpvec& b1, \ v_##_Tpvec& b2, v_##_Tpvec& b3) \ { \ /* m00 m01 m02 m03 */ \ /* m10 m11 m12 m13 */ \ /* m20 m21 m22 m23 */ \ /* m30 m31 m32 m33 */ \ _Tpvec##x2_t t0 = vtrnq_##suffix(a0.val, a1.val); \ _Tpvec##x2_t t1 = vtrnq_##suffix(a2.val, a3.val); \ /* m00 m10 m02 m12 */ \ /* m01 m11 m03 m13 */ \ /* m20 m30 m22 m32 
*/ \ /* m21 m31 m23 m33 */ \ b0.val = vcombine_##suffix(vget_low_##suffix(t0.val[0]), vget_low_##suffix(t1.val[0])); \ b1.val = vcombine_##suffix(vget_low_##suffix(t0.val[1]), vget_low_##suffix(t1.val[1])); \ b2.val = vcombine_##suffix(vget_high_##suffix(t0.val[0]), vget_high_##suffix(t1.val[0])); \ b3.val = vcombine_##suffix(vget_high_##suffix(t0.val[1]), vget_high_##suffix(t1.val[1])); \ } OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(uint32x4, u32) OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(int32x4, s32) OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(float32x4, f32) #define OPENCV_HAL_IMPL_NEON_INTERLEAVED(_Tpvec, _Tp, suffix) \ inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b) \ { \ _Tpvec##x2_t v = vld2q_##suffix(ptr); \ a.val = v.val[0]; \ b.val = v.val[1]; \ } \ inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, v_##_Tpvec& c) \ { \ _Tpvec##x3_t v = vld3q_##suffix(ptr); \ a.val = v.val[0]; \ b.val = v.val[1]; \ c.val = v.val[2]; \ } \ inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, \ v_##_Tpvec& c, v_##_Tpvec& d) \ { \ _Tpvec##x4_t v = vld4q_##suffix(ptr); \ a.val = v.val[0]; \ b.val = v.val[1]; \ c.val = v.val[2]; \ d.val = v.val[3]; \ } \ inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b) \ { \ _Tpvec##x2_t v; \ v.val[0] = a.val; \ v.val[1] = b.val; \ vst2q_##suffix(ptr, v); \ } \ inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, const v_##_Tpvec& c) \ { \ _Tpvec##x3_t v; \ v.val[0] = a.val; \ v.val[1] = b.val; \ v.val[2] = c.val; \ vst3q_##suffix(ptr, v); \ } \ inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, \ const v_##_Tpvec& c, const v_##_Tpvec& d) \ { \ _Tpvec##x4_t v; \ v.val[0] = a.val; \ v.val[1] = b.val; \ v.val[2] = c.val; \ v.val[3] = d.val; \ vst4q_##suffix(ptr, v); \ } OPENCV_HAL_IMPL_NEON_INTERLEAVED(uint8x16, uchar, u8) OPENCV_HAL_IMPL_NEON_INTERLEAVED(int8x16, schar, s8) 
// Instantiate the de-interleaved load / interleaved store helpers
// (v_load_deinterleave / v_store_interleave) for the remaining lane types.
OPENCV_HAL_IMPL_NEON_INTERLEAVED(uint16x8, ushort, u16)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(int16x8, short, s16)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(uint32x4, unsigned, u32)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(int32x4, int, s32)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(float32x4, float, f32)
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_INTERLEAVED(float64x2, double, f64)
#endif

// Lane-wise int32 -> float32 conversion.
inline v_float32x4 v_cvt_f32(const v_int32x4& a)
{
    return v_float32x4(vcvtq_f32_s32(a.val));
}

#if CV_SIMD128_64F
// Narrow two f64 lanes to f32 and place them in the low half of the result;
// the upper two lanes are zero-filled.
inline v_float32x4 v_cvt_f32(const v_float64x2& a)
{
    float32x2_t zero = vdup_n_f32(0.0f);
    return v_float32x4(vcombine_f32(vcvt_f32_f64(a.val), zero));
}

// Widen the low two int32 lanes to f64.
// NOTE(review): conversion goes through float32, which cannot represent all
// int32 exactly (|x| > 2^24 loses precision) — confirm this matches the
// intended contract for v_cvt_f64.
inline v_float64x2 v_cvt_f64(const v_int32x4& a)
{
    return v_float64x2(vcvt_f64_f32(vcvt_f32_s32(vget_low_s32(a.val))));
}

// Widen the high two int32 lanes to f64 (same float32 round-trip as above).
inline v_float64x2 v_cvt_f64_high(const v_int32x4& a)
{
    return v_float64x2(vcvt_f64_f32(vcvt_f32_s32(vget_high_s32(a.val))));
}

// Widen the low two f32 lanes to f64.
inline v_float64x2 v_cvt_f64(const v_float32x4& a)
{
    return v_float64x2(vcvt_f64_f32(vget_low_f32(a.val)));
}

// Widen the high two f32 lanes to f64.
inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
{
    return v_float64x2(vcvt_f64_f32(vget_high_f32(a.val)));
}
#endif

#if defined (HAVE_FP16)
// Half-precision <-> single-precision lane conversions (4 lanes).
inline v_float32x4 v_cvt_f32(const v_float16x4& a)
{
    return v_float32x4(vcvt_f32_f16(a.val));
}

inline v_float16x4 v_cvt_f16(const v_float32x4& a)
{
    return v_float16x4(vcvt_f16_f32(a.val));
}
#endif

//! @name Check SIMD support
//! @{
//! @brief Check CPU capability of SIMD operation
// Runtime check that this CPU actually supports NEON.
static inline bool hasSIMD128()
{
    return checkHardwareSupport(CV_CPU_NEON);
}

//! @}

//! @endcond

}  // closes the enclosing namespace (opened earlier in this header)

#endif  // presumably the header's include/feature guard — opened before this chunk
{ "pile_set_name": "Github" }
package de.lessvoid.nifty.effects.impl; import de.lessvoid.nifty.Nifty; import de.lessvoid.nifty.effects.EffectImpl; import de.lessvoid.nifty.effects.EffectProperties; import de.lessvoid.nifty.effects.Falloff; import de.lessvoid.nifty.elements.Element; import de.lessvoid.nifty.render.NiftyRenderEngine; import de.lessvoid.nifty.tools.LinearInterpolator; import de.lessvoid.nifty.tools.SizeValue; import javax.annotation.Nonnull; import javax.annotation.Nullable; /** * ImageSize effect. * * @author void */ public class ImageSize implements EffectImpl { private float startSize; private float endSize; @Nonnull private SizeValue imageSize = new SizeValue("100%"); @Nullable private LinearInterpolator interpolator; @Override public final void activate( @Nonnull final Nifty nifty, @Nonnull final Element element, @Nonnull final EffectProperties parameter) { // for normal mode startSize = Float.parseFloat(parameter.getProperty("startSize", "1.0")); endSize = Float.parseFloat(parameter.getProperty("endSize", "2.0")); // for hover mode only String maxSizeString = parameter.getProperty("maxSize"); if (maxSizeString != null) { imageSize = new SizeValue(maxSizeString); } interpolator = parameter.getInterpolator(); } @Override public void execute( @Nonnull final Element element, final float normalizedTime, @Nullable final Falloff falloff, @Nonnull final NiftyRenderEngine r) { float scale; if (falloff == null) { if (interpolator != null) { scale = interpolator.getValue(normalizedTime); } else { scale = startSize + normalizedTime * (endSize - startSize); } } else { scale = 1.0f + falloff.getFalloffValue() * imageSize.getValue(1.0f); } r.setImageScale(scale); } @Override public void deactivate() { } }
{ "pile_set_name": "Github" }
/*
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.optaplanner.core.impl.domain.variable.listener.support;

import static org.assertj.core.api.Assertions.assertThat;

import java.util.Arrays;

import org.junit.jupiter.api.Test;

/**
 * Exercises {@code SmallScalingOrderedSet} around its
 * {@code LIST_SIZE_THRESHOLD} boundary — presumably the size at which the
 * implementation switches from a small list-backed representation to a
 * larger set-backed one (TODO confirm against SmallScalingOrderedSet).
 * The assertions are strictly order-dependent: each add/remove mutates the
 * set state that the following assertions rely on.
 */
public class SmallScalingOrderedSetTest {

    @Test
    public void addRemoveAroundThreshold() {
        SmallScalingOrderedSet<String> set = new SmallScalingOrderedSet<>();
        // Basic add/remove semantics on a tiny set: add returns true only
        // when the element was not already present, remove only when it was.
        assertThat(set.add("s1")).isTrue();
        assertThat(set.add("s1")).isFalse();
        assertThat(set.add("s2")).isTrue();
        assertThat(set.add("s1")).isFalse();
        assertThat(set.add("s2")).isFalse();
        assertThat(set.remove("s2")).isTrue();
        assertThat(set.remove("s2")).isFalse();
        assertThat(set.add("s2")).isTrue();
        assertThat(set.size()).isEqualTo(2);
        assertThat(set.contains("s1")).isTrue();
        assertThat(set.contains("s2")).isTrue();
        // Fill to exactly THRESHOLD - 1 elements (2 named + THRESHOLD-3 fillers).
        for (int i = 0; i < SmallScalingOrderedSet.LIST_SIZE_THRESHOLD - 3; i++) {
            set.add("filler " + i);
        }
        // Adding "s3" lands the set exactly on the threshold.
        assertThat(set.add("s2")).isFalse();
        assertThat(set.add("s3")).isTrue();
        assertThat(set.add("s2")).isFalse();
        assertThat(set.size()).isEqualTo(SmallScalingOrderedSet.LIST_SIZE_THRESHOLD);
        // Crossing the threshold: duplicates must still be rejected afterwards.
        assertThat(set.add("s4")).isTrue();
        assertThat(set.add("s2")).isFalse();
        assertThat(set.add("s3")).isFalse();
        assertThat(set.add("s4")).isFalse();
        assertThat(set.size()).isEqualTo(SmallScalingOrderedSet.LIST_SIZE_THRESHOLD + 1);
        // Removing drops the size back to the threshold boundary...
        assertThat(set.remove("s4")).isTrue();
        assertThat(set.add("s2")).isFalse();
        assertThat(set.add("s3")).isFalse();
        assertThat(set.size()).isEqualTo(SmallScalingOrderedSet.LIST_SIZE_THRESHOLD);
        // ...and further adds cross it again, twice, without losing elements.
        assertThat(set.add("s5")).isTrue();
        assertThat(set.add("s2")).isFalse();
        assertThat(set.add("s3")).isFalse();
        assertThat(set.size()).isEqualTo(SmallScalingOrderedSet.LIST_SIZE_THRESHOLD + 1);
        assertThat(set.add("s6")).isTrue();
        assertThat(set.add("s2")).isFalse();
        assertThat(set.add("s3")).isFalse();
        assertThat(set.size()).isEqualTo(SmallScalingOrderedSet.LIST_SIZE_THRESHOLD + 2);
        // Final membership check: everything ever added and not removed is
        // present; the removed "s4" is gone.
        assertThat(set.contains("s1")).isTrue();
        assertThat(set.contains("s2")).isTrue();
        assertThat(set.contains("s3")).isTrue();
        assertThat(set.contains("s4")).isFalse();
        assertThat(set.contains("s5")).isTrue();
        assertThat(set.contains("s6")).isTrue();
    }

    @Test
    public void addAllAroundThreshold() {
        SmallScalingOrderedSet<String> set = new SmallScalingOrderedSet<>();
        // addAll returns true iff at least one element was actually added,
        // even when the batch contains duplicates of existing elements.
        assertThat(set.addAll(Arrays.asList("s1", "s2", "s3"))).isTrue();
        assertThat(set.size()).isEqualTo(3);
        assertThat(set.addAll(Arrays.asList("s1", "s3", "s4", "s5"))).isTrue();
        assertThat(set.addAll(Arrays.asList("s1", "s2", "s4"))).isFalse();
        assertThat(set.size()).isEqualTo(5);
        assertThat(set.contains("s1")).isTrue();
        assertThat(set.contains("s2")).isTrue();
        assertThat(set.contains("s3")).isTrue();
        assertThat(set.contains("s4")).isTrue();
        assertThat(set.contains("s5")).isTrue();
        // Fill to THRESHOLD - 2 elements (5 named + THRESHOLD-7 fillers).
        for (int i = 0; i < SmallScalingOrderedSet.LIST_SIZE_THRESHOLD - 7; i++) {
            set.add("filler " + i);
        }
        assertThat(set.size()).isEqualTo(SmallScalingOrderedSet.LIST_SIZE_THRESHOLD - 2);
        // A single addAll batch (4 new + 2 duplicates) crosses the threshold.
        assertThat(set.addAll(Arrays.asList("s6", "s7", "s2", "s3", "s8", "s9"))).isTrue();
        assertThat(set.size()).isEqualTo(SmallScalingOrderedSet.LIST_SIZE_THRESHOLD + 2);
        // Removals bring the size back down to the boundary...
        assertThat(set.remove("s1")).isTrue();
        assertThat(set.remove("s5")).isTrue();
        assertThat(set.size()).isEqualTo(SmallScalingOrderedSet.LIST_SIZE_THRESHOLD);
        // ...and a mixed batch (re-adding "s1", duplicate "s2", new "s10")
        // crosses it again.
        assertThat(set.addAll(Arrays.asList("s1", "s2", "s10"))).isTrue();
        assertThat(set.size()).isEqualTo(SmallScalingOrderedSet.LIST_SIZE_THRESHOLD + 2);
        // Final membership check; only the removed-and-not-re-added "s5" is absent.
        assertThat(set.contains("s1")).isTrue();
        assertThat(set.contains("s2")).isTrue();
        assertThat(set.contains("s3")).isTrue();
        assertThat(set.contains("s4")).isTrue();
        assertThat(set.contains("s5")).isFalse();
        assertThat(set.contains("s6")).isTrue();
        assertThat(set.contains("s7")).isTrue();
        assertThat(set.contains("s8")).isTrue();
        assertThat(set.contains("s9")).isTrue();
        assertThat(set.contains("s10")).isTrue();
    }
}
{ "pile_set_name": "Github" }
(mclheader mcltype matrix dimensions 10x10 ) (mclmatrix begin 0 1 2 3 $ 1 0 4 5 $ 2 0 7 9 $ 3 0 6 8 $ 4 1 6 7 $ 5 1 8 9 $ 6 3 4 9 $ 7 2 4 8 $ 8 3 5 7 $ 9 2 5 6 $ )
{ "pile_set_name": "Github" }
<!DOCTYPE html> <html lang="en" class="js csstransforms3d"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <meta name="generator" content="Hugo 0.56.3" /> <meta name="description" content=""> <link rel="icon" href="/weblogic-kubernetes-operator/2.2.1/images/favicon.png" type="image/png"> <title>Create a domain :: WebLogic Kubernetes Operator</title> <link href="/weblogic-kubernetes-operator/2.2.1/css/nucleus.css?1566939938" rel="stylesheet"> <link href="/weblogic-kubernetes-operator/2.2.1/css/fontawesome-all.min.css?1566939938" rel="stylesheet"> <link href="/weblogic-kubernetes-operator/2.2.1/css/hybrid.css?1566939938" rel="stylesheet"> <link href="/weblogic-kubernetes-operator/2.2.1/css/featherlight.min.css?1566939938" rel="stylesheet"> <link href="/weblogic-kubernetes-operator/2.2.1/css/perfect-scrollbar.min.css?1566939938" rel="stylesheet"> <link href="/weblogic-kubernetes-operator/2.2.1/css/auto-complete.css?1566939938" rel="stylesheet"> <link href="/weblogic-kubernetes-operator/2.2.1/css/theme.css?1566939938" rel="stylesheet"> <link href="/weblogic-kubernetes-operator/2.2.1/css/hugo-theme.css?1566939938" rel="stylesheet"> <script src="/weblogic-kubernetes-operator/2.2.1/js/jquery-2.x.min.js?1566939938"></script> <style type="text/css"> :root #header + #content > #left > #rlblock_left{ display:none !important; } </style> </head> <body class="" data-url="/weblogic-kubernetes-operator/2.2.1/quickstart/create-domain/"> <nav id="sidebar" class=""> <div id="header-wrapper"> <div id="header"> <a id="logo" href="http://oracle.github.io/weblogic-kubernetes-operator/2.2.1"> <img src="http://oracle.github.io/weblogic-kubernetes-operator/2.2.1/images/logo.png" height="60"><br/> Kubernetes Operator </a> </div> <div class="searchbox"> <label for="search-by"><i class="fas fa-search"></i></label> <input data-search-input id="search-by" type="search" placeholder="Search..."> <span data-search-clear=""><i class="fas 
fa-times"></i></span> </div> <script type="text/javascript" src="/weblogic-kubernetes-operator/2.2.1/js/lunr.min.js?1566939938"></script> <script type="text/javascript" src="/weblogic-kubernetes-operator/2.2.1/js/auto-complete.js?1566939938"></script> <script type="text/javascript"> var baseurl = "http:\/\/oracle.github.io\/weblogic-kubernetes-operator\/2.2.1"; </script> <script type="text/javascript" src="/weblogic-kubernetes-operator/2.2.1/js/search.js?1566939938"></script> </div> <div class="highlightable"> <ul class="topics"> <li><a class="padding" href="/weblogic-kubernetes-operator/">Home</a></li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/quickstart/" title="Quick Start" class="dd-item parent "> <a href="/weblogic-kubernetes-operator/2.2.1/quickstart/"> <b>1. </b>Quick Start </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/quickstart/introduction/" title="Introduction" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/quickstart/introduction/"> Introduction </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/quickstart/prerequisites/" title="Before you begin" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/quickstart/prerequisites/"> Before you begin </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/quickstart/get-images/" title="Get images" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/quickstart/get-images/"> Get images </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/quickstart/install/" title="Install the operator and load balancer" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/quickstart/install/"> Install the operator and load balancer </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/quickstart/prepare/" title="Prepare for a domain" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/quickstart/prepare/"> Prepare for a domain </a> </li> <li 
data-nav-id="/weblogic-kubernetes-operator/2.2.1/quickstart/create-domain/" title="Create a domain" class="dd-item active"> <a href="/weblogic-kubernetes-operator/2.2.1/quickstart/create-domain/"> Create a domain </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/quickstart/cleanup/" title="Clean up" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/quickstart/cleanup/"> Clean up </a> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/" title="User Guide" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/"> <b>2. </b>User Guide </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/introduction/" title="Introduction" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/introduction/"> Introduction </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/introduction/introduction/" title="Get started" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/introduction/introduction/"> Get started </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/introduction/demo/" title="Demo" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/introduction/demo/"> Demo </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/introduction/architecture/" title="Architecture" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/introduction/architecture/"> Architecture </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/introduction/design/" title="Design philosophy" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/introduction/design/"> Design philosophy </a> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/overview/" title="Overview" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/overview/"> Overview </a> <ul> <li 
data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/overview/prepare/" title="Prepare your environment" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/overview/prepare/"> Prepare your environment </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/overview/k8s-setup/" title="Set up Kubernetes" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/overview/k8s-setup/"> Set up Kubernetes </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/overview/database/" title="Run a database" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/overview/database/"> Run a database </a> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-operators/" title="Manage operators" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-operators/"> Manage operators </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-operators/installation/" title="Install the operator" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-operators/installation/"> Install the operator </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-operators/using-the-operator/" title="Use the operator" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-operators/using-the-operator/"> <b> </b>Use the operator </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-operators/using-the-operator/using-helm/" title="Use Helm" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-operators/using-the-operator/using-helm/"> Use Helm </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-operators/using-the-operator/the-rest-api/" title="The REST API" class="dd-item "> <a 
href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-operators/using-the-operator/the-rest-api/"> The REST API </a> </li> </ul> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/" title="Manage domains" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/"> Manage domains </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/choosing-a-model/" title="Choose a model" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/choosing-a-model/"> <b> </b>Choose a model </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/prepare/" title="Prepare to run a domain" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/prepare/"> <b> </b>Prepare to run a domain </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-resource/" title="Domain resource" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-resource/"> <b> </b>Domain resource </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/persistent-storage/" title="Persistent storage" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/persistent-storage/"> <b> </b>Persistent storage </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-in-image/" title="Domain in image" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-in-image/"> <b> </b>Domain in image </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-in-image/base-images/" title="Base images" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-in-image/base-images/"> Base images </a> </li> </ul> 
</li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/fmw-infra/" title="FMW domains" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/fmw-infra/"> <b> </b>FMW domains </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/configoverrides/" title="Configuration overrides" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/configoverrides/"> <b> </b>Configuration overrides </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/ingress/" title="Ingress" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/ingress/"> <b> </b>Ingress </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/accessing-the-domain/" title="Accessing the domain" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/accessing-the-domain/"> <b> </b>Accessing the domain </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/accessing-the-domain/wlst/" title="Using WLST" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/accessing-the-domain/wlst/"> Using WLST </a> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-lifecycle/" title="Domain life cycle" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-lifecycle/"> <b> </b>Domain life cycle </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-lifecycle/startup/" title="Startup and shutdown" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-lifecycle/startup/"> Startup and shutdown </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-lifecycle/restarting/" 
title="Restarting" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-lifecycle/restarting/"> Restarting </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-lifecycle/scaling/" title="Scaling" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/managing-domains/domain-lifecycle/scaling/"> Scaling </a> </li> </ul> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/" title="CI/CD considerations" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/"> CI/CD considerations </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/layering/" title="Docker image layering" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/layering/"> Docker image layering </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/why-layering-matters/" title="Why layering matters" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/why-layering-matters/"> Why layering matters </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/choose-an-approach/" title="Choose an approach" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/choose-an-approach/"> Choose an approach </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/mutate-the-domain-layer/" title="Mutate the domain layer" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/mutate-the-domain-layer/"> Mutate the domain layer </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/how-to-copy-domains/" title="Copy domains" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/how-to-copy-domains/"> Copy domains </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/tools/" title="Tools" class="dd-item "> 
<a href="/weblogic-kubernetes-operator/2.2.1/userguide/cicd/tools/"> Tools </a> </li> </ul> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/" title="Samples" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/"> <b>3. </b>Samples </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/" title="Simple samples" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/"> <b> </b>Simple samples </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/credentials/" title="Credentials" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/credentials/"> Credentials </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/storage/" title="Storage" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/storage/"> Storage </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/" title="Domains" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/"> Domains </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/manually-create-domain/" title="Manually" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/manually-create-domain/"> Manually </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/fmw-domain/" title="FMW Infrastructure domain" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/fmw-domain/"> FMW Infrastructure domain </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/domain-home-on-pv/" title="Domain home on a PV" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/domain-home-on-pv/"> Domain home on a PV </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/domain-home-in-image/" 
title="Domain home in image" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/domain-home-in-image/"> Domain home in image </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/delete-domain/" title="Delete domain resources" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/domains/delete-domain/"> Delete domain resources </a> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/rest/" title="REST APIs" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/rest/"> REST APIs </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/ingress/" title="Ingress" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/ingress/"> Ingress </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/samples/simple/elastic-stack/" title="Elastic Stack" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/samples/simple/elastic-stack/"> Elastic Stack </a> </li> </ul> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/" title="Developer Guide" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/"> <b>4. 
</b>Developer Guide </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/requirements/" title="Requirements" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/requirements/"> Requirements </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/building/" title="Building" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/building/"> Building </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/integration-tests/" title="Integration tests" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/integration-tests/"> Integration tests </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/branching/" title="Branching" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/branching/"> Branching </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/coding-standards/" title="Coding standards" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/coding-standards/"> Coding standards </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/code-structure/" title="Code structure" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/code-structure/"> Code structure </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/asynchronous-call-model/" title="Asynchronous call model" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/asynchronous-call-model/"> Asynchronous call model </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/domain-processing/" title="Domain processing" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/domain-processing/"> Domain processing </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/documentation/" title="Documentation" 
class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/documentation/"> Documentation </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/developerguide/backwards-compatibility/" title="Backward compatibility" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/developerguide/backwards-compatibility/"> Backward compatibility </a> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/reference/" title="Reference" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/reference/"> <b>5. </b>Reference </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/reference/javadoc/" title="Javadoc" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/reference/javadoc/"> Javadoc </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/reference/swagger/" title="Swagger" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/reference/swagger/"> Swagger </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/reference/domain-resource/" title="Domain resource" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/reference/domain-resource/"> Domain resource </a> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/security/" title="Security" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/security/"> <b>6. 
</b>Security </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/security/certificates/" title="Certificates" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/security/certificates/"> Certificates </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/security/domain-security/" title="Domain security" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/security/domain-security/"> Domain security </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/security/domain-security/image-protection/" title="Docker image protection" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/security/domain-security/image-protection/"> Docker image protection </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/security/domain-security/weblogic-channels/" title="Channels" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/security/domain-security/weblogic-channels/"> Channels </a> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/security/encryption/" title="Encryption" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/security/encryption/"> Encryption </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/security/service-accounts/" title="Service accounts" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/security/service-accounts/"> Service accounts </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/security/rbac/" title="RBAC" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/security/rbac/"> RBAC </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/security/secrets/" title="Secrets" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/security/secrets/"> Secrets </a> </li> </ul> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/faq/" title="Frequently asked questions" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/faq/"> <b>7. 
</b>Frequently asked questions </a> <ul> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/faq/coherence-requirements/" title="Coherence Requirements" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/faq/coherence-requirements/"> Coherence Requirements </a> </li> <li data-nav-id="/weblogic-kubernetes-operator/2.2.1/faq/cannot-pull-image/" title="Cannot Pull Image" class="dd-item "> <a href="/weblogic-kubernetes-operator/2.2.1/faq/cannot-pull-image/"> Cannot Pull Image </a> </li> </ul> </li> <li><a class="padding" href="/weblogic-kubernetes-operator/release-notes">Release Notes</a></li> </ul> <section id="footer"> <p>Created with <i class="fas fa-heart"></i> from <a href="https://www.oracle.com">Oracle</a></p> <p>&nbsp;</p> <table border="1" bgcolor="white"> <tbody> <tr> <td align="center"> <p style="color:red">You are viewing the archived documentation for version 2.2.1. To view the documentation for the current release, please <a href="https://oracle.github.io/weblogic-kubernetes-operator">click here</a></p> </td> </tr> </tbody> </table> <p>&nbsp;</p> <p><a href="https://github.com/oracle/weblogic-kubernetes-operator"><i class="fab fa-github"></i> GitHub repo</a></p> <p><a href="https://weblogic-slack-inviter.herokuapp.com/"><i class="fab fa-slack"></i> Public Slack #operator</a></p> <p>&nbsp;</p> <img src="http://build.weblogick8s.org:8080/buildStatus/icon?job=weblogic-kubernetes-operator"> </section> </div> </nav> <section id="body"> <div id="overlay"></div> <div class="padding highlightable"> <div> <div id="top-bar"> <div id="breadcrumbs" itemscope="" itemtype="http://data-vocabulary.org/Breadcrumb"> <span id="sidebar-toggle-span"> <a href="#" id="sidebar-toggle" data-sidebar-toggle=""> <i class="fas fa-bars"></i> </a> </span> <span id="toc-menu"><i class="fas fa-list-alt"></i></span> <span class="links"> <a href='/weblogic-kubernetes-operator/2.2.1/'></a> > <a href='/weblogic-kubernetes-operator/2.2.1/quickstart/'>Quick Start</a> > Create 
a domain </span> </div> <div class="progress"> <div class="wrapper"> </div> </div> </div> </div> <div id="body-inner"> <h1>Create a domain</h1> <ol> <li><p>For use in the following steps:</p> <ul> <li>Select a user name and password, following the required rules for password creation (at least 8 alphanumeric characters with at least one number or special character).</li> <li>Pick or create a directory to which you can write output.</li> </ul></li> <li><p>Create a Kubernetes secret for the WebLogic administrator credentials containing the <code>username</code> and <code>password</code> for the domain, using the <a href="http://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/scripts/create-weblogic-domain-credentials/create-weblogic-credentials.sh">create-weblogic-credentials</a> script:</p> <pre><code class="language-bash">$ kubernetes/samples/scripts/create-weblogic-domain-credentials/create-weblogic-credentials.sh \ -u &lt;username&gt; -p &lt;password&gt; -n sample-domain1-ns -d sample-domain1 </code></pre> <p>The sample will create a secret named <code>domainUID-weblogic-credentials</code> where the <code>domainUID</code> is replaced with the value you provided. For example, the command above would create a secret named <code>sample-domain1-weblogic-credentials</code>.</p></li> <li><p>Create a new image with a domain home by running the <a href="http://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/scripts/create-weblogic-domain/domain-home-in-image/create-domain.sh">create-domain</a> script. 
First, copy the sample <a href="http://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/scripts/create-weblogic-domain/domain-home-in-image/create-domain-inputs.yaml">create-domain-inputs.yaml</a> file and update your copy with:</p> <ul> <li><code>domainUID</code>: <code>sample-domain1</code></li> <li><code>image</code>: Leave empty unless you need to tag the new image that the script builds to a different name.</li> <li><code>weblogicCredentialsSecretName</code>: <code>sample-domain1-weblogic-credentials</code></li> <li><code>namespace</code>: <code>sample-domain1-ns</code></li> <li><code>domainHomeImageBase</code>: <code>container-registry.oracle.com/middleware/weblogic:12.2.1.3</code></li> </ul> <p>For example, assuming you named your copy <code>my-inputs.yaml</code>:</p> <pre><code class="language-bash">$ cd kubernetes/samples/scripts/create-weblogic-domain/domain-home-in-image $ ./create-domain.sh -i my-inputs.yaml -o /&lt;your output directory&gt; -u &lt;username&gt; -p &lt;password&gt; -e </code></pre></li> </ol> <div class="notices note" >You need to provide the same WebLogic domain administrator user name and password in the `-u` and `-p` options respectively, as you provided when creating the Kubernetes secret in Step 1. </div> <ol> <li><p>Confirm that the operator started the servers for the domain:</p> <p>a. Use <code>kubectl</code> to show that the domain resource was created:</p> <pre><code class="language-bash">$ kubectl describe domain sample-domain1 -n sample-domain1-ns </code></pre> <p>b. After a short time, you will see the Administration Server and Managed Servers running.</p> <pre><code class="language-bash">$ kubectl get pods -n sample-domain1-ns </code></pre> <p>c. 
You should also see all the Kubernetes services for the domain.</p> <pre><code class="language-bash">$ kubectl get services -n sample-domain1-ns </code></pre></li> <li><p>Create an Ingress for the domain, in the domain namespace, by using the <a href="http://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/ingress-per-domain/README.md">sample</a> Helm chart:</p> <pre><code class="language-bash">$ helm install kubernetes/samples/charts/ingress-per-domain \ --name sample-domain1-ingress \ --namespace sample-domain1-ns \ --set wlsDomain.domainUID=sample-domain1 \ --set traefik.hostname=sample-domain1.org </code></pre></li> <li><p>To confirm that the load balancer noticed the new Ingress and is successfully routing to the domain&rsquo;s server pods, you can send a request to the URL for the &ldquo;WebLogic ReadyApp framework&rdquo; which will return a HTTP 200 status code, as shown in the example below.</p> <pre><code>$ curl -v -H 'host: sample-domain1.org' http://localhost:30305/weblogic/ready About to connect() to localhost port 30305 (#0) Trying 10.196.1.64... Connected to localhost (10.196.1.64) port 30305 (#0) &gt; GET /weblogic/ HTTP/1.1 &gt; User-Agent: curl/7.29.0 &gt; Accept: */* &gt; host: domain1.org &gt; &lt; HTTP/1.1 200 OK &lt; Content-Length: 0 &lt; Date: Thu, 20 Dec 2018 14:52:22 GMT &lt; Vary: Accept-Encoding &lt; Connection #0 to host localhost left intact </code></pre></li> </ol> <div class="notices note" > Depending on where your Kubernetes cluster is running, you may need to open firewall ports or update security lists to allow ingress to this port. </div> <ol> <li><p>To access the WLS Administration Console:</p> <p>a. Edit the <code>my-inputs.yaml</code> file (assuming that you named your copy <code>my-inputs.yaml</code>) to set <code>exposedAdminNodePort: true</code>.</p> <p>b. 
Open a browser to <code>http://localhost:30701</code>.</p></li> </ol> <footer class=" footline" > </footer> <script type="application/javascript"> var doNotTrack = false; if (!doNotTrack) { window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date; ga('create', 'UA-129126578-2', 'auto'); ga('send', 'pageview'); } </script> <script async src='https://www.google-analytics.com/analytics.js'></script> </div> </div> <div id="navigation"> <a class="nav nav-prev" href="/weblogic-kubernetes-operator/2.2.1/quickstart/prepare/" title="Prepare for a domain"> <i class="fa fa-chevron-left"></i></a> <a class="nav nav-next" href="/weblogic-kubernetes-operator/2.2.1/quickstart/cleanup/" title="Clean up" style="margin-right: 0px;"><i class="fa fa-chevron-right"></i></a> </div> </section> <div style="left: -1000px; overflow: scroll; position: absolute; top: -1000px; border: none; box-sizing: content-box; height: 200px; margin: 0px; padding: 0px; width: 200px;"> <div style="border: none; box-sizing: content-box; height: 200px; margin: 0px; padding: 0px; width: 200px;"></div> </div> <script src="/weblogic-kubernetes-operator/2.2.1/js/clipboard.min.js?1566939938"></script> <script src="/weblogic-kubernetes-operator/2.2.1/js/perfect-scrollbar.min.js?1566939938"></script> <script src="/weblogic-kubernetes-operator/2.2.1/js/perfect-scrollbar.jquery.min.js?1566939938"></script> <script src="/weblogic-kubernetes-operator/2.2.1/js/jquery.sticky.js?1566939938"></script> <script src="/weblogic-kubernetes-operator/2.2.1/js/featherlight.min.js?1566939938"></script> <script src="/weblogic-kubernetes-operator/2.2.1/js/html5shiv-printshiv.min.js?1566939938"></script> <script src="/weblogic-kubernetes-operator/2.2.1/js/highlight.pack.js?1566939938"></script> <script>hljs.initHighlightingOnLoad();</script> <script src="/weblogic-kubernetes-operator/2.2.1/js/modernizr.custom.71422.js?1566939938"></script> <script 
src="/weblogic-kubernetes-operator/2.2.1/js/learn.js?1566939938"></script> <script src="/weblogic-kubernetes-operator/2.2.1/js/hugo-learn.js?1566939938"></script> <link href="/weblogic-kubernetes-operator/2.2.1/mermaid/mermaid.css?1566939938" type="text/css" rel="stylesheet" /> <script src="/weblogic-kubernetes-operator/2.2.1/mermaid/mermaid.js?1566939938"></script> <script> mermaid.initialize({ startOnLoad: true }); </script> </body> </html>
{ "pile_set_name": "Github" }
{ "pluginIcon": "", "pluginName": "Simple about dialog", "pluginDescription": "Simple dialog about qutIM and its authors", "extensionHeader": "simpleaboutcreator.h", "extensionClass": "Core::SimpleAboutCreator", "qmlTypes": { "aboutinfo.h": "Core::SimpleAbout::registerTypes();" } }
{ "pile_set_name": "Github" }
/*
 * Copyright 2012 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package azkaban.event;

import azkaban.spi.EventType;
import java.util.Objects;

/**
 * An immutable event emitted by a runner, pairing an {@link EventType} with its
 * {@link EventData} payload and stamping the wall-clock creation time.
 *
 * <p>Instances are created only through {@link #create(Object, EventType, EventData)},
 * which rejects a null payload; the runner and type are stored as given.
 */
public class Event {

  /** The object that emitted this event; stored as given, may be null. */
  private final Object runner;
  /** Classification of the event. */
  private final EventType type;
  /** Payload attached to the event; guaranteed non-null by the factory. */
  private final EventData eventData;
  /** Creation time in epoch milliseconds, captured in the constructor. */
  private final long time;

  private Event(final Object runner, final EventType type, final EventData eventData) {
    this.runner = runner;
    this.type = type;
    this.eventData = eventData;
    this.time = System.currentTimeMillis();
  }

  /**
   * Creates a new event.
   *
   * @param runner runner.
   * @param type type.
   * @param eventData EventData, null is not allowed.
   * @return New Event instance.
   * @throws NullPointerException if EventData is null.
   */
  public static Event create(final Object runner, final EventType type,
      final EventData eventData) throws NullPointerException {
    // Use the JDK's null check rather than Guava's equivalent
    // Preconditions.checkNotNull; both throw NPE with this message.
    Objects.requireNonNull(eventData, "EventData was null");
    return new Event(runner, type, eventData);
  }

  /** @return the emitting runner supplied at creation (may be null). */
  public Object getRunner() {
    return this.runner;
  }

  /** @return the event's type. */
  public EventType getType() {
    return this.type;
  }

  /** @return creation time in epoch milliseconds. */
  public long getTime() {
    return this.time;
  }

  /** @return the non-null event payload. */
  public EventData getData() {
    return this.eventData;
  }
}
{ "pile_set_name": "Github" }
using BepuUtilities;
using DemoContentLoader;
using DemoRenderer;
using System;
using System.Numerics;
using BepuPhysics;
using BepuPhysics.Collidables;
using System.Diagnostics;

namespace Demos.SpecializedTests
{
    /// <summary>
    /// Compares building a deformed-plane mesh from scratch against reloading the
    /// same mesh from its serialized byte form, then drops a pile of boxes on both
    /// copies to sanity-check that the deserialized mesh behaves like the original.
    /// </summary>
    public class MeshSerializationTestDemo : Demo
    {
        public override void Initialize(ContentArchive content, Camera camera)
        {
            camera.Position = new Vector3(-30, 8, -60);
            camera.Yaw = MathHelper.Pi * 3f / 4;
            camera.Pitch = 0;
            Simulation = Simulation.Create(BufferPool, new DemoNarrowPhaseCallbacks(), new DemoPoseIntegratorCallbacks(new Vector3(0, -10, 0)), new PositionFirstTimestepper());

            // Time the from-scratch construction of a large (1025x1025) deformed plane.
            var startTime = Stopwatch.GetTimestamp();
            DemoMeshHelper.CreateDeformedPlane(1025, 1025, (x, y) => new Vector3(x * 0.125f, MathF.Sin(x) + MathF.Sin(y), y * 0.125f), Vector3.One, BufferPool, out var originalMesh);
            Simulation.Statics.Add(new StaticDescription(new Vector3(0, 0, 0), new CollidableDescription(Simulation.Shapes.Add(originalMesh), 0.1f)));
            var endTime = Stopwatch.GetTimestamp();
            var freshConstructionTime = (endTime - startTime) / (double)Stopwatch.Frequency;
            Console.WriteLine($"Fresh construction time (ms): {freshConstructionTime * 1e3}");

            // Round-trip the mesh through its serialized representation and time the reload.
            BufferPool.Take(originalMesh.GetSerializedByteCount(), out var serializedMeshBytes);
            originalMesh.Serialize(serializedMeshBytes);
            startTime = Stopwatch.GetTimestamp();
            var loadedMesh = new Mesh(serializedMeshBytes, BufferPool);
            endTime = Stopwatch.GetTimestamp();
            var loadTime = (endTime - startTime) / (double)Stopwatch.Frequency;
            // Reuse loadTime here rather than recomputing (endTime - startTime) / Frequency
            // a second time; the original printout duplicated the calculation.
            Console.WriteLine($"Load time (ms): {loadTime * 1e3}");
            Console.WriteLine($"Relative speedup: {freshConstructionTime / loadTime}");
            // Place the reloaded mesh beside the original (offset on x) for visual comparison.
            Simulation.Statics.Add(new StaticDescription(new Vector3(128, 0, 0), new CollidableDescription(Simulation.Shapes.Add(loadedMesh), 0.1f)));
            BufferPool.Return(ref serializedMeshBytes);

            // Scatter dynamic boxes over both meshes; fixed seed keeps the layout repeatable.
            var random = new Random(5);
            var shapeToDrop = new Box(1, 1, 1);
            shapeToDrop.ComputeInertia(1, out var shapeToDropInertia);
            var descriptionToDrop = BodyDescription.CreateDynamic(new Vector3(), shapeToDropInertia, new CollidableDescription(Simulation.Shapes.Add(shapeToDrop), 0.1f), new BodyActivityDescription(0.01f));
            for (int i = 0; i < 1024; ++i)
            {
                descriptionToDrop.Pose.Position = new Vector3(8 + 240 * (float)random.NextDouble(), 10 + 10 * (float)random.NextDouble(), 8 + 112 * (float)random.NextDouble());
                Simulation.Bodies.Add(descriptionToDrop);
            }
        }
    }
}
{ "pile_set_name": "Github" }
function generate_LR_Vimeo90K()
%% matlab code to genetate bicubic-downsampled for Vimeo90K dataset
% Walks every training frame under the Vimeo-90K sequence tree, crops each
% image to a multiple of mod_scale, bicubic-downsamples it by up_scale, and
% writes the result into a mirrored *_matlabLRx4 folder tree.
up_scale = 4;   % downsampling factor
mod_scale = 4;  % spatial dimensions are trimmed to a multiple of this
idx = 0;        % running count of processed frames (progress output only)
filepaths = dir('/data/datasets/SR/vimeo_septuplet/sequences/train/*/*/*.png');
for k = 1 : length(filepaths)
    [~, imname, ext] = fileparts(filepaths(k).name);
    folder_path = filepaths(k).folder;
    % Mirror the HR directory layout into the LR output tree.
    save_LR_folder = strrep(folder_path, 'vimeo_septuplet', 'vimeo_septuplet_matlabLRx4');
    if ~exist(save_LR_folder, 'dir')
        mkdir(save_LR_folder);
    end
    if isempty(imname)
        disp('Ignore . folder.');
    elseif strcmp(imname, '.')
        disp('Ignore .. folder.');
    else
        idx = idx + 1;
        str_rlt = sprintf('%d\t%s.\n', idx, imname);
        fprintf(str_rlt);
        % read, normalize to double, crop, then bicubic-downsample
        img = im2double(imread(fullfile(folder_path, [imname, ext])));
        img = modcrop(img, mod_scale);
        im_LR = imresize(img, 1 / up_scale, 'bicubic');
        if exist('save_LR_folder', 'var')
            imwrite(im_LR, fullfile(save_LR_folder, [imname, '.png']));
        end
    end
end
end

%% modcrop: trim the image so height and width are divisible by modulo.
function img = modcrop(img, modulo)
sz = size(img);
sz = sz(1:2) - mod(sz(1:2), modulo);
% Trailing-colon indexing handles both grayscale (2-D) and color (3-D) arrays.
img = img(1:sz(1), 1:sz(2), :);
end
{ "pile_set_name": "Github" }
{ "created_at": "2015-02-27T22:28:42.136341", "description": "height level file system", "fork": false, "full_name": "RubyLouvre/hfs", "language": "JavaScript", "updated_at": "2015-02-27T23:43:17.720593" }
{ "pile_set_name": "Github" }
import {NgModule, Component} from '@angular/core';
import {CommonModule} from '@angular/common';

// Minimal component used as a lazy-loading fixture ("LazyC").
// NOTE(review): the selector reads 'lazy-a-component' while the class name and
// template say "LazyC" — presumably a copy-paste leftover from a LazyA fixture;
// confirm no test or template relies on the current selector before renaming it.
@Component({
  selector: 'lazy-a-component',
  template: 'LazyC'
})
export class LazyCComponent {}

// NgModule wrapper so the component can be declared/exported/bootstrapped
// as a lazily-loaded module.
@NgModule({
  imports: [CommonModule],
  exports: [LazyCComponent],
  declarations: [LazyCComponent],
  bootstrap: [LazyCComponent]
})
export class LazyCModule {}
{ "pile_set_name": "Github" }
\lab{K-Means Clustering}{K-Means Clustering}
\objective{Clustering is one of the main tools in unsupervised learning---machine learning problems where the data comes without labels.
In this lab we implement the k-means algorithm, a simple and popular clustering method, and apply it to geographic clustering and color quantization.
}

\section*{Clustering}

Previously, we analyzed the iris dataset from \li{sklearn} using PCA; we have reproduced the first two principal components of the iris data in Figure \ref{fig:iris_data}.
Upon inspection, a human can easily see that there are two very distinct groups of irises.
Can we create an algorithm to identify these groups without human supervision?
This task is called \emph{clustering}, an instance of \emph{unsupervised learning}.

\begin{figure}
\centering
\includegraphics[width=.7\textwidth]{iris_pca.pdf}
\caption{The first two principal components of the iris dataset.}
\label{fig:iris_data}
\end{figure}

The objective of clustering is to find a partition of the data such that points in the same subset will be ``close'' according to some metric.
The metric used will likely depend on the data, but some obvious choices include Euclidean distance and angular distance.
Throughout this lab we will use the metric $d(x,y) = \|x-y\|_2$, the Euclidean distance between $x$ and $y$.

More formally, suppose we have a collection of $\mathbb{R}^K$-valued observations $X = \{x_1,x_2,\ldots,x_n\}$.
Let $N \in \mathbb{N}$ and let $\mathcal{S}$ be the set of all $N$-partitions of $X$, where an $N$-partition is a partition with exactly $N$ nonempty elements.
We can represent a typical partition in $\mathcal{S}$ as $S = \{S_1,S_2,\ldots,S_N\}$, where
\[
X = \bigcup_{i=1}^N S_i
\]
and
\[
|S_i| > 0, \qquad i=1,2,\ldots,N.
\]
We seek the $N$-partition $S^*$ that minimizes the within-cluster sum of squares, i.e.
\[
S^* = \underset{S\in\mathcal{S}}{\arg\min} \sum_{i=1}^N\sum_{x_j\in S_i}\|x_j-\mu_i\|_2^2,
\]
where $\mu_i$ is the mean of the elements in $S_i$, i.e.
\[
\mu_i = \frac{1}{|S_i|}\sum_{x_j\in S_i}x_j.
\]

\subsection*{The K-Means Algorithm}

Finding the global minimizing partition $S^*$ is generally intractable since the set of partitions can be very large indeed, but the \emph{k-means} algorithm is a heuristic approach that can often provide reasonably accurate results.

We begin by specifying an initial cluster mean $\mu_i^{(1)}$ for each $i = 1, \cdots, N$ (this can be done by random initialization, or according to some heuristic).
For each iteration, we adopt the following procedure.
Given a current set of cluster means $\mu^{(t)}$, we find a partition $S^{(t)}$ of the observations such that
\begin{equation*}
S_{i}^{(t)} = \{x_j \; : \; \|x_j - \mu_{i}^{(t)}\|_2^2 \leq \|x_j - \mu_{l}^{(t)}\|_2^2,\,\,\, l = 1, \cdots, N\}.
\end{equation*}
We then update our cluster means by computing
\begin{equation*}
\mu_{i}^{(t+1)} = \frac{1}{|S_{i}^{(t)}|} \sum_{x_j \in S_{i}^{(t)}} x_j
\end{equation*}
for each $i = 1, \cdots, N$.
We continue to iterate in this manner until the partition ceases to change.

Figure \ref{fig:iris_clusterings} shows two different clusterings of the iris data produced by the \emph{k-means} algorithm.
Note that the quality of the clustering can depend heavily on the initial cluster means.
We can use the within-cluster sum of squares as a measure of the quality of a clustering (a lower sum of squares is better).
Where possible, it is advisable to run the clustering algorithm several times, each with a different initialization of the means, and keep the best clustering.
Note also that it is possible to have very slow convergence.
Thus, when implementing the algorithm, it is a good idea to terminate after some specified maximum number of iterations.
% \begin{figure}[h] \centering \begin{tabular}{cc} \includegraphics[width=.49\textwidth]{iris_means_1.pdf} & \includegraphics[width=.49\textwidth]{iris_means_2.pdf} \end{tabular} \caption{Two different K-Means clusterings for the iris dataset. Notice that the clustering on the left predicts the flower species to a high degree of accuracy, while the clustering on the right is less effective.} \label{fig:iris_clusterings} \end{figure} % The algorithm can be summarized as follows. \begin{enumerate} \item Choose $k$ initial cluster centers. \item For $i=0,\ \ldots,$\ \li{max_iter}, \begin{enumerate} \item Assign each data point to the cluster center that is closest, forming $k$ clusters. \item Recompute the cluster centers as the means of the new clusters. \item If the old cluster centers and the new cluster centers are sufficiently close, terminate early. \end{enumerate} \end{enumerate} \begin{problem} Write a \li{KMeans} class for doing basic $k$-means clustering. Implement the following methods, following \li{sklearn} class conventions. \begin{enumerate} \item \li{__init__()}: Accept a number of clusters $k$, a maximum number of iterations, and a convergence tolerance. Store these as attributes. \item \li{fit()}: Accept an $m \times n$ matrix $X$ of $m$ data points with $n$ features. Choose $k$ random rows of $X$ as the initial cluster centers. Run the $k$-means iteration until consecutive centers are within the convergence tolerance, or until iterating the maximum number of times. Save the cluster centers as attributes. If a cluster is empty, reassign the cluster center as a random row of $X$. \item \li{predict()}: Accept an $l \times n$ matrix $X$ of data. Return an array of $l$ integers where the $i$th entry indicates which cluster center the $i$th row of $X$ is closest to. \end{enumerate} % Test your class on the iris data set after reducing the data to two principal components. Plot the data, coloring by cluster. 
\end{problem} \section*{Detecting Active Earthquake Regions} % ============================== Suppose we are interested in learning about which regions are prone to experience frequent earthquake activity. We could make a map of all earthquakes over a given period of time and examine it ourselves, but this, as an unsupervised learning problem, can be solved using our $k$-means clustering tool. % % Old approach (required students to parse 6 files for some reason). % These files contain a lot of information which isn't of interest to us at the present time; all we would like to extract from them is the location of each earthquake, which appears in characters $21$ through $33$ of each line. % Characters $21$ through $26$ contain the latitude of each epicenter, character $26$ denoting North or South, and characters $27$ through $33$ contain the longitude of each epicenter, character $33$ denoting East or West. % We need to divide each value by $1,000$ to represent these as degrees and decimals. \begin{figure}[H] \centering \includegraphics[width=.7\textwidth]{earthquakes.png} \caption{Earthquake epicenters over a 6 month period.} \label{fig:earthquakes} \end{figure} The file \texttt{earthquake\_coordinates.npy} contains earthquake data throughout the world from January 2010 through June 2010. Each row represents a different earthquake; the columns are scaled longitude and latitude measurements. We want to cluster this data into active earthquake regions. For this task, we might think that we can regard any epicenter as a point in $\mathbb{R}^{2}$ with coordinates being their latitude and longitude. This, however, would be incorrect, because the earth is not flat. Instead, latitude and longitude should be viewed in \emph{spherical coordinates} in $\mathbb{R}^{3}$, which could then be clustered. A simple way to accomplish this transformation is to first transform the latitude and longitude values to spherical coordinates, and then to Euclidean coordinates. 
Recall that a spherical coordinate in $\mathbb{R}^3$ is a triple $(r,\theta,\varphi)$, where $r$ is the distance from the origin, $\theta$ is the radial angle in the $xy$-plane from the $x$-axis, and $\varphi$ is the angle from the $z$-axis. In our earthquake data, once the longitude is converted to radians it is an appropriate $\theta$ value; the latitude needs to be offset by $90^\circ$ degrees, then converted to radians to obtain $\varphi$. For simplicity, we can take $r=1$, since the earth is roughly a sphere. We can then transform to Euclidean coordinates using the following relationships. \[ \theta = \frac{\pi}{180}\left(\text{longitude}\right) \qquad \varphi = \frac{\pi}{180}\left(90 - \text{latitude}\right) \] \begin{align*} r & = \sqrt{x^{2} + y^{2} + z^{2}} & x & = r \sin \varphi \cos \theta \\ \varphi & = \arccos \frac{z}{r} & y & = r \sin \varphi \sin \theta \\ \theta & = \arctan \frac{y}{x} & z & = r \cos \varphi \end{align*} There is one last issue to solve before clustering. Each earthquake data point has norm 1 in Euclidean coordinates, since it lies on the surface of a sphere of radius 1. Therefore, the cluster centers should also have norm 1. Otherwise, the means can't be interpreted as locations on the surface of the earth, and the \emph{k-means} algorithm will struggle to find good clusters. A solution to this problem is to normalize the mean vectors at each iteration, so that they are always unit vectors. \begin{problem} Add a keyword argument \li{normalize=False} to your \li{KMeans} constructor. Modify \li{fit()} so that if \li{normalize} is \li{True}, the cluster centers are normalized at each iteration. Cluster the earthquake data in three dimensions by converting the data from raw data to spherical coordinates to euclidean coordinates on the sphere. \begin{enumerate} \item Convert longitude and latitude to radians, then to spherical coordinates. \\(Hint: \li{np.deg2rad()} may be helpful.) 
\item Convert the spherical coordinates to euclidean coordinates in $\mathbb{R}^3$.
\item Use your \li{KMeans} class with normalization to cluster the euclidean coordinates.
\item Translate the cluster center coordinates back to spherical coordinates, then to degrees, so that the cluster means are again expressed as latitude and longitude.
\\(Hint: use \li{numpy.arctan2()} for $\arctan$, so that the correct quadrant is chosen).
\item Plot the data, coloring by cluster. Also mark the cluster centers.
\end{enumerate}
With 15 clusters, your plot should resemble Figure \ref{fig:earthquakeclusters}.
\end{problem}

\begin{figure}[H]
\centering
\includegraphics[width=.7\textwidth]{earthquake_clusters.png}
\caption{Earthquake epicenter clusters with $k = 15$.}
\label{fig:earthquakeclusters}
\end{figure}

\begin{comment}
Though plotting our results in two dimensions gives us a good picture, we can see that this is not entirely accurate. There are points that appear to be closer to a different cluster center than the one to which they belong. This comes from viewing the results in only two dimensions. When viewing in three dimensions, we can see more clearly the accuracy of our results.

\begin{problem}
Add a keyword argument \li{3d=False} to your \li{kmeans} function, and add code to show the three-dimensional plot instead of the two-dimensional scatter plot should this argument be set to \li{True}.
Maintain the same color-coding scheme as before.
Use \li{mpl_toolkits.mplot3d.Axes3D} to make your plot.
\end{problem}
\end{comment}

\section*{Color Quantization} % ===============================================

The $k$-means algorithm uses the euclidean metric, so it is natural to cluster geographic data.
However, clustering can be done in any abstract vector space.
The following application is one example.

Images are usually represented on computers as $3$-dimensional arrays.
Each $2$-dimensional layer represents the red, green, and blue color values, so each pixel on the image is really a vector in $\mathbb{R}^3$.
Clustering the pixels in $RGB$ space leads to a kind of image segmentation that facilitates memory reduction.

Reading: \url{https://en.wikipedia.org/wiki/Color_quantization}

\begin{problem}
Write a function that accepts an image array (of shape $(m,n,3)$), an integer number of clusters $k$, and an integer number of samples $S$.
Reshape the image so that each row represents a single pixel.
Choose $S$ pixels to train a $k$-means model on with $k$ clusters.
Make a copy of the original picture where each pixel has the same color as its cluster center.
Return the new image.
For this problem, you may use \li{sklearn.cluster.KMeans} instead of your \li{KMeans} class from Problem 1.

Test your function on some of the provided NASA images.
\end{problem}

\newpage

\section*{Additional Material} % ==============================================

\subsection*{Spectral Clustering}
We now turn to another method for solving a clustering problem, namely that of Spectral Clustering.
As you can see in Figure ???, it can cluster data not just by its location on a graph, but can even separate shapes that overlap others into distinct clusters.
It does so by utilizing the spectral properties of a Laplacian matrix.
Different types of Laplacian matrices can be used.
In order to construct a Laplacian matrix, we first need to create a graph of vertices and edges from our data points.
This graph can be represented as a symmetric matrix $W$ where $w_{ij}$ represents the edge from $x_i$ to $x_j$.
In the simplest approach, we can set $w_{ij} = 1$ if there exists an edge and $w_{ij} = 0$ otherwise.
However, we are interested in the similarity of points, so we will weight the edges by using a \emph{similarity measure}.
Points that are similar to one another are assigned a high similarity measure value, and dissimilar points a low value.
One possible measure is the \emph{Gaussian similarity function}, which defines the similarity between distinct points $x_i$ and $x_j$ as \begin{equation*} s(x_i,x_j) = e^{- \frac{\| x_i - x_j \| ^2}{2 \sigma ^2}} \end{equation*} for some set value $\sigma$. Note that some similarity functions can yield extremely small values for dissimilar points. We have several options for dealing with this possibility. One is simply to set all values which are less than some $\epsilon$ to be zero, entirely erasing the edge between these two points. Another option is to keep only the $T$ largest-valued edges for each vertex. Whichever method we choose to use, we will end up with a weighted \emph{similarity matrix} $W$. Using this we can find the diagonal \emph{degree matrix} $D$, which gives the number of edges found at each vertex. If we have the original fully-connected graph, then $D_{ii} = n-1$ for each $i$. If we keep the $T$ highest-valued edges, $D_{ii} = T$ for each $i$. As mentioned before, we may use different types of Laplacian matrices. Three such possibilities are: \begin{enumerate} \item The \emph{unnormalized Laplacian}, $L = D - W$ \item The \emph{symmetric normalized Laplacian}, $L_{sym} = I - D^{-1/2}WD^{-1/2}$ \item The \emph{random walk normalized Laplacian}, $L_{rw} = I - D^{-1}W$. \end{enumerate} Given a similarity measure, which type of Laplacian to use, and the desired number of clusters $k$, we can now proceed with the Spectral Clustering algorithm as follows: \begin{itemize} \item Compute $W$, $D$, and the appropriate Laplacian matrix. \item Compute the first $k$ eigenvectors $u_1, \cdots , u_k$ of the Laplacian matrix. \item Set $U = [u_1, \cdots , u_k]$, and if using $L_{sym}$ or $L_{rw}$ normalize $U$ so that each row is a unit vector in the Euclidean norm. \item Perform $k$-means clustering on the $n$ rows of $U$. \item The $n$ labels returned from your \li{kmeans} function correspond to the label assignments for $x_1, \cdots, x_n$. 
\end{itemize} As before, we need to run through our $k$-means function multiple times to find the best measure when we use random initialization. Also, if you normalize the rows of $U$, then you will need to set the argument \li{normalize = True}. \begin{problem} Implement the Spectral Clustering Algorithm by calling your \li{kmeans} function, using the following function declaration: \begin{lstlisting} def specClus(measure,Laplacian,args,arg1=None,kiters=10): """ Cluster a dataset using the k-means algorithm. Parameters ---------- measure : function The function used to calculate the similarity measure. Laplacian : int in {1,2,3} Which Laplacian matrix to use. 1 corresponds to the unnormalized, 2 to the symmetric normalized, 3 to the random walk normalized. args : tuple The arguments as they were passed into your k-means function, consisting of (data, n_clusters, init, max_iter, normalize). Note that you will not pass 'data' into your k-means function. arg1 : None, float, or int If Laplacian==1, it should remain as None If Laplacian==2, the cut-off value, epsilon. If Laplacian==3, the number of edges to retain, T. kiters : int How many times to call your kmeans function to get the best measure. Returns ------- labels : ndarray of shape (n,) The i-th entry is an integer in [0,n_clusters-1] indicating which cluster the i-th row of data belongs to. """ pass \end{lstlisting} \end{problem} We now need a way to test our code. The website http://cs.joensuu.fi/sipu/datasets/ contains many free data sets that will be of use to us. Scroll down to the ``Shape sets" heading, and download some of the datasets found there to use for trial datasets. \begin{problem} Create a function that will return the accuracy of your spectral clustering implementation, as follows: \begin{lstlisting} def test_specClus(location,measure,Laplacian,args,arg1=None,kiters=10): """ Cluster a dataset using the k-means algorithm. 
Parameters ---------- location : string The location of the dataset to be tested. measure : function The function used to calculate the similarity measure. Laplacian : int in {1,2,3} Which Laplacian matrix to use. 1 corresponds to the unnormalized, 2 to the symmetric normalized, 3 to the random walk normalized. args : tuple The arguments as they were passed into your k-means function, consisting of (data, n_clusters, init, max_iter, normalize). Note that you will not pass 'data' into your k-means function. arg1 : None, float, or int If Laplacian==1, it should remain as None If Laplacian==2, the cut-off value, epsilon. If Laplacian==3, the number of edges to retain, T. kiters : int How many times to call your kmeans function to get the best measure. Returns ------- accuracy : float The percent of labels correctly predicted by your spectral clustering function with the given arguments (the number correctly predicted divided by the total number of points. """ pass \end{lstlisting} \end{problem}
{ "pile_set_name": "Github" }
/* ***** BEGIN LICENSE BLOCK ***** * Distributed under the BSD license: * * Copyright (c) 2012, Ajax.org B.V. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Ajax.org B.V. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL AJAX.ORG B.V. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * * Contributor(s): * * Libo Cannici <libo AT zendesk DOT com> * * * * ***** END LICENSE BLOCK ***** */ define(function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); // defines the parent mode var HtmlMode = require("./html").Mode; var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent; var HtmlHighlightRules = require("./html_highlight_rules").HtmlHighlightRules; var HtmlFoldMode = require("./folding/html").FoldMode; // defines the language specific highlighters and folding rules var CurlyHighlightRules = require("./curly_highlight_rules").CurlyHighlightRules; var Mode = function() { HtmlMode.call(this); this.HighlightRules = CurlyHighlightRules; this.$outdent = new MatchingBraceOutdent(); this.foldingRules = new HtmlFoldMode(); }; oop.inherits(Mode, HtmlMode); (function() { this.$id = "ace/mode/curly"; }).call(Mode.prototype); exports.Mode = Mode; });
{ "pile_set_name": "Github" }
--- layout: base title: 'Statistics of Tense in UD_German-PUD' udver: '2' --- ## Treebank Statistics: UD_German-PUD: Features: `Tense` This feature is universal. It occurs with 2 different values: `Past`, `Pres`. 2328 tokens (11%) have a non-empty value of `Tense`. 963 types (15%) occur at least once with a non-empty value of `Tense`. 615 lemmas (12%) occur at least once with a non-empty value of `Tense`. The feature is used with 3 part-of-speech tags: <tt><a href="de_pud-pos-VERB.html">VERB</a></tt> (1497; 7% instances), <tt><a href="de_pud-pos-AUX.html">AUX</a></tt> (827; 4% instances), <tt><a href="de_pud-pos-X.html">X</a></tt> (4; 0% instances). ### `VERB` 1497 <tt><a href="de_pud-pos-VERB.html">VERB</a></tt> tokens (75% of all `VERB` tokens) have a non-empty value of `Tense`. The most frequent other feature values with which `VERB` and `Tense` co-occurred: <tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Ind</tt> (1067; 71%), <tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=3</tt> (1035; 69%), <tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Sing</tt> (794; 53%). 
`VERB` tokens may have the following values of `Tense`: * `Past` (1011; 68% of non-empty `Tense`): <em>sagte, verwendet, wurde, begann, genutzt, war, veröffentlicht, errichtet, gegeben, gewählt</em> * `Pres` (486; 32% of non-empty `Tense`): <em>ist, hat, gibt, sagt, haben, liegt, sind, steht, hilft, beträgt</em> * `EMPTY` (491): <em>an, auf, aus, ein, zurück, haben, ab, machen, werden, finden</em> <table> <tr><th>Paradigm <i>haben</i></th><th><tt>Pres</tt></th><th><tt>Past</tt></th></tr> <tr><td><tt>_</tt></td><td></td><td><em>gehabt</em></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Ind</tt>|<tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Sing</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=3</tt></tt></td><td><em>hat</em></td><td><em>hatte</em></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Ind</tt>|<tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Plur</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=1</tt></tt></td><td><em>haben</em></td><td></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Ind</tt>|<tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Plur</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=3</tt></tt></td><td><em>haben</em></td><td><em>hatten</em></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Sub</tt>|<tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Sing</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=3</tt></tt></td><td><em>habe</em></td><td></td></tr> </table> ### `AUX` 827 <tt><a href="de_pud-pos-AUX.html">AUX</a></tt> tokens (95% of all `AUX` tokens) have a non-empty value of `Tense`. 
The most frequent other feature values with which `AUX` and `Tense` co-occurred: <tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=3</tt> (768; 93%), <tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Ind</tt> (717; 87%), <tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Sing</tt> (567; 69%). `AUX` tokens may have the following values of `Tense`: * `Past` (436; 53% of non-empty `Tense`): <em>war, wurde, worden, wurden, hatte, waren, würde, hatten, könnte, sollte</em> * `Pres` (391; 47% of non-empty `Tense`): <em>ist, wird, sind, hat, kann, werden, haben, können, muss, sei</em> * `EMPTY` (42): <em>werden, sein, haben, können, müssen</em> <table> <tr><th>Paradigm <i>sein</i></th><th><tt>Pres</tt></th><th><tt>Past</tt></th></tr> <tr><td><tt>_</tt></td><td></td><td><em>gewesen</em></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Ind</tt>|<tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Sing</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=1</tt></tt></td><td><em>bin</em></td><td><em>war</em></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Ind</tt>|<tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Sing</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=3</tt></tt></td><td><em>ist</em></td><td><em>war, wawr</em></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Ind</tt>|<tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Plur</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=1</tt></tt></td><td><em>sind</em></td><td><em>waren</em></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Ind</tt>|<tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Plur</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=3</tt></tt></td><td><em>sind</em></td><td><em>waren</em></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Sub</tt>|<tt><a 
href="de_pud-feat-Number.html">Number</a></tt><tt>=Sing</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=3</tt></tt></td><td><em>sei</em></td><td><em>wäre</em></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Sub</tt>|<tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Plur</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=1</tt></tt></td><td><em>seien</em></td><td></td></tr> <tr><td><tt><tt><a href="de_pud-feat-Mood.html">Mood</a></tt><tt>=Sub</tt>|<tt><a href="de_pud-feat-Number.html">Number</a></tt><tt>=Plur</tt>|<tt><a href="de_pud-feat-Person.html">Person</a></tt><tt>=3</tt></tt></td><td><em>seien</em></td><td><em>wären</em></td></tr> </table> ### `X` 4 <tt><a href="de_pud-pos-X.html">X</a></tt> tokens (11% of all `X` tokens) have a non-empty value of `Tense`. `X` tokens may have the following values of `Tense`: * `Past` (2; 50% of non-empty `Tense`): <em>didn', t</em> * `Pres` (2; 50% of non-empty `Tense`): <em>Breaking, Don'</em> * `EMPTY` (32): <em>E, Multi, Bundes, Demografie, Druck, Einzel, Ex, Fjögur, Go, Handels</em> ## Relations with Agreement in `Tense` The 10 most frequent relations where parent and child node agree in `Tense`: <tt>VERB --[<tt><a href="de_pud-dep-aux-pass.html">aux:pass</a></tt>]--> AUX</tt> (154; 67%), <tt>VERB --[<tt><a href="de_pud-dep-conj.html">conj</a></tt>]--> VERB</tt> (139; 79%), <tt>VERB --[<tt><a href="de_pud-dep-advcl.html">advcl</a></tt>]--> VERB</tt> (85; 58%), <tt>VERB --[<tt><a href="de_pud-dep-parataxis.html">parataxis</a></tt>]--> VERB</tt> (23; 61%), <tt>VERB --[<tt><a href="de_pud-dep-acl-relcl.html">acl:relcl</a></tt>]--> VERB</tt> (12; 75%), <tt>VERB --[<tt><a href="de_pud-dep-nsubj.html">nsubj</a></tt>]--> VERB</tt> (1; 100%).
{ "pile_set_name": "Github" }
/* Heading above the search results, nudged off the top edge. */
.search-title {
    padding-top: 5px;
    text-align: right;
}

/* Full-width, centered container for the cache view. */
.cache-content {
    width: 100%;
    min-height: 330px;
    text-align: center;
}

/* Left-aligned JSON dump; long tokens wrap instead of overflowing. */
.json-content {
    min-height: 250px;
    text-align: left;
    word-wrap: break-word;
}
{ "pile_set_name": "Github" }
package com.wangdaye.collection.ui; import android.annotation.SuppressLint; import android.content.Context; import androidx.annotation.NonNull; import androidx.recyclerview.widget.RecyclerView; import android.util.AttributeSet; import android.view.LayoutInflater; import android.view.View; import com.wangdaye.collection.R; import com.wangdaye.collection.R2; import com.wangdaye.base.i.PagerManageView; import com.wangdaye.base.i.PagerView; import com.wangdaye.common.presenter.pager.PagerScrollablePresenter; import com.wangdaye.common.presenter.pager.PagerStateManagePresenter; import com.wangdaye.common.ui.adapter.multipleState.MiniErrorStateAdapter; import com.wangdaye.common.ui.adapter.multipleState.MiniLoadingStateAdapter; import com.wangdaye.common.ui.adapter.photo.PhotoAdapter; import com.wangdaye.common.ui.decoration.GridMarginsItemDecoration; import com.wangdaye.common.ui.widget.MultipleStateRecyclerView; import com.wangdaye.common.ui.widget.insets.FitBottomSystemBarBothWaySwipeRefreshLayout; import com.wangdaye.common.ui.widget.swipeBackView.SwipeBackCoordinatorLayout; import com.wangdaye.common.ui.widget.swipeRefreshView.BothWaySwipeRefreshLayout; import com.wangdaye.common.utils.BackToTopUtils; import com.wangdaye.common.utils.helper.RecyclerViewHelper; import com.wangdaye.common.utils.manager.ThemeManager; import butterknife.BindView; import butterknife.ButterKnife; /** * Collection photos view. * * This view is used to show the photos in a collection. 
 * <p>Presents the photo grid of a single collection inside a both-way
 * swipe-refresh container. The embedded {@link MultipleStateRecyclerView}
 * renders the loading/error placeholder states, and paging events
 * (refresh, load-more, retry) are forwarded to the attached
 * {@link PagerManageView}.
 */
public class CollectionPhotosView extends FitBottomSystemBarBothWaySwipeRefreshLayout
        implements PagerView, BothWaySwipeRefreshLayout.OnRefreshAndLoadListener,
        MiniErrorStateAdapter.OnRetryListener {

    // Multi-state list (normal/loading/error) inflated from container_photo_list_2.
    @BindView(R2.id.container_photo_list_recyclerView)
    MultipleStateRecyclerView recyclerView;

    // Switches the list between NORMAL/LOADING/ERROR presentation states.
    private PagerStateManagePresenter stateManagePresenter;
    // Host that performs the actual refresh/load requests; set by the container.
    private PagerManageView pagerManageView;

    public CollectionPhotosView(Context context) {
        super(context);
        this.init();
    }

    public CollectionPhotosView(Context context, AttributeSet attrs) {
        super(context, attrs);
        this.init();
    }

    // init.

    @SuppressLint("InflateParams")
    private void init() {
        // Inflate without a parent view: this layout adds the content itself.
        View contentView = LayoutInflater.from(getContext())
                .inflate(R.layout.container_photo_list_2, null);
        addView(contentView);
        ButterKnife.bind(this, this);
        initView();
    }

    private void initView() {
        setColorSchemeColors(ThemeManager.getContentColor(getContext()));
        setProgressBackgroundColorSchemeColor(ThemeManager.getRootColor(getContext()));
        setOnRefreshAndLoadListener(this);
        // Both swipe directions stay disabled until the pager permits them.
        setRefreshEnabled(false);
        setLoadEnabled(false);

        recyclerView.setLayoutManager(
                RecyclerViewHelper.getDefaultStaggeredGridLayoutManager(getContext()));
        // Placeholder adapters for the non-normal states; list starts in LOADING.
        recyclerView.setAdapter(new MiniLoadingStateAdapter(),
                MultipleStateRecyclerView.STATE_LOADING);
        recyclerView.setAdapter(new MiniErrorStateAdapter(this),
                MultipleStateRecyclerView.STATE_ERROR);
        recyclerView.setState(MultipleStateRecyclerView.STATE_LOADING);

        stateManagePresenter = new PagerStateManagePresenter(recyclerView);
    }

    // control.

    /**
     * Installs the photo adapter and wires up scroll-driven paging plus the
     * grid margin decoration.
     *
     * @param adapter adapter supplying the collection's photos.
     */
    public void setPhotoAdapter(PhotoAdapter adapter) {
        recyclerView.setAdapter(adapter);
        recyclerView.addOnScrollListener(new RecyclerView.OnScrollListener() {
            @Override
            public void onScrolled(@NonNull RecyclerView recyclerView, int dx, int dy) {
                // Notify the pager presenter so it can trigger load-more near the end.
                PagerScrollablePresenter.onScrolled(
                        CollectionPhotosView.this, recyclerView,
                        adapter.getItemCount(), pagerManageView, 0, dy
                );
            }
        });
        recyclerView.addItemDecoration(new GridMarginsItemDecoration(getContext(), recyclerView));
    }

    /** Attaches the pager host that executes refresh/load requests. */
    public void setPagerManageView(PagerManageView view) {
        pagerManageView = view;
    }

    // interface.

    @Override
    public State getState() {
        return stateManagePresenter.getState();
    }

    @Override
    public boolean setState(State state) {
        return stateManagePresenter.setState(state);
    }

    @Override
    public void setSelected(boolean selected) {
        // do nothing.
    }

    @Override
    public void setSwipeRefreshing(boolean refreshing) {
        setRefreshing(refreshing);
    }

    @Override
    public void setSwipeLoading(boolean loading) {
        setLoading(loading);
    }

    @Override
    public void setPermitSwipeRefreshing(boolean permit) {
        // do nothing.
    }

    @Override
    public void setPermitSwipeLoading(boolean permit) {
        setLoadEnabled(permit);
    }

    @Override
    public boolean checkNeedBackToTop() {
        // Offer back-to-top only when content is scrolled down and shown normally.
        return recyclerView.canScrollVertically(-1)
                && stateManagePresenter.getState() == State.NORMAL;
    }

    @Override
    public void scrollToPageTop() {
        BackToTopUtils.scrollToTop(recyclerView);
    }

    @Override
    public boolean canSwipeBack(int dir) {
        // Placeholder states always allow swipe-back; otherwise ask the list.
        return stateManagePresenter.getState() != State.NORMAL
                || SwipeBackCoordinatorLayout.canSwipeBack(recyclerView, dir);
    }

    @Override
    public RecyclerView getRecyclerView() {
        return recyclerView;
    }

    // on refresh and load listener.

    @Override
    public void onRefresh() {
        pagerManageView.onRefresh(0);
    }

    @Override
    public void onLoad() {
        pagerManageView.onLoad(0);
    }

    // on retry listener.

    @Override
    public void onRetry() {
        // Retrying after an error restarts from the first page.
        pagerManageView.onRefresh(0);
    }
}
{ "pile_set_name": "Github" }
gcr.io/ml-pipeline/ml-pipeline-dataproc-analyze:e20fad3e161e88226c83437271adb063221459b9
{ "pile_set_name": "Github" }
/*------------------------------------------------------------------------------ Copyright (c) 2000 Tyrell Corporation. All rights reserved. Tyrell DarkIce File : AudioEncoder.h Version : $Revision$ Author : $Author$ Location : $Source$ Copyright notice: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ------------------------------------------------------------------------------*/ #ifndef AUDIO_ENCODER_H #define AUDIO_ENCODER_H #ifndef __cplusplus #error This is a C++ include file #endif /* ============================================================ include files */ #include "Referable.h" #include "Sink.h" #include "AudioSource.h" /* ================================================================ constants */ /* =================================================================== macros */ /* =============================================================== data types */ /** * An audio encoder * * @author $Author$ * @version $Revision$ */ class AudioEncoder : public Sink, public virtual Referable { public: /** * Type to specify bitrate mode. Possible values: * - cbr - constant bitrate mode * described by bitrate * - abr - average bitrate mode * described by an average bitrate and quality * - vbr - variable bitrate mode * described by quality */ enum BitrateMode { cbr, abr, vbr }; private: /** * Sample rate of the input. 
*/ unsigned int inSampleRate; /** * Number of bits per sample of the input. */ unsigned int inBitsPerSample; /** * Number of channels of the input. */ unsigned int inChannel; /** * Is the input big endian or little endian? */ bool inBigEndian; /** * The bitrate mode of the encoder */ BitrateMode outBitrateMode; /** * Bit rate of the output in kbits/sec, for fixed bitrate encodings. */ unsigned int outBitrate; /** * Quality of the output, for variable bitrate encodings. */ double outQuality; /** * Sample rate of the output. */ unsigned int outSampleRate; /** * Number of channels of the output. */ unsigned int outChannel; /** * Initialize the object. * * @param inSampleRate sample rate of the input. * @param inBitsPerSample number of bits per sample of the input. * @param inChannel number of channels of the input. * @param inBigEndian shows if the input is big or little endian. * @param outBitrateMode the bit rate mode of the output. * @param outBitrate bit rate of the output. * @param outSampleRate sample rate of the output. * @param outChannel number of channels of the output. * @exception Exception */ inline void init ( unsigned int inSampleRate, unsigned int inBitsPerSample, unsigned int inChannel, bool inBigEndian, BitrateMode outBitrateMode, unsigned int outBitrate, double outQuality, unsigned int outSampleRate, unsigned int outChannel ) throw ( Exception ) { this->inSampleRate = inSampleRate; this->inBitsPerSample = inBitsPerSample; this->inChannel = inChannel; this->inBigEndian = inBigEndian; this->outBitrateMode = outBitrateMode; this->outBitrate = outBitrate; this->outQuality = outQuality; this->outSampleRate = outSampleRate; this->outChannel = outChannel; if ( outQuality < 0 || 1.0 < outQuality ) { throw Exception( __FILE__, __LINE__, "invalid encoder quality"); } } /** * De-iitialize the object. * * @exception Exception */ inline void strip ( void ) throw ( Exception ) { } protected: /** * Default constructor. Always throws an Exception. 
* * @exception Exception */ inline AudioEncoder ( void ) throw ( Exception ) { throw Exception( __FILE__, __LINE__); } /** * Constructor. * * @param inSampleRate sample rate of the input. * @param inBitsPerSample number of bits per sample of the input. * @param inChannel number of channels of the input. * @param inBigEndian shows if the input is big or little endian * @param outBitrateMode the bit rate mode of the output. * @param outBitrate bit rate of the output (kbits/sec). * @param outQuality the quality of the stream. * @param outSampleRate sample rate of the output. * If 0, inSampleRate is used. * @param outChannel number of channels of the output. * If 0, inChannel is used. * @exception Exception */ inline AudioEncoder ( unsigned int inSampleRate, unsigned int inBitsPerSample, unsigned int inChannel, bool inBigEndian, BitrateMode outBitrateMode, unsigned int outBitrate, double outQuality, unsigned int outSampleRate = 0, unsigned int outChannel = 0 ) throw ( Exception ) { init ( inSampleRate, inBitsPerSample, inChannel, inBigEndian, outBitrateMode, outBitrate, outQuality, outSampleRate ? outSampleRate : inSampleRate, outChannel ? outChannel : inChannel ); } /** * Constructor. * * @param as get input sample rate, bits per sample and channels * from this AudioSource. * @param outBitrateMode the bit rate mode of the output. * @param outBitrate bit rate of the output (kbits/sec). * @param outQuality the quality of the stream. * @param outSampleRate sample rate of the output. * If 0, input sample rate is used. * @param outChannel number of channels of the output. * If 0, input channel is used. * @exception Exception */ inline AudioEncoder ( const AudioSource * as, BitrateMode outBitrateMode, unsigned int outBitrate, double outQuality, unsigned int outSampleRate = 0, unsigned int outChannel = 0 ) throw ( Exception) { init( as->getSampleRate(), as->getBitsPerSample(), as->getChannel(), as->isBigEndian(), outBitrateMode, outBitrate, outQuality, outSampleRate ? 
outSampleRate : as->getSampleRate(), outChannel ? outChannel : as->getChannel() ); } /** * Copy constructor. * * @param encoder the AudioEncoder to copy. */ inline AudioEncoder ( const AudioEncoder & encoder ) throw ( Exception ) { init ( encoder.inSampleRate, encoder.inBitsPerSample, encoder.inChannel, encoder.inBigEndian, encoder.outBitrateMode, encoder.outBitrate, encoder.outQuality, encoder.outSampleRate, encoder.outChannel ); } /** * Assignment operator. * * @param encoder the AudioEncoder to assign this to. * @return a reference to this AudioEncoder. * @exception Exception */ inline virtual AudioEncoder & operator= ( const AudioEncoder & encoder ) throw ( Exception ) { if ( this != &encoder ) { strip(); init ( encoder.inSampleRate, encoder.inBitsPerSample, encoder.inChannel, encoder.inBigEndian, encoder.outBitrateMode, encoder.outBitrate, encoder.outQuality, encoder.outSampleRate, encoder.outChannel ); } return *this; } public: /** * Destructor. * * @exception Exception */ inline virtual ~AudioEncoder ( void ) throw ( Exception ) { strip(); } /** * Get the number of channels of the input. * * @return the number of channels of the input. */ inline int getInChannel ( void ) const throw () { return inChannel; } /** * Tell if the input is big or little endian. * * @return true if the input is big endian, false if little endian. */ inline bool isInBigEndian ( void ) const throw () { return inBigEndian; } /** * Get the sample rate of the input. * * @return the sample rate of the input. */ inline int getInSampleRate ( void ) const throw () { return inSampleRate; } /** * Get the number of bits per sample of the input. * * @return the number of bits per sample of the input. */ inline int getInBitsPerSample ( void ) const throw () { return inBitsPerSample; } /** * Get the number of channels of the output. * * @return the number of channels of the output. */ inline int getOutChannel ( void ) const throw () { return outChannel; } /** * Get the sample rate of the output. 
* * @return the sample rate of the output. */ inline int getOutSampleRate ( void ) const throw () { return outSampleRate; } /** * Get the bit rate mode of the output. * * @return the bit rate mode of the output. */ inline BitrateMode getOutBitrateMode ( void ) const throw () { return outBitrateMode; } /** * Get the bit rate of the output in kbits/sec, for fixed bitrate * encodings. * * @return the bit rate of the output. */ inline unsigned int getOutBitrate ( void ) const throw () { return outBitrate; } /** * Get the encoding quality of the output, for variable bitrate * encodings. * * @return the encoding quality of the output. */ inline double getOutQuality ( void ) const throw () { return outQuality; } /** * Check wether encoding is in progress. * * @return true if encoding is in progress, false otherwise. */ virtual bool isRunning ( void ) const throw () = 0; /** * Start encoding. This function returns as soon as possible, * with encoding started in the background. * * @return true if encoding has started, false otherwise. * @exception Exception */ virtual bool start ( void ) throw ( Exception ) = 0; /** * Stop encoding. Stops the encoding running in the background. 
* * @exception Exception */ virtual void stop ( void ) throw ( Exception ) = 0; }; /* ================================================= external data structures */ /* ====================================================== function prototypes */ #endif /* AUDIO_ENCODER_H */ /*------------------------------------------------------------------------------ $Source$ $Log$ Revision 1.8 2002/08/20 19:35:37 darkeye added possibility to specify maximum bitrate for Ogg Vorbis streams Revision 1.7 2002/04/13 11:26:00 darkeye added cbr, abr and vbr setting feature with encoding quality Revision 1.6 2002/03/28 16:39:32 darkeye added interface for variable bitrate encoding Revision 1.5 2002/02/19 15:23:59 darkeye fixed typo Revision 1.4 2001/09/18 14:57:19 darkeye finalized Solaris port Revision 1.3 2001/09/14 19:31:06 darkeye added IceCast2 / vorbis support Revision 1.2 2000/11/12 14:54:50 darkeye added kdoc-style documentation comments Revision 1.1.1.1 2000/11/05 10:05:47 darkeye initial version ------------------------------------------------------------------------------*/
{ "pile_set_name": "Github" }
# This file is used by Rack-based servers to start the application. require ::File.expand_path('../config/environment', __FILE__) run Hermes::Application
{ "pile_set_name": "Github" }
<?xml version="1.0"?> <data> <country name="Liechtenstein"> <rank>1</rank> <year>2008</year> <gdppc>141100</gdppc> <neighbor name="Austria" direction="E"/> <neighbor name="Switzerland" direction="W"/> </country> <country name="Singapore"> <rank>4</rank> <year>2011</year> <gdppc>59900</gdppc> <neighbor name="Malaysia" direction="N"/> </country> <country name="Panama"> <rank>68</rank> <year>2011</year> <gdppc>13600</gdppc> <neighbor name="Costa Rica" direction="W"/> <neighbor name="Colombia" direction="E"/> </country> </data>
{ "pile_set_name": "Github" }
/** * Module dependencies. */ var tty = require('tty'); var util = require('util'); /** * This is the Node.js implementation of `debug()`. * * Expose `debug()` as the module. */ exports = module.exports = require('./debug'); exports.log = log; exports.formatArgs = formatArgs; exports.save = save; exports.load = load; exports.useColors = useColors; /** * Colors. */ exports.colors = [6, 2, 3, 4, 5, 1]; /** * The file descriptor to write the `debug()` calls to. * Set the `DEBUG_FD` env variable to override with another value. i.e.: * * $ DEBUG_FD=3 node script.js 3>debug.log */ var fd = parseInt(process.env.DEBUG_FD, 10) || 2; var stream = 1 === fd ? process.stdout : 2 === fd ? process.stderr : createWritableStdioStream(fd); /** * Is stdout a TTY? Colored output is enabled when `true`. */ function useColors() { var debugColors = (process.env.DEBUG_COLORS || '').trim().toLowerCase(); if (0 === debugColors.length) { return tty.isatty(fd); } else { return '0' !== debugColors && 'no' !== debugColors && 'false' !== debugColors && 'disabled' !== debugColors; } } /** * Map %o to `util.inspect()`, since Node doesn't do that out of the box. */ var inspect = (4 === util.inspect.length ? // node <= 0.8.x function (v, colors) { return util.inspect(v, void 0, void 0, colors); } : // node > 0.8.x function (v, colors) { return util.inspect(v, { colors: colors }); } ); exports.formatters.o = function(v) { return inspect(v, this.useColors) .replace(/\s*\n\s*/g, ' '); }; /** * Adds ANSI color escape codes if enabled. * * @api public */ function formatArgs() { var args = arguments; var useColors = this.useColors; var name = this.namespace; if (useColors) { var c = this.color; args[0] = ' \u001b[3' + c + ';1m' + name + ' ' + '\u001b[0m' + args[0] + '\u001b[3' + c + 'm' + ' +' + exports.humanize(this.diff) + '\u001b[0m'; } else { args[0] = new Date().toUTCString() + ' ' + name + ' ' + args[0]; } return args; } /** * Invokes `console.error()` with the specified arguments. 
*/ function log() { return stream.write(util.format.apply(this, arguments) + '\n'); } /** * Save `namespaces`. * * @param {String} namespaces * @api private */ function save(namespaces) { if (null == namespaces) { // If you set a process.env field to null or undefined, it gets cast to the // string 'null' or 'undefined'. Just delete instead. delete process.env.DEBUG; } else { process.env.DEBUG = namespaces; } } /** * Load `namespaces`. * * @return {String} returns the previously persisted debug modes * @api private */ function load() { return process.env.DEBUG; } /** * Copied from `node/src/node.js`. * * XXX: It's lame that node doesn't expose this API out-of-the-box. It also * relies on the undocumented `tty_wrap.guessHandleType()` which is also lame. */ function createWritableStdioStream (fd) { var stream; var tty_wrap = process.binding('tty_wrap'); // Note stream._type is used for test-module-load-list.js switch (tty_wrap.guessHandleType(fd)) { case 'TTY': stream = new tty.WriteStream(fd); stream._type = 'tty'; // Hack to have stream not keep the event loop alive. // See https://github.com/joyent/node/issues/1726 if (stream._handle && stream._handle.unref) { stream._handle.unref(); } break; case 'FILE': var fs = require('fs'); stream = new fs.SyncWriteStream(fd, { autoClose: false }); stream._type = 'fs'; break; case 'PIPE': case 'TCP': var net = require('net'); stream = new net.Socket({ fd: fd, readable: false, writable: true }); // FIXME Should probably have an option in net.Socket to create a // stream from an existing fd which is writable only. But for now // we'll just add this hack and set the `readable` member to false. // Test: ./node test/fixtures/echo.js < /etc/passwd stream.readable = false; stream.read = null; stream._type = 'pipe'; // FIXME Hack to have stream not keep the event loop alive. 
// See https://github.com/joyent/node/issues/1726 if (stream._handle && stream._handle.unref) { stream._handle.unref(); } break; default: // Probably an error on in uv_guess_handle() throw new Error('Implement me. Unknown stream file type!'); } // For supporting legacy API we put the FD here. stream.fd = fd; stream._isStdio = true; return stream; } /** * Enable namespaces listed in `process.env.DEBUG` initially. */ exports.enable(load());
{ "pile_set_name": "Github" }
namespace MDK.Commands
{
    /// <summary>
    /// Command IDs for the MDK Visual Studio package. The numeric values must
    /// match the command table (.vsct) of the package, so they may only be
    /// reformatted, never renumbered.
    /// </summary>
    static class CommandIds
    {
        // All literals are written as four-digit hex for consistency; the
        // values are unchanged from the original mixed-width literals.

        /// <summary>Command ID for the "Quick Deploy Solution" command.</summary>
        public const int QuickDeploySolution = 0x0100;

        /// <summary>Command ID for the "Project Options" command.</summary>
        public const int ProjectOptions = 0x0101;

        /// <summary>Command ID for the "Refresh Whitelist Cache" command.</summary>
        public const int RefreshWhitelistCache = 0x0102;

        /// <summary>Command ID for the "Check For Updates" command.</summary>
        public const int CheckForUpdates = 0x0103;

        /// <summary>Command ID for the "Deploy Project" command.</summary>
        public const int DeployProject = 0x0104;

        /// <summary>Command ID for the "Blueprint Manager" command.</summary>
        public const int BlueprintManager = 0x0105;

        /// <summary>Command ID for the "Global Blueprint Manager" command.</summary>
        public const int GlobalBlueprintManager = 0x0106;
    }
}
{ "pile_set_name": "Github" }
import os
import copy
import numpy as np
import math
import torch
import torch.nn as nn


def readtextfile(filename):
    """Read a text file and return its lines (trailing newlines preserved)."""
    # `with` closes the file on exit; the original's extra f.close() was redundant.
    with open(filename) as f:
        return f.readlines()


def writetextfile(data, filename):
    """Write an iterable of strings to ``filename`` (no newlines are added)."""
    with open(filename, 'w') as f:
        f.writelines(data)


def delete_file(filename):
    """Delete ``filename`` if it exists; silently do nothing otherwise."""
    if os.path.isfile(filename):
        os.remove(filename)


def eformat(f, prec, exp_digits):
    """Format ``f`` in scientific notation with ``prec`` mantissa digits and a
    signed exponent zero-padded to ``exp_digits`` digits (e.g. 1.23e+003)."""
    s = "%.*e" % (prec, f)
    mantissa, exp = s.split('e')
    # add 1 to digits as 1 is taken by sign +/-
    return "%se%+0*d" % (mantissa, exp_digits + 1, int(exp))


def saveargs(args):
    """Dump every parsed command-line argument to ``<args.logs>/args.txt``,
    one ``name value`` pair per line (creates the directory if needed)."""
    path = args.logs
    if not os.path.isdir(path):
        os.makedirs(path)
    with open(os.path.join(path, 'args.txt'), 'w') as f:
        for arg in vars(args):
            f.write(arg + ' ' + str(getattr(args, arg)) + '\n')


def init_params(net):
    """Initialize ``net`` in place: Kaiming-normal conv weights, unit BatchNorm
    scale with zero shift, and small-std (1e-3) linear weights; biases zeroed.
    """
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # In-place initializers (trailing underscore) replace the
            # deprecated variants removed from modern torch releases.
            nn.init.kaiming_normal_(m.weight, mode='fan_out')
            # `if m.bias:` raised on multi-element tensors; test for presence.
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)


def weights_init(m):
    """Per-module initializer for use with ``net.apply``: He-style normal conv
    weights, unit-scale/zero-shift BatchNorm. Other module types are untouched."""
    if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()


class Counter:  # not used currently
    """Accumulates a running total of mask sizes."""

    def __init__(self):
        # Running total of all sizes passed to update().
        self.mask_size = 0

    def update(self, size):
        """Add ``size`` to the running total."""
        self.mask_size += size

    def get_total(self):
        """Return the accumulated total."""
        return self.mask_size


def act_fn(act):
    """Map an activation name to an ``nn`` module instance.

    Returns ``None`` (after printing a warning) for unknown names. Note that
    'relu' is deliberately non-inplace while the other rectifiers are inplace,
    preserving the original behaviour.
    """
    if act == 'relu':
        act_ = nn.ReLU(inplace=False)
    elif act == 'lrelu':
        act_ = nn.LeakyReLU(inplace=True)
    elif act == 'prelu':
        act_ = nn.PReLU()
    elif act == 'rrelu':
        act_ = nn.RReLU(inplace=True)
    elif act == 'elu':
        act_ = nn.ELU(inplace=True)
    elif act == 'selu':
        act_ = nn.SELU(inplace=True)
    elif act == 'tanh':
        act_ = nn.Tanh()
    elif act == 'sigmoid':
        act_ = nn.Sigmoid()
    else:
        print('\n\nActivation function {} is not supported/understood\n\n'.format(act))
        act_ = None
    return act_


def print_values(x, noise, y, unique_masks, n=2):
    """Debug helper: print the first ``n`` values of selected image, noise and
    mask slices. Assumes batch >= 2 and channels >= 2 -- TODO confirm with callers."""
    np.set_printoptions(precision=5, linewidth=200, threshold=1000000, suppress=True)
    print('\nimage: {} image0, channel0 {}'.format(list(x.unsqueeze(2).size()), x.unsqueeze(2).data[0, 0, 0, 0, :n].cpu().numpy()))
    print('image: {} image0, channel1 {}'.format(list(x.unsqueeze(2).size()), x.unsqueeze(2).data[0, 1, 0, 0, :n].cpu().numpy()))
    print('\nimage: {} image1, channel0 {}'.format(list(x.unsqueeze(2).size()), x.unsqueeze(2).data[1, 0, 0, 0, :n].cpu().numpy()))
    print('image: {} image1, channel1 {}'.format(list(x.unsqueeze(2).size()), x.unsqueeze(2).data[1, 1, 0, 0, :n].cpu().numpy()))
    if noise is not None:
        print('\nnoise {} channel0, mask0: {}'.format(list(noise.size()), noise.data[0, 0, 0, 0, :n].cpu().numpy()))
        print('noise {} channel0, mask1: {}'.format(list(noise.size()), noise.data[0, 0, 1, 0, :n].cpu().numpy()))
        if unique_masks:
            print('\nnoise {} channel1, mask0: {}'.format(list(noise.size()), noise.data[0, 1, 0, 0, :n].cpu().numpy()))
            print('noise {} channel1, mask1: {}'.format(list(noise.size()), noise.data[0, 1, 1, 0, :n].cpu().numpy()))
    print('\nmasks: {} image0, channel0, mask0: {}'.format(list(y.size()), y.data[0, 0, 0, 0, :n].cpu().numpy()))
    print('masks: {} image0, channel0, mask1: {}'.format(list(y.size()), y.data[0, 0, 1, 0, :n].cpu().numpy()))
    print('masks: {} image0, channel1, mask0: {}'.format(list(y.size()), y.data[0, 1, 0, 0, :n].cpu().numpy()))
    print('masks: {} image0, channel1, mask1: {}'.format(list(y.size()), y.data[0, 1, 1, 0, :n].cpu().numpy()))
    print('\nmasks: {} image1, channel0, mask0: {}'.format(list(y.size()), y.data[1, 0, 0, 0, :n].cpu().numpy()))
    print('masks: {} image1, channel0, mask1: {}'.format(list(y.size()), y.data[1, 0, 1, 0, :n].cpu().numpy()))
    print('masks: {} image1, channel1, mask0: {}'.format(list(y.size()), y.data[1, 1, 0, 0, :n].cpu().numpy()))
    print('masks: {} image1, channel1, mask1: {}'.format(list(y.size()), y.data[1, 1, 1, 0, :n].cpu().numpy()))
{ "pile_set_name": "Github" }
/*
 * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

using ElmSharp;

namespace BasicCalculator.ViewModels
{
    /// <summary>
    /// Delegate type used to subscribe to key-pressed notifications.
    /// </summary>
    /// <param name="keyName">Pressed key name.</param>
    public delegate void KeyPressedDelegate(string keyName);

    /// <summary>
    /// Routes keyboard key presses into the calculator view model.
    /// </summary>
    public class KeyboardHandler
    {
        /// <summary>
        /// View model that receives every pressed key.
        /// </summary>
        private readonly CalculatorViewModel _viewModel;

        /// <summary>
        /// Creates a handler bound to the given view model.
        /// </summary>
        /// <param name="viewModel">Target CalculatorViewModel instance.</param>
        public KeyboardHandler(CalculatorViewModel viewModel)
        {
            _viewModel = viewModel;
        }

        /// <summary>
        /// Called for each key press; appends the key to the current expression.
        /// </summary>
        /// <param name="keyName">Pressed key name.</param>
        public void KeyPressed(string keyName) => _viewModel.AppendToExpression(keyName);
    }
}
{ "pile_set_name": "Github" }
/*--------------------------------------------------------------------------------------------- * Copyright (c) Bentley Systems, Incorporated. All rights reserved. * See LICENSE.md in the project root for license terms and full copyright notice. *--------------------------------------------------------------------------------------------*/ /** @packageDocumentation * @module Numerics */ import { Geometry } from "../Geometry"; import { GrowableBlockedArray } from "../geometry3d/GrowableBlockedArray"; import { GrowableXYArray } from "../geometry3d/GrowableXYArray"; import { GrowableXYZArray } from "../geometry3d/GrowableXYZArray"; import { Point2d } from "../geometry3d/Point2dVector2d"; import { Point3d } from "../geometry3d/Point3dVector3d"; /** * Blocked array with operations to sort and cluster with a tolerance. * * Primary sorting is along an "arbitrary" sort vector. * @internal */ export class ClusterableArray extends GrowableBlockedArray { // (This is pretty strange) // The sort vector is (1,c, c*c, ...) // Setting c = 1 makes it 1,1,1 which may be useful for visual scans during debug. // c with some inobvious digits makes it unlikely that there will be multiple points on a perpendicular to the sort vector. private static readonly _vectorFactor = 0.8732; // use 1.0 to rig easy tests. /** Return a component of the sort vector. */ public static sortVectorComponent(index: number): number { let c = 1.0; for (let i = 1; i < index; i++) c *= ClusterableArray._vectorFactor; return c; } private _numCoordinatePerPoint: number; private _numExtraDataPerPoint: number; /** * @param numCoordinatePerPoint number of coordinates per point * @param numExtraDataPerPoint of extra data values per point. * @param initialBlockCapacity predicted number of points. 
(This does not have to be accurate) */ public constructor(numCoordinatePerPoint: number, numExtraDataPerPoint: number, initialBlockCapacity: number) { super(1 + numCoordinatePerPoint + numExtraDataPerPoint, initialBlockCapacity); this._numExtraDataPerPoint = numExtraDataPerPoint; this._numCoordinatePerPoint = numCoordinatePerPoint; } /** load a block, placing data[i] at block[i+1] to allow sort coordinate first. * @param data array of numDataPerBlock values. */ public addBlock(data: number[]) { const i0 = this.newBlockIndex() + 1; const n = Math.min(this.numPerBlock - 1, data.length); for (let i = 0; i < n; i++) this._data[i0 + i] = data[i]; } /** add a block with directly 2 to 5 listed content parameters. * This assumes numDataPerPoint is sufficient for the parameters provided. */ public addDirect(x0: number, x1: number, x2?: number, x3?: number, x4?: number) { const i0 = this.newBlockIndex(); this._data[i0 + 1] = x0; this._data[i0 + 2] = x1; if (x2 !== undefined) this._data[i0 + 3] = x2; if (x3 !== undefined) this._data[i0 + 4] = x3; if (x4 !== undefined) this._data[i0 + 5] = x4; } /** add a block directly from a Point2d with 0 to 3 extras * This assumes numDataPerPoint is sufficient for the parameters provided. */ public addPoint2d(xy: Point2d, a?: number, b?: number, c?: number) { const i0 = this.newBlockIndex(); this._data[i0 + 1] = xy.x; this._data[i0 + 2] = xy.y; if (a !== undefined) this._data[i0 + 3] = a; if (b !== undefined) this._data[i0 + 4] = b; if (c !== undefined) this._data[i0 + 5] = c; } /** add a block with directly from a Point2d with 0 to 3 extras * This assumes numDataPerPoint is sufficient for the parameters provided. 
*/
  public addPoint3d(xyz: Point3d, a?: number, b?: number, c?: number) {
    const i0 = this.newBlockIndex();
    // Slot 0 of the block is reserved for the primary sort key; coordinates start at +1.
    this._data[i0 + 1] = xyz.x;
    this._data[i0 + 2] = xyz.y;
    this._data[i0 + 3] = xyz.z;
    // Optional extra-data values occupy the slots after the coordinates.
    if (a !== undefined) this._data[i0 + 4] = a;
    if (b !== undefined) this._data[i0 + 5] = b;
    if (c !== undefined) this._data[i0 + 6] = c;
  }
  /** Get the xy coordinates by point index. */
  public getPoint2d(blockIndex: number, result?: Point2d): Point2d {
    const i0 = this.blockIndexToDoubleIndex(blockIndex);
    return Point2d.create(this._data[i0 + 1], this._data[i0 + 2], result);
  }
  /** Get the xyz coordinates by point index. */
  public getPoint3d(blockIndex: number, result?: Point3d): Point3d {
    const i0 = this.blockIndexToDoubleIndex(blockIndex);
    return Point3d.create(this._data[i0 + 1], this._data[i0 + 2], this._data[i0 + 3], result);
  }
  /** Return a single extra data value (index `i` is relative to the end of the coordinate slots). */
  public getExtraData(blockIndex: number, i: number): number {
    const i0 = this.blockIndexToDoubleIndex(blockIndex);
    return this._data[i0 + 1 + this._numCoordinatePerPoint + i];
  }
  /** Return a single data value at raw offset `i` within the block (offset 0 is the sort-key slot). */
  public getData(blockIndex: number, i: number): number {
    const i0 = this.blockIndexToDoubleIndex(blockIndex);
    return this._data[i0 + i];
  }
  /** Set a single extra data value (index `i` is relative to the end of the coordinate slots). */
  public setExtraData(blockIndex: number, i: number, value: number): void {
    const i0 = this.blockIndexToDoubleIndex(blockIndex);
    this._data[i0 + 1 + this._numCoordinatePerPoint + i] = value;
  }
  /** This value is used as cluster terminator in the Uint32Array of indices. */
  public static readonly clusterTerminator = 0xFFffFFff;
  /** Test if `x` is the cluster terminator value. */
  public static isClusterTerminator(x: number): boolean {
    return x === ClusterableArray.clusterTerminator;
  }
  /** Return an array giving clusters of blocks with similar coordinates.
   *
   * * The contents of each block is assumed to be set up so the primary sort coordinate is first.
   * ** Simple coordinate blocks (x,y) or (x,y,z) would work, but have occasional performance problems
   *    because points with the same x would generate big blocks of candidates for clusters.
   * ** The usual solution is to use a value u which is a dot product along some skew direction, and have
   *    the blocks contain (u,x,y) or (u,x,y,z) for 2d versus 3d.
   * ** Apply setupPrimaryClusterSort to prepare that!!!
   * * After a simple lexical sort, consecutive blocks that are within tolerance in the 0 component
   *   are inspected.  Within that candidate set, all blocks that are within tolerance for ALL components
   *   are clustered.
   * * In the output cluster array, each cluster is terminated by an invalid index.  Test for the invalid
   *   index with ClusterableArray.isClusterTerminator(x).
   */
  public clusterIndicesLexical(clusterTolerance: number = Geometry.smallMetricDistance): Uint32Array {
    // install primary sort key
    this.setupPrimaryClusterSort();
    // presort by all coordinates ....
    const firstSort = this.sortIndicesLexical();
    const clusterIndices = new Uint32Array(2 * firstSort.length); // worst case: no duplicates, each index goes in followed by terminator.
    let m = 0; // number of cluster indices
    const n = this.numBlocks; // and this must match firstSort.length !!
    let clusterStartBlockIndex = 0;
    let candidateBlockIndex = 0;
    let barrierU = 0.0;
    let i = 0;
    let j = 0;
    const k0 = 1; // beginning of active column for distance
    const k1 = 1 + this._numCoordinatePerPoint;
    for (i = 0; i < n; i++) {
      clusterStartBlockIndex = firstSort[i];
      if (!ClusterableArray.isClusterTerminator(clusterStartBlockIndex)) {
        // unused block, so it becomes a cluster...
        clusterIndices[m++] = clusterStartBlockIndex;
        // No later block whose sort key exceeds this barrier can be within tolerance.
        barrierU = this.component(clusterStartBlockIndex, 0) + clusterTolerance;
        firstSort[i] = ClusterableArray.clusterTerminator;
        for (j = i + 1; j < n; j++) {
          candidateBlockIndex = firstSort[j];
          if (candidateBlockIndex === ClusterableArray.clusterTerminator) continue; // nearby in sort direction but already in a cluster.
          if (this.component(candidateBlockIndex, 0) >= barrierU) break;
          if (this.distanceBetweenSubBlocks(clusterStartBlockIndex, candidateBlockIndex, k0, k1) < clusterTolerance) {
            clusterIndices[m++] = candidateBlockIndex; // The candidate is in the cluster
            firstSort[j] = ClusterableArray.clusterTerminator; // and it will not be reused as future cluster base
          }
        }
        clusterIndices[m++] = ClusterableArray.clusterTerminator;
      }
    }
    // Alas, the clusterIndices array has fluff at the end. So it has to be copied.
    return clusterIndices.slice(0, m);
  }
  /** Setup (overwrite!!) the "0" component with the dot product of numClusterCoordinate later components
   * with a non-axis-aligned vector.  This is normally called before clusterIndicesLexical.
   */
  public setupPrimaryClusterSort() {
    const nb = this.numBlocks;
    const nc = this._numCoordinatePerPoint;
    // Skewed weight vector (1, f, f^2, ...) so equal-x points still get distinct sort keys.
    const vector = new Float64Array(nc);
    vector[0] = 1.0;
    for (let c = 1; c < nc; c++) vector[c] = vector[c - 1] * ClusterableArray._vectorFactor;
    let k = 0;
    let dot = 0.0;
    const data = this._data;
    for (let b = 0; b < nb; b++) {
      k = this.blockIndexToDoubleIndex(b);
      dot = 0.0;
      for (let c = 0; c < nc; c++) {
        dot += vector[c] * data[k + 1 + c];
      }
      data[k] = dot;
    }
  }
  /** Convert the cluster data to an array of tuples with point i in the form
   * `[i, primarySortCoordinate, [x,y,..], extraData0, extraData1, ...]`
   */
  public toJSON(): any[] {
    const result: any[] = [];
    for (let b = 0; b < this.numBlocks; b++) {
      let i = this.blockIndexToDoubleIndex(b);
      const chunk: any[] = [b, this._data[i++]];
      const coordinates = [];
      for (let c = 0; c < this._numCoordinatePerPoint; c++) coordinates.push(this._data[i++]);
      chunk.push(coordinates);
      for (let c = 0; c < this._numExtraDataPerPoint; c++) chunk.push(this._data[i++]);
      result.push(chunk);
    }
    return result;
  }
  /**
   * Return an array of indices from block index to cluster index.
   * @param clusteredBlocks clusters of block indices followed by separators.
*/ public createIndexBlockToClusterIndex(clusteredBlocks: Uint32Array): Uint32Array { const numBlocks = this.numBlocks; const blockToCluster = new Uint32Array(numBlocks); blockToCluster.fill(ClusterableArray.clusterTerminator); let numCluster = 0; for (const b of clusteredBlocks) { if (b >= numBlocks) { numCluster++; } else { blockToCluster[b] = numCluster; } } return blockToCluster; } /** * Return an array of indices from block index to index of its cluster's start in the cluster index array. * @param clusteredBlocks clusters of block indices followed by separators. */ public createIndexBlockToClusterStart(clusteredBlocks: Uint32Array): Uint32Array { const n = clusteredBlocks.length; const numBlocks = this.numBlocks; const blockToClusterStart = new Uint32Array(numBlocks); const terminator = ClusterableArray.clusterTerminator; blockToClusterStart.fill(terminator); let clusterStart = 0; for (let i = 0; i < n; i++) { const k = clusteredBlocks[i]; if (k > numBlocks) { clusterStart = i + 1; } else { blockToClusterStart[k] = clusterStart; } } return blockToClusterStart; } /** count the clusters in the clusteredBlocks array. 
*/ public countClusters(clusteredBlocks: Uint32Array): number { let numClusters = 0; const terminator = ClusterableArray.clusterTerminator; for (const b of clusteredBlocks) { if (b === terminator) numClusters++; } return numClusters; } /** create a reverse index: given a cluster index k, clusterToClusterStart[k] is the place * the cluster's block indices appear in clusterBlocks */ public createIndexClusterToClusterStart(clusteredBlocks: Uint32Array): Uint32Array { let numCluster = this.countClusters(clusteredBlocks); const clusterToClusterStart = new Uint32Array(numCluster); const terminator = ClusterableArray.clusterTerminator; clusterToClusterStart.fill(terminator); const n = clusteredBlocks.length; let clusterStart = 0; for (let i = 0; i < n; i++) { const k = clusteredBlocks[i]; if (k === terminator) { clusterStart = i + 1; } else if (i === clusterStart) { clusterToClusterStart[numCluster++] = clusterStart; } } return clusterToClusterStart; } /** * Sort terminator-delimited subsets of an array of indices into the table, using a single extraData index as sort key. * @param blockedIndices [in] indices, organized as blocks of good indices terminated by the clusterTerminator. * @param extraDataIndex index of the extra data key. */ public sortSubsetsBySingleKey(blockedIndices: Uint32Array, dataIndex: number) { const dataOffset = 1 + dataIndex; let kBegin = 0; let swap; let key0, key1; const numK = blockedIndices.length; for (let kEnd = 0; kEnd < numK; kEnd++) { if (blockedIndices[kEnd] === ClusterableArray.clusterTerminator) { // sort blockedIndices[kBegin ,= k < kEnd]. // (search for minimum remaining, swap . . 
) for (let k0 = kBegin; k0 + 1 < kEnd; k0++) { key0 = this.getWithinBlock(blockedIndices[k0], dataOffset); for (let k1 = k0 + 1; k1 < kEnd; k1++) { key1 = this.getWithinBlock(blockedIndices[k1], dataOffset); if (key1 < key0) { swap = blockedIndices[k0]; blockedIndices[k0] = blockedIndices[k1]; blockedIndices[k1] = swap; key0 = key1; } } } kBegin = kEnd + 1; } } } /** * Returns packed points with indices mapping old to new. * @param data points to cluster. */ public static clusterPoint3dArray(data: Point3d[], tolerance: number = Geometry.smallMetricDistance): PackedPointsWithIndex { const clusterArray = new ClusterableArray(3, 0, data.length); data.forEach((p: Point3d) => { clusterArray.addDirect(p.x, p.y, p.z); }); const order = clusterArray.clusterIndicesLexical(tolerance); const result = new PackedPointsWithIndex(data.length); let currentClusterIndex = 0; let numThisCluster = 0; order.forEach((k: number) => { if (ClusterableArray.isClusterTerminator(k)) { currentClusterIndex++; numThisCluster = 0; } else { if (numThisCluster === 0) result.packedPoints.push(data[k].clone()); result.oldToNew[k] = currentClusterIndex; numThisCluster++; } }); return result; } /** * Returns packed points with indices mapping old to new. * @param data points to cluster. 
*/
  public static clusterGrowablePoint2dArray(source: GrowableXYArray, tolerance: number = Geometry.smallMetricDistance): PackedPoint2dsWithIndex {
    const clusterArray = new ClusterableArray(2, 0, source.length);
    const p = Point2d.create();
    const numSourcePoint = source.length;
    for (let i = 0; i < numSourcePoint; i++) {
      source.getPoint2dAtUncheckedPointIndex(i, p);
      clusterArray.addDirect(p.x, p.y);
    }
    const order = clusterArray.clusterIndicesLexical(tolerance);
    const numPackedPoints = clusterArray.countClusters(order);
    const result = new PackedPoint2dsWithIndex(source.length, numPackedPoints);
    let currentClusterIndex = 0;
    let numThisCluster = 0;
    order.forEach((k: number) => {
      if (ClusterableArray.isClusterTerminator(k)) {
        currentClusterIndex++;
        numThisCluster = 0;
      } else {
        if (numThisCluster === 0) // This is the first encounter with a new cluster
          result.growablePackedPoints.pushFromGrowableXYArray(source, k);
        result.oldToNew[k] = currentClusterIndex;
        numThisCluster++;
      }
    });
    return result;
  }
  /**
   * Returns packed points with indices mapping old to new.
   * @param source points to cluster.
   */
  public static clusterGrowablePoint3dArray(source: GrowableXYZArray, tolerance: number = Geometry.smallMetricDistance): PackedPointsWithIndex {
    const clusterArray = new ClusterableArray(3, 0, source.length);
    const p = Point3d.create();
    const numSourcePoint = source.length;
    for (let i = 0; i < numSourcePoint; i++) {
      source.getPoint3dAtUncheckedPointIndex(i, p);
      clusterArray.addDirect(p.x, p.y, p.z);
    }
    const order = clusterArray.clusterIndicesLexical(tolerance);
    const result = new PackedPointsWithIndex(source.length);
    const numPackedPoints = clusterArray.countClusters(order);
    result.growablePackedPoints = new GrowableXYZArray(numPackedPoints);
    let currentClusterIndex = 0;
    let numThisCluster = 0;
    order.forEach((k: number) => {
      if (ClusterableArray.isClusterTerminator(k)) {
        currentClusterIndex++;
        numThisCluster = 0;
      } else {
        if (numThisCluster === 0) // This is the first encounter with a new cluster
          result.growablePackedPoints!.pushFromGrowableXYZArray(source, k);
        result.oldToNew[k] = currentClusterIndex;
        numThisCluster++;
      }
    });
    return result;
  }
}
/**
 * Rewrite each entry of `indices` through the `oldToNew` mapping (in place).
 * Entries out of range of `oldToNew` are left unchanged and counted as errors.
 * @internal
 */
function updateIndices(indices: number[], oldToNew: Uint32Array): boolean {
  let numErrors = 0;
  indices.forEach((value: number, i: number, data: number[]) => {
    if (value < oldToNew.length) {
      data[i] = oldToNew[value];
    } else numErrors++;
  });
  return numErrors === 0;
}
/**
 * Data carrier class for
 * * packedPoints = an array of Point3d
 * * oldToNew = array of indices from some prior Point3d[] to the packed points.
 * @internal
 */
class PackedPointsWithIndex {
  /** Array of Point3d */
  public packedPoints: Point3d[];
  /** array of coordinates packed in GrowableXYZArray */
  public growablePackedPoints: GrowableXYZArray | undefined;
  /** mapping from old point index to new point index. */
  public oldToNew: Uint32Array;
  /** integer value for unknown index.
*/ public static readonly invalidIndex = 0xFFFFffff; /** construct a PackedPoints object with * * empty packedPoints array * * oldToNew indices all initialized to PackedPoints.invalidIndex */ constructor(numOldIndexEntry: number) { this.packedPoints = []; this.oldToNew = new Uint32Array(numOldIndexEntry); for (let i = 0; i < numOldIndexEntry; i++) { this.oldToNew[i] = PackedPointsWithIndex.invalidIndex; } } /** * Use the oldToNew array to update an array of "old" indices. * @param indices array of indices into prepacked array. * @returns true if all input indices were valid for the oldToNew array. */ public updateIndices(indices: number[]): boolean { return updateIndices(indices, this.oldToNew); } } /** * @internal */ class PackedPoint2dsWithIndex { /** array of coordinates packed in GrowableXYArray */ public growablePackedPoints: GrowableXYArray; /** mapping from old point index to new point index. */ public oldToNew: Uint32Array; /** integer value for unknown index. */ public static readonly invalidIndex = 0xFFFFffff; /** construct a PackedPoints object with * * empty packedPoints array * * oldToNew indices all initialized to PackedPoints.invalidIndex */ constructor(numOldIndexEntry: number, numPackedPoints: number) { this.growablePackedPoints = new GrowableXYArray(numPackedPoints); this.oldToNew = new Uint32Array(numOldIndexEntry); for (let i = 0; i < numOldIndexEntry; i++) { this.oldToNew[i] = PackedPoint2dsWithIndex.invalidIndex; } } /** * Use the oldToNew array to update an array of "old" indices. * @param indices array of indices into prepacked array. * @returns true if all input indices were valid for the oldToNew array. */ public updateIndices(indices: number[]): boolean { return updateIndices(indices, this.oldToNew); } }
{ "pile_set_name": "Github" }
/* * Copyright 2012-2019 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.context.properties.source; import org.junit.jupiter.api.Test; import static org.assertj.core.api.Assertions.assertThat; /** * Tests for {@link AliasedConfigurationPropertySource}. * * @author Phillip Webb * @author Madhura Bhave */ class AliasedIterableConfigurationPropertySourceTests extends AliasedConfigurationPropertySourceTests { @Test void streamShouldIncludeAliases() { MockConfigurationPropertySource source = new MockConfigurationPropertySource(); source.put("foo.bar", "bing"); source.put("foo.baz", "biff"); IterableConfigurationPropertySource aliased = source .withAliases(new ConfigurationPropertyNameAliases("foo.bar", "foo.bar1")); assertThat(aliased.stream()).containsExactly(ConfigurationPropertyName.of("foo.bar"), ConfigurationPropertyName.of("foo.bar1"), ConfigurationPropertyName.of("foo.baz")); } }
{ "pile_set_name": "Github" }
/* Copyright (c) 2003-2004, Roger Dingledine
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2013, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file util_process.c
 * \brief utility functions for launching processes and checking their
 *   status. These functions are kept separately from procmon so that they
 *   won't require linking against libevent.
 **/

#include "orconfig.h"

#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

#include "compat.h"
#include "util.h"
#include "torlog.h"
#include "util_process.h"
#include "ht.h"

/* ================================================== */
/* Convenience structures for handlers for waitpid().
 *
 * The tor_process_monitor*() code above doesn't use them, since it is for
 * monitoring a non-child process.
 */

#ifndef _WIN32

/** Mapping from a PID to a userfn/userdata pair. */
struct waitpid_callback_t {
  HT_ENTRY(waitpid_callback_t) node;
  pid_t pid;                          /**< Child PID being watched; hash key. */
  void (*userfn)(int, void *userdata); /**< Called with the waitpid() status. */
  void *userdata;
  unsigned running;                   /**< Nonzero while the entry is in the map. */
};

/** Hash a map entry by its PID. */
static INLINE unsigned int
process_map_entry_hash_(const waitpid_callback_t *ent)
{
  return (unsigned) ent->pid;
}

/** Two entries are equal iff they watch the same PID. */
static INLINE unsigned int
process_map_entries_eq_(const waitpid_callback_t *a,
                        const waitpid_callback_t *b)
{
  return a->pid == b->pid;
}

/** Global PID -> callback map; only touched from the main thread
 * (no locking visible here -- NOTE(review): confirm single-threaded use). */
static HT_HEAD(process_map, waitpid_callback_t) process_map =
  HT_INITIALIZER();

HT_PROTOTYPE(process_map, waitpid_callback_t, node, process_map_entry_hash_,
             process_map_entries_eq_);
HT_GENERATE(process_map, waitpid_callback_t, node, process_map_entry_hash_,
            process_map_entries_eq_, 0.6, malloc, realloc, free);

/**
 * Begin monitoring the child pid <b>pid</b> to see if we get a SIGCHLD for
 * it.  If we eventually do, call <b>fn</b>, passing it the exit status (as
 * yielded by waitpid) and the pointer <b>arg</b>.
 *
 * To cancel this, or clean up after it has triggered, call
 * clear_waitpid_callback().
 */
waitpid_callback_t *
set_waitpid_callback(pid_t pid, void (*fn)(int, void *), void *arg)
{
  waitpid_callback_t *old_ent;
  waitpid_callback_t *ent = tor_malloc_zero(sizeof(waitpid_callback_t));
  ent->pid = pid;
  ent->userfn = fn;
  ent->userdata = arg;
  ent->running = 1;
  /* HT_REPLACE returns any previous entry for the same PID. */
  old_ent = HT_REPLACE(process_map, &process_map, ent);
  if (old_ent) {
    log_warn(LD_BUG, "Replaced a waitpid monitor on pid %u. That should be "
             "impossible.", (unsigned) pid);
    /* Mark the displaced entry as no longer in the map so
     * clear_waitpid_callback() won't try to remove it again. */
    old_ent->running = 0;
  }

  return ent;
}

/**
 * Cancel a waitpid_callback_t, or clean up after one has triggered. Releases
 * all storage held by <b>ent</b>.
 */
void
clear_waitpid_callback(waitpid_callback_t *ent)
{
  waitpid_callback_t *old_ent;
  if (ent == NULL)
    return;

  if (ent->running) {
    old_ent = HT_REMOVE(process_map, &process_map, ent);
    if (old_ent != ent) {
      /* Map is inconsistent; leak rather than free an entry we may not own. */
      log_warn(LD_BUG, "Couldn't remove waitpid monitor for pid %u.",
               (unsigned) ent->pid);
      return;
    }
  }

  tor_free(ent);
}

/** Helper: find the callback for <b>pid</b>; if there is one, run it,
 * reporting the exit status as <b>status</b>. */
static void
notify_waitpid_callback_by_pid(pid_t pid, int status)
{
  waitpid_callback_t search, *ent;

  search.pid = pid;
  ent = HT_REMOVE(process_map, &process_map, &search);
  if (!ent || !ent->running) {
    log_info(LD_GENERAL, "Child process %u has exited; no callback was "
             "registered", (unsigned)pid);
    return;
  }

  log_info(LD_GENERAL, "Child process %u has exited; running callback.",
           (unsigned)pid);

  /* The entry is out of the map now; the callback is expected to free it
   * via clear_waitpid_callback(). */
  ent->running = 0;
  ent->userfn(status, ent->userdata);
}

/** Use waitpid() to wait for all children that have exited, and invoke any
 * callbacks registered for them. */
void
notify_pending_waitpid_callbacks(void)
{
  /* I was going to call this function reap_zombie_children(), but
   * that makes it sound way more exciting than it really is. */
  pid_t child;
  int status = 0;

  /* WNOHANG: reap every already-exited child without blocking. */
  while ((child = waitpid(-1, &status, WNOHANG)) > 0) {
    notify_waitpid_callback_by_pid(child, status);
    status = 0; /* should be needless */
  }
}

#endif
{ "pile_set_name": "Github" }
["3.2.1+0"] git-tree-sha1 = "f2c12e7da9c4f7fab577619626b772f607e67b04" ["3.2.1+1"] git-tree-sha1 = "68b165c609961207baa3174e03d6d56e7a37124c" ["3.2.1+2"] git-tree-sha1 = "12d1f5728d8b17cbc7f721e80808a6cefb7c362b" ["3.2.1+3"] git-tree-sha1 = "a30d50f837074b6ab43b81d9dd8be190b40cdd45"
{ "pile_set_name": "Github" }
; Regression test: sample-profile-driven inlining must still fire when the
; call site is a bitcast'ed function pointer (instcombine runs first and may
; strip the cast).  The profile file marks @bar as hot inside @foo.
; RUN: opt < %s -instcombine -sample-profile -sample-profile-file=%S/Inputs/inline-combine.prof -S | FileCheck %s
; RUN: opt < %s -passes="function(instcombine),sample-profile" -sample-profile-file=%S/Inputs/inline-combine.prof -S | FileCheck %s

; Minimal reconstruction of the LLVM SmallVector type hierarchy so the
; bitcast between SmallVector and SmallVectorImpl pointers type-checks.
%"class.llvm::FoldingSetNodeID" = type { %"class.llvm::SmallVector" }
%"class.llvm::SmallVector" = type { %"class.llvm::SmallVectorImpl.base", %"struct.llvm::SmallVectorStorage" }
%"class.llvm::SmallVectorImpl.base" = type { %"class.llvm::SmallVectorTemplateBase.base" }
%"class.llvm::SmallVectorTemplateBase.base" = type { %"class.llvm::SmallVectorTemplateCommon.base" }
%"class.llvm::SmallVectorTemplateCommon.base" = type <{ %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion" }>
%"class.llvm::SmallVectorBase" = type { i8*, i8*, i8* }
%"struct.llvm::AlignedCharArrayUnion" = type { %"struct.llvm::AlignedCharArray" }
%"struct.llvm::AlignedCharArray" = type { [4 x i8] }
%"struct.llvm::SmallVectorStorage" = type { [31 x %"struct.llvm::AlignedCharArrayUnion"] }
%"class.llvm::SmallVectorImpl" = type { %"class.llvm::SmallVectorTemplateBase.base", [4 x i8] }

$foo = comdat any

$bar = comdat any

define void @foo(%"class.llvm::FoldingSetNodeID"* %this) comdat align 2 !dbg !3 {
  %1 = alloca %"class.llvm::FoldingSetNodeID"*, align 8
  store %"class.llvm::FoldingSetNodeID"* %this, %"class.llvm::FoldingSetNodeID"** %1, align 8
  %2 = load %"class.llvm::FoldingSetNodeID"*, %"class.llvm::FoldingSetNodeID"** %1, align 8
  %3 = getelementptr inbounds %"class.llvm::FoldingSetNodeID", %"class.llvm::FoldingSetNodeID"* %2, i32 0, i32 0
; the call should have been inlined after sample-profile pass
; CHECK-NOT: call
  call void bitcast (void (%"class.llvm::SmallVectorImpl"*)* @bar to void (%"class.llvm::SmallVector"*)*)(%"class.llvm::SmallVector"* %3), !dbg !7
  ret void
}

define void @bar(%"class.llvm::SmallVectorImpl"* %this) comdat align 2 !dbg !8 {
  ret void
}

!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
!llvm.dbg.cu = !{!9}

; Debug metadata: line info is required for the sample profile to match.
!0 = !{i32 2, !"Dwarf Version", i32 4}
!1 = !{i32 1, !"Debug Info Version", i32 3}
!2 = !{!"clang version 3.5 "}
!3 = distinct !DISubprogram(name: "foo", scope: !4, file: !4, line: 3, type: !5, isLocal: false, isDefinition: true, scopeLine: 3, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !9, variables: !6)
!4 = !DIFile(filename: "test.cc", directory: ".")
!5 = !DISubroutineType(types: !6)
!6 = !{}
!7 = !DILocation(line: 4, scope: !3)
!8 = distinct !DISubprogram(name: "bar", scope: !4, file: !4, line: 7, type: !5, isLocal: false, isDefinition: true, scopeLine: 7, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !9, variables: !6)
!9 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.5 ", isOptimized: false, emissionKind: FullDebug, file: !4)
{ "pile_set_name": "Github" }
# scale.tcl - Copyright (C) 2004 Pat Thoyts <patthoyts@users.sourceforge.net> # # Bindings for the TScale widget # # $Id: scale.tcl,v 1.1 2006/10/31 01:42:27 hobbs Exp $ namespace eval ttk::scale { variable State array set State { dragging 0 } } bind TScale <ButtonPress-1> { ttk::scale::Press %W %x %y } bind TScale <B1-Motion> { ttk::scale::Drag %W %x %y } bind TScale <ButtonRelease-1> { ttk::scale::Release %W %x %y } proc ttk::scale::Press {w x y} { variable State set State(dragging) 0 switch -glob -- [$w identify $x $y] { *track - *trough { if {[$w get $x $y] <= [$w get]} { ttk::Repeatedly Increment $w -1 } else { ttk::Repeatedly Increment $w 1 } } *slider { set State(dragging) 1 set State(initial) [$w get] } } } proc ttk::scale::Drag {w x y} { variable State if {$State(dragging)} { $w set [$w get $x $y] } } proc ttk::scale::Release {w x y} { variable State set State(dragging) 0 ttk::CancelRepeat } proc ttk::scale::Increment {w delta} { if {![winfo exists $w]} return $w set [expr {[$w get] + $delta}] }
{ "pile_set_name": "Github" }
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: google/protobuf/source_context.proto package types import ( bytes "bytes" fmt "fmt" proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" reflect "reflect" strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // `SourceContext` represents information about the source of a // protobuf element, like the file in which it is defined. type SourceContext struct { // The path-qualified name of the .proto file that contained the associated // protobuf element. For example: `"google/protobuf/source_context.proto"`. 
FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SourceContext) Reset() { *m = SourceContext{} } func (*SourceContext) ProtoMessage() {} func (*SourceContext) Descriptor() ([]byte, []int) { return fileDescriptor_b686cdb126d509db, []int{0} } func (m *SourceContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *SourceContext) XXX_Merge(src proto.Message) { xxx_messageInfo_SourceContext.Merge(m, src) } func (m *SourceContext) XXX_Size() int { return m.Size() } func (m *SourceContext) XXX_DiscardUnknown() { xxx_messageInfo_SourceContext.DiscardUnknown(m) } var xxx_messageInfo_SourceContext proto.InternalMessageInfo func (m *SourceContext) GetFileName() string { if m != nil { return m.FileName } return "" } func (*SourceContext) XXX_MessageName() string { return "google.protobuf.SourceContext" } func init() { proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") } func init() { proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db) } var fileDescriptor_b686cdb126d509db = []byte{ // 212 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, 0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49, 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 
0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1, 0xf9, 0x00, 0x00, 0x00, } func (this *SourceContext) Compare(that interface{}) int { if that == nil { if this == nil { return 0 } return 1 } that1, ok := that.(*SourceContext) if !ok { that2, ok := that.(SourceContext) if ok { that1 = &that2 } else { return 1 } } if that1 == nil { if this == nil { return 0 } return 1 } else if this == nil { return -1 } if this.FileName != that1.FileName { if this.FileName < that1.FileName { return -1 } return 1 } if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { return c } return 0 } func (this *SourceContext) Equal(that interface{}) bool { if that == nil { return this == nil } that1, ok := that.(*SourceContext) if !ok { that2, ok := that.(SourceContext) if ok { that1 = &that2 } else { return false } } if that1 == nil { return this == nil } else if this == nil { return false } if this.FileName != that1.FileName { return false } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } return true } func (this *SourceContext) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 5) s = append(s, "&types.SourceContext{") s = append(s, "FileName: 
"+fmt.Sprintf("%#v", this.FileName)+",\n") if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } s = append(s, "}") return strings.Join(s, "") } func valueToGoStringSourceContext(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { return "nil" } pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } func (m *SourceContext) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.XXX_unrecognized != nil { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } if len(m.FileName) > 0 { i -= len(m.FileName) copy(dAtA[i:], m.FileName) i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int { offset -= sovSourceContext(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext { this := &SourceContext{} this.FileName = string(randStringSourceContext(r)) if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedSourceContext(r, 2) } return this } type randySourceContext interface { Float32() float32 Float64() float64 Int63() int64 Int31() int32 Uint32() uint32 Intn(n int) int } func randUTF8RuneSourceContext(r randySourceContext) rune { ru := r.Intn(62) if ru < 10 { return rune(ru + 48) } else if ru < 36 { return rune(ru + 55) } return rune(ru + 61) } func 
randStringSourceContext(r randySourceContext) string { v1 := r.Intn(100) tmps := make([]rune, v1) for i := 0; i < v1; i++ { tmps[i] = randUTF8RuneSourceContext(r) } return string(tmps) } func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) { l := r.Intn(5) for i := 0; i < l; i++ { wire := r.Intn(4) if wire == 3 { wire = 5 } fieldNumber := maxFieldNumber + r.Intn(100) dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire) } return dAtA } func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte { key := uint32(fieldNumber)<<3 | uint32(wire) switch wire { case 0: dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) v2 := r.Int63() if r.Intn(2) == 0 { v2 *= -1 } dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2)) case 1: dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) case 2: dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) ll := r.Intn(100) dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll)) for j := 0; j < ll; j++ { dAtA = append(dAtA, byte(r.Intn(256))) } default: dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) } return dAtA } func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte { for v >= 1<<7 { dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) v >>= 7 } dAtA = append(dAtA, uint8(v)) return dAtA } func (m *SourceContext) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.FileName) if l > 0 { n += 1 + l + sovSourceContext(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func sovSourceContext(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozSourceContext(x uint64) (n int) { return 
sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (this *SourceContext) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&SourceContext{`, `FileName:` + fmt.Sprintf("%v", this.FileName) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s } func valueToStringSourceContext(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { return "nil" } pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } func (m *SourceContext) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowSourceContext } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SourceContext: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowSourceContext } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthSourceContext } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthSourceContext } if postIndex > l { return io.ErrUnexpectedEOF } m.FileName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipSourceContext(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthSourceContext } if (iNdEx + skippy) < 0 { return 
ErrInvalidLengthSourceContext } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipSourceContext(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowSourceContext } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowSourceContext } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowSourceContext } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthSourceContext } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupSourceContext } depth-- case 5: iNdEx += 4 default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthSourceContext } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupSourceContext = fmt.Errorf("proto: unexpected end of group") )
{ "pile_set_name": "Github" }
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** // *** Do not edit by hand unless you're certain you know what you are doing! *** using System; using System.Collections.Generic; using System.Collections.Immutable; using System.Threading.Tasks; using Pulumi.Serialization; namespace Pulumi.Aws.Kinesis.Inputs { public sealed class FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs : Pulumi.ResourceArgs { /// <summary> /// Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below. /// </summary> [Input("deserializer", required: true)] public Input<Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs> Deserializer { get; set; } = null!; public FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs() { } } }
{ "pile_set_name": "Github" }
package api

import (
	"context"
	"fmt"
	"net/http"
	"strconv"

	"github.com/NYTimes/gizmo/server/kit"
	ocontext "golang.org/x/net/context"

	"github.com/NYTimes/gizmo/examples/nyt"
)

// GRPC LAYER, add the middleware layer ourselves

// GetMostPopularResourceTypeSectionTimeframe is the gRPC-facing endpoint.
// It funnels the request through the service middleware chain before
// delegating to the shared getMostPopular business logic.
func (s service) GetMostPopularResourceTypeSectionTimeframe(ctx ocontext.Context, req *GetMostPopularResourceTypeSectionTimeframeRequest) (*MostPopularResponse, error) {
	res, err := s.Middleware(s.getMostPopular)(ctx, req)
	// res may be nil when the middleware/endpoint failed; only type-assert
	// a non-nil result.
	if res != nil {
		return res.(*MostPopularResponse), err
	}
	return nil, err
}

// SHARED BIZ LAYER

// getMostPopular is the transport-agnostic endpoint shared by the HTTP and
// gRPC layers. r must be a *GetMostPopularResourceTypeSectionTimeframeRequest.
func (s service) getMostPopular(ctx context.Context, r interface{}) (interface{}, error) {
	mpr := r.(*GetMostPopularResourceTypeSectionTimeframeRequest)
	res, err := s.client.GetMostPopular(mpr.ResourceType, mpr.Section, uint(mpr.Timeframe))
	if err != nil {
		// NOTE(review): the upstream error is discarded here and an empty
		// request struct is returned as the 400 body — presumably intentional
		// for this example, but confirm the original error should not be
		// logged or surfaced to the caller.
		return nil, kit.NewJSONStatusResponse(
			&GetMostPopularResourceTypeSectionTimeframeRequest{},
			http.StatusBadRequest)
	}
	kit.LogMsg(ctx, fmt.Sprintf("most popular results found: %d", len(res)))
	return mpToMP(res), nil
}

// CUSTOM HTTP REQUEST DECODER

// decodeMostPopularRequest translates an HTTP request's path variables into
// the shared request struct consumed by getMostPopular.
func decodeMostPopularRequest(ctx context.Context, r *http.Request) (interface{}, error) {
	vs := kit.Vars(r)
	// timeframe is parsed with an 8-bit limit even though the request field
	// is int32, so values above 255 are rejected as a bad request.
	timeframe, err := strconv.ParseUint(vs["timeframe"], 10, 8)
	if err != nil {
		return nil, kit.NewJSONStatusResponse(
			&MostPopularResponse{Status: "bad request"},
			http.StatusBadRequest)
	}
	return &GetMostPopularResourceTypeSectionTimeframeRequest{
		ResourceType: vs["resourceType"],
		Section:      vs["section"],
		Timeframe:    int32(timeframe),
	}, nil
}

// BIZ LOGIC THAT SHOULD/COULD LIVE SOMEWHERE ELSE?
func mpToMP(res []*nyt.MostPopularResult) *MostPopularResponse { var mpr MostPopularResponse mpr.NumResults = uint32(len(res)) mpr.Status = "OK" mpr.Results = make([]*MostPopularResult, len(res)) for i, r := range res { mpr.Results[i] = &MostPopularResult{ Abstract: r.Abstract, AssetID: r.AsssetId, Byline: r.Byline, Column: r.Column, ID: r.Id, Keywords: r.AdxKeywords, PublishedDate: r.PublishedDate, Section: r.Section, Source: r.Source, Title: r.Title, Type: r.Type, URL: r.Url, } } return &mpr }
{ "pile_set_name": "Github" }
// adapted from https://github.com/alexstanbury/chartist-plugin-axistitle/blob/master/src/scripts/chartist-plugin-axistitle.js
import Chartist from 'chartist'

// Per-axis defaults; any of these can be overridden through the plugin options.
const axisDefaults = {
  axisTitle: '',
  axisClass: 'ct-axis-title',
  offset: { x: 0, y: 0 },
  textAnchor: 'middle',
  flipTitle: false
};

const defaultOptions = {
  axisX: axisDefaults,
  axisY: axisDefaults
};

// Titles and classes may be supplied as factory functions; resolve them lazily.
const getTitle = function (title) {
  if (title instanceof Function) {
    return title();
  }
  return title;
};

const getClasses = function (classes) {
  if (classes instanceof Function) {
    return classes();
  }
  return classes;
};

Chartist.plugins = Chartist.plugins || {};
// Chartist plugin that draws a text title alongside the X and/or Y axis.
// It hooks the chart's 'created' event so titles are re-drawn on every render.
Chartist.plugins.ctAxisTitle = function (options) {
  options = Chartist.extend({}, defaultOptions, options);

  return function ctAxisTitle(chart) {
    chart.on('created', function (data) {
      // Fail loudly on misconfiguration: at least one title and one axis
      // must exist for the plugin to have anything to do.
      if (!options.axisX.axisTitle && !options.axisY.axisTitle) {
        throw new Error(
          'ctAxisTitle plugin - You must provide at least one axis title'
        );
      } else if (!data.axisX && !data.axisY) {
        throw new Error(
          'ctAxisTitle plugin can only be used on charts that have at least one axis'
        );
      }

      let xPos,
        yPos,
        title,
        chartPadding = Chartist.normalizePadding(data.options.chartPadding); // normalize the padding in case the full padding object was not passed into the options

      //position axis X title
      if (options.axisX.axisTitle && data.axisX) {
        // Center horizontally along the X axis, accounting for the Y axis
        // offset and the left chart padding.
        xPos = (data.axisX.axisLength / 2) + data.options.axisY.offset + chartPadding.left;
        yPos = chartPadding.top;

        if (data.options.axisY.position === 'end') {
          xPos -= data.options.axisY.offset;
        }

        // When the X axis labels sit at the bottom, push the title below the
        // plot area by the Y axis length.
        if (data.options.axisX.position === 'end') {
          yPos += data.axisY.axisLength;
        }

        title = new Chartist.Svg("text");
        title.addClass(getClasses(options.axisX.axisClass));
        title.text(getTitle(options.axisX.axisTitle));
        title.attr({
          x: xPos + options.axisX.offset.x,
          y: yPos + options.axisX.offset.y,
          "text-anchor": options.axisX.textAnchor
        });
        data.svg.append(title, true);
      }

      //position axis Y title
      if (options.axisY.axisTitle && data.axisY) {
        xPos = 0;
        // Center vertically along the Y axis.
        yPos = (data.axisY.axisLength / 2) + chartPadding.top;

        if (data.options.axisX.position === 'start') {
          yPos += data.options.axisX.offset;
        }

        if (data.options.axisY.position === 'end') {
          xPos = data.axisX.axisLength;
        }

        // Rotate the title to run along the axis; flipTitle chooses the
        // reading direction.
        const transform = 'rotate(' + (options.axisY.flipTitle ? -90 : 90) + ', ' + xPos + ', ' + yPos + ')';

        title = new Chartist.Svg("text");
        title.addClass(getClasses(options.axisY.axisClass));
        title.text(getTitle(options.axisY.axisTitle));
        title.attr({
          x: xPos + options.axisY.offset.x,
          y: yPos + options.axisY.offset.y,
          transform: transform,
          "text-anchor": options.axisY.textAnchor
        });

        data.svg.append(title, true);
      }
    });
  };
};
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!-- http://www.phpunit.de/manual/current/en/appendixes.configuration.html --> <phpunit backupGlobals = "false" backupStaticAttributes = "false" colors = "true" convertErrorsToExceptions = "true" convertNoticesToExceptions = "true" convertWarningsToExceptions = "true" processIsolation = "false" stopOnFailure = "true" syntaxCheck = "false" bootstrap = "vendor/autoload.php" > <testsuites> <testsuite name="Test Suite"> <directory>tests</directory> </testsuite> </testsuites> </phpunit>
{ "pile_set_name": "Github" }
srcmod=bambooblocks bamboo srcmod=upgrade_aquatic driftwood srcmod=endergetic poise srcmod=swampexpansion willow srcmod=bloomful wisteria
{ "pile_set_name": "Github" }
# `wasmer`

[![Build Status](https://github.com/wasmerio/wasmer/workflows/build/badge.svg?style=flat-square)](https://github.com/wasmerio/wasmer/actions?query=workflow%3Abuild)
[![Join Wasmer Slack](https://img.shields.io/static/v1?label=Slack&message=join%20chat&color=brightgreen&style=flat-square)](https://slack.wasmer.io)
[![MIT License](https://img.shields.io/github/license/wasmerio/wasmer.svg?style=flat-square)](https://github.com/wasmerio/wasmer/blob/master/LICENSE)

[`Wasmer`](https://wasmer.io/) is the most popular [WebAssembly](https://webassembly.org/) runtime for Rust — and also the fastest! It supports JIT (Just-in-Time) and AOT (Ahead-of-Time) compilation as well as pluggable compilers suited to your needs.

It's designed to be safe and secure, and runnable in any kind of environment.

## Usage

Add this to your `Cargo.toml`:

```toml
[dependencies]
wasmer = "1.0.0-alpha"
```

```rust
use wasmer::{Store, Module, Instance, Value, imports};

fn main() -> anyhow::Result<()> {
    let module_wat = r#"
    (module
    (type $t0 (func (param i32) (result i32)))
    (func $add_one (export "add_one") (type $t0) (param $p0 i32) (result i32)
        get_local $p0
        i32.const 1
        i32.add))
    "#;

    let store = Store::default();
    let module = Module::new(&store, &module_wat);
    // The module doesn't import anything, so we create an empty import object.
    let import_object = imports! {};
    let instance = Instance::new(&module, &import_object)?;

    let add_one = instance.exports.get_function("add_one")?;
    let result = add_one.call([Value::I32(42)])?;
    assert_eq!(result[0], Value::I32(43));

    Ok(())
}
```

## Features

Wasmer is not only fast, but also designed to be *highly customizable*:

* **Pluggable Engines**: do you have a fancy `dlopen` implementation? This is for you!
* **Pluggable Compilers**: do you want to emit code with DynASM or another compiler? We've got you covered!
* **Headless mode**: no compiler is required to run a `serialized` Module (via `Module::deserialize()`).
* **Cross-compilation**: you can pre-compile a module and serialize it, then run it on another platform (via `Module::serialize()`).

## Config flags

Wasmer has the following configuration flags:

* `wat` (enabled by default): allows reading WebAssembly files in their text format. *This feature is normally used only in development environments.*
* Compilers (mutually exclusive):
  - `singlepass`: uses `wasmer-compiler-singlepass` as the default compiler (ideal for **blockchains**).
  - `cranelift`: uses `wasmer-compiler-cranelift` as the default compiler (ideal for **development**).
  - `llvm`: uses `wasmer-compiler-llvm` as the default compiler (ideal for **production**).

Wasmer ships with the `cranelift` compiler by default, as it's great for development purposes. However, we strongly encourage using the `llvm` backend in production, as it performs about 50% faster and achieves near-native speeds.

> Note: if you want to use multiple compilers at the same time, that's also possible!
> You will need to import them directly via each of the compiler crates.

---

Made with ❤️ by the Wasmer team, for the community
{ "pile_set_name": "Github" }
// Copyright (c) 2019-2020 Alexander Medvednikov. All rights reserved.
// Use of this source code is governed by an MIT license
// that can be found in the LICENSE file.
// Directed acyclic graph
// this implementation is specifically suited to ordering dependencies
module depgraph

// DepGraphNode is a single module/node together with the names it depends on.
struct DepGraphNode {
pub mut:
	name string
	deps []string
}

// DepGraph holds the full dependency graph; acyclic is set to false when a
// cycle is detected during resolve().
struct DepGraph {
pub mut:
	acyclic bool
	nodes   []DepGraphNode
}

// OrderedDepMap maps a node name to its dependency list while also
// remembering the insertion order of the keys.
struct OrderedDepMap {
mut:
	keys []string
	data map[string][]string
}

// set stores deps for name, registering the key on first use.
pub fn (mut o OrderedDepMap) set(name string, deps []string) {
	if name !in o.data {
		o.keys << name
	}
	o.data[name] = deps
}

// add appends any dependencies not already recorded for name.
// Fix: dropped the original's empty `else {}` branch, which was dead code.
pub fn (mut o OrderedDepMap) add(name string, deps []string) {
	mut d := o.data[name]
	for dep in deps {
		if dep !in d {
			d << dep
		}
	}
	o.set(name, d)
}

// get returns the recorded dependencies of name (empty if unknown).
pub fn (o &OrderedDepMap) get(name string) []string {
	return o.data[name]
}

// delete removes name and its key-order entry; panics if name is unknown.
pub fn (mut o OrderedDepMap) delete(name string) {
	if name !in o.data {
		panic('delete: no such key: $name')
	}
	for i, _ in o.keys {
		if o.keys[i] == name {
			o.keys.delete(i)
			break
		}
	}
	o.data.delete(name)
}

// apply_diff keeps only those dependencies of name that are NOT in deps,
// i.e. it subtracts an already-resolved set from the pending one.
pub fn (mut o OrderedDepMap) apply_diff(name string, deps []string) {
	mut diff := []string{}
	for dep in o.data[name] {
		if dep !in deps {
			diff << dep
		}
	}
	o.set(name, diff)
}

// size reports how many nodes are still tracked.
pub fn (o &OrderedDepMap) size() int {
	return o.data.len
}

// new_dep_graph returns an empty graph, optimistically marked acyclic.
pub fn new_dep_graph() &DepGraph {
	return &DepGraph{
		acyclic: true
	}
}

// add appends a node for mod with a copy of its dependency list.
pub fn (mut graph DepGraph) add(mod string, deps []string) {
	graph.nodes << DepGraphNode{
		name: mod
		deps: deps.clone()
	}
}

// resolve topologically orders the graph (Kahn-style: repeatedly emit nodes
// with no outstanding deps). If a cycle is found, the returned graph has
// acyclic == false and contains the unresolved remainder for diagnostics.
pub fn (graph &DepGraph) resolve() &DepGraph {
	mut node_names := OrderedDepMap{}
	mut node_deps := OrderedDepMap{}
	for node in graph.nodes {
		node_names.add(node.name, node.deps)
		node_deps.add(node.name, node.deps)
	}
	mut resolved := new_dep_graph()
	for node_deps.size() != 0 {
		// every node with no outstanding deps is ready to be emitted
		mut ready_set := []string{}
		for name in node_deps.keys {
			deps := node_deps.data[name]
			if deps.len == 0 {
				ready_set << name
			}
		}
		// nothing ready but nodes remain => dependency cycle
		if ready_set.len == 0 {
			mut g := new_dep_graph()
			g.acyclic = false
			for name in node_deps.keys {
				g.add(name, node_names.data[name])
			}
			return g
		}
		for name in ready_set {
			node_deps.delete(name)
			resolved.add(name, node_names.data[name])
		}
		// drop the freshly resolved names from everyone's pending deps
		for name in node_deps.keys {
			node_deps.apply_diff(name, ready_set)
		}
	}
	return resolved
}

// last_node returns the most recently added node.
pub fn (graph &DepGraph) last_node() DepGraphNode {
	return graph.nodes[graph.nodes.len - 1]
}

// display renders every edge as ' * name -> dep' lines.
pub fn (graph &DepGraph) display() string {
	mut out := '\n'
	for node in graph.nodes {
		for dep in node.deps {
			out += ' * $node.name -> $dep\n'
		}
	}
	return out
}

// display_cycles renders only the mutually-dependent (two-node cycle) edges.
pub fn (graph &DepGraph) display_cycles() string {
	mut node_names := map[string]DepGraphNode
	for node in graph.nodes {
		node_names[node.name] = node
	}
	mut out := '\n'
	for node in graph.nodes {
		for dep in node.deps {
			if dep !in node_names {
				continue
			}
			dn := node_names[dep]
			if node.name in dn.deps {
				out += ' * $node.name -> $dep\n'
			}
		}
	}
	return out
}
{ "pile_set_name": "Github" }
/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License */ package com.android.systemui.statusbar.phone; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.verify; import android.content.res.Resources; import android.hardware.display.AmbientDisplayConfiguration; import android.os.PowerManager; import android.test.suitebuilder.annotation.SmallTest; import androidx.test.runner.AndroidJUnit4; import com.android.systemui.SysuiTestCase; import com.android.systemui.doze.AlwaysOnDisplayPolicy; import com.android.systemui.doze.DozeScreenState; import com.android.systemui.tuner.TunerService; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.MockitoAnnotations; @SmallTest @RunWith(AndroidJUnit4.class) public class DozeParametersTest extends SysuiTestCase { private DozeParameters mDozeParameters; @Mock Resources mResources; @Mock private AmbientDisplayConfiguration mAmbientDisplayConfiguration; @Mock private AlwaysOnDisplayPolicy mAlwaysOnDisplayPolicy; @Mock private PowerManager mPowerManager; @Mock private TunerService mTunerService; @Before public void setup() { MockitoAnnotations.initMocks(this); mDozeParameters = new DozeParameters( mResources, mAmbientDisplayConfiguration, mAlwaysOnDisplayPolicy, mPowerManager, mTunerService ); } @Test public void 
test_setControlScreenOffAnimation_setsDozeAfterScreenOff_false() { mDozeParameters.setControlScreenOffAnimation(true); reset(mPowerManager); mDozeParameters.setControlScreenOffAnimation(false); verify(mPowerManager).setDozeAfterScreenOff(eq(true)); } @Test public void test_setControlScreenOffAnimation_setsDozeAfterScreenOff_true() { mDozeParameters.setControlScreenOffAnimation(false); reset(mPowerManager); mDozeParameters.setControlScreenOffAnimation(true); verify(mPowerManager).setDozeAfterScreenOff(eq(false)); } @Test public void test_getWallpaperAodDuration_when_shouldControlScreenOff() { mDozeParameters.setControlScreenOffAnimation(true); Assert.assertEquals( "wallpaper hides faster when controlling screen off", mDozeParameters.getWallpaperAodDuration(), DozeScreenState.ENTER_DOZE_HIDE_WALLPAPER_DELAY); } }
{ "pile_set_name": "Github" }
#import <Foundation/Foundation.h>
#include <CoreMotion/CoreMotion.h>

// Thin wrapper around Core Motion's device-motion updates that reports each
// CMDeviceMotion sample (or error) through caller-supplied blocks.
@interface FODeviceMotion : NSObject

// YES while device-motion updates are being delivered.
@property (nonatomic, readonly, getter=isSensing) BOOL sensing;
// Invoked for each new device-motion sample. NOTE(review): the leading
// underscore in a public property name is unconventional — confirm existing
// callers rely on it before considering a rename.
@property (nonatomic, copy) void (^_onDataChanged)(CMDeviceMotion* deviceMotionData);
// Invoked when an error is reported instead of a sample.
@property (nonatomic, copy) void (^_onError)(NSError* error);

// Whether this device exposes the device-motion sensor at all.
+ (BOOL)isSensorAvailable;

// Use initWithBlock:error: instead; plain init is unavailable.
- (instancetype)init NS_UNAVAILABLE;
// Designated initializer: wires up the data and error callbacks.
- (instancetype)initWithBlock:(void (^)(CMDeviceMotion* deviceMotionData))onDataChanged error:(void (^)(NSError* err))onError;

// Start/stop delivering updates; each returns whether the transition occurred.
- (BOOL)startSensing;
- (BOOL)stopSensing;

@end
{ "pile_set_name": "Github" }
@using Jmelosegui.Mvc.GoogleMap @{ ViewBag.Title = "Multiple Maps"; Layout = "~/Views/Shared/_LayoutExamples.cshtml"; } <div class="row"> <div class="col-md-6" style="height: 300px"> @( Html.GoogleMap() .Name("map") ) </div> <div class="col-md-6" style="height: 300px"> @( Html.GoogleMap() .Name("map1") .Center(c => c.Address("Madrid, Spain")) ) </div> </div>
{ "pile_set_name": "Github" }
# Special extra rules for this directory

# Regenerate subtypes.cc by running the find_subtypes tool over the project
# directories listed in $(DIRS) with the abstract types in $(ABSTRACT_TYPES).
# The '.' prerequisite makes the rule re-run whenever this directory changes.
# NOTE(review): backslash path separators suggest this rule targets a
# Windows make variant (e.g. nmake) — confirm before porting elsewhere.
subtypes.cc : . utilities\find_subtypes.exe
	@echo "Making subtypes.cc"
	utilities\find_subtypes $(DIRS) - $(ABSTRACT_TYPES) >subtypes.cc
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?>
<!-- Four primary test colors plus an integer-array that references them,
     used to exercise color-array resource resolution in the support UI. -->
<resources>
    <color name="support_ui_red">#F00</color>
    <color name="support_ui_green">#0F0</color>
    <color name="support_ui_blue">#00F</color>
    <color name="support_ui_yellow">#FF0</color>
    <integer-array name="support_ui_color_array">
        <item>@color/support_ui_red</item>
        <item>@color/support_ui_green</item>
        <item>@color/support_ui_blue</item>
        <item>@color/support_ui_yellow</item>
    </integer-array>
</resources>
{ "pile_set_name": "Github" }
require File.join(File.dirname(__FILE__), 'helper') class TC_Statement < Test::Unit::TestCase def setup @db = SQLite3::Database.new( "test.db" ) @db.transaction do @db.execute "create table foo ( a integer primary key, b text )" @db.execute "insert into foo ( b ) values ( 'foo' )" @db.execute "insert into foo ( b ) values ( 'bar' )" @db.execute "insert into foo ( b ) values ( 'baz' )" end @stmt = @db.prepare( "select * from foo where a in ( ?, :named )" ) end def teardown @stmt.close @db.close File.delete( "test.db" ) end def test_remainder_empty assert_equal "", @stmt.remainder end def test_remainder_nonempty called = false @db.prepare( "select * from foo;\n blah" ) do |stmt| called = true assert_equal "\n blah", stmt.remainder end assert called end def test_bind_params_empty assert_nothing_raised { @stmt.bind_params } assert @stmt.execute!.empty? end def test_bind_params_array @stmt.bind_params 1, 2 assert_equal 2, @stmt.execute!.length end def test_bind_params_hash @stmt.bind_params ":named" => 2 assert_equal 1, @stmt.execute!.length end def test_bind_params_hash_without_colon @stmt.bind_params "named" => 2 assert_equal 1, @stmt.execute!.length end def test_bind_params_hash_as_symbol @stmt.bind_params :named => 2 assert_equal 1, @stmt.execute!.length end def test_bind_params_mixed @stmt.bind_params( 1, ":named" => 2 ) assert_equal 2, @stmt.execute!.length end def test_bind_param_by_index @stmt.bind_params( 1, 2 ) assert_equal 2, @stmt.execute!.length end def test_bind_param_by_name_bad assert_raise( SQLite3::Exception ) { @stmt.bind_param( "@named", 2 ) } end def test_bind_param_by_name_good @stmt.bind_param( ":named", 2 ) assert_equal 1, @stmt.execute!.length end def test_bind_param_with_various_types @db.transaction do @db.execute "create table all_types ( a integer primary key, b float, c string, d integer )" @db.execute "insert into all_types ( b, c, d ) values ( 1.4, 'hello', 68719476735 )" end assert_equal 1, @db.execute( "select * from all_types where b = 
?", 1.4 ).length assert_equal 1, @db.execute( "select * from all_types where c = ?", 'hello').length assert_equal 1, @db.execute( "select * from all_types where d = ?", 68719476735).length end def test_execute_no_bind_no_block assert_instance_of SQLite3::ResultSet, @stmt.execute end def test_execute_with_bind_no_block assert_instance_of SQLite3::ResultSet, @stmt.execute( 1, 2 ) end def test_execute_no_bind_with_block called = false @stmt.execute { |row| called = true } assert called end def test_execute_with_bind_with_block called = 0 @stmt.execute( 1, 2 ) { |row| called += 1 } assert_equal 1, called end def test_reexecute r = @stmt.execute( 1, 2 ) assert_equal 2, r.to_a.length assert_nothing_raised { r = @stmt.execute( 1, 2 ) } assert_equal 2, r.to_a.length end def test_execute_bang_no_bind_no_block assert @stmt.execute!.empty? end def test_execute_bang_with_bind_no_block assert_equal 2, @stmt.execute!( 1, 2 ).length end def test_execute_bang_no_bind_with_block called = 0 @stmt.execute! { |row| called += 1 } assert_equal 0, called end def test_execute_bang_with_bind_with_block called = 0 @stmt.execute!( 1, 2 ) { |row| called += 1 } assert_equal 2, called end def test_columns c1 = @stmt.columns c2 = @stmt.columns assert_same c1, c2 assert_equal 2, c1.length end def test_columns_computed called = false @db.prepare( "select count(*) from foo" ) do |stmt| called = true assert_equal [ "count(*)" ], stmt.columns end assert called end def test_types t1 = @stmt.types t2 = @stmt.types assert_same t1, t2 assert_equal 2, t1.length end def test_types_computed called = false @db.prepare( "select count(*) from foo" ) do |stmt| called = true assert_equal [ nil ], stmt.types end assert called end def test_close stmt = @db.prepare( "select * from foo" ) assert !stmt.closed? stmt.close assert stmt.closed? assert_raise( SQLite3::Exception ) { stmt.execute } assert_raise( SQLite3::Exception ) { stmt.execute! 
} assert_raise( SQLite3::Exception ) { stmt.close } assert_raise( SQLite3::Exception ) { stmt.bind_params 5 } assert_raise( SQLite3::Exception ) { stmt.bind_param 1, 5 } assert_raise( SQLite3::Exception ) { stmt.columns } assert_raise( SQLite3::Exception ) { stmt.types } end def test_committing_tx_with_statement_active called = false @db.prepare( "select count(*) from foo" ) do |stmt| called = true count = stmt.execute!.first.first.to_i @db.transaction do @db.execute "insert into foo ( b ) values ( 'hello' )" end new_count = stmt.execute!.first.first.to_i assert_equal new_count, count+1 end assert called end end
{ "pile_set_name": "Github" }
/* * Table styles */ table.dataTable { width: 100%; margin: 0 auto; clear: both; border-collapse: separate; border-spacing: 0; /* * Header and footer styles */ /* * Body styles */ } table.dataTable thead th, table.dataTable thead td, table.dataTable tfoot th, table.dataTable tfoot td { padding: 4px 10px; } table.dataTable thead th, table.dataTable tfoot th { font-weight: bold; } table.dataTable thead th:active, table.dataTable thead td:active { outline: none; } table.dataTable thead .sorting_asc, table.dataTable thead .sorting_desc, table.dataTable thead .sorting { cursor: pointer; *cursor: hand; } table.dataTable thead th div.DataTables_sort_wrapper { position: relative; padding-right: 10px; } table.dataTable thead th div.DataTables_sort_wrapper span { position: absolute; top: 50%; margin-top: -8px; right: -5px; } table.dataTable thead th.ui-state-default { border-right-width: 0; } table.dataTable thead th.ui-state-default:last-child { border-right-width: 1px; } table.dataTable tbody tr { background-color: #ffffff; } table.dataTable tbody tr.selected { background-color: #B0BED9; } table.dataTable tbody th, table.dataTable tbody td { padding: 8px 10px; } table.dataTable th.center, table.dataTable td.center, table.dataTable td.dataTables_empty { text-align: center; } table.dataTable th.right, table.dataTable td.right { text-align: right; } table.dataTable.row-border tbody th, table.dataTable.row-border tbody td, table.dataTable.display tbody th, table.dataTable.display tbody td { border-top: 1px solid #ddd; } table.dataTable.row-border tbody tr:first-child th, table.dataTable.row-border tbody tr:first-child td, table.dataTable.display tbody tr:first-child th, table.dataTable.display tbody tr:first-child td { border-top: none; } table.dataTable.cell-border tbody th, table.dataTable.cell-border tbody td { border-top: 1px solid #ddd; border-right: 1px solid #ddd; } table.dataTable.cell-border tbody tr th:first-child, table.dataTable.cell-border tbody tr td:first-child 
{ border-left: 1px solid #ddd; } table.dataTable.cell-border tbody tr:first-child th, table.dataTable.cell-border tbody tr:first-child td { border-top: none; } table.dataTable.stripe tbody tr.odd, table.dataTable.display tbody tr.odd { background-color: #f9f9f9; } table.dataTable.stripe tbody tr.odd.selected, table.dataTable.display tbody tr.odd.selected { background-color: #abb9d3; } table.dataTable.hover tbody tr:hover, table.dataTable.hover tbody tr.odd:hover, table.dataTable.hover tbody tr.even:hover, table.dataTable.display tbody tr:hover, table.dataTable.display tbody tr.odd:hover, table.dataTable.display tbody tr.even:hover { background-color: whitesmoke; } table.dataTable.hover tbody tr:hover.selected, table.dataTable.hover tbody tr.odd:hover.selected, table.dataTable.hover tbody tr.even:hover.selected, table.dataTable.display tbody tr:hover.selected, table.dataTable.display tbody tr.odd:hover.selected, table.dataTable.display tbody tr.even:hover.selected { background-color: #a9b7d1; } table.dataTable.order-column tbody tr > .sorting_1, table.dataTable.order-column tbody tr > .sorting_2, table.dataTable.order-column tbody tr > .sorting_3, table.dataTable.display tbody tr > .sorting_1, table.dataTable.display tbody tr > .sorting_2, table.dataTable.display tbody tr > .sorting_3 { background-color: #f9f9f9; } table.dataTable.order-column tbody tr.selected > .sorting_1, table.dataTable.order-column tbody tr.selected > .sorting_2, table.dataTable.order-column tbody tr.selected > .sorting_3, table.dataTable.display tbody tr.selected > .sorting_1, table.dataTable.display tbody tr.selected > .sorting_2, table.dataTable.display tbody tr.selected > .sorting_3 { background-color: #acbad4; } table.dataTable.display tbody tr.odd > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd > .sorting_1 { background-color: #f1f1f1; } table.dataTable.display tbody tr.odd > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd > .sorting_2 { background-color: 
#f3f3f3; } table.dataTable.display tbody tr.odd > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd > .sorting_3 { background-color: whitesmoke; } table.dataTable.display tbody tr.odd.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_1 { background-color: #a6b3cd; } table.dataTable.display tbody tr.odd.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_2 { background-color: #a7b5ce; } table.dataTable.display tbody tr.odd.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_3 { background-color: #a9b6d0; } table.dataTable.display tbody tr.even > .sorting_1, table.dataTable.order-column.stripe tbody tr.even > .sorting_1 { background-color: #f9f9f9; } table.dataTable.display tbody tr.even > .sorting_2, table.dataTable.order-column.stripe tbody tr.even > .sorting_2 { background-color: #fbfbfb; } table.dataTable.display tbody tr.even > .sorting_3, table.dataTable.order-column.stripe tbody tr.even > .sorting_3 { background-color: #fdfdfd; } table.dataTable.display tbody tr.even.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_1 { background-color: #acbad4; } table.dataTable.display tbody tr.even.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_2 { background-color: #adbbd6; } table.dataTable.display tbody tr.even.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_3 { background-color: #afbdd8; } table.dataTable.display tbody tr:hover > .sorting_1, table.dataTable.display tbody tr.odd:hover > .sorting_1, table.dataTable.display tbody tr.even:hover > .sorting_1, table.dataTable.order-column.hover tbody tr:hover > .sorting_1, table.dataTable.order-column.hover tbody tr.odd:hover > .sorting_1, table.dataTable.order-column.hover tbody tr.even:hover > .sorting_1 { background-color: #eaeaea; } table.dataTable.display 
tbody tr:hover > .sorting_2, table.dataTable.display tbody tr.odd:hover > .sorting_2, table.dataTable.display tbody tr.even:hover > .sorting_2, table.dataTable.order-column.hover tbody tr:hover > .sorting_2, table.dataTable.order-column.hover tbody tr.odd:hover > .sorting_2, table.dataTable.order-column.hover tbody tr.even:hover > .sorting_2 { background-color: #ebebeb; } table.dataTable.display tbody tr:hover > .sorting_3, table.dataTable.display tbody tr.odd:hover > .sorting_3, table.dataTable.display tbody tr.even:hover > .sorting_3, table.dataTable.order-column.hover tbody tr:hover > .sorting_3, table.dataTable.order-column.hover tbody tr.odd:hover > .sorting_3, table.dataTable.order-column.hover tbody tr.even:hover > .sorting_3 { background-color: #eeeeee; } table.dataTable.display tbody tr:hover.selected > .sorting_1, table.dataTable.display tbody tr.odd:hover.selected > .sorting_1, table.dataTable.display tbody tr.even:hover.selected > .sorting_1, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_1, table.dataTable.order-column.hover tbody tr.odd:hover.selected > .sorting_1, table.dataTable.order-column.hover tbody tr.even:hover.selected > .sorting_1 { background-color: #a1aec7; } table.dataTable.display tbody tr:hover.selected > .sorting_2, table.dataTable.display tbody tr.odd:hover.selected > .sorting_2, table.dataTable.display tbody tr.even:hover.selected > .sorting_2, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_2, table.dataTable.order-column.hover tbody tr.odd:hover.selected > .sorting_2, table.dataTable.order-column.hover tbody tr.even:hover.selected > .sorting_2 { background-color: #a2afc8; } table.dataTable.display tbody tr:hover.selected > .sorting_3, table.dataTable.display tbody tr.odd:hover.selected > .sorting_3, table.dataTable.display tbody tr.even:hover.selected > .sorting_3, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_3, table.dataTable.order-column.hover tbody 
tr.odd:hover.selected > .sorting_3, table.dataTable.order-column.hover tbody tr.even:hover.selected > .sorting_3 { background-color: #a4b2cb; } table.dataTable.nowrap th, table.dataTable.nowrap td { white-space: nowrap; } table.dataTable.compact thead th, table.dataTable.compact thead td { padding: 5px 9px; } table.dataTable.compact tfoot th, table.dataTable.compact tfoot td { padding: 5px 9px 3px 9px; } table.dataTable.compact tbody th, table.dataTable.compact tbody td { padding: 4px 5px; } table.dataTable th.dt-left, table.dataTable td.dt-left { text-align: left; } table.dataTable th.dt-center, table.dataTable td.dt-center, table.dataTable td.dataTables_empty { text-align: center; } table.dataTable th.dt-right, table.dataTable td.dt-right { text-align: right; } table.dataTable th.dt-justify, table.dataTable td.dt-justify { text-align: justify; } table.dataTable th.dt-nowrap, table.dataTable td.dt-nowrap { white-space: nowrap; } table.dataTable thead th.dt-head-left, table.dataTable thead td.dt-head-left, table.dataTable tfoot th.dt-head-left, table.dataTable tfoot td.dt-head-left { text-align: left; } table.dataTable thead th.dt-head-center, table.dataTable thead td.dt-head-center, table.dataTable tfoot th.dt-head-center, table.dataTable tfoot td.dt-head-center { text-align: center; } table.dataTable thead th.dt-head-right, table.dataTable thead td.dt-head-right, table.dataTable tfoot th.dt-head-right, table.dataTable tfoot td.dt-head-right { text-align: right; } table.dataTable thead th.dt-head-justify, table.dataTable thead td.dt-head-justify, table.dataTable tfoot th.dt-head-justify, table.dataTable tfoot td.dt-head-justify { text-align: justify; } table.dataTable thead th.dt-head-nowrap, table.dataTable thead td.dt-head-nowrap, table.dataTable tfoot th.dt-head-nowrap, table.dataTable tfoot td.dt-head-nowrap { white-space: nowrap; } table.dataTable tbody th.dt-body-left, table.dataTable tbody td.dt-body-left { text-align: left; } table.dataTable tbody 
th.dt-body-center, table.dataTable tbody td.dt-body-center { text-align: center; } table.dataTable tbody th.dt-body-right, table.dataTable tbody td.dt-body-right { text-align: right; } table.dataTable tbody th.dt-body-justify, table.dataTable tbody td.dt-body-justify { text-align: justify; } table.dataTable tbody th.dt-body-nowrap, table.dataTable tbody td.dt-body-nowrap { white-space: nowrap; } table.dataTable, table.dataTable th, table.dataTable td { -webkit-box-sizing: content-box; -moz-box-sizing: content-box; box-sizing: content-box; } /* * Control feature layout */ .dataTables_wrapper { position: relative; clear: both; *zoom: 1; zoom: 1; } .dataTables_wrapper .dataTables_length { float: left; } .dataTables_wrapper .dataTables_filter { float: right; text-align: right; } .dataTables_wrapper .dataTables_filter input { margin-left: 0.5em; } .dataTables_wrapper .dataTables_info { clear: both; float: left; padding-top: 0.55em; } .dataTables_wrapper .dataTables_paginate { float: right; text-align: right; } .dataTables_wrapper .dataTables_paginate .fg-button { box-sizing: border-box; display: inline-block; min-width: 1.5em; padding: 0.5em; margin-left: 2px; text-align: center; text-decoration: none !important; cursor: pointer; *cursor: hand; color: #333 !important; border: 1px solid transparent; } .dataTables_wrapper .dataTables_paginate .fg-button:active { outline: none; } .dataTables_wrapper .dataTables_paginate .fg-button:first-child { border-top-left-radius: 3px; border-bottom-left-radius: 3px; } .dataTables_wrapper .dataTables_paginate .fg-button:last-child { border-top-right-radius: 3px; border-bottom-right-radius: 3px; } .dataTables_wrapper .dataTables_processing { position: absolute; top: 50%; left: 50%; width: 100%; height: 40px; margin-left: -50%; margin-top: -25px; padding-top: 20px; text-align: center; font-size: 1.2em; background-color: white; background: -webkit-gradient(linear, left top, right top, color-stop(0%, rgba(255, 255, 255, 0)), 
color-stop(25%, rgba(255, 255, 255, 0.9)), color-stop(75%, rgba(255, 255, 255, 0.9)), color-stop(100%, rgba(255, 255, 255, 0))); /* Chrome,Safari4+ */ background: -webkit-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%); /* Chrome10+,Safari5.1+ */ background: -moz-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%); /* FF3.6+ */ background: -ms-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%); /* IE10+ */ background: -o-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%); /* Opera 11.10+ */ background: linear-gradient(to right, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%); /* W3C */ } .dataTables_wrapper .dataTables_length, .dataTables_wrapper .dataTables_filter, .dataTables_wrapper .dataTables_info, .dataTables_wrapper .dataTables_processing, .dataTables_wrapper .dataTables_paginate { color: #333; } .dataTables_wrapper .dataTables_scroll { clear: both; } .dataTables_wrapper .dataTables_scrollBody { *margin-top: -1px; -webkit-overflow-scrolling: touch; } .dataTables_wrapper .ui-widget-header { font-weight: normal; } .dataTables_wrapper .ui-toolbar { padding: 8px; } .dataTables_wrapper:after { visibility: hidden; display: block; content: ""; clear: both; height: 0; } @media screen and (max-width: 767px) { .dataTables_wrapper .dataTables_length, .dataTables_wrapper .dataTables_filter, .dataTables_wrapper .dataTables_info, .dataTables_wrapper .dataTables_paginate { float: none; text-align: center; } .dataTables_wrapper .dataTables_filter, .dataTables_wrapper .dataTables_paginate { margin-top: 0.5em; } }
{ "pile_set_name": "Github" }
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head> <title>Acronym element rendering test</title> <!-- Element Definition: Acronym: element contains an acronym --> <style type="text/css"> .text { color: blue } </style> </head> <body> <b><u>Visual Pass Criteria (Expected Rendering):</u></b> <br> <b><i>If the test section (repeated "acronym") is inline and looks like the surrounding content, the test passes</i></b>.<br> Additional possibilities: <ul> <li>dotted or solid underline when TITLE attribute present</li> </ul> <hr> filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text <acronym title="the acronym title">acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym acronym</acronym> filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text filler text <hr> </body> </html>
{ "pile_set_name": "Github" }
{ "name": "memio/memio", "license": "MIT", "type": "library", "description": "A highly opinionated PHP code generator library", "keywords": ["generator", "PHP", "code"], "homepage": "http://memio.github.io/memio", "authors": [ { "name": "Loïc Faugeron", "email": "faugeron.loic@gmail.com", "homepage": "http://gnugat.github.io", "role": "Developer" } ], "autoload": { "psr-4": { "Memio\\Memio\\": "src/Memio/Memio", "Memio\\Memio\\Config\\": "config" } }, "autoload-dev": { "psr-4": { "Memio\\Memio\\Examples\\": "examples" } }, "require": { "memio/linter": "^1.0", "memio/model": "^1.3", "memio/pretty-printer": "^1.0", "memio/twig-template-engine": "^1.2", "memio/validator": "^1.0", "php": "^5.3|^7.0" }, "require-dev": { "fabpot/php-cs-fixer": "^1.6", "phpunit/phpunit": "^4.6|^5.0" } }
{ "pile_set_name": "Github" }
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.response.AlipayResponse import AlipayResponse class AlipayOpenPublicTemplateMessageIndustryModifyResponse(AlipayResponse): def __init__(self): super(AlipayOpenPublicTemplateMessageIndustryModifyResponse, self).__init__() def parse_response_content(self, response_content): response = super(AlipayOpenPublicTemplateMessageIndustryModifyResponse, self).parse_response_content(response_content)
{ "pile_set_name": "Github" }
//////////////////////////////////////////////////////////////////////////////// /// DISCLAIMER /// /// Copyright 2014-2020 ArangoDB GmbH, Cologne, Germany /// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// http://www.apache.org/licenses/LICENSE-2.0 /// /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. /// /// Copyright holder is ArangoDB GmbH, Cologne, Germany /// /// @author Dr. Frank Celler //////////////////////////////////////////////////////////////////////////////// #include "DaemonFeature.h" #include <errno.h> #include <fcntl.h> #include <stdlib.h> #include <sys/stat.h> #include <chrono> #include <stdexcept> #include <thread> #include "ApplicationFeatures/ApplicationServer.h" #include "ApplicationFeatures/GreetingsFeaturePhase.h" #include "Basics/Exceptions.h" #include "Basics/FileResult.h" #include "Basics/FileResultString.h" #include "Basics/FileUtils.h" #include "Basics/StringUtils.h" #include "Basics/application-exit.h" #include "Basics/debugging.h" #include "Basics/files.h" #include "Basics/operating-system.h" #include "Basics/process-utils.h" #include "Basics/system-functions.h" #include "Basics/threads.h" #include "Logger/LogAppender.h" #include "Logger/LogMacros.h" #include "Logger/Logger.h" #include "Logger/LoggerFeature.h" #include "Logger/LoggerStream.h" #include "ProgramOptions/Option.h" #include "ProgramOptions/Parameters.h" #include "ProgramOptions/ProgramOptions.h" #ifdef TRI_HAVE_SIGNAL_H #include <signal.h> #endif #ifdef TRI_HAVE_SYS_WAIT_H #include <sys/wait.h> #endif #ifdef 
TRI_HAVE_UNISTD_H #include <unistd.h> #endif using namespace arangodb::application_features; using namespace arangodb::basics; using namespace arangodb::options; namespace arangodb { DaemonFeature::DaemonFeature(application_features::ApplicationServer& server) : ApplicationFeature(server, "Daemon") { setOptional(true); startsAfter<GreetingsFeaturePhase>(); #ifndef _WIN32 _workingDirectory = "/var/tmp"; #endif } void DaemonFeature::collectOptions(std::shared_ptr<ProgramOptions> options) { options->addOption("--daemon", "background the server, running it as daemon", new BooleanParameter(&_daemon), arangodb::options::makeFlags(arangodb::options::Flags::DefaultNoOs, arangodb::options::Flags::OsLinux, arangodb::options::Flags::OsMac, arangodb::options::Flags::Hidden)); options->addOption("--pid-file", "pid-file in daemon mode", new StringParameter(&_pidFile), arangodb::options::makeFlags(arangodb::options::Flags::DefaultNoOs, arangodb::options::Flags::OsLinux, arangodb::options::Flags::OsMac, arangodb::options::Flags::Hidden)); options->addOption("--working-directory", "working directory in daemon mode", new StringParameter(&_workingDirectory), arangodb::options::makeFlags(arangodb::options::Flags::DefaultNoOs, arangodb::options::Flags::OsLinux, arangodb::options::Flags::OsMac, arangodb::options::Flags::Hidden)); } void DaemonFeature::validateOptions(std::shared_ptr<ProgramOptions> options) { if (!_daemon) { return; } if (_pidFile.empty()) { LOG_TOPIC("9d6ba", FATAL, arangodb::Logger::FIXME) << "need --pid-file in --daemon mode"; FATAL_ERROR_EXIT(); } LoggerFeature& logger = server().getFeature<LoggerFeature>(); logger.setBackgrounded(true); // make the pid filename absolute std::string currentDir = FileUtils::currentDirectory().result(); std::string absoluteFile = TRI_GetAbsolutePath(_pidFile, currentDir); if (!absoluteFile.empty()) { _pidFile = absoluteFile; LOG_TOPIC("79662", DEBUG, arangodb::Logger::FIXME) << "using absolute pid file '" << _pidFile << "'"; } else { 
LOG_TOPIC("24de9", FATAL, arangodb::Logger::FIXME) << "cannot determine absolute path"; FATAL_ERROR_EXIT(); } } void DaemonFeature::daemonize() { LOG_TOPIC("71164", TRACE, Logger::STARTUP) << name() << "::daemonize"; if (!_daemon) { return; } LOG_TOPIC("480d4", INFO, Logger::STARTUP) << "starting up in daemon mode"; checkPidFile(); int pid = forkProcess(); // main process if (pid != 0) { TRI_SetProcessTitle("arangodb [daemon]"); writePidFile(pid); int result = waitForChildProcess(pid); exit(result); } // child process else { LOG_TOPIC("0b126", DEBUG, Logger::STARTUP) << "daemon mode continuing within child"; } } void DaemonFeature::unprepare() { if (!_daemon) { return; } // remove pid file if (!FileUtils::remove(_pidFile)) { LOG_TOPIC("1b46c", ERR, arangodb::Logger::FIXME) << "cannot remove pid file '" << _pidFile << "'"; } } void DaemonFeature::checkPidFile() { // check if the pid-file exists if (!_pidFile.empty()) { if (FileUtils::isDirectory(_pidFile)) { LOG_TOPIC("6b3c0", FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile << "' is a directory"; FATAL_ERROR_EXIT(); } else if (FileUtils::exists(_pidFile) && FileUtils::size(_pidFile) > 0) { LOG_TOPIC("cf10a", INFO, Logger::STARTUP) << "pid-file '" << _pidFile << "' already exists, verifying pid"; std::string oldPidS; try { oldPidS = arangodb::basics::FileUtils::slurp(_pidFile); } catch (arangodb::basics::Exception const& ex) { LOG_TOPIC("4aadd", FATAL, arangodb::Logger::FIXME) << "Couldn't read PID file '" << _pidFile << "' - " << ex.what(); FATAL_ERROR_EXIT(); } basics::StringUtils::trimInPlace(oldPidS); if (!oldPidS.empty()) { TRI_pid_t oldPid; try { oldPid = std::stol(oldPidS); } catch (std::invalid_argument const& ex) { LOG_TOPIC("bd20c", FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile << "' doesn't contain a number."; FATAL_ERROR_EXIT(); } if (oldPid == 0) { LOG_TOPIC("aef5d", FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile << "' is unreadable"; FATAL_ERROR_EXIT(); } 
LOG_TOPIC("ecac1", DEBUG, Logger::STARTUP) << "found old pid: " << oldPid; int r = kill(oldPid, 0); if (r == 0 || errno == EPERM) { LOG_TOPIC("5fa62", FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile << "' exists and process with pid " << oldPid << " is still running, refusing to start twice"; FATAL_ERROR_EXIT(); } else if (errno == ESRCH) { LOG_TOPIC("a9576", ERR, Logger::STARTUP) << "pid-file '" << _pidFile << " exists, but no process with pid " << oldPid << " exists"; if (!FileUtils::remove(_pidFile)) { LOG_TOPIC("fddfc", FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile << "' exists, no process with pid " << oldPid << " exists, but pid-file cannot be removed"; FATAL_ERROR_EXIT(); } LOG_TOPIC("1f3e6", INFO, Logger::STARTUP) << "removed stale pid-file '" << _pidFile << "'"; } else { LOG_TOPIC("180c0", FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile << "' exists and kill " << oldPid << " failed"; FATAL_ERROR_EXIT(); } } // failed to open file else { LOG_TOPIC("ab3fe", FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile << "' exists, but cannot be opened"; FATAL_ERROR_EXIT(); } } LOG_TOPIC("1589d", DEBUG, Logger::STARTUP) << "using pid-file '" << _pidFile << "'"; } } int DaemonFeature::forkProcess() { // fork off the parent process TRI_pid_t pid = fork(); if (pid < 0) { LOG_TOPIC("fd0f9", FATAL, arangodb::Logger::FIXME) << "cannot fork"; FATAL_ERROR_EXIT(); } // Upon successful completion, fork() shall return 0 to the child process and // shall return the process ID of the child process to the parent process. 
// if we got a good PID, then we can exit the parent process if (pid > 0) { LOG_TOPIC("89e55", DEBUG, Logger::STARTUP) << "started child process with pid " << pid; return pid; } TRI_ASSERT(pid == 0); // we are in the child // child LogAppender::allowStdLogging(false); Logger::clearCachedPid(); // change the file mode mask umask(0); // create a new SID for the child process TRI_pid_t sid = setsid(); if (sid < 0) { LOG_TOPIC("e9093", FATAL, arangodb::Logger::FIXME) << "cannot create sid"; FATAL_ERROR_EXIT(); } // store current working directory FileResultString cwd = FileUtils::currentDirectory(); if (!cwd.ok()) { LOG_TOPIC("a681c", FATAL, arangodb::Logger::FIXME) << "cannot get current directory: " << cwd.errorMessage(); FATAL_ERROR_EXIT(); } _current = cwd.result(); // change the current working directory if (!_workingDirectory.empty()) { FileResult res = FileUtils::changeDirectory(_workingDirectory); if (!res.ok()) { LOG_TOPIC("d9f9d", FATAL, arangodb::Logger::STARTUP) << "cannot change into working directory '" << _workingDirectory << "': " << res.errorMessage(); FATAL_ERROR_EXIT(); } else { LOG_TOPIC("ae8be", INFO, arangodb::Logger::STARTUP) << "changed working directory for child process to '" << _workingDirectory << "'"; } } remapStandardFileDescriptors(); return pid; } void DaemonFeature::remapStandardFileDescriptors() { // we're a daemon so there won't be a terminal attached // close the standard file descriptors and re-open them mapped to /dev/null // close all descriptors for (int i = getdtablesize(); i >= 0; --i) { close(i); } // open fd /dev/null int fd = open("/dev/null", O_RDWR | O_CREAT, 0644); if (fd < 0) { LOG_TOPIC("92755", FATAL, arangodb::Logger::FIXME) << "cannot open /dev/null"; FATAL_ERROR_EXIT(); } // the following calls silently close and repoen the given fds // to avoid concurrency issues if (dup2(fd, STDIN_FILENO) != STDIN_FILENO) { LOG_TOPIC("3d2ca", FATAL, arangodb::Logger::FIXME) << "cannot re-map stdin to /dev/null"; 
FATAL_ERROR_EXIT(); } if (dup2(fd, STDOUT_FILENO) != STDOUT_FILENO) { LOG_TOPIC("4d834", FATAL, arangodb::Logger::FIXME) << "cannot re-map stdout to /dev/null"; FATAL_ERROR_EXIT(); } if (dup2(fd, STDERR_FILENO) != STDERR_FILENO) { LOG_TOPIC("39cac", FATAL, arangodb::Logger::FIXME) << "cannot re-map stderr to /dev/null"; FATAL_ERROR_EXIT(); } // Do not close one of the recently opened fds if (fd > 2) { close(fd); } } void DaemonFeature::writePidFile(int pid) { try { arangodb::basics::FileUtils::spit(_pidFile, std::to_string(pid), true); } catch (arangodb::basics::Exception const& ex) { LOG_TOPIC("c2741", FATAL, arangodb::Logger::FIXME) << "cannot write pid-file '" << _pidFile << "' - " << ex.what(); } } int DaemonFeature::waitForChildProcess(int pid) { if (!isatty(STDIN_FILENO)) { // during system boot, we don't have a tty, and we don't want to delay // the boot process return EXIT_SUCCESS; } // in case a tty is present, this is probably a manual invocation of the start // procedure double const end = TRI_microtime() + 10.0; while (TRI_microtime() < end) { int status; int res = waitpid(pid, &status, WNOHANG); if (res == -1) { // error in waitpid. don't know what to do break; } if (res != 0 && WIFEXITED(status)) { // give information about supervisor exit code if (WEXITSTATUS(status) == 0) { // exit code 0 return EXIT_SUCCESS; } else if (WIFSIGNALED(status)) { switch (WTERMSIG(status)) { case 2: case 9: case 15: // terminated normally return EXIT_SUCCESS; default: break; } } // failure! LOG_TOPIC("dce6d", ERR, arangodb::Logger::FIXME) << "unable to start arangod. please check the logfiles for errors"; return EXIT_FAILURE; } // sleep a while and retry std::this_thread::sleep_for(std::chrono::milliseconds(500)); } // enough time has elapsed... we now abort our loop return EXIT_SUCCESS; } } // namespace arangodb
{ "pile_set_name": "Github" }
/*************************************************************************** * __________ __ ___. * Open \______ \ ____ ____ | | _\_ |__ _______ ___ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ * \/ \/ \/ \/ \/ * $Id$ * * Copyright (C) 2006-2007 Adam Gashlin (hcs) * Copyright (C) 2004-2007 Shay Green (blargg) * Copyright (C) 2002 Brad Martin * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ****************************************************************************/ static inline int apply_gen_envx( struct voice_t* voice, int output ) { return (output * voice->envx) >> 11; } static inline int apply_gen_volume( struct voice_t* voice, int output, int* amp_0, int* amp_1 ) { *amp_0 = voice->volume [0] * output; *amp_1 = voice->volume [1] * output; return output; } static inline int apply_gen_amp( struct voice_t* voice, int output, int* amp_0, int* amp_1) { output = apply_gen_envx( voice, output ); output = apply_gen_volume( voice, output, amp_0, amp_1 ); return output; } #if !SPC_NOINTERP #ifndef SPC_GAUSSIAN_FAST_INTERP static inline int gaussian_fast_interp( int16_t const* samples, int32_t position, int16_t const* fwd, int16_t const* rev ) { samples += position >> 12; return (fwd [0] * samples [0] + fwd [1] * samples [1] + rev [1] * samples [2] + rev [0] * samples [3]) >> 11; } #endif /* SPC_GAUSSIAN_FAST_INTERP */ #ifndef SPC_GAUSSIAN_FAST_AMP #define gaussian_fast_amp apply_amp #endif /* SPC_GAUSSIAN_FAST_AMP */ #ifndef SPC_GAUSSIAN_SLOW_INTERP static inline int gaussian_slow_interp( int16_t const* samples, int32_t position, int16_t const* fwd, 
int16_t const* rev ) { int output; samples += position >> 12; output = (fwd [0] * samples [0]) & ~0xFFF; output = (output + fwd [1] * samples [1]) & ~0xFFF; output = (output + rev [1] * samples [2]) >> 12; output = (int16_t) (output * 2); output += ((rev [0] * samples [3]) >> 12) * 2; return CLAMP16( output ); } #endif /* SPC_GAUSSIAN_SLOW_INTERP */ #ifndef SPC_GAUSSIAN_SLOW_AMP static inline int gaussian_slow_amp( struct voice_t* voice, int output, int *amp_0, int *amp_1 ) { output = apply_gen_envx( voice, output ) & ~1; output = apply_gen_volume( voice, output, amp_0, amp_1 ); return output; } #endif /* SPC_GAUSSIAN_SLOW_AMP */ #define interp gaussian_slow_interp #define apply_amp gaussian_slow_amp #else /* SPC_NOINTERP */ #ifndef SPC_LINEAR_INTERP static inline int linear_interp( int16_t const* samples, int32_t position ) { int32_t fraction = position & 0xfff; int16_t const* pos = (samples + (position >> 12)) + 1; return pos[0] + ((fraction * (pos[1] - pos[0])) >> 12); } #endif /* SPC_LINEAR_INTERP */ #define interp( samp, pos, fwd, rev ) \ linear_interp( (samp), (pos) ) #ifndef SPC_LINEAR_AMP #define linear_amp apply_gen_amp #endif /* SPC_LINEAR_AMP */ #define apply_amp linear_amp #endif /* SPC_NOINTERP */ #if !SPC_NOECHO #ifndef SPC_DSP_ECHO_APPLY /* Init FIR filter */ static inline void echo_init( struct Spc_Dsp* this ) { this->fir.pos = 0; ci->memset( this->fir.buf, 0, sizeof this->fir.buf ); } /* Apply FIR filter */ static inline void echo_apply(struct Spc_Dsp* this, uint8_t* const echo_ptr, int* out_0, int* out_1) { int fb_0 = GET_LE16SA( echo_ptr ); int fb_1 = GET_LE16SA( echo_ptr + 2 ); /* Keep last 8 samples */ int (* const fir_ptr) [2] = this->fir.buf + this->fir.pos; this->fir.pos = (this->fir.pos + 1) & (FIR_BUF_HALF - 1); fir_ptr [ 0] [0] = fb_0; fir_ptr [ 0] [1] = fb_1; /* duplicate at +8 eliminates wrap checking below */ fir_ptr [FIR_BUF_HALF] [0] = fb_0; fir_ptr [FIR_BUF_HALF] [1] = fb_1; fb_0 *= this->fir.coeff [0]; fb_1 *= this->fir.coeff [0]; 
#define DO_PT( i ) \ fb_0 += fir_ptr [i] [0] * this->fir.coeff [i]; \ fb_1 += fir_ptr [i] [1] * this->fir.coeff [i]; DO_PT( 1 ) DO_PT( 2 ) DO_PT( 3 ) DO_PT( 4 ) DO_PT( 5 ) DO_PT( 6 ) DO_PT( 7 ) #undef DO_PT *out_0 = fb_0; *out_1 = fb_1; } #endif /* SPC_DSP_ECHO_APPLY */ #ifndef SPC_DSP_ECHO_FEEDBACK /* Feedback into echo buffer */ static inline void echo_feedback( struct Spc_Dsp* this, uint8_t *echo_ptr, int echo_0, int echo_1, int fb_0, int fb_1 ) { int e0 = (echo_0 >> 7) + ((fb_0 * this->r.g.echo_feedback) >> 14); int e1 = (echo_1 >> 7) + ((fb_1 * this->r.g.echo_feedback) >> 14); e0 = CLAMP16( e0 ); SET_LE16A( echo_ptr , e0 ); e1 = CLAMP16( e1 ); SET_LE16A( echo_ptr + 2, e1 ); } #endif /* SPC_DSP_ECHO_FEEDBACK */ #ifndef SPC_DSP_GENERATE_OUTPUT /* Generate final output */ static inline void echo_output( struct Spc_Dsp* this, int global_muting, int global_vol_0, int global_vol_1, int chans_0, int chans_1, int fb_0, int fb_1, int* out_0, int* out_1 ) { *out_0 = (chans_0 * global_vol_0 + fb_0 * this->r.g.echo_volume_0) >> global_muting; *out_1 = (chans_1 * global_vol_1 + fb_1 * this->r.g.echo_volume_1) >> global_muting; } #endif /* SPC_DSP_GENERATE_OUTPUT */ #define mix_output echo_output #else /* SPC_NOECHO */ #ifndef SPC_DSP_GENERATE_OUTPUT /* Generate final output */ static inline void noecho_output( struct Spc_Dsp* this, int global_muting, int global_vol_0, int global_vol_1, int chans_0, int chans_1, int* out_0, int* out_1 ) { *out_0 = (chans_0 * global_vol_0) >> global_muting; *out_1 = (chans_1 * global_vol_1) >> global_muting; (void)this; } #endif /* SPC_DSP_GENERATE_OUTPUT */ #define mix_output(this, gm, gv0, gv1, ch0, ch1, fb_0, fb_1, o0, o1) \ noecho_output( (this), (gm), (gv0), (gv1), (ch0), (ch1), (o0), (o1) ) #endif /* !SPC_NOECHO */
{ "pile_set_name": "Github" }
#include <list> #include <vector> #include <string> #include "sentence.h" #include "tag.h" #ifndef __CORPORA_IO_H #define __CORPORA_IO_H typedef std::vector<Sentence> SentenceCollection; void readCorpus(const std::string &fn, SentenceCollection &sc); std::set<MorphInterp> makeVariants(const std::string &s); #endif
{ "pile_set_name": "Github" }
name=Valley Dasher image=https://magiccards.info/scans/en/ktk/125.jpg value=2.750 rarity=C type=Creature subtype=Human,Berserker cost={1}{R} pt=2/2 ability=Haste;\ SN attacks each combat if able. timing=fmain oracle=Haste\nValley Dasher attacks each combat if able.
{ "pile_set_name": "Github" }
<?php // This file was auto-generated from sdk-root/src/data/elasticmapreduce/2009-03-31/examples-1.json return [ 'version' => '1.0', 'examples' => [],];
{ "pile_set_name": "Github" }
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package reflect is a fork of go's standard library reflection package, which // allows for deep equal with equality functions defined. package reflect import ( "fmt" "reflect" "strings" ) // Equalities is a map from type to a function comparing two values of // that type. type Equalities map[reflect.Type]reflect.Value // For convenience, panics on errrors func EqualitiesOrDie(funcs ...interface{}) Equalities { e := Equalities{} if err := e.AddFuncs(funcs...); err != nil { panic(err) } return e } // AddFuncs is a shortcut for multiple calls to AddFunc. func (e Equalities) AddFuncs(funcs ...interface{}) error { for _, f := range funcs { if err := e.AddFunc(f); err != nil { return err } } return nil } // AddFunc uses func as an equality function: it must take // two parameters of the same type, and return a boolean. func (e Equalities) AddFunc(eqFunc interface{}) error { fv := reflect.ValueOf(eqFunc) ft := fv.Type() if ft.Kind() != reflect.Func { return fmt.Errorf("expected func, got: %v", ft) } if ft.NumIn() != 2 { return fmt.Errorf("expected two 'in' params, got: %v", ft) } if ft.NumOut() != 1 { return fmt.Errorf("expected one 'out' param, got: %v", ft) } if ft.In(0) != ft.In(1) { return fmt.Errorf("expected arg 1 and 2 to have same type, but got %v", ft) } var forReturnType bool boolType := reflect.TypeOf(forReturnType) if ft.Out(0) != boolType { return fmt.Errorf("expected bool return, got: %v", ft) } e[ft.In(0)] = fv return nil } // Below here is forked from go's reflect/deepequal.go // During deepValueEqual, must keep track of checks that are // in progress. The comparison algorithm assumes that all // checks in progress are true when it reencounters them. // Visited comparisons are stored in a map indexed by visit. 
type visit struct { a1 uintptr a2 uintptr typ reflect.Type } // unexportedTypePanic is thrown when you use this DeepEqual on something that has an // unexported type. It indicates a programmer error, so should not occur at runtime, // which is why it's not public and thus impossible to catch. type unexportedTypePanic []reflect.Type func (u unexportedTypePanic) Error() string { return u.String() } func (u unexportedTypePanic) String() string { strs := make([]string, len(u)) for i, t := range u { strs[i] = fmt.Sprintf("%v", t) } return "an unexported field was encountered, nested like this: " + strings.Join(strs, " -> ") } func makeUsefulPanic(v reflect.Value) { if x := recover(); x != nil { if u, ok := x.(unexportedTypePanic); ok { u = append(unexportedTypePanic{v.Type()}, u...) x = u } panic(x) } } // Tests for deep equality using reflected types. The map argument tracks // comparisons that have already been seen, which allows short circuiting on // recursive types. func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { defer makeUsefulPanic(v1) if !v1.IsValid() || !v2.IsValid() { return v1.IsValid() == v2.IsValid() } if v1.Type() != v2.Type() { return false } if fv, ok := e[v1.Type()]; ok { return fv.Call([]reflect.Value{v1, v2})[0].Bool() } hard := func(k reflect.Kind) bool { switch k { case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: return true } return false } if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { addr1 := v1.UnsafeAddr() addr2 := v2.UnsafeAddr() if addr1 > addr2 { // Canonicalize order to reduce number of entries in visited. addr1, addr2 = addr2, addr1 } // Short circuit if references are identical ... if addr1 == addr2 { return true } // ... or already seen typ := v1.Type() v := visit{addr1, addr2, typ} if visited[v] { return true } // Remember for later. 
visited[v] = true } switch v1.Kind() { case reflect.Array: // We don't need to check length here because length is part of // an array's type, which has already been filtered for. for i := 0; i < v1.Len(); i++ { if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { return false } } return true case reflect.Slice: if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { return false } if v1.IsNil() || v1.Len() == 0 { return true } if v1.Len() != v2.Len() { return false } if v1.Pointer() == v2.Pointer() { return true } for i := 0; i < v1.Len(); i++ { if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { return false } } return true case reflect.Interface: if v1.IsNil() || v2.IsNil() { return v1.IsNil() == v2.IsNil() } return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) case reflect.Ptr: return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) case reflect.Struct: for i, n := 0, v1.NumField(); i < n; i++ { if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) { return false } } return true case reflect.Map: if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { return false } if v1.IsNil() || v1.Len() == 0 { return true } if v1.Len() != v2.Len() { return false } if v1.Pointer() == v2.Pointer() { return true } for _, k := range v1.MapKeys() { if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { return false } } return true case reflect.Func: if v1.IsNil() && v2.IsNil() { return true } // Can't do better than this: return false default: // Normal equality suffices if !v1.CanInterface() || !v2.CanInterface() { panic(unexportedTypePanic{}) } return v1.Interface() == v2.Interface() } } // DeepEqual is like reflect.DeepEqual, but focused on semantic equality // instead of memory equality. // // It will use e's equality functions if it finds types that match. // // An empty slice *is* equal to a nil slice for our purposes; same for maps. 
// // Unexported field members cannot be compared and will cause an imformative panic; you must add an Equality // function for these types. func (e Equalities) DeepEqual(a1, a2 interface{}) bool { if a1 == nil || a2 == nil { return a1 == a2 } v1 := reflect.ValueOf(a1) v2 := reflect.ValueOf(a2) if v1.Type() != v2.Type() { return false } return e.deepValueEqual(v1, v2, make(map[visit]bool), 0) } func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { defer makeUsefulPanic(v1) if !v1.IsValid() || !v2.IsValid() { return v1.IsValid() == v2.IsValid() } if v1.Type() != v2.Type() { return false } if fv, ok := e[v1.Type()]; ok { return fv.Call([]reflect.Value{v1, v2})[0].Bool() } hard := func(k reflect.Kind) bool { switch k { case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: return true } return false } if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { addr1 := v1.UnsafeAddr() addr2 := v2.UnsafeAddr() if addr1 > addr2 { // Canonicalize order to reduce number of entries in visited. addr1, addr2 = addr2, addr1 } // Short circuit if references are identical ... if addr1 == addr2 { return true } // ... or already seen typ := v1.Type() v := visit{addr1, addr2, typ} if visited[v] { return true } // Remember for later. visited[v] = true } switch v1.Kind() { case reflect.Array: // We don't need to check length here because length is part of // an array's type, which has already been filtered for. 
for i := 0; i < v1.Len(); i++ { if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) { return false } } return true case reflect.Slice: if v1.IsNil() || v1.Len() == 0 { return true } if v1.Len() > v2.Len() { return false } if v1.Pointer() == v2.Pointer() { return true } for i := 0; i < v1.Len(); i++ { if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) { return false } } return true case reflect.String: if v1.Len() == 0 { return true } if v1.Len() > v2.Len() { return false } return v1.String() == v2.String() case reflect.Interface: if v1.IsNil() { return true } return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1) case reflect.Ptr: if v1.IsNil() { return true } return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1) case reflect.Struct: for i, n := 0, v1.NumField(); i < n; i++ { if !e.deepValueDerive(v1.Field(i), v2.Field(i), visited, depth+1) { return false } } return true case reflect.Map: if v1.IsNil() || v1.Len() == 0 { return true } if v1.Len() > v2.Len() { return false } if v1.Pointer() == v2.Pointer() { return true } for _, k := range v1.MapKeys() { if !e.deepValueDerive(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { return false } } return true case reflect.Func: if v1.IsNil() && v2.IsNil() { return true } // Can't do better than this: return false default: // Normal equality suffices if !v1.CanInterface() || !v2.CanInterface() { panic(unexportedTypePanic{}) } return v1.Interface() == v2.Interface() } } // DeepDerivative is similar to DeepEqual except that unset fields in a1 are // ignored (not compared). This allows us to focus on the fields that matter to // the semantic comparison. // // The unset fields include a nil pointer and an empty string. func (e Equalities) DeepDerivative(a1, a2 interface{}) bool { if a1 == nil { return true } v1 := reflect.ValueOf(a1) v2 := reflect.ValueOf(a2) if v1.Type() != v2.Type() { return false } return e.deepValueDerive(v1, v2, make(map[visit]bool), 0) }
{ "pile_set_name": "Github" }
package typings.devtoolsProtocol.anon import typings.devtoolsProtocol.mod.Protocol.Debugger.SetScriptSourceRequest import typings.devtoolsProtocol.mod.Protocol.Debugger.SetScriptSourceResponse import scala.scalajs.js import scala.scalajs.js.`|` import scala.scalajs.js.annotation._ @js.native trait ReturnTypeSetScriptSourceResponse extends js.Object { var paramsType: js.Array[SetScriptSourceRequest] = js.native var returnType: SetScriptSourceResponse = js.native } object ReturnTypeSetScriptSourceResponse { @scala.inline def apply(paramsType: js.Array[SetScriptSourceRequest], returnType: SetScriptSourceResponse): ReturnTypeSetScriptSourceResponse = { val __obj = js.Dynamic.literal(paramsType = paramsType.asInstanceOf[js.Any], returnType = returnType.asInstanceOf[js.Any]) __obj.asInstanceOf[ReturnTypeSetScriptSourceResponse] } @scala.inline implicit class ReturnTypeSetScriptSourceResponseOps[Self <: ReturnTypeSetScriptSourceResponse] (val x: Self) extends AnyVal { @scala.inline def duplicate: Self = (js.Dynamic.global.Object.assign(js.Dynamic.literal(), x)).asInstanceOf[Self] @scala.inline def combineWith[Other <: js.Any](other: Other): Self with Other = (js.Dynamic.global.Object.assign(js.Dynamic.literal(), x, other.asInstanceOf[js.Any])).asInstanceOf[Self with Other] @scala.inline def set(key: String, value: js.Any): Self = { x.asInstanceOf[js.Dynamic].updateDynamic(key)(value) x } @scala.inline def setParamsTypeVarargs(value: SetScriptSourceRequest*): Self = this.set("paramsType", js.Array(value :_*)) @scala.inline def setParamsType(value: js.Array[SetScriptSourceRequest]): Self = this.set("paramsType", value.asInstanceOf[js.Any]) @scala.inline def setReturnType(value: SetScriptSourceResponse): Self = this.set("returnType", value.asInstanceOf[js.Any]) } }
{ "pile_set_name": "Github" }
<?php class test { function hdlr($errno, $errstr, $errfile, $errline) { printf("[%d] errstr: %s, errfile: %s, errline: %d\n", $errno, $errstr, $errfile, $errline, $errstr); } } set_error_handler(array(new test(), "hdlr")); trigger_error("test"); ?>
{ "pile_set_name": "Github" }
#!/bin/sh # # Controls lldpd. # case $1 in start) printf "Starting lldpd: " start-stop-daemon -S -q -p /var/run/lldpd.pid --exec /usr/sbin/lldpd [ $? = 0 ] && echo "OK" || echo "FAIL" ;; stop) printf "Stopping lldpd: " start-stop-daemon -K -q -p /var/run/lldpd.pid [ $? = 0 ] && echo "OK" || echo "FAIL" ;; restart) $0 stop $0 start ;; *) echo "Usage: $0 {start|stop|restart}" exit 1 ;; esac
{ "pile_set_name": "Github" }
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/fsnotify_backend.h> #include <linux/path.h> #include <linux/slab.h> extern struct kmem_cache *fanotify_mark_cache; extern struct kmem_cache *fanotify_event_cachep; extern struct kmem_cache *fanotify_perm_event_cachep; /* * Structure for normal fanotify events. It gets allocated in * fanotify_handle_event() and freed when the information is retrieved by * userspace */ struct fanotify_event_info { struct fsnotify_event fse; /* * We hold ref to this path so it may be dereferenced at any point * during this object's lifetime */ struct path path; struct pid *tgid; }; #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS /* * Structure for permission fanotify events. It gets allocated and freed in * fanotify_handle_event() since we wait there for user response. When the * information is retrieved by userspace the structure is moved from * group->notification_list to group->fanotify_data.access_list to wait for * user response. */ struct fanotify_perm_event_info { struct fanotify_event_info fae; int response; /* userspace answer to question */ int fd; /* fd we passed to userspace for this event */ }; static inline struct fanotify_perm_event_info * FANOTIFY_PE(struct fsnotify_event *fse) { return container_of(fse, struct fanotify_perm_event_info, fae.fse); } #endif static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse) { return container_of(fse, struct fanotify_event_info, fse); } struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask, const struct path *path);
{ "pile_set_name": "Github" }
Name: presburger Version: 1.3.1 License: BSD3 License-file: LICENSE Author: Iavor S. Diatchki Homepage: http://github.com/yav/presburger Maintainer: diatchki@galois.com Category: Algorithms Synopsis: A decision procedure for quantifier-free linear arithmetic. Description: The decision procedure is based on the algorithm used in CVC4, which is itself based on the Omega test. Build-type: Simple Cabal-version: >= 1.8 library Build-Depends: base < 10, containers, pretty hs-source-dirs: src Exposed-modules: Data.Integer.SAT GHC-options: -O2 -Wall source-repository head type: git location: git://github.com/yav/presburger.git Test-Suite pressburger-qc-tests type: exitcode-stdio-1.0 hs-source-dirs: tests main-is: qc.hs build-depends: base, presburger == 1.3.1, QuickCheck
{ "pile_set_name": "Github" }
package drivers import ( "bytes" "crypto/md5" "crypto/sha1" "crypto/sha256" "crypto/sha512" "encoding/hex" "fmt" "hash" "io" "io/ioutil" "net/http" "net/url" "os" "os/exec" "path" "path/filepath" "strings" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type BaseDriver struct { Builtin bool URL string DriverHash string DriverName string BinaryPrefix string } func (d *BaseDriver) Name() string { return d.DriverName } func (d *BaseDriver) Hash() string { return d.DriverHash } func (d *BaseDriver) Checksum() string { return d.DriverName } func (d *BaseDriver) FriendlyName() string { return strings.TrimPrefix(d.DriverName, d.BinaryPrefix) } func (d *BaseDriver) Remove() error { cacheFilePrefix := d.cacheFile() content, err := ioutil.ReadFile(cacheFilePrefix) if os.IsNotExist(err) { return nil } if err != nil { return err } dest := path.Join(binDir(), string(content)) os.Remove(dest) os.Remove(cacheFilePrefix + "-" + string(content)) os.Remove(cacheFilePrefix) return nil } func (d *BaseDriver) Stage(forceUpdate bool) error { if err := d.getError(); err != nil { return err } return d.setError(d.stage(forceUpdate)) } func (d *BaseDriver) setError(err error) error { errFile := d.cacheFile() + ".error" if err != nil { os.MkdirAll(path.Dir(errFile), 0700) ioutil.WriteFile(errFile, []byte(err.Error()), 0600) } return err } func (d *BaseDriver) getError() error { errFile := d.cacheFile() + ".error" if content, err := ioutil.ReadFile(errFile); err == nil { logrus.Errorf("Returning previous error: %s", content) d.ClearError() return errors.New(string(content)) } return nil } func (d *BaseDriver) ClearError() { errFile := d.cacheFile() + ".error" os.Remove(errFile) } func (d *BaseDriver) stage(forceUpdate bool) error { if d.Builtin { return nil } cacheFilePrefix := d.cacheFile() driverName, err := isInstalled(cacheFilePrefix) if !forceUpdate && err != nil || driverName != "" { d.DriverName = driverName return err } tempFile, err := ioutil.TempFile("", "machine-driver") if 
err != nil { return err } defer os.Remove(tempFile.Name()) defer tempFile.Close() hasher, err := getHasher(d.DriverHash) if err != nil { return err } downloadDest := io.Writer(tempFile) if hasher != nil { downloadDest = io.MultiWriter(tempFile, hasher) } if err := d.download(downloadDest); err != nil { return err } if got, ok := compare(hasher, d.DriverHash); !ok { return fmt.Errorf("hash does not match, got %s, expected %s", got, d.DriverHash) } if err := tempFile.Close(); err != nil { return err } driverName, err = d.copyBinary(cacheFilePrefix, tempFile.Name()) if err != nil { return err } d.DriverName = driverName return nil } // Exists will return true if the executable binary for the driver can be found // and the cache file exists (in case of upgrades the binary will match but // the cache will not yet exist) func (d *BaseDriver) Exists() bool { if d.DriverName == "" { return false } if d.Builtin { return true } _, err := os.Stat(d.binName()) if err == nil { // The executable is there but do it come from the right version? 
_, err = os.Stat(d.srcBinName()) } return err == nil } func isElf(input string) bool { f, err := os.Open(input) if err != nil { return false } defer f.Close() elf := make([]byte, 4) if _, err := f.Read(elf); err != nil { return false } //support unix binary and mac-os binary mach-o return bytes.Compare(elf, []byte{0x7f, 0x45, 0x4c, 0x46}) == 0 || bytes.Compare(elf, []byte{0xcf, 0xfa, 0xed, 0xfe}) == 0 } func (d *BaseDriver) copyBinary(cacheFile, input string) (string, error) { temp, err := ioutil.TempDir("", "machine-driver-extract") if err != nil { return "", err } defer os.RemoveAll(temp) file := "" driverName := "" if isElf(input) { file = input u, err := url.Parse(d.URL) if err != nil { return "", err } if !strings.HasPrefix(path.Base(u.Path), d.BinaryPrefix) { return "", fmt.Errorf("invalid URL %s, path should be of the format %s*", d.URL, d.BinaryPrefix) } s := strings.TrimPrefix(path.Base(u.Path), d.BinaryPrefix) name := strings.FieldsFunc(s, func(r rune) bool { return r == '-' || r == '_' || r == '.' })[0] if name == "" { return "", fmt.Errorf("invalid URL %s, NAME is empty, path should be of the format %sNAME", d.URL, d.BinaryPrefix) } driverName = d.BinaryPrefix + name } else { if err := exec.Command("tar", "xvf", input, "-C", temp).Run(); err != nil { if err := exec.Command("unzip", "-o", input, "-d", temp).Run(); err != nil { return "", fmt.Errorf("failed to extract") } } } filepath.Walk(temp, filepath.WalkFunc(func(p string, info os.FileInfo, err error) error { if info.IsDir() { return nil } if strings.HasPrefix(path.Base(p), d.BinaryPrefix) { file = p } return nil })) if file == "" { return "", fmt.Errorf("failed to find driver in archive. 
There must be a file of form %s*", d.BinaryPrefix) } if driverName == "" { driverName = path.Base(file) } f, err := os.Open(file) if err != nil { return "", err } defer f.Close() if err := os.MkdirAll(path.Dir(cacheFile), 0755); err != nil { return "", err } driverName = strings.ToLower(driverName) dest, err := os.Create(cacheFile + "-" + driverName) if err != nil { return "", err } defer dest.Close() if _, err := io.Copy(dest, f); err != nil { return "", err } logrus.Infof("Found driver %s", driverName) return driverName, ioutil.WriteFile(cacheFile, []byte(driverName), 0644) } // binName is the full path to the binary executable. This does not take in // account the version of the binary func (d *BaseDriver) binName() string { return path.Join(binDir(), d.DriverName) } // srcBinName is the full path of the cached/hashed binary executable. This takes // in account the version of the binary func (d *BaseDriver) srcBinName() string { return d.cacheFile() + "-" + d.DriverName } func binDir() string { if dl := os.Getenv("CATTLE_DEV_MODE"); dl != "" { return "./management-state/bin" } return "/opt/drivers/management-state/bin" } func compare(hash hash.Hash, value string) (string, bool) { if hash == nil { return "", true } got := hex.EncodeToString(hash.Sum([]byte{})) expected := strings.TrimSpace(strings.ToLower(value)) return got, got == expected } func getHasher(hash string) (hash.Hash, error) { switch len(hash) { case 0: return nil, nil case 32: return md5.New(), nil case 40: return sha1.New(), nil case 64: return sha256.New(), nil case 128: return sha512.New(), nil } return nil, fmt.Errorf("invalid hash format: %s", hash) } func (d *BaseDriver) download(dest io.Writer) error { logrus.Infof("Download %s", d.URL) resp, err := http.Get(d.URL) if err != nil { return err } defer resp.Body.Close() _, err = io.Copy(dest, resp.Body) return err } func (d *BaseDriver) cacheFile() string { key := sha256Bytes([]byte(d.URL + d.DriverHash)) base := os.Getenv("CATTLE_HOME") if 
base == "" { base = "./management-state" } return path.Join(base, "machine-drivers", key) } func isInstalled(file string) (string, error) { content, err := ioutil.ReadFile(file) if os.IsNotExist(err) { return "", nil } return strings.ToLower(strings.TrimSpace(string(content))), err } func sha256Bytes(content []byte) string { hash := sha256.New() io.Copy(hash, bytes.NewBuffer(content)) return hex.EncodeToString(hash.Sum([]byte{})) }
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: 9d4026abd0cbd0d4daddc49296c87e73 folderAsset: yes DefaultImporter: userData:
{ "pile_set_name": "Github" }
# SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o netvsc_trace.o netvsc_bpf.o
{ "pile_set_name": "Github" }
ca-fr: stuff_to_do_title: Mes tâches stuff_to_do_what_im_doing_now: "Les tâches que j'ai à faire" stuff_to_do_what_is_recommended: Les tâches que je dois accomplir par la suite stuff_to_do_what_is_available: Liste de mes tâches disponibles stuff_to_do_text_order_im_doing_it: (en ordre chronologique) stuff_to_do_text_by_project_manager: (priorisées par le gestionnaire) stuff_to_do_label_view_user_list: "Voir la liste d'un autre utilisateur :" stuff_to_do_text_total_progress: Progrès total stuff_to_do_text_total_estimates: Total estimatif stuff_to_do_label_project_manager_emails: Courriel du gestionnaire stuff_to_do_label_threshold: Seuil stuff_to_do_label_email_to: Adresses de courriel stuff_to_do_text_settings: "Un courriel est envoyé au gestionnaire quand le seuil des tâches à accomplir défini ci-dessous est atteint. Plusieurs adresses de courriels peuvent être spécifiées en séparant chacune d'elles par une virgule." stuff_to_do_label_filter: Filtre stuff_to_do_label_filter_by: Filtré par... stuff_to_do_drag_issue_create_list: Glisser une demande ici pour créer une nouvelle liste. stuff_to_do_drag_issue_remove_list: Glisser la demande ici pour la supprimer de la liste.
{ "pile_set_name": "Github" }
<?xml version="1.0"?> <xliff version="1.2" xmlns="urn:oasis:names:tc:xliff:document:1.2"> <file source-language="en" datatype="plaintext" original="file.ext"> <body> <trans-unit id="3"> <source>This value is not a valid mobile number.</source> <target>This value is not a valid handphone number.</target> </trans-unit> </body> </file> </xliff>
{ "pile_set_name": "Github" }
/////////////////////////////////////////////////////////// /////////////// Welcome to Cello ////////////////////// /////////////////////////////////////////////////////////// JobID by date: 0x68 [ -dateID 0x68 -figures false -external_directory true -assignment_algorithm abstract_only -verilog /Users/peng/cello/resources/verilog/3-input/0x68.v -output_or false] /////////////////////////////////////////////////////////// /////////////// Logic synthesis, Wiring diagram /////// /////////////////////////////////////////////////////////// fin_ver /Users/peng/cello/resources/verilog/3-input/0x68.v Input gates = 3 Logic gates = 8 NOR gates = 8 AND gates = 0 Output gates = 1 ----- Logic Circuit #0 ----- OUTPUT 01101000 out 0 (1) NOR 01101000 ~| 1 (3,2) NOR 10010000 ~| 3 (4,9) NOR 00010111 ~| 2 (4,5) NOR 01100000 ~| 4 (6,5) NOR 00011111 ~| 6 (8,7) NOR 10001000 ~| 5 (10,11) NOR 10100000 ~| 7 (9,11) NOR 11000000 ~| 8 (9,10) INPUT 00001111 in1 9 INPUT 00110011 in2 10 INPUT 01010101 in3 11 Cello finished playing. Abstract circuit only.
{ "pile_set_name": "Github" }
/** * <p> * Trust Boundary Violation is fancy name to describe tainted value passed directly to session attribute. * This could be an expected behavior that allow an attacker to change the session state. * </p> * <p> * When the parameter is dynamic, it is a lot more suspicious than when it is a dynamic value. * <code>setAttribute( suspiciousValue, "true")</code> * vs * <code>setAttribute( "language" , commonDynamicValue)</code> * </p> * <p> * For this reason, the trust boundary violation was split in two detectors. * This will allow user to hide the low priority of this detector. * </p> * * @see com.h3xstream.findsecbugs.injection.trust.TrustBoundaryViolationAttributeDetector * @see com.h3xstream.findsecbugs.injection.trust.TrustBoundaryViolationValueDetector */ package com.h3xstream.findsecbugs.injection.trust;
{ "pile_set_name": "Github" }
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (1.7.0_181) on Fri Dec 21 18:17:21 UTC 2018 --> <title>org.opencv.utils</title> <meta name="date" content="2018-12-21"> <link rel="stylesheet" type="text/css" href="../../../stylesheet.css" title="Style"> </head> <body> <h1 class="bar"><a href="../../../org/opencv/utils/package-summary.html" target="classFrame">org.opencv.utils</a></h1> <div class="indexContainer"> <h2 title="Classes">Classes</h2> <ul title="Classes"> <li><a href="Converters.html" title="class in org.opencv.utils" target="classFrame">Converters</a></li> </ul> </div> </body> </html>
{ "pile_set_name": "Github" }
package sdkio const ( // Byte is 8 bits Byte int64 = 1 // KibiByte (KiB) is 1024 Bytes KibiByte = Byte * 1024 // MebiByte (MiB) is 1024 KiB MebiByte = KibiByte * 1024 // GibiByte (GiB) is 1024 MiB GibiByte = MebiByte * 1024 )
{ "pile_set_name": "Github" }
package models.utils; import play.Configuration; import play.Logger; import play.libs.mailer.Email; import play.libs.mailer.MailerClient; import javax.inject.Inject; import play.libs.Akka; import scala.concurrent.duration.Duration; import scala.concurrent.duration.FiniteDuration; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; /** * Send a mail with PlayStartApp. * <p/> * User: yesnault * Date: 24/01/12 */ public class Mail { MailerClient mailerClient; public Mail(MailerClient mailerClient) { this.mailerClient = mailerClient; } /** * 1 second delay on sending emails */ private static final int DELAY = 1; /** * Envelop to prepare. */ public static class Envelop { public String subject; public String message; public List<String> toEmails; /** * Constructor of Envelop. * * @param subject the subject * @param message a message * @param toEmails list of emails adress */ public Envelop(String subject, String message, List<String> toEmails) { this.subject = subject; this.message = message; this.toEmails = toEmails; } public Envelop(String subject, String message, String email) { this.message = message; this.subject = subject; this.toEmails = new ArrayList<String>(); this.toEmails.add(email); } } /** * Send a email, using Akka to offload it to an actor. 
* * @param envelop envelop to send */ public void sendMail(Mail.Envelop envelop) { EnvelopJob envelopJob = new EnvelopJob(envelop, mailerClient); final FiniteDuration delay = Duration.create(DELAY, TimeUnit.SECONDS); Akka.system().scheduler().scheduleOnce(delay, envelopJob, Akka.system().dispatcher()); } static class EnvelopJob implements Runnable { MailerClient mailerClient; Mail.Envelop envelop; @Inject public EnvelopJob(Mail.Envelop envelop, MailerClient mailerClient) { this.envelop = envelop; this.mailerClient = mailerClient; } public void run() { Email email = new Email(); final Configuration root = Configuration.root(); final String mailFrom = root.getString("mail.from"); final String mailSign = root.getString("mail.sign"); email.setFrom(mailFrom); email.setSubject(envelop.subject); email.setBodyText(envelop.message + "\n\n " + mailSign); email.setBodyHtml(envelop.message + "<br><br>--<br>" + mailSign); for (String toEmail : envelop.toEmails) { email.addTo(toEmail); Logger.debug("Mail.sendMail: Mail will be sent to " + toEmail); } mailerClient.send(email); Logger.debug("Mail sent - SMTP:" + root.getString("smtp.host") + ":" + root.getString("smtp.port") + " SSL:" + root.getString("smtp.ssl") + " user:" + root.getString("smtp.user") + " password:" + root.getString("smtp.password")); } } }
{ "pile_set_name": "Github" }
framework module Pods_Get_It { umbrella header "Pods-Get It-umbrella.h" export * module * { export * } }
{ "pile_set_name": "Github" }
@available(iOS 8.0, *) enum NEVPNError : Int { init?(rawValue rawValue: Int) var rawValue: Int { get } case configurationInvalid case configurationDisabled case connectionFailed case configurationStale case configurationReadWriteFailed case configurationUnknown } @available(iOS 8.0, *) let NEVPNErrorDomain: String @available(iOS 8.0, *) let NEVPNConfigurationChangeNotification: String @available(iOS 8.0, *) class NEVPNManager : NSObject { @available(iOS 8.0, *) class func shared() -> NEVPNManager @available(iOS 8.0, *) func loadFromPreferences(completionHandler completionHandler: (NSError?) -> Void) @available(iOS 8.0, *) func removeFromPreferences(completionHandler completionHandler: ((NSError?) -> Void)? = nil) @available(iOS 8.0, *) func saveToPreferences(completionHandler completionHandler: ((NSError?) -> Void)? = nil) @available(iOS 8.0, *) var onDemandRules: [NEOnDemandRule]? @available(iOS 8.0, *) var isOnDemandEnabled: Bool @available(iOS 8.0, *) var localizedDescription: String? @available(iOS, introduced=8.0, deprecated=9.0, message="Use protocolConfiguration instead") var `protocol`: NEVPNProtocol? @available(iOS 9.0, *) var protocolConfiguration: NEVPNProtocol? @available(iOS 8.0, *) var connection: NEVPNConnection { get } @available(iOS 8.0, *) var isEnabled: Bool }
{ "pile_set_name": "Github" }
package libpython_clj.jna; import com.sun.jna.*; public class CFunction { public static interface KeyWordFunction extends Callback { Pointer pyinvoke(Pointer self, Pointer tuple_args, Pointer kw_args); } public static interface TupleFunction extends Callback { Pointer pyinvoke(Pointer self, Pointer tuple_args); } public static interface NoArgFunction extends Callback { Pointer pyinvoke(Pointer self); } public static interface tp_new extends Callback { Pointer pyinvoke(PyTypeObject type, Pointer args, Pointer kwds); } public static interface tp_init extends Callback { Pointer pyinvoke(Pointer self, Pointer tuple_args, Pointer kw_args); } public static interface tp_dealloc extends Callback { void pyinvoke(Pointer self); } public static interface tp_free extends Callback { void pyinvoke(Pointer item); } public static interface tp_att_getter extends Callback { Pointer pyinvoke(Pointer self, Pointer closure); } public static interface tp_att_setter extends Callback { Pointer pyinvoke(Pointer self, Pointer value, Pointer closure); } public static interface tp_getattr extends Callback { Pointer pyinvoke(Pointer self, String attr_name); } public static interface tp_setattr extends Callback { int pyinvoke(Pointer self, String attr_name, Pointer val); } public static interface tp_getattro extends Callback { Pointer pyinvoke(Pointer self, Pointer attr_name); } public static interface tp_setattro extends Callback { int pyinvoke(Pointer self, Pointer attr_name, Pointer val); } public static interface tp_richcompare extends Callback { Pointer pyinvoke(Pointer self, Pointer other, int comp_type); } public static interface tp_hash extends Callback { long pyinvoke(Pointer self ); } public static interface bf_getbuffer extends Callback { int pyinvoke(Pointer self, PyBuffer item, int flags); } public static interface bf_releasebuffer extends Callback { void pyinvoke(Pointer self, PyBuffer item); } }
{ "pile_set_name": "Github" }
ev
{ "pile_set_name": "Github" }
/* * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> * 2005-2007 Takahiro Hirofuchi * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <errno.h> #include <stdio.h> #include <string.h> #include "usbip_common.h" #include "utils.h" #include "sysfs_utils.h" int modify_match_busid(char *busid, int add) { char attr_name[] = "match_busid"; char command[SYSFS_BUS_ID_SIZE + 4]; char match_busid_attr_path[SYSFS_PATH_MAX]; int rc; int cmd_size; snprintf(match_busid_attr_path, sizeof(match_busid_attr_path), "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME, SYSFS_BUS_TYPE, SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME, attr_name); if (add) cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid); else cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid); rc = write_sysfs_attribute(match_busid_attr_path, command, cmd_size); if (rc < 0) { dbg("failed to write match_busid: %s", strerror(errno)); return -1; } return 0; }
{ "pile_set_name": "Github" }
/*
 * Copyright (c) 2018 Goldman Sachs.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * and Eclipse Distribution License v. 1.0 which accompany this distribution.
 * The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
 * and the Eclipse Distribution License is available at
 * http://www.eclipse.org/org/documents/edl-v10.php.
 */

package org.eclipse.collections.impl.block.factory;

import java.util.Arrays;
import java.util.Collection;
import java.util.Set;

import org.eclipse.collections.api.block.function.Function;
import org.eclipse.collections.api.block.function.Function2;
import org.eclipse.collections.api.block.predicate.Predicate;
import org.eclipse.collections.api.block.predicate.Predicate2;
import org.eclipse.collections.api.set.SetIterable;
import org.eclipse.collections.impl.block.predicate.checked.CheckedPredicate;
import org.eclipse.collections.impl.block.predicate.checked.ThrowingPredicate;
import org.eclipse.collections.impl.set.mutable.UnifiedSet;
import org.eclipse.collections.impl.utility.Iterate;

/**
 * The Predicates class can be used to build common Predicates to be used by methods such
 * as detect:, select: and reject: on the Iterate, MapIterate, and ArrayIterate classes. Predicates supports
 * equals, not equals, less than, greater than, less than or equal to, greater than or equal to, in, not in
 * and, or, and several other Predicate type operations.
 */
public abstract class Predicates<T> implements Predicate<T>
{
    private static final long serialVersionUID = 1L;

    // Stateless predicates shared as singletons across all callers.
    private static final Predicates<Object> ALWAYS_TRUE = new AlwaysTrue();
    private static final Predicates<Object> ALWAYS_FALSE = new AlwaysFalse();
    private static final Predicates<Object> IS_NULL = new IsNull();
    private static final Predicates<Object> NOT_NULL = new NotNull();

    // At or below this size, in()/notIn() keep the collection as-is and do a linear
    // contains() scan instead of copying it into a hashed set.
    private static final int SMALL_COLLECTION_THRESHOLD = 6;

    // ----- adapters and wrappers -----

    /** Wraps a plain Predicate so the fluent and()/or()/not() helpers on Predicates become available. */
    public static <T> Predicates<T> adapt(Predicate<T> predicate)
    {
        return new PredicateAdapter<>(predicate);
    }

    /**
     * Allows a Java 8 lambda or method reference to be used in a method taking a predicate without requiring an actual cast.
     * This method can be used in places where two or more method overloads could apply when used with a lambda or method
     * reference (e.g. removeIf).
     */
    public static <T> Predicate<T> cast(Predicate<T> predicate)
    {
        return predicate;
    }

    /**
     * Allows a lambda or anonymous inner class that needs to throw a checked exception to be safely wrapped as a
     * Predicate that will throw a RuntimeException, wrapping the checked exception that is the cause.
     */
    public static <T> Predicate<T> throwing(ThrowingPredicate<T> throwingPredicate)
    {
        return new ThrowingPredicateAdapter<>(throwingPredicate);
    }

    /**
     * Allows a lambda or anonymous inner class that needs to throw a checked exception to be safely wrapped as a
     * Predicate that will throw a user specified RuntimeException based on the provided function. The function
     * is passed the current element and the checked exception that was thrown as context arguments.
     */
    public static <T> Predicate<T> throwing(
            ThrowingPredicate<T> throwingPredicate,
            Function2<T, ? super Throwable, ? extends RuntimeException> rethrow)
    {
        return each ->
        {
            try
            {
                return throwingPredicate.safeAccept(each);
            }
            catch (RuntimeException e)
            {
                // RuntimeExceptions propagate unchanged; only checked Throwables are rewrapped.
                throw e;
            }
            catch (Throwable t)
            {
                throw rethrow.value(each, t);
            }
        };
    }

    /** Partially applies a two-argument predicate by fixing its second argument. */
    public static <P, T> Predicate<T> bind(Predicate2<? super T, ? super P> predicate, P parameter)
    {
        return new BindPredicate2<>(predicate, parameter);
    }

    /** Wraps a predicate so each call synchronizes on the element being tested. */
    public static <T> Predicate<T> synchronizedEach(Predicate<T> predicate)
    {
        return new SynchronizedPredicate<>(predicate);
    }

    // ----- boolean composition -----

    public static <T> Predicates<T> or(Iterable<? extends Predicate<? super T>> predicates)
    {
        return new OrIterablePredicate<>(predicates);
    }

    public static <T> Predicates<T> or(Predicate<? super T> predicate1, Predicate<? super T> predicate2)
    {
        return new OrPredicate<>(predicate1, predicate2);
    }

    public static <T> Predicates<T> or(Predicate<? super T>... predicates)
    {
        return new OrIterablePredicate<T>(Arrays.asList(predicates));
    }

    public static <T> Predicates<T> and(Iterable<? extends Predicate<? super T>> predicates)
    {
        return new AndIterablePredicate<>(predicates);
    }

    public static <T> Predicates<T> and(Predicate<? super T> predicate1, Predicate<? super T> predicate2)
    {
        return new AndPredicate<>(predicate1, predicate2);
    }

    public static <T> Predicates<T> and(Predicate<? super T>... predicates)
    {
        return new AndIterablePredicate<T>(Arrays.asList(predicates));
    }

    public static <T> Predicates<T> not(Predicate<T> predicate)
    {
        return new NotPredicate<>(predicate);
    }

    public Predicates<T> not()
    {
        return Predicates.not(this);
    }

    /** True only when both operations evaluate to false. */
    public static <T> Predicates<T> neither(Predicate<? super T> operation1, Predicate<? super T> operation2)
    {
        return new NeitherPredicate<>(operation1, operation2);
    }

    public static <T> Predicates<T> noneOf(Predicate<? super T>... operations)
    {
        return new NoneOfIterablePredicate<T>(Arrays.asList(operations));
    }

    public static <T> Predicates<T> noneOf(Iterable<? extends Predicate<? super T>> operations)
    {
        return new NoneOfIterablePredicate<>(operations);
    }

    // ----- equality and ranges -----

    /**
     * Tests for equality.
     */
    public static Predicates<Object> equal(Object object)
    {
        if (object == null)
        {
            // equal(null) degenerates to a null check.
            return Predicates.isNull();
        }
        return new EqualPredicate(object);
    }

    /**
     * Creates a predicate which returns true if an object passed to accept method is within the range, inclusive
     * of the from and to values.
     */
    public static <T extends Comparable<? super T>> Predicates<T> betweenInclusive(T from, T to)
    {
        Predicates.failIfDifferentTypes(from, to);
        return new BetweenInclusive<>(from, to);
    }

    private static void failIfDifferentTypes(Object from, Object to)
    {
        if (!from.getClass().equals(to.getClass()))
        {
            throw new IllegalArgumentException("Trying to do a between comparison with two different types " + from.getClass() + ':' + to.getClass());
        }
    }

    /**
     * Creates a predicate which returns true if an object passed to accept method is within the range, exclusive
     * of the from and to values.
     */
    public static <T extends Comparable<? super T>> Predicates<T> betweenExclusive(T from, T to)
    {
        Predicates.failIfDifferentTypes(from, to);
        return new BetweenExclusive<>(from, to);
    }

    /**
     * Creates a predicate which returns true if an object passed to accept method is within the range, inclusive
     * of the from and exclusive from the to value.
     */
    public static <T extends Comparable<? super T>> Predicates<T> betweenInclusiveFrom(T from, T to)
    {
        Predicates.failIfDifferentTypes(from, to);
        return new BetweenInclusiveFrom<>(from, to);
    }

    /**
     * Creates a predicate which returns true if an object passed to accept method is within the range, exclusive
     * of the from and inclusive of the to value.
     */
    public static <T extends Comparable<? super T>> Predicates<T> betweenInclusiveTo(T from, T to)
    {
        Predicates.failIfDifferentTypes(from, to);
        return new BetweenInclusiveTo<>(from, to);
    }

    // ----- membership -----

    /**
     * Creates a predicate which returns true if an object passed to accept method is contained in the iterable.
     */
    public static Predicates<Object> in(Iterable<?> iterable)
    {
        if (iterable instanceof SetIterable<?>)
        {
            return new InSetIterablePredicate((SetIterable<?>) iterable);
        }
        if (iterable instanceof Set<?>)
        {
            return new InSetPredicate((Set<?>) iterable);
        }
        if (iterable instanceof Collection<?> && ((Collection<?>) iterable).size() <= SMALL_COLLECTION_THRESHOLD)
        {
            // Small collections: linear scan beats the cost of building a set.
            return new InCollectionPredicate((Collection<?>) iterable);
        }
        return new InSetIterablePredicate(UnifiedSet.newSet(iterable));
    }

    public static Predicates<Object> in(Object... array)
    {
        if (array.length <= SMALL_COLLECTION_THRESHOLD)
        {
            return new InCollectionPredicate(Arrays.asList(array));
        }
        return new InSetIterablePredicate(UnifiedSet.newSetWith(array));
    }

    /**
     * Creates a predicate which returns true if an attribute selected from an object passed to accept method
     * is contained in the iterable.
     */
    public static <T> Predicates<T> attributeIn(
            Function<? super T, ?> function,
            Iterable<?> iterable)
    {
        return new AttributePredicate<>(function, Predicates.in(iterable));
    }

    public static <T, V extends Comparable<? super V>> Predicates<T> attributeBetweenInclusive(
            Function<? super T, ? extends V> function,
            V from,
            V to)
    {
        return new AttributePredicate<>(function, Predicates.betweenInclusive(from, to));
    }

    public static <T, V extends Comparable<? super V>> Predicates<T> attributeBetweenExclusive(
            Function<? super T, ? extends V> function,
            V from,
            V to)
    {
        return new AttributePredicate<>(function, Predicates.betweenExclusive(from, to));
    }

    public static <T, V extends Comparable<? super V>> Predicates<T> attributeBetweenInclusiveFrom(
            Function<? super T, ? extends V> function,
            V from,
            V to)
    {
        return new AttributePredicate<>(function, Predicates.betweenInclusiveFrom(from, to));
    }

    public static <T, V extends Comparable<? super V>> Predicates<T> attributeBetweenInclusiveTo(
            Function<? super T, ? extends V> function,
            V from,
            V to)
    {
        return new AttributePredicate<>(function, Predicates.betweenInclusiveTo(from, to));
    }

    /**
     * Creates a predicate which returns true if an object passed to accept method is not contained in
     * the iterable.
     */
    public static Predicates<Object> notIn(Iterable<?> iterable)
    {
        if (iterable instanceof SetIterable<?>)
        {
            return new NotInSetIterablePredicate((SetIterable<?>) iterable);
        }
        if (iterable instanceof Set<?>)
        {
            return new NotInSetPredicate((Set<?>) iterable);
        }
        if (iterable instanceof Collection<?> && ((Collection<?>) iterable).size() <= SMALL_COLLECTION_THRESHOLD)
        {
            return new NotInCollectionPredicate((Collection<?>) iterable);
        }
        return new NotInSetIterablePredicate(UnifiedSet.newSet(iterable));
    }

    public static Predicates<Object> notIn(Object... array)
    {
        if (array.length <= SMALL_COLLECTION_THRESHOLD)
        {
            return new NotInCollectionPredicate(Arrays.asList(array));
        }
        return new NotInSetIterablePredicate(UnifiedSet.newSetWith(array));
    }

    /**
     * Creates a predicate which returns true if an attribute selected from an object passed to accept method
     * is not contained in the iterable.
     */
    public static <T> Predicates<T> attributeNotIn(
            Function<? super T, ?> function,
            Iterable<?> iterable)
    {
        return new AttributePredicate<>(function, Predicates.notIn(iterable));
    }

    // ----- comparisons -----

    public static <T extends Comparable<? super T>> Predicates<T> lessThan(T object)
    {
        return new LessThanPredicate<>(object);
    }

    public static <T, V extends Comparable<? super V>> Predicates<T> attributeLessThan(
            Function<? super T, ? extends V> function,
            V object)
    {
        return new AttributePredicate<>(function, new LessThanPredicate<>(object));
    }

    public static <T extends Comparable<? super T>> Predicates<T> lessThanOrEqualTo(T object)
    {
        return new LessThanOrEqualPredicate<>(object);
    }

    public static <T, V extends Comparable<? super V>> Predicates<T> attributeLessThanOrEqualTo(
            Function<? super T, ? extends V> function,
            V object)
    {
        return new AttributePredicate<>(function, new LessThanOrEqualPredicate<>(object));
    }

    public static <T extends Comparable<? super T>> Predicates<T> greaterThan(T object)
    {
        return new GreaterThanPredicate<>(object);
    }

    public static <T, V extends Comparable<? super V>> Predicates<T> attributeGreaterThan(
            Function<? super T, ? extends V> function,
            V object)
    {
        return new AttributePredicate<>(function, new GreaterThanPredicate<>(object));
    }

    public static <T extends Comparable<? super T>> Predicates<T> greaterThanOrEqualTo(T object)
    {
        return new GreaterThanOrEqualPredicate<>(object);
    }

    public static <T, V extends Comparable<? super V>> Predicates<T> attributeGreaterThanOrEqualTo(
            Function<? super T, ? extends V> function,
            V object)
    {
        return new AttributePredicate<>(function, new GreaterThanOrEqualPredicate<>(object));
    }

    // ----- attribute-based predicates -----

    /** Applies the function to the element and tests the extracted value with the given predicate. */
    public static <T, V> Predicates<T> attributePredicate(
            Function<? super T, ? extends V> function,
            Predicate<? super V> predicate)
    {
        return new AttributePredicate<>(function, predicate);
    }

    public static <T> Predicates<T> attributeEqual(
            Function<? super T, ?> function,
            Object object)
    {
        return new AttributePredicate<>(function, Predicates.equal(object));
    }

    // ----- quantifiers over iterables -----

    public static <T> Predicates<Iterable<T>> anySatisfy(Predicate<? super T> predicate)
    {
        return new AnySatisfy<>(predicate);
    }

    public static <T> Predicates<Iterable<T>> allSatisfy(Predicate<? super T> predicate)
    {
        return new AllSatisfy<>(predicate);
    }

    public static <T> Predicates<Iterable<T>> noneSatisfy(Predicate<? super T> predicate)
    {
        return new NoneSatisfy<>(predicate);
    }

    public static <T, V> Predicates<T> attributeAnySatisfy(
            Function<? super T, ? extends Iterable<V>> function,
            Predicate<? super V> predicate)
    {
        return Predicates.attributePredicate(function, Predicates.anySatisfy(predicate));
    }

    public static <T, V> Predicates<T> attributeAllSatisfy(
            Function<? super T, ? extends Iterable<V>> function,
            Predicate<? super V> predicate)
    {
        return Predicates.attributePredicate(function, Predicates.allSatisfy(predicate));
    }

    public static <T, V> Predicates<T> attributeNoneSatisfy(
            Function<? super T, ? extends Iterable<V>> function,
            Predicate<? super V> predicate)
    {
        return Predicates.attributePredicate(function, Predicates.noneSatisfy(predicate));
    }

    // ----- negated equality, null checks, identity, type checks -----

    public static Predicates<Object> notEqual(Object object)
    {
        if (object == null)
        {
            // notEqual(null) degenerates to a not-null check.
            return Predicates.notNull();
        }
        return new NotEqualPredicate(object);
    }

    /** True when the Boolean-valued function returns Boolean.TRUE for the element. */
    public static <T> Predicates<T> ifTrue(Function<? super T, Boolean> function)
    {
        return new AttributeTrue<>(function);
    }

    /** True when the Boolean-valued function returns Boolean.FALSE for the element. */
    public static <T> Predicates<T> ifFalse(Function<? super T, Boolean> function)
    {
        return new AttributeFalse<>(function);
    }

    public static <T> Predicates<T> attributeNotEqual(
            Function<? super T, ?> function,
            Object object)
    {
        return new AttributePredicate<>(function, Predicates.notEqual(object));
    }

    public static Predicates<Object> isNull()
    {
        return IS_NULL;
    }

    public static <T> Predicates<T> attributeIsNull(Function<? super T, ?> function)
    {
        return new AttributePredicate<>(function, Predicates.isNull());
    }

    public static Predicates<Object> notNull()
    {
        return NOT_NULL;
    }

    public static <T> Predicates<T> attributeNotNull(Function<? super T, ?> function)
    {
        return new AttributePredicate<>(function, Predicates.notNull());
    }

    /** Reference identity (==), not equals(). */
    public static Predicates<Object> sameAs(Object object)
    {
        return new IdentityPredicate(object);
    }

    public static Predicates<Object> notSameAs(Object object)
    {
        return new NotIdentityPredicate(object);
    }

    public static Predicates<Object> instanceOf(Class<?> clazz)
    {
        return new InstanceOfPredicate(clazz);
    }

    public static Predicates<Object> assignableFrom(Class<?> clazz)
    {
        return new AssignableFromPredicate(clazz);
    }

    public static Predicates<Object> notInstanceOf(Class<?> clazz)
    {
        return new NotInstanceOfPredicate(clazz);
    }

    public static Predicates<Object> alwaysTrue()
    {
        return ALWAYS_TRUE;
    }

    public static Predicates<Object> alwaysFalse()
    {
        return ALWAYS_FALSE;
    }

    public Predicates<T> and(Predicate<? super T> op)
    {
        return Predicates.and(this, op);
    }

    public Predicates<T> or(Predicate<? super T> op)
    {
        return Predicates.or(this, op);
    }

    /** True when the tested class is aClass or a subclass of it. */
    public static Predicates<Class<?>> subClass(Class<?> aClass)
    {
        return new SubclassPredicate(aClass);
    }

    /** True when the tested class is aClass or a superclass of it. */
    public static Predicates<Class<?>> superClass(Class<?> aClass)
    {
        return new SuperclassPredicate(aClass);
    }

    // ----- implementation classes -----

    private static final class PredicateAdapter<T> extends Predicates<T>
    {
        private static final long serialVersionUID = 1L;
        private final Predicate<T> predicate;

        private PredicateAdapter(Predicate<T> newPredicate)
        {
            this.predicate = newPredicate;
        }

        @Override
        public boolean accept(T o)
        {
            return this.predicate.accept(o);
        }

        @Override
        public String toString()
        {
            return "Predicates.adapt(" + this.predicate + ')';
        }
    }

    protected static class AttributePredicate<T, V> extends Predicates<T>
    {
        private static final long serialVersionUID = 1L;
        protected final Function<? super T, ? extends V> function;
        protected final Predicate<? super V> predicate;

        protected AttributePredicate(
                Function<? super T, ? extends V> newFunction,
                Predicate<? super V> newPredicate)
        {
            this.function = newFunction;
            this.predicate = newPredicate;
        }

        @Override
        public boolean accept(T anObject)
        {
            return this.predicate.accept(this.function.valueOf(anObject));
        }

        @Override
        public String toString()
        {
            return "Predicates.attributePredicate(" + this.function + ", " + this.predicate + ')';
        }
    }

    private static class FalseEquals implements Predicate<Boolean>
    {
        private static final long serialVersionUID = 1L;

        @Override
        public boolean accept(Boolean anObject)
        {
            return Boolean.FALSE.equals(anObject);
        }
    }

    private static class TrueEquals implements Predicate<Boolean>
    {
        private static final long serialVersionUID = 1L;

        @Override
        public boolean accept(Boolean anObject)
        {
            return Boolean.TRUE.equals(anObject);
        }
    }

    private static final class AttributeFalse<T> extends AttributePredicate<T, Boolean>
    {
        private static final long serialVersionUID = 1L;
        private static final FalseEquals FALSE_EQUALS = new FalseEquals();

        private AttributeFalse(Function<? super T, Boolean> newFunction)
        {
            super(newFunction, FALSE_EQUALS);
        }

        @Override
        public String toString()
        {
            return "Predicates.ifFalse(" + this.function + ')';
        }
    }

    private static final class AttributeTrue<T> extends AttributePredicate<T, Boolean>
    {
        private static final long serialVersionUID = 1L;
        private static final TrueEquals TRUE_EQUALS = new TrueEquals();

        private AttributeTrue(Function<? super T, Boolean> newFunction)
        {
            super(newFunction, TRUE_EQUALS);
        }

        @Override
        public String toString()
        {
            return "Predicates.ifTrue(" + this.function + ')';
        }
    }

    public static class AnySatisfy<T> extends Predicates<Iterable<T>>
    {
        private static final long serialVersionUID = 1L;
        private final Predicate<? super T> predicate;

        public AnySatisfy(Predicate<? super T> predicate)
        {
            this.predicate = predicate;
        }

        @Override
        public boolean accept(Iterable<T> iterable)
        {
            return Iterate.anySatisfy(iterable, this.predicate);
        }
    }

    public static class AllSatisfy<T> extends Predicates<Iterable<T>>
    {
        private static final long serialVersionUID = 1L;
        private final Predicate<? super T> predicate;

        public AllSatisfy(Predicate<? super T> predicate)
        {
            this.predicate = predicate;
        }

        @Override
        public boolean accept(Iterable<T> iterable)
        {
            return Iterate.allSatisfy(iterable, this.predicate);
        }
    }

    public static class NoneSatisfy<T> extends Predicates<Iterable<T>>
    {
        private static final long serialVersionUID = 1L;
        private final Predicate<? super T> predicate;

        public NoneSatisfy(Predicate<? super T> predicate)
        {
            this.predicate = predicate;
        }

        @Override
        public boolean accept(Iterable<T> iterable)
        {
            return Iterate.noneSatisfy(iterable, this.predicate);
        }
    }

    // Base for all predicates that compare against a single bound.
    private abstract static class CompareToPredicate<T extends Comparable<? super T>> extends Predicates<T>
    {
        private static final long serialVersionUID = 1L;
        protected final T compareTo;

        private CompareToPredicate(T newCompareTo)
        {
            this.compareTo = newCompareTo;
        }
    }

    protected static class LessThanPredicate<T extends Comparable<? super T>> extends CompareToPredicate<T>
    {
        private static final long serialVersionUID = 1L;

        protected LessThanPredicate(T newCompareTo)
        {
            super(newCompareTo);
        }

        @Override
        public boolean accept(T o)
        {
            return o.compareTo(this.compareTo) < 0;
        }

        @Override
        public String toString()
        {
            return "Predicates.lessThan(" + this.compareTo + ')';
        }
    }

    // Base for range predicates; compareFrom is the lower bound, compareTo (inherited) the upper.
    protected abstract static class RangePredicate<T extends Comparable<? super T>> extends CompareToPredicate<T>
    {
        private static final long serialVersionUID = 1L;
        protected final T compareFrom;

        protected RangePredicate(T newCompareFrom, T newCompareTo)
        {
            super(newCompareTo);
            this.compareFrom = newCompareFrom;
        }
    }

    private static final class BetweenInclusive<T extends Comparable<? super T>> extends RangePredicate<T>
    {
        private static final long serialVersionUID = 1L;

        private BetweenInclusive(T newCompareFrom, T newCompareTo)
        {
            super(newCompareFrom, newCompareTo);
        }

        @Override
        public boolean accept(T o)
        {
            return o.compareTo(this.compareFrom) >= 0 && o.compareTo(this.compareTo) <= 0;
        }
    }

    private static final class BetweenInclusiveTo<T extends Comparable<? super T>> extends RangePredicate<T>
    {
        private static final long serialVersionUID = 1L;

        private BetweenInclusiveTo(T newCompareFrom, T newCompareTo)
        {
            super(newCompareFrom, newCompareTo);
        }

        @Override
        public boolean accept(T o)
        {
            return o.compareTo(this.compareFrom) > 0 && o.compareTo(this.compareTo) <= 0;
        }
    }

    private static final class BetweenInclusiveFrom<T extends Comparable<? super T>> extends RangePredicate<T>
    {
        private static final long serialVersionUID = 1L;

        private BetweenInclusiveFrom(T newCompareFrom, T newCompareTo)
        {
            super(newCompareFrom, newCompareTo);
        }

        @Override
        public boolean accept(T o)
        {
            return o.compareTo(this.compareFrom) >= 0 && o.compareTo(this.compareTo) < 0;
        }
    }

    private static final class BetweenExclusive<T extends Comparable<? super T>> extends RangePredicate<T>
    {
        private static final long serialVersionUID = 1L;

        private BetweenExclusive(T newCompareFrom, T newCompareTo)
        {
            super(newCompareFrom, newCompareTo);
        }

        @Override
        public boolean accept(T o)
        {
            return o.compareTo(this.compareFrom) > 0 && o.compareTo(this.compareTo) < 0;
        }
    }

    protected static class LessThanOrEqualPredicate<T extends Comparable<? super T>> extends CompareToPredicate<T>
    {
        private static final long serialVersionUID = 1L;

        protected LessThanOrEqualPredicate(T newCompareTo)
        {
            super(newCompareTo);
        }

        @Override
        public boolean accept(T o)
        {
            return o.compareTo(this.compareTo) <= 0;
        }

        @Override
        public String toString()
        {
            return "Predicates.lessThanOrEqualTo(" + this.compareTo + ')';
        }
    }

    protected static class GreaterThanPredicate<T extends Comparable<? super T>> extends CompareToPredicate<T>
    {
        private static final long serialVersionUID = 1L;

        protected GreaterThanPredicate(T newCompareTo)
        {
            super(newCompareTo);
        }

        @Override
        public boolean accept(T o)
        {
            return o.compareTo(this.compareTo) > 0;
        }

        @Override
        public String toString()
        {
            return "Predicates.greaterThan(" + this.compareTo + ')';
        }
    }

    protected static class GreaterThanOrEqualPredicate<T extends Comparable<? super T>> extends CompareToPredicate<T>
    {
        private static final long serialVersionUID = 1L;

        protected GreaterThanOrEqualPredicate(T newCompareTo)
        {
            super(newCompareTo);
        }

        @Override
        public boolean accept(T o)
        {
            return o.compareTo(this.compareTo) >= 0;
        }

        @Override
        public String toString()
        {
            return "Predicates.greaterThanOrEqualTo(" + this.compareTo + ')';
        }
    }

    private static final class AndIterablePredicate<T> extends AbstractIterablePredicate<T>
    {
        private static final long serialVersionUID = 1L;

        private AndIterablePredicate(Iterable<? extends Predicate<? super T>> predicates)
        {
            super(predicates);
        }

        @Override
        protected String getTypeName()
        {
            return "and";
        }

        @Override
        public boolean accept(T anObject)
        {
            Predicate<Predicate<? super T>> predicate = aPredicate -> aPredicate.accept(anObject);
            return Iterate.allSatisfy(this.predicates, predicate);
        }
    }

    private static final class OrIterablePredicate<T> extends AbstractIterablePredicate<T>
    {
        private static final long serialVersionUID = 1L;

        private OrIterablePredicate(Iterable<? extends Predicate<? super T>> predicates)
        {
            super(predicates);
        }

        @Override
        protected String getTypeName()
        {
            return "or";
        }

        @Override
        public boolean accept(T anObject)
        {
            Predicate<Predicate<? super T>> predicate = aPredicate -> aPredicate.accept(anObject);
            return Iterate.anySatisfy(this.predicates, predicate);
        }
    }

    private static final class NoneOfIterablePredicate<T> extends AbstractIterablePredicate<T>
    {
        private static final long serialVersionUID = 1L;

        private NoneOfIterablePredicate(Iterable<? extends Predicate<? super T>> predicates)
        {
            super(predicates);
        }

        @Override
        protected String getTypeName()
        {
            return "noneOf";
        }

        @Override
        public boolean accept(T anObject)
        {
            Predicate<Predicate<? super T>> predicate = aPredicate -> !aPredicate.accept(anObject);
            return Iterate.allSatisfy(this.predicates, predicate);
        }
    }

    private abstract static class AbstractIterablePredicate<T> extends Predicates<T>
    {
        private static final long serialVersionUID = 1L;
        protected final Iterable<? extends Predicate<? super T>> predicates;

        private AbstractIterablePredicate(Iterable<? extends Predicate<? super T>> predicates)
        {
            this.predicates = predicates;
        }

        // Name used only for toString(), e.g. "and", "or", "noneOf".
        protected abstract String getTypeName();

        @Override
        public String toString()
        {
            return "Predicates." + this.getTypeName() + '(' + this.predicates + ')';
        }
    }

    private static final class AndPredicate<T> extends Predicates<T>
    {
        private static final long serialVersionUID = 1L;
        private final Predicate<? super T> left;
        private final Predicate<? super T> right;

        private AndPredicate(Predicate<? super T> one, Predicate<? super T> two)
        {
            this.left = one;
            this.right = two;
        }

        @Override
        public boolean accept(T anObject)
        {
            return this.left.accept(anObject) && this.right.accept(anObject);
        }

        @Override
        public String toString()
        {
            return this.left + ".and(" + this.right + ')';
        }
    }

    private static final class NeitherPredicate<T> extends Predicates<T>
    {
        private static final long serialVersionUID = 1L;
        private final Predicate<? super T> left;
        private final Predicate<? super T> right;

        private NeitherPredicate(Predicate<? super T> one, Predicate<? super T> two)
        {
            this.left = one;
            this.right = two;
        }

        @Override
        public boolean accept(T anObject)
        {
            return !this.left.accept(anObject) && !this.right.accept(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.neither(" + this.left + ", " + this.right + ')';
        }
    }

    private static final class OrPredicate<T> extends Predicates<T>
    {
        private static final long serialVersionUID = 1L;
        private final Predicate<? super T> left;
        private final Predicate<? super T> right;

        private OrPredicate(Predicate<? super T> one, Predicate<? super T> two)
        {
            this.left = one;
            this.right = two;
        }

        @Override
        public boolean accept(T anObject)
        {
            return this.left.accept(anObject) || this.right.accept(anObject);
        }

        @Override
        public String toString()
        {
            return this.left + ".or(" + this.right + ')';
        }
    }

    private static final class NotPredicate<T> extends Predicates<T>
    {
        private static final long serialVersionUID = 1L;
        private final Predicate<T> predicate;

        private NotPredicate(Predicate<T> newPredicate)
        {
            this.predicate = newPredicate;
        }

        @Override
        public boolean accept(T anObject)
        {
            return !this.predicate.accept(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.not(" + this.predicate + ')';
        }
    }

    private static final class EqualPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Object compareObject;

        private EqualPredicate(Object newCompareObject)
        {
            this.compareObject = newCompareObject;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return this.compareObject.equals(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.equal(" + this.compareObject + ')';
        }
    }

    private static final class InCollectionPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Collection<?> collection;

        private InCollectionPredicate(Collection<?> collection)
        {
            this.collection = collection;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return this.collection.contains(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.in(" + this.collection + ')';
        }
    }

    private static final class NotInCollectionPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Collection<?> collection;

        private NotInCollectionPredicate(Collection<?> collection)
        {
            this.collection = collection;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return !this.collection.contains(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.notIn(" + this.collection + ')';
        }
    }

    private static final class InSetIterablePredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final SetIterable<?> setIterable;

        private InSetIterablePredicate(SetIterable<?> setIterable)
        {
            this.setIterable = setIterable;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return this.setIterable.contains(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.in(" + this.setIterable + ')';
        }
    }

    private static final class NotInSetIterablePredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final SetIterable<?> setIterable;

        private NotInSetIterablePredicate(SetIterable<?> setIterable)
        {
            this.setIterable = setIterable;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return !this.setIterable.contains(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.notIn(" + this.setIterable + ')';
        }
    }

    private static final class InSetPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Set<?> set;

        private InSetPredicate(Set<?> set)
        {
            this.set = set;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return this.set.contains(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.in(" + this.set + ')';
        }
    }

    private static final class NotInSetPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Set<?> set;

        private NotInSetPredicate(Set<?> set)
        {
            this.set = set;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return !this.set.contains(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.notIn(" + this.set + ')';
        }
    }

    private static final class NotEqualPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Object compareObject;

        private NotEqualPredicate(Object newCompareObject)
        {
            this.compareObject = newCompareObject;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return !this.compareObject.equals(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.notEqual(" + this.compareObject + ')';
        }
    }

    private static final class IsNull extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;

        @Override
        public boolean accept(Object anObject)
        {
            return anObject == null;
        }

        @Override
        public String toString()
        {
            return "Predicates.isNull()";
        }
    }

    private static final class NotNull extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;

        @Override
        public boolean accept(Object anObject)
        {
            return anObject != null;
        }

        @Override
        public String toString()
        {
            return "Predicates.notNull()";
        }
    }

    private static final class AssignableFromPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Class<?> clazz;

        private AssignableFromPredicate(Class<?> newClass)
        {
            this.clazz = newClass;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return this.clazz.isAssignableFrom(anObject.getClass());
        }

        @Override
        public String toString()
        {
            return "Predicates.assignableFrom(" + this.clazz.getName() + ".class)";
        }
    }

    private static final class InstanceOfPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Class<?> clazz;

        private InstanceOfPredicate(Class<?> newClass)
        {
            this.clazz = newClass;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return this.clazz.isInstance(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.instanceOf(" + this.clazz.getName() + ".class)";
        }
    }

    private static final class NotInstanceOfPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Class<?> clazz;

        private NotInstanceOfPredicate(Class<?> newClass)
        {
            this.clazz = newClass;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return !this.clazz.isInstance(anObject);
        }

        @Override
        public String toString()
        {
            return "Predicates.notInstanceOf(" + this.clazz.getName() + ".class)";
        }
    }

    private static final class AlwaysTrue extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;

        @Override
        public boolean accept(Object anObject)
        {
            return true;
        }

        @Override
        public String toString()
        {
            return "Predicates.alwaysTrue()";
        }
    }

    private static final class AlwaysFalse extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;

        @Override
        public boolean accept(Object anObject)
        {
            return false;
        }

        @Override
        public String toString()
        {
            return "Predicates.alwaysFalse()";
        }
    }

    private static final class IdentityPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Object twin;

        private IdentityPredicate(Object object)
        {
            this.twin = object;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return this.twin == anObject;
        }

        @Override
        public String toString()
        {
            return "Predicates.sameAs(" + this.twin + ')';
        }
    }

    private static final class NotIdentityPredicate extends Predicates<Object>
    {
        private static final long serialVersionUID = 1L;
        private final Object twin;

        private NotIdentityPredicate(Object object)
        {
            this.twin = object;
        }

        @Override
        public boolean accept(Object anObject)
        {
            return this.twin != anObject;
        }

        @Override
        public String toString()
        {
            return "Predicates.notSameAs(" + this.twin + ')';
        }
    }

    private static final class SynchronizedPredicate<T> implements Predicate<T>
    {
        private static final long serialVersionUID = 1L;
        private final Predicate<T> predicate;

        private SynchronizedPredicate(Predicate<T> predicate)
        {
            this.predicate = predicate;
        }

        @Override
        public boolean accept(T each)
        {
            // Locks on the element under test, not on this wrapper.
            synchronized (each)
            {
                return this.predicate.accept(each);
            }
        }
    }

    private static final class SubclassPredicate extends Predicates<Class<?>>
    {
        private static final long serialVersionUID = 1L;
        private final Class<?> aClass;

        private SubclassPredicate(Class<?> aClass)
        {
            this.aClass = aClass;
        }

        @Override
        public boolean accept(Class<?> each)
        {
            return this.aClass.isAssignableFrom(each);
        }
    }

    private static final class SuperclassPredicate extends Predicates<Class<?>>
    {
        private static final long serialVersionUID = 1L;
        private final Class<?> aClass;

        private SuperclassPredicate(Class<?> aClass)
        {
            this.aClass = aClass;
        }

        @Override
        public boolean accept(Class<?> each)
        {
            return each.isAssignableFrom(this.aClass);
        }
    }

    private static final class BindPredicate2<T, P> implements Predicate<T>
    {
        private static final long serialVersionUID = 1L;
        private final Predicate2<? super T, ? super P> predicate;
        private final P parameter;

        private BindPredicate2(Predicate2<? super T, ? super P> predicate, P parameter)
        {
            this.predicate = predicate;
            this.parameter = parameter;
        }

        @Override
        public boolean accept(T each)
        {
            return this.predicate.accept(each, this.parameter);
        }

        @Override
        public String toString()
        {
            return "Predicates.bind(" + this.predicate + ", " + this.parameter + ")";
        }
    }

    private static final class ThrowingPredicateAdapter<T> extends CheckedPredicate<T>
    {
        private static final long serialVersionUID = 1L;
        private final ThrowingPredicate<T> throwingPredicate;

        private ThrowingPredicateAdapter(ThrowingPredicate<T> throwingPredicate)
        {
            this.throwingPredicate = throwingPredicate;
        }

        @Override
        public boolean safeAccept(T object) throws Exception
        {
            return this.throwingPredicate.safeAccept(object);
        }
    }
}
{ "pile_set_name": "Github" }
{ "translatorID": "54ac4ec1-9d07-45d3-9d96-48bed3411fb6", "label": "National Library of Australia (new catalog)", "creator": "Philipp Zumstein", "target": "^https?://catalogue\\.nla\\.gov\\.au", "minVersion": "3.0", "maxVersion": "", "priority": 100, "inRepository": true, "translatorType": 4, "browserSupport": "gcsibv", "lastUpdated": "2018-01-30 09:40:11" } /* ***** BEGIN LICENSE BLOCK ***** Copyright © 2018 Philipp Zumstein This file is part of Zotero. Zotero is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Zotero is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with Zotero. If not, see <http://www.gnu.org/licenses/>. 
***** END LICENSE BLOCK ***** */ // attr()/text() v2 function attr(docOrElem,selector,attr,index){var elem=index?docOrElem.querySelectorAll(selector).item(index):docOrElem.querySelector(selector);return elem?elem.getAttribute(attr):null;}function text(docOrElem,selector,index){var elem=index?docOrElem.querySelectorAll(selector).item(index):docOrElem.querySelector(selector);return elem?elem.textContent:null;} function detectWeb(doc, url) { if (url.match("/Record/[0-9]+")) { var format = doc.getElementById("myformat").textContent; return computeFormat(format); } else if (url.includes("/Search/Home") && doc.getElementById("resultItemLine1")) { return "multiple"; } } // map the nla formats to zotero formats function computeFormat(format){ // clean up whitespace and remove commas from items with multiple formats format = Zotero.Utilities.trimInternal(format.replace(',', '')); if (format == "Audio") return "audioRecording"; if (format == "Book") return "book"; if (format == "Journal/Newspaper") return "journalArticle"; if (format == "Manuscript") return "manuscript"; if (format == "Map") return "map"; if (format == "Music") return "audioRecording"; if (format == "Online") return "webpage"; if (format == "Picture") return "artwork"; if (format == "Video") return "videoRecording"; // default return "book"; } function getSearchResults(doc, checkOnly) { var items = {}; var found = false; var rows = doc.querySelectorAll('.resultitem a.title'); for (let i=0; i<rows.length; i++) { let href = rows[i].href; if (!/\/Record\/\d+/.test(href)) continue; let title = ZU.trimInternal(rows[i].textContent); if (!href || !title) continue; if (checkOnly) return true; found = true; items[href] = title; } return found ? 
items : false; } function doWeb(doc, url) { if (detectWeb(doc, url) == "multiple") { Zotero.selectItems(getSearchResults(doc, false), function (items) { if (!items) { return true; } processUrls(Object.keys(items)); }); } else { processUrls([url]); } } function processUrls(urls) { for (let i=0; i<urls.length; i++) { var bibid = urls[i].match(/\/Record\/(\d+)\b/); if (bibid) { var marcUrl = "/Record/" + bibid[1] + "/Export?style=marc"; ZU.doGet(marcUrl, scrapeMarc); } } } function scrapeMarc(text) { var translator = Zotero.loadTranslator("import"); translator.setTranslator("a6ee60df-1ddc-4aae-bb25-45e0537be973"); translator.setString(text); translator.translate(); } /** BEGIN TEST CASES **/ var testCases = [ { "type": "web", "url": "https://catalogue.nla.gov.au/Record/773336?lookfor=labor&offset=10&max=65985", "items": [ { "itemType": "book", "title": "Labor: readings on major issues", "creators": [ { "firstName": "Richard Allen", "lastName": "Lester", "creatorType": "author" } ], "date": "1967", "callNumber": "331.082", "libraryCatalog": "National Library of Australia (new catalog)", "place": "New York", "publisher": "Random House", "shortTitle": "Labor", "attachments": [], "tags": [ { "tag": "Labor unions" }, { "tag": "United States" }, { "tag": "United States" }, { "tag": "Working class" } ], "notes": [], "seeAlso": [] } ] }, { "type": "web", "url": "http://catalogue.nla.gov.au/Search/Home?lookfor=labor&type=all&limit%5B%5D=&submit=Find&filter[]=language:%22eng%22", "items": "multiple" } ] /** END TEST CASES **/
{ "pile_set_name": "Github" }
{ "compilerOptions": { "target": "es5", "forceConsistentCasingInFileNames": true, "module": "commonjs", "jsx": "react", "declaration": true, "sourceMap": true, "types": [ "es6-promise", "es6-collections", "webpack-env" ] } }
{ "pile_set_name": "Github" }
// // Copyright (c) ZeroC, Inc. All rights reserved. // #include <Ice/Ice.h> #include <TestHelper.h> #include <Test.h> using namespace std; Test::ChecksumPrxPtr allTests(Test::TestHelper* helper) { Ice::CommunicatorPtr communicator = helper->communicator(); string ref = "test:" + helper->getTestEndpoint(); Ice::ObjectPrxPtr base = communicator->stringToProxy(ref); test(base); Test::ChecksumPrxPtr checksum = ICE_CHECKED_CAST(Test::ChecksumPrx, base); test(checksum); Ice::SliceChecksumDict::const_iterator p; // // Verify that no checksums are present for local types. // cout << "testing checksums... " << flush; Ice::SliceChecksumDict localChecksums = Ice::sliceChecksums(); test(!localChecksums.empty()); for(p = localChecksums.begin(); p != localChecksums.end(); ++p) { string::size_type pos = p->first.find("Local"); test(pos == string::npos); } // // Get server's Slice checksums. // Ice::SliceChecksumDict d = checksum->getSliceChecksums(); // // Compare the checksums. For a type FooN whose name ends in an integer N, // we assume that the server's type does not change for N = 1, and does // change for N > 1. // for(p = d.begin(); p != d.end(); ++p) { int n = 0; string::size_type pos = p->first.find_first_of("0123456789"); if(pos != string::npos) { n = atoi(p->first.c_str() + pos); } Ice::SliceChecksumDict::const_iterator q = localChecksums.find(p->first); test(q != localChecksums.end()); if(n <= 1) { test(q->second == p->second); } else { test(q->second != p->second); } } cout << "ok" << endl; return checksum; }
{ "pile_set_name": "Github" }
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.zookeeper.server.quorum;

import org.apache.zookeeper.server.ZooKeeperServerMXBean;

/**
 * Leader MBean.
 *
 * <p>Management interface (MXBean naming convention suggests JMX exposure)
 * for a quorum server acting as leader, extending the common server bean
 * with leader-specific statistics and tunables.
 */
public interface LeaderMXBean extends ZooKeeperServerMXBean {

    /**
     * Current zxid of cluster.
     *
     * @return the cluster's current zxid, rendered as a string
     */
    String getCurrentZxid();

    /**
     * @return information on current followers
     */
    String followerInfo();

    /**
     * @return information about current non-voting followers
     */
    String nonVotingFollowerInfo();

    /**
     * @return time taken for leader election in milliseconds.
     */
    long getElectionTimeTaken();

    /**
     * @return size of latest generated proposal
     */
    int getLastProposalSize();

    /**
     * @return size of smallest generated proposal
     */
    int getMinProposalSize();

    /**
     * @return size of largest generated proposal
     */
    int getMaxProposalSize();

    /**
     * Resets statistics of proposal size (min/max/last)
     */
    void resetProposalStatistics();

    /**
     * @return Number of concurrent snapshots permitted to send to observers
     */
    int getMaxConcurrentSnapSyncs();

    /**
     * @param maxConcurrentSnapSyncs Number of concurrent snapshots permitted to send to observers
     */
    void setMaxConcurrentSnapSyncs(int maxConcurrentSnapSyncs);

    /**
     * @return Number of concurrent diff syncs permitted to send to observers
     */
    int getMaxConcurrentDiffSyncs();

    /**
     * @param maxConcurrentDiffSyncs Number of concurrent diff syncs permitted to send to observers
     */
    void setMaxConcurrentDiffSyncs(int maxConcurrentDiffSyncs);
}
{ "pile_set_name": "Github" }
// Copyright (c) 2017, Tom Honermann // // This file is distributed under the MIT License. See the accompanying file // LICENSE.txt or http://www.opensource.org/licenses/mit-license.php for terms // and conditions. #include <cassert> #include <text_view_detail/error_status.hpp> namespace std { namespace experimental { inline namespace text { const char* status_message(encode_status es) noexcept { switch (es) { case encode_status::no_error: return "no error"; case encode_status::invalid_character: return "invalid character"; case encode_status::invalid_state_transition: return "invalid state transition"; } assert(0 && "Unrecognized encode_status value"); return "unknown error"; } const char* status_message(decode_status ds) noexcept { switch (ds) { case decode_status::no_error: return "no error"; case decode_status::no_character: return "no character decoded"; case decode_status::invalid_code_unit_sequence: return "invalid code unit sequence"; case decode_status::underflow: return "underflow"; } assert(0 && "Unrecognized decode_status value"); return "unknown error"; } } // inline namespace text } // namespace experimental } // namespace std
{ "pile_set_name": "Github" }
/*
 * This library is part of OpenCms -
 * the Open Source Content Management System
 *
 * Copyright (c) Alkacon Software GmbH & Co. KG (http://www.alkacon.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * For further information about Alkacon Software, please see the
 * company website: http://www.alkacon.com
 *
 * For further information about OpenCms, please see the
 * project website: http://www.opencms.org
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

package org.opencms.ui.components;

import org.opencms.report.A_CmsReportThread;
import org.opencms.ui.CmsVaadinUtils;
import org.opencms.ui.apps.A_CmsAttributeAwareApp;
import org.opencms.ui.report.CmsReportWidget;

import java.util.Collections;

import com.vaadin.ui.Button;
import com.vaadin.ui.Button.ClickEvent;
import com.vaadin.ui.Button.ClickListener;
import com.vaadin.ui.Panel;
import com.vaadin.v7.ui.VerticalLayout;

/**
 * Page to display a report.<p>
 *
 * Wraps a {@link CmsReportWidget} for the given report thread in a captioned
 * panel, starts the thread once the page is attached to the UI, and invokes a
 * caller-supplied callback when the user clicks OK.
 */
public class CmsBasicReportPage extends VerticalLayout {

    /** Serial version id. */
    private static final long serialVersionUID = 1L;

    /** The OK button. */
    // NOTE: m_ok, m_panel and m_reportContainer are never assigned here;
    // presumably they are injected by readAndLocalizeDesign from the declarative
    // design file — TODO confirm against CmsVaadinUtils.
    private Button m_ok;

    /** The panel for the report. */
    private Panel m_panel;

    /** The immediate parent layout of the report. */
    private VerticalLayout m_reportContainer;

    /**
     * Creates a new instance.<p>
     *
     * @param label the caption for the panel
     * @param reportThread the report thread whose output should be displayed
     * @param callback the callback to call when the user clicks OK
     */
    public CmsBasicReportPage(String label, final A_CmsReportThread reportThread, final Runnable callback) {

        CmsVaadinUtils.readAndLocalizeDesign(this, CmsVaadinUtils.getWpMessagesForCurrentLocale(), null);
        m_panel.setCaption(label);
        CmsReportWidget reportWidget = new CmsReportWidget(reportThread);
        reportWidget.setSizeFull();
        m_reportContainer.addComponent(reportWidget);
        if (reportThread != null) {
            // Defer starting the report thread until the component is attached,
            // and only start it if it has not been started before (state NEW),
            // so re-attachment does not attempt a second start.
            addAttachListener(new AttachListener() {

                private static final long serialVersionUID = 1L;

                public void attach(AttachEvent event) {

                    if (reportThread.getState() == Thread.State.NEW) {
                        reportThread.start();
                    }
                }
            });
        }
        setSpacing(true);
        // The OK button simply delegates to the caller-provided callback.
        m_ok.addClickListener(new ClickListener() {

            private static final long serialVersionUID = 1L;

            public void buttonClick(ClickEvent event) {

                callback.run();
            }
        });
        // Ask the enclosing app layout to give this page full height.
        setData(Collections.singletonMap(A_CmsAttributeAwareApp.ATTR_MAIN_HEIGHT_FULL, Boolean.TRUE));
    }
}
{ "pile_set_name": "Github" }
package main

import (
	"errors"
	"net/http"
	"testing"

	"github.com/steinfletcher/apitest"
)

// TestGetUser_Success drives GET /user through the app's router with the
// outbound calls to the preferences and user services stubbed by the two
// mocks below, and asserts the combined 200 response body.
func TestGetUser_Success(t *testing.T) {
	apitest.New().
		Mocks(getPreferencesMock, getUserMock).
		Handler(newApp().Router).
		Get("/user").
		Expect(t).
		Status(http.StatusOK).
		Body(`{"name": "jon", "is_contactable": true}`).
		End()
}

// getPreferencesMock stubs GET /preferences/12345 with a fixed JSON body.
// It also installs a custom matcher that rejects any request whose URL
// scheme is not plain "http".
var getPreferencesMock = apitest.NewMock().
	Get("/preferences/12345").
	AddMatcher(func(r *http.Request, mr *apitest.MockRequest) error {
		// Custom matching func for URL Scheme
		if r.URL.Scheme != "http" {
			return errors.New("request did not have 'http' scheme")
		}
		return nil
	}).
	RespondWith().
	Body(`{"is_contactable": true}`).
	Status(http.StatusOK).
	End()

// getUserMock stubs GET /user/12345 with a fixed user record.
var getUserMock = apitest.NewMock().
	Get("/user/12345").
	RespondWith().
	Body(`{"name": "jon", "id": "1234"}`).
	Status(http.StatusOK).
	End()
{ "pile_set_name": "Github" }
/*
 * Copyright (C) 2010 Piotr Jaroszyński <p.jaroszynski@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

FILE_LICENCE(GPL2_OR_LATER);

#include <ipxe/uaccess.h>

/** @file
 *
 * iPXE user access API for linux
 *
 */

/* Select the "linux" inline implementations as the providers of each
 * user-access primitive (see ipxe/uaccess.h for the macro definition). */
PROVIDE_UACCESS_INLINE(linux, phys_to_user);
PROVIDE_UACCESS_INLINE(linux, user_to_phys);
PROVIDE_UACCESS_INLINE(linux, virt_to_user);
PROVIDE_UACCESS_INLINE(linux, user_to_virt);
PROVIDE_UACCESS_INLINE(linux, userptr_add);
PROVIDE_UACCESS_INLINE(linux, memcpy_user);
PROVIDE_UACCESS_INLINE(linux, memmove_user);
PROVIDE_UACCESS_INLINE(linux, memset_user);
PROVIDE_UACCESS_INLINE(linux, strlen_user);
PROVIDE_UACCESS_INLINE(linux, memchr_user);
{ "pile_set_name": "Github" }
/* eslint-env node */
import React from 'react';
import { render, Workspace, requireComponent } from 'workflow-react';

// Resolve the Atom component from the workflow-app-atom package.
const { Atom } = requireComponent('workflow-app-atom');

// The workflow definition: a single workspace containing one Atom component
// pointed at this very file.
export const flow = render(
  <Workspace name={'workflow-app-atom'}>
    <Atom file={__filename} />
  </Workspace>
);
{ "pile_set_name": "Github" }
enabled: false image: tag: "20200918134708" port_number: 9191 log_level: "INFO" os_user_domain_name: "Default" os_project_name: "master" os_project_domain_name: "ccadmin" # Deploy Ironic Prometheus alerts. alerts: # Name of the Prometheus to which the alerts should be assigned to. prometheus: openstack
{ "pile_set_name": "Github" }
# Formats a duration in seconds as a short human-readable string:
# >= 0.99s is rounded to two decimals with an 's' suffix, otherwise
# the value is shown as whole milliseconds with an 'ms' suffix.
function Get-HumanTime($Seconds) {
    if($Seconds -gt 0.99) {
        $time = [math]::Round($Seconds, 2)
        $unit = 's'
    }
    else {
        $time = [math]::Floor($Seconds * 1000)
        $unit = 'ms'
    }
    return "$time$unit"
}

# Resolves a possibly relative path against PowerShell's current file system
# location (which may differ from the process working directory) without
# requiring the target to exist.
function GetFullPath ([string]$Path) {
    if (-not [System.IO.Path]::IsPathRooted($Path)) {
        $Path = Join-Path $ExecutionContext.SessionState.Path.CurrentFileSystemLocation $Path
    }
    return $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($Path)
}

# Dispatches a result export to the requested format; only the two NUnit
# variants are supported.
function Export-PesterResults {
    param (
        $PesterState,
        [string] $Path,
        [string] $Format
    )

    switch ($Format) {
        'LegacyNUnitXml' { Export-NUnitReport -PesterState $PesterState -Path $Path -LegacyFormat }
        'NUnitXml' { Export-NUnitReport -PesterState $PesterState -Path $Path }

        default { throw "'$Format' is not a valid Pester export format." }
    }
}

# Writes the full NUnit XML report for a Pester run to the given file path.
function Export-NUnitReport {
    param (
        [parameter(Mandatory=$true,ValueFromPipeline=$true)]
        $PesterState,

        [parameter(Mandatory=$true)]
        [String]$Path,

        [switch] $LegacyFormat
    )

    #the xmlwriter create method can resolve relatives paths by itself. but its current directory might
    #be different from what PowerShell sees as the current directory so I have to resolve the path beforehand
    #working around the limitations of Resolve-Path
    $Path = GetFullPath -Path $Path

    $settings = New-Object -TypeName Xml.XmlWriterSettings -Property @{
        Indent = $true
        NewLineOnAttributes = $false
    }

    $xmlWriter = $null
    try {
        $xmlWriter = [Xml.XmlWriter]::Create($Path,$settings)

        Write-NUnitReport -XmlWriter $xmlWriter -PesterState $PesterState -LegacyFormat:$LegacyFormat

        $xmlWriter.Flush()
    }
    finally {
        # Close the writer even on failure, ignoring any error raised by Close itself.
        if ($null -ne $xmlWriter) {
            try { $xmlWriter.Close() } catch {}
        }
    }
}

# Emits the <test-results> root element and its contents.
function Write-NUnitReport($PesterState, [System.Xml.XmlWriter] $XmlWriter, [switch] $LegacyFormat) {
    # Write the XML Declaration
    $XmlWriter.WriteStartDocument($false)

    # Write Root Element
    $xmlWriter.WriteStartElement('test-results')

    Write-NUnitTestResultAttributes @PSBoundParameters
    Write-NUnitTestResultChildNodes @PSBoundParameters

    $XmlWriter.WriteEndElement()
}

# Writes the attributes of the <test-results> root element (run counts,
# schema location, date/time of the export).
function Write-NUnitTestResultAttributes($PesterState, [System.Xml.XmlWriter] $XmlWriter, [switch] $LegacyFormat) {
    $XmlWriter.WriteAttributeString('xmlns','xsi', $null, 'http://www.w3.org/2001/XMLSchema-instance')
    $XmlWriter.WriteAttributeString('xsi','noNamespaceSchemaLocation', [Xml.Schema.XmlSchema]::InstanceNamespace , 'nunit_schema_2.5.xsd')
    $XmlWriter.WriteAttributeString('name','Pester')
    $XmlWriter.WriteAttributeString('total', $PesterState.TotalCount)
    $XmlWriter.WriteAttributeString('errors', '0')
    $XmlWriter.WriteAttributeString('failures', $PesterState.FailedCount)
    $XmlWriter.WriteAttributeString('not-run', '0')
    # Pester's "Pending" results are mapped onto NUnit's "inconclusive" count.
    $XmlWriter.WriteAttributeString('inconclusive', $PesterState.PendingCount)
    $XmlWriter.WriteAttributeString('ignored', '0')
    $XmlWriter.WriteAttributeString('skipped', $PesterState.SkippedCount)
    $XmlWriter.WriteAttributeString('invalid', '0')
    # Capture a single timestamp so the date and time attributes agree.
    $date = Get-Date
    $XmlWriter.WriteAttributeString('date', (Get-Date -Date $date -Format 'yyyy-MM-dd'))
    $XmlWriter.WriteAttributeString('time', (Get-Date -Date $date -Format 'HH:mm:ss'))
}

# Writes the children of <test-results>: environment and culture info,
# then the global test-suite wrapping all Describe blocks.
function Write-NUnitTestResultChildNodes($PesterState, [System.Xml.XmlWriter] $XmlWriter, [switch] $LegacyFormat) {
    Write-NUnitEnvironmentInformation @PSBoundParameters
    Write-NUnitCultureInformation @PSBoundParameters

    $XmlWriter.WriteStartElement('test-suite')
    Write-NUnitGlobalTestSuiteAttributes @PSBoundParameters

    $XmlWriter.WriteStartElement('results')
    Write-NUnitDescribeElements @PSBoundParameters
    $XmlWriter.WriteEndElement()

    $XmlWriter.WriteEndElement()
}

# Writes the <environment> element from the key/value pairs returned by
# Get-RunTimeEnvironment.
function Write-NUnitEnvironmentInformation($PesterState, [System.Xml.XmlWriter] $XmlWriter, [switch] $LegacyFormat) {
    $XmlWriter.WriteStartElement('environment')

    $environment = Get-RunTimeEnvironment
    foreach ($keyValuePair in $environment.GetEnumerator()) {
        $XmlWriter.WriteAttributeString($keyValuePair.Name, $keyValuePair.Value)
    }

    $XmlWriter.WriteEndElement()
}

# Writes the <culture-info> element with the current thread's cultures.
function Write-NUnitCultureInformation($PesterState, [System.Xml.XmlWriter] $XmlWriter, [switch] $LegacyFormat) {
    $XmlWriter.WriteStartElement('culture-info')
    $XmlWriter.WriteAttributeString('current-culture', ([System.Threading.Thread]::CurrentThread.CurrentCulture).Name)
    $XmlWriter.WriteAttributeString('current-uiculture', ([System.Threading.Thread]::CurrentThread.CurrentUiCulture).Name)
    $XmlWriter.WriteEndElement()
}

# Writes the attributes of the outermost <test-suite> summarizing the run.
function Write-NUnitGlobalTestSuiteAttributes($PesterState, [System.Xml.XmlWriter] $XmlWriter, [switch] $LegacyFormat) {
    $XmlWriter.WriteAttributeString('type', 'Powershell')

    # TODO: This used to be writing $PesterState.Path, back when that was a single string (and existed.)
    #       Better would be to produce a test suite for each resolved file, rather than for the value
    #       of the path that was passed to Invoke-Pester.
    $XmlWriter.WriteAttributeString('name', 'Pester')
    $XmlWriter.WriteAttributeString('executed', 'True')

    $isSuccess = $PesterState.FailedCount -eq 0
    $result = Get-ParentResult $PesterState
    $XmlWriter.WriteAttributeString('result', $result)
    $XmlWriter.WriteAttributeString('success',[string]$isSuccess)
    $XmlWriter.WriteAttributeString('time',(Convert-TimeSpan $PesterState.Time))
    $XmlWriter.WriteAttributeString('asserts','0')
}

# Emits one nested <test-suite> element per Describe block, grouping
# the flat TestResult list by its Describe property.
function Write-NUnitDescribeElements($PesterState, [System.Xml.XmlWriter] $XmlWriter, [switch] $LegacyFormat) {
    $Describes = $PesterState.TestResult | Group-Object -Property Describe
    if ($null -ne $Describes) {
        foreach ($currentDescribe in $Describes) {
            $DescribeInfo = Get-TestSuiteInfo $currentDescribe

            #Write test suites
            $XmlWriter.WriteStartElement('test-suite')

            # The suite "type" label differs between the two supported formats.
            if ($LegacyFormat) { $suiteType = 'PowerShell' } else { $suiteType = 'TestFixture' }

            Write-NUnitTestSuiteAttributes -TestSuiteInfo $DescribeInfo -TestSuiteType $suiteType -XmlWriter $XmlWriter -LegacyFormat:$LegacyFormat

            $XmlWriter.WriteStartElement('results')

            Write-NUnitDescribeChildElements -TestResults $currentDescribe.Group -XmlWriter $XmlWriter -LegacyFormat:$LegacyFormat -DescribeName $DescribeInfo.Name

            $XmlWriter.WriteEndElement()
            $XmlWriter.WriteEndElement()
        }
    }
}

# Builds a summary hashtable (name, description, total time, success flag,
# result message) for a group of test results belonging to one suite.
function Get-TestSuiteInfo ([Microsoft.PowerShell.Commands.GroupInfo]$TestSuiteGroup) {
    $suite = @{
        resultMessage = 'Failure'
        success = 'False'
        totalTime = '0.0'
        name = $TestSuiteGroup.Name
        description = $TestSuiteGroup.Name
    }

    #calculate the time first, I am converting the time into string in the TestCases
    $suite.totalTime = (Get-TestTime $TestSuiteGroup.Group)
    $suite.success = (Get-TestSuccess $TestSuiteGroup.Group)
    $suite.resultMessage = Get-GroupResult $TestSuiteGroup.Group
    $suite
}

# Sums the time of all tests in the group and returns it as a string
# via Convert-TimeSpan.
function Get-TestTime($tests) {
    [TimeSpan]$totalTime = 0;
    if ($tests) {
        foreach ($test in $tests) {
            $totalTime += $test.time
        }
    }

    Convert-TimeSpan -TimeSpan $totalTime
}

# Converts a TimeSpan to total seconds as a string with up to four
# decimal places; $null/zero becomes '0'.
function Convert-TimeSpan {
    param (
        [Parameter(ValueFromPipeline=$true)]
        $TimeSpan
    )
    process {
        if ($TimeSpan) {
            [string][math]::round(([TimeSpan]$TimeSpan).totalseconds,4)
        }
        else {
            '0'
        }
    }
}

# Returns 'True' when every test in the group passed, otherwise 'False'.
function Get-TestSuccess($tests) {
    $result = $true
    if ($tests) {
        foreach ($test in $tests) {
            if (-not $test.Passed) {
                $result = $false
                break
            }
        }
    }
    [String]$result
}

# Writes the standard attributes shared by all <test-suite> elements.
function Write-NUnitTestSuiteAttributes($TestSuiteInfo, [System.Xml.XmlWriter] $XmlWriter, [string] $TestSuiteType, [switch] $LegacyFormat) {
    $XmlWriter.WriteAttributeString('type', $TestSuiteType)
    $XmlWriter.WriteAttributeString('name', $TestSuiteInfo.name)
    $XmlWriter.WriteAttributeString('executed', 'True')
    $XmlWriter.WriteAttributeString('result', $TestSuiteInfo.resultMessage)
    $XmlWriter.WriteAttributeString('success', $TestSuiteInfo.success)
    $XmlWriter.WriteAttributeString('time',$TestSuiteInfo.totalTime)
    $XmlWriter.WriteAttributeString('asserts','0')

    # The legacy format predates the description attribute.
    if (-not $LegacyFormat) {
        $XmlWriter.WriteAttributeString('description', $TestSuiteInfo.Description)
    }
}

# Writes the test cases of one Describe block; parameterized test cases
# are additionally wrapped in a 'ParameterizedTest' sub-suite.
function Write-NUnitDescribeChildElements([object[]] $TestResults, [System.Xml.XmlWriter] $XmlWriter, [switch] $LegacyFormat, [string] $DescribeName) {
    $suites = $TestResults | Group-Object -Property ParameterizedSuiteName

    foreach ($suite in $suites) {
        # A non-empty group name means these results came from a
        # parameterized (TestCases) It block.
        if ($suite.Name) {
            $suiteInfo = Get-TestSuiteInfo -TestSuiteGroup $suite

            $XmlWriter.WriteStartElement('test-suite')

            if (-not $LegacyFormat) {
                $suiteInfo.Name = "$DescribeName.$($suiteInfo.Name)"
            }

            Write-NUnitTestSuiteAttributes -TestSuiteInfo $suiteInfo -TestSuiteType 'ParameterizedTest' -XmlWriter $XmlWriter -LegacyFormat:$LegacyFormat

            $XmlWriter.WriteStartElement('results')
        }

        Write-NUnitTestCaseElements -TestResults $suite.Group -XmlWriter $XmlWriter -LegacyFormat:$LegacyFormat -DescribeName $DescribeName -ParameterizedSuiteName $suite.Name

        if ($suite.Name) {
            $XmlWriter.WriteEndElement()
            $XmlWriter.WriteEndElement()
        }
    }
}

# Writes one <test-case> element per test result.
function Write-NUnitTestCaseElements([object[]] $TestResults, [System.Xml.XmlWriter] $XmlWriter, [switch] $LegacyFormat, [string] $DescribeName, [string] $ParameterizedSuiteName) {
    foreach ($testResult in $TestResults) {
        $XmlWriter.WriteStartElement('test-case')
        Write-NUnitTestCaseAttributes -TestResult $testResult -XmlWriter $XmlWriter -LegacyFormat:$LegacyFormat -DescribeName $DescribeName -ParameterizedSuiteName $ParameterizedSuiteName
        $XmlWriter.WriteEndElement()
    }
}

# Writes the attributes (and, for failures, child elements) of one
# <test-case>. In the non-legacy format the case name is qualified with
# the Describe name and any test-case parameters.
function Write-NUnitTestCaseAttributes($TestResult, [System.Xml.XmlWriter] $XmlWriter, [switch] $LegacyFormat, [string] $DescribeName, [string] $ParameterizedSuiteName) {
    $testName = $TestResult.Name

    if (-not $LegacyFormat) {
        # For parameterized cases, append the parameter values to the name,
        # e.g. "adds numbers(1,2)".
        if ($testName -eq $ParameterizedSuiteName) {
            $paramString = ''
            if ($null -ne $TestResult.Parameters) {
                $params = @(
                    foreach ($value in $TestResult.Parameters.Values) {
                        if ($null -eq $value) {
                            'null'
                        }
                        elseif ($value -is [string]) {
                            '"{0}"' -f $value
                        }
                        else {
                            #do not use .ToString() it uses the current culture settings
                            #and we need to use en-US culture, which [string] or .ToString([Globalization.CultureInfo]'en-us') uses
                            [string]$value
                        }
                    }
                )

                $paramString = $params -join ','
            }

            $testName = "$testName($paramString)"
        }

        $testName = "$DescribeName.$testName"

        $XmlWriter.WriteAttributeString('description', $TestResult.Name)
    }

    $XmlWriter.WriteAttributeString('name', $testName)
    $XmlWriter.WriteAttributeString('executed', 'True')
    $XmlWriter.WriteAttributeString('time', (Convert-TimeSpan $TestResult.Time))
    $XmlWriter.WriteAttributeString('asserts', '0')
    $XmlWriter.WriteAttributeString('success', $TestResult.Passed)

    # Map Pester's result names onto NUnit's result vocabulary.
    switch ($TestResult.Result) {
        Passed {
            $XmlWriter.WriteAttributeString('result', 'Success')
            break
        }
        Skipped {
            $XmlWriter.WriteAttributeString('result', 'Skipped')
            break
        }
        Pending {
            $XmlWriter.WriteAttributeString('result', 'Inconclusive')
            break
        }
        Failed {
            $XmlWriter.WriteAttributeString('result', 'Failure')
            $XmlWriter.WriteStartElement('failure')
            $xmlWriter.WriteElementString('message', $TestResult.FailureMessage)
            $XmlWriter.WriteElementString('stack-trace', $TestResult.StackTrace)
            $XmlWriter.WriteEndElement() # Close failure tag
            break
        }
    }
}

# Collects host/OS information used to populate the <environment> element.
function Get-RunTimeEnvironment() {
    $osSystemInformation = (Get-WmiObject Win32_OperatingSystem)
    @{
        'nunit-version' = '2.5.8.0'
        'os-version' = $osSystemInformation.Version
        platform = $osSystemInformation.Name
        cwd = (Get-Location).Path #run path
        'machine-name' = $env:ComputerName
        user = $env:Username
        'user-domain' = $env:userDomain
        'clr-version' = [string]$PSVersionTable.ClrVersion
    }
}

# Signals the hosting application's desired exit code (failed test count).
function Exit-WithCode ($FailedCount) {
    $host.SetShouldExit($FailedCount)
}

# Derives the overall run result from the aggregate counters on the state.
function Get-ParentResult ($InputObject) {
    #I am not sure about the result precedence, and can't find any good source
    #TODO: Confirm this is the correct order of precedence
    if ($inputObject.FailedCount -gt 0) { return 'Failure' }
    if ($InputObject.SkippedCount -gt 0) { return 'Skipped' }
    if ($InputObject.PendingCount -gt 0) { return 'Inconclusive' }
    return 'Success'
}

# Derives a suite result from a collection of individual test results.
function Get-GroupResult ($InputObject) {
    #I am not sure about the result precedence, and can't find any good source
    #TODO: Confirm this is the correct order of precedence
    if ($InputObject | Where {$_.Result -eq 'Failed'}) { return 'Failure' }
    if ($InputObject | Where {$_.Result -eq 'Skipped'}) { return 'Skipped' }
    if ($InputObject | Where {$_.Result -eq 'Pending'}) { return 'Inconclusive' }
    return 'Success'
}
{ "pile_set_name": "Github" }
#!/bin/bash
# Stage kRPC build artifacts for S3 deployment.
# Runs under Travis CI and relies on the TRAVIS_* environment variables.

set -ev

# Pull-request builds are staged under pr/<number>; branch builds under the branch name.
if [[ $TRAVIS_PULL_REQUEST != "false" ]]; then
  NAME="pr/$TRAVIS_PULL_REQUEST"
else
  NAME=$TRAVIS_BRANCH
fi
# TRAVIS_JOB_NUMBER has the form <build>.<job>; keep only the build number.
JOB_NUMBER=$(echo "$TRAVIS_JOB_NUMBER" | cut -d. -f1)
DEPLOYPATH="s3-deploy/deploy/$NAME/$JOB_NUMBER"
# $(...) instead of legacy backticks; quote every expansion to avoid
# word splitting and globbing (ShellCheck SC2086).
VERSION=$(tools/krpc-version.sh)
echo "$VERSION"

rm -rf "$DEPLOYPATH"
mkdir -p "$DEPLOYPATH"

# Copy archives
cp "bazel-bin/krpc-$VERSION.zip" "$DEPLOYPATH/"
cp "bazel-bin/tools/krpctools/krpctools-$VERSION.zip" "$DEPLOYPATH/"
cp "bazel-bin/krpc-genfiles-$VERSION.zip" "$DEPLOYPATH/"
cp "bazel-bin/tools/TestServer/TestServer-$VERSION.zip" "$DEPLOYPATH/"

# Extract release archive
(cd "$DEPLOYPATH"; unzip -q "krpc-$VERSION.zip")

# Extract documentation
mkdir "$DEPLOYPATH/doc"
cp bazel-bin/doc/html.zip "$DEPLOYPATH/doc/html.zip"
(cd "$DEPLOYPATH/doc"; unzip -q html.zip; rm -f html.zip)
{ "pile_set_name": "Github" }
#pragma once #include "send_packet_data.h" struct Send_Packet_Data { PNET_BUFFER m_net_buffer; UINT m_len; PMDL m_mdl; PVOID m_addr; unsigned char m_locked; unsigned char* m_data; HANDLE m_map_process; }; #define free_send_packet_data(packet_data) { \ if (packet_data) { \ if (packet_data->m_mdl) { \ if (packet_data->m_addr) \ if (packet_data->m_map_process == PsGetCurrentProcessId()) \ MmUnmapLockedPages(packet_data->m_addr, \ packet_data->m_mdl); \ if (packet_data->m_locked) \ MmUnlockPages(packet_data->m_mdl); \ IoFreeMdl(packet_data->m_mdl); \ } \ ExFreePoolWithTag(packet_data, 'tkip'); \ packet_data = NULL; \ } \ } void free_get_send_packet( PVOID context, struct RESOURCE_OBJECT_DATA* rdata) { struct Send_Packet_Data* packet_data; PHWT_ADAPTER Adapter; PNET_BUFFER_LIST NetBufferList; packet_data = (struct Send_Packet_Data*)rdata->m_associated.m_get_send_packet_out.Packet; Adapter = (PHWT_ADAPTER)context; NetBufferList = NET_BUFFER_BUFFER_LIST(packet_data->m_net_buffer); NET_BUFFER_LIST_STATUS(NetBufferList) = NDIS_STATUS_FAILURE; switch(InterlockedDecrement(&NET_BUFFER_LIST_BUFFER_COUNT(NetBufferList))) { case 0: NdisMSendNetBufferListsComplete(Adapter->AdapterHandle, NetBufferList, 0); break; default: break; } free_send_packet_data(packet_data); } PHWT_ADAPTER GetMPAdapter(PDEVICE_OBJECT device) { return (PHWT_ADAPTER)GlobalData.AdapterList.Flink; }
{ "pile_set_name": "Github" }
# @cumulus/python-reference-task

[`python_reference_workflow`]: https://github.com/nasa/cumulus/blob/master/example/cumulus-tf/python_reference_workflow.tf

This 'task' is a reference task that is included with Cumulus to allow integration testing of the [`cumulus-message-adapter`](https://github.com/nasa/cumulus-message-adapter) with a built python lambda.

## Use

Developmental use of this lambda is intended to be simple - the task returns a static processing output, and integration tests can then be built against the `Reference Task` step in the [`python_reference_workflow`].

## Development

Updates should generally consist of updates to the included `requirements.txt`, as the purpose of this task is to ensure compatibility with updates to the [`cumulus-message-adapter-python`](https://github.com/nasa/cumulus-message-adapter-python) client library and the [`cumulus-message-adapter`](https://github.com/nasa/cumulus-message-adapter) deployed with Cumulus via the CMA lambda layer ([`cumulus-message-adapter-python`](https://github.com/nasa/cumulus-message-adapter-python) will utilize the layer added to the lambda by default if `CMA_DIR` is set).

The spec test at [`PythonReferenceSpec`](https://github.com/nasa/cumulus/blob/master/example/spec/parallel/pythonReferenceTests/PythonReferenceSpec.js) utilizes this task in combination with the configuration in [`python_reference_workflow`] to validate the task's runs/outputs are as expected for this purpose.

### Requirements

To develop against this task, you should be using python > 3.6 (CMA compatibility is baselined at 3.6). Once you have a python env enabled:

```bash
pip install -r requirements.txt
```

### Build

```bash
npm run prepare
```

The above command will build the lambda and put a .zip for deployment in ./dist
{ "pile_set_name": "Github" }