alinakazi/apache-royale-0.9.8-bin-js-swf
royale-compiler/compiler/src/main/java/org/apache/royale/compiler/internal/as/codegen/DirectiveProcessor.java
/* * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package org.apache.royale.compiler.internal.as.codegen; import java.util.Collection; import org.apache.royale.compiler.internal.definitions.FunctionDefinition; import org.apache.royale.compiler.internal.tree.as.ClassNode; import org.apache.royale.compiler.internal.tree.as.FunctionNode; import org.apache.royale.compiler.internal.tree.as.IdentifierNode; import org.apache.royale.compiler.internal.tree.as.ImportNode; import org.apache.royale.compiler.internal.tree.as.InterfaceNode; import org.apache.royale.compiler.internal.tree.as.NamespaceIdentifierNode; import org.apache.royale.compiler.internal.tree.as.PackageNode; import org.apache.royale.compiler.internal.tree.as.VariableNode; import static org.apache.royale.abc.ABCConstants.TRAIT_Getter; import static org.apache.royale.abc.ABCConstants.TRAIT_Setter; import org.apache.royale.compiler.problems.BURMDiagnosticNotAllowedHereProblem; import org.apache.royale.compiler.problems.ICompilerProblem; import org.apache.royale.compiler.tree.ASTNodeID; import org.apache.royale.compiler.tree.as.IASNode; import org.apache.royale.compiler.tree.as.IFunctionNode; import org.apache.royale.compiler.tree.mxml.IMXMLDocumentNode; /** * A DirectiveProcessor is the outer shell of the code generator; * the DirectiveProcessor contains logic to navigate the directives' * ASTs, and subclasses implement "builder" methods to translate * specific directives into ABC terms. */ class DirectiveProcessor { private Collection<ICompilerProblem> problems = null; protected DirectiveProcessor(Collection<ICompilerProblem> problems) { this.problems = problems; } /** * Translate a ClassNode AST into ABC. * Subclasses should override this if * they can process class definitions. * @param c - the class' AST. */ void declareClass(ClassNode c) { problems.add(new BURMDiagnosticNotAllowedHereProblem(c)); } /** * Translate an InterfaceNode AST into ABC. * Subclasses should override this if * they can process interface definitions. * @param in - the interface's AST. */ void declareInterface(InterfaceNode in) { problems.add(new BURMDiagnosticNotAllowedHereProblem(in)); } /** * Translate a FunctionNode AST into ABC. * Subclasses should override this if * they can process function definitions. * @param f - the function's AST. */ void declareFunction(FunctionNode f) { problems.add(new BURMDiagnosticNotAllowedHereProblem(f)); } /** * Translate a PackageNode AST into ABC. * Subclasses should override this if * they can process packages. * @param p - the package's AST. */ void declarePackage(PackageNode p) { problems.add(new BURMDiagnosticNotAllowedHereProblem(p)); } /** * Translate a VariableNode AST into ABC. * Subclasses should override this if * they can process variable definitions. * @param var - the variable's AST.
*/ void declareVariable(VariableNode var) { problems.add(new BURMDiagnosticNotAllowedHereProblem(var)); } /** * Translate a VariableNode AST into ABC, adding bindable support. * Subclasses should override this if * they can process bindable variable definitions. * @param var - the variable's AST. */ void declareBindableVariable(VariableNode var) { problems.add(new BURMDiagnosticNotAllowedHereProblem(var)); } /** * Translate an MXMLDocumentNode AST into ABC. * Subclasses should override this if * they can process document nodes. * @param d - the document's AST. */ void declareMXMLDocument(IMXMLDocumentNode d) { problems.add(new BURMDiagnosticNotAllowedHereProblem(d)); } /** * Translate other directives' ASTs into ABC. * Subclasses should override this if * they can process ad-hoc directives. * @param n - the directive's AST. */ void processDirective(IASNode n) { problems.add(new BURMDiagnosticNotAllowedHereProblem(n)); } /** * Translate a NamespaceIdentifierNode AST into ABC. * Subclasses should override this if * they can process namespace identifiers. * @param ns - the namespace identifier's AST. */ void processNamespaceIdentifierDirective(NamespaceIdentifierNode ns) { problems.add(new BURMDiagnosticNotAllowedHereProblem(ns)); } /** * Translate an ImportNode AST into ABC. * Subclasses should override this if * they can process imports. * @param imp - the import's AST. */ void processImportDirective(ImportNode imp) { processDirective(imp); } /** * Look through a CONFIG block marker into its contents. * @param n - the CONFIG block marker. */ void processConfigBlock(IASNode n) { traverse(n); } /** * Traverse the children of a root node and process them. * @param root - the root node. The root is not processed. */ void traverse(IASNode root) { for ( int i = 0; root != null && i < root.getChildCount(); i++ ) { processNode(root.getChild(i)); } } /** * Process an individual directive. * @param n - the directive's AST. */ void processNode(IASNode n) { // In malformed trees, children can be null. // For example, an MXMLFileNode may not have an MXMLDocumentNode. if (n == null) return; switch ( n.getNodeID() ) { case ClassID: declareClass((ClassNode)n); break; case InterfaceID: declareInterface((InterfaceNode)n); break; case FunctionID: case GetterID: case SetterID: declareFunction((FunctionNode)n); break; case ImportID: processImportDirective((ImportNode)n); break; case NamespaceIdentifierID: processNamespaceIdentifierDirective((NamespaceIdentifierNode)n); break; case PackageID: declarePackage((PackageNode)n); break; case VariableID: declareVariable((VariableNode)n); break; case BindableVariableID: declareBindableVariable((VariableNode)n); break; case MXMLDocumentID: declareMXMLDocument((IMXMLDocumentNode)n); break; case MetaTagsID: break; case ConfigBlockID: processConfigBlock(n); break; default: // hack to allow override as a separate keyword in a conditional compile block. // the AST thinks it is a property on the class named override. // This allows less coding when in SWF the base class has a method that needs // overriding but the JS base class does not.
Instead of writing // COMPILE::SWF // override public function foo() { // method body // } // COMPILE::JS // public function foo() { // an exact copy of method body // } // we want to allow: // COMPILE::SWF { override } // public function foo() { // method body // } if (n.getNodeID() == ASTNodeID.IdentifierID) { IdentifierNode node = (IdentifierNode)n; if (node.getName().equals("override")) { IASNode parent = node.getParent(); if (parent.getNodeID() == ASTNodeID.ConfigBlockID) { IASNode parentOfMethods = parent.getParent(); int functionCount = parentOfMethods.getChildCount(); for (int i = 0; i < functionCount; i++) { IASNode child = parentOfMethods.getChild(i); if (child == parent) { // examine the next node child = parentOfMethods.getChild(i + 1); if (child instanceof IFunctionNode) { // convince the compiler that this is now an override IFunctionNode fnode = (IFunctionNode)child; FunctionDefinition fdef = (FunctionDefinition)fnode.getDefinition(); fdef.setOverride(); return; } } } } } } processDirective(n); } } /** * Getter/Setter functions need to be declared with a specific * trait kind; do so here. * @param func - a FunctionNode. * @param default_kind - the trait kind to use if the function * is not a getter or setter. Varies depending on caller's context. * @return the trait kind to use to declare the input function. */ public static int functionTraitKind(FunctionNode func, int default_kind) { switch(func.getNodeID()) { case GetterID: return TRAIT_Getter; case SetterID: return TRAIT_Setter; default: return default_kind; } } }
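The class above is deliberately a shell: traverse() and processNode() walk the directive ASTs and dispatch on node ID, while every declare*/process* builder defaults to reporting a BURMDiagnosticNotAllowedHereProblem, so a subclass overrides only the directives that are legal in its context. A minimal sketch of such a subclass, assuming it lives in the same package; the class name and method bodies are hypothetical, and TRAIT_Method is assumed to be importable from ABCConstants alongside TRAIT_Getter and TRAIT_Setter:

// Hypothetical subclass: classes and functions are legal in this context;
// every other directive falls through to the inherited error-reporting defaults.
class ClassLevelDirectiveProcessor extends DirectiveProcessor
{
    ClassLevelDirectiveProcessor(Collection<ICompilerProblem> problems)
    {
        super(problems);
    }

    @Override
    void declareClass(ClassNode c)
    {
        // translate the nested class definition instead of flagging a problem
    }

    @Override
    void declareFunction(FunctionNode f)
    {
        // getters and setters carry their own trait kinds; plain methods use the default
        int traitKind = functionTraitKind(f, TRAIT_Method); // TRAIT_Method assumed from ABCConstants
        // ... emit a trait of the computed kind ...
    }
}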
zelinskiyrk/ftl-jb-001
src/com/company/tasks/task022/Fahrenheit.java
package com.company.tasks.task022; public class Fahrenheit implements Converting { @Override public double getValue(double value) { // use floating-point division: integer 9 / 5 truncates to 1 and breaks the formula return (9.0 / 5.0) * value + 32; } }
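With the division done in floating point, the standard reference points come out right; a quick usage sketch against the Converting interface implemented above:

Converting toFahrenheit = new Fahrenheit();
System.out.println(toFahrenheit.getValue(0));   // 32.0  (freezing point of water)
System.out.println(toFahrenheit.getValue(100)); // 212.0 (boiling point of water)
System.out.println(toFahrenheit.getValue(37));  // 98.6  (human body temperature)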
sahanperera00/C-programming
Tutorials/Tutorial 11/Question1-Thushara.c
#include<stdio.h> #include<string.h> int main(void) { char word[11]; /* 10 characters plus the terminating NUL that "%10s" writes */ int length, count, flag = 1; printf("Enter any word : "); scanf("%10s", word); length = strlen(word); for (count = 0; count <= (length/2); count++) { if (word[count] != word[(length - 1) - count]) { flag = 0; break; } else { flag = 1; } } if (flag == 1) { printf("\n%s is a palindrome\n", word); } else { printf("\n%s is not a palindrome\n", word); } return 0; }
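The same two-pointer comparison translates directly to other languages; here is a minimal Java sketch (method name hypothetical) that stops at the first mismatched pair of mirrored characters:

// Returns true when word reads the same forwards and backwards.
static boolean isPalindrome(String word) {
    int length = word.length();
    for (int i = 0; i < length / 2; i++) {
        if (word.charAt(i) != word.charAt(length - 1 - i)) {
            return false;
        }
    }
    return true;
}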
refdiff-data/gocd
server/webapp/WEB-INF/rails.new/vendor/bundle/jruby/1.9/gems/yard-0.8.7.6/spec/handlers/private_class_method_handler_spec.rb
require File.dirname(__FILE__) + '/spec_helper' describe "YARD::Handlers::Ruby::#{LEGACY_PARSER ? "Legacy::" : ""}PrivateClassMethodHandler" do before(:all) { parse_file :private_class_method_handler_001, __FILE__ } it "should handle private_class_method statement" do Registry.at('A.c').visibility.should eq :private Registry.at('A.d').visibility.should eq :private Registry.at('A.e').visibility.should eq :private end it "should fail if parameter is not String or Symbol" do undoc_error 'class Foo; X = 1; private_class_method X.new("hi"); end' undoc_error 'class Foo; X = 1; private_class_method 123; end' end unless LEGACY_PARSER # Issue #760 # https://github.com/lsegal/yard/issues/760 it "should handle singleton classes" do # Note: It's important to def a method within the singleton class or # the bug may not trigger. code = 'class SingletonClass; private_class_method :new; def self.foo; "foo"end; end' StubbedSourceParser.parse_string(code) # Should be successful. end unless LEGACY_PARSER describe "should handle reopened class" do # Modified #parse_file from '/spec/spec_helper.rb' because the second example # file was overwriting the data from the first example when trying to reopen # the class. def parse_files(files, thisfile = __FILE__, log_level = log.level, ext = '.rb.txt') Registry.clear paths = files.map { |file| File.join(File.dirname(thisfile), 'examples', file.to_s + ext) } YARD::Parser::SourceParser.parse(paths, [], log_level) end before { parse_files [ :private_class_method_handler_002, :private_class_method_handler_003 ], __FILE__ } specify do Registry.at('SingletonClass.foo').visibility.should eq :public Registry.at('SingletonClass.bar').visibility.should eq :private Registry.at('SingletonClass.baz').visibility.should eq :private Registry.at('SingletonClass.bat').visibility.should eq :public end end unless LEGACY_PARSER # reopened class end
senaademr/CS1950UFinal
src/final/abilities/abilityconstants.cpp
#include "abilityconstants.h"
BeeeOn/gateway-frc
src/zmq/ZMQConnector.h
#ifndef BEEEON_ZMQ_CONNECTOR_H #define BEEEON_ZMQ_CONNECTOR_H #include <string> #include <Poco/AtomicCounter.h> #include <Poco/SharedPtr.h> #include <zmq.hpp> #include "loop/StoppableRunnable.h" #include "util/Loggable.h" #include "zmq/ZMQMessageError.h" namespace BeeeOn { class ZMQMessage; /* * Abstract class for zmq server and client. * It contains the methods shared by the zmq client and the zmq server. For its * full functionality a run method has to be supplied, in which the program's * main loop executes. The dataServerReceive and helloServerReceive interfaces * are prepared for receiving data; the configureDataSockets and * configureHelloSockets interfaces are prepared for configuring the sockets. * * The connector holds two sockets. The hello socket serves to assign an ID so * that it is possible to communicate asynchronously over the data socket. The * data socket serves to send data asynchronously in both directions. * * By default one thread is configured for processing requests, and this * option cannot be changed. * * A zmq endpoint created by this class can communicate only over the tcp * transport, which is set in the createAddress() method. */ class ZMQConnector : public StoppableRunnable, public Loggable { public: ZMQConnector(); ~ZMQConnector(); void stop() override; void setDataServerHost(const std::string &host); void setHelloServerHost(const std::string &host); void setDataServerPort(const int port); void setHelloServerPort(const int port); protected: /* * Processing of messages from Device Manager (measured values, * command) */ virtual void dataServerReceive() = 0; /* * Processing of the hello request message from Device Manager, which * creates an identification for communication over dataSocket. */ virtual void helloServerReceive() = 0; /* * It creates an endpoint address for accepting connections. */ std::string createAddress(const std::string &host, int port); virtual void configureDataSockets() = 0; virtual void configureHelloSockets() = 0; /* * Receives a message and tries to parse it, to find out whether the * message can be forwarded. The method only guarantees the content of * the messageType attribute. Errors caused by some other attribute * being missing have to be checked separately. */ bool parseMessage(const std::string &jsonMessage, Poco::SharedPtr<zmq::socket_t> socket, ZMQMessage &msg); int sendError(const ZMQMessageError::Error errorType, const std::string message, Poco::SharedPtr<zmq::socket_t> socket); protected: Poco::AtomicCounter m_stop; std::string m_dataServerHost; std::string m_helloServerHost; int m_dataServerPort; int m_helloServerPort; zmq::context_t m_context; Poco::SharedPtr<zmq::socket_t> m_dataServerSocket; Poco::SharedPtr<zmq::socket_t> m_helloServerSocket; }; } #endif
ShawnZhong/SplitFS
tar/gnulib/lib/usleep.c
/* Pausing execution of the current thread. Copyright (C) 2009-2018 Free Software Foundation, Inc. Written by <NAME> <<EMAIL>>, 2009. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. */ /* This file is _intentionally_ light-weight. Rather than using select or nanosleep, both of which drag in external libraries on some platforms, this merely rounds up to the nearest second if usleep() does not exist. If sub-second resolution is important, then use a more powerful interface to begin with. */ #include <config.h> /* Specification. */ #include <unistd.h> #include <errno.h> #ifndef HAVE_USLEEP # define HAVE_USLEEP 0 #endif /* Sleep for MICRO microseconds, which can be greater than 1 second. Return -1 and set errno to EINVAL on range error (about 4295 seconds), or 0 on success. Interaction with SIGALARM is unspecified. */ int usleep (useconds_t micro) { unsigned int seconds = micro / 1000000; if (sizeof seconds < sizeof micro && micro / 1000000 != seconds) { errno = EINVAL; return -1; } if (!HAVE_USLEEP && micro % 1000000) seconds++; while ((seconds = sleep (seconds)) != 0); #undef usleep #if !HAVE_USLEEP # define usleep(x) 0 #endif return usleep (micro % 1000000); }
BasisTI/madre
suprimentos/src/main/java/br/com/basis/suprimentos/service/AutorizacaoFornecimentoService.java
package br.com.basis.suprimentos.service; import br.com.basis.suprimentos.domain.AutorizacaoFornecimento; import br.com.basis.suprimentos.repository.AutorizacaoFornecimentoRepository; import br.com.basis.suprimentos.repository.search.AutorizacaoFornecimentoSearchRepository; import br.com.basis.suprimentos.service.dto.AutorizacaoFornecimentoDTO; import br.com.basis.suprimentos.service.mapper.AutorizacaoFornecimentoMapper; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.springframework.data.domain.Example; import org.springframework.data.domain.ExampleMatcher; import org.springframework.data.domain.Page; import org.springframework.data.domain.Pageable; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import java.util.Optional; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; @Slf4j @RequiredArgsConstructor @Service @Transactional public class AutorizacaoFornecimentoService { private final AutorizacaoFornecimentoRepository autorizacaoFornecimentoRepository; private final AutorizacaoFornecimentoMapper autorizacaoFornecimentoMapper; private final AutorizacaoFornecimentoSearchRepository autorizacaoFornecimentoSearchRepository; public AutorizacaoFornecimentoDTO save(AutorizacaoFornecimentoDTO autorizacaoFornecimentoDTO) { log.debug("Request to save AutorizacaoFornecimento : {}", autorizacaoFornecimentoDTO); AutorizacaoFornecimento autorizacaoFornecimento = autorizacaoFornecimentoMapper.toEntity(autorizacaoFornecimentoDTO); autorizacaoFornecimento = autorizacaoFornecimentoRepository.save(autorizacaoFornecimento); AutorizacaoFornecimentoDTO result = autorizacaoFornecimentoMapper.toDto(autorizacaoFornecimento); autorizacaoFornecimentoSearchRepository.save(autorizacaoFornecimento); return result; } @Transactional(readOnly = true) public Page<AutorizacaoFornecimentoDTO> findAll(Pageable pageable, AutorizacaoFornecimentoDTO autorizacaoFornecimentoDTO) { log.debug("Request to get all AutorizacaoFornecimentos"); return autorizacaoFornecimentoRepository.findAll( Example.of(autorizacaoFornecimentoMapper.toEntity(autorizacaoFornecimentoDTO), ExampleMatcher.matching().withIgnoreCase().withStringMatcher(ExampleMatcher.StringMatcher.CONTAINING)) , pageable) .map(autorizacaoFornecimentoMapper::toDto); } @Transactional(readOnly = true) public Optional<AutorizacaoFornecimentoDTO> findOne(Long id) { log.debug("Request to get AutorizacaoFornecimento : {}", id); return autorizacaoFornecimentoRepository.findById(id) .map(autorizacaoFornecimentoMapper::toDto); } public void delete(Long id) { log.debug("Request to delete AutorizacaoFornecimento : {}", id); autorizacaoFornecimentoRepository.deleteById(id); autorizacaoFornecimentoSearchRepository.deleteById(id); } @Transactional(readOnly = true) public Page<AutorizacaoFornecimentoDTO> search(String query, Pageable pageable) { log.debug("Request to search for a page of AutorizacaoFornecimentos for query {}", query); return autorizacaoFornecimentoSearchRepository.search(queryStringQuery(query), pageable) .map(autorizacaoFornecimentoMapper::toDto); } }
daiki01240/obyte-insurance
node_modules/safe-json-parse/index.js
module.exports = SafeParse function SafeParse(obj, reviver, callback) { if (arguments.length === 2) { callback = reviver reviver = null } var json try { json = JSON.parse(obj, reviver) } catch (err) { return callback(err) } callback(null, json) }
uk-gov-mirror/dvsa.motr
motr-notifier/src/integration-test/java/uk/gov/dvsa/motr/test/integration/dynamodb/DynamoDbIntegrationHelper.java
package uk.gov.dvsa.motr.test.integration.dynamodb; import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder; import static uk.gov.dvsa.motr.test.environmant.variables.TestEnvironmentVariables.region; public class DynamoDbIntegrationHelper { /** * Builds a DynamoDB client configured for the test region * * @return DynamoDB client */ public static AmazonDynamoDB dynamoDbClient() { return AmazonDynamoDBClientBuilder.standard().withRegion(region()).build(); } }
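A typical integration test grabs the client once and reuses it; for example (the table name below is hypothetical):

AmazonDynamoDB dynamoDb = DynamoDbIntegrationHelper.dynamoDbClient();
// Any cheap call such as describeTable confirms the region and credentials are wired up.
dynamoDb.describeTable("motr-subscription"); // hypothetical table name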
haohanscm/pds
src/main/java/com/haohan/platform/service/sys/modules/pds/core/admin/impl/AdminShortcutServiceImpl.java
package com.haohan.platform.service.sys.modules.pds.core.admin.impl; import com.haohan.framework.entity.BaseResp; import com.haohan.platform.service.sys.common.utils.Collections3; import com.haohan.platform.service.sys.common.utils.StringUtils; import com.haohan.platform.service.sys.modules.pds.api.entity.req.admin.PdsDataResetApiReq; import com.haohan.platform.service.sys.modules.pds.constant.IPdsConstant; import com.haohan.platform.service.sys.modules.pds.core.admin.IPdsAdminShortcutService; import com.haohan.platform.service.sys.modules.pds.core.buyer.IBuyerOrderService; import com.haohan.platform.service.sys.modules.pds.core.common.IPdsCommonService; import com.haohan.platform.service.sys.modules.pds.core.delivery.IPdsDeliveryService; import com.haohan.platform.service.sys.modules.pds.core.operation.IPdsOperationService; import com.haohan.platform.service.sys.modules.pds.core.pss.IPdsGoodsStorageOpService; import com.haohan.platform.service.sys.modules.pds.core.summary.IPdsSummaryService; import com.haohan.platform.service.sys.modules.pds.entity.business.PdsSupplier; import com.haohan.platform.service.sys.modules.pds.entity.delivery.DeliveryFlow; import com.haohan.platform.service.sys.modules.pds.entity.order.*; import com.haohan.platform.service.sys.modules.pds.entity.req.PdsOfferOrderReq; import com.haohan.platform.service.sys.modules.pds.entity.resp.PdsSupListParams; import com.haohan.platform.service.sys.modules.pds.exception.PdsSummaryOperationException; import com.haohan.platform.service.sys.modules.pds.service.business.PdsSupplierService; import com.haohan.platform.service.sys.modules.pds.service.delivery.DeliveryFlowService; import com.haohan.platform.service.sys.modules.pds.service.order.*; import com.haohan.platform.service.sys.modules.pss.api.inf.IPssGoodsStorageService; import com.haohan.platform.service.sys.modules.pss.entity.info.PssWarehouse; import com.haohan.platform.service.sys.modules.pss.service.info.WarehouseService; import com.haohan.platform.service.sys.modules.weixin.mp.message.WxMpMessageService; import com.haohan.platform.service.sys.modules.xiaodian.constant.ICommonConstant; import com.haohan.platform.service.sys.modules.xiaodian.entity.UserOpenPlatform; import com.haohan.platform.service.sys.modules.xiaodian.exception.PdsOnekeyOperationException; import com.haohan.platform.service.sys.modules.xiaodian.exception.StorageOperationException; import org.apache.commons.collections.CollectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import javax.annotation.Resource; import java.math.BigDecimal; import java.util.Date; import java.util.HashMap; import java.util.List; /** * @author shenyu * @create 2018/12/5 */ @Service public class AdminShortcutServiceImpl implements IPdsAdminShortcutService { Logger logger = LoggerFactory.getLogger(this.getClass()); @Resource private BuyOrderService buyOrderService; @Autowired private IBuyerOrderService buyerOrderService; @Autowired private IPdsOperationService pdsOperationService; @Autowired private OfferOrderService offerOrderService; @Autowired private IPdsDeliveryService pdsDeliveryService; @Resource private IPdsGoodsStorageOpService pdsGoodsStorageOpService; @Resource private IPdsSummaryService pdsSummaryService;
@Autowired private BuyOrderDetailService buyOrderDetailService; @Autowired private SummaryOrderService summaryOrderService; @Autowired private DeliveryFlowService deliveryFlowService; @Autowired private PdsShipOrderService pdsShipOrderService; @Autowired private PdsShipOrderDetailService pdsShipOrderDetailService; @Autowired private TradeOrderService tradeOrderService; @Autowired private TradeMatchService tradeMatchService; @Autowired private PdsSupplierService pdsSupplierService; @Autowired private IPssGoodsStorageService pssGoodsStorageService; @Autowired private WarehouseService warehouseService; @Autowired private IPdsCommonService pdsCommonServiceImpl; @Autowired private WxMpMessageService wxMpMessageService; @Transactional(rollbackFor = Exception.class) @Override public BaseResp confirm(String pmId, String buySeq, Date deliveryTime) throws PdsOnekeyOperationException, PdsSummaryOperationException, StorageOperationException { BaseResp resp = BaseResp.newError(); logger.debug("--交易确认--平台确认报价--begin--\npmId:{}\nbuySeq:{}\ndeliveryTime:{}", pmId, buySeq, deliveryTime); // platform confirms the offers; throws on failure resp = pdsSummaryService.confirmOffer(pmId, buySeq, deliveryTime); logger.debug("--交易确认--平台确认报价--end--\n结果:{}", resp.getMsg()); if (!resp.isSuccess()) { throw new PdsOnekeyOperationException("确认报价出错:" + resp.getMsg()); } // buyer confirms the offers; purchase orders in the waiting-for-confirmation state BuyOrder buyOrder = new BuyOrder(); buyOrder.setPmId(pmId); buyOrder.setBuySeq(buySeq); buyOrder.setDeliveryTime(deliveryTime); buyOrder.setStatus(IPdsConstant.BuyOrderStatus.wait.getCode()); List<BuyOrder> buyOrderList = buyOrderService.findList(buyOrder); logger.debug("--交易确认--采购商确认报价--begin--\n采购单数量:{}", buyOrderList.size()); for (BuyOrder order : buyOrderList) { resp = buyerOrderService.confirmBuyOrder(order); if (!resp.isSuccess()) { throw new PdsOnekeyOperationException("确认报价出错:" + resp.getMsg()); } } logger.debug("--交易确认--采购商确认报价--end--\n"); logger.debug("--交易确认--交易匹配--begin--\n"); // trade matching; throws on failure resp = pdsSummaryService.tradeMatch(pmId, buySeq, deliveryTime); if (!resp.isSuccess()) { throw new PdsOnekeyOperationException("交易匹配出错:" + resp.getMsg()); } logger.debug("--交易确认--交易匹配--end--\n结果:{}", resp.getMsg()); logger.debug("--交易确认--生成交易单--begin--\n"); // generate the trade orders; throws on failure resp = pdsSummaryService.createTradeOrder(pmId, buySeq, deliveryTime); if (!resp.isSuccess()) { throw new PdsOnekeyOperationException("生成交易单出错:" + resp.getMsg()); } logger.debug("--交易确认--生成交易单--end--\n结果:{}", resp.getMsg()); logger.debug("--交易确认--揽货--begin--\n"); // suppliers whose goods still need pickup: current batch, filtered by offer shipping status PdsOfferOrderReq pdsOfferOrderReq = new PdsOfferOrderReq(); pdsOfferOrderReq.setPmId(pmId); pdsOfferOrderReq.setBuySeq(buySeq); pdsOfferOrderReq.setDeliveryDate(deliveryTime); // offer shipping status: waiting for stocking or waiting for pickup String pCode = IPdsConstant.OfferShipStatus.prepare.getCode(); String tCode = IPdsConstant.OfferShipStatus.take.getCode(); String[] shipStatusArray = new String[]{pCode, tCode}; pdsOfferOrderReq.setShipStatusArry(shipStatusArray); // offers that have already been accepted pdsOfferOrderReq.setStatus(IPdsConstant.OfferOrderStatus.success.getCode()); List<PdsSupListParams> supplierList = offerOrderService.findSupList(pdsOfferOrderReq); if (Collections3.isEmpty(supplierList)) { throw new PdsOnekeyOperationException("揽货出错:找不到待揽货供应商"); } logger.debug("--交易确认--揽货--list--\n供应商数量:{}", supplierList.size()); for (PdsSupListParams params : supplierList) { // operations-side pickup / goods into storage resp = pdsOperationService.supplierFreightConfirm(pmId, params.getSupplierId(), buySeq, deliveryTime); if (!resp.isSuccess()) { throw new PdsOnekeyOperationException("揽货出错:" + resp.getMsg()); } }
logger.debug("--交易确认--揽货--end--\n"); // // one-click sorting // resp = pdsAdminSortOutService.fastSortOut(deliveryTime,buySeq,pmId); // if (!resp.isSuccess()){ // throw new PdsOnekeyOperationException("分拣出错:"+resp.getMsg()); // } // // one-click truck loading and receiving // resp = loadAndArrived(pmId,buySeq,deliveryTime); // if (!resp.isSuccess()){ // throw new PdsOnekeyOperationException("装车收货失败:"+resp.getMsg()); // } return resp; } @Transactional(rollbackFor = Exception.class) @Override public BaseResp loadAndArrived(String pmId, String buySeq, Date deliveryTime) throws PdsOnekeyOperationException { BaseResp baseResp = BaseResp.newError(); TradeOrder tradeOrder = new TradeOrder(); tradeOrder.setPmId(pmId); tradeOrder.setBuySeq(buySeq); tradeOrder.setDeliveryTime(deliveryTime); try { baseResp = pdsDeliveryService.truckLoad(tradeOrder); } catch (Exception e) { throw new PdsOnekeyOperationException("装车失败", e); } if (!baseResp.isSuccess()) { throw new PdsOnekeyOperationException("装车失败:" + baseResp.getMsg()); } // update trade order status: buyer status and delivery status TradeOrder queryTrade = new TradeOrder(); queryTrade.setPmId(pmId); queryTrade.setBuySeq(buySeq); queryTrade.setDeliveryTime(deliveryTime); queryTrade.setTransStatus(IPdsConstant.TradeOrderStatus.done.getCode()); List<TradeOrder> tradeOrderList = tradeOrderService.findList(queryTrade); for (TradeOrder t : tradeOrderList) { t.setBuyerStatus(IPdsConstant.BuyerDealStatus.wait_check.getCode()); t.setDeliveryStatus(IPdsConstant.DeliveryStatus.arrived.getCode()); tradeOrderService.save(t); } // goods delivered DeliveryFlow deliveryFlow = new DeliveryFlow(); deliveryFlow.setPmId(pmId); deliveryFlow.setDeliverySeq(buySeq); deliveryFlow.setDeliveryDate(deliveryTime); List<DeliveryFlow> deliveryFlowList = deliveryFlowService.findList(deliveryFlow); for (DeliveryFlow flow : deliveryFlowList) { flow.setStatus(IPdsConstant.DeliveryStatus.arrived.getCode()); PdsShipOrder pdsShipOrder = new PdsShipOrder(); pdsShipOrder.setDeliveryId(flow.getDeliveryId()); List<PdsShipOrder> pdsShipOrderList = pdsShipOrderService.findList(pdsShipOrder); for (PdsShipOrder item : pdsShipOrderList) { item.setStatus(IPdsConstant.DeliveryStatus.arrived.getCode()); pdsShipOrderService.save(item); } deliveryFlowService.save(flow); } // after updating BuyOrderStatus, the purchase orders move to the waiting-for-receipt state BuyOrderDetail update = new BuyOrderDetail(); update.setPmId(pmId); update.setDeliveryDate(deliveryTime); update.setBuySeq(buySeq); update.setStatus(IPdsConstant.BuyOrderStatus.delivery.getCode()); update.setFinalStatus(IPdsConstant.BuyOrderStatus.arrive.getCode()); buyOrderDetailService.updateStatusBatch(update); return baseResp.success(); } @Override public BaseResp resetSummary(PdsDataResetApiReq dataResetReq) { BaseResp baseResp = BaseResp.newError(); String pmId = dataResetReq.getPmId(); Date deliveryDate = dataResetReq.getDeliveryDate(); String buySeq = dataResetReq.getBuySeq(); int affectRows = 0; // total number of affected records (excluding PSS stock-in orders) // delete delivery flows DeliveryFlow deliveryFlow = new DeliveryFlow(); deliveryFlow.setPmId(pmId); deliveryFlow.setDeliveryDate(deliveryDate); deliveryFlow.setDeliverySeq(buySeq); affectRows += deliveryFlowService.deleteByDateSeqPmId(deliveryFlow); // delete shipping orders PdsShipOrder pdsShipOrder = new PdsShipOrder(); pdsShipOrder.setPmId(pmId); pdsShipOrder.setDeliveryDate(deliveryDate); pdsShipOrder.setDeliverySeq(buySeq); List<PdsShipOrder> shipOrderList = pdsShipOrderService.findList(pdsShipOrder); affectRows += shipOrderList.size(); for (PdsShipOrder shipOrder : shipOrderList) { affectRows += pdsShipOrderDetailService.deleteByShipOrderId(shipOrder.getShipId()); pdsShipOrderService.delete(shipOrder);
} // delete trade orders TradeOrder tradeOrder = new TradeOrder(); tradeOrder.setPmId(pmId); tradeOrder.setDeliveryTime(deliveryDate); tradeOrder.setBuySeq(buySeq); affectRows += tradeOrderService.deleteByDateSeqPmId(tradeOrder); // delete summary orders SummaryOrder summaryOrder = new SummaryOrder(); summaryOrder.setPmId(pmId); summaryOrder.setDeliveryTime(deliveryDate); summaryOrder.setBuySeq(buySeq); List<SummaryOrder> summaryOrderList = summaryOrderService.findList(summaryOrder); affectRows += summaryOrderList.size(); BigDecimal addStockNum; for (SummaryOrder sumOrder : summaryOrderList) { addStockNum = BigDecimal.ZERO; List<TradeMatch> tradeMatchList = tradeMatchService.findByAskOrder(sumOrder.getSummaryOrderId()); affectRows += tradeMatchList.size(); // delete trade matches for (TradeMatch tradeMatch : tradeMatchList) { tradeMatchService.delete(tradeMatch); } // delete offer orders List<OfferOrder> offerOrderList = offerOrderService.findByAskId(sumOrder.getSummaryOrderId()); affectRows += offerOrderList.size(); for (OfferOrder offerOrder : offerOrderList) { PdsSupplier pdsSupplier = pdsSupplierService.get(offerOrder.getSupplierId()); tradeOrder.setOfferId(offerOrder.getOfferOrderId()); List<TradeOrder> tradeOrderList = tradeOrderService.findList(tradeOrder); if (CollectionUtils.isNotEmpty(tradeOrderList)) { TradeOrder item = tradeOrderList.get(0); boolean truckFlag = IPdsConstant.OperatorViewStatus.truckLoad.getCode().equals(item.getOpStatus()); // compare the supplier's type code, not the PdsSupplier entity itself; getSupplierType() is an assumed accessor boolean stockSupTypeFlag = IPdsConstant.SupplierType.stock.getCode().equals(pdsSupplier.getSupplierType()); if (!truckFlag && !stockSupTypeFlag) { addStockNum = addStockNum.add(offerOrder.getBuyNum()); } } offerOrderService.delete(offerOrder); } // restore stock PssWarehouse warehouse = new PssWarehouse(); warehouse.setMerchantId(pmId); warehouse.setStatus(ICommonConstant.IsEnable.enable.getCode()); List<PssWarehouse> warehouseList = warehouseService.findList(warehouse); if (CollectionUtils.isNotEmpty(warehouseList)) { pssGoodsStorageService.outStock(warehouseList.get(0).getId(), sumOrder.getGoodsId(), addStockNum); affectRows += 1; } // delete PSS stock-in orders TODO // delete the summary order summaryOrderService.delete(sumOrder); } // restore purchase orders BuyOrder buyOrder = new BuyOrder(); buyOrder.setPmId(pmId); buyOrder.setDeliveryTime(deliveryDate); buyOrder.setBuySeq(buySeq); List<BuyOrder> buyOrderList = buyOrderService.findList(buyOrder); if (CollectionUtils.isEmpty(buyOrderList)) { baseResp.setMsg("重置失败,未找到采购单"); return baseResp; } affectRows += buyOrderList.size(); String cancelStatus = IPdsConstant.BuyOrderStatus.cancel.getCode(); String submitStatus = IPdsConstant.BuyOrderStatus.submit.getCode(); for (BuyOrder order : buyOrderList) { // cancelled orders keep their status if (StringUtils.equals(order.getStatus(), cancelStatus)) { affectRows--; continue; } order.setStatus(submitStatus); List<BuyOrderDetail> detailList = buyOrderDetailService.findListByBuyId(order.getBuyId()); affectRows += detailList.size(); for (BuyOrderDetail detail : detailList) { // reset the summary order id detail.setSmmaryBuyId(null); detail.setSummaryFlag(IPdsConstant.DetailSummaryFlag.wait.getCode()); // cancelled order details keep their status if (!StringUtils.equals(detail.getStatus(), cancelStatus)) { detail.setStatus(submitStatus); } buyOrderDetailService.save(detail); } buyOrderService.save(order); } baseResp.success(); HashMap<String, Integer> respMap = new HashMap<>(8); respMap.put("affectRows", affectRows); baseResp.setExt(respMap); return baseResp; } @Override public BaseResp goodsReceived(String pmId, String buySeq, Date deliveryTime) { BaseResp baseResp = BaseResp.newError(); // completed purchase orders awaiting receipt BuyOrder buyOrder = new BuyOrder(); buyOrder.setPmId(pmId);
buyOrder.setDeliveryTime(deliveryTime); buyOrder.setBuySeq(buySeq); buyOrder.setStatus(IPdsConstant.BuyOrderStatus.arrive.getCode()); List<BuyOrder> buyOrderList = buyOrderService.findList(buyOrder); if (Collections3.isEmpty(buyOrderList)) { baseResp.setMsg("无待确认收货订单"); return baseResp; } TradeOrder tradeOrder = new TradeOrder(); for (BuyOrder order : buyOrderList) { tradeOrder.setBuyId(order.getBuyId()); tradeOrder.setBuyerId(order.getBuyerId()); baseResp = buyerOrderService.confirmAllGoods(tradeOrder); // on successful receipt, push a WeChat notification if (baseResp.isSuccess()) { UserOpenPlatform userOpenPlatform = pdsCommonServiceImpl.fetchOpenUserByUid(order.getBuyerUid(), IPdsConstant.WX_MP_APPID, order.getBuyerId(), IPdsConstant.CompanyType.buyer); if (null != userOpenPlatform && null != order) { wxMpMessageService.orderDealCloseNotify(userOpenPlatform, order); } } logger.debug("--收货---\n采购商{}\n 采购单:{}\n{}", order.getBuyerName(), order.getBuyId(), baseResp.getMsg()); } return baseResp; } }
afonsopbarros/appcenter-cli
dist/commands/crashes/lib/subfolder-symbols-helper.js
"use strict"; var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } return new (P || (P = Promise))(function (resolve, reject) { function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } step((generator = generator.apply(thisArg, _arguments || [])).next()); }); }; Object.defineProperty(exports, "__esModule", { value: true }); exports.getChildrenDsymFolderPaths = exports.packDsymParentFolderContents = exports.getSymbolsZipFromXcarchive = void 0; const Path = require("path"); const Fs = require("fs"); const JsZip = require("jszip"); const util_1 = require("util"); const commandline_1 = require("../../../util/commandline"); const JsZipHelper = require("../../../util/misc/jszip-helper"); function getSymbolsZipFromXcarchive(path, debug) { return __awaiter(this, void 0, void 0, function* () { // the DSYM folders from "*.xcarchive/dSYMs" should be compressed const dsymsFolderPath = Path.join(path, "dSYMs"); return yield packDsymParentFolderContents(dsymsFolderPath, debug); }); } exports.getSymbolsZipFromXcarchive = getSymbolsZipFromXcarchive; function packDsymParentFolderContents(path, debug) { return __awaiter(this, void 0, void 0, function* () { debug(`Compressing the dSYM sub-folders of ${path} to the in-memory ZIP archive`); const zipArchive = new JsZip(); const childrenDsymFolders = getChildrenDsymFolderPaths(path, debug); for (const dSymPath of childrenDsymFolders) { try { debug(`Adding the sub-folder ${dSymPath} to the ZIP archive`); yield JsZipHelper.addFolderToZipRecursively(dSymPath, zipArchive); } catch (error) { debug(`Unable to add folder ${dSymPath} to the ZIP archive - ${util_1.inspect(error)}`); throw commandline_1.failure(commandline_1.ErrorCodes.Exception, `unable to add folder ${dSymPath} to the ZIP archive`); } } return zipArchive; }); } exports.packDsymParentFolderContents = packDsymParentFolderContents; function getChildrenDsymFolderPaths(parentPath, debug) { // get paths for all the DSym folders which belong to the specified folder let childrenEntriesList; try { childrenEntriesList = Fs.readdirSync(parentPath); } catch (error) { debug(`error when looking into directory ${parentPath} content - ${util_1.inspect(error)}`); throw commandline_1.failure(commandline_1.ErrorCodes.Exception, `error when looking into directory ${parentPath} content`); } return childrenEntriesList .map((childPath) => Path.join(parentPath, childPath)) .filter((childPath) => { if (Path.extname(childPath).toLowerCase() !== ".dsym") { return false; } try { const childStats = Fs.statSync(childPath); return childStats.isDirectory(); } catch (error) { debug(`Error when getting statistics for the file ${parentPath} - ${util_1.inspect(error)}`); throw commandline_1.failure(commandline_1.ErrorCodes.Exception, `error when getting statistics for the file ${parentPath}`); } }); } exports.getChildrenDsymFolderPaths = getChildrenDsymFolderPaths;
jbayer/external-secrets
pkg/provider/aws/parameterstore/fake/fake.go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package fake import ( "fmt" "github.com/aws/aws-sdk-go/service/ssm" "github.com/google/go-cmp/cmp" ) // Client implements the aws parameterstore interface. type Client struct { valFn func(*ssm.GetParameterInput) (*ssm.GetParameterOutput, error) } func (sm *Client) GetParameter(in *ssm.GetParameterInput) (*ssm.GetParameterOutput, error) { return sm.valFn(in) } func (sm *Client) DescribeParameters(*ssm.DescribeParametersInput) (*ssm.DescribeParametersOutput, error) { return nil, nil } func (sm *Client) WithValue(in *ssm.GetParameterInput, val *ssm.GetParameterOutput, err error) { sm.valFn = func(paramIn *ssm.GetParameterInput) (*ssm.GetParameterOutput, error) { if !cmp.Equal(paramIn, in) { return nil, fmt.Errorf("unexpected test argument") } return val, err } }
ubirch/ubirch-auth-service
core/src/test/scala/com/ubirch/auth/core/manager/LogoutManagerSpec.scala
package com.ubirch.auth.core.manager import com.ubirch.auth.testTools.db.redis.RedisSpec import com.ubirch.util.json.JsonFormats import com.ubirch.util.oidc.model.UserContext import com.ubirch.util.oidc.util.OidcUtil import org.json4s.native.Serialization.write /** * author: cvandrei * since: 2017-03-22 */ class LogoutManagerSpec extends RedisSpec { implicit private val formatter = JsonFormats.default feature("logout") { scenario("empty database") { // test LogoutManager.logout("some-token") flatMap { logoutResult => // verify logoutResult should be(true) redis.keys("*") map { keys => keys should be('isEmpty) } } } scenario("token does not exist") { // prepare val token1 = "some-token-1" // to logout with val token2 = "some-token-2" // exists in Redis val redisKey = OidcUtil.tokenToHashedKey(token2) val redisValue = write( UserContext( context = "some-context", providerId = "some-provider-id", externalUserId = "some-user-id", userName = "some-user-name", locale = "en" ) ) redis.set(redisKey, redisValue) flatMap { token2Created => token2Created should be(true) // test LogoutManager.logout(token1) flatMap { logoutResult => // verify Thread.sleep(500) logoutResult should be(true) redis.keys("*") map { keys => keys.size shouldBe 1 } } } } scenario("token exists") { // prepare val token = "some-token" val redisKey = OidcUtil.tokenToHashedKey(token) val redisValue = write( UserContext( context = "some-context", providerId = "some-provider-id", externalUserId = "some-user-id", userName = "some-user-name", locale = "en" ) ) redis.set(redisKey, redisValue) flatMap { tokenCreated => tokenCreated should be(true) // test LogoutManager.logout(token) flatMap { logoutResult => // verify Thread.sleep(500) logoutResult should be(true) redis.keys("*") map { keys => keys should be('isEmpty) } } } } } }
Harteex/Tuniac
Tuniac1/TuniacApp/LibraryEntry.h
/* Copyright (C) 2003-2008 <NAME> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation is required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ /* Modification and addition to Tuniac originally written by <NAME> Copyright (C) 2003-2014 <NAME> */ #pragma once #define LENGTH_STREAM 0xfffffffe #define LENGTH_UNKNOWN 0xffffffff #define BITRATE_UNDEFINABLE 0xfffffffe #define BITRATE_UNKNOWN 0xffffffff typedef struct { public: unsigned long ulKind; TCHAR szURL[MAX_PATH]; unsigned long ulFilesize; unsigned long ulAvailability; SYSTEMTIME stDateAdded; SYSTEMTIME stFileCreationDate; SYSTEMTIME stLastPlayed; unsigned long ulPlayCount; unsigned long ulRating; // filled in by the relevant media type handler // standard ID3 tag stuff TCHAR szTitle[128]; TCHAR szArtist[128]; TCHAR szAlbum[128]; TCHAR szComment[128]; TCHAR szGenre[128]; TCHAR szAlbumArtist[128]; TCHAR szComposer[128]; unsigned long ulYear; unsigned short dwTrack[2]; // index 0 == track : index 1 == max track (0 if unavailable) unsigned short dwDisc[2]; // index 0 == disk : index 1 == max disc (0 is unavailable) //extra info unsigned long ulPlaybackTime; unsigned long ulBitRate; unsigned long ulSampleRate; unsigned long ulChannels; unsigned long ulBitsPerSample; float fReplayGain_Track_Gain; float fReplayGain_Track_Peak; float fReplayGain_Album_Gain; float fReplayGain_Album_Peak; unsigned long ulBPM; TCHAR szFileType[16]; } LibraryEntry; //Past versions typedef struct { public: unsigned long ulKind; TCHAR szURL[MAX_PATH]; unsigned long ulFilesize; unsigned long ulAvailability; SYSTEMTIME stDateAdded; SYSTEMTIME stFileCreationDate; SYSTEMTIME stLastPlayed; unsigned long ulPlayCount; unsigned long ulRating; // filled in by the relevant media type handler // standard ID3 tag stuff TCHAR szTitle[128]; TCHAR szArtist[128]; TCHAR szAlbum[128]; TCHAR szComment[128]; TCHAR szGenre[128]; TCHAR szAlbumArtist[128]; TCHAR szComposer[128]; unsigned long ulYear; unsigned short dwTrack[2]; // index 0 == track : index 1 == max track (0 if unavailable) unsigned short dwDisc[2]; // index 0 == disk : index 1 == max disc (0 is unavailable) //extra info unsigned long ulPlaybackTime; unsigned long ulBitRate; unsigned long ulSampleRate; unsigned long ulChannels; float fReplayGain_Track_Gain; float fReplayGain_Track_Peak; float fReplayGain_Album_Gain; float fReplayGain_Album_Peak; unsigned long ulBPM; TCHAR szFileType[16]; } LibraryEntry09; typedef struct { public: unsigned long ulKind; TCHAR szURL[MAX_PATH]; unsigned long ulFilesize; unsigned long ulAvailability; SYSTEMTIME stDateAdded; SYSTEMTIME stFileCreationDate; SYSTEMTIME stLastPlayed; unsigned long ulPlayCount; unsigned long ulRating; // filled in by the relevant media type handler // standard ID3 tag stuff TCHAR szTitle[128]; TCHAR szArtist[128]; TCHAR szAlbum[128]; TCHAR szComment[128]; TCHAR szGenre[128]; TCHAR
szAlbumArtist[128]; TCHAR szComposer[128]; unsigned long ulYear; unsigned short dwTrack[2]; // index 0 == track : index 1 == max track (0 if unavailable) unsigned short dwDisc[2]; // index 0 == disk : index 1 == max disc (0 is unavailable) //extra info unsigned long ulPlaybackTime; unsigned long ulBitRate; unsigned long ulSampleRate; unsigned long ulChannels; float fReplayGain_Track_Gain; float fReplayGain_Track_Peak; float fReplayGain_Album_Gain; float fReplayGain_Album_Peak; unsigned long ulBPM; } LibraryEntry08; typedef struct { public: unsigned long ulKind; TCHAR szURL[MAX_PATH]; unsigned long ulFilesize; unsigned long ulAvailability; SYSTEMTIME stDateAdded; SYSTEMTIME stFileCreationDate; SYSTEMTIME stLastPlayed; unsigned long ulPlayCount; unsigned long ulRating; // filled in by the relevant media type handler // standard ID3 tag stuff TCHAR szTitle[128]; TCHAR szArtist[128]; TCHAR szAlbum[128]; TCHAR szComment[128]; TCHAR szGenre[128]; TCHAR szAlbumArtist[128]; unsigned long ulYear; unsigned short dwTrack[2]; // index 0 == track : index 1 == max track (0 if unavailable) unsigned short dwDisc[2]; // index 0 == disk : index 1 == max disc (0 is unavailable) //extra info unsigned long ulPlaybackTime; unsigned long ulBitRate; unsigned long ulSampleRate; unsigned long ulChannels; float fReplayGain_Track_Gain; float fReplayGain_Track_Peak; float fReplayGain_Album_Gain; float fReplayGain_Album_Peak; unsigned long ulBPM; } LibraryEntry07; typedef struct { public: unsigned long ulKind; TCHAR szURL[MAX_PATH]; unsigned long ulFilesize; unsigned long ulAvailability; SYSTEMTIME stDateAdded; SYSTEMTIME stFileCreationDate; SYSTEMTIME stLastPlayed; unsigned long ulPlayCount; unsigned long ulRating; // filled in by the relevant media type handler // standard ID3 tag stuff TCHAR szTitle[128]; TCHAR szArtist[128]; TCHAR szAlbum[128]; TCHAR szComment[128]; TCHAR szGenre[128]; unsigned long ulYear; unsigned short dwTrack[2]; // index 0 == track : index 1 == max track (0 if unavailable) unsigned short dwDisc[2]; // index 0 == disk : index 1 == max disc (0 is unavailable) //extra info unsigned long ulPlaybackTime; unsigned long ulBitRate; unsigned long ulSampleRate; unsigned long ulChannels; float fReplayGain_Track_Gain; float fReplayGain_Track_Peak; float fReplayGain_Album_Gain; float fReplayGain_Album_Peak; unsigned long ulBPM; } LibraryEntry06; typedef struct { public: unsigned long dwKind; TCHAR szURL[MAX_PATH]; unsigned long dwFilesize; unsigned long dwAvailability; SYSTEMTIME stDateAdded; SYSTEMTIME stFileCreationDate; SYSTEMTIME stLastPlayed; int iPlayCount; unsigned long dwRating; // filled in by the relevant media type handler // standard ID3 tag stuff TCHAR szTitle[128]; TCHAR szArtist[128]; TCHAR szAlbum[128]; TCHAR szComment[128]; TCHAR szGenre[128]; int iYear; unsigned short dwTrack[2]; // index 0 == track : index 1 == max track (0 if unavailable) unsigned short dwDisc[2]; // index 0 == disk : index 1 == max disc (0 is unavailable) //extra info int iPlaybackTime; int iBitRate; int iSampleRate; int iChannels; float fReplayGain_Track_Gain; float fReplayGain_Track_Peak; float fReplayGain_Album_Gain; float fReplayGain_Album_Peak; } LibraryEntry05;
Darlingnotin/Antisocial_VR
libraries/controllers/src/controllers/impl/endpoints/ScriptEndpoint.cpp
// // Created by <NAME> 2015/10/23 // Copyright 2015 High Fidelity, Inc. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // #include "ScriptEndpoint.h" #include "../../Logging.h" #include <QtCore/QThread> #include <StreamUtils.h> using namespace controller; QString formatException(const QScriptValue& exception) { QString note { "UncaughtException" }; QString result; const auto message = exception.toString(); const auto fileName = exception.property("fileName").toString(); const auto lineNumber = exception.property("lineNumber").toString(); const auto stacktrace = exception.property("stack").toString(); const QString SCRIPT_EXCEPTION_FORMAT = "[%0] %1 in %2:%3"; const QString SCRIPT_BACKTRACE_SEP = "\n "; result = QString(SCRIPT_EXCEPTION_FORMAT).arg(note, message, fileName, lineNumber); if (!stacktrace.isEmpty()) { result += QString("\n[Backtrace]%1%2").arg(SCRIPT_BACKTRACE_SEP).arg(stacktrace); } return result; } AxisValue ScriptEndpoint::peek() const { const_cast<ScriptEndpoint*>(this)->updateValue(); return AxisValue(_lastValueRead, 0); } void ScriptEndpoint::updateValue() { if (QThread::currentThread() != thread()) { QMetaObject::invokeMethod(this, "updateValue", Qt::QueuedConnection); return; } QScriptValue result = _callable.call(); if (result.isError()) { // print JavaScript exception qCDebug(controllers).noquote() << formatException(result); _lastValueRead = 0.0f; } else if (result.isNumber()) { // reuse the value computed above rather than invoking the script a second time _lastValueRead = (float)result.toNumber(); } else { Pose::fromScriptValue(result, _lastPoseRead); _returnPose = true; } } void ScriptEndpoint::apply(AxisValue value, const Pointer& source) { if (value == _lastValueWritten) { return; } _lastValueWritten = value; internalApply(value.value, source->getInput().getID()); } void ScriptEndpoint::internalApply(float value, int sourceID) { if (QThread::currentThread() != thread()) { QMetaObject::invokeMethod(this, "internalApply", Qt::QueuedConnection, Q_ARG(float, value), Q_ARG(int, sourceID)); return; } QScriptValue result = _callable.call(QScriptValue(), QScriptValueList({ QScriptValue(value), QScriptValue(sourceID) })); if (result.isError()) { // print JavaScript exception qCDebug(controllers).noquote() << formatException(result); } } Pose ScriptEndpoint::peekPose() const { const_cast<ScriptEndpoint*>(this)->updatePose(); return _lastPoseRead; } void ScriptEndpoint::updatePose() { if (QThread::currentThread() != thread()) { QMetaObject::invokeMethod(this, "updatePose", Qt::QueuedConnection); return; } QScriptValue result = _callable.call(); if (result.isError()) { // print JavaScript exception qCDebug(controllers).noquote() << formatException(result); } Pose::fromScriptValue(result, _lastPoseRead); } void ScriptEndpoint::apply(const Pose& newPose, const Pointer& source) { if (newPose == _lastPoseWritten) { return; } internalApply(newPose, source->getInput().getID()); } void ScriptEndpoint::internalApply(const Pose& newPose, int sourceID) { _lastPoseWritten = newPose; if (QThread::currentThread() != thread()) { QMetaObject::invokeMethod(this, "internalApply", Qt::QueuedConnection, Q_ARG(const Pose&, newPose), Q_ARG(int, sourceID)); return; } QScriptValue result = _callable.call(QScriptValue(), QScriptValueList({ Pose::toScriptValue(_callable.engine(), newPose), QScriptValue(sourceID) })); if (result.isError()) { // print JavaScript exception qCDebug(controllers).noquote() << formatException(result); } }
demisto/pycharm-plugin
src/main/java/com/demisto/plugin/ide/generalUIComponents/DemistoCollapsingListeners.java
package com.demisto.plugin.ide.generalUIComponents; public interface DemistoCollapsingListeners { void onCollapsingChanged(CollapsiblePanel panel, boolean newValue); }
longluo/leetcode
Java/src/com/longluo/offer_ii/Offer2_03_countBits.java
package com.longluo.offer_ii; import java.util.Arrays; /** * Offer II 003. Number of 1 bits in the binary representations of the first n numbers * <p> * Given a non-negative integer n, compute the number of 1s in the binary representation of every number from 0 to n, and return the counts as an array. * <p> * Example 1: * Input: n = 2 * Output: [0,1,1] * Explanation: * 0 --> 0 * 1 --> 1 * 2 --> 10 * <p> * Example 2: * Input: n = 5 * Output: [0,1,1,2,1,2] * Explanation: * 0 --> 0 * 1 --> 1 * 2 --> 10 * 3 --> 11 * 4 --> 100 * 5 --> 101 * <p> * Constraints: * 0 <= n <= 10^5 * <p> * Follow-up: * A solution with O(n*sizeof(integer)) time is easy. Can you do it in linear time O(n) with a single pass? * The space complexity of the algorithm must be O(n). * Can you refine the solution further, without using any built-in function (such as __builtin_popcount in C++) for this task? * <p> * Note: this problem is the same as problem 338 on the main site: https://leetcode-cn.com/problems/counting-bits/ * <p> * https://leetcode-cn.com/problems/w3tCBm/ */ public class Offer2_03_countBits { public static int[] countBits(int n) { int[] res = new int[n + 1]; for (int i = 1; i <= n; i++) { res[i] = getOneNum(i); } return res; } public static int getOneNum(int n) { int ans = 0; while (n > 0) { if ((n & 1) == 1) { ans++; } n = n >> 1; } return ans; } public static int[] countBits_dp(int n) { int[] res = new int[n + 1]; for (int i = 1; i <= n; i++) { // i >> 1 drops the lowest bit of i, so its count plus that bit gives the count for i res[i] = res[i >> 1] + (i & 1); } return res; } public static void main(String[] args) { System.out.println("[0] ?= " + Arrays.toString(countBits(0))); System.out.println("[0,1] ?= " + Arrays.toString(countBits(1))); System.out.println("[0,1,1] ?= " + Arrays.toString(countBits(2))); System.out.println("[0,1,1,2,1,2] ?= " + Arrays.toString(countBits(5))); } }
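The linear-time version works because i and i >> 1 have identical binary representations apart from the lowest bit of i, giving the recurrence res[i] = res[i >> 1] + (i & 1). For example, 5 is 101 in binary: res[5] = res[2] + 1 = 1 + 1 = 2, matching the expected output in main.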
benshurts/ttauri-gui
src/ttauri/event_queue.hpp
// Copyright <NAME> 2021. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt) #pragma once #include "wfree_fifo.hpp" #include <concepts> namespace tt { namespace detail { class event_queue_item_base { public: virtual ~event_queue_item_base() = default; virtual void operator()() const noexcept = 0; }; template<std::invocable Function> class event_queue_item final : public event_queue_item_base { public: constexpr event_queue_item(event_queue_item const &) noexcept = default; constexpr event_queue_item(event_queue_item &&) noexcept = default; constexpr event_queue_item &operator=(event_queue_item const &) noexcept = default; constexpr event_queue_item &operator=(event_queue_item &&) noexcept = default; constexpr event_queue_item(Function const &function) noexcept : function(function) {} constexpr event_queue_item(Function &&function) noexcept : function(std::move(function)) {} virtual void operator()() const noexcept { function(); } private: Function function; }; } // namespace detail class event_queue { public: void emplace(std::invocable auto &&function) const noexcept { using function_type = std::remove_cvref_t<decltype(function)>; return fifo.emplace<detail::event_queue_item<function_type>>(std::forward<decltype(function)>(function)); } bool take_one(std::invocable<detail::event_queue_item_base const &> auto &&operation) const noexcept { return fifo.take_one(std::forward<decltype(operation)>(operation)); } void take_all(std::invocable<detail::event_queue_item_base const &> auto &&operation) const noexcept { return fifo.take_all(std::forward<decltype(operation)>(operation)); } private: mutable wfree_fifo<detail::event_queue_item_base, 128> fifo; }; } // namespace tt
jjwatts/gigantum-client
ui/__tests__/components/shared/activity/ActivityCard.test.js
// vendor import React, { Component } from 'react'; import renderer from 'react-test-renderer'; import { mount } from 'enzyme'; import relayTestingUtils from '@gigantum/relay-testing-utils'; import { Provider } from 'react-redux'; // components import ActivityCard from 'Components/shared/activity/ActivityCard'; // Data import json from 'Tests/components/labbook/__relaydata__/LabbookContainerQuery.json'; // store import store from 'JS/redux/store'; let { labbook } = json.data; let fixtures = { sectionType: 'labbook', isFirstCard: true, addCluster: jest.fn(), compressExpanded: jest.fn(), isCompressed: false, isExpandedHead: true, isExpandedEnd: true, isExpandedNode: true, attachedCluster: false, collapsed: false, clusterObject: {}, position: 0, hoveredRollback: true, key: 'ActivityCard', edge: labbook.activityRecords.edges[0], }; describe('Activity', () => { it('renders a snapshot', () => { const wrapper = renderer.create(<ActivityCard {...fixtures} />); const tree = wrapper.toJSON(); expect(tree).toMatchSnapshot(); }); });
neo4j-contrib/gcli
lib/gcli/l10n.js
<reponame>neo4j-contrib/gcli<filename>lib/gcli/l10n.js /* * Copyright 2012, Mozilla Foundation and contributors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ define(function(require, exports, module) { var strings = {}; /** * Add a CommonJS module to the list of places in which we look for * localizations. Before calling this function, it's important to make a call * to require(modulePath) to ensure that the dependency system (either require * or dryice) knows to make the module ready. * @param modulePath A CommonJS module (as used in calls to require). Don't * add the 'i18n!' prefix used by requirejs. * @see unregisterStringsSource() */ exports.registerStringsSource = function(modulePath) { // Bug 683844: Should be require('i18n!' + module); var additions = require(modulePath).root; Object.keys(additions).forEach(function(key) { if (strings[key]) { console.error('Key \'' + key + '\' (loaded from ' + modulePath + ') ' + 'already exists. Ignoring.'); return; } strings[key] = additions[key]; }, this); }; /** * The main GCLI strings source is always required. * We have to load it early on in the process (in the require phase) so that * we can define settingSpecs and commandSpecs at the top level too. */ require('gcli/nls/strings'); exports.registerStringsSource('gcli/nls/strings'); /** * Undo the effects of registerStringsSource(). * @param modulePath A CommonJS module (as used in calls to require). * @see registerStringsSource() */ exports.unregisterStringsSource = function(modulePath) { // Bug 683844: Should be require('i18n!' + module); var additions = require(modulePath).root; Object.keys(additions).forEach(function(key) { delete strings[key]; }, this); }; /** * Finds the preferred locales of the user as an array of RFC 4646 strings * (e.g. 'pt-br'). * . There is considerable confusion as to the correct value * since there are a number of places the information can be stored: * - In the OS (IE:navigator.userLanguage, IE:navigator.systemLanguage) * - In the browser (navigator.language, IE:navigator.browserLanguage) * - By GEO-IP * - By website specific settings * This implementation uses navigator.language || navigator.userLanguage as * this is compatible with requirejs. * See http://tools.ietf.org/html/rfc4646 * See http://stackoverflow.com/questions/1043339/javascript-for-detecting-browser-language-preference * See http://msdn.microsoft.com/en-us/library/ms534713.aspx * @return The current locale as an RFC 4646 string */ exports.getPreferredLocales = function() { var language = typeof navigator !== 'undefined' ? (navigator.language || navigator.userLanguage).toLowerCase() : 'en-us'; var parts = language.split('-'); var reply = parts.map(function(part, index) { return parts.slice(0, parts.length - index).join('-'); }); reply.push('root'); return reply; }; /** * Lookup a key in our strings file using localized versions if possible, * throwing an error if that string does not exist. 
* @param key The string to lookup * This should generally be in the general form 'filenameExportIssue' where * filename is the name of the module (all lowercase without underscores) and * export is the name of a top level thing in which the message is used and * issue is a short string indicating the issue. * The point of a 'standard' like this is to keep strings fairly short whilst * still allowing users to have an idea where they come from, and preventing * name clashes. * @return The string resolved from the correct locale */ exports.lookup = function(key) { var str = strings[key]; if (str == null) { throw new Error('No i18n key: ' + key); } return str; }; /** * An alternative to lookup(). * <tt>l10n.lookup('x') === l10n.propertyLookup.x</tt> * We should go easy on this method until we are sure that we don't have too * many 'old-browser' problems. However this works particularly well with the * templater because you can pass this in to a template that does not do * <tt>{ allowEval: true }</tt> */ if (typeof Proxy !== 'undefined') { exports.propertyLookup = Proxy.create({ get: function(rcvr, name) { return exports.lookup(name); } }); } else { exports.propertyLookup = strings; } /** * Helper function to process swaps. * For example: * swap('the {subject} {verb} {preposition} the {object}', { * subject: 'cat', verb: 'sat', preposition: 'on', object: 'mat' * }); * Returns 'the cat sat on the mat'. * @param str The string containing parts delimited by { and } to be replaced * @param swaps Lookup map containing the replacement strings */ function swap(str, swaps) { return str.replace(/\{[^}]*\}/g, function(name) { name = name.slice(1, -1); if (swaps == null) { console.log('Missing swaps while looking up \'' + name + '\''); return ''; } var replacement = swaps[name]; if (replacement == null) { console.log('Can\'t find \'' + name + '\' in ' + JSON.stringify(swaps)); replacement = ''; } return replacement; }); } /** * Lookup a key in our strings file using localized versions if possible, * and perform string interpolation to inject runtime values into the string. * l10n lookup is required for user visible strings, but not required for * console messages and throw strings. * lookupSwap() is virtually identical in function to lookupFormat(), except * that lookupSwap() is easier to use, however lookupFormat() is required if * your code is to work with Mozilla's i10n system. * @param key The string to lookup * This should generally be in the general form 'filename_export_issue' where * filename is the name of the module (all lowercase without underscores) and * export is the name of a top level thing in which the message is used and * issue is a short string indicating the issue. * The point of a 'standard' like this is to keep strings fairly short whilst * still allowing users to have an idea where they come from, and preventing * name clashes. * The value looked up may contain {variables} to be exchanged using swaps * @param swaps A map of variable values to be swapped. * @return A looked-up and interpolated message for display to the user. * @see lookupFormat() */ exports.lookupSwap = function(key, swaps) { var str = exports.lookup(key); return swap(str, swaps); }; /** * Perform the string swapping required by format(). * @see format() for details of the swaps performed. 
*/ function format(str, swaps) { // First replace the %S strings var index = 0; str = str.replace(/%S/g, function() { return swaps[index++]; }); // Then %n$S style strings str = str.replace(/%([0-9])\$S/g, function(match, idx) { return swaps[idx - 1]; }); return str; } /** * Lookup a key in our strings file using localized versions if possible, * and perform string interpolation to inject runtime values into the string. * l10n lookup is required for user visible strings, but not required for * console messages and throw strings. * lookupFormat() is virtually identical in function to lookupSwap(), except * that lookupFormat() works with strings held in the mozilla repo in addition * to files held outside. * @param key Looks up the format string for the given key in the string bundle * and returns a formatted copy where each occurrence of %S (uppercase) is * replaced by each successive element in the supplied array. * Alternatively, numbered indices of the format %n$S (e.g. %1$S, %2$S, etc.) * can be used to specify the position of the corresponding parameter * explicitly. * The mozilla version performs more advances formatting than these simple * cases, however these cases are not supported so far, mostly because they are * not well documented. * @param swaps An array of strings to be swapped. * @return A looked-up and interpolated message for display to the user. * @see https://developer.mozilla.org/en/XUL/Method/getFormattedString */ exports.lookupFormat = function(key, swaps) { var str = exports.lookup(key); return format(str, swaps); }; /** * Lookup the correct pluralization of a word/string. * The first ``key`` and ``swaps`` parameters of lookupPlural() are the same * as for lookupSwap(), however there is an extra ``ord`` parameter which indicates * the plural ordinal to use. * For example, in looking up the string '39 steps', the ordinal would be 39. * * More detailed example: * French has 2 plural forms: the first for 0 and 1, the second for everything * else. English also has 2, but the first only covers 1. Zero is lumped into * the 'everything else' category. Vietnamese has only 1 plural form - so it * uses the same noun form however many of them there are. * The following localization strings describe how to pluralize the phrase * '1 minute': * 'en-us': { demo_plural_time: [ '{ord} minute', '{ord} minutes' ] }, * 'fr-fr': { demo_plural_time: [ '{ord} minute', '{ord} minutes' ] }, * 'vi-vn': { demo_plural_time: [ '{ord} phut' ] }, * * l10n.lookupPlural('demo_plural_time', 0); // '0 minutes' in 'en-us' * l10n.lookupPlural('demo_plural_time', 1); // '1 minute' in 'en-us' * l10n.lookupPlural('demo_plural_time', 9); // '9 minutes' in 'en-us' * * l10n.lookupPlural('demo_plural_time', 0); // '0 minute' in 'fr-fr' * l10n.lookupPlural('demo_plural_time', 1); // '1 minute' in 'fr-fr' * l10n.lookupPlural('demo_plural_time', 9); // '9 minutes' in 'fr-fr' * * l10n.lookupPlural('demo_plural_time', 0); // '0 phut' in 'vi-vn' * l10n.lookupPlural('demo_plural_time', 1); // '1 phut' in 'vi-vn' * l10n.lookupPlural('demo_plural_time', 9); // '9 phut' in 'vi-vn' * * The * Note that the localization strings are (correctly) the same (since both * the English and the French words have the same etymology) * @param key The string to lookup in gcli/nls/strings.js * @param ord The number to use in plural lookup * @param swaps A map of variable values to be swapped. 
*/ exports.lookupPlural = function(key, ord, swaps) { var index = getPluralRule().get(ord); var words = exports.lookup(key); var str = words[index]; swaps = swaps || {}; swaps.ord = ord; return swap(str, swaps); }; /** * Find the correct plural rule for the current locale * @return a plural rule with a 'get()' function */ function getPluralRule() { if (!pluralRule) { var lang = navigator.language || navigator.userLanguage; // Convert lang to a rule index pluralRules.some(function(rule) { if (rule.locales.indexOf(lang) !== -1) { pluralRule = rule; return true; } return false; }); // Use rule 0 by default, which is no plural forms at all if (!pluralRule) { console.error('Failed to find plural rule for ' + lang); pluralRule = pluralRules[0]; } } return pluralRule; } /** * A plural form is a way to pluralize a noun. There are 2 simple plural forms * in English, with (s) and without - e.g. tree and trees. There are many other * ways to pluralize (e.g. witches, ladies, teeth, oxen, axes, data, alumini) * However they all follow the rule that 1 is 'singular' while everything * else is 'plural' (words without a plural form like sheep can be seen as * following this rule where the singular and plural forms are the same) * <p>Non-English languages have different pluralization rules, for example * French uses singular for 0 as well as 1. Japanese has no plurals while * Arabic and Russian are very complex. * * See https://developer.mozilla.org/en/Localization_and_Plurals * See https://secure.wikimedia.org/wikipedia/en/wiki/List_of_ISO_639-1_codes * * Contains code inspired by Mozilla L10n code originally developed by * <NAME> <<EMAIL>> */ var pluralRules = [ /** * Index 0 - Only one form for all * Asian family: Japanese, Vietnamese, Korean */ { locales: [ 'fa', 'fa-ir', 'id', 'ja', 'ja-jp-mac', 'ka', 'ko', 'ko-kr', 'th', 'th-th', 'tr', 'tr-tr', 'zh', 'zh-tw', 'zh-cn' ], numForms: 1, get: function(n) { return 0; } }, /** * Index 1 - Two forms, singular used for one only * Germanic family: English, German, Dutch, Swedish, Danish, Norwegian, * Faroese * Romanic family: Spanish, Portuguese, Italian, Bulgarian * Latin/Greek family: Greek * Finno-Ugric family: Finnish, Estonian * Semitic family: Hebrew * Artificial: Esperanto * Finno-Ugric family: Hungarian * Turkic/Altaic family: Turkish */ { locales: [ 'af', 'af-za', 'as', 'ast', 'bg', 'br', 'bs', 'bs-ba', 'ca', 'cy', 'cy-gb', 'da', 'de', 'de-de', 'de-ch', 'en', 'en-gb', 'en-us', 'en-za', 'el', 'el-gr', 'eo', 'es', 'es-es', 'es-ar', 'es-cl', 'es-mx', 'et', 'et-ee', 'eu', 'fi', 'fi-fi', 'fy', 'fy-nl', 'gl', 'gl-gl', 'he', // 'hi-in', Without an unqualified language, looks dodgy 'hu', 'hu-hu', 'hy', 'hy-am', 'it', 'it-it', 'kk', 'ku', 'lg', 'mai', // 'mk', 'mk-mk', Should be 14? 'ml', 'ml-in', 'mn', 'nb', 'nb-no', 'no', 'no-no', 'nl', 'nn', 'nn-no', 'no', 'no-no', 'nb', 'nb-no', 'nso', 'nso-za', 'pa', 'pa-in', 'pt', 'pt-pt', 'rm', 'rm-ch', // 'ro', 'ro-ro', Should be 5? 'si', 'si-lk', // 'sl', Should be 10? 'son', 'son-ml', 'sq', 'sq-al', 'sv', 'sv-se', 'vi', 'vi-vn', 'zu', 'zu-za' ], numForms: 2, get: function(n) { return n != 1 ? 1 : 0; } }, /** * Index 2 - Two forms, singular used for zero and one * Romanic family: Brazilian Portuguese, French */ { locales: [ 'ak', 'ak-gh', 'bn', 'bn-in', 'bn-bd', 'fr', 'fr-fr', 'gu', 'gu-in', 'kn', 'kn-in', 'mr', 'mr-in', 'oc', 'oc-oc', 'or', 'or-in', 'pt-br', 'ta', 'ta-in', 'ta-lk', 'te', 'te-in' ], numForms: 2, get: function(n) { return n > 1 ? 
1 : 0; } }, /** * Index 3 - Three forms, special case for zero * Latvian */ { locales: [ 'lv' ], numForms: 3, get: function(n) { return n % 10 == 1 && n % 100 != 11 ? 1 : n != 0 ? 2 : 0; } }, /** * Index 4 - * Scottish Gaelic */ { locales: [ 'gd', 'gd-gb' ], numForms: 4, get: function(n) { return n == 1 || n == 11 ? 0 : n == 2 || n == 12 ? 1 : n > 0 && n < 20 ? 2 : 3; } }, /** * Index 5 - Three forms, special case for numbers ending in 00 or [2-9][0-9] * Romanian */ { locales: [ 'ro', 'ro-ro' ], numForms: 3, get: function(n) { return n == 1 ? 0 : n == 0 || n % 100 > 0 && n % 100 < 20 ? 1 : 2; } }, /** * Index 6 - Three forms, special case for numbers ending in 1[2-9] * Lithuanian */ { locales: [ 'lt' ], numForms: 3, get: function(n) { return n % 10 == 1 && n % 100 != 11 ? 0 : n % 10 >= 2 && (n % 100 < 10 || n % 100 >= 20) ? 2 : 1; } }, /** * Index 7 - Three forms, special cases for numbers ending in 1 and * 2, 3, 4, except those ending in 1[1-4] * Slavic family: Russian, Ukrainian, Serbian, Croatian */ { locales: [ 'be', 'be-by', 'hr', 'hr-hr', 'ru', 'ru-ru', 'sr', 'sr-rs', 'sr-cs', 'uk' ], numForms: 3, get: function(n) { return n % 10 == 1 && n % 100 != 11 ? 0 : n % 10 >= 2 && n % 10 <= 4 && (n % 100 < 10 || n % 100 >= 20) ? 1 : 2; } }, /** * Index 8 - Three forms, special cases for 1 and 2, 3, 4 * Slavic family: Czech, Slovak */ { locales: [ 'cs', 'sk' ], numForms: 3, get: function(n) { return n == 1 ? 0 : n >= 2 && n <= 4 ? 1 : 2; } }, /** * Index 9 - Three forms, special case for one and some numbers ending in * 2, 3, or 4 * Polish */ { locales: [ 'pl' ], numForms: 3, get: function(n) { return n == 1 ? 0 : n % 10 >= 2 && n % 10 <= 4 && (n % 100 < 10 || n % 100 >= 20) ? 1 : 2; } }, /** * Index 10 - Four forms, special case for one and all numbers ending in * 02, 03, or 04 * Slovenian */ { locales: [ 'sl' ], numForms: 4, get: function(n) { return n % 100 == 1 ? 0 : n % 100 == 2 ? 1 : n % 100 == 3 || n % 100 == 4 ? 2 : 3; } }, /** * Index 11 - * Irish Gaeilge */ { locales: [ 'ga-ie', 'ga-ie', 'ga', 'en-ie' ], numForms: 5, get: function(n) { return n == 1 ? 0 : n == 2 ? 1 : n >= 3 && n <= 6 ? 2 : n >= 7 && n <= 10 ? 3 : 4; } }, /** * Index 12 - * Arabic */ { locales: [ 'ar' ], numForms: 6, get: function(n) { return n == 0 ? 5 : n == 1 ? 0 : n == 2 ? 1 : n % 100 >= 3 && n % 100 <= 10 ? 2 : n % 100 >= 11 && n % 100 <= 99 ? 3 : 4; } }, /** * Index 13 - * Maltese */ { locales: [ 'mt' ], numForms: 4, get: function(n) { return n == 1 ? 0 : n == 0 || n % 100 > 0 && n % 100 <= 10 ? 1 : n % 100 > 10 && n % 100 < 20 ? 2 : 3; } }, /** * Index 14 - * Macedonian */ { locales: [ 'mk', 'mk-mk' ], numForms: 3, get: function(n) { return n % 10 == 1 ? 0 : n % 10 == 2 ? 1 : 2; } }, /** * Index 15 - * Icelandic */ { locales: [ 'is' ], numForms: 2, get: function(n) { return n % 10 == 1 && n % 100 != 11 ? 0 : 1; } } /* // Known locales without a known plural rule 'km', 'ms', 'ne-np', 'ne-np', 'ne', 'nr', 'nr-za', 'rw', 'ss', 'ss-za', 'st', 'st-za', 'tn', 'tn-za', 'ts', 'ts-za', 've', 've-za', 'xh', 'xh-za' */ ]; /** * The cached plural rule */ var pluralRule; });
tnga/underscore-es
src/defer.js
// `_defer` : (ahem) a function's function // ---------------------------------------- import _partial from './partial'; import _delay from './delay'; // Defers a function, scheduling it to run after the current call stack has cleared. var _defer = _partial(_delay, _partial.placeholder, 1); export {_defer as default};
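// Usage example (illustrative, not part of the module): _defer queues the
// call for after the current call stack unwinds, so the synchronous line
// prints first.
import _defer from './defer';

_defer(console.log, 'deferred');
console.log('immediate');
// -> logs "immediate", then "deferred"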
KCodeYT/gomint
gomint-server/src/main/java/io/gomint/server/world/block/Glowstone.java
package io.gomint.server.world.block; import io.gomint.inventory.item.ItemGlowstoneDust; import io.gomint.inventory.item.ItemStack; import io.gomint.world.block.BlockType; import io.gomint.server.registry.RegisterInfo; import io.gomint.world.block.BlockGlowstone; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ThreadLocalRandom; /** * @author geNAZt * @version 1.0 */ @RegisterInfo( sId = "minecraft:glowstone" ) public class Glowstone extends Block implements BlockGlowstone { @Override public String getBlockId() { return "minecraft:glowstone"; } @Override public long getBreakTime() { return 450; } @Override public boolean isTransparent() { return true; } @Override public float getBlastResistance() { return 1.5f; } @Override public BlockType getBlockType() { return BlockType.GLOWSTONE; } @Override public boolean canBeBrokenWithHand() { return true; } @Override public List<ItemStack> getDrops( ItemStack itemInHand ) { return new ArrayList<ItemStack>() {{ add( ItemGlowstoneDust.create( ThreadLocalRandom.current().nextBoolean() ? 2 : 4 ) ); }}; } }
seagullua/Alarm-x4
Classes/Widgets/TurnOnAlarmMenuWidget.h
#ifndef TURNONALARMMENUWIDGET_H
#define TURNONALARMMENUWIDGET_H

#include "Core/Widget.h"

#include <memory> // for std::auto_ptr used below

class TurnOnAlarmMenuDelegate
{
public:
    TurnOnAlarmMenuDelegate(cocos2d::CCObject* wake_up_obj=0,
                            cocos2d::SEL_CallFunc wake_up_method=0,
                            cocos2d::CCObject* go_out_obj=0,
                            cocos2d::SEL_CallFunc go_out_method=0)
        : _wake_up_obj(wake_up_obj),
          _wake_up_method(wake_up_method),
          _go_out_obj(go_out_obj),
          _go_out_method(go_out_method)
    {}
    virtual ~TurnOnAlarmMenuDelegate() {}

    virtual void onWakeUpClick()
    {
        if(_wake_up_method && _wake_up_obj)
        {
            (_wake_up_obj->*_wake_up_method)();
        }
    }
    virtual void onGoOutInTimeClick()
    {
        if(_go_out_method && _go_out_obj)
        {
            (_go_out_obj->*_go_out_method)();
        }
    }
private:
    cocos2d::CCObject* _wake_up_obj;
    cocos2d::SEL_CallFunc _wake_up_method;
    cocos2d::CCObject* _go_out_obj;
    cocos2d::SEL_CallFunc _go_out_method;
};

typedef std::auto_ptr<TurnOnAlarmMenuDelegate> TurnOnAlarmMenuDelegatePtr;

class TurnOnAlarmMenuWidget : public Widget
{
public:
    TurnOnAlarmMenuWidget(TurnOnAlarmMenuDelegatePtr delegate);

private:
    TurnOnAlarmMenuWidget(const TurnOnAlarmMenuWidget&);
    TurnOnAlarmMenuWidget& operator=(const TurnOnAlarmMenuWidget&);

    void onInit();
    void displayButtons();

    void onWakeUpClick(MTile *);
    void onGoOutInTimeClick(MTile *);

    MTileBlock _rs;
    TurnOnAlarmMenuDelegatePtr _delegate;
};

#endif // TURNONALARMMENUWIDGET_H
olebole/voclient
mini-d/vocRegistry_f77.c
<reponame>olebole/voclient<gh_stars>1-10 /** * VOC_REGISTRYQUERY -- Utility code to act as a client interface to * the NVO Registry service. * * RegistryQuery * ---------------------- * * High-Level Query: * * res = voc_regSearch (term1, term2, orValues) * res = voc_regSearchBySvc (svc, term, orValues) * * Programmatic Query: * * query = voc_regQuery (term, orValues) // OR keyword list? * * voc_regAddSearchTerm (query, term, orValue) // OR term w/ previous * voc_regRemoveSearchTerm (query, term) // remove search term * count = voc_regGetSTCount (query) * * str = voc_regGetQueryString (query) // GET form of query * * res = voc_regExecute (query) // return result obj * str = voc_regExecuteRaw (query) // return raw XML * * RegistryQueryResult * * count = voc_resGetCount (res) * * str = voc_resGetStr (res, attribute, index) * dval = voc_resGetFloat (res, attribute, index) * ival = voc_resGetInt (res, attribute, index) * * For this implementation, we've chose to use the NVO Registry at * JHU/STScI, specifically the QueryRegistry() method which provides a * 'SimpleResource' form of the resource record. Support for the newer * IVOA standard will be added later, for now we can quickly access the most * commonly used fields of a resource using both a keyword and SQL form of * the search. * * * @file vocRegistry_f77.c * @author <NAME> * @version July 2006 * ************************************************************************* */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <signal.h> #include <errno.h> #define _VOCLIENT_LIB_ #include "VOClient.h" /* Procedure name mapping. */ #ifdef _NO_US_ #define VF_REGSEARCH vfregsearch #define VF_REGSEARCHBYSVC vfregsearchbysvc #define VF_REGQUERY vfregquery #define VF_REGADDSEARCHTERM vfregaddsearchterm #define VF_REGREMOVESEARCHTERM vfregremovesearchterm #define VF_REGGETSTCOUNT vfreggetstcount #define VF_REGGETQUERYSTRING vfreggetquerystring #define VF_REGEXECUTE vfregexecute #define VF_REGEXECUTERAW vfregexecuteraw #define VF_RESGETCOUNT vfresgetcount #define VF_RESGETSTR vfresgetstr #define VF_RESGETFLOAT vfresgetfloat #define VF_RESGETINT vfresgetint #else #define VF_REGSEARCH vfregsearch_ #define VF_REGSEARCHBYSVC vfregsearchbysvc_ #define VF_REGQUERY vfregquery_ #define VF_REGADDSEARCHTERM vfregaddsearchterm_ #define VF_REGREMOVESEARCHTERM vfregremovesearchterm_ #define VF_REGGETSTCOUNT vfreggetstcount_ #define VF_REGGETQUERYSTRING vfreggetquerystring_ #define VF_REGEXECUTE vfregexecute_ #define VF_REGEXECUTERAW vfregexecuteraw_ #define VF_RESGETCOUNT vfresgetcount_ #define VF_RESGETSTR vfresgetstr_ #define VF_RESGETFLOAT vfresgetfloat_ #define VF_RESGETINT vfresgetint_ #endif /* Prototype declarations. 
*/ void VF_REGSEARCH (char *term1, char *term2, int *orValues, RegResult *result, int *ier, int len1, int len2); void VF_REGSEARCHBYSVC (char *svc, char *term, int *orValues, RegResult *result, int *ier, int slen, int tlen); void VF_REGQUERY (char *term, int *orValues, RegQuery *query, int *ier, int len); void VF_REGADDSEARCHTERM (RegQuery *query, char *term, int *orValue, int tlen); void VF_REGREMOVESEARCHTERM (RegQuery *query, char *term, int tlen); void VF_REGGETSTCOUNT (RegQuery *query, int *count); void VF_REGGETQUERYSTRING (RegQuery *query, char *qstr, int *len, int qlen); void VF_REGEXECUTE (RegQuery *query, RegResult *result); void VF_REGEXECUTERAW (RegQuery *query, char *raw, int *len, int rlen); void VF_RESGETCOUNT (RegResult *res, int *count); void VF_RESGETSTR (RegResult *res, char *attr, int *index, char *str, int *len, int alen, int slen); void VF_RESGETFLOAT (RegResult *res, char *attr, int *index, double *dval, int alen); void VF_RESGETINT (RegResult *res, char *attr, int *index, int *ival, int alen); extern VOClient *vo; /* Interface runtime struct */ /* Private interface declarations. */ extern char *sstrip (char *instr, int len); extern void spad (char *outstr, int len); /** * VF_REGSEARCH -- High-level procedure to form a query and execute it * immediately. We allow that 'term1' may be a complex SQL WHERE predicate, * and that 'term2' (or vice versa) is a search-keyword list. The * 'orValues' applies to the keyword list (if present), otherwise it applies * to the two search term elements. The default action if two terms are * specified is to logically AND them. * * The thinking here is that one might want SIAP services for Quasars. This * is easily expressed in an SQL form to get SIAP resources, however a * Quasar may be known as a QSO, AGN, active-nuclei, etc and so we need a * easy way to OR the keywords but AND that result with the SQL predicate. * * @brief High-level Registry query interface * @fn call vf_regSearch (char *term1, char *term2, int *orValues, * int *result, int *ier) * * @param term1 first search term * @param term2 second search term * @param orValues logically OR values? * @param result handle to result object * @param ier function error code (OK or ERR) * @returns nothing */ void VF_REGSEARCH (char *term1, char *term2, int *orValues, RegResult *result, int *ier, int len1, int len2) { char *_term1 = sstrip (term1, len1); char *_term2 = sstrip (term2, len2); *result = voc_regSearch (_term1, _term2, *orValues); *ier = (*result ? OK : ERR); free ((char *) _term1); free ((char *) _term2); } /** * VF_REGSEARCHBYSERVICE -- Search the Registry using a search term and * constrain by service type. * * @brief Search Registry using a search term and service constraint * @fn call vf_regSearchByService (char *svc, char *term, int *orValues, * int *result, int *ier) * * @param svc service type constraint * @param term keyword search term * @param orValues logically OR values? * @param result handle to result object * @param ier function error code (OK or ERR) * @returns nothing */ void VF_REGSEARCHBYSVC (char *svc, char *term, int *orValues, RegResult *result, int *ier, int slen, int tlen) { char *_svc = sstrip (svc, slen); char *_term = sstrip (term, tlen); *result = voc_regSearchByService (_svc, _term, *orValues); *ier = (*result ? OK : ERR); free ((char *) _svc); free ((char *) _term); } /** * VF_REGQUERY -- Create a RegistryQuery object. * * @brief Create a RegistryQuery object. 
* @fn call vf_regQuery (char *term, int *orValues, int *query, int *ier) * * @param term keyword search term * @param orValues logically OR values? * @param query handle to query object * @param ier function error code (OK or ERR) * @returns nothing */ void VF_REGQUERY (char *term, int *orValues, RegQuery *query, int *ier, int len) { char *_term = sstrip (term, len); *query = voc_regQuery (_term, *orValues); *ier = (*query ? OK : ERR); free ((char *) _term); } /** * VF_REGADDSEARCHTERM -- Add a search term (sql predicate or keyword list) * to the specified query. * * @brief Add a search term to the specified query * @fn call vf_regAddSearchTerm (RegQuery *query, char *term, int *orValue) * * @param query Registry query handle * @param term keyword search term * @param orValues logically OR values? * @returns nothing */ void VF_REGADDSEARCHTERM (RegQuery *query, char *term, int *orValue, int len) { char *_term = sstrip (term, len); voc_regAddSearchTerm (*query, _term, *orValue); free ((char *) _term); } /** * VF_REGREMOVESEARCHTERM -- Remove the search term from the query. * * @brief Remove a search term to the specified query * @fn call vf_regRemoveSearchTerm (RegQuery *query, char *term) * * @param query Registry query handle * @param term keyword search term * @returns nothing */ void VF_REGREMOVESEARCHTERM (RegQuery *query, char *term, int len) { char *_term = sstrip (term, len); voc_regRemoveSearchTerm (*query, _term); free ((char *) _term); } /** * VF_REGGETSTCOUNT -- Get the number of search terms in the current query. * * @brief Get the number of search terms in the current query. * @fn call vf_regGetSTCount (RegQuery *query, int *count) * * @param query Registry query handle * @returns nothing */ void VF_REGGETSTCOUNT (RegQuery *query, int *count) { *count = voc_resGetCount (*query); } /** * VF_REGGETQUERYSTRING -- Get the current query as an http GET URL. * * @brief Get the current query as an http GET URL. * @fn call vf_regGetQueryString (RegQuery *query, char *qstr, int *len) * * @param query Registry query handle * @param qstr returned query string * @param len length of query string * @returns nothing */ void VF_REGGETQUERYSTRING (RegQuery *query, char *qstr, int *len, int qlen) { char *_result = voc_regGetQueryString (*query); memset (qstr, 0, qlen); if ((*len = strlen(_result)) > qlen) fprintf (stderr, "Warning: truncating result string: len=%d maxch=%d\n", *len, qlen); spad (strncpy (qstr, _result, *len), qlen); free ((char *) _result); } /** * VF_REGEXECUTE -- Execute the specified query, returning a result object * code or NULL. * * @brief Execute the specified query * @fn call vf_regExecute (RegQuery *query, RegResult *result) * * @param query Registry query handle * @param result Registry result handle * @returns nothing */ void VF_REGEXECUTE (RegQuery *query, RegResult *result) { *result = voc_regExecute (*query); } /** * VF_REGEXECUTERAW -- Execute the specified query and return the raw * resulting XML string. 
* * @brief Execute the specified query and return raw result string * @fn call vf_regExecuteRaw (RegQuery *query, char *raw, int *len) * * @param query Registry query handle * @param raw raw result string * @param len length of result string * @returns nothing */ void VF_REGEXECUTERAW (RegQuery *query, char *raw, int *len, int rlen) { char *_result = voc_regExecuteRaw (*query); memset (raw, 0, rlen); if ((*len = strlen(_result)) > rlen) fprintf (stderr, "Warning: truncating result string: len=%d maxch=%d\n", *len, rlen); spad (strncpy (raw, _result, *len), rlen); free ((char *) _result); } /****************************************************************************/ /********************* RegistryQueryResult Methods ************************/ /****************************************************************************/ /** * VF_RESGETCOUNT -- Return a count of the number of results records. * * @brief Return a count of the number of results records. * @fn call vf_resGetCount (RegResult *res, int *count) * * @param res Registry result handle * @param count result count * @returns nothing */ void VF_RESGETCOUNT (RegResult *res, int *count) { *count = voc_resGetCount (*res); } /** * VF_GETSTR -- Get a string-valued attribute from the result resource * record. Currently recognized real-valued attributes include: * * Title Resource title (long version) * ShortName Short name of Resource * ServiceURL Service URL (if appropriate) * ReferenceURL URL to reference about Resource * Description Text description of resource * Identifier Standard ivo identifier of resource * ServiceType Service Type (Cone, Siap, etc) * Type Resource Type (catalog, survey, etc) * CoverageSpatial Spatial coverage (STC) * CoverageTemporal Temporal coverage of data * * CoverageSpectral Spectral coverage (csv list of bandpasses) * ContentLevel Content level (research, EPO, etc -- csv list) * * Attribute string are case-insensitive. * * @brief Get a string-valued attribute from the result resource record * @fn call vf_resGetStr (RegResult *res, char *attr, int *index, * char *str, int *len) * * @param res Registry result handle * @param attr record attribute * @param index record index * @param str attribute string * @param len length of attribute string * @returns nothing */ void VF_RESGETSTR (RegResult *res, char *attr, int *index, char *str, int *len, int alen, int slen) { char *_attr = sstrip (attr, alen); char *_result = voc_resGetStr (*res, _attr, *index-1); memset (str, 0, slen); if ((*len = strlen(_result)) > slen) fprintf (stderr, "Warning: truncating result string: len=%d maxch=%d\n", *len, slen); spad (strncpy (str, _result, *len), slen); free ((char *) _result); free ((char *) _attr); } /** * VF_GETFLOAT -- Get a real-valued attribute from the result resource * record. Currently recognized real-valued attributes include: * * MaxSR maximum search radius * * Attribute string are case-insensitive. * * @brief Get a real-valued attribute from the result resource record * @fn call vf_resGetFloat (RegResult *res, char *attr, int *index, * double *dval) * * @param res Registry result handle * @param attr record attribute * @param index record index * @param dval double-precision value * @returns nothing */ void VF_RESGETFLOAT (RegResult *res, char *attr, int *index, double *dval, int alen) { char *_attr = sstrip (attr, alen); *dval = voc_resGetFloat (*res, _attr, *index-1); free ((char *) _attr); } /** * VF_GETINT -- Get a integer-valued attribute from the result resource * record. 
Currently recognized real-valued attributes include: * * MaxRecords maximum records returned by the service * * Attribute string are case-insensitive. * * @brief Get an int-valued attribute from the result resource record * @fn call vf_resGetInt (RegResult *res, char *attr, int *index, * int *ival) * * @param res Registry result handle * @param attr record attribute * @param index record index * @param ival integer value * @returns nothing */ void VF_RESGETINT (RegResult *res, char *attr, int *index, int *ival, int alen) { char *_attr = sstrip (attr, alen); *ival = voc_resGetInt (*res, _attr, *index-1); free ((char *) _attr); }
steph-dmts/mailchain
cmd/mailchain/internal/prompts/promptstest/select.go
package promptstest import ( "testing" "github.com/stretchr/testify/assert" ) func MockSelectItemSkipable(t *testing.T, wantItems []string, returnSelected string, returnSkipped bool, returnErr error) func(label string, items []string, skipable bool) (selected string, skipped bool, err error) { return func(label string, items []string, skipable bool) (selected string, skipped bool, err error) { if !assert.EqualValues(t, wantItems, items) { t.Errorf("items = %v, wantItems %v", items, wantItems) } return returnSelected, returnSkipped && skipable, returnErr } } func MockSelectItem(t *testing.T, wantItems []string, returnSelected string, returnErr error) func(label string, items []string) (string, error) { return func(label string, items []string) (string, error) { if !assert.EqualValues(t, wantItems, items) { t.Errorf("items = %v, wantItems %v", items, wantItems) } return returnSelected, returnErr } }
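// Illustrative test using the mock above. The import path is assumed from
// the repository layout and may differ; the code under test is assumed to
// accept a select function of this shape.
package promptstest_test

import (
	"testing"

	"github.com/mailchain/mailchain/cmd/mailchain/internal/prompts/promptstest"
)

func TestMockSelectItemSketch(t *testing.T) {
	items := []string{"mainnet", "ropsten"}
	// The mock asserts that the items passed at call time equal wantItems,
	// then returns the canned selection.
	selectFn := promptstest.MockSelectItem(t, items, "mainnet", nil)

	got, err := selectFn("network", items)
	if err != nil || got != "mainnet" {
		t.Fatalf("selectFn() = %q, %v", got, err)
	}
}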
ggonnella/genometools
src/match/seqabstract.h
/*
  Copyright (c) 2013 <NAME> <<EMAIL>>
  Copyright (c) 2013 Center for Bioinformatics, University of Hamburg

  Permission to use, copy, modify, and distribute this software for any
  purpose with or without fee is hereby granted, provided that the above
  copyright notice and this permission notice appear in all copies.

  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

#ifndef SEQABSTRACT_H
#define SEQABSTRACT_H

#include "core/types_api.h"
#include "core/encseq_api.h"

/* Class <GtSeqabstract> represents short substrings of either <GtEncseq> or
   <GtUchar>-arrays. All indices given in the methods of this class are
   relative to <offset>. */
typedef struct GtSeqabstract GtSeqabstract;

GtSeqabstract* gt_seqabstract_new_empty(void);

/* Creates a new <GtSeqabstract> object from <string>, starting at <offset>
   with length <len>. <string> should be long enough and <offset> within
   <string>. Ownership of <string> stays with the caller. */
GtSeqabstract* gt_seqabstract_new_gtuchar(const GtUchar *string,
                                          GtUword len,
                                          GtUword offset);

/* Creates a new <GtSeqabstract> object from <encseq>, starting at <offset>
   with length <len>. Fails if <offset> is out of bounds, or if
   <offset> + <len> exceeds the end of <encseq>. */
GtSeqabstract* gt_seqabstract_new_encseq(const GtEncseq *encseq,
                                         GtUword len,
                                         GtUword offset);

/* Reinitializes <sa> with <string>, starting at <offset> with length <len>. */
void gt_seqabstract_reinit_gtuchar(GtSeqabstract *sa,
                                   const GtUchar *string,
                                   GtUword len,
                                   GtUword offset);

/* Reinitializes <sa> with <encseq>, starting at <offset> with length <len>. */
void gt_seqabstract_reinit_encseq(GtSeqabstract *sa,
                                  const GtEncseq *encseq,
                                  GtUword len,
                                  GtUword offset);

/* Returns the length of <sa>. */
GtUword gt_seqabstract_length(const GtSeqabstract *sa);

/* Returns the character at position <idx> (relative to <offset>) of <sa>. */
GtUchar gt_seqabstract_encoded_char(const GtSeqabstract *sa,
                                    GtUword idx);

/* Calculates the longest common prefix of the suffixes starting at <ustart>
   and <vstart> of <useq> and <vseq>. */
GtUword gt_seqabstract_lcp(bool forward,
                           const GtSeqabstract *useq,
                           const GtSeqabstract *vseq,
                           GtUword ustart,
                           GtUword vstart);

void gt_seqabstract_delete(GtSeqabstract *sa);
#endif
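/* Usage sketch (illustrative, not part of this header): the GtUchar values
   below stand in for already-encoded characters; real callers usually obtain
   them from a GtEncseq. Assumes <stdbool.h> semantics for the bool parameter.

     GtUchar u[] = {0, 1, 2, 3}, v[] = {1, 2, 3, 0};
     GtSeqabstract *useq = gt_seqabstract_new_gtuchar(u, 4UL, 0UL);
     GtSeqabstract *vseq = gt_seqabstract_new_gtuchar(v, 4UL, 0UL);

     GtUword lcp = gt_seqabstract_lcp(true, useq, vseq, 1UL, 0UL);
     The suffix u[1..] and v[0..] both read 1,2,3 -> lcp == 3.

     gt_seqabstract_delete(useq);
     gt_seqabstract_delete(vseq);
*/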
CNES/ccsdsmo-malgo
mal/sessiontype.go
/** * MIT License * * Copyright (c) 2020 CNES * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * This file has been automatically generated by fr.cnes.mo:StubGenerator_go * It has then be slightly transformed to match the underlying type UOctet originally defined in the mal. * The mal should eventually use the standard generated definition of the type. */ package mal import ( "fmt" ) // Defines SessionType type // the generator would define the type as uint32 instead of UOctet //type SessionType uint32 const ( SESSIONTYPE_LIVE_OVAL = iota SESSIONTYPE_LIVE_NVAL = 1 SESSIONTYPE_SIMULATION_OVAL SESSIONTYPE_SIMULATION_NVAL = 2 SESSIONTYPE_REPLAY_OVAL SESSIONTYPE_REPLAY_NVAL = 3 ) // Conversion table OVAL->NVAL var nvalTable_SessionType = []uint32 { SESSIONTYPE_LIVE_NVAL, SESSIONTYPE_SIMULATION_NVAL, SESSIONTYPE_REPLAY_NVAL, } // Conversion map NVAL->OVAL var ovalMap_SessionType map[uint32]uint32 var ( SESSIONTYPE_LIVE = SessionType(SESSIONTYPE_LIVE_NVAL) SESSIONTYPE_SIMULATION = SessionType(SESSIONTYPE_SIMULATION_NVAL) SESSIONTYPE_REPLAY = SessionType(SESSIONTYPE_REPLAY_NVAL) ) var NullSessionType *SessionType = nil func init() { ovalMap_SessionType = make(map[uint32]uint32) for oval, nval := range nvalTable_SessionType { ovalMap_SessionType[nval] = uint32(oval) } } func (receiver SessionType) GetNumericValue() uint32 { // return uint32(receiver) return uint32(uint8(UOctet(receiver))) } func (receiver SessionType) GetOrdinalValue() (uint32, error) { nval := receiver.GetNumericValue() oval, ok := ovalMap_SessionType[nval] if !ok { return 0, fmt.Errorf("Invalid SessionType value: %d", nval) } return oval, nil } func SessionTypeFromNumericValue(nval uint32) (SessionType, error) { _, ok := ovalMap_SessionType[nval] if !ok { return SessionType(0), fmt.Errorf("Invalid numeric value for SessionType: %v", nval) } return SessionType(nval), nil } func SessionTypeFromOrdinalValue(oval uint32) (SessionType, error) { if oval >= uint32(len(nvalTable_SessionType)) { return SessionType(0), fmt.Errorf("Invalid ordinal value for SessionType: %v", oval) } return SessionType(nvalTable_SessionType[oval]), nil } // ================================================================================ // Defines SessionType type as a MAL Element //const SESSIONTYPE_TYPE_SHORT_FORM Integer = 20 //const SESSIONTYPE_SHORT_FORM Long = 0x65000001000014 // Registers SessionType type for polymorphism handling func init() { RegisterMALElement(SESSIONTYPE_SHORT_FORM, NullSessionType) } // Returns the absolute short form of the element 
type. func (receiver *SessionType) GetShortForm() Long { return SESSIONTYPE_SHORT_FORM } // Returns the number of the area this element type belongs to. func (receiver *SessionType) GetAreaNumber() UShort { return AREA_NUMBER } // Returns the version of the area this element type belongs to. func (receiver *SessionType) GetAreaVersion() UOctet { return AREA_VERSION } // Returns the number of the service this element type belongs to. func (receiver *SessionType) GetServiceNumber() UShort { return NULL_SERVICE_NUMBER } // Returns the relative short form of the element type. func (receiver *SessionType) GetTypeShortForm() Integer { return SESSIONTYPE_TYPE_SHORT_FORM } // Allows the creation of an element in a generic way, i.e., using the MAL Element polymorphism. func (receiver *SessionType) CreateElement() Element { return NullSessionType } func (receiver *SessionType) IsNull() bool { return receiver == nil } func (receiver *SessionType) Null() Element { return NullSessionType } // Encodes this element using the supplied encoder. // @param encoder The encoder to use, must not be null. func (receiver *SessionType) Encode(encoder Encoder) error { specific := encoder.LookupSpecific(SESSIONTYPE_SHORT_FORM) if specific != nil { return specific(receiver, encoder) } oval, err := receiver.GetOrdinalValue() if err != nil { return err } value := NewUOctet(uint8(oval)) return encoder.EncodeUOctet(value) } // Decodes an instance of this element type using the supplied decoder. // @param decoder The decoder to use, must not be null. // @return the decoded instance, may be not the same instance as this Element. func (receiver *SessionType) Decode(decoder Decoder) (Element, error) { specific := decoder.LookupSpecific(SESSIONTYPE_SHORT_FORM) if specific != nil { return specific(decoder) } elem, err := decoder.DecodeUOctet() if err != nil { return receiver.Null(), err } value, err := SessionTypeFromOrdinalValue(uint32(uint8(*elem))) return &value, err }
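// Round-trip sketch (illustrative, not part of the generated file): ordinal
// values index nvalTable_SessionType, numeric values are the wire encoding.
func sessionTypeSketch() {
	st, err := SessionTypeFromOrdinalValue(1)
	if err != nil {
		return // ordinal out of range
	}
	// st == SESSIONTYPE_SIMULATION: numeric (wire) value 2, ordinal 1.
	_ = st.GetNumericValue() // 2
	if oval, err := st.GetOrdinalValue(); err == nil {
		_ = oval // 1
	}
}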
GillesArcas/Advent_of_Code
2018/13.py
import re

DATA = '13.txt'


def get_data():
    with open(DATA) as f:
        amap = list(line.strip('\n') for line in f.readlines())

    carts = list()
    for y, line in enumerate(amap):
        for match in re.finditer('([<>^v])', line):
            c = match.group(1)
            x = match.start(1)
            d = '^>v<'.index(c)
            carts.append((y, x, d, 0))

    for y, _ in enumerate(amap):
        amap[y] = re.sub('([<>])', '-', amap[y])
        amap[y] = re.sub('([v^])', '|', amap[y])

    return amap, carts


def step(x, y, direction):
    deltax = [0, 1, 0, -1]
    deltay = [-1, 0, 1, 0]
    return x + deltax[direction], y + deltay[direction]


def next_step(amap, y, x, direction, nbinter):
    mapc = amap[y][x]
    if mapc == '+':
        if nbinter == 0:
            # left
            direction = (direction + 3) % 4
        elif nbinter == 2:
            # right
            direction = (direction + 1) % 4
        else:
            pass
        nbinter = (nbinter + 1) % 3
    elif mapc in '|-':
        pass
    elif mapc == '/':
        direction = [1, 0, 3, 2][direction]
    elif mapc == '\\':
        direction = [3, 2, 1, 0][direction]
    else:
        assert 0, (x, y, mapc)
    x, y = step(x, y, direction)
    return (y, x, direction, nbinter)


def collision(cart, carts):
    for cart2 in carts:
        if cart2 != cart:
            if cart2[:2] == cart[:2]:
                return True
    return False


def next_tick(amap, carts):
    carts.sort()
    for index, cart in enumerate(carts):
        cart2 = next_step(amap, *cart)
        carts[index] = cart2
        if collision(cart2, carts):
            print('1>', cart2[1], cart2[0])
            return False
    return True


def next_tick2(amap, carts):
    carts.sort()
    for index, cart in enumerate(carts):
        if cart is None:
            continue
        cart = next_step(amap, *cart)
        carts[index] = cart
        for index2, cart2 in enumerate(carts):
            if cart2 is None:
                continue
            if cart2 != cart:
                if cart2[:2] == cart[:2]:
                    carts[index] = None
                    carts[index2] = None
    cartcrashed = set(index for index, cart in enumerate(carts) if not cart)
    if cartcrashed:
        for index in sorted(cartcrashed, reverse=True):
            carts.pop(index)
    if len(carts) == 1:
        print('2>', carts[0][1], carts[0][0])
        return False
    return True


def print_map(amap, carts):
    bmap = [line[:] for line in amap]
    for y, x, direction, _ in carts:
        line = list(bmap[y])
        line[x] = '^>v<'[direction]
        bmap[y] = ''.join(line)
    for line in bmap:
        print(line)


def code1():
    amap, carts = get_data()
    while next_tick(amap, carts):
        pass
    # print_map(amap, carts)


def code2():
    amap, carts = get_data()
    # print_map(amap, carts)
    while next_tick2(amap, carts):
        pass
    # print_map(amap, carts)


code1()
code2()
itamar244/arithmetic-evaluator
src/types.js
// @flow import type { SourceLocation } from './utils/location' export interface NodeBase { loc: SourceLocation; start: number; end: number; } export type AnyNode = NodeBase & { [string]: any } export type Node = | Program | Expression | Import | UnaryExpression | BinaryExpression | VariableDeclerations | VariableDeclerator | ConstLiteral | FunctionDeclaration | ParameterDeclaration | Equation | NumericLiteral | Identifier | Parenthesized | CallExpression | VectorExpression export type Statement = | Expression | Import | VariableDeclerations | FunctionDeclaration | ParameterDeclaration export type NodeType = | 'Program' | 'Expression' | 'Import' | 'UnaryExpression' | 'BinaryExpression' | 'VariableDeclerations' | 'VariableDeclerator' | 'ConstLiteral' | 'FunctionDeclaration' | 'ParameterDeclaration' | 'Equation' | 'NumericLiteral' | 'Identifier' | 'Parenthesized' | 'CallExpression' | 'VectorExpression' export type Program = NodeBase & { type: 'Program'; body: Statement[]; filename: string; } export type Expression = NodeBase & { type: 'Expression'; body: Node; } export type Import = NodeBase & { type: 'Import'; path: string; } export type UnaryExpression = NodeBase & { type: 'UnaryExpression'; operator: UnaryOperator; argument: Node; prefix: bool; } export type UnaryOperator = | '+' | '-' | '!' type BinNode = NodeBase & { left: Node; right: Node; } export type BinaryExpression = BinNode & { type: 'BinaryExpression'; operator: BinaryOperator; } export type BinaryOperator = | '+' | '-' | '*' | '/' | '^' | '%' export type VariableDeclerations = NodeBase & { type: 'VariableDeclerations'; declarations: VariableDeclerator[]; expression: Expression; } export type VariableDeclerator = NodeBase & { type: 'VariableDeclerator'; id: Identifier; init: Node; } export type ConstLiteral = NodeBase & { type: 'ConstLiteral'; // should be all legal names of const literals name: 'null' | 'inf'; } export type FunctionDeclaration = NodeBase & { type: 'FunctionDeclaration'; typeDefinitions: Identifier[] | null; params: ParameterDeclaration[]; id: Identifier; body: Node; } export type ParameterDeclaration = NodeBase & { type: 'ParameterDeclaration'; id: Identifier; declType: Identifier | null; } export type Equation = BinNode & { type: 'Equation'; } export type NumericLiteral = NodeBase & { type: 'NumericLiteral'; value: number; } export type Identifier = NodeBase & { type: 'Identifier'; name: string; } export type Parenthesized = NodeBase & { type: 'Parenthesized'; body: Node; abs: bool; } export type CallExpression = NodeBase & { type: 'CallExpression'; callee: Identifier; typeArgs: Identifier[] | null; args: Node[]; } export type VectorExpression = NodeBase & { type: 'VectorExpression'; x: Node; y: Node; }
stefb965/nomulus
java/google/registry/bigquery/BigqueryFactory.java
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package google.registry.bigquery;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.Sets.newConcurrentHashSet;
import static google.registry.util.FormattingLogger.getLoggerForCallerClass;

import com.google.api.client.extensions.appengine.http.UrlFetchTransport;
import com.google.api.client.googleapis.extensions.appengine.auth.oauth2.AppIdentityCredential;
import com.google.api.client.http.HttpRequestInitializer;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.jackson2.JacksonFactory;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.BigqueryScopes;
import com.google.api.services.bigquery.model.Dataset;
import com.google.api.services.bigquery.model.DatasetReference;
import com.google.api.services.bigquery.model.Table;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.api.services.bigquery.model.TableReference;
import com.google.api.services.bigquery.model.TableSchema;
import com.google.common.collect.ImmutableList;
import google.registry.util.FormattingLogger;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;

/** Factory for creating {@link Bigquery} connections. */
public class BigqueryFactory {

  private static final FormattingLogger logger = getLoggerForCallerClass();

  // Cross-request caches to avoid unnecessary RPCs.
  private static Set<String> knownExistingDatasets = newConcurrentHashSet();
  private static Set<String> knownExistingTables = newConcurrentHashSet();

  @Inject Map<String, ImmutableList<TableFieldSchema>> bigquerySchemas;
  @Inject Subfactory subfactory;
  @Inject BigqueryFactory() {}

  /** This class is broken out solely so that it can be mocked inside of tests. */
  static class Subfactory {
    @Inject Subfactory() {}

    public Bigquery create(
        String applicationName,
        HttpTransport transport,
        JsonFactory jsonFactory,
        HttpRequestInitializer httpRequestInitializer) {
      return new Bigquery.Builder(transport, jsonFactory, httpRequestInitializer)
          .setApplicationName(applicationName)
          .build();
    }
  }

  /** Returns a new connection to BigQuery. */
  public Bigquery create(
      String applicationName,
      HttpTransport transport,
      JsonFactory jsonFactory,
      HttpRequestInitializer httpRequestInitializer) {
    return subfactory.create(applicationName, transport, jsonFactory, httpRequestInitializer);
  }

  /**
   * Returns a new connection to Bigquery, first ensuring that the given dataset exists in the
   * project with the given id, creating it if required.
*/ public Bigquery create(String projectId, String datasetId) throws IOException { Bigquery bigquery = create( getClass().getSimpleName(), new UrlFetchTransport(), new JacksonFactory(), new AppIdentityCredential(BigqueryScopes.all())); // Note: it's safe for multiple threads to call this as the dataset will only be created once. if (!knownExistingDatasets.contains(datasetId)) { ensureDataset(bigquery, projectId, datasetId); knownExistingDatasets.add(datasetId); } return bigquery; } /** * Returns a new connection to Bigquery, first ensuring that the given dataset and table exist in * project with the given id, creating them if required. */ public Bigquery create(String projectId, String datasetId, String tableId) throws IOException { Bigquery bigquery = create(projectId, datasetId); checkArgument(bigquerySchemas.containsKey(tableId), "Unknown table ID: %s", tableId); if (!knownExistingTables.contains(tableId)) { ensureTable( bigquery, new TableReference() .setDatasetId(datasetId) .setProjectId(projectId) .setTableId(tableId), bigquerySchemas.get(tableId)); knownExistingTables.add(tableId); } return bigquery; } /** * Ensures the dataset exists by trying to create it. Note that it's not appreciably cheaper * to check for dataset existence than it is to try to create it and check for exceptions. */ // Note that these are not static so they can be mocked for testing. private void ensureDataset(Bigquery bigquery, String projectId, String datasetId) throws IOException { try { bigquery.datasets() .insert(projectId, new Dataset().setDatasetReference( new DatasetReference() .setProjectId(projectId) .setDatasetId(datasetId))) .execute(); } catch (IOException e) { // Swallow errors about a duplicate dataset, and throw any other ones. if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) { throw e; } } } /** Ensures the table exists in Bigquery. */ private void ensureTable(Bigquery bigquery, TableReference table, List<TableFieldSchema> schema) throws IOException { try { bigquery.tables().insert(table.getProjectId(), table.getDatasetId(), new Table() .setSchema(new TableSchema().setFields(schema)) .setTableReference(table)) .execute(); logger.infofmt("Created BigQuery table %s:%s.%s", table.getProjectId(), table.getDatasetId(), table.getTableId()); } catch (IOException e) { // Swallow errors about a table that exists, and throw any other ones. if (!BigqueryJobFailureException.create(e).getReason().equals("duplicate")) { throw e; } } } }
shamazmazum/voxvision
benchmarks/voxtrees/static-tree-creation.c
#include <stdlib.h> #include <stdio.h> #include <voxtrees.h> #include <gettime.h> #define N 10000000 int main () { vox_dot *dots = vox_alloc (sizeof(vox_dot)*N); int i; double time; struct vox_node *tree; vox_dot_set (vox_voxel, 1, 1, 1); for (i=0; i<N; i++) { // XXX: must be unique vox_dot_set (dots[i], rand(), rand(), rand()); } time = gettime(); tree = vox_make_tree (dots, N); time = gettime() - time; printf ("Static tree creation took %f seconds. " "Number of voxels in tree %lu\n", time, vox_voxels_in_tree (tree)); vox_destroy_tree (tree); return 0; }
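/* Note on the "XXX: must be unique" comment above (illustrative fix, not the
 * benchmark's actual one): rand() can repeat coordinates, letting duplicate
 * dots into the input. Deriving the coordinates from the loop index instead
 * guarantees uniqueness, e.g. three base-K digits with K*K*K >= N:
 *
 *     const long K = 216;  216^3 = 10077696 >= N
 *     vox_dot_set (dots[i], i % K, (i / K) % K, i / (K * K));
 */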
brent2333/barterly
client/src/components/posts/PostsByUser.js
import React, { Fragment, useEffect } from 'react'
import PropTypes from 'prop-types'
import { connect } from 'react-redux';
import Spinner from '../layout/Spinner';
import PostItem from './PostItem';
import { getPostsByUser } from '../../actions/post';

const PostsByUser = ({ getPostsByUser, post: { posts, loading }, match }) => {
    useEffect(() => {
        getPostsByUser(match.params.id);
    }, [getPostsByUser, match.params.id]);

    return loading ? <Spinner /> : (
        <Fragment>
            <h1 className="large text-primary">Posts By User</h1>
            {/* <p className="lead"><i className="fas fa-user"></i>Posts By User</p> */}
            {
                posts.length > 0 ? (<div className="posts">
                    {posts.map(post => (
                        <PostItem key={post._id} post={post}/>
                    ))}
                </div>) : <Fragment>... No posts :(</Fragment>
            }
        </Fragment>
    );
}

PostsByUser.propTypes = {
    getPostsByUser: PropTypes.func.isRequired,
    post: PropTypes.object.isRequired
}

const mapStateToProps = state => ({
    post: state.post
});

export default connect(mapStateToProps, { getPostsByUser })(PostsByUser);
chopshop-166/chopshoplib
core/src/test/java/com/chopshop166/chopshoplib/states/Claw2.java
package com.chopshop166.chopshoplib.states;

/** Sample Claw class using states. */
public class Claw2 extends StateSubsystem<Direction> {

    /** Create a Claw and set up state transitions. */
    public Claw2() {
        super(Direction.NEUTRAL);
        transition(Direction.NEUTRAL, Direction.FORWARD);
        transition(Direction.NEUTRAL, Direction.REVERSE);
        transition(Direction.REVERSE, Direction.FORWARD);
        transition(Direction.FORWARD, Direction.REVERSE);
        onEntry(Direction.FORWARD, () -> {
            // Solenoid forward
        });
        onEntry(Direction.REVERSE, () -> {
            // Solenoid reverse
        });
    }

    @Override
    public void reset() {
        // No-op
    }
}
lw-lin/incubator-ranger
ranger-plugin-classloader/src/test/java/org/apache/ranger/plugin/classloader/test/Impl/TestChildFistClassLoader.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.ranger.plugin.classloader.test.Impl; import java.io.File; import java.net.URL; import org.apache.ranger.plugin.classloader.RangerPluginClassLoader; import org.apache.ranger.plugin.classloader.test.TestPlugin; import org.apache.ranger.plugin.classloader.test.TestPrintParent; public class TestChildFistClassLoader { public static void main(String [] args){ TestPrintParent testPrint = new TestPrintParent(); System.out.println(testPrint.getString()); File file = null; URL[] urls = null; try { file = new File(".." + File.separatorChar + "TestPluginImpl.class"); URL url = file.toPath().toUri().toURL(); urls = new URL[] {url}; } catch (Exception e) { e.printStackTrace(); } String[] libdirs = new String[] { file.getAbsolutePath() }; try { @SuppressWarnings("resource") RangerPluginClassLoader rangerPluginClassLoader = new RangerPluginClassLoader("hdfs", TestChildFistClassLoader.class); TestPlugin testPlugin = (TestPlugin) rangerPluginClassLoader.loadClass("org.apache.ranger.plugin.classloader.test.Impl.TestPluginImpl").newInstance(); System.out.println(testPlugin.print()); } catch (Throwable t) { t.printStackTrace(); } } }
jnplonte/helper
lib/sort.js
"use strict"; const sortFunction = { 'sortObject': function(data = {}) { const ordered = {}; Object.keys(data).sort().forEach(function (key) { ordered[key] = data[key]; }); return ordered; }, 'sortObjectInList': function(data = {}, arrayList = []) { const obj = {}; arrayList.forEach((dataList) => { obj[dataList] = data[dataList] || ''; }); return obj; }, 'sortArrayInList': function(data = [], arrayList = []) { return data.reduce((finalData, dataVal) => { const obj = {}; arrayList.forEach((dataList) => { obj[dataList] = dataVal[dataList] || ''; }); finalData.push(obj); return finalData; }, []); } } module.exports = sortFunction
danielroth1/PackingRectanglesOptimizer
src/gui/ControlPanel.java
package gui;

public class ControlPanel {

    public ControlPanel() {
        // TODO Auto-generated constructor stub
    }

}
Valda-IR/Valda-IR
valda-dexlib2/src/test/java/at/yawk/valda/ir/dex/compiler/NaiveCodeCompilerTest.java
package at.yawk.valda.ir.dex.compiler; import at.yawk.valda.SmaliUtils; import at.yawk.valda.ir.Classpath; import at.yawk.valda.ir.LocalClassMirror; import at.yawk.valda.ir.LocalMethodMirror; import at.yawk.valda.ir.TriState; import at.yawk.valda.ir.code.BasicBlock; import at.yawk.valda.ir.code.BinaryOperation; import at.yawk.valda.ir.code.Branch; import at.yawk.valda.ir.code.Const; import at.yawk.valda.ir.code.Invoke; import at.yawk.valda.ir.code.LocalVariable; import at.yawk.valda.ir.code.MethodBody; import at.yawk.valda.ir.code.Return; import at.yawk.valda.ir.code.Try; import at.yawk.valda.ir.dex.parser.DexParser; import com.google.common.collect.Iterables; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import javax.annotation.Nonnull; import lombok.extern.slf4j.Slf4j; import org.apache.commons.io.input.NullInputStream; import org.apache.commons.io.output.NullOutputStream; import org.jf.dexlib2.Opcodes; import org.jf.dexlib2.iface.DexFile; import org.jf.dexlib2.iface.MethodImplementation; import org.jf.dexlib2.iface.instruction.Instruction; import org.jf.dexlib2.iface.instruction.formats.Instruction31i; import org.jf.dexlib2.writer.io.DexDataStore; import org.jf.dexlib2.writer.pool.DexPool; import org.objectweb.asm.Type; import org.testng.Assert; import org.testng.annotations.Test; /** * @author yawkat */ @Slf4j public class NaiveCodeCompilerTest { // most of this is tested in CompilerSymmetryIntegrationTest instead @Test public void newInstanceWithReturnInSameLocal() throws IOException { Classpath classpath = new Classpath(); LocalClassMirror clazz = classpath.createClass(Type.getType("LTest;")); LocalMethodMirror method = clazz.addMethod("test"); method.setStatic(true); LocalVariable local = LocalVariable.reference("l"); BasicBlock entryPoint = BasicBlock.create(); entryPoint.addInstruction(Const.createString(local, "s")); entryPoint.addInstruction( Invoke.builder().newInstance() .method(classpath.getTypeMirror(Type.getType("Ljava/lang/String;")) .method("<init>", Type.getMethodType("(Ljava/lang/String;)V"), TriState.FALSE)) .parameter(local).returnValue(local) .build()); entryPoint.addInstruction(Return.create(null)); method.setBody(new MethodBody(entryPoint)); DexFile file = new DexCompiler().compile(classpath); SmaliUtils.printBaksmali(file, s -> log.info("newInstanceWithReturnInSameLocal: {}", s)); DexParser parser = new DexParser(); parser.add(file); MethodBody body = ((LocalClassMirror) parser.parse().getTypeMirror(Type.getType("LTest;"))) .method("test", Type.getMethodType("()V"), TriState.TRUE) .getBody(); Assert.assertNotNull(body); } @Test public void branchBacktrack() throws IOException { Classpath classpath = new Classpath(); LocalClassMirror clazz = classpath.createClass(Type.getType("LTest;")); LocalMethodMirror method = clazz.addMethod("test"); method.setStatic(true); LocalVariable local = LocalVariable.narrow("l"); BasicBlock entryPoint = BasicBlock.create(); for (int i = 0; i < ((1 << 16) / 3); i++) { entryPoint.addInstruction(Const.createNarrow(local, 0x12345678 + i)); } entryPoint.addInstruction(Branch.builder() .type(Branch.Type.EQUAL) .lhs(local) .rhs(null) .branchTrue(entryPoint) .branchFalse(entryPoint) .build()); method.setBody(new MethodBody(entryPoint)); DexFile file = new DexCompiler().compile(classpath); SmaliUtils.printBaksmali(file, s -> log.trace("branchBacktrack: {}", s)); MethodImplementation impl = Iterables.getOnlyElement(Iterables.getOnlyElement(file.getClasses()).getDirectMethods()) .getImplementation(); 
Assert.assertNotNull(impl); Instruction first = impl.getInstructions().iterator().next(); Assert.assertTrue(first instanceof Instruction31i); Assert.assertEquals(((Instruction31i) first).getNarrowLiteral(), 0x12345678); } @Test public void exceptionHandlerRange() throws IOException { Classpath classpath = new Classpath(); LocalClassMirror clazz = classpath.createClass(Type.getType("LTest;")); LocalMethodMirror method = clazz.addMethod("test"); method.setStatic(true); LocalVariable local = LocalVariable.narrow("l"); BasicBlock entryPoint = BasicBlock.create(); for (int i = 0; i < ((1 << 16) / 3); i++) { entryPoint.addInstruction(Const.createNarrow(local, 0x12345678 + i)); // division can throw exception, so the generator has to generate the try entryPoint.addInstruction( BinaryOperation.builder() .lhs(local).rhs(local).destination(local).type(BinaryOperation.Type.DIV_INT) .build()); } entryPoint.addInstruction(Return.createVoid()); Try try_ = new Try(); try_.addCatch(entryPoint); entryPoint.setTry(try_); method.setBody(new MethodBody(entryPoint)); DexFile file = new DexCompiler().compile(classpath); SmaliUtils.printBaksmali(file, s -> log.info("exceptionHandlerRange: {}", s)); MethodImplementation impl = Iterables.getOnlyElement(Iterables.getOnlyElement(file.getClasses()).getDirectMethods()) .getImplementation(); Assert.assertNotNull(impl); Instruction first = impl.getInstructions().iterator().next(); Assert.assertTrue(first instanceof Instruction31i); Assert.assertEquals(((Instruction31i) first).getNarrowLiteral(), 0x12345678); DexPool pool = new DexPool(Opcodes.getDefault()); pool.internClass(Iterables.getOnlyElement(file.getClasses())); pool.writeTo(new DexDataStore() { @Nonnull @Override public OutputStream outputAt(int offset) { return new NullOutputStream(); } @Nonnull @Override public InputStream readAt(int offset) { return new NullInputStream(100000); } @Override public void close() throws IOException { } }); } }
shoukaiseki/sksweb
src/java/beans/com/ibm/tivoli/imi/spi/IMUserStatusHandler.java
package com.ibm.tivoli.imi.spi;

public abstract interface IMUserStatusHandler
{
  public abstract void addListener(IMUserStatusListener paramIMUserStatusListener)
    throws IMException;

  public abstract void removeListener(IMUserStatusListener paramIMUserStatusListener);

  public abstract void removeAllListeners();

  public abstract boolean hasListenedUser(IMUser paramIMUser)
    throws IMException;

  public abstract void addListenedUser(IMUser paramIMUser)
    throws IMException;

  public abstract void removeListenedUser(IMUser paramIMUser);

  public abstract void removeAllListenedUsers();
}

/* Location: D:\maxapp\MAXIMO.ear\maximouiweb.war\WEB-INF\classes\
 * Qualified Name: com.ibm.tivoli.imi.spi.IMUserStatusHandler
 * JD-Core Version: 0.6.0
 */
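The decompiled interface above only declares the listener-management contract. As a rough illustration, here is a minimal sketch of how an implementation might track listeners and listened users; the class name AbstractIMUserStatusHandler and the choice of a thread-safe collection are assumptions, while IMUserStatusListener, IMUser, and IMException are the types referenced by the interface itself.

package com.ibm.tivoli.imi.spi;

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

// Hypothetical base implementation; not part of the decompiled API.
public abstract class AbstractIMUserStatusHandler implements IMUserStatusHandler {

    // Copy-on-write keeps iteration safe if status events fire on other threads.
    private final List<IMUserStatusListener> listeners = new CopyOnWriteArrayList<>();
    private final List<IMUser> listenedUsers = new CopyOnWriteArrayList<>();

    public void addListener(IMUserStatusListener listener) throws IMException {
        listeners.add(listener);
    }

    public void removeListener(IMUserStatusListener listener) {
        listeners.remove(listener);
    }

    public void removeAllListeners() {
        listeners.clear();
    }

    public boolean hasListenedUser(IMUser user) throws IMException {
        // Assumes IMUser implements a meaningful equals().
        return listenedUsers.contains(user);
    }

    public void addListenedUser(IMUser user) throws IMException {
        listenedUsers.add(user);
    }

    public void removeListenedUser(IMUser user) {
        listenedUsers.remove(user);
    }

    public void removeAllListenedUsers() {
        listenedUsers.clear();
    }
}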
RobLoach/ion
third_party/openctm/files/lib/ctmlimits.h
// Google-authored addition to openctm.

#ifndef __OPENCTM_CTMLIMITS_H_
#define __OPENCTM_CTMLIMITS_H_

#include <stdbool.h>
#include <stddef.h>  // for size_t

typedef struct {
  // If non-zero, any attempt to allocate more than maxAllocation bytes
  // during mesh loading will fail with CTM_OUT_OF_MEMORY.
  size_t maxAllocation;

  // If non-zero, any attempt to allocate more than maxFoo foos
  // during mesh loading will fail with CTM_LIMIT_VIOLATION.
  size_t maxVertices;
  size_t maxTriangles;
  size_t maxUVMaps;
  size_t maxAttributeMaps;
  size_t maxStringLength;

  // If a forbid limit is true, and a forbidden feature is present in a
  // serialized mesh, loading will fail with CTM_LIMIT_VIOLATION.
  bool forbidUVMaps;
  bool forbidAttributeMaps;
  bool forbidNormals;
  bool forbidComment;
} CTMlimits;

#endif
King0987654/windows2000
private/net/api/confstub.c
/*++ Copyright (c) 1991-92 Microsoft Corporation Module Name: ConfStub.c Abstract: This module contains stubs for the NetConfig APIs. Author: <NAME> (JohnRo) 23-Oct-1991 Environment: Portable to any flat, 32-bit environment. (Uses Win32 typedefs.) Requires ANSI C extensions: slash-slash comments, long external names. Revision History: 23-Oct-1991 JohnRo Created. 28-Oct-1991 JohnRo Use <winerror.h> if <lmerr.h> isn't needed. 20-Nov-1991 JohnRo Work with old or new lmconfig.h for now (based on REVISED_CONFIG_APIS). 02-Dec-1991 JohnRo Implement local NetConfig APIs. 11-Mar-1992 JohnRo Fixed bug in get all where array wasn't terminated. Added real NetConfigSet() handling. 21-Oct-1992 JohnRo RAID 9357: server mgr: can't add to alerts list on downlevel. --*/ // These must be included first: #include <nt.h> // IN, etc. (Only needed by temporary config.h) #include <ntrtl.h> // (Only needed by temporary config.h) #include <windef.h> // IN, DWORD, etc. #include <lmcons.h> // DEVLEN, NET_API_STATUS, etc. #include <netdebug.h> // (Needed by config.h) // These may be included in any order: #include <config.h> // NetpOpenConfigData(), etc. #include <lmapibuf.h> // NetApiBufferFree(). #include <lmerr.h> // NERR_ and ERROR_ equates. #include <lmconfig.h> // NetConfig APIs. #include <netlib.h> // NetpMemoryReallocate(). #include <rxconfig.h> // RxNetConfig APIs. #include <tstring.h> // STRSIZE(), TCHAR_EOS, etc. #define INITIAL_ALLOC_AMOUNT 512 // arbitrary #define INCR_ALLOC_AMOUNT 512 // arbitrary NET_API_STATUS NET_API_FUNCTION NetConfigGet ( IN LPCWSTR UncServerName OPTIONAL, IN LPCWSTR Component, IN LPCWSTR Parameter, #ifdef REVISED_CONFIG_APIS OUT LPBYTE *BufPtr #else OUT LPBYTE *BufPtr, OUT LPDWORD TotalAvailable #endif ) { NET_API_STATUS Status; LPNET_CONFIG_HANDLE ConfigHandle; BOOL TryDownLevel; #ifndef REVISED_CONFIG_APIS UNREFERENCED_PARAMETER(TotalAvailable); #endif *BufPtr = NULL; // Check caller and make error handling easier. Status = NetpOpenConfigData( & ConfigHandle, (LPWSTR)UncServerName, (LPWSTR)Component, TRUE); // just want read-only access if (Status != NERR_Success) { Status = NetpHandleConfigFailure( "NetConfigGet", // debug name Status, // result of NetpOpenConfigData (LPWSTR)UncServerName, & TryDownLevel); if (TryDownLevel) { return (RxNetConfigGet( (LPWSTR)UncServerName, (LPWSTR)Component, (LPWSTR)Parameter, BufPtr)); } else { return (Status); // result of NetpHandleConfigFailure } } Status = NetpGetConfigValue( ConfigHandle, (LPWSTR)Parameter, // keyword (LPTSTR *) (LPVOID) BufPtr); // alloc and set ptr if (Status == NERR_Success) { Status = NetpCloseConfigData( ConfigHandle ); NetpAssert(Status == NERR_Success); } else { NetpAssert(*BufPtr == NULL); (void) NetpCloseConfigData( ConfigHandle ); } return (Status); } // NetConfigGet NET_API_STATUS NET_API_FUNCTION NetConfigGetAll ( IN LPCWSTR UncServerName OPTIONAL, IN LPCWSTR Component, #ifdef REVISED_CONFIG_APIS OUT LPBYTE *BufPtr #else OUT LPBYTE *BufPtr, OUT LPDWORD TotalAvailable #endif ) { DWORD BufSize; // Bytes allocated at *BufPtr (so far). DWORD BufUsed; // Bytes used at *BufPtr (so far). LPNET_CONFIG_HANDLE ConfigHandle; BOOL FirstTime; LPVOID NewBuffPtr; NET_API_STATUS Status; BOOL TryDownLevel; #ifndef REVISED_CONFIG_APIS UNREFERENCED_PARAMETER(TotalAvailable); #endif *BufPtr = NULL; // Check caller and make error handling easier. 
Status = NetpOpenConfigData( & ConfigHandle, (LPWSTR)UncServerName, (LPWSTR)Component, TRUE); // just want read-only access if (Status != NERR_Success) { Status = NetpHandleConfigFailure( "NetConfigGetAll", // debug name Status, // result of NetpOpenConfigData (LPWSTR)UncServerName, & TryDownLevel); if (TryDownLevel) { return (RxNetConfigGetAll( (LPWSTR)UncServerName, (LPWSTR)Component, BufPtr)); } else { return (Status); // result of NetpHandleConfigFailure } } // Even if there aren't any entries, we'll need to store a null at // end of array. So allocate initial one now. BufSize = INITIAL_ALLOC_AMOUNT; NewBuffPtr = NetpMemoryReallocate( (LPVOID) *BufPtr, // old address BufSize); // new size if (NewBuffPtr == NULL) { // out of memory (void) NetpCloseConfigData( ConfigHandle ); return (ERROR_NOT_ENOUGH_MEMORY); } *BufPtr = NewBuffPtr; BufUsed = 0; // Loop once per entry (at least once if no entries). FirstTime = TRUE; do { LPTSTR KeywordBuffer; LPTSTR ValueBuffer; Status = NetpEnumConfigSectionValues( ConfigHandle, & KeywordBuffer, // Alloc and set ptr. & ValueBuffer, // Alloc and set ptr. FirstTime); FirstTime = FALSE; if (Status == NERR_Success) { DWORD SrcSize = (STRLEN(KeywordBuffer) + 1 + STRLEN(ValueBuffer) + 1) * sizeof(TCHAR); if (BufSize < (BufUsed+SrcSize) ) { if (SrcSize <= INCR_ALLOC_AMOUNT) { BufSize += INCR_ALLOC_AMOUNT; } else { BufSize += SrcSize; } NewBuffPtr = NetpMemoryReallocate( (LPVOID) *BufPtr, /* old address */ BufSize); /* new size */ if (NewBuffPtr == NULL) { /* out of memory */ (void) NetpCloseConfigData( ConfigHandle ); return (ERROR_NOT_ENOUGH_MEMORY); } *BufPtr = NewBuffPtr; } #define AddString( lptstrSrc, CharCount ) \ { \ LPTSTR lptstrDest; \ NetpAssert( CharCount > 0 ); \ lptstrDest = (LPTSTR)NetpPointerPlusSomeBytes( *BufPtr, BufUsed); \ NetpAssert( lptstrDest != NULL ); \ (void) STRNCPY( lptstrDest, lptstrSrc, CharCount ); \ BufUsed += (CharCount * sizeof(TCHAR) ); \ NetpAssert( BufUsed <= BufSize ); \ } AddString( KeywordBuffer, STRLEN(KeywordBuffer) ); (void) NetApiBufferFree( KeywordBuffer ); AddString( TEXT("="), 1 ); AddString( ValueBuffer, STRLEN(ValueBuffer) ); (void) NetApiBufferFree( ValueBuffer ); #define AddNullChar( ) \ { \ AddString( TEXT(""), 1 ); \ } AddNullChar(); // Terminate this entry. } } while (Status == NERR_Success); if (Status == NERR_CfgParamNotFound) { AddNullChar(); // Terminate the array. 
Status = NetpCloseConfigData( ConfigHandle );
        NetpAssert(Status == NERR_Success);

    } else {
        NetpAssert( Status != NO_ERROR );
        NetpAssert( *BufPtr != NULL );
        NetpMemoryFree( *BufPtr );
        *BufPtr = NULL;
        (void) NetpCloseConfigData( ConfigHandle );
    }

    return (Status);

} // NetConfigGetAll


NET_API_STATUS NET_API_FUNCTION
NetConfigSet (
    IN LPCWSTR UncServerName OPTIONAL,
    IN LPCWSTR Reserved1 OPTIONAL,
    IN LPCWSTR Component,
    IN DWORD Level,
    IN DWORD Reserved2,
    IN LPBYTE Buf,
    IN DWORD Reserved3
    )
{
    LPCONFIG_INFO_0 Info = (LPVOID) Buf;
    LPNET_CONFIG_HANDLE ConfigHandle;
    NET_API_STATUS Status;
    BOOL TryDownLevel;

    if (Buf == NULL) {
        return (ERROR_INVALID_PARAMETER);
    } else if (Level != 0) {
        return (ERROR_INVALID_LEVEL);
    } else if (Info->cfgi0_key == NULL) {
        return (ERROR_INVALID_PARAMETER);
    } else if (Info->cfgi0_data == NULL) {
        return (ERROR_INVALID_PARAMETER);
    } else if (Reserved1 != NULL) {
        return (ERROR_INVALID_PARAMETER);
    } else if (Reserved2 != 0) {
        return (ERROR_INVALID_PARAMETER);
    } else if (Reserved3 != 0) {
        return (ERROR_INVALID_PARAMETER);
    }

    Status = NetpOpenConfigData(
            & ConfigHandle,
            (LPWSTR)UncServerName,
            (LPWSTR)Component,
            FALSE);     // don't want read-only access

    if (Status != NERR_Success) {

        Status = NetpHandleConfigFailure(
                "NetConfigSet",     // debug name
                Status,             // result of NetpOpenConfigData
                (LPWSTR)UncServerName,
                & TryDownLevel);

        if (TryDownLevel) {
            return (RxNetConfigSet(
                    (LPWSTR)UncServerName,
                    (LPWSTR)Reserved1,
                    (LPWSTR)Component,
                    Level,
                    Reserved2,
                    Buf,
                    Reserved3));
        } else {
            return (Status);  // result of NetpHandleConfigFailure
        }
    }

    Status = NetpSetConfigValue(
            ConfigHandle,
            Info->cfgi0_key,    // keyword
            Info->cfgi0_data);  // new value

    if (Status == NERR_Success) {
        Status = NetpCloseConfigData( ConfigHandle );
        NetpAssert(Status == NERR_Success);
    } else {
        (void) NetpCloseConfigData( ConfigHandle );
    }

    return (Status);

} // NetConfigSet
jacekk/emuto
src/parsers/primitives/string.js
// @flow /* eslint no-control-regex: 0 */ /* eslint no-useless-escape: 0 */ import P from 'parsimmon' import type { NodeType } from '../../types' export default P.regexp( /("(((?=\\)\\(["\\\/bfnrt]|u[0-9a-fA-F]{4}))|[^"\\\0-\x1F\x7F]+)*")/ ).map((value: string): NodeType => ({ name: 'primitive', value }))
jitwxs/disruptor-study
disruptor-netty-client/src/main/java/jit/wxs/disruptor/netty/client/NettyClientMain.java
package jit.wxs.disruptor.netty.client;

import com.github.jitwxs.commons.core.util.thread.ThreadPoolUtils;
import jit.wxs.disruptor.common.netty.Constant;
import jit.wxs.disruptor.common.netty.Entry;
import jit.wxs.disruptor.netty.client.handler.NettyClient;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.RandomUtils;

import java.util.concurrent.ThreadPoolExecutor;
import java.util.stream.IntStream;

/**
 * Client entry point
 * @author jitwxs
 * @date March 23, 2020, 1:25
 */
public class NettyClientMain {
    public static void main(String[] args) {
        // 1. Start the Netty client
        NettyClient nettyClient = new NettyClient(Constant.NETTY_SERVER_IP, Constant.NETTY_SERVER_PORT);
        nettyClient.start();

        // 2. Send messages to the server
        ThreadPoolExecutor executor = ThreadPoolUtils.unBondQueueExecutor(Runtime.getRuntime().availableProcessors(),"netty-client");
        IntStream.range(1, 5).forEach(index -> executor.execute(() -> {
            IntStream.range(1, 100).forEach(subIndex -> {
                Entry entry = new Entry();
                entry.setUserId(RandomUtils.nextInt(1, 100000));
                entry.setMessage(RandomStringUtils.randomAlphabetic(15));
                nettyClient.sendServer(entry);
            });
        }));

        // 3. Block until the client shuts down
        nettyClient.syncClose();
        ThreadPoolUtils.shutdown(executor, 100, () -> System.out.println("close thread pool error"));
    }
}
PrathamBooks/sw-core
app/controllers/api/v1/search_controller.rb
class Api::V1::SearchController < Api::V1::ApplicationController respond_to :json EVERYTHING = "*" PER_PAGE = 10 DEFAULT_CRITERIA = [ { _score: {order: :desc} }, { recommended_status: {order: :asc, ignore_unmapped: true} }, { reads: {order: :desc} }, { likes: {order: :desc} } ] # /api/v1/books-search def books_search common_params = { search: { }, page: params[:page], per_page: params[:per_page] } if params[:cache].present? common_params[:search].merge!(:cache => params[:cache]) end if params[:query].present? common_params[:search].merge!(:query => params[:query]) end # in search, tags will be part of filter and not query if params[:tags].present? common_params[:search].merge!(:tags => params[:tags]) end if params[:story_type].present? common_params[:search].merge!(:story_type => params[:story_type]) end if params[:publishers].present? if params[:publishers].include?("Child Created") common_params[:search].merge!(:child_created => "Child Created") params[:publishers].delete("Child Created") end common_params[:search].merge!(:organizations => params[:publishers]) end if params[:languages].present? common_params[:search].merge!(:languages => params[:languages]) end if params[:levels].present? common_params[:search].merge!(:reading_levels => params[:levels]) end if params[:categories].present? common_params[:search].merge!(:categories => params[:categories]) end if params[:sort].present? if params[:sort]=="Most Read" common_params[:search].merge!(:sort => {reads: {order: :desc}}) elsif params[:sort]=="Most Liked" common_params[:search].merge!(:sort => {likes_and_good_ratings: {order: :desc}}) elsif params[:sort]=="New Arrivals" || params[:sort] == "Editor's Picks" common_params[:search].merge!(:sort => {published_at: {order: :desc}}) elsif current_user && current_user.content_manager? && params[:sort] == "Ratings" common_params[:search].merge!(:sort => {rating: {order: :desc}}) end end if current_user && current_user.content_manager? if params[:status].present? common_params[:search].merge!(:status => params[:status]) unless params[:status].include?("All Stories") common_params[:search].merge!(:status => ["published", "draft"]) if params[:status].include?("All Stories") end if params[:derivation_type].present? && !params[:derivation_type].include?("All Stories") common_params[:search].merge!(:derivation_type => params[:derivation_type]) end end if params[:bulk_download].present? && params[:bulk_download] == "Not Downloaded" common_params[:search].merge!(:bulk_download => params[:bulk_download]) end search_obj = Search::Books.new(common_params, current_user, false) if(params[:sort].present? && params[:sort] == "Editor's Picks") results = search_obj.editor_recommended_stories else results = search_obj.search end metadata = results[:metadata].merge!(total_count_of_all_tabs('books_search', common_params)) if results render json: {"ok"=>true, "metadata" => metadata, "data"=> results[:search_results]} else render json: {"ok"=>true, "message" => "No results found"} end end # /api/v1/lists-search def lists_search list_params = ActionController::Parameters.new({ search: { query: params[:query], status: ["published"] }, page: params[:page], per_page: params[:per_page] }) if params[:categories].present? list_params[:search].merge!(:categories => params[:categories]) end if params[:sort].present? 
if params[:sort]=="Most Viewed" list_params[:search].merge!(:sort => {views: {order: :desc}}) elsif params[:sort]=="Most Liked" list_params[:search].merge!(:sort => {likes: {order: :desc}}) elsif params[:sort]=="New Arrivals" list_params[:search].merge!(:sort => {created_at: {order: :desc}}) end end list_search_params = list_params["search"] # send the session only when we need to randomize the results # randomization should be done when no filter/options are applied if ( list_search_params["sort"].blank? && list_search_params["categories"].blank? && list_search_params["query"].blank? ) curr_session = session end search_obj = Search::Lists.new(list_params, nil, false, curr_session) results = search_obj.general_search metadata = results[:metadata].merge!(total_count_of_all_tabs('lists_search', list_params)) if results render json: {"ok"=>true, "metadata" => metadata, "data"=> results[:search_results]} else render json: {"ok"=>true, "message" => "No results found"} end end def illustrations_search common_params = ActionController::Parameters.new({ search: { image_mode: "false" # hiding private images for normal users }, content_manager: (current_user.present? && current_user.content_manager?), is_organization_cm: current_user.present? && (current_user.organization? || current_user.content_manager?), page: params[:page], per_page: params[:per_page] }) if params[:query].present? common_params[:search].merge!(:query => params[:query]) end if params[:tags].present? common_params[:search].merge!(:tags => params[:tags]) end if params[:publishers].present? common_params[:search].merge!(:organization => params[:publishers]) end if params[:styles].present? common_params[:search].merge!(:styles => params[:styles]) end if params[:illustrators].present? common_params[:search].merge!(:illustrator_slugs => params[:illustrators]) end if params[:search].present? && params[:search][:illustrator_slugs].present? common_params[:search].merge!(:illustrator_slugs => params[:search][:illustrator_slugs]) end if params[:categories].present? common_params[:search].merge!(:categories => params[:categories]) end if params[:sort].present? if params[:sort]=="Most Viewed" common_params[:search].merge!(:sort => {reads: {order: :desc}}) elsif params[:sort]=="Most Liked" common_params[:search].merge!(:sort => {likes: {order: :desc}}) elsif params[:sort]=="New Arrivals" common_params[:search].merge!(:sort => {created_at: {order: "desc"}}) end end unless ( current_user.try(:allow_gif_images) ) common_params[:no_gif_images] = true end if current_user common_params[:search].merge!(:current_user => current_user) end if params[:bulk_download].present? 
&& params[:bulk_download] == "Not Downloaded" common_params[:search].merge!(:bulk_download => "Not Downloaded") end s = Search::Illustrations.search(common_params) illus = s.results search_results = illus.map { |result| sanitize_search_results_for_api(result) } metadata= { hits: illus.total_count, perPage: illus.per_page, page: illus.current_page, totalPages: illus.total_pages } metadata = metadata.merge!(total_count_of_all_tabs('illustrations_search', common_params)) if search_results render json: {"ok"=>true, "metadata" => metadata, "data"=> search_results} else render json: {"ok"=>true, "message" => "No results found"} end end #GET /api/v1/people-search def people_search per_page = params[:per_page] || 20 page = params[:page] || 1 query = params[:query] || "*" user_search = User.search(query, where: {_or: [{stories_count: {gte: 1}}, {illustrations_count: {gte: 1}}]}, match: :word_start, misspellings: {below: 2}, load: false, execute: false, page: page, per_page: per_page) user_results = user_search.execute results = user_results.map{|u| sanitize_search_results_for_people_search(u)} metadata = { hits: user_results.total_count, perPage: user_results.per_page, page: user_results.current_page, totalPages: user_results.total_pages } if results render json: {"ok"=>true, "metadata" => metadata.merge!(total_count_of_all_tabs('people_search')), "data"=> results} else render json: {"ok"=>true, "message" => "No results found"} end end def sanitize_search_results_for_people_search(result) sanitized_response = {} sanitized_response["id"] = result["id"] sanitized_response["name"] = result["name"] sanitized_response["slug"] = result["slug"] sanitized_response["type"] = result["type"] sanitized_response["profileImage"] = result["profile_image"] sanitized_response end #GET /api/v1/org-search def org_search per_page = params[:per_page] || 20 page = params[:page] || 1 query = params[:query] || "*" org_search = Organization.search(query, where: {_or: [{stories_count: {gte: 1}}, {illustrations_count: {gte: 1}}, {media_count: {gte: 1}}]}, match: :word_start, misspellings: {below: 2}, load: false, execute: false, page: page, per_page: per_page) org_results = org_search.execute results = org_results.map{|o| sanitize_search_results_for_org_search(o)} metadata = { hits: org_results.total_count, perPage: org_results.per_page, page: org_results.current_page, totalPages: org_results.total_pages } if results render json: {"ok"=>true, "metadata" => metadata.merge!(total_count_of_all_tabs('org_search')), "data"=> results} else render json: {"ok"=>true, "message" => "No results found"} end end def sanitize_search_results_for_org_search(result) sanitized_response = {} sanitized_response["id"] = result["id"] sanitized_response["name"] = result["organization_name"] sanitized_response["slug"] = result["slug"] sanitized_response["type"] = (result["organization_type"] == "Publisher") ? 
"publisher" : "organisation" sanitized_response["profileImage"] = result["logo"] sanitized_response end def sanitize_search_results_for_api(result) sanitized_response = {} sanitized_response["id"] = result["id"] sanitized_response["title"] = result["name"] sanitized_response["slug"] = result["url_slug"] sanitized_response["count"] = 1 sanitized_response["illustrators"] = result["illustrator_details"] if(result["organization"] != "") sanitized_response["publisher"]= {} sanitized_response["publisher"]["name"] = result["organization"] sanitized_response["publisher"]["slug"] = result["publisher_slug"] else sanitized_response["publisher"] = nil end sanitized_response["imageUrls"] = [{}] sanitized_response["imageUrls"][0]["aspectRatio"] = 320.0/240.0 sanitized_response["imageUrls"][0]["cropCoords"] = result["crop_coords"] sanitized_response["imageUrls"][0]["sizes"] = result["image_sizes"] sanitized_response["likesCount"] = result["likes"] sanitized_response["readsCount"] = result["reads"] sanitized_response["illustrationDownloaded"] = current_user ? (result["illustration_downloads"].present? ? result["illustration_downloads"].include?(current_user.id) : false) : false sanitized_response end def books_for_translation common_params = {search: {reading_levels: (params[:reading_levels] if params[:reading_levels]), languages: [params[:source_language]], target_languages: [params[:target_language]], categories: (params[:categories] if params[:categories]), organizations: (params[:publishers] if params[:publishers]), sort: {reads: {order: "desc"}}}, page: params[:page], per_page: params[:per_page], books_for_translation: true} nObj = Search::Books.new(common_params, nil, false) results = nObj.search if results metadata = results[:metadata] results = results[:search_results] render json: {"ok"=>true, "metadata" => metadata, "data"=> results} else render json: {"ok"=>true, "message" => "No results found"} end end #/api/v1/category-banner def category_banner @category = StoryCategory.find_by_name(params[:name]) rescue ActiveRecord::RecordNotFound resource_not_found end #/api/v1/confirm_story_formats def confirm_story_formats story_ids = params[:ids] story_ids_with_format = {"HiRes PDF" => [], "PDF" => []} if params[:download_format] == "HiRes PDF" story_ids.each do|id| story = Story.find(id) if current_user && current_user.content_manager? story_ids_with_format["HiRes PDF"] << id.to_i elsif current_user && (current_user.organization? || current_user.own_stories) if story && story_access(story) story_ids_with_format["HiRes PDF"] << id.to_i else story_ids_with_format["PDF"] << id.to_i end end end render json: {"ok"=>true, "data" => story_ids_with_format}, status: 200 else render json: {"ok"=>true, "data" => {"PDF" => story_ids}}, status: 200 end end def story_access(story) if story.is_user_own_story(current_user) return true elsif (story.organization.present? && current_user.organization?) && (story.organization == current_user.organization) return true end end #/api/v1/confirm_illustration_formats def confirm_illustration_formats image_ids = params[:ids] image_ids_with_format = {"HiRes JPEG" => [], "JPEG" => []} if params[:download_format] == "HiRes JPEG" image_ids.each do|id| image = Illustration.find(id) if current_user && current_user.content_manager? image_ids_with_format["HiRes JPEG"] << id elsif current_user && (current_user.organization? || current_user.person.illustrations.any?) 
if image && image_access(image)
            image_ids_with_format["HiRes JPEG"] << id
          else
            image_ids_with_format["JPEG"] << id
          end
        end
      end
      render json: {"ok"=>true, "data" => image_ids_with_format}, status: 200
    else
      render json: {"ok"=>true, "data" => {"JPEG" => image_ids}}, status: 200
    end
  end

  def image_access(image)
    if image.illustrators.collect(&:email).join(",") == current_user.email
      return true
    elsif (image.organization.present? && current_user.organization?) && (image.organization == current_user.organization)
      return true
    end
  end

  def total_count_of_all_tabs(search_tab, query_params = {})
    if search_tab == "books_search"
      books_params = query_params
    else
      books_params = {
        search: {
          query: params[:query]
        }
      }
    end
    search_obj = Search::Books.new(books_params, current_user, false)
    results = search_obj.search[:metadata][:hits]
    books_count = results

    if search_tab == "lists_search"
      list_params = query_params
    else
      list_params = ActionController::Parameters.new({
        search: {
          query: params[:query],
          status: ["published"]
        }
      })
    end
    list_search_obj = Search::Lists.new(list_params, nil, false, nil)
    lists_count = list_search_obj.general_search[:metadata][:hits]

    if search_tab == "illustrations_search"
      illustration_params = query_params
    else
      illustration_params = ActionController::Parameters.new({
        search: {
          query: params[:query],
          image_mode: "false" # hiding private images for normal users
        },
        content_manager: (current_user.present? && current_user.content_manager?),
        is_organization_cm: (current_user.present? && (current_user.organization? || current_user.content_manager?))
      })
      unless ( current_user.try(:allow_gif_images) )
        illustration_params[:no_gif_images] = true
      end
      if current_user
        illustration_params[:search].merge!(:current_user => current_user)
      end
    end
    illustration_result = Search::Illustrations.search(illustration_params).results
    illustrations_count = illustration_result.total_count

    query = params[:query] || "*"
    user_search = User.search(query, where: {_or: [{stories_count: {gte: 1}}, {illustrations_count: {gte: 1}}]}, match: :word_start, misspellings: {below: 2}, load: false, execute: false)
    user_results = user_search.execute
    users_count = user_results.total_count

    query = params[:query] || "*"
    org_search = Organization.search(query, where: {_or: [{stories_count: {gte: 1}}, {illustrations_count: {gte: 1}}, {media_count: {gte: 1}}]}, match: :word_start, misspellings: {below: 2}, load: false, execute: false)
    org_results = org_search.execute
    organisation_count = org_results.total_count

    count_hash = { books_count: books_count, illustrations_count: illustrations_count, users_count: users_count, lists_count: lists_count, organisation_count: organisation_count}
    count_hash
  end

  #/api/v1/bulk-download
  include BulkDownload # including lib/bulk_download.rb module.
  def bulk_download
    new_params={}
    ["HiRes PDF", "PDF", "Text Only", "ePub"].each do |format|
      high_resolution = format == "HiRes PDF" ? "true" : "false"
      new_params[format] = {
        high_resolution: high_resolution,
        stories_to_download: params["#{format}"]
      } if params["#{format}"]
    end
    bk_download new_params, current_user, request
  end

  #/api/v1/image-bulk-download
  include ImageBulkDownload # including lib/image_bulk_download.rb module.
  def image_bulk_download
    new_params={}
    ["HiRes JPEG", "JPEG"].each do |format|
      style = format == "JPEG" ? "large" : "original"
      new_params[format] = {
        images_to_download: params["#{format}"],
        style: style,
      } if params["#{format}"]
    end
    # image_bk_download is in lib/image_bulk_download.rb module.
image_bk_download new_params, current_user, request end end
smdb21/java-miape-api
src/main/java/org/proteored/miapeapi/xml/mzidentml/autogenerated/PSIPIAnalysisProcessProteinDetectionProtocolType.java
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vhudson-jaxb-ri-2.1-257
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2010.09.14 at 07:25:56 PM CEST
//

package org.proteored.miapeapi.xml.mzidentml.autogenerated;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;

/**
 * The parameters and settings of a ProteinDetection
 * process.
 *
 * <p>Java class for PSI-PI.analysis.process.ProteinDetectionProtocolType complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType name="PSI-PI.analysis.process.ProteinDetectionProtocolType">
 *   &lt;complexContent>
 *     &lt;extension base="{http://psidev.info/psi/pi/mzIdentML/1.0}FuGE.Common.Protocol.ProtocolType">
 *       &lt;sequence>
 *         &lt;element name="AnalysisParams" type="{http://psidev.info/psi/pi/mzIdentML/1.0}ParamListType" minOccurs="0"/>
 *         &lt;element name="Threshold" type="{http://psidev.info/psi/pi/mzIdentML/1.0}ParamListType"/>
 *       &lt;/sequence>
 *       &lt;attribute name="AnalysisSoftware_ref" use="required" type="{http://www.w3.org/2001/XMLSchema}string" />
 *     &lt;/extension>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 *
 *
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "PSI-PI.analysis.process.ProteinDetectionProtocolType", propOrder = {
    "analysisParams",
    "threshold"
})
public class PSIPIAnalysisProcessProteinDetectionProtocolType
    extends FuGECommonProtocolProtocolType
{

    @XmlElement(name = "AnalysisParams")
    protected ParamListType analysisParams;
    @XmlElement(name = "Threshold", required = true)
    protected ParamListType threshold;
    @XmlAttribute(name = "AnalysisSoftware_ref", required = true)
    protected String analysisSoftwareRef;

    /**
     * Gets the value of the analysisParams property.
     *
     * @return
     *     possible object is
     *     {@link ParamListType }
     *
     */
    public ParamListType getAnalysisParams() {
        return analysisParams;
    }

    /**
     * Sets the value of the analysisParams property.
     *
     * @param value
     *     allowed object is
     *     {@link ParamListType }
     *
     */
    public void setAnalysisParams(ParamListType value) {
        this.analysisParams = value;
    }

    /**
     * Gets the value of the threshold property.
     *
     * @return
     *     possible object is
     *     {@link ParamListType }
     *
     */
    public ParamListType getThreshold() {
        return threshold;
    }

    /**
     * Sets the value of the threshold property.
     *
     * @param value
     *     allowed object is
     *     {@link ParamListType }
     *
     */
    public void setThreshold(ParamListType value) {
        this.threshold = value;
    }

    /**
     * Gets the value of the analysisSoftwareRef property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getAnalysisSoftwareRef() {
        return analysisSoftwareRef;
    }

    /**
     * Sets the value of the analysisSoftwareRef property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setAnalysisSoftwareRef(String value) {
        this.analysisSoftwareRef = value;
    }

}
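For orientation, a brief hedged sketch of populating this generated type; JAXB-generated classes like ParamListType come with public no-argument constructors, and the "AS_example" reference id below is a made-up placeholder rather than a real identifier from any schema instance.

import org.proteored.miapeapi.xml.mzidentml.autogenerated.ParamListType;
import org.proteored.miapeapi.xml.mzidentml.autogenerated.PSIPIAnalysisProcessProteinDetectionProtocolType;

public class ProtocolTypeSketch {
    public static void main(String[] args) {
        PSIPIAnalysisProcessProteinDetectionProtocolType protocol =
                new PSIPIAnalysisProcessProteinDetectionProtocolType();

        // Threshold is required by the schema; AnalysisParams is optional (minOccurs="0").
        protocol.setThreshold(new ParamListType());
        protocol.setAnalysisSoftwareRef("AS_example"); // hypothetical reference id

        System.out.println(protocol.getAnalysisSoftwareRef());
    }
}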
micromouseonline/micromouse_maze_tool
mazefiles/cfiles/tic05followersheats.c
/* text version of maze 'mazefiles/binary/tic05followersheats.maz' generated by mazetool (c) <NAME> 2018 o---o---o---o---o---o---o---o---o---o---o---o---o---o---o---o---o | | o o---o o---o---o---o---o---o---o---o---o---o---o---o---o o | | | | | | | o o---o o o---o---o---o---o o o o---o---o---o---o o | | | | | | | | o o---o o---o o---o---o o---o---o o o---o o---o o | | | | | | | | | o o o---o o---o o o---o---o o o o o---o o o | | | | | | | | | | | o o---o o---o o---o---o---o o o o o---o o o o | | | | | | | | | | | | o o---o o o o o---o o o o o o o---o o o | | | | | | | | | | | | | o o o---o o o o---o---o o o---o o---o o---o o | | | | | | | | | o o o---o---o o o o o o o---o---o---o o---o o | | | | | | | | | | | o o o---o o o o---o---o o o o---o o---o o o | | | | | | | | | | | | | o o o---o o o o o o o o o o o---o---o o | | | | | | | | | | | | | o o---o o---o o o o---o o o o o o o o o | | | | | | | | | | | | | | | o o o---o o o o---o o---o o o o o o o o | | | | | | | | | | | | | o o---o o---o o o o---o o---o o o o o o o | | | | | | | | | | | | | o---o o---o o o o o o---o---o o o---o o o o | | | | | | | | | | | o o---o o---o o---o---o---o o---o o o---o o o o | | | | | | | o---o---o---o---o---o---o---o---o---o---o---o---o---o---o---o---o */ int tic05followersheats_maz[] ={ 0x0E, 0x09, 0x0E, 0x08, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x08, 0x0A, 0x08, 0x08, 0x08, 0x09, 0x0D, 0x06, 0x09, 0x06, 0x0B, 0x0C, 0x0A, 0x08, 0x0A, 0x09, 0x07, 0x0E, 0x01, 0x07, 0x07, 0x05, 0x06, 0x09, 0x06, 0x09, 0x0C, 0x03, 0x0D, 0x05, 0x0D, 0x06, 0x0A, 0x09, 0x06, 0x0A, 0x0A, 0x01, 0x0D, 0x06, 0x09, 0x06, 0x03, 0x0E, 0x02, 0x03, 0x04, 0x0A, 0x09, 0x06, 0x09, 0x0E, 0x09, 0x05, 0x04, 0x0A, 0x02, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x02, 0x0A, 0x00, 0x09, 0x06, 0x09, 0x05, 0x05, 0x05, 0x0E, 0x0A, 0x0A, 0x0A, 0x08, 0x0A, 0x0A, 0x08, 0x0A, 0x01, 0x06, 0x09, 0x05, 0x05, 0x05, 0x05, 0x0C, 0x0A, 0x09, 0x0E, 0x02, 0x09, 0x0E, 0x01, 0x0D, 0x07, 0x0C, 0x03, 0x05, 0x05, 0x05, 0x05, 0x06, 0x09, 0x06, 0x09, 0x0E, 0x03, 0x0C, 0x01, 0x04, 0x0B, 0x05, 0x0C, 0x01, 0x07, 0x05, 0x04, 0x09, 0x06, 0x09, 0x06, 0x0A, 0x0A, 0x02, 0x02, 0x02, 0x0A, 0x03, 0x05, 0x06, 0x09, 0x05, 0x05, 0x07, 0x0D, 0x06, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x03, 0x0E, 0x03, 0x05, 0x06, 0x0A, 0x02, 0x0A, 0x0A, 0x0A, 0x08, 0x09, 0x0D, 0x0E, 0x0A, 0x0A, 0x0A, 0x0A, 0x09, 0x05, 0x0E, 0x08, 0x0A, 0x0A, 0x0A, 0x0A, 0x01, 0x05, 0x06, 0x0A, 0x0A, 0x08, 0x0A, 0x09, 0x05, 0x05, 0x0D, 0x05, 0x0E, 0x0A, 0x0A, 0x08, 0x02, 0x03, 0x0D, 0x0E, 0x09, 0x06, 0x09, 0x05, 0x05, 0x05, 0x06, 0x02, 0x0A, 0x0A, 0x0A, 0x01, 0x0F, 0x0C, 0x02, 0x09, 0x06, 0x09, 0x06, 0x01, 0x05, 0x05, 0x0C, 0x0A, 0x0A, 0x0A, 0x0A, 0x03, 0x0E, 0x03, 0x0D, 0x06, 0x0A, 0x02, 0x09, 0x05, 0x07, 0x05, 0x06, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x02, 0x0A, 0x0A, 0x0A, 0x02, 0x02, 0x0A, 0x03, }; /* end of mazefile */
Jarvie8176/eth
parser/TronGrid/Swap_TriggerSmartContract_4bf3e2d0_test.py
from unittest import TestCase

from parser.TronGrid.util import prepare_parse_result


def test_usage() -> None:
    expected = {
        "trx_id": "c96f0d6a3bce8099b1351a9858abd26d9071d7b1da8a4df71f951c8ff4d20284",
        "url": "https://tronscan.org/#/transaction/c96f0d6a3bce8099b1351a9858abd26d9071d7b1da8a4df71f951c8ff4d20284",
        "type": "Swap",
        "status": "SUCCESS",
        "timestamp": "2020-09-16T18:31:33+00:00",
        "in_amount": "3664.354797",
        "in_amount_major": "139000.0",
        "in_currency": "USDT",
        "in_rate": None,
        "in_rate_unit": None,
        "in_rate_timestamp": None,
        "out_amount": "139000.0",
        "out_amount_major": "139000.0",
        "out_currency": "TRON",
        "out_rate": None,
        "out_rate_unit": None,
        "out_rate_timestamp": None,
        "fee_amount": "0.48363",
        "fee_currency": "TRON",
        "fee_rate": None,
        "fee_rate_unit": None,
        "fee_rate_timestamp": None,
    }

    result = prepare_parse_result(__file__)
    assert len(result) == 1
    TestCase().assertDictEqual(expected, result[0].to_dto().dict())
edublancas/pipeline
pipeline/sklearn/__init__.py
from .MetaEstimator import MetaEstimator
quis/collections-1
features/support/services_and_information_helper.rb
require "gds_api/test_helpers/search" require_relative "../../test/support/rummager_helpers" module ServicesAndInformationHelpers include GdsApi::TestHelpers::Search include RummagerHelpers def stub_services_and_information_lookups @services_and_information = %w[ environmental-permit-check-if-you-need-one government/publications/environmental-permitting-ep-charges-scheme-april-2014-to-march-2015 hazardous-waste-producer-registration ] stub_services_and_information_links("hm-revenue-customs") stub_content_store_has_item("/government/organisations/hm-revenue-customs/services-information", content_id: "content-id-for-hm-revenue-customs-services-information", base_path: "/government/organisations/hm-revenue-customs/services-information", title: "Services and information - HM Revenue & Customs", format: "services_and_information", public_updated_at: 10.days.ago.iso8601, details: {}, links: { "parent" => [ "title" => "HM Revenue & Customs", "base_path" => "/government/organisations/hm-revenue-customs", ], }) end end World(ServicesAndInformationHelpers)
jethronap/jstat
src/main/java/jstat/ml/models/GaussianMixtureModel.java
package jstat.ml.models;

import org.apache.commons.math3.distribution.MultivariateNormalDistribution;
import org.apache.commons.math3.distribution.fitting.MultivariateNormalMixtureExpectationMaximization;
import org.apache.commons.math3.exception.DimensionMismatchException;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

import java.util.ArrayList;
import java.util.List;

/**
 * Gaussian mixture model
 */
public class GaussianMixtureModel {

    /**
     * Constructor
     * @param config the configuration describing the mixture components
     */
    public GaussianMixtureModel(GMMConfig config){
        this.config = config;
        this.weights = Nd4j.zeros(config.n_components);
        this.mixture_mus = new ArrayList<>();
        this.mixture_sigmas = new ArrayList<>();
        this.dists = new ArrayList<>();
    }

    /**
     * Returns the probability of the vector x, i.e. the weighted sum
     * of the component densities.
     * @param x
     * @return
     */
    public double prob(INDArray x){
        double value = 0.0;
        double[] vals = x.toDoubleVector();

        try{
            for(int i=0; i<dists.size(); ++i){
                value += weights.getDouble(i)*dists.get(i).density(vals);
            }
        }
        catch (DimensionMismatchException e){
            // on a dimension mismatch, return whatever was accumulated so far
        }

        return value;
    }

    /**
     * Returns the weights of the mixture
     */
    public INDArray getWeights(){return this.weights;}

    /**
     * Evaluate the component parameters of the GMM using EM
     * algorithm
     */
    public void train(INDArray x){
        // initialize using KMeans
        initialize(x);
    }

    private void initialize(INDArray x){
        // not implemented yet
    }

    private GMMConfig config;

    /**
     * The weights of the mixture
     */
    private INDArray weights;

    /**
     * The mean vectors of the mixture
     */
    private List<INDArray> mixture_mus;

    /**
     * The covariance matrices of the mixture
     */
    private List<INDArray> mixture_sigmas;

    /**
     * The list of the Gaussian distributions
     */
    private List<MultivariateNormalDistribution> dists;
}
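Since train() is still a stub above, a quick way to see what prob() computes is to evaluate the weighted density sum by hand with the same commons-math3 distribution class the model holds. A minimal sketch; the two component parameters and the class name GmmDensitySketch are made up for illustration.

import org.apache.commons.math3.distribution.MultivariateNormalDistribution;

// Hand-computed two-component mixture density, mirroring what
// GaussianMixtureModel.prob() does once dists and weights are populated.
public class GmmDensitySketch {
    public static void main(String[] args) {
        double[] weights = {0.5, 0.5};
        MultivariateNormalDistribution c0 = new MultivariateNormalDistribution(
                new double[]{0.0, 0.0}, new double[][]{{1.0, 0.0}, {0.0, 1.0}});
        MultivariateNormalDistribution c1 = new MultivariateNormalDistribution(
                new double[]{3.0, 3.0}, new double[][]{{1.0, 0.0}, {0.0, 1.0}});

        double[] x = {1.0, 1.0};
        // p(x) = sum_i w_i * N(x; mu_i, Sigma_i)
        double p = weights[0] * c0.density(x) + weights[1] * c1.density(x);
        System.out.println("mixture density at (1, 1) = " + p);
    }
}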
magicgh/algorithm-contest-code
BashuOJ-Code/5121.cpp
#include<iostream>
#include<cstdio>
#include<cstring>
#include<cstdlib>
#include<cmath>
#include<iomanip>
#include<algorithm>
#include<queue>
#include<stack>
#include<vector>
#define ri register int
#define ll long long
using namespace std;
const int MAXN=100005;
int Lmax[MAXN],Rmax[MAXN],Lmin[MAXN],Rmin[MAXN],Cnt[MAXN],a[MAXN];
int n;
inline const int GetInt()
{
    int num=0,bj=1;
    char c=getchar();
    while(!isdigit(c))bj=(c=='-'||bj==-1)?-1:1,c=getchar();
    while(isdigit(c))num=num*10+c-'0',c=getchar();
    return num*bj;
}
inline ll Calc(int l,int r,int mid)
{
    ll cnt=0;
    Lmax[mid]=Lmin[mid]=a[mid];
    Rmax[mid+1]=Rmin[mid+1]=a[mid+1];
    for(ri i=mid-1;i>=l;i--)
    {
        Lmax[i]=max(Lmax[i+1],a[i]);
        Lmin[i]=min(Lmin[i+1],a[i]);
    }
    for(ri i=mid+2;i<=r;i++)
    {
        Rmax[i]=max(Rmax[i-1],a[i]);
        Rmin[i]=min(Rmin[i-1],a[i]);
    }
    for(ri i=l;i<=mid;i++)//case: both extrema lie on the left half, so the right endpoint is determined directly; count the valid intervals
    {
        int j=i+Lmax[i]-Lmin[i];//position of the right endpoint
        if(j>mid&&Rmax[j]<Lmax[i]&&Rmin[j]>Lmin[i])cnt++;//valid case: both extrema must be on the left and the right endpoint must cross the midline
    }
    //valid intervals with extrema on opposite sides satisfy Rmax[j]-Lmin[i]=j-i, i.e. Rmax[j]-j=Lmin[i]-i
    //here we handle the case where the minimum is on the left and the maximum on the right
    int p=mid+1,q=mid+1;
    while(q<=r&&Rmin[q]>Lmin[l]) Cnt[Rmax[q]-q]++,q++;
    while(p<=r&&Rmax[p]<Lmax[l]) Cnt[Rmax[p]-p]--,p++;
    p--,q--;
    for(int i=l;i<=mid;i++)
    {
        while(p>=mid+1&&Rmax[p]>Lmax[i]) Cnt[Rmax[p]-p]++,p--;
        while(q>=mid+1&&Rmin[q]<Lmin[i]) Cnt[Rmax[q]-q]--,q--;
        cnt+=max(Cnt[Lmin[i]-i],0);
    }
    for(int i=mid+1;i<=r;i++)Cnt[Rmax[i]-i]=0;
    return cnt;
}
inline ll Solve(int l,int r)
{
    if(l==r)return 1ll;
    int mid=(l+r)>>1;
    ll ret=Solve(l,mid)+Solve(mid+1,r);
    ret+=Calc(l,r,mid);
    reverse(a+l,a+r+1);
    if ((r-l+1)&1)mid--;
    ret+=Calc(l,r,mid);
    reverse(a+l,a+r+1);
    return ret;
}
int main()
{
    n=GetInt();
    for(ri i=1;i<=n;i++)
    {
        int x=GetInt(),y=GetInt();
        a[x]=y;
    }
    printf("%lld\n",Solve(1,n));
    return 0;
}
wnbts/random-cut-forest-by-aws
Java/core/src/main/java/com/amazon/randomcutforest/ComponentList.java
/*
 * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package com.amazon.randomcutforest;

import java.util.ArrayList;
import java.util.Collection;

/**
 * A ComponentList is an ArrayList specialized to contain IComponentModel
 * instances. Executor classes operate on ComponentLists.
 *
 * @param <PointReference> The internal point representation expected by the
 *                         component models in this list.
 * @param <Point>          The explicit data type of points being passed
 */
public class ComponentList<PointReference, Point> extends ArrayList<IComponentModel<PointReference, Point>> {

    public ComponentList() {
        super();
    }

    public ComponentList(Collection<? extends IComponentModel<PointReference, Point>> collection) {
        super(collection);
    }

    public ComponentList(int initialCapacity) {
        super(initialCapacity);
    }
}
nettee/leetcode
solutions/0926.find-and-replace-pattern/0926.find-and-replace-pattern.1540533202.cpp
/*
 * [926] Find and Replace Pattern
 *
 * https://leetcode.com/problems/find-and-replace-pattern/description/
 *
 * algorithms
 * Medium (68.65%)
 * Total Accepted:    11.2K
 * Total Submissions: 16.3K
 * Testcase Example:  '["abc","deq","mee","aqq","dkd","ccc"]\n"abb"'
 *
 * You have a list of words and a pattern, and you want to know which words in
 * words matches the pattern.
 *
 * A word matches the pattern if there exists a permutation of letters p so
 * that after replacing every letter x in the pattern with p(x), we get the
 * desired word.
 *
 * (Recall that a permutation of letters is a bijection from letters to
 * letters: every letter maps to another letter, and no two letters map to the
 * same letter.)
 *
 * Return a list of the words in words that match the given pattern.
 *
 * You may return the answer in any order.
 *
 * Example 1:
 *
 * Input: words = ["abc","deq","mee","aqq","dkd","ccc"], pattern = "abb"
 * Output: ["mee","aqq"]
 * Explanation: "mee" matches the pattern because there is a permutation {a ->
 * m, b -> e, ...}.
 * "ccc" does not match the pattern because {a -> c, b -> c, ...} is not a
 * permutation,
 * since a and b map to the same letter.
 *
 * Note:
 *
 * 1 <= words.length <= 50
 * 1 <= pattern.length = words[i].length <= 20
 *
 */
class Solution {
public:
    vector<string> findAndReplacePattern(vector<string>& words, string pattern) {
        vector<string> res;
        for (string& word : words) {
            if (match(word, pattern)) {
                res.push_back(word);
            }
        }
        return res;
    }

private:
    bool match(string& word, string& pattern) {
        vector<char> perm(26, (char) 0);
        for (int i = 0; i < word.length(); i++) {
            char c = word[i];
            char t = pattern[i];
            if (perm[c - 'a'] == 0) {
                perm[c - 'a'] = t;
            } else {
                if (perm[c - 'a'] != t) {
                    return false;
                }
            }
        }
        vector<int> count(26, 0);
        for (char c : perm) {
            if (c == 0) {
                continue;
            }
            count[c - 'a']++;
            if (count[c - 'a'] > 1) {
                return false;
            }
        }
        return true;
    }
};
ruoyewu/QpD
src/queue_reconstruction_by_height/Main.java
package queue_reconstruction_by_height;

import java.util.*;

/**
 * User: wuruoye
 * Date: 2019-02-24 22:46
 * Description:
 */
public class Main {
    public static void main(String[] args) {
        int[][] people = new int[][]{{7,0}, {4,4}, {7,1}, {5,0}, {6,1}, {5,2}};
        reconstructQueue2(people);
    }

    public static int[][] reconstructQueue(int[][] people) {
        if (people.length == 0) return people;
        LinkedList<int[]> left = new LinkedList<>();
        int[] min = null;
        for (int[] p : people) {
            if (p[1] == 0 && (min == null || min[0] > p[0])) {
                if (min != null) left.add(min);
                min = p;
            } else {
                left.add(p);
            }
        }
        people[0] = min;
        construct(people, 1, left);
        return people;
    }

    private static boolean construct(int[][] people, int pos, LinkedList<int[]> left) {
        if (pos == people.length) return true;
        for (int i = 0; i < left.size(); i++) {
            int[] p = left.get(i);
            int k = 0, h = p[0];
            for (int j = 0; j < pos; j++) {
                if (people[j][0] >= h) {
                    k++;
                }
            }
            if (k == p[1]) {
                // p is a feasible choice for this position
                people[pos] = p;
                left.remove(i);
                if (construct(people, pos+1, left)) {
                    return true;
                }
                left.add(i, p);
            }
        }
        return false;
    }

    public static int[][] reconstructQueue2(int[][] people) {
        Arrays.sort(people, new Comparator<int[]>() {
            @Override
            public int compare(int[] o1, int[] o2) {
                return o1[0] == o2[0] ? o1[1] - o2[1] : o2[0] - o1[0];
            }
        });
        List<int[]> result = new ArrayList<>();
        for (int[] p : people) {
            result.add(p[1], p);
        }
        return result.toArray(people);
    }
}
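The greedy in reconstructQueue2 works because people are placed tallest-first: when a pair p is inserted at index p[1], everyone already in the list is at least as tall, so exactly p[1] taller-or-equal people stand in front of it, and shorter people inserted later never disturb that count. A small self-contained trace of that idea on the sample input (the class name QueueReconstructionTrace is mine):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Prints each intermediate state of the tallest-first greedy insertion.
public class QueueReconstructionTrace {
    public static void main(String[] args) {
        int[][] people = {{7, 0}, {4, 4}, {7, 1}, {5, 0}, {6, 1}, {5, 2}};
        // Sort by height descending; ties broken by k ascending.
        Arrays.sort(people, (a, b) -> a[0] == b[0] ? a[1] - b[1] : b[0] - a[0]);
        List<int[]> queue = new ArrayList<>();
        for (int[] p : people) {
            queue.add(p[1], p); // exactly k taller-or-equal people are already ahead
            System.out.println("after inserting " + Arrays.toString(p) + ": " + format(queue));
        }
    }

    private static String format(List<int[]> queue) {
        StringBuilder sb = new StringBuilder();
        for (int[] p : queue) {
            sb.append(Arrays.toString(p)).append(' ');
        }
        return sb.toString().trim();
    }
}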
lechium/iPhoneOS_12.1.1_Headers
usr/lib/libboringssl.dylib/boringssl_concrete_boringssl_ctx_callback.h
/* * This header is generated by classdump-dyld 1.0 * on Saturday, June 1, 2019 at 6:42:35 PM Mountain Standard Time * Operating System: Version 12.1.1 (Build 16C5050a) * Image Source: /usr/lib/libboringssl.dylib * classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by <NAME>. */ #import <libobjc.A.dylib/OS_boringssl_ctx_callback.h> @class NSString; @interface boringssl_concrete_boringssl_ctx_callback : NSObject <OS_boringssl_ctx_callback> { unsigned short callback_type; /*^block*/id add_callback; /*^block*/id free_callback; /*^block*/id parse_callback; } @property (readonly) unsigned long long hash; @property (readonly) Class superclass; @property (copy,readonly) NSString * description; @property (copy,readonly) NSString * debugDescription; @end
rdstonech/VW
api/src/main/java/com/voxelwind/api/game/item/data/wood/Log.java
package com.voxelwind.api.game.item.data.wood;

import com.google.common.base.Preconditions;
import com.voxelwind.api.game.util.data.LogDirection;
import com.voxelwind.api.game.util.data.TreeSpecies;

import java.util.Objects;

/**
 * Represents a log.
 */
public class Log extends Wood {
    public static Log of (TreeSpecies species, LogDirection direction) {
        Preconditions.checkNotNull (species, "species");
        Preconditions.checkNotNull (direction, "direction");
        return new Log (species, direction);
    }

    private final LogDirection direction;

    private Log (TreeSpecies species, LogDirection direction) {
        super (species);
        this.direction = direction;
    }

    public LogDirection getDirection () {
        return direction;
    }

    @Override
    public boolean equals (Object o) {
        if (this == o) return true;
        if (o == null || getClass () != o.getClass ()) return false;
        if (!super.equals (o)) return false;
        Log log = (Log) o;
        return direction == log.direction;
    }

    @Override
    public int hashCode () {
        return Objects.hash (super.hashCode (), direction);
    }

    @Override
    public String toString () {
        return "Log{" +
                "species=" + getSpecies () + ',' +
                "direction=" + direction +
                '}';
    }
}
agaveplatform/science-apis
agave-systems/systems-core/src/test/java/org/iplantc/service/transfer/gridftp/GridFTPLoadTest.java
package org.iplantc.service.transfer.gridftp;

import org.apache.commons.io.FilenameUtils;
import org.apache.log4j.Logger;
import org.codehaus.plexus.util.FileUtils;
import org.iplantc.service.systems.dao.SystemDao;
import org.iplantc.service.systems.model.StorageSystem;
import org.iplantc.service.transfer.AbstractRemoteDataClientTest;
import org.iplantc.service.transfer.BaseTransferTestCase;
import org.iplantc.service.transfer.RemoteFileInfo;
import org.iplantc.service.transfer.exceptions.RemoteDataException;
import org.json.JSONException;
import org.json.JSONObject;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

import java.io.*;

@Test(enabled = false, groups= {"gridftp","performance","broken"})
public class GridFTPLoadTest extends BaseTransferTestCase {

    private static final Logger log = Logger.getLogger(AbstractRemoteDataClientTest.class);

    @BeforeClass
    public void beforeClass() throws Exception {
        super.beforeClass();

        JSONObject json = getSystemJson();
        system = StorageSystem.fromJSON(json);
        system.setOwner(SYSTEM_USER);
        system.getStorageConfig().setHomeDir(system.getStorageConfig().getHomeDir() + "/agave-data-unittests5");
        storageConfig = system.getStorageConfig();
        salt = system.getSystemId() + storageConfig.getHost() + storageConfig.getDefaultAuthConfig().getUsername();
        new SystemDao().persist(system);
    }

    @AfterClass
    public void afterClass() throws Exception {
        try {
            FileUtils.deleteDirectory(LOCAL_DOWNLOAD_DIR);
            clearSystems();
        } finally {
            try { client.disconnect(); } catch (Exception e) {}
        }
    }

    @BeforeMethod
    public void beforeMethod() throws Exception {
        try {
            FileUtils.deleteDirectory(LOCAL_DOWNLOAD_DIR);
        } catch (IOException e1) {}

        try {
            // auth client and ensure test directory is present
            if (client == null) {
                client = system.getRemoteDataClient();
            }
            client.authenticate();
            if (client.doesExist("")) {
                client.delete("");
            }
            client.mkdirs("");

            if (!client.isDirectory("")) {
                Assert.fail("System home directory " + client.resolvePath("") + " exists, but is not a directory.");
            }
        } catch (Exception e) {
            Assert.fail("Failed to create home directory " + client.resolvePath("") + " before test method.", e);
        }
    }

    protected JSONObject getSystemJson() throws JSONException, IOException {
        return jtd.getTestDataObject(STORAGE_SYSTEM_TEMPLATE_DIR + "/" + "gridftp-myproxy.example.com.json");
    }

    @Test
    public void testRecursiveStreamingGet() throws Exception {
        client.put(LOCAL_DIR, "");
        File localDir = new File(LOCAL_DOWNLOAD_DIR);
        streamingGet(FilenameUtils.getName(LOCAL_DIR), localDir);
    }

    private void streamingGet(String remotePath, File localFile) throws IOException, RemoteDataException {
        if (client.isFile(remotePath)) {
            System.out.println("Streaming " + remotePath + " file from remote server.");
            InputStream in = null;
            BufferedOutputStream out = null;
            try {
                //remotePath = FilenameUtils.getName(remotePath);
                in = client.getInputStream(remotePath, true);
                out = new BufferedOutputStream(new FileOutputStream(localFile));

                int bufferSize = client.getMaxBufferSize();
                byte[] b = new byte[bufferSize];
                int len = 0;

                while ((len = in.read(b)) > -1) {
                    out.write(b, 0, len);
                }

                out.flush();
                in.close();
                out.close();

                Assert.assertTrue(client.doesExist(remotePath), "Data not found on remote system after writing via output stream.");
                Assert.assertTrue(client.isFile(remotePath), "Data found to be a directory on remote system after writing via output stream.");
            } catch (Exception e) {
                Assert.fail("Writing to output stream threw unexpected exception", e);
            } finally {
                try { in.close(); } catch (Exception e) {}
                try { out.close(); } catch (Exception e) {}
            }
        } else {
            System.out.println("Creating " + localFile.getPath() + " on the local file system.");
            localFile.mkdirs();
            System.out.println("Listing contents of " + remotePath + " on the remote file system.");
            for(RemoteFileInfo remoteFile: client.ls(remotePath)) {
                System.out.println("Found " + remotePath + "/" + remoteFile.getName() + " on the remote system.");
                streamingGet((remotePath + "/" + remoteFile.getName()), new File(localFile, remoteFile.getName()));
            }
        }
    }

    @Test(enabled=false)
    public void testRecursiveStreamingPut() throws Exception {
        File localDir = new File(LOCAL_DIR);
        System.out.println("Creating parent directory");
        streamingPut(localDir, FilenameUtils.getName(LOCAL_DIR));
    }

    private void streamingPut(File localFile, String remotePath) throws IOException, RemoteDataException {
        if (localFile.isFile()) {
            OutputStream out = null;
            BufferedInputStream in = null;
            try {
                remotePath = localFile.getName();
                out = client.getOutputStream(remotePath, true, false);
                in = new BufferedInputStream(new FileInputStream(localFile));

                int bufferSize = client.getMaxBufferSize();
                byte[] b = new byte[bufferSize];
                int len = 0;

                while ((len = in.read(b)) > -1) {
                    out.write(b, 0, len);
                }

                out.flush();
                in.close();
                out.close();

                Assert.assertTrue(client.doesExist(remotePath), "Data not found on remote system after writing via output stream.");
                Assert.assertTrue(client.isFile(remotePath), "Data found to be a directory on remote system after writing via output stream.");
            } catch (Exception e) {
                Assert.fail("Writing to output stream threw unexpected exception", e);
            } finally {
                try { in.close(); } catch (Exception e) {}
                try { out.close(); } catch (Exception e) {}
            }
        } else {
            client.mkdirs(remotePath);
            for(File file: localFile.listFiles()) {
                streamingPut(file, remotePath + "/" + file.getName());
            }
        }
    }

    @Test(enabled=false)
    public void testRepeatedPutAndDeleteOfFileBug28() throws Exception {
        // generate a local scratch file
        String testFileName = "testRepeatedPutAndDeleteOfFileBug28andThisNameIsRealllllllllllllllllyLong.txt";
        int nbrIterations = 10;
        File localFile = new File(LOCAL_BINARY_FILE);

        for (int i = 0; i < nbrIterations; i++) {
            long startTime = System.currentTimeMillis();
            if (client.doesExist("")) {
                client.delete("");
            }
            client.mkdirs("");
            client.put(localFile.getAbsolutePath(), testFileName);
            client.put(LOCAL_DIR, "");
            System.out.println("Iteration[" + i + "]: " + (System.currentTimeMillis() - startTime));
        }
    }
}
zcy421593/aliyun-openapi-cpp-sdk
aliyun-api-rds_region/2014-08-15/include/ali_rdsregion_describe_accounts_types.h
#ifndef ALI_RDSREGION_DESCRIBE_ACCOUNTS_TYPESH
#define ALI_RDSREGION_DESCRIBE_ACCOUNTS_TYPESH

#include <stdio.h>
#include <string>
#include <vector>

namespace aliyun {

struct RdsRegionDescribeAccountsRequestType {
  std::string owner_id;
  std::string resource_owner_account;
  std::string resource_owner_id;
  std::string db_instance_id;
  std::string account_name;
  std::string owner_account;
};

struct RdsRegionDescribeAccountsDatabasePrivilegeType {
  std::string db_name;
  std::string account_privilege;
};

struct RdsRegionDescribeAccountsDBInstanceAccountType {
  std::vector<RdsRegionDescribeAccountsDatabasePrivilegeType> database_privileges;
  std::string db_instance_id;
  std::string account_name;
  std::string account_status;
  std::string account_description;
};

struct RdsRegionDescribeAccountsResponseType {
  std::vector<RdsRegionDescribeAccountsDBInstanceAccountType> accounts;
};

}  // end namespace
#endif
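// Illustrative sketch only: how a caller might fill the request type and walk a
// response returned by the SDK. The instance id and account name below are
// hypothetical placeholder values; the actual API call that populates `resp`
// lives elsewhere in the SDK and is not shown here.
#include <iostream>

int sketch_describe_accounts() {
  aliyun::RdsRegionDescribeAccountsRequestType req;
  req.db_instance_id = "rm-example";    // hypothetical instance id
  req.account_name = "test_account";    // hypothetical account filter

  aliyun::RdsRegionDescribeAccountsResponseType resp;  // assumed filled by the SDK
  for (const auto& account : resp.accounts) {
    std::cout << account.account_name << ": " << account.account_status << "\n";
    for (const auto& priv : account.database_privileges) {
      std::cout << "  " << priv.db_name << " -> " << priv.account_privilege << "\n";
    }
  }
  return 0;
}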
ausbin/qcor
mlir/transforms/lowering/CallableLowering.hpp
#pragma once
#include "quantum_to_llvm.hpp"

namespace qcor {
class TupleUnpackOpLowering : public ConversionPattern {
protected:
public:
  explicit TupleUnpackOpLowering(MLIRContext *context)
      : ConversionPattern(mlir::quantum::TupleUnpackOp::getOperationName(), 1,
                          context) {}
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

class CreateCallableOpLowering : public ConversionPattern {
protected:
public:
  inline static const std::string qir_create_callable =
      "__quantum__rt__callable_create";
  explicit CreateCallableOpLowering(MLIRContext *context)
      : ConversionPattern(mlir::quantum::CreateCallableOp::getOperationName(),
                          1, context) {}
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};
} // namespace qcor
fandylua/clickhouse-jdbc
clickhouse-client/src/main/java/com/clickhouse/client/data/ClickHouseIpv4Value.java
package com.clickhouse.client.data;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneOffset;
import java.util.UUID;

import com.clickhouse.client.ClickHouseChecker;
import com.clickhouse.client.ClickHouseValue;
import com.clickhouse.client.ClickHouseValues;

/**
 * Wrapper class of Inet4Address.
 */
public class ClickHouseIpv4Value extends ClickHouseObjectValue<Inet4Address> {
    /**
     * Create a new instance representing null value.
     *
     * @return new instance representing null value
     */
    public static ClickHouseIpv4Value ofNull() {
        return ofNull(null);
    }

    /**
     * Update given value to null or create a new instance if {@code ref} is null.
     *
     * @param ref object to update, could be null
     * @return same object as {@code ref} or a new instance if it's null
     */
    public static ClickHouseIpv4Value ofNull(ClickHouseValue ref) {
        return ref instanceof ClickHouseIpv4Value
                ? (ClickHouseIpv4Value) ((ClickHouseIpv4Value) ref).set(null)
                : new ClickHouseIpv4Value(null);
    }

    /**
     * Wrap the given value.
     *
     * @param value value
     * @return object representing the value
     */
    public static ClickHouseIpv4Value of(Inet4Address value) {
        return of(null, value);
    }

    /**
     * Update value of the given object or create a new instance if {@code ref} is
     * null.
     *
     * @param ref   object to update, could be null
     * @param value value
     * @return same object as {@code ref} or a new instance if it's null
     */
    public static ClickHouseIpv4Value of(ClickHouseValue ref, Inet4Address value) {
        return ref instanceof ClickHouseIpv4Value
                ? (ClickHouseIpv4Value) ((ClickHouseIpv4Value) ref).set(value)
                : new ClickHouseIpv4Value(value);
    }

    protected ClickHouseIpv4Value(Inet4Address value) {
        super(value);
    }

    @Override
    public ClickHouseIpv4Value copy(boolean deep) {
        return new ClickHouseIpv4Value(getValue());
    }

    @Override
    public byte asByte() {
        BigInteger bigInt = asBigInteger();
        return bigInt == null ? (byte) 0 : bigInt.byteValueExact();
    }

    @Override
    public short asShort() {
        BigInteger bigInt = asBigInteger();
        return bigInt == null ? (short) 0 : bigInt.shortValueExact();
    }

    @Override
    public int asInteger() {
        BigInteger bigInt = asBigInteger();
        return bigInt == null ? 0 : bigInt.intValueExact();
    }

    @Override
    public long asLong() {
        BigInteger bigInt = asBigInteger();
        return bigInt == null ? 0L : bigInt.longValue();
    }

    @Override
    public BigInteger asBigInteger() {
        return isNullOrEmpty() ? null : new BigInteger(1, getValue().getAddress());
    }

    @Override
    public float asFloat() {
        BigInteger bigInt = asBigInteger();
        return bigInt == null ? 0F : bigInt.floatValue();
    }

    @Override
    public double asDouble() {
        BigInteger bigInt = asBigInteger();
        return bigInt == null ? 0D : bigInt.doubleValue();
    }

    @Override
    public BigDecimal asBigDecimal(int scale) {
        BigInteger bigInt = asBigInteger();
        return bigInt == null ? null : new BigDecimal(bigInt, scale);
    }

    @Override
    public Inet4Address asInet4Address() {
        return getValue();
    }

    @Override
    public Inet6Address asInet6Address() {
        return ClickHouseValues.convertToIpv6(getValue());
    }

    @Override
    public Object asObject() {
        return getValue();
    }

    @Override
    public String asString(int length, Charset charset) {
        if (isNullOrEmpty()) {
            return null;
        }

        String str = String.valueOf(getValue().getHostAddress());
        if (length > 0) {
            ClickHouseChecker.notWithDifferentLength(
                    str.getBytes(charset == null ? StandardCharsets.UTF_8 : charset), length);
        }

        return str;
    }

    @Override
    public String toSqlExpression() {
        if (isNullOrEmpty()) {
            return ClickHouseValues.NULL_EXPR;
        }
        return new StringBuilder().append('\'').append(getValue().getHostAddress()).append('\'').toString();
    }

    @Override
    public ClickHouseIpv4Value update(byte value) {
        return update((int) value);
    }

    @Override
    public ClickHouseIpv4Value update(short value) {
        return update((int) value);
    }

    @Override
    public ClickHouseIpv4Value update(int value) {
        set(ClickHouseValues.convertToIpv4(value));
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(long value) {
        return update((int) value);
    }

    @Override
    public ClickHouseIpv4Value update(float value) {
        return update((int) value);
    }

    @Override
    public ClickHouseIpv4Value update(double value) {
        return update((int) value);
    }

    @Override
    public ClickHouseIpv4Value update(BigInteger value) {
        if (value == null) {
            resetToNullOrEmpty();
        } else {
            update(value.intValueExact());
        }
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(BigDecimal value) {
        if (value == null) {
            resetToNullOrEmpty();
        } else {
            update(value.intValueExact());
        }
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(Enum<?> value) {
        if (value == null) {
            resetToNullOrEmpty();
        } else {
            update(value.ordinal());
        }
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(Inet4Address value) {
        set(value);
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(Inet6Address value) {
        set(ClickHouseValues.convertToIpv4(value));
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(LocalDate value) {
        if (value == null) {
            resetToNullOrEmpty();
        } else {
            update((int) value.toEpochDay());
        }
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(LocalTime value) {
        if (value == null) {
            resetToNullOrEmpty();
        } else {
            update(value.toSecondOfDay());
        }
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(LocalDateTime value) {
        if (value == null) {
            resetToNullOrEmpty();
        } else {
            update((int) value.toEpochSecond(ZoneOffset.UTC));
        }
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(String value) {
        set(ClickHouseValues.convertToIpv4(value));
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(UUID value) {
        BigInteger v = ClickHouseValues.convertToBigInteger(value);
        if (v == null) {
            resetToNullOrEmpty();
        } else {
            update(v.intValueExact());
        }
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(ClickHouseValue value) {
        if (value == null) {
            resetToNullOrEmpty();
        } else {
            set(value.asInet4Address());
        }
        return this;
    }

    @Override
    public ClickHouseIpv4Value update(Object value) {
        if (value instanceof Inet4Address) {
            set((Inet4Address) value);
        } else if (value instanceof Inet6Address) {
            set(ClickHouseValues.convertToIpv4((Inet6Address) value));
        } else {
            super.update(value);
        }
        return this;
    }
}
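// Illustrative sketch only: round-tripping an IPv4 address through the wrapper
// above, using only methods defined in this class plus standard JDK calls.
import java.net.Inet4Address;
import java.net.InetAddress;

public class Ipv4ValueSketch {
    public static void main(String[] args) throws Exception {
        Inet4Address addr = (Inet4Address) InetAddress.getByName("127.0.0.1");
        ClickHouseIpv4Value value = ClickHouseIpv4Value.of(addr);
        // 127.0.0.1 is 0x7F000001, i.e. 2130706433 as an unsigned 32-bit integer
        System.out.println(value.asBigInteger());    // 2130706433
        System.out.println(value.toSqlExpression()); // '127.0.0.1'
    }
}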
amichard/tfrs
backend/api/migrations/0192_update_guest_role.py
from django.db import migrations
from django.db.migrations import RunPython


def set_default_role(apps, schema_editor):
    """
    Set FSNoAccess (Guest) as a default role
    """
    db_alias = schema_editor.connection.alias

    role = apps.get_model('api', 'Role')
    guest_role = role.objects.using(db_alias).get(name="FSNoAccess")
    guest_role.default_role = True
    guest_role.save()


def remove_default_role(apps, schema_editor):
    """
    Unflag all default roles
    """
    db_alias = schema_editor.connection.alias

    role_model = apps.get_model('api', 'Role')
    default_roles = role_model.objects.using(db_alias).filter(
        default_role=True
    )

    for role in default_roles:
        role.default_role = False
        role.save()


class Migration(migrations.Migration):
    """
    Attaches the functions for the migrations
    """
    dependencies = [
        ('api', '0191_role_default_role'),
    ]

    operations = [
        RunPython(
            set_default_role,
            remove_default_role
        )
    ]
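# Illustrative sketch only, not part of the original migration: the reverse
# step above saves rows one at a time; on a large table the same unflagging
# could be done with a single UPDATE through the queryset API instead.
def remove_default_role_bulk(apps, schema_editor):
    db_alias = schema_editor.connection.alias
    role_model = apps.get_model('api', 'Role')
    # one UPDATE statement instead of one save() per row
    role_model.objects.using(db_alias).filter(default_role=True).update(default_role=False)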
RollingSoftware/L2J_HighFive_Hardcore
l2j_server/src/main/java/com/l2jserver/gameserver/ai/NextAction.java
/*
 * Copyright (C) 2004-2016 L2J Server
 *
 * This file is part of L2J Server.
 *
 * L2J Server is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * L2J Server is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package com.l2jserver.gameserver.ai;

import java.util.ArrayList;
import java.util.List;

/**
 * Class for an AI action to be run after some event.<br>
 * Holds two array lists: the trigger events and the trigger intentions.
 * @author Yaroslav
 */
public class NextAction
{
	public interface NextActionCallback
	{
		public void doWork();
	}
	
	private List<CtrlEvent> _events;
	private List<CtrlIntention> _intentions;
	private NextActionCallback _callback;
	
	/**
	 * Main constructor.
	 * @param events
	 * @param intentions
	 * @param callback
	 */
	public NextAction(List<CtrlEvent> events, List<CtrlIntention> intentions, NextActionCallback callback)
	{
		_events = events;
		_intentions = intentions;
		setCallback(callback);
	}
	
	/**
	 * Single constructor.
	 * @param event
	 * @param intention
	 * @param callback
	 */
	public NextAction(CtrlEvent event, CtrlIntention intention, NextActionCallback callback)
	{
		if (_events == null)
		{
			_events = new ArrayList<>();
		}
		
		if (_intentions == null)
		{
			_intentions = new ArrayList<>();
		}
		
		if (event != null)
		{
			_events.add(event);
		}
		
		if (intention != null)
		{
			_intentions.add(intention);
		}
		setCallback(callback);
	}
	
	/**
	 * Execute the callback action, if one is set.
	 */
	public void doAction()
	{
		if (_callback != null)
		{
			_callback.doWork();
		}
	}
	
	/**
	 * @return the _events
	 */
	public List<CtrlEvent> getEvents()
	{
		// If null return empty list.
		if (_events == null)
		{
			_events = new ArrayList<>();
		}
		return _events;
	}
	
	/**
	 * @param event the events to set.
	 */
	public void setEvents(ArrayList<CtrlEvent> event)
	{
		_events = event;
	}
	
	/**
	 * @param event
	 */
	public void addEvent(CtrlEvent event)
	{
		if (_events == null)
		{
			_events = new ArrayList<>();
		}
		
		if (event != null)
		{
			_events.add(event);
		}
	}
	
	/**
	 * @param event
	 */
	public void removeEvent(CtrlEvent event)
	{
		if (_events == null)
		{
			return;
		}
		_events.remove(event);
	}
	
	/**
	 * @return the _callback
	 */
	public NextActionCallback getCallback()
	{
		return _callback;
	}
	
	/**
	 * @param callback the callback to set.
	 */
	public void setCallback(NextActionCallback callback)
	{
		_callback = callback;
	}
	
	/**
	 * @return the _intentions
	 */
	public List<CtrlIntention> getIntentions()
	{
		// If null return empty list.
		if (_intentions == null)
		{
			_intentions = new ArrayList<>();
		}
		return _intentions;
	}
	
	/**
	 * @param intentions the intentions to set.
	 */
	public void setIntentions(ArrayList<CtrlIntention> intentions)
	{
		_intentions = intentions;
	}
	
	/**
	 * @param intention
	 */
	public void addIntention(CtrlIntention intention)
	{
		if (_intentions == null)
		{
			_intentions = new ArrayList<>();
		}
		
		if (intention != null)
		{
			_intentions.add(intention);
		}
	}
	
	/**
	 * @param intention
	 */
	public void removeIntention(CtrlIntention intention)
	{
		if (_intentions == null)
		{
			return;
		}
		_intentions.remove(intention);
	}
}
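// Illustrative sketch only: wiring a NextAction with an anonymous callback.
// CtrlEvent.EVT_ARRIVED and CtrlIntention.AI_INTENTION_ATTACK are assumed to
// exist in the surrounding L2J enums (typical names, not verified here).
NextAction nextAction = new NextAction(CtrlEvent.EVT_ARRIVED, CtrlIntention.AI_INTENTION_ATTACK, new NextAction.NextActionCallback()
{
	@Override
	public void doWork()
	{
		// e.g. cast the queued skill once the character has arrived
	}
});
nextAction.doAction(); // runs the callback if one is set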
couchbase-fs/sdk-go
tools/cbfsadm/backup.go
package main

import (
	"flag"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/couchbaselabs/cbfs/config"
	"github.com/couchbaselabs/cbfs/tools"
)

var backupFlags = flag.NewFlagSet("backup", flag.ExitOnError)

var backupWait = backupFlags.Bool("w", false, "Wait for backup to complete")

type Backup struct {
	Filename string
	OID      string
	When     time.Time
	Conf     cbfsconfig.CBFSConfig
}

func backupCommand(ustr string, args []string) {
	u := cbfstool.ParseURL(ustr)
	fn := backupFlags.Arg(0)

	u.Path = "/.cbfs/backup/"
	form := url.Values{
		"fn": []string{fn},
		"bg": []string{strconv.FormatBool(!*backupWait)},
	}

	start := time.Now()
	res, err := http.Post(u.String(), "application/x-www-form-urlencoded",
		strings.NewReader(form.Encode()))
	cbfstool.MaybeFatal(err, "Error executing POST to %v - %v", u, err)
	defer res.Body.Close()

	if !(res.StatusCode == 202 || res.StatusCode == 201) {
		log.Printf("backup error: %v", res.Status)
		io.Copy(os.Stderr, res.Body)
		os.Exit(1)
	}

	if *backupWait {
		log.Printf("Completed backup to %v in %v", fn, time.Since(start))
	} else {
		log.Printf("Submitted backup task for %v", fn)
	}
}
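// Illustrative sketch only: the same form-encoded POST pattern backupCommand
// uses, shown standalone. The URL and field values below are placeholders.
package main

import (
	"log"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	form := url.Values{}
	form.Set("fn", "backup-2020.json") // hypothetical backup filename
	form.Set("bg", "true")             // run the backup in the background

	res, err := http.Post("http://example.com/.cbfs/backup/",
		"application/x-www-form-urlencoded", strings.NewReader(form.Encode()))
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Println(res.Status)
}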
weizhou/p5-image
src/p5Blenders/P5ExclusionBlender.js
import { P5Blender } from "./P5Blender";

export class P5ExclusionBlender extends P5Blender {
    constructor() {
        super();
        this.fragmentShader = `
            #ifdef GL_ES
            precision mediump float;
            #endif

            varying vec2 vTexCoord;

            uniform sampler2D textureID1;
            uniform sampler2D textureID2;

            void main() {
                vec4 base = texture2D(textureID1, vTexCoord);
                vec4 overlay = texture2D(textureID2, vTexCoord);

                // Dca = (Sca.Da + Dca.Sa - 2.Sca.Dca) + Sca.(1 - Da) + Dca.(1 - Sa)
                gl_FragColor = vec4((overlay.rgb * base.a + base.rgb * overlay.a - 2.0 * overlay.rgb * base.rgb)
                    + overlay.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlay.a), base.a);
            }
        `;
    }
}
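// Illustrative sketch only: the exclusion formula from the shader above,
// evaluated on the CPU for one premultiplied channel so the math is easy to
// sanity-check outside of GLSL.
function exclusionChannel(Sca, Dca, Sa, Da) {
    // Dca' = (Sca.Da + Dca.Sa - 2.Sca.Dca) + Sca.(1 - Da) + Dca.(1 - Sa)
    return (Sca * Da + Dca * Sa - 2 * Sca * Dca) + Sca * (1 - Da) + Dca * (1 - Sa);
}

// Fully opaque white over fully opaque white cancels to black:
console.log(exclusionChannel(1, 1, 1, 1)); // 0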
lkumarjain/jain-I18n
src/main/java/com/jain/addon/i18N/I18NChangeListener.java
/*
 * Copyright 2012 <NAME>.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.jain.addon.i18N;

import java.io.Serializable;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

import com.jain.addon.i18N.handlers.I18NComponentHandler;
import com.jain.addon.i18N.handlers.factory.I18NComponentHandlerFactory;
import com.vaadin.data.Property.ValueChangeEvent;
import com.vaadin.data.Property.ValueChangeListener;
import com.vaadin.ui.Component;
import com.vaadin.ui.Component.Event;
import com.vaadin.ui.HasComponents;

/**
 * <code>I18NChangeListener</code> is the default listener provided for locale changes.
 * It is implemented as a value change listener
 * because most applications use a drop-down for locale selection.
 * In case you are using some other mechanism, call the value change method yourself to invoke all locale change events.
 * @author <NAME>
 * @since Aug 27, 2012
 * @version 1.0.0
 */
@SuppressWarnings("serial")
public class I18NChangeListener implements ValueChangeListener {
	private Map<Component, I18NComponentHandler> componentMap;
	private Locale currentLocale;

	protected I18NChangeListener () {
		this.componentMap = new HashMap <Component, I18NComponentHandler> ();
		this.currentLocale = Locale.getDefault();
	}

	public void localeChanged (Component component) {
		if (currentLocale != component.getUI().getLocale()) {
			currentLocale = component.getUI().getLocale();
			updateComponents(component);
		}
	}

	private void updateComponents(Component component) {
		if (component instanceof HasComponents) {
			HasComponents container = (HasComponents) component;
			for (Component containerComponent : container) {
				updateComponents(containerComponent);
			}
		}

		updateCaption(component);

		if (component instanceof I18NListener) {
			I18NChangeEvent event = new I18NChangeEvent(component, currentLocale);
			((I18NListener) component).localeChanged(event);
		}
	}

	private void updateCaption(Component component) {
		I18NComponentHandler handler = componentMap.get(component);
		if (handler != null) {
			handler.applyI18N(component, currentLocale);
		}
	}

	public void registor(Component component) {
		componentMap.put(component, I18NComponentHandlerFactory.getHandler(component));
		updateCaption(component);
	}

	public void deRegistor(Component component) {
		componentMap.remove(component);
	}

	public String getI18NCaption(Component component) {
		I18NComponentHandler handler = componentMap.get(component);
		if (handler != null)
			return handler.getI18NCaption();
		return "";
	}

	public String getI18NCaption(Component component, Serializable serializable) {
		I18NComponentHandler handler = componentMap.get(component);
		if (handler != null)
			return handler.getI18NCaption(serializable);
		return "";
	}

	public I18NComponentHandler getI18NComponentHandler(Component component) {
		I18NComponentHandler handler = componentMap.get(component);
		return handler;
	}

	public void valueChange(ValueChangeEvent valueChangeEvent) {
		Event event = (Event) valueChangeEvent;
		Locale selected = ((Locale) valueChangeEvent.getProperty().getValue());
		selected = selected == null ? Locale.getDefault() : selected;
		event.getComponent().getUI().setLocale(selected);
	}
}
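// Illustrative sketch only: driving the listener by hand instead of through a
// locale drop-down. This assumes code living in the same package, since the
// constructor is protected, and `myButton` is a hypothetical Vaadin component
// already attached to a UI.
I18NChangeListener listener = new I18NChangeListener();
listener.registor(myButton);      // track the component and apply its i18n caption
listener.localeChanged(myButton); // re-resolve captions when the UI locale changes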
TwoPillar/jiba
src/com/twopillar/jiba/util/PreferencesUtils.java
package com.twopillar.jiba.util;

import android.content.Context;
import android.content.SharedPreferences;

public class PreferencesUtils {

	public static final String PREFERENCES_NAME = "cfg"; // preferences file name

	/**
	 * 1. write an int
	 *
	 * @param context
	 * @param key
	 * @param value
	 */
	public static void putIntPreferences(Context context, String key, int value) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		SharedPreferences.Editor editor = settings.edit();
		editor.putInt(key, value);
		editor.commit();
	}

	/**
	 * 2. read an int
	 *
	 * @param context
	 * @param key
	 * @return the stored value, or defaultValue if absent
	 */
	public static int getIntPreferences(Context context, String key, int defaultValue) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		return settings.getInt(key, defaultValue);
	}

	/**
	 * read an int
	 * default value is -1
	 */
	public static int getIntPreferences(Context context, String key) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		return settings.getInt(key, -1);
	}

	/**
	 * 3. write a String
	 *
	 * @param context
	 * @param key
	 * @param value
	 */
	public static void putStringPreferences(Context context, String key, String value) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		SharedPreferences.Editor editor = settings.edit();
		editor.putString(key, value);
		editor.commit();
	}

	/**
	 * 4. read a String
	 *
	 * @param context
	 * @param key
	 */
	public static String getStringValue(Context context, String key, String defValue) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		return settings.getString(key, defValue);
	}

	/**
	 * read a String
	 * default value is the empty string
	 */
	public static String getStringValue(Context context, String key) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		return settings.getString(key, "");
	}

	/**
	 * 5. write a boolean
	 *
	 * @param context
	 * @param key
	 * @param value
	 */
	public static void putBooleanPreferences(Context context, String key, boolean value) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		SharedPreferences.Editor editor = settings.edit();
		editor.putBoolean(key, value);
		editor.commit();
	}

	/**
	 * read a boolean
	 * default value is false
	 */
	public static boolean getBooleanPreferences(Context context, String key) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		return settings.getBoolean(key, false);
	}

	/**
	 * 6. read a boolean
	 *
	 * @param context
	 * @param key
	 * @param defValue
	 * @return
	 */
	public static boolean getBooleanPreferences(Context context, String key, boolean defValue) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		return settings.getBoolean(key, defValue);
	}

	/**
	 * 7. write a long
	 *
	 * @param context
	 * @param key
	 * @param value
	 */
	public static void putLongPreferences(Context context, String key, long value) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		SharedPreferences.Editor editor = settings.edit();
		editor.putLong(key, value);
		editor.commit();
	}

	/**
	 * 8. read a long
	 *
	 * @param context
	 * @param key
	 * @return
	 */
	public static long getLongPreferences(Context context, String key, Long defValue) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		return settings.getLong(key, defValue);
	}

	/**
	 * read a long
	 * default value is -1
	 */
	public static long getLongPreferences(Context context, String key) {
		SharedPreferences settings = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE);
		return settings.getLong(key, -1);
	}
}
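// Illustrative sketch only: typical call sites for the helpers above, e.g.
// from an Activity. "last_user" and "login_count" are hypothetical keys.
PreferencesUtils.putStringPreferences(context, "last_user", "alice");
PreferencesUtils.putIntPreferences(context, "login_count", 3);

String user = PreferencesUtils.getStringValue(context, "last_user");    // "alice"
int count = PreferencesUtils.getIntPreferences(context, "login_count"); // 3
int missing = PreferencesUtils.getIntPreferences(context, "unset_key"); // -1, the default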
openedx/paragon
icons/es5/Unpublished.js
function _extends() {
  _extends = Object.assign || function (target) {
    for (var i = 1; i < arguments.length; i++) {
      var source = arguments[i];
      for (var key in source) {
        if (Object.prototype.hasOwnProperty.call(source, key)) {
          target[key] = source[key];
        }
      }
    }
    return target;
  };
  return _extends.apply(this, arguments);
}

import * as React from "react";

function SvgUnpublished(props) {
  return /*#__PURE__*/React.createElement("svg", _extends({
    xmlns: "http://www.w3.org/2000/svg",
    width: 24,
    height: 24,
    viewBox: "0 0 24 24"
  }, props), /*#__PURE__*/React.createElement("path", {
    d: "M21.19 21.19L2.81 2.81 1.39 4.22l2.27 2.27A9.91 9.91 0 002 12c0 5.52 4.48 10 10 10 2.04 0 3.93-.61 5.51-1.66l2.27 2.27 1.41-1.42zm-10.6-4.59l-4.24-4.24 1.41-1.41 2.83 2.83.18-.18 1.41 1.41-1.59 1.59zm3-5.84l-7.1-7.1A9.91 9.91 0 0112 2c5.52 0 10 4.48 10 10 0 2.04-.61 3.93-1.66 5.51L15 12.17l2.65-2.65-1.41-1.41-2.65 2.65z"
  }));
}

export default SvgUnpublished;
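// Illustrative sketch only: the generated module exports a plain React
// component, so it can be passed straight to createElement; the size override
// below is just an example.
import * as React from "react";
import SvgUnpublished from "./Unpublished";

const icon = React.createElement(SvgUnpublished, { width: 16, height: 16 });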
sweetkristas/anura
src/formula_callable_definition_fwd.hpp
/*
	Copyright (C) 2003-2013 by <NAME> <<EMAIL>>

	This program is free software: you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation, either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef FORMULA_CALLABLE_DEFINITION_FWD_HPP_INCLUDED
#define FORMULA_CALLABLE_DEFINITION_FWD_HPP_INCLUDED

#include <boost/intrusive_ptr.hpp>

namespace game_logic
{

class formula_callable_definition;

typedef boost::intrusive_ptr<formula_callable_definition> formula_callable_definition_ptr;
typedef boost::intrusive_ptr<const formula_callable_definition> const_formula_callable_definition_ptr;

}

#endif
plamenti/DesignPatterns
Factory/src/com/plamenti/factoryMethod/ChicagoPizzaStore.java
package com.plamenti.factoryMethod;

import com.plamenti.factoryMethod.chicagoPizzas.ChicagoStyleCheesePizza;
import com.plamenti.factoryMethod.chicagoPizzas.ChicagoStyleClamPizza;
import com.plamenti.factoryMethod.chicagoPizzas.ChicagoStylePepperoniPizza;
import com.plamenti.factoryMethod.chicagoPizzas.ChicagoStyleVeggiePizza;

public class ChicagoPizzaStore extends PizzaStore {

	@Override
	public Pizza createPizza(String type) {
		Pizza pizza = null;

		if (type.equals("cheese")) {
			pizza = new ChicagoStyleCheesePizza();
		} else if (type.equals("clam")) {
			pizza = new ChicagoStyleClamPizza();
		} else if (type.equals("pepperoni")) {
			pizza = new ChicagoStylePepperoniPizza();
		} else if (type.equals("veggie")) {
			pizza = new ChicagoStyleVeggiePizza();
		}

		return pizza;
	}
}
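// Illustrative sketch only: classic Factory Method usage, where the concrete
// store decides which Pizza subclass to instantiate. Only createPizza appears
// above, so any ordering workflow around it is assumed, not taken from this repo.
PizzaStore store = new ChicagoPizzaStore();
Pizza pizza = store.createPizza("clam"); // returns a ChicagoStyleClamPizza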
AlexDoumas/BrPong_1
DoraGamePlaying-m7noskip/basicRunDORA.py
# basicRunDORA.py # basic functions and core run code for a simple DORA run (i.e., mapping, retrieval, predication, schema induction, whole relation formation). # Are you being run on an iPhone? run_on_iphone = False # imports. import random, numbers, math, operator, copy, json import numpy as np import dataTypes import DORA_GUI import buildNetwork if not run_on_iphone: import pygame from pygame.locals import * import pdb # Initialize pygame screen size to 1200x800. #screen_width = 1200.0 #screen_height = 700.0 # class that performs all the run operations in DORA. In class form so that new operations (e.g., compression, predicate recognition) can be implemented as new functions in this class (under the phase set section). class runDORA(object): def __init__(self, memory, parameters): self.memory = memory self.firingOrderRule = parameters['firingOrderRule'] self.firingOrder = None # initialized to None. self.asDORA = parameters['asDORA'] self.gamma = parameters['gamma'] self.delta = parameters['delta'] self.eta = parameters['eta'] self.HebbBias = parameters['HebbBias'] self.lateral_input_level = parameters['lateral_input_level'] self.strategic_mapping = parameters['strategic_mapping'] self.ignore_object_semantics = parameters['ignore_object_semantics'] self.ignore_memory_semantics = parameters['ignore_memory_semantics'] self.mag_decimal_precision = parameters['mag_decimal_precision'] self.dim_list = parameters['dim_list'] self.exemplar_memory = parameters['exemplar_memory'] self.recent_analog_bias = parameters['recent_analog_bias'] self.bias_retrieval_analogs = parameters['bias_retrieval_analogs'] self.use_relative_act = parameters['use_relative_act'] if run_on_iphone: self.doGUI = False else: self.doGUI = parameters['doGUI'] self.screen = 0 self.GUI_information = None # initialize to None. self.screen_width = parameters['screen_width'] self.screen_height = parameters['screen_height'] self.GUI_update_rate = parameters['GUI_update_rate'] self.starting_iteration = parameters['starting_iteration'] self.num_phase_sets_to_run = None self.count_by_RBs = None # initialize to None. self.local_inhibitor_fired = False # initialize to False. ###################################### ###### DORA OPERATION FUNCTIONS ###### ###################################### # 1) Bring a prop or props into WM (driver). This step is completed by passing the variable memory as an argument to the function (memory contains the driver proposition(s)). # function to prepare runDORA object for a run. def initialize_run(self, mapping): # index memory. self.memory = indexMemory(self.memory) # set up driver and recipient. self.memory.driver.Groups = [] self.memory.driver.Ps = [] self.memory.driver.RBs = [] self.memory.driver.POs = [] self.memory.recipient.Groups = [] self.memory.recipient.Ps = [] self.memory.recipient.RBs = [] self.memory.recipient.POs = [] if mapping == True and self.exemplar_memory == True: self.memory = make_AM_copy(self.memory) else: self.memory = make_AM(self.memory) # initialize .same_RB_POs field for POs. self.memory = update_same_RB_POs(self.memory) # initialize GUI if necessary. if self.doGUI: self.screen, self.GUI_information = DORA_GUI.initialize_GUI(self.screen_width, self.screen_height, self.memory) # get PO SemNormalizations. for myPO in self.memory.POs: myPO.get_weight_length() # 2) Initialize activations and inputs of all units to 0. 
def initialize_network_state(self): self.memory = initialize_memorySet(self.memory) self.inferred_new_P = False # 3) Select firing order of RBs in the driver (for now this step is random or user determined). def create_firing_order(self): if len(self.memory.driver.RBs) > 0: self.count_by_RBs = True else: self.count_by_RBs = False # and randomly assign the PO firing order. self.firingOrder = [] for myPO in self.memory.driver.POs: self.firingOrder.append(myPO) random.shuffle(self.firingOrder) if self.count_by_RBs: self.firingOrder = makeFiringOrder(self.memory, self.firingOrderRule) # function to perform steps 1-3 above. def do_1_to_3(self, mapping): self.initialize_run(mapping) self.initialize_network_state() self.create_firing_order() # 4) Enter the phase set. A phase set is each RB firing at least once (i.e., all RBs in firingOrder firing). It is in phase_sets you will do all of DORA's interesting operations (retrieval, mapping, learning, etc.). There is a function for each interesting operation. def do_map(self): # do initialize network operations (steps 1-3 above). self.memory = resetMappingUnits(self.memory) self.do_1_to_3(mapping=True) phase_sets = 3 # if there are multiple relations in the driver (i.e., the number of P units is 2 or more), then switch to LISA mode, and set ignore_object_semantics to True. changed_mode = False changed_ig_obj_sem = False if len(self.memory.driver.Ps) >= 2 and self.strategic_mapping == True: # set asDORA mode to False if it is not already. if self.asDORA == True: self.asDORA = False changed_mode = True # set ignore_object_semantics to False if it is not already. if self.ignore_object_semantics == False: self.ignore_object_semantics = True changed_ig_obj_sem = True # set up mapping hypotheses. # initialize (i.e., reset to empty) all the mappingHypotheses and mappingConnections. self.memory = resetMappingUnits(self.memory) # set up mappingHypotheses and mappingConnection units. self.memory = setupMappingUnits(self.memory) for phase_set in range(phase_sets): # if counting by RBs, then fire all RBs in self.firingOrder; otherwise fire POs in self.firingOrder if self.count_by_RBs: for currentRB in self.firingOrder: # initialize phase_set_iterator and flags (local_inhibitor_fired). phase_set_iterator = 1 self.local_inhibitor_fired = False # 4.1-4.2) Fire the current RB in the firingOrder. Update the network in discrete time-steps until the globalInhibitor fires (i.e., the current active RB is inhibited by its inhibitor). phase_set_iterator = 0 while self.memory.globalInhibitor.act == 0: # 4.3.1-4.3.10) update network activations. currentRB.act = 1.0 self.time_step_activations(phase_set, self.ignore_object_semantics, self.ignore_memory_semantics) #high_act = 0 #for myP in self.memory.recipient.POs: # if myP.act > high_act and myP.predOrObj == 0: # high_act = myP.act # 4.3.11) Update mapping hypotheses. self.memory = update_mappingHyps(self.memory) # fire the local_inhibitor if necessary. self.time_step_fire_local_inhibitor() # update GUI. phase_set_iterator += 1 if self.doGUI: self.time_step_doGUI(phase_set_iterator) # RB firing is OVER. self.post_count_by_operations() else: # make sure you are operating asDORA. asLISA = False if not self.asDORA: self.asDORA = True asLISA = True # fire by POs. for currentPO in self.firingOrder: # initialize phase_set_iterator and flags (local_inhibitor_fired). phase_set_iterator = 1 self.local_inhibitor_fired = False # 4.1-4.2) Fire the current RB in the firingOrder. 
Update the network in discrete time-steps until the globalInhibitor fires (i.e., the current active RB is inhibited by its inhibitor). phase_set_iterator = 0 while self.memory.localInhibitor.act == 0: # 4.3.1-4.3.10) update network activations. currentPO.act = 1.0 self.time_step_activations(phase_set, self.ignore_object_semantics, self.ignore_memory_semantics) # 4.3.11) Update mapping hypotheses. self.memory = update_mappingHyps(self.memory) # fire the local_inhibitor if necessary. self.time_step_fire_local_inhibitor() # update GUI. phase_set_iterator += 1 if self.doGUI: self.time_step_doGUI(phase_set_iterator) # PO firing is OVER. self.post_count_by_operations() # if you were operating as LISA before starting to map with just POs, go back to as LISA (i.e., set self.asDORA back to False). if asLISA: self.asDORA = False # phase set is OVER. self.post_phase_set_operations(retrieval_license=False, map_license=True) # if you changed mode or changed ignore_object_semantics, then change them back. if changed_mode == True: self.asDORA = True if changed_ig_obj_sem == True: self.ignore_object_semantics = False def do_retrieval(self): # do initialize network operations (steps 1-3 above). self.do_1_to_3(mapping=False) phase_sets = 1 for phase_set in range(phase_sets): # fire all RBs in self.firingOrder, unless there are no RBs, in which case fire the POs in self.firingOrder. if len(self.memory.driver.RBs) > 0: for currentRB in self.firingOrder: # initialize phase_set_iterator and flags (local_inhibitor_fired). phase_set_iterator = 1 self.local_inhibitor_fired = False # 4.1-4.2) Fire the current RB in the firingOrder. Update the network in discrete time-steps until the globalInhibitor fires (i.e., the current active RB is inhibited by its inhibitor). while self.memory.globalInhibitor.act == 0: # 4.3.1-4.3.10) update network activations. currentRB.act = 1.0 self.time_step_activations(phase_set, self.ignore_object_semantics, self.ignore_memory_semantics) # 4.3.12) Run retrieval routines. self.memory = retrieval_routine(self.memory, self.asDORA, self.gamma, self.delta, self.HebbBias, self.lateral_input_level, self.bias_retrieval_analogs) # fire the local_inhibitor if necessary. self.time_step_fire_local_inhibitor() # update GUI. phase_set_iterator += 1 if self.doGUI: self.time_step_doGUI(phase_set_iterator) # RB firing is OVER. self.post_count_by_operations() else: # when you are retrieving by POs, you are firing the POs one at a time in the driver by default as the firing order is composed of POs only. As a consequence, when you are running in LISA mode and PO inhibitors are not updating, you will not get PO time sharing (i.e., a PO will keep firing forever, as it's inhibitor is not updating (in LISA mode PO inhibitors do not update)). So, you must move to DORA mode for retrieval. # set .asDORA to True. previous_mode = self.asDORA self.asDORA = True for currentPO in self.firingOrder: # initialize phase_set_iterator and flags (local_inhibitor_fired). phase_set_iterator = 1 self.local_inhibitor_fired = False # 4.1-4.2) Fire the current RB in the firingOrder. Update the network in discrete time-steps until the globalInhibitor fires (i.e., the current active RB is inhibited by its inhibitor). while self.memory.localInhibitor.act == 0: # 4.3.1-4.3.10) update network activations. currentPO.act = 1.0 self.time_step_activations(phase_set, self.ignore_object_semantics, self.ignore_memory_semantics) # 4.3.12) Run retrieval routines. 
self.memory = retrieval_routine(self.memory, self.asDORA, self.gamma, self.delta, self.HebbBias, self.lateral_input_level, self.bias_retrieval_analogs) # fire the local_inhibitor if necessary. self.time_step_fire_local_inhibitor() # update GUI. phase_set_iterator += 1 if self.doGUI: self.time_step_doGUI(phase_set_iterator) # PO firing is OVER. self.post_count_by_operations() # return the .asDORA setting to its previous state. self.asDORA = previous_mode # phase set is OVER. self.post_phase_set_operations(retrieval_license=True, map_license=False) def do_retrieval_v2(self): # do initialize network operations (steps 1-3 above). self.do_1_to_3(mapping=False) phase_sets = 1 for phase_set in range(phase_sets): # fire all RBs in self.firingOrder, unless there are no RBs, in which case fire the POs in self.firingOrder. if len(self.memory.driver.RBs) > 0: for currentRB in self.firingOrder: # initialize local_inhibitor_fired to False. self.local_inhibitor_fired = False # 4.1-4.2) Fire the current RB in the firingOrder. Update the network in discrete time-steps for 7 time steps (counted by the phase_set_iterator). The point of allowing only a few time steps is to let the most semantically similar POs get active. phase_set_iterator = 0 while phase_set_iterator < 7: # 4.3.1-4.3.10) update network activations. currentRB.act = 1.0 self.time_step_activations(phase_set, self.ignore_object_semantics, self.ignore_memory_semantics) # 4.3.12) Run retrieval routines. self.memory = retrieval_routine(self.memory, self.asDORA, self.gamma, self.delta, self.HebbBias, self.lateral_input_level, self.bias_retrieval_analogs) # update GUI. phase_set_iterator += 1 if self.doGUI: self.time_step_doGUI(phase_set_iterator) # RB firing is OVER. self.post_count_by_operations() else: # when you are retrieving by POs, you are firing the POs one at a time in the driver by default as the firing order is composed of POs only. As a consequence, when you are running in LISA mode and PO inhibitors are not updating, you will not get PO time sharing (i.e., a PO will keep firing forever, as it's inhibitor is not updating (in LISA mode PO inhibitors do not update)). So, you must move to DORA mode for retrieval. # set .asDORA to True. previous_mode = self.asDORA self.asDORA = True for currentPO in self.firingOrder: # initialize local_inhibitor_fired to False. self.local_inhibitor_fired = False # 4.1-4.2) Fire the current RB in the firingOrder. Update the network in discrete time-steps for 7 time steps (counted by the phase_set_iterator). The point of allowing only a few time steps is to let the most semantically similar POs get active. phase_set_iterator = 0 while phase_set_iterator < 4: # 4.3.1-4.3.10) update network activations. currentPO.act = 1.0 self.time_step_activations(phase_set, self.ignore_object_semantics, self.ignore_memory_semantics) # 4.3.12) Run retrieval routines. self.memory = retrieval_routine(self.memory, self.asDORA, self.gamma, self.delta, self.HebbBias, self.lateral_input_level, self.bias_retrieval_analogs) # update GUI. phase_set_iterator += 1 if self.doGUI: self.time_step_doGUI(phase_set_iterator) # PO firing is OVER. self.post_count_by_operations() # return the .asDORA setting to its previous state. self.asDORA = previous_mode # phase set is OVER. self.post_phase_set_operations(retrieval_license=True, map_license=False) # operations for DORA's same/different/more/less detection using simple entropy. Find instances of same/different and more/less using entropy. 
# Basic idea (same/diff): For same/different, compare or over-lay the two representations. Create a DORAese sematnic signal (i.e., shared units have greater activation than unshared units). Calculate the error of the DORAese semantic pattern to a pattern with no entropy (i.e., all the active semantics have activation of 1.0). The extent of the error is a measure of difference, with low (or zero) error corresponding to 'same', and higher error corresponding to 'different'. # Basic idea (more/less): For more/less the idea is very similar to same/diff. If you have two instances coded with magnitude, and the magnitude corresponds to a level of neural firing (more firing for more magnitude), identifying more and less is simply comparing or over-laying the two representations of magnitude, and computing an error signal. The higher the error signal the greater the difference, and the item that is over-activated by the error signal (i.e., the error signal shows too much activation) is the 'more' item, and the under-activated item (i.e., the error signal shows too little activation) is the 'less' item. def do_entropy_ops_within(self, pred_only): # within (entropy over items from the same analog in the driver/recipient) set entropy_ops are used to compute specific kinds of similarity/difference/magnitude over dimensions (coded by preds) in the same analog in the driver or the recipient. extend_SDML = True # set asDORA mode to True if it is not already. changed_mode = False if self.asDORA == False: self.asDORA = True changed_mode = True # do initialize network operations (steps 1-3 above). self.do_1_to_3(mapping=False) # for each analog in the driver, check if any preds code the same dimension. If those preds have not had entropy operations run over them already (i.e., they are NOT connected to any invariants for 'same', 'more', or 'less'), then run ent_magnitudeMoreLessSame() over the dimensions and attach 'more' invariant to the greater extent and 'less' invariant to the smaller extent, or 'same' invariant to both if they are equal. for analog in self.memory.driver.analogs: for myPO in analog.myPOs: # if that PO codes a dimension, find if there are any other POs in the same mode (i.e., .predOrObj) that code a dimension. Here you iterate through analog.myPOs from myPO onwards. for myPO2 in analog.myPOs[analog.myPOs.index(myPO):]: if (myPO is not myPO2) and (myPO.predOrObj == myPO2.predOrObj): # check if they code the same dimension (are they both connected to a semantic unit coding a dimension with a weight near 1?), and whether any POs are connected to any SDM semantics (i.e., "more", "less", or "same"). intersect_dim, one_mag_sem_present, both_mag_sem_present = en_based_mag_checks(myPO, myPO2) # if the POs are preds, and mag_sem_present is False, then run ent_magnitudeMoreLessSame(). if not one_mag_sem_present and (myPO.predOrObj == 1) and (len(intersect_dim) == 1): print intersect_dim self.memory = basic_en_based_mag_comparison(myPO, myPO2, intersect_dim, self.memory, self.mag_decimal_precision) # if the POs are preds and those preds code at least one common dimension, then run ent_magnitudeMoreLessSame(). elif (myPO.predOrObj == 1) and both_mag_sem_present and extend_SDML==True: # if there are magnitude semantics present, and there are some matching dimensions, then activate the appropriate magnitude semantics and matching dimensions, and adjust weights as appropriate (i.e., turn on the appropriate magnitude semantics for each PO, and adjust weights accordingly). 
self.memory = basic_en_based_mag_refinement(myPO, myPO2, self.memory) elif (myPO.predOrObj == 0) and (len(intersect_dim) >= 1 and pred_only == False): # for each common dimension, run ent_magnitudeMoreLessSame(). for dim in intersect_dim: # find the semantics that code for the amount on the common dimension. sem_link_PO = [link for link in myPO.mySemantics if (link.mySemantic.dimension == dim) and (link.mySemantic.amount != 'nil')] sem_link_PO2 = [link for link in myPO2.mySemantics if (link.mySemantic.dimension == dim) and (link.mySemantic.amount != 'nil')] # get the sem_links out of lists and into simple variables. sem_link_PO, sem_link_PO2 = sem_link_PO[0], sem_link_PO2[0] # get the extents/amounts for the dimension. extent1 = sem_link_PO.mySemantic.amount extent2 = sem_link_PO2.mySemantic.amount more, less, same_flag, iterations = ent_magnitudeMoreLessSame(extent1, extent2, self.mag_decimal_precision) # connect the two POs to the appropraite relative magnitude semantics (based on the invariant patterns detected doing ent_magnitudeMoreLessSame()). if more == extent2: # call attach_mag_semantics() with myPO2 as firstPO and myPO as secondPO. self.memory = attach_mag_semantics(same_flag, myPO2, myPO, sem_link_PO2, sem_link_PO, self.memory) else: # call attach_mag_semantics() with myPO as firstPO and myPO2 as secondPO. self.memory = attach_mag_semantics(same_flag, myPO, myPO2, sem_link_PO, sem_link_PO2, self.memory) # if you changed asDORA mode (i.e., changed_mode == True), then set asDORA back to False. if changed_mode == True: self.asDORA = False def do_entropy_ops_between(self): # between (entropy over mapped items across the driver and recipient) set entropy_operations are used to compute over-all similarity/difference between mapped items. ########## # WAIT, DO I JUST WANNA DO SIMPLE SEMANTIC SIMILARITY CALCULATION USING SEMANTIC WEIGHTS? IF THEY MAP, COMPUTE THE SEMANTIC SIMILARITY BASED ON SEMANTIC WEIGHT VECTORS? # make sure that the .max_map_unit field has been filled in for all units. self.memory = get_max_map_units(self.memory) # compute entropy similarity for all mapped items across driver and recipient. for myPO in self.memory.driver.POs: # find the unit (if any) that myPO maps to. if myPO.max_map_unit: myPO2 = myPO.max_map_unit # activate the two POs, and let them activate their semntic units. myPO.act, myPO2.act = 1.0, 1.0 for iter_i in range(10): # update semantic inputs. for semantic in self.memory.semantics: semantic.update_input(self.memory, self.ignore_object_semantics, self.ignore_memory_semantics) # update sematnic activations. max_input = get_max_sem_input(self.memory) for semantic in self.memory.semantics: semantic.set_max_input(max_input) semantic.update_act() # run basic ent_overall_same_diff() to compute the similarity_ratio between the mapped POs. difference_ratio = ent_overall_same_diff(self.memory.semantics) print difference_ratio # finally, clear the inputs and activations of all current units. self.memory = initialize_AM(self.memory) # and return difference_ratio. return difference_ratio def do_predication(self): # you have to be operating asDORA for predication routines, so make sure .asDORA is True while performing predication. asDORA_flag = False if not self.asDORA: # set .asDORA to True, and asDORA_flag to True. self.asDORA = True asDORA_flag = True # do initialize network operations (steps 1-3 above). self.do_1_to_3(mapping=False) phase_sets = 1 for phase_set in range(phase_sets): # make a firingOrder out of all the objects in the driver. 
If count_by_RBs is False, it's just self.firingOrder, otherwise, you need to make it by finding all the object POs. if self.count_by_RBs == False: firingOrder = self.firingOrder else: firingOrder = [] for currentPO in self.memory.driver.POs: if currentPO.predOrObj == 0: firingOrder.append(currentPO) # fire all POs in firingOrder. for currentPO in firingOrder: # initialize phase_set_iterator and flags (local_inhibitor_fired). phase_set_iterator = 1 self.local_inhibitor_fired = False # 4.1-4.2) Fire the current RB in the firingOrder. Update the network in discrete time-steps until the globalInhibitor fires (i.e., the current active RB is inhibited by its inhibitor). phase_set_iterator = 0 made_new_pred = False while self.memory.localInhibitor.act == 0: # 4.3.1-4.3.10) update network activations. currentPO.act = 1.0 self.time_step_activations(phase_set, self.ignore_object_semantics, self.ignore_memory_semantics) # 4.3.13.1) Do predication. self.memory, made_new_pred = predication_routine(self.memory, made_new_pred, self.gamma) ############################################################ # for DEBUGGING: As long as at least 3 semantics match. # everything other than self.memory, made_new_pred = predication_routine(self.memory, made_new_pred, self.gamma) above the next comment should be deleted once debugging is done. ############################################################ #match_num = 0 #for semantic in self.memory.semantics: # if semantic.act > 0.95: # match_num += 1 #if match_num >= 3: # self.memory, made_new_pred = predication_routine(self.memory, made_new_pred, self.gamma) # fire the local_inhibitor if necessary. self.time_step_fire_local_inhibitor() # update GUI. phase_set_iterator += 1 if self.doGUI: self.time_step_doGUI(phase_set_iterator) # PO firing is OVER. self.post_count_by_operations() ############################################################################### # FOR DEBUGGING: To catch instances were a single RB is made in isolation from a mapping based on a single random semantic. #for analog in self.memory.analogs: # if len(analog.myRBs)%2 != 0: # pdb.set_trace() ############################################################################### # FOR DEBUGGING. # make sure that all new new RBs and their POs are in the same analog. for RB1 in self.memory.recipient.RBs: if RB1.inferred == True: for RB2 in self.memory.recipient.RBs: if RB2.inferred == True: if RB1.myanalog is not RB2.myanalog: # DEBUGGING FOR NOW. You can provide a fix if it proves necessary. pdb.set_trace() # FOR DEBUGGING: Make sure for each RB that the pred and object are in the same analog. for myRB in self.memory.recipient.RBs: if myRB.myPred[0].myanalog is not myRB.myObj[0].myanalog: pdb.set_trace() ############################################################################### # make sure that any new items (i.e., those in the newSet) are part of an analog, and that any newly predicated units that are based on driver units in the same analog, are themselves in the same analog (i.e., if two new predicates are based on two driver units from the same analog, then the new predicates should be in the same analog). # for each analog in the driver, make a list of all newSet objects that are based on POs from that analog. newSet_analogs = [] for analog in self.memory.driver.analogs: new_analog_elements = [] for myPO in analog.myPOs: # if the recipient unit that myPO maps to has created a new unit (i.e., .my_made_unit field is not empty), then add the made unit from newSet to the new_analog_elements list. 
if myPO.max_map_unit: if myPO.max_map_unit.my_made_unit: new_analog_elements.append(myPO.max_map_unit.my_made_unit) # add the new_analog_elements to the newSet_analogs list. newSet_analogs.append(new_analog_elements) # make a new analog object and fill in the appropriate elements based on newSet_analogs. for analog_elements in newSet_analogs: new_analog = dataTypes.Analog() for obj in analog_elements: # add the new_analog to the object, the RB, and the pred, and vise versa. obj.myanalog = new_analog obj.myRBs[0].myanalog = new_analog obj.myRBs[0].myPred[0].myanalog = new_analog new_analog.myRBs.append(obj.myRBs[0]) new_analog.myPOs.append(obj) new_analog.myPOs.append(obj.myRBs[0].myPred[0]) # add the new_analog to memory. self.memory.analogs.append(new_analog) #pdb.set_trace() # do post_phase_set_operations(). self.post_phase_set_operations(retrieval_license=False, map_license=False) # reset inferences (i.e., reset .inferred, .my_maker_unit, and .my_made_unit fields from all the newSet and recipient units). self.memory = reset_inferences(self.memory) # if asDORA_flag is True (i.e., the network was in LISA mode, but was converted to DORA mode for the duration of predication), then change .asDORA back to False. if asDORA_flag == True: self.asDORA = False def do_rel_form(self): # Only done with RBs, so only do if count_by_RBs is True. if self.count_by_RBs: # do initialize network operations (steps 1-3 above). self.do_1_to_3(mapping=False) phase_sets = 1 # you are running do_rel_form(), so set the inferred_new_P variable to False inferred_new_P = False for phase_set in range(phase_sets): # fire all RBs in self.firingOrder. if len(self.memory.driver.RBs) > 0: for currentRB in self.firingOrder: # initialize phase_set_iterator and flags (local_inhibitor_fired). phase_set_iterator = 1 self.local_inhibitor_fired = False # 4.1-4.2) Fire the current RB in the firingOrder. Update the network in discrete time-steps until the globalInhibitor fires (i.e., the current active RB is inhibited by its inhibitor). while self.memory.globalInhibitor.act == 0: # 4.3.1-4.3.10) update network activations. currentRB.act = 1.0 self.time_step_activations(phase_set, self.ignore_object_semantics, self.ignore_memory_semantics) # 4.3.13.2) Do whole-relation formation. self.memory, inferred_new_P = rel_form_routine(self.memory, inferred_new_P) # fire the local_inhibitor if necessary. self.time_step_fire_local_inhibitor() # update GUI. phase_set_iterator += 1 if self.doGUI: self.time_step_doGUI(phase_set_iterator) # RB firing is OVER. self.post_count_by_operations() # a full phase_set has run. # if a new P has been inferred, make sure it connects to at least 2 RBs, otherwise, delete it. if inferred_new_P: # the new P will be the last item in memory.Ps. Make sure it connects to at least 2 RBs. if len(self.memory.Ps[-1].myRBs) >= 2: # make a new analog that includes the new myP. Add the RB and its POs to the current P's analog, and delete the analog the RB currently belongs to. # make the new analog. new_analog = dataTypes.Analog() # add the new P to the new_analog (and vise versa). new_analog.myPs.append(self.memory.Ps[-1]) self.memory.Ps[-1].myanalog = new_analog # for each RB connected to the new P, add that RB and its POs to the current P's analog. old_analogs = [] for myRB in self.memory.Ps[-1].myRBs: # new_analog is the new analog you want to add elements to. # get the old_analog (which is the analog that RB currently belongs to). old_analog = myRB.myanalog # add the RB to new_analog (and vise versa). 
new_analog.myRBs.append(myRB) myRB.myanalog=new_analog # remove the RB from old_analog. old_analog.myRBs.remove(myRB) # add the RB's pred and obj units to new_analog (and vise vera). #new_analog.myPOs.append(myRB.myPred[0]) #myRB.myPred[0].myanalog = new_analog #new_analog.myPOs.append(myRB.myObj[0]) #myRB.myObj[0].myanalog = new_analog # remove the RB's Pred and Obj units from old_analog. #old_analog.myPOs.remove(myRB.myPred[0]) # FOR DEBUGGING: I'm getting an odd error with looking for a non-existent object on every 4 runs or so. Throw up a try/except and see if you can catch the error. try: old_analog.myPOs.remove(myRB.myObj[0]) except: pdb.set_trace() ######################################################################## # FOR DEBUGGING: You've put all these operations in this block and commented them out above (see that the below commands are all commented out in the above 15 or so lines). You might want to delete this block and uncomment out the operations above when you fix the bug that your above try/except statement is catching. new_analog.myPOs.append(myRB.myPred[0]) myRB.myPred[0].myanalog = new_analog new_analog.myPOs.append(myRB.myObj[0]) myRB.myObj[0].myanalog = new_analog old_analog.myPOs.remove(myRB.myPred[0]) ######################################################################## # keep a list of all items that have served as old_analogs. if old_analog not in old_analogs: old_analogs.append(old_analog) # add the new_analog to self.memory.analogs. self.memory.analogs.append(new_analog) # for each old_analog in old_analogs, if it is empty (i.e., if you've deleted all its tokens), delete it. for old_analog in old_analogs: if len(old_analog.myRBs) == 0 and len(old_analog.myPOs) == 0: self.memory.analogs.remove(old_analog) else: # delete the new P unit. del(self.memory.Ps[-1]) # run post_phase_set_operations. self.post_phase_set_operations(retrieval_license=False, map_license=False, inferred_new_P=True) def do_schematization(self): # Change asDORA mode to asDORA = True. oldasDORA = self.asDORA self.asDORA = True # Only done with RBs, so only do if count_by_RBs is True. if self.count_by_RBs: # do initialize network operations (steps 1-3 above). self.do_1_to_3(mapping=False) phase_sets = 1 for phase_set in range(phase_sets): # fire all RBs in self.firingOrder. if len(self.memory.driver.RBs) > 0: for currentRB in self.firingOrder: # initialize phase_set_iterator and flags (local_inhibitor_fired). phase_set_iterator = 1 self.local_inhibitor_fired = False # 4.1-4.2) Fire the current RB in the firingOrder. Update the network in discrete time-steps until the globalInhibitor fires (i.e., the current active RB is inhibited by its inhibitor). while self.memory.globalInhibitor.act == 0: # 4.3.1-4.3.10) update network activations. currentRB.act = 1.0 self.time_step_activations(phase_set, self.ignore_object_semantics, self.ignore_memory_semantics) # 4.3.13.3) Do schematization/predicate refinement. self.memory = schematization_routine(self.memory, self.gamma, phase_set_iterator) # fire the local_inhibitor if necessary. self.time_step_fire_local_inhibitor() # update GUI. phase_set_iterator += 1 if self.doGUI: self.time_step_doGUI(phase_set_iterator) # RB firing is OVER. self.post_count_by_operations() # phase_set is over. self.post_phase_set_operations(retrieval_license=False, map_license=False) # now make sure that any new items (i.e., those in the newSet) are part of an analog. Create a new analog and put all items from newSet in that analog. 
new_analog = dataTypes.Analog() for myP in self.memory.newSet.Ps: myP.myanalog = new_analog new_analog.myPs.append(myP) for myRB in self.memory.newSet.RBs: myRB.myanalog = new_analog new_analog.myRBs.append(myRB) for myPO in self.memory.newSet.POs: myPO.myanalog = new_analog new_analog.myPOs.append(myPO) self.memory.analogs.append(new_analog) # you're done with schematization, so switch back to oldasDORA state. self.asDORA = oldasDORA # FOR DEBUGGING: check if an RB has been made without two POs. for myRB in self.memory.newSet.RBs: if (len(myRB.myPred) < 1) and (len(myRB.myObj) < 1): pdb.set_trace() def do_rel_gen(self): # make sure that DORA is in DORA mode. Change .asDORA to true, but also save the current .asDORA state so that you can return to current .asDORA state at the end of generalisation. DORA_state = self.asDORA self.asDORA = True # Only done with RBs, so only do if count_by_RBs is True. if self.count_by_RBs: # find the analog in which all mapping driver units live. driver_analog = find_driver_analog_rel_gen(self.memory) # make sure all recipient tokens that map to driver tokens are in the same analog. self.group_recip_maps() # find the analog that contains the mapped recipient units so that it can be passed to reh rel_gen_routine(). recip_analog = find_recip_analog(self.memory) # do initialize network operations (steps 1-3 above). self.do_1_to_3(mapping=False) phase_sets = 1 for phase_set in range(phase_sets): # fire all RBs in the driver analog that contains mapped elements. if len(driver_analog.myRBs) > 0: for currentRB in driver_analog.myRBs: # initialize phase_set_iterator and flags (local_inhibitor_fired). phase_set_iterator = 1 self.local_inhibitor_fired = False # 4.1-4.2) Fire the current RB in the firingOrder. Update the network in discrete time-steps until the globalInhibitor fires (i.e., the current active RB is inhibited by its inhibitor). while self.memory.globalInhibitor.act == 0: # 4.3.1-4.3.10) update network activations. currentRB.act = 1.0 self.time_step_activations(phase_set, self.ignore_object_semantics, self.ignore_memory_semantics) # 4.3.14) Do relational inference. self.memory = rel_gen_routine(self.memory, self.gamma, recip_analog) # fire the local_inhibitor if necessary. self.time_step_fire_local_inhibitor() # update GUI. phase_set_iterator += 1 if self.doGUI: self.time_step_doGUI(phase_set_iterator) # RB firing is OVER. self.post_count_by_operations() # phase_set is over. self.post_phase_set_operations(retrieval_license=False, map_license=False) # return .asDORA state to starting .asDORA state. self.asDORA = DORA_state ###################################################################### ###################################################################### ###################################################################### ###################################### ###### DORA TIME_STEP FUNCTIONS ###### ###################################### # functions implementing operations performed during a single time-step in DORA. # function to perform basic network activation update for a time_step in the phase set. def time_step_activations(self, phase_set, ignore_object_semantics=False, ignore_memory_semantics=False): # initialize the input to all tokens and semantic units. self.memory = initialize_input(self.memory) # 4.3.2) Update modes of all P units in the driver and the recipient. if self.count_by_RBs: for myP in self.memory.driver.Ps: myP.get_Pmode() for myP in self.memory.recipient.Ps: myP.get_Pmode() # 4.3.3) Update input to driver token units. 
self.memory = update_driver_inputs(self.memory, self.asDORA, self.lateral_input_level) # 4.3.4-5) Update input to and activation of PO and RB inhibitors. for myRB in self.memory.driver.RBs: myRB.update_inhibitor_input() myRB.update_inhibitor_act() # update PO inhibitor act only if in DORA mode (i.e., asDORA == True). for myPO in self.memory.driver.POs: myPO.update_inhibitor_input() if self.asDORA: myPO.update_inhibitor_act() for myRB in self.memory.recipient.RBs: myRB.update_inhibitor_input() #RB.update_inhibitor_act() for myPO in self.memory.recipient.POs: myPO.update_inhibitor_input() if self.asDORA: myPO.update_inhibitor_act() # 4.3.6-7) Update input and activation of local and global inhibitors. self.memory.localInhibitor.checkDriverPOs(self.memory) self.memory.globalInhibitor.checkDriverRBs(self.memory) # 4.3.8) Update input to semantic units. for semantic in self.memory.semantics: # ignore input to semantic units from POs in object mode if ignore_object_semantics==True (i.e., if DORA is focusing on relational properties (from Hummel & Holyoak, 2003)). semantic.update_input(self.memory, ignore_object_semantics, ignore_memory_semantics) # 4.3.9) Update input to all tokens in the recipient and emerging recipient (i.e., newSet). self.memory = update_recipient_inputs(self.memory, self.asDORA, phase_set, self.lateral_input_level, self.ignore_object_semantics) self.memory = update_newSet_inputs(self.memory) # 4.3.10) Update activations of all units in the driver, recipient, and newSet, and all semanticss. self.memory = update_activations_run(self.memory, self.gamma, self.delta, self.HebbBias, phase_set) # function to fire the local inhibitor if necessary. def time_step_fire_local_inhibitor(self): if self.asDORA and self.memory.localInhibitor.act >= 0.99 and not self.local_inhibitor_fired: self.memory = self.memory.localInhibitor.fire_local_inhibitor(self.memory) self.local_inhibitor_fired = True # function to do GUI. def time_step_doGUI(self, phase_set_iterator): if self.doGUI: # check for keypress for pause. debug = False pause = False for event in pygame.event.get(): if not hasattr(event,'key'): continue elif event.key == K_p and event.type == KEYDOWN: # graphics are paused, wait for unpause. pause = True if pause: wait = True while wait: for event2 in pygame.event.get(): if event2.type == KEYDOWN: if event2.key == K_p: pause = False wait = False elif event2.key == K_d: # enter debug. debug = True ########################################################### ############### ENTER DEBUGGING DURING RUN! ############### # for DEBUGGING, enters set_trace() after a GUI pause. ########################################################### if debug: pdb.set_trace() # check for update GUI. if phase_set_iterator % self.GUI_update_rate == 0: # update_GUI. self.screen, self.memory = DORA_GUI.run_GUI(self.screen, self.GUI_information, self.memory, False) #################################################### #### DORA POST COUNT_BY AND PHASE_SET FUNCTIONS #### #################################################### # function to perform operations that occur after PO (if firing by POs) or RB (if firing by RBs) fires (i.e., what we're calling "count_by" operations as they occur after the firing of of the token you're firing (or counting) by). def post_count_by_operations(self): # fire the globalInhibitor. self.memory = self.memory.globalInhibitor.fire_global_inhibitor(self.memory) # reset the memory.localInhibitor.act and memory.globalInhibitor.act back to 0.0. 
        self.memory.localInhibitor.act = 0.0
        self.memory.globalInhibitor.act = 0.0
        # reset the RB and PO inhibitors.
        for myRB in self.memory.RBs:
            myRB.reset_inhibitor()
        for myPO in self.memory.POs:
            myPO.reset_inhibitor()

    # function to perform post-phase_set operations.
    def post_phase_set_operations(self, retrieval_license, map_license, inferred_new_P=False):
        # if you were doing retrieval (i.e., if retrieval_license is True), then use the Luce choice axiom here to retrieve items from memorySet into the recipient.
        if retrieval_license:
            self.memory = retrieve_tokens(self.memory, self.bias_retrieval_analogs, self.use_relative_act)
        # reset the mode of all P units in the recipient back to neutral (i.e., 0).
        for myP in self.memory.recipient.Ps:
            myP.initialize_Pmode()
        # reset the activation and input of all units back to 0.
        self.memory = initialize_AM(self.memory)
        # if you made a new P during relation formation, name it with the names of all its RBs.
        if inferred_new_P:
            # the new P is the last P in memory.Ps. Name it.
            name_string = ''
            # NOTE: I've added a try/except here because I got an odd error once that I've been unable to recreate. I'm leaving it here just in case it pops up again.
            try:
                len(self.memory.Ps[-1].myRBs)
            except:
                print('\nHey, you got an error a while ago that you were unable to reproduce. Basically, it seems you learned a P unit with no RBs (or something to that effect). You added a try/except to catch it in case it popped up again. It has. You will want to look very carefully at what happened with the latest P unit that has been made.\n')
                pdb.set_trace()
            for myRB in self.memory.Ps[-1].myRBs:
                name_string = name_string+'+'+myRB.name
            self.memory.Ps[-1].name = name_string
        # reset the comparison-based learning parameters back to False.
        # I DON'T THINK RESETTING THESE IS ACTUALLY NECESSARY...
        #predication_check = False
        #relation_formation_check = False
        # remove all links between POs and semantics that are below threshold (=0.1), and round up to 1.0 any connections that are above 0.9 (values as passed to del_small_link() and round_big_link() below).
        self.memory = del_small_link(self.memory, 0.1)
        self.memory = round_big_link(self.memory, 0.9)
        # 5) If mapping is licensed, update the mapping connections and update the max_map field for all driver and recipient tokens.
        if map_license:
            # update mapping connections.
            self.memory = update_mappingConnections(self.memory, self.eta)
            # update max_map fields.
            self.memory = get_max_maps(self.memory)
            # reset hypotheses back to 0.0.
            self.memory = reset_mappingHyps(self.memory)
            # recalibrate PO weights.
            #self.memory = calibrate_weight(self.memory)

    # the kludgy comparitor function that runs after the phase set.
    def do_kludge_comparitor(self):
        # run the kludgy comparitor.
        # make sure I have RBs.
        if self.count_by_RBs:
            # comparitor all pairs of preds in driver and recipient. Pair up all driver preds that are either connected to the same P, or not connected to any P.
            # first, the driver.
            driver_pred_pairs = []
            for PO1 in self.memory.driver.POs:
                if PO1.predOrObj == 1:
                    PO1.get_index(self.memory)
                    for PO2 in self.memory.driver.POs[PO1.my_index+1::]:
                        if PO2.predOrObj == 1:
                            for RB1 in PO1.myRBs:
                                break_flag = False
                                for RB2 in PO2.myRBs:
                                    if (len(RB1.myParentPs) == 0) and (len(RB2.myParentPs) == 0):
                                        # both POs are connected to RBs with no Ps, so comparitor them.
                                        driver_pred_pairs.append([PO1, PO2])
                                        break_flag = True
                                        break
                                    elif RB1.myParentPs[0] is RB2.myParentPs[0]:
                                        # the two POs share a P unit, so comparitor them.
                                        driver_pred_pairs.append([PO1, PO2])
                                        break_flag = True
                                        break
                                if break_flag:
                                    break
            # now the recipient POs.
            recipient_pred_pairs = []
            for PO1 in self.memory.recipient.POs:
                if PO1.predOrObj == 1:
                    PO1.get_index(self.memory)
                    for PO2 in self.memory.recipient.POs[PO1.my_index+1::]:
                        if PO2.predOrObj == 1:
                            for RB1 in PO1.myRBs:
                                break_flag = False
                                for RB2 in PO2.myRBs:
                                    # check the no-parent case first so that indexing myParentPs[0] below cannot fail (this matches the order used for the driver pairs above).
                                    if (len(RB1.myParentPs) == 0) and (len(RB2.myParentPs) == 0):
                                        # both POs are connected to RBs with no Ps, so comparitor them.
                                        recipient_pred_pairs.append([PO1, PO2])
                                        break_flag = True
                                        break
                                    elif RB1.myParentPs[0] is RB2.myParentPs[0]:
                                        # the two POs share a P unit, so comparitor them.
                                        recipient_pred_pairs.append([PO1, PO2])
                                        break_flag = True
                                        break
                                if break_flag:
                                    break
            # now run the comparitor on all driver and recipient pairs.
            for pair in driver_pred_pairs:
                self.memory = kludgey_comparitor(pair[0], pair[1], self.memory)
            for pair in recipient_pred_pairs:
                self.memory = kludgey_comparitor(pair[0], pair[1], self.memory)

    # function for use during relational generalisation. This function groups all recipient units that map to driver tokens into a single analog.
    def group_recip_maps(self):
        # find all analogs in the recipient that have mapped units and add them to the analog_list.
        analog_list = []
        for myPO in self.memory.POs:
            if myPO.max_map > 0.0:
                # add the analog that the PO is in to analog_list (assuming it is not already in there).
                if myPO.myanalog not in analog_list:
                    analog_list.append(myPO.myanalog)
        for myRB in self.memory.RBs:
            if myRB.max_map > 0.0:
                # add the analog that the RB is in to analog_list (assuming it is not already in there).
                if myRB.myanalog not in analog_list:
                    # bug fix: this previously referenced the undefined name RB.
                    analog_list.append(myRB.myanalog)
        for myP in self.memory.Ps:
            if myP.max_map > 0.0:
                # add the analog that the P is in to analog_list (assuming it is not already in there).
                if myP.myanalog not in analog_list:
                    analog_list.append(myP.myanalog)
        # if necessary (i.e., if analog_list contains multiple analogs), combine all analogs in analog_list into a single analog.
        if len(analog_list) > 1:
            new_analog = dataTypes.Analog()
            for analog in analog_list:
                # add all the POs, RBs, and Ps from the analogs in analog_list to the new_analog.
                for myPO in analog.myPOs:
                    # add the PO to new_analog and update the .myanalog information for that PO.
                    new_analog.myPOs.append(myPO)
                    myPO.myanalog = new_analog
                for myRB in analog.myRBs:
                    # add the RB to new_analog and update the .myanalog information for that RB.
                    new_analog.myRBs.append(myRB)
                    myRB.myanalog = new_analog
                for myP in analog.myPs:
                    # add the P to new_analog and update the .myanalog information for that P.
                    new_analog.myPs.append(myP)
                    myP.myanalog = new_analog
                # and now delete the old analog from self.memory.analogs.
                analog_index = self.memory.analogs.index(analog)
                self.memory.analogs.pop(analog_index)
                del(analog)

######################################################################

#######################################
######### CORE DORA FUNCTIONS #########
#######################################

# function to make AM without making copies of items in LTM.
# noinspection PyPep8Naming
def make_AM(memory):
    # for each token, if it is in an AM set (driver, recipient), then make sure all sub-tokens are also in that set. Make sure that all tokens from the same analog are in the same set or in memory (i.e., tokens from the same analog CANNOT be in different AM sets). Run findDriverRecipient().
    # for each token unit, make sure all subtokens are in the same set. Also, make sure that if a token is to enter the recipient, it checks that none of its subtokens are in the driver, and, if they are, that it remains in memory.
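    # Worked example of the invariant (hypothetical tokens): if P1.set == 'driver',
    # set_sub_tokens() pushes 'driver' down to P1's RBs and their POs; if
    # P1.set == 'recipient' but one of those POs is already in the driver,
    # check_sub_tokens() demotes P1 back to 'memory'. So an analog's tokens may be
    # split between one AM set and 'memory', but never between driver and recipient.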
for Group in memory.Groups: if Group.set != 'memory': # make sure all subtokens are in the same set. Group = set_sub_tokens(Group) for myP in memory.Ps: if myP.set != 'memory': # make sure all subtokens are in the same set. myP = set_sub_tokens(myP) for myRB in memory.RBs: if myRB.set != 'memory': # make sure all subtokens are in the same set. myRB = set_sub_tokens(myRB) for myPO in memory.POs: if myPO.set != 'memory': # make sure all subtokens are in the same set. myPO = set_sub_tokens(myPO) # and bring all the copied items from memory into AM (i.e., into driver/recipient). memory = findDriverRecipient(memory) # done. return memory # function to make sure all sub-tokens of a token to enter AM are in the same set. Function also checks that any item to enter the recipient, does not have driver sub-tokens. def set_sub_tokens(token): # check what kind of token you're dealing with. if token.my_type == 'Group': # if you're dealing with a Group, then for each of it's sub-groups, sub-Ps, and sub-RBs, set that sub_token.set to the same set as the Group, and run set_sub_tokens on that sub-token. # check to make sure that if the Group is in the recipient, none of it's subtokens are in the driver. go_on = True if token.set == 'recipient': go_on = check_sub_tokens(token) if go_on: for Group_under in token.myChildGroups: Group_under.set = token.set Group_under = set_sub_tokens(Group_under) for myP in token.myPs: myP.set = token.set myP = set_sub_tokens(myP) for myRB in token.myRBs: myRB.set = token.set myRB = set_sub_tokens(myRB) else: # set the token.set to 'memory'. token.set = 'memory' elif token.my_type == 'P': # if you're dealing with a P, then for each of it's RBs, set that myRB.set to the same set as the P, and run set_sub_tokens on the myRB. # check to make sure that if the P is in the recipient, none of it's subtokens are in the driver. go_on = True if token.set == 'recipient': go_on = check_sub_tokens(token) if go_on: for myRB in token.myRBs: myRB.set = token.set myRB = set_sub_tokens(myRB) else: # set the token.set to 'memory'. token.set = 'memory' elif token.my_type == 'RB': # if you're dealing with a RB, then for each of it's child-Ps and POs, set that token.set to the same set as the RB, and run set_sub_tokens on the the sub-token. # check to make sure that if the RB is in the recipient, none of it's subtokens are in the driver. go_on = True if token.set == 'recipient': go_on = check_sub_tokens(token) if go_on: if len(token.myPred) > 0: token.myPred[0].set = token.set token.myPred[0] = set_sub_tokens(token.myPred[0]) if len(token.myObj) > 0: token.myObj[0].set = token.set token.myObj[0] = set_sub_tokens(token.myObj[0]) elif len(token.myChildP) > 0: token.myChildP[0].set = token.set token.myChildP[0] = set_sub_tokens(token.myChildP[0]) else: # set the token.set to 'memory'. token.set = 'memory' # done. return token # function to check all sub-tokens of a token bound for the recipient, to make sure that none are in the driver. def check_sub_tokens(token): # set the go_on_flag to True (indicating that that there are no driver sub-tokens of a recipient super-token). go_on_flag = True # make sure you're dealing with a recipient token (this is a redundent check, but is here for safety). if token.set == 'recipient': # make sure all sub-tokens are NOT in the driver. if token.my_type == 'Group': # make sure none of my sub-groups (or their sub-tokens) are in the driver. 
            # bug fix: Groups keep their sub-groups in .myChildGroups (as in set_sub_tokens() above), not in a .myGroups field.
            for sub_group in token.myChildGroups:
                if sub_group.set == 'driver':
                    # set token.set to 'memory', go_on_flag to False, and break the loop.
                    token.set = 'memory'
                    go_on_flag = False
                    break
                else:
                    # make sure none of the sub-tokens of the sub-group are in the driver.
                    go_on_flag = check_sub_tokens(sub_group)
            # if go_on_flag is now False (i.e., the token has sub-tokens in the driver), token.set should be set to 'memory' (as it should not be retrieved into the recipient).
            if not go_on_flag:
                # bug fix: this was a comparison (==) where an assignment was intended (fixed here and in the parallel checks below).
                token.set = 'memory'
            # if go_on_flag is still True, make sure none of my Ps (or their sub-tokens) are in the driver.
            if go_on_flag:
                for myP in token.myPs:
                    if myP.set == 'driver':
                        # set token.set to 'memory', go_on_flag to False, and break the loop.
                        token.set = 'memory'
                        go_on_flag = False
                        break
                    else:
                        # make sure none of the sub-tokens of the P are in the driver.
                        go_on_flag = check_sub_tokens(myP)
                # if go_on_flag is now False (i.e., the token has sub-tokens in the driver), token.set should be set to 'memory' (as it should not be retrieved into the recipient).
                if not go_on_flag:
                    token.set = 'memory'
            # if go_on_flag is still True, make sure none of my RBs (or their sub-tokens) are in the driver.
            if go_on_flag:
                for myRB in token.myRBs:
                    if myRB.set == 'driver':
                        # set token.set to 'memory', go_on_flag to False, and break the loop.
                        token.set = 'memory'
                        go_on_flag = False
                        break
                    else:
                        # make sure none of the sub-tokens of the RB are in the driver.
                        go_on_flag = check_sub_tokens(myRB)
                # if go_on_flag is now False (i.e., the token has sub-tokens in the driver), token.set should be set to 'memory' (as it should not be retrieved into the recipient).
                if not go_on_flag:
                    token.set = 'memory'
        elif token.my_type == 'P':
            # make sure none of my RBs (or their sub-tokens) are in the driver.
            for myRB in token.myRBs:
                if myRB.set == 'driver':
                    # set token.set to 'memory', go_on_flag to False, and break the loop.
                    token.set = 'memory'
                    go_on_flag = False
                    break
                else:
                    # make sure none of the sub-tokens of the RB are in the driver.
                    go_on_flag = check_sub_tokens(myRB)
            # if go_on_flag is now False (i.e., the token has sub-tokens in the driver), token.set should be set to 'memory' (as it should not be retrieved into the recipient).
            if not go_on_flag:
                token.set = 'memory'
        elif token.my_type == 'RB':
            # make sure none of my POs are in the driver.
            if token.myPred[0].set == 'driver':
                # set token.set to 'memory' and go_on_flag to False.
                token.set = 'memory'
                go_on_flag = False
            # make sure you only check the objs of RBs not taking a child P as an argument.
            if len(token.myObj) > 0:
                if token.myObj[0].set == 'driver':
                    # set token.set to 'memory' and go_on_flag to False.
                    token.set = 'memory'
                    go_on_flag = False
    # done.
    return go_on_flag

# function to make copies of items from memory to enter AM.
def make_AM_copy(memory):
    # go through memory and make a list of all analogs to be copied. For each item, if it is to be retrieved into AM, then check if its analog is in the list of analogs to enter AM. If not, add it.
    analogs_to_copy = []
    for analog in memory.analogs:
        # check if the analog is to be copied, and if so, copy it.
        copy_analog_flag = check_analog_for_tokens_to_copy(analog)
        if copy_analog_flag and (analog not in analogs_to_copy):
            analogs_to_copy.append(analog)
    # now copy all analogs from analogs_to_copy into memory.
    for analog in analogs_to_copy:
        memory = copy_analog(analog, memory)
    # and bring all the copied items from memory into AM (i.e., into driver/recipient).
    memory = findDriverRecipient(memory)
    # all done.
return memory # function to check an analog for whether it contains any tokens to copy. def check_analog_for_tokens_to_copy(analog): # check if analog is to be copied. Go through all tokens in the analog and see whether the .set field of any is NOT 'memory'. If it is not, break the loop and copy the analog to analogs_to_copy. copy_analog_flag = False # first check all the Groups. if not copy_analog_flag: for Group in analog.myGroups: if Group.set != 'memory': # set copy_analog_flag to True and break the loop. copy_analog_flag = True break # if copy_analog_flag is still False, check the Ps. if not copy_analog_flag: for myP in analog.myPs: if myP.set != 'memory': # set copy_analog_flag to True and break the loop. copy_analog_flag = True break # if copy_analog_flag is still False, check the RBs. if not copy_analog_flag: for myRB in analog.myRBs: if myRB.set != 'memory': # set copy_analog_flag to True and break the loop. copy_analog_flag = True break # if copy_analog_flag is still False, check the POs. if not copy_analog_flag: for myPO in analog.myPOs: if myPO.set != 'memory': # set copy_analog_flag to True and break the loop. copy_analog_flag = True break # return copy_analog_flag return copy_analog_flag # function to copy a to be retrieved analog and it's elements into AM. def copy_analog(analog, memory): # make a copy of the analog. NOTE: you can't use copy here because of recursion issues, so you're rolling your own copy code. Maybe there's a package for this, but then you're you, so you're not looking it up. new_analog = dataTypes.Analog() # make all tokens from the to be copied analog. new_analog, memory = copy_analog_tokens(analog, new_analog, memory) # in the original analog, set the .set field of each element to 'memory'. analog = clear_set(analog) # for each token in the copied analog, if a token is to be retrieved, then make sure all tokens below it are also to be retrieved (e.g., if a P is to be retrieved into 'recipient', make sure all RBs and POs connected to those RBs also have their .set field set to 'recipient). new_analog = retrieve_all_relevant_tokens(new_analog) # for each token in the copied analog, delete any token that is not be be retrieved (i.e., the .set field is 'memory') (I don't think you need this part: AND there are no higher tokens that are to be retrieved (e.g., a PO has no RBs to be retrieved)), delete that token. Make sure all items above and below that token have that token removed from their list of connections (e.g., a to be deleted RB is removed as a connection its parent and child Ps, and its predicate and object POs). new_analog = delete_unretrieved_tokens(new_analog) # place copied analog into memory. memory.analogs.append(new_analog) # all done. return memory # function to make all tokens from the to be copied analog. def copy_analog_tokens(analog, new_analog, memory): # start with Ps. (1) make the P. (2) then make each RB. Connect the RB to the P. (3) For each RB's POs, (4) check if a PO by that name already exists in new_analog.myPOs, and if so, connect that PO to currentRB, otherwise, make the PO, and connect it to the RB. Then, for each RB without Ps, start with (3) above. Then, for each PO without RBs, start with (4) above. for myP in analog.myPs: # make a copy of the P. copy_P = dataTypes.PUnit(myP.name, myP.set, new_analog, False, new_analog) # put the copied P in new_analog and in memory. new_analog.myPs.append(copy_P) memory.Ps.append(copy_P) # make copy_P's RB units. for myRB in myP.myRBs: # make a copy of the RB. 
copy_RB = dataTypes.RBUnit(myRB.name, myRB.set, new_analog, False, new_analog) # put the copy_RB in new_analog and in memory. new_analog.myRBs.append(copy_RB) memory.RBs.append(copy_RB) # connect the copy_RB to copy_P and vise versa. copy_RB.myParentPs.append(copy_P) copy_P.myRBs.append(copy_RB) # make the RBs pred (if it does not already exist). Check if a pred with the same name as myRB.myPred[0] already exists in new_analog.myPOs. make_new_PO = True for myPO in new_analog.myPOs: if myPO.name == myRB.myPred[0].name: # the PO is already in new_analog, so just connect it to the copy_RB. myPO.myRBs.append(copy_RB) copy_RB.myPred.append(myPO) # set make_new_PO flag to False. make_new_PO = False break # if the PO does not already exist in the new_analog, then make it. if make_new_PO: # make the RB's pred. copy_pred = dataTypes.POUnit(myRB.myPred[0].name, myRB.myPred[0].set, new_analog, False, new_analog, 1) # put the copy_pred in new_analog and in memory. new_analog.myPOs.append(copy_pred) memory.POs.append(copy_pred) # connect the copy_pred to copy_RB and vise versa. copy_pred.myRBs.append(copy_RB) copy_RB.myPred.append(copy_pred) # make all the semantic connections for copy_pred. for link in myRB.myPred[0].mySemantics: # create a new link for the copy_pred. new_link = dataTypes.Link(copy_pred, None, link.mySemantic, link.weight) # add the new_link to memory.Links, new_pred.semantics, and link.mySemantic.myPOs. memory.Links.append(new_link) copy_pred.mySemantics.append(new_link) link.mySemantic.myPOs.append(new_link) # make the RBs object (if it does not already exist). make_new_PO = True for myPO in new_analog.myPOs: if myPO.name == myRB.myObj[0].name: # the PO is already in new_analog, so just connect it to the copy_RB. myPO.myRBs.append(copy_RB) copy_RB.myObj.append(myPO) # set make_new_PO flag to False. make_new_PO = False break # if the PO does not already exist in the new_analog, then make it. if make_new_PO: # make the RB's object. copy_obj = dataTypes.POUnit(myRB.myObj[0].name, myRB.myObj[0].set, new_analog, False, new_analog, 0) # put the copy_obj in new_analog and in memory. new_analog.myPOs.append(copy_obj) memory.POs.append(copy_obj) # connect the copy_obj to copy_RB and vise versa. copy_obj.myRBs.append(copy_RB) copy_RB.myObj.append(copy_obj) # make all the semantic connections for copy_obj. for link in myRB.myObj[0].mySemantics: # create a new link for the copy_obj. new_link = dataTypes.Link(copy_obj, None, link.mySemantic, link.weight) # add the new_link to memory.Links, copy_obj.semantics, and link.mySemantic.myPOs. memory.Links.append(new_link) copy_obj.mySemantics.append(new_link) link.mySemantic.myPOs.append(new_link) # now make all RBs that don't have Ps. for myRB in analog.myRBs: if len(myRB.myParentPs) == 0: # make a copy of the RB. copy_RB = dataTypes.RBUnit(myRB.name, myRB.set, new_analog, False, new_analog) # put the copy_RB in new_analog and in memory. new_analog.myRBs.append(copy_RB) memory.RBs.append(copy_RB) # make the RBs pred (if it does not already exist). Check if a pred with the same name as myRB.myPred[0] already exists in new_analog.myPOs. make_new_PO = True for myPO in new_analog.myPOs: if myPO.name == myRB.myPred[0].name: # the PO is already in new_analog, so just connect it to the copy_RB. myPO.myRBs.append(copy_RB) copy_RB.myPred.append(myPO) # set make_new_PO flag to False. make_new_PO = False break # if the PO does not already exist in the new_analog, then make it. if make_new_PO: # make the RB's pred. 
copy_pred = dataTypes.POUnit(myRB.myPred[0].name, myRB.myPred[0].set, new_analog, False, new_analog, 1) # put the copy_pred in new_analog and memory. new_analog.myPOs.append(copy_pred) memory.POs.append(copy_pred) # connect the copy_pred to copy_RB and vise versa. copy_pred.myRBs.append(copy_RB) copy_RB.myPred.append(copy_pred) # make all the semantic connections for copy_pred. for link in myRB.myPred[0].mySemantics: # create a new link for the copy_pred. new_link = dataTypes.Link(copy_pred, None, link.mySemantic, link.weight) # add the new_link to memory.Links, new_pred.semantics, and link.mySemantic.myPOs. memory.Links.append(new_link) copy_pred.mySemantics.append(new_link) link.mySemantic.myPOs.append(new_link) # make the RBs object (if it does not already exist). make_new_PO = True for myPO in new_analog.myPOs: if myPO.name == myRB.myObj[0].name: # the PO is already in new_analog, so just connect it to the copy_RB. myPO.myRBs.append(copy_RB) copy_RB.myObj.append(myPO) # set make_new_PO flag to False. make_new_PO = False break # if the PO does not already exist in the new_analog, then make it. if make_new_PO: # make the RB's object. copy_obj = dataTypes.POUnit(myRB.myObj[0].name, myRB.myObj[0].set, new_analog, False, new_analog, 0) # put the copy_obj in new_analog and memory. new_analog.myPOs.append(copy_obj) memory.POs.append(copy_obj) # connect the copy_obj to copy_RB and vise versa. copy_obj.myRBs.append(copy_RB) copy_RB.myObj.append(copy_obj) # make all the semantic connections for copy_obj. for link in myRB.myObj[0].mySemantics: # create a new link for the copy_obj. new_link = dataTypes.Link(copy_obj, None, link.mySemantic, link.weight) # add the new_link to memory.Links, copy_obj.semantics, and link.mySemantic.myPOs. memory.Links.append(new_link) copy_obj.mySemantics.append(new_link) link.mySemantic.myPOs.append(new_link) # make all POs that don't have RBs. for myPO in analog.myPOs: if len(myPO.myRBs) == 0: make_new_PO = True for checkPO in new_analog.myPOs: if checkPO.name == myPO.name: # the PO is already in new_analog, so set make_new_PO flag to False. make_new_PO = False break # if the PO does not already exist in the new_analog, then make it. if make_new_PO: # make the RB's object. copy_obj = dataTypes.POUnit(myPO.name, myPO.set, new_analog, False, new_analog, 0) # put the copy_obj in new_analog and memory. new_analog.myPOs.append(copy_obj) memory.POs.append(copy_obj) # make all the semantic connections for copy_obj. for link in myPO.mySemantics: # create a new link for the copy_obj. new_link = dataTypes.Link(copy_obj, None, link.mySemantic, link.weight) # add the new_link to memory.Links, copy_obj.semantics, and link.mySemantic.myPOs. memory.Links.append(new_link) copy_obj.mySemantics.append(new_link) link.mySemantic.myPOs.append(new_link) # all done. return new_analog, memory # function to clear .set field for all tokens in an analog. def clear_set(analog): for Group in analog.myGroups: Group.set = 'memory' for myP in analog.myPs: myP.set = 'memory' for myRB in analog.myRBs: myRB.set = 'memory' for myPO in analog.myPOs: myPO.set = 'memory' # done. return analog # function to make sure all lower tokens of a to be retrieved into AM token in an analog are also set to be retrieved. def retrieve_all_relevant_tokens(analog): # check each token, and if it is to be retrieved into AM (i.e., .set is NOT 'memory), make sure all tokens below it are also be be retrieved into AM. 
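    # Sketch of the downward flow (hypothetical tokens): marking a P as
    # 'recipient' and calling retrieve_lower_tokens(myP) stamps 'recipient'
    # onto each of its RBs, and each RB in turn stamps its pred PO and its
    # obj PO (or child P). Retrieval status only ever propagates downward here.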
    for Group in analog.myGroups:
        if Group.set != 'memory':
            Group = retrieve_lower_tokens(Group)
    for myP in analog.myPs:
        if myP.set != 'memory':
            myP = retrieve_lower_tokens(myP)
    for myRB in analog.myRBs:
        if myRB.set != 'memory':
            myRB = retrieve_lower_tokens(myRB)
    for myPO in analog.myPOs:
        if myPO.set != 'memory':
            myPO = retrieve_lower_tokens(myPO)
    # done.
    return analog

# function to make sure all of a token's sub-tokens are in the proper .set.
def retrieve_lower_tokens(token):
    # check what kind of token you're dealing with.
    if token.my_type == 'Group':
        # if you're dealing with a Group, then for each of its sub-groups, sub-Ps, and sub-RBs, set that sub-token's .set to the same set as the Group, and run retrieve_lower_tokens on that sub-token.
        for Group_under in token.myChildGroups:
            Group_under.set = token.set
            Group_under = retrieve_lower_tokens(Group_under)
        for myP in token.myPs:
            myP.set = token.set
            myP = retrieve_lower_tokens(myP)
        for myRB in token.myRBs:
            myRB.set = token.set
            myRB = retrieve_lower_tokens(myRB)
    if token.my_type == 'P':
        # if you're dealing with a P, then for each of its RBs, set that RB's .set to the same set as the P, and run retrieve_lower_tokens on the RB.
        for myRB in token.myRBs:
            myRB.set = token.set
            myRB = retrieve_lower_tokens(myRB)
    if token.my_type == 'RB':
        # if you're dealing with an RB, then for each of its child-Ps and POs, set that token's .set to the same set as the RB, and run retrieve_lower_tokens on the sub-token.
        token.myPred[0].set = token.set
        token.myPred[0] = retrieve_lower_tokens(token.myPred[0])
        if len(token.myObj) > 0:
            token.myObj[0].set = token.set
            token.myObj[0] = retrieve_lower_tokens(token.myObj[0])
        elif len(token.myChildP) > 0:
            token.myChildP[0].set = token.set
            token.myChildP[0] = retrieve_lower_tokens(token.myChildP[0])
    # done.
    return token

# function to delete unretrieved tokens from a copied analog.
def delete_unretrieved_tokens(analog):
    # go through each token in the analog. If it is unretrieved (i.e., token.set == 'memory'), delete that token and make sure you also delete that token from any tokens to which it is connected. NOTE: You don't need to worry about connections between POs and semantics, as the semantics copied POs are connected to are themselves copied and it doesn't matter if they are deleted. You'll replace these copied semantics with the original semantics using replace_copied_semantics() later in the check_analog_for_tokens_to_copy() function.
    # iterate over copies of the token lists, since delete_token() removes items from the originals as it goes (removing from a list while iterating it skips elements).
    for Group in list(analog.myGroups):
        if Group.set == 'memory':
            analog = delete_token(Group, analog)
    for myP in list(analog.myPs):
        if myP.set == 'memory':
            analog = delete_token(myP, analog)
    for myRB in list(analog.myRBs):
        if myRB.set == 'memory':
            # bug fix: this previously passed the undefined name RB.
            analog = delete_token(myRB, analog)
    for myPO in list(analog.myPOs):
        if myPO.set == 'memory':
            analog = delete_token(myPO, analog)
    # done.
    return analog

# function to delete a token from an analog.
def delete_token(token, analog):
    # figure out what kind of unit token is, then delete the token and delete instances of that token from any units it is connected to.
    if token.my_type == 'Group':
        # delete the Group from its ParentGroups, ChildGroups, Ps, and RBs.
        for pGroup in token.myParentGroups:
            pGroup.myChildGroups.remove(token)
        for cGroup in token.myChildGroups:
            cGroup.myParentGroups.remove(token)
        for myP in token.myPs:
            myP.myGroups.remove(token)
        for myRB in token.myRBs:
            myRB.myGroups.remove(token)
        # delete the Group itself from the analog.
        # NOTE: You don't need to delete the analog from memory, because you haven't added the analog to memory yet. It is still just a copied analog.
        analog.myGroups.remove(token)
    elif token.my_type == 'P':
        # delete the P from its Groups, ParentRBs, and ChildRBs.
        for Group in token.myGroups:
            Group.myPs.remove(token)
        for ParentRB in token.myParentRBs:
            ParentRB.myChildP.remove(token)
        for ChildRB in token.myRBs:
            ChildRB.myParentPs.remove(token)
        # delete the P itself.
        analog.myPs.remove(token)
    elif token.my_type == 'RB':
        # delete the RB from its ParentPs, Pred, and either ChildP or Object.
        for ParentP in token.myParentPs:
            ParentP.myRBs.remove(token)
        for pred in token.myPred:
            # if you are removing an RB, make sure that the PO connected to that RB also has the RB's second PO removed from its .same_RB_POs field. That is, a PO knows what other POs share RBs with it (so that it does not inhibit them during LISA mode). If an RB is deleted, the two POs sharing that RB no longer do. (RECALL: in a single analog, if two instances of a role-binding occur, the same PO tokens and same RB token are used to instantiate both (e.g., if L1(x) occurs twice in the same analog, then L1, x, and L1+x are used in both instances). As a consequence, if an RB is deleted, then the pred and object are not linked by an RB in the current analog, and, as such, neither PO should appear in the other PO's .same_RB_POs field.)
            # first, check to make sure the RB takes an object as an argument. A P as an argument will not appear in myPO.same_RB_POs, as a P is NOT a PO.
            if len(token.myObj) > 0:
                pred.same_RB_POs.remove(token.myObj[0])
            pred.myRBs.remove(token)
        for ChildP in token.myChildP:
            ChildP.myParentRBs.remove(token)
        for obj in token.myObj:
            # delete the token's pred from the object's .same_RB_POs field.
            obj.same_RB_POs.remove(token.myPred[0])
            obj.myRBs.remove(token)
        # delete the RB itself.
        analog.myRBs.remove(token)
    elif token.my_type == 'PO':
        # delete the PO from its RBs.
        for myRB in token.myRBs:
            if token.predOrObj == 1:
                myRB.myPred.remove(token)
            else:
                myRB.myObj.remove(token)
        # delete the PO itself.
        analog.myPOs.remove(token)
    # done.
    return analog

# function to replace semantics in a new PO with the original memory.semantics versions.
def replace_copied_semantics(myPO, semantics):
    # for each Link in my .mySemantics, find the original semantic with the same name as .mySemantic.name, and replace .mySemantic with the original semantic from memory.
    for Link in myPO.mySemantics:
        # search each semantic in memory.semantics for the one with the same name as Link.mySemantic. Once you have found that semantic, replace Link.mySemantic with the semantic from memory, and break the for loop.
        for semantic in semantics:
            if semantic.name == Link.mySemantic.name:
                Link.mySemantic = semantic
                semantic.myPOs.append(Link)
                # break the for loop.
                break
    # done.
    # bug fix: this previously returned the undefined name PO.
    return myPO, semantics

# function to find tokens in memory whose set is driver or recipient in order to construct the driver and recipient sets for the run. Returns driver and recipient sets.
def findDriverRecipient(memory):
    # first clear out the memory.driver and memory.recipient fields.
    memory.driver.Groups, memory.driver.Ps, memory.driver.RBs, memory.driver.POs, memory.driver.analogs = [], [], [], [], []
    memory.recipient.Groups, memory.recipient.Ps, memory.recipient.RBs, memory.recipient.POs, memory.recipient.analogs = [], [], [], [], []
    # for each Group, P, RB, and PO: if its set is 'driver', put it in the driver set; elif its set is 'recipient', put it in the recipient set.
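    # Minimal sketch of the routing rule unrolled below (assuming only that each
    # token carries a .set string and a .myanalog reference):
    #     if token.set == 'driver':
    #         memory.driver.<matching list>.append(token)
    #         if token.myanalog not in memory.driver.analogs:
    #             memory.driver.analogs.append(token.myanalog)
    #     elif token.set == 'recipient':
    #         ...the same, but into memory.recipient...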
    for Group in memory.Groups:
        if Group.set == 'driver':
            memory.driver.Groups.append(Group)
            # reset the .copy_for_DR field back to False.
            Group.copy_for_DR = False
            # now add the analog to driver.analogs if it is not already there.
            if Group.myanalog not in memory.driver.analogs:
                memory.driver.analogs.append(Group.myanalog)
        elif Group.set == 'recipient':
            memory.recipient.Groups.append(Group)
            # reset the .copy_for_DR field back to False.
            Group.copy_for_DR = False
            # now add the analog to recipient.analogs if it is not already there.
            if Group.myanalog not in memory.recipient.analogs:
                memory.recipient.analogs.append(Group.myanalog)
    for myP in memory.Ps:
        if myP.set == 'driver':
            memory.driver.Ps.append(myP)
            # reset the .copy_for_DR field back to False.
            myP.copy_for_DR = False
            # now add the analog to driver.analogs if it is not already there.
            if myP.myanalog not in memory.driver.analogs:
                memory.driver.analogs.append(myP.myanalog)
        elif myP.set == 'recipient':
            memory.recipient.Ps.append(myP)
            # reset the .copy_for_DR field back to False.
            myP.copy_for_DR = False
            # now add the analog to recipient.analogs if it is not already there.
            if myP.myanalog not in memory.recipient.analogs:
                memory.recipient.analogs.append(myP.myanalog)
    for myRB in memory.RBs:
        if myRB.set == 'driver':
            memory.driver.RBs.append(myRB)
            # reset the .copy_for_DR field back to False.
            myRB.copy_for_DR = False
            # now add the analog to driver.analogs if it is not already there.
            if myRB.myanalog not in memory.driver.analogs:
                memory.driver.analogs.append(myRB.myanalog)
        elif myRB.set == 'recipient':
            memory.recipient.RBs.append(myRB)
            # reset the .copy_for_DR field back to False.
            myRB.copy_for_DR = False
            # now add the analog to recipient.analogs if it is not already there.
            if myRB.myanalog not in memory.recipient.analogs:
                memory.recipient.analogs.append(myRB.myanalog)
    for myPO in memory.POs:
        if myPO.set == 'driver':
            memory.driver.POs.append(myPO)
            # now add the analog to driver.analogs if it is not already there.
            if myPO.myanalog not in memory.driver.analogs:
                memory.driver.analogs.append(myPO.myanalog)
        elif myPO.set == 'recipient':
            memory.recipient.POs.append(myPO)
            # now add the analog to recipient.analogs if it is not already there.
            if myPO.myanalog not in memory.recipient.analogs:
                memory.recipient.analogs.append(myPO.myanalog)
    # done.
    return memory

# make firing order.
def makeFiringOrder(memory, rule):
    # set the firing order of the driver using rule.
    # right now, the only rule is random, the default.
    # you should add pragmatics.
    if rule == 'by_top_random':
        # arrange RBs randomly within Ps or Groups.
        if len(memory.driver.Groups) > 0:
            # arrange by Groups.
            # randomly arrange the Groups.
            Gorder = memory.driver.Groups
            random.shuffle(Gorder)
            # collect the Ps under each Group, in Group order.
            firingOrder = []
            Porder = []
            for Group in Gorder:
                # order my Ps.
                for myP in Group.myPs:
                    Porder.append(myP)
            # now add the RBs from each P in Porder to firingOrder.
            for myP in Porder:
                for myRB in myP.myRBs:
                    # add RB to firingOrder.
                    firingOrder.append(myRB)
        elif len(memory.driver.Ps) > 0:
            # arrange by Ps.
            # randomly arrange the Ps.
            Porder = memory.driver.Ps
            # bug fix: this previously shuffled the undefined name myPorder.
            random.shuffle(Porder)
            # now select RBs from Porder.
            firingOrder = []
            for myP in Porder:
                for myRB in myP.myRBs:
                    # add RB to firingOrder.
                    firingOrder.append(myRB)
        else:
            # arrange RBs or POs randomly.
            firingOrder = []
            if len(memory.driver.RBs) > 0:
                for myRB in memory.driver.RBs:
                    firingOrder.append(myRB)
                random.shuffle(firingOrder)
            else:
                # arrange by POs.
                for myPO in memory.driver.POs:
                    firingOrder.append(myPO)
                random.shuffle(firingOrder)
    else:
        # use a totally random firing order.
        if not rule == 'totally_random':
            # note: use the print() function (the old print statement is a syntax error in Python 3).
            print('You have not entered a valid firing rule. I am arranging RBs at random.')
        firingOrder = []
        if len(memory.driver.RBs) > 0:
            for myRB in memory.driver.RBs:
                firingOrder.append(myRB)
            random.shuffle(firingOrder)
        else:
            # arrange by POs.
            for myPO in memory.driver.POs:
                firingOrder.append(myPO)
            random.shuffle(firingOrder)
    # done.
    return firingOrder

# index all items in memory.
def indexMemory(memory):
    for Group in memory.Groups: Group.get_index(memory)
    for myP in memory.Ps: myP.get_index(memory)
    for myRB in memory.RBs: myRB.get_index(memory)
    for myPO in memory.POs: myPO.get_index(memory)
    # done.
    return memory

# update the .same_RB_POs field for all POs in memory.
def update_same_RB_POs(memory):
    # clear the .same_RB_POs field of all POs in memory.
    for myPO in memory.POs:
        myPO.same_RB_POs = []
    # update the .same_RB_POs field of POs by iterating through RBs, adding objects to the pred's field and preds to the object's field.
    for myRB in memory.RBs:
        # if there is an object and a pred, add the pred to the object's .same_RB_POs, and the object to the pred's .same_RB_POs.
        if (len(myRB.myObj) > 0) and (len(myRB.myPred) > 0):
            myRB.myObj[0].same_RB_POs.append(myRB.myPred[0])
            myRB.myPred[0].same_RB_POs.append(myRB.myObj[0])
    # done.
    return memory

# a function to clear activation and input to all driver, recipient, newSet, and semantic units (i.e., everything in active memory, AM).
def initialize_AM(memory):
    for Group in memory.driver.Groups: Group.initialize_act()
    for myP in memory.driver.Ps: myP.initialize_act()
    for myRB in memory.driver.RBs: myRB.initialize_act()
    for myPO in memory.driver.POs: myPO.initialize_act()
    for Group in memory.recipient.Groups: Group.initialize_act()
    for myP in memory.recipient.Ps: myP.initialize_act()
    for myRB in memory.recipient.RBs: myRB.initialize_act()
    for myPO in memory.recipient.POs: myPO.initialize_act()
    for Group in memory.newSet.Groups: Group.initialize_act()
    for myP in memory.newSet.Ps: myP.initialize_act()
    for myRB in memory.newSet.RBs: myRB.initialize_act()
    for myPO in memory.newSet.POs: myPO.initialize_act()
    for semantic in memory.semantics: semantic.initializeSem()
    # done.
    return memory

# a function to clear the activation and input to all units in the network.
def initialize_memorySet(memory):
    for Group in memory.Groups: Group.initialize_act()
    for myP in memory.Ps: myP.initialize_act()
    for myRB in memory.RBs: myRB.initialize_act()
    for myPO in memory.POs: myPO.initialize_act()
    # done.
    return memory

# initialize input to all driver, recipient, newSet, and semantic units.
def initialize_input(memory):
    for Group in memory.driver.Groups: Group.initialize_input(0.0)
    for myP in memory.driver.Ps: myP.initialize_input(0.0)
    for myRB in memory.driver.RBs: myRB.initialize_input(0.0)
    for myPO in memory.driver.POs: myPO.initialize_input(0.0)
    for Group in memory.recipient.Groups: Group.initialize_input(0.0)
    for myP in memory.recipient.Ps: myP.initialize_input(0.0)
    for myRB in memory.recipient.RBs: myRB.initialize_input(0.0)
    for myPO in memory.recipient.POs: myPO.initialize_input(0.0)
    for Group in memory.newSet.Groups: Group.initialize_input(0.0)
    for myP in memory.newSet.Ps: myP.initialize_input(0.0)
    for myRB in memory.newSet.RBs: myRB.initialize_input(0.0)
    for myPO in memory.newSet.POs: myPO.initialize_input(0.0)
    for semantic in memory.semantics: semantic.initialize_input(0.0)
    # done.
return memory # update the activations of all units in driver, recipient, and newSet. def update_activations_run(memory, gamma, delta, HebbBias, phase_set): for Group in memory.driver.Groups: Group.update_act(gamma, delta, HebbBias) for myP in memory.driver.Ps: myP.update_act(gamma, delta, HebbBias) for myRB in memory.driver.RBs: myRB.update_act(gamma, delta, HebbBias) for myPO in memory.driver.POs: myPO.update_act(gamma, delta, HebbBias) for Group in memory.recipient.Groups: Group.update_act(gamma, delta, HebbBias) for myP in memory.recipient.Ps: myP.update_act(gamma, delta, HebbBias) for myRB in memory.recipient.RBs: myRB.update_act(gamma, delta, HebbBias) for myPO in memory.recipient.POs: myPO.update_act(gamma, delta, HebbBias) for Group in memory.newSet.Groups: Group.update_act(gamma, delta, HebbBias) for myP in memory.newSet.Ps: myP.update_act(gamma, delta, HebbBias) for myRB in memory.newSet.RBs: myRB.update_act(gamma, delta, HebbBias) for myPO in memory.newSet.POs: myPO.update_act(gamma, delta, HebbBias) # get the max input to any semantic unit, then update semantic activations. max_input = get_max_sem_input(memory) for semantic in memory.semantics: semantic.set_max_input(max_input) semantic.update_act() # done. return memory # update the activation of all units in memory that are NOT in driver, recipient, or newSet. (For use in retrieval.) def update_acts_memory(memory, gamma, delta, HebbBias): for Group in memory.Groups: if Group.set == 'memory': Group.update_act(gamma, delta, HebbBias) for myP in memory.Ps: if myP.set == 'memory': myP.update_act(gamma, delta, HebbBias) for myRB in memory.RBs: if myRB.set == 'memory': myRB.update_act(gamma, delta, HebbBias) for myPO in memory.POs: if myPO.set == 'memory': myPO.update_act(gamma, delta, HebbBias) # done. return memory # update inputs to driver units. def update_driver_inputs(memory, asDORA, lateral_input_level): # update inputs to all driver units. for Group in memory.driver.Groups: Group.update_input_driver(memory, asDORA) for myP in memory.driver.Ps: if myP.mode == 1: myP.update_input_driver_parent(memory, asDORA) elif myP.mode == -1: myP.update_input_driver_child(memory, asDORA) for myRB in memory.driver.RBs: myRB.update_input_driver(memory, asDORA) for myPO in memory.driver.POs: myPO.update_input_driver(memory, asDORA) # done return memory # update inputs to recipient units. def update_recipient_inputs(memory, asDORA, phase_set, lateral_input_level, ignore_object_semantics): # update inputs to all recipient units. for Group in memory.recipient.Groups: Group.update_input_driver(memory, asDORA) for myP in memory.recipient.Ps: if myP.mode == 1: myP.update_input_recipient_parent(memory, asDORA, phase_set, lateral_input_level) elif myP.mode == -1: myP.update_input_recipient_child(memory, asDORA, phase_set, lateral_input_level) for myRB in memory.recipient.RBs: myRB.update_input_recipient(memory, asDORA, phase_set, lateral_input_level) for myPO in memory.recipient.POs: myPO.update_input_recipient(memory, asDORA, phase_set, lateral_input_level, ignore_object_semantics) # done. return memory # update newSet inputs. def update_newSet_inputs(memory): # units in NewSet have input 1 if the token that made them in the driver is active above threshold, 0 otherwise. 
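    # Equivalently, each update below is just a hard threshold (sketch only;
    # .my_maker_unit is the driver token that inferred the newSet token):
    #     token.act = 1.0 if token.my_maker_unit.act > 0.75 else 0.0
    # The RB and PO loops first test that .my_maker_unit is set at all,
    # presumably because some newSet units can lack a maker.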
threshold = .75 for Group in memory.newSet.Groups: if Group.my_maker_unit.act > threshold: Group.act = 1.0 else: Group.act = 0.0 for myP in memory.newSet.Ps: if myP.my_maker_unit.act > threshold: myP.act = 1.0 else: myP.act = 0.0 for myRB in memory.newSet.RBs: if myRB.my_maker_unit: if myRB.my_maker_unit.act > threshold: myRB.act = 1.0 else: myRB.act = 0.0 for myPO in memory.newSet.POs: if myPO.my_maker_unit: if myPO.my_maker_unit.act > threshold: myPO.act = 1.0 else: myPO.act = 0.0 # done. return memory # update input to all memorySet units that are not in driver, recipient, or newSet (used during retreival). def update_memory_inputs(memory, asDORA, lateral_input_level): # for all units not in driver, recipient, or newSet (i.e., units with set != driver, recipient, or newSet), update input. Units in memory update as units in recipient. # set phase_set to 2. phase_set = 2 for Group in memory.Groups: if Group.set == 'memory': Group.update_input_recipient(memory, asDORA, phase_set, lateral_input_level) for myP in memory.Ps: if myP.set == 'memory': # NOTE: I think it might be best to avoid modes altogether when working in retieval mode. This version of the code reflects this assumption. myP.update_input_recipient_parent(memory, asDORA, phase_set, lateral_input_level) for myRB in memory.RBs: if myRB.set == 'memory': myRB.update_input_recipient(memory, asDORA, phase_set, lateral_input_level) for myPO in memory.POs: if myPO.set == 'memory': myPO.update_input_recipient(memory, asDORA, phase_set, lateral_input_level) # update with phase_set = 2 so that myPO units also take top down input from RBs. # done. return memory # get the max input to semantics unit in the network. def get_max_sem_input(memory): max_input = 0.0 for semantic in memory.semantics: if semantic.myinput > max_input: max_input = semantic.myinput # done. return max_input # function to delete links between semantics and POs that are less than threshold. def del_small_link(memory, threshold): for link in memory.Links: # if the link is less than threshold, then delete the link from the PO and the semantic, and remove the link from memory.Links. if link.weight < threshold: link.myPO.mySemantics.remove(link) link.mySemantic.myPOs.remove(link) memory.Links.remove(link) # done. return memory # function to round up to 1.0 any links between semantics and POs that are above a certain threshold. def round_big_link(memory, threshold): for link in memory.Links: if link.weight > threshold: link.weight = 1.0 # done. return memory # check if the requirements for entropy based same/different/more/less are met. def entropy_samediff_requirements(memory): do_entropy_SDML = False # make sure that the most active PO in driver and recipient have activation above 0.6 and map to one-another. #PO1 = get_most_active_unit(memory.driver.POs) #PO2 = get_most_active_unit(memory.recipient.POs) #if PO1.act >= 0.6 and PO2.act >= 0.6: # if PO1.max_map_unit is PO2: # do_entropy_SMDL = True # done. return do_entropy_SDML # check if the requirements for predication are met. def predication_requirements(memory): # make sure that all driver POs map to units in the recipient that don't have RBs, and that those mappings are above threshold(=.8). # get the max_maps and max_map_units. memory = get_max_maps(memory) memory = get_max_map_units(memory) do_predication = False for myPO in memory.driver.POs: # make sure that my max_map is at least .8, and the unit to which I map maximally has no RBs. 
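        # When the driver contains at least one PO, the loop below is equivalent
        # to this sketch (illustrative only):
        #     do_predication = all(po.max_map > .8 and len(po.max_map_unit.myRBs) < 1
        #                          for po in memory.driver.POs)
        # i.e., a single weakly-mapping PO, or one that maps to an
        # already-predicated object, vetoes predication.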
if myPO.max_map > .8 and len(myPO.max_map_unit.myRBs) < 1: do_predication = True else: do_predication = False break # done. return do_predication # check if the reqiurements for relation-formation are met. def rel_form_requirements(memory): # make sure that there are at least 2 RBs in the recipient that both map to RBs in the driver with mapping connections above 0.8, and that are NOT already connected to a P unit. do_rel_form = False RBs_meeting_requirements = 0 for myRB in memory.recipient.RBs: for mappingConnection in myRB.mappingConnections: if mappingConnection.weight >= .8: if len(mappingConnection.recipientToken.myParentPs) < 1: # increment the RBs_meeting_requirements variable by 1. RBs_meeting_requirements += 1 # if RBs_meeting_requirements is greater or equal to 2, then set do_rel_form to True. if RBs_meeting_requirements >= 2: do_rel_form = True break # done. return do_rel_form # check if requirements for schema induction are met. def schema_requirements(memory): do_schematize = True # make sure that all driver and recipient units that map to a recipient/driver (respectively) unit (i.e., have a mapping connection above 0.0) are also above threshold(=.7). Also, make sure that any token that meets the requirement for schematisation also has all its lower-order tokens (e.g., RBs and POs in the case of Ps, POs in the case of RBs) and higher-order tokens (e.g., PS in the case of RBs) have mapping connections above threshold as well. threshold = 0.7 for myP in memory.driver.Ps: if myP.max_map > 0 and myP.max_map < threshold: do_schematize = False break else: # make sure that the RBs and POs also meet requirements. for myRB in myP.myRBs: # make sure RBs and POs meet requirment. if myRB.max_map < threshold: do_schematize = False break elif myRB.myPred[0].max_map < threshold: do_schematize = False break elif myRB.myObj[0].max_map < threshold: do_schematize = False break if do_schematize == False: break if do_schematize: for myRB in memory.driver.RBs: if myRB.max_map > 0 and myRB.max_map < threshold: do_schematize = False break else: # make sure that POs and Ps also meet requirments. if myRB.myPred[0].max_map < threshold: do_schematize = False break elif myRB.myObj[0].max_map < threshold: do_schematize = False break elif len(myRB.myParentPs) > 0: if myRB.myParentPs[0].max_map < threshold: do_schematize = False break if do_schematize: for myPO in memory.driver.POs: if myPO.max_map > 0 and myPO.max_map < threshold: do_schematize = False break if do_schematize: for myP in memory.recipient.Ps: if myP.max_map > 0 and myP.max_map < threshold: do_schematize = False break else: # make sure that the RBs and POs also meet requirements. for myRB in myP.myRBs: # make sure RBs and POs meet requirment. if myRB.max_map < threshold: do_schematize = False break elif myRB.myPred[0].max_map < threshold: do_schematize = False break elif myRB.myObj[0].max_map < threshold: do_schematize = False break if do_schematize == False: break if do_schematize: for myRB in memory.recipient.RBs: if myRB.max_map > 0 and myRB.max_map < threshold: do_schematize = False break else: # make sure that POs and Ps also meet requirments. if myRB.myPred[0].max_map < threshold: do_schematize = False break elif myRB.myObj[0].max_map < threshold: do_schematize = False break elif len(myRB.myParentPs) > 0: if myRB.myParentPs[0].max_map < threshold: do_schematize = False break if do_schematize: for myPO in memory.recipient.POs: if myPO.max_map > 0 and myPO.max_map < threshold: do_schematize = False break # done. 
    return do_schematize

# check if the requirements for relational generalization are met.
def rel_gen_requirements(memory):
    # relational generalization requires that (1) at least one driver unit maps to a
    # recipient unit, and (2) every driver unit that does map, maps above threshold(=.7).
    driver_tokens = memory.driver.Ps + memory.driver.RBs + memory.driver.POs
    do_inference = False
    # make sure that at least one driver unit maps to a recipient unit.
    for token in driver_tokens:
        if token.max_map > 0.0:
            do_inference = True
            break
    # now make sure that for units in the driver that do map, the mapping is above threshold(=.7).
    if do_inference:
        for token in driver_tokens:
            if .7 > token.max_map > 0.0:
                do_inference = False
                break
    # done.
    return do_inference

# get the max mapping weight for all driver and recipient units.
def get_max_maps(memory):
    # for each driver and recipient token, record the weight of its highest-weight mapping
    # connection in .max_map (behaviour unchanged from the previous unrolled version).
    for token_list in (memory.driver.Groups, memory.driver.Ps, memory.driver.RBs, memory.driver.POs,
                       memory.recipient.Groups, memory.recipient.Ps, memory.recipient.RBs, memory.recipient.POs):
        for token in token_list:
            max_map = 0.0
            for mappingConnection in token.mappingConnections:
                if mappingConnection.weight > max_map:
                    max_map = mappingConnection.weight
            token.max_map = max_map
    # done.
    return memory

# initialize (i.e., reset to empty) all the mappingHypotheses and mappingConnections.
def resetMappingUnits(memory):
    # delete all mappingHypothesis and mappingConnection units.
    memory.mappingHypotheses = []
    memory.mappingConnections = []
    # clear the mappingHypotheses and mappingConnections fields of all driver and recipient units.
    for token_list in (memory.driver.Groups, memory.driver.Ps, memory.driver.RBs, memory.driver.POs,
                       memory.recipient.Groups, memory.recipient.Ps, memory.recipient.RBs, memory.recipient.POs):
        for token in token_list:
            token.mappingHypotheses = []
            token.mappingConnections = []
    # done.
    return memory

# reset the .mappingHypotheses, .mappingConnections, and .max_map of all tokens.
def reset_mappings(memory):
    for token_list in (memory.Groups, memory.Ps, memory.RBs, memory.POs):
        for token in token_list:
            token.mappingHypotheses = []
            token.mappingConnections = []
            token.max_map = 0.0
            token.max_map_unit = None
    # done.
    return memory

# set up mappingHypotheses and mappingConnection units.
def setupMappingUnits(memory):
    # set up mapping hypothesis and mapping connection units for every driver token and
    # every token of the same type in the recipient.
    def make_mapping_pair(dri_token, rec_token, make_hypothesis=True):
        # create a mapping connection unit and connect it to the driver and recipient
        # tokens' mappingConnections fields and to memory.mappingConnections.
        new_map_unit = dataTypes.mappingConnection(dri_token, rec_token, 0.0)
        dri_token.mappingConnections.append(new_map_unit)
        rec_token.mappingConnections.append(new_map_unit)
        memory.mappingConnections.append(new_map_unit)
        if make_hypothesis:
            # create a mapping hypothesis unit and connect it to the driver and recipient
            # tokens' mappingHypotheses fields and to memory.mappingHypotheses.
            new_hyp = dataTypes.mappingHypothesis(dri_token, rec_token, new_map_unit)
            dri_token.mappingHypotheses.append(new_hyp)
            rec_token.mappingHypotheses.append(new_hyp)
            memory.mappingHypotheses.append(new_hyp)
    for P_dri in memory.driver.Ps:
        # for every recipient P unit create a mapping hypothesis and mapping connection.
        for P_rec in memory.recipient.Ps:
            make_mapping_pair(P_dri, P_rec)
    for RB_dri in memory.driver.RBs:
        # for every recipient RB unit create a mapping hypothesis and mapping connection.
        for RB_rec in memory.recipient.RBs:
            make_mapping_pair(RB_dri, RB_rec)
    for PO_dri in memory.driver.POs:
        # every recipient PO gets a mapping connection, but only POs of the same type
        # (pred or obj) also get a mapping hypothesis.
        for PO_rec in memory.recipient.POs:
            make_mapping_pair(PO_dri, PO_rec, make_hypothesis=(PO_dri.predOrObj == PO_rec.predOrObj))
    # done.
    return memory

# update the mapping hypotheses.
def update_mappingHyps(memory):
    for hypothesis in memory.mappingHypotheses:
        hypothesis.update_hyp(memory)
    # done.
    return memory

# reset the values of the mapping hypotheses.
def reset_mappingHyps(memory):
    for hyp in memory.mappingHypotheses:
        hyp.hypothesis = 0.0
        hyp.max_hyp = 0.0
    # done.
    return memory

# update mapping connections.
def update_mappingConnections(memory, eta):
    # first step: divisively normalize all mapping hypotheses. For each mapping hypothesis,
    # divide it by the highest hypothesis of either unit involved in that hypothesis. For
    # example, divide the hypothesis between P[i] and P[j] by
    # max(max(hypotheses involving P[i]), max(hypotheses involving P[j])).
    for hypothesis in memory.mappingHypotheses:
        hypothesis.max_hyp = hypothesis.hypothesis
        for hyp in hypothesis.driverToken.mappingHypotheses:
            if hyp.hypothesis > hypothesis.max_hyp:
                hypothesis.max_hyp = hyp.hypothesis
        for hyp2 in hypothesis.recipientToken.mappingHypotheses:
            if hyp2.hypothesis > hypothesis.max_hyp:
                hypothesis.max_hyp = hyp2.hypothesis
    # now do the divisive normalization by dividing hypothesis.hypothesis by max_hyp.
    for hypothesis in memory.mappingHypotheses:
        if hypothesis.max_hyp > 0:
            hypothesis.hypothesis /= hypothesis.max_hyp
    # second step: subtractively normalize each hypothesis. For each hypothesis, look at all
    # hypotheses of the driver and recipient unit sharing the hypothesis, find the largest
    # hypothesis in either the shared driver or recipient that is NOT the hypothesis itself,
    # and subtract that value.
    for hypothesis in memory.mappingHypotheses:
        hypothesis.max_hyp = 0.0
        max_hyp = 0.0
        for hyp in hypothesis.driverToken.mappingHypotheses:
            if (hyp.hypothesis > max_hyp) and (not (hyp is hypothesis)):
                max_hyp = hyp.hypothesis
        for hyp2 in hypothesis.recipientToken.mappingHypotheses:
            if (hyp2.hypothesis > max_hyp) and (not (hyp2 is hypothesis)):
                max_hyp = hyp2.hypothesis
        if max_hyp > 0:
            hypothesis.max_hyp = max_hyp
    for hypothesis in memory.mappingHypotheses:
        hypothesis.hypothesis -= hypothesis.max_hyp
        # update the mappingConnections using the corresponding mappingHypothesis.
        hypothesis.myMappingConnection.weight += (eta*(1.1-hypothesis.myMappingConnection.weight)*hypothesis.hypothesis)
        # mapping connections are hard limited to between 0 and 1.
        if hypothesis.myMappingConnection.weight > 1:
            hypothesis.myMappingConnection.weight = 1
        elif hypothesis.myMappingConnection.weight < 0:
            hypothesis.myMappingConnection.weight = 0
    # done.
    return memory
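# Added illustration (not part of the model): a minimal, self-contained sketch of the
# two-step normalization that update_mappingConnections() applies to mapping hypotheses,
# run on a plain 2x2 matrix of hypothetical hypothesis values. Row i stands for a driver
# token, column j for a recipient token; the function name and the values are invented
# for demonstration only.
def _example_hypothesis_normalization():
    hyps = [[0.8, 0.2], [0.3, 0.9]]
    size = len(hyps)
    # divisive step: divide each cell by the largest value in its row or column
    # (the cell itself included, as in update_mappingConnections()).
    divided = [[0.0]*size for _ in range(size)]
    for i in range(size):
        for j in range(size):
            competitors = hyps[i] + [hyps[k][j] for k in range(size)]
            divided[i][j] = hyps[i][j]/max(competitors)
    # subtractive step: subtract the largest competing value in the same row or column
    # (the cell itself excluded), so each driver/recipient pairing inhibits its rivals.
    normalized = [[0.0]*size for _ in range(size)]
    for i in range(size):
        for j in range(size):
            competitors = [divided[i][k] for k in range(size) if k != j]
            competitors += [divided[k][j] for k in range(size) if k != i]
            normalized[i][j] = divided[i][j] - max(competitors)
    return normalized

# update max_map_unit (i.e., the unit I most map to).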
def get_max_map_units(memory):
    # for each token in driver and recipient, find its highest-weight mapping connection,
    # place that weight in .max_map, and place the token on the other end of the connection
    # in .max_map_unit. Driver tokens record a recipient token, and recipient tokens record
    # a driver token (NOTE: the original unrolled version recorded the recipientToken for
    # recipient Groups, a copy-paste error fixed here).
    for token_list, in_driver in ((memory.driver.Groups, True), (memory.driver.Ps, True),
                                  (memory.driver.RBs, True), (memory.driver.POs, True),
                                  (memory.recipient.Groups, False), (memory.recipient.Ps, False),
                                  (memory.recipient.RBs, False), (memory.recipient.POs, False)):
        for token in token_list:
            max_map = 0.0
            max_map_unit = None
            for mapping in token.mappingConnections:
                if mapping.weight > max_map:
                    max_map = mapping.weight
                    max_map_unit = mapping.recipientToken if in_driver else mapping.driverToken
            # update .max_map and .max_map_unit.
            token.max_map = max_map
            token.max_map_unit = max_map_unit
    # done.
    return memory

# function to run the network during retrieval.
def retrieval_routine(memory, asDORA, gamma, delta, HebbBias, lateral_input_level, bias_retrieval_analogs):
    # update input to memorySet units.
    memory = update_memory_inputs(memory, asDORA, lateral_input_level)
    # update activation of memorySet units.
    memory = update_acts_memory(memory, gamma, delta, HebbBias)
    if bias_retrieval_analogs:
        # for each analog, track the total activation of its units if they are in memory
        # (i.e., if the analog is not already in driver or recipient).
        for analog in memory.analogs:
            analog.total_act = 0.0
            for myP in analog.myPs:
                if myP.set == 'memory':
                    analog.total_act += myP.act
            for myRB in analog.myRBs:
                if myRB.set == 'memory':
                    analog.total_act += myRB.act
            for myPO in analog.myPOs:
                if myPO.set == 'memory':
                    analog.total_act += myPO.act
            analog.sum_num_units()
    else:
        # track the most active P, RB, and PO units in memory.
        for myP in memory.Ps:
            if myP.set == 'memory':
                if myP.act > myP.max_act:
                    myP.max_act = myP.act
        for myRB in memory.RBs:
            if myRB.set == 'memory':
                if myRB.act > myRB.max_act:
                    myRB.max_act = myRB.act
        for myPO in memory.POs:
            if myPO.set == 'memory':
                if myPO.act > myPO.max_act:
                    myPO.max_act = myPO.act
    # done.
    return memory

# function to retrieve tokens from memory. Takes as arguments the memory set, and a
# bias_retrieval_analogs flag that, if True, biases retrieval towards whole analogs.
def retrieve_tokens(memory, bias_retrieval_analogs, use_relative_act):
    # if bias_retrieval_analogs is True, bias towards retrieving whole analogs. Otherwise,
    # default to no bias (Ps, RBs, and POs stand some odds of being retrieved regardless of
    # their interconnectivity; of course, if a token is retrieved, all tokens below it that
    # the token is connected to are also retrieved).
    if use_relative_act:
        # retrieve using relative activation of propositions.
        if bias_retrieval_analogs:
            # retrieve whole analogs.
            # create a normalised retrieval score for each analog (i.e.,
            # analog.total_act/analog.num_units), and make a list of all analog activations.
            analog_activation_list = []
            for analog in memory.analogs:
                # make sure analog has a .total_act and .num_units > 0.
                if analog.total_act > 0 and analog.num_units > 0:
                    # calculate analog.normalised_retrieval_act and add it to the list.
                    analog.normalised_retrieval_act = analog.total_act/analog.num_units
                    analog_activation_list.append(analog.normalised_retrieval_act)
            # retrieve analogs with a probability calculated as a function of the ratio of the
            # specific analog's normalised activation to the average normalised activation of
            # all active analogs.
            # find the average and highest normalised activation for analogs, and move the
            # average halfway towards the highest to use as a threshold.
            avg_analog_norm_act = np.mean(analog_activation_list)
            high_analog_norm_act = max(analog_activation_list)
            avg_analog_norm_act = (high_analog_norm_act+avg_analog_norm_act)/2
            # transform all retrieval activations using a sigmoidal function with a threshold
            # around the inflated average computed just above.
            transformed_acts = []
            for analog in memory.analogs:
                if analog.total_act > 0:
                    analog.normalised_retrieval_act = 1/(1 + math.exp(10*(analog.normalised_retrieval_act-avg_analog_norm_act)))
                    transformed_acts.append(analog.normalised_retrieval_act)
            # get the sum of all transformed normalised analog activations (NOTE: the original
            # summed the pre-transformation list, contradicting its own comment).
            sum_analog_norm_act = sum(transformed_acts)
            # retrieve analogs using the Luce choice rule applied to transformed activations.
            for analog in memory.analogs:
                # if analog has a .total_act and .num_units > 0, then calculate the retrieve_prob.
                if analog.total_act > 0 and analog.num_units > 0:
                    retrieve_prob = analog.normalised_retrieval_act/sum_analog_norm_act
                    randomNum = random.random()
                    if retrieve_prob >= randomNum:
                        # retrieve the analog and all its tokens.
                        analog = retrieve_analog_contents(analog)
    else:
        # retrieve using the old Luce choice axiom.
        if bias_retrieval_analogs:
            # retrieve whole analogs.
            # create a normalised retrieval score for each analog (i.e.,
            # analog.total_act/analog.num_units) and sum up all normalised retrieval scores
            # for each analog in memory.
            sum_normalised_analogs = 0.0
            for analog in memory.analogs:
                # make sure analog has a .total_act and .num_units > 0.
                if analog.total_act > 0 and analog.num_units > 0:
                    # calculate my num_units.
                    analog.sum_num_units()
                    # calculate analog.normalised_retrieval_act and add that to sum_normalised_analogs.
                    analog.normalised_retrieval_act = analog.total_act/analog.num_units
                    sum_normalised_analogs += analog.normalised_retrieval_act
            # retrieve analogs using the Luce choice axiom.
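            # Added note: under the Luce choice rule used here, the probability of retrieving
            # analog i is p(i) = act_i / sum_j act_j, so an analog's chance of retrieval is
            # proportional to its normalised activation relative to all other active analogs
            # in memory.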
            for analog in memory.analogs:
                # if analog has a .total_act and .num_units > 0, then calculate the retrieve_prob via Luce choice.
                if analog.total_act > 0 and analog.num_units > 0:
                    retrieve_prob = analog.normalised_retrieval_act/sum_normalised_analogs
                    randomNum = random.random()
                    if retrieve_prob >= randomNum:
                        # retrieve the analog and all its tokens.
                        analog = retrieve_analog_contents(analog)
        else:
            # get sum of all max_acts of all P, RB and PO units in memorySet.
            P_sum, RB_sum, PO_sum = 0.0, 0.0, 0.0
            for myP in memory.Ps:
                P_sum += myP.max_act
            for myRB in memory.RBs:
                RB_sum += myRB.max_act
            for myPO in memory.POs:
                PO_sum += myPO.max_act
            # for each P, RB, and PO in memorySet (i.e., NOT in driver, recipient, or newSet),
            # retrieve it (and the proposition attached to it) into recipient according to the
            # Luce choice rule.
            # P units.
            for myP in memory.Ps:
                # make sure that the P is in memory and that P_sum > 0 (so you don't get a
                # divide-by-0 error). (NOTE: the original tested the undefined name myP_sum.)
                if (myP.set == 'memory') and (P_sum > 0):
                    retrieve_prob = myP.max_act/P_sum
                    randomNum = random.random()
                    if retrieve_prob > randomNum:
                        # retrieve P and all units attached into recipient.
                        myP.set = 'recipient'
                        # add the RBs.
                        for myRB in myP.myRBs:
                            myRB.set = 'recipient'
                            # add the POs.
                            myRB.myPred[0].set = 'recipient'
                            # if it has an object, add that object.
                            if len(myRB.myObj) >= 1:
                                myRB.myObj[0].set = 'recipient'
                            else:
                                # add its child P.
                                myRB.myChildP[0].set = 'recipient'
            # RB units.
            for myRB in memory.RBs:
                # make sure that the RB is in memory and that RB_sum > 0 (so you don't get a
                # divide-by-0 error). (NOTE: the original tested the undefined name RB.)
                if (myRB.set == 'memory') and (RB_sum > 0):
                    retrieve_prob = myRB.max_act/RB_sum
                    randomNum = random.random()
                    if retrieve_prob > randomNum:
                        # retrieve RB and all units attached into recipient.
                        myRB.set = 'recipient'
                        # add the Ps.
                        for myP in myRB.myParentPs:
                            myP.set = 'recipient'
                        # add the POs.
                        myRB.myPred[0].set = 'recipient'
                        # if it has an object, add that object.
                        if len(myRB.myObj) >= 1:
                            myRB.myObj[0].set = 'recipient'
                        else:
                            # add its child P.
                            myRB.myChildP[0].set = 'recipient'
            # PO units.
            for myPO in memory.POs:
                # make sure that the PO is in memory and that PO_sum > 0 (so you don't get a
                # divide-by-0 error). (NOTE: the original tested the undefined name myPO_sum.)
                if (myPO.set == 'memory') and (PO_sum > 0):
                    retrieve_prob = myPO.max_act/PO_sum
                    randomNum = random.random()
                    if retrieve_prob > randomNum:
                        # retrieve PO and all units attached into recipient.
                        myPO.set = 'recipient'
                        memory.recipient.POs.append(myPO)
                        # add the RBs.
                        for myRB in myPO.myRBs:
                            myRB.set = 'recipient'
                            # add the RB's P unit if it exists.
                            if len(myRB.myParentPs) > 0:
                                myRB.myParentPs[0].set = 'recipient'
    # done.
    return memory

# function to retrieve all of the contents of an analog from memory into the recipient.
def retrieve_analog_contents(analog):
    for myP in analog.myPs:
        myP.set = 'recipient'
    for myRB in analog.myRBs:
        myRB.set = 'recipient'
    for myPO in analog.myPOs:
        myPO.set = 'recipient'

# Take as input a set of nodes of a specific type (e.g., memory.POs, or memory.recipient.RBs)
# and return the most active unit.
def get_most_active_unit(tokens):
    # make sure that you've passed a non-empty array.
    if len(tokens) > 0:
        active_token = tokens[0]
        for token in tokens:
            if token.act > active_token.act:
                active_token = token
        # make sure that you're actually returning an active unit (not just the first token
        # if all tokens have the same activation (e.g., 0.0)).
        if active_token.act < .01:
            active_token = None
    else:
        active_token = None
    # done.
return active_token # Take as input a set of P units and a tag specifying 'parent' or 'child', and return most active unit of that type. def get_most_active_Punit(tokens, tag): if tag == 'parent': desired_mode = 1 elif tag == 'child': desired_mode = -1 else: desired_mode = 0 activity = 0.0 active_token = None for token in tokens: if token.act > activity and token.mode == desired_mode: active_token = token activity = token.act # done. return active_token # Take as input a unit, and return its mappingConnection link with the greatest weight. If it maps to no unit (i.e., mappingConnection.weight == 0), return 'null'. def get_my_max_map(unit): max_map = unit.mappingConnections[0] for mapping in unit.mappingConnections: if mapping.weight > max_map.weight: max_map = mapping # if the unit I most map to has weight 0, return null, else return unit I most map to. if max_map.weight == 0: max_map = 'null' return max_map # Take as input a unit, and return the unit to which it maps. If it maps to no unit, return 'null'. def get_my_max_map_unit(unit): max_map = unit.mappingConnections[0] for mapping in unit.mappingConnections: if mapping.weight > max_map.weight: max_map = mapping # if the unit I most map to has weight 0, return null, else return unit I most map to. if max_map.weight == 0: max_map_unit = 'null' else: # if I am in driver, return recipientToken, otherwise, return driverToken. if unit.set == 'driver': max_map_unit = max_map.recipientToken else: max_map_unit = max_map.driverToken return max_map_unit # function to do all the necessary checks for entropy/energy based magnitude comparison. def en_based_mag_checks(myPO, myPO2): # check if they code the same dimension (are they both connected to a semantic unit coding a dimension with a weight near 1?), and whether any POs are connected to any SDM semantics (i.e., "more", "less", or "same"). # first, do they code for intersecting dimensions. intersect_dim = list(set([x.mySemantic.dimension for x in myPO.mySemantics if x.mySemantic.dimension and x.weight > .7]).intersection([y.mySemantic.dimension for y in myPO2.mySemantics if y.mySemantic.dimension and y.weight > .7])) # second, does either PO have connections to any SDM semantics with weights above threshold(=.7). one_mag_sem_present = False both_mag_sem_present = False for link in myPO.mySemantics: if link.mySemantic.name == 'same' or link.mySemantic.name == 'different' or link.mySemantic.name == 'more' or link.mySemantic.name == 'less': if link.weight > .7: one_mag_sem_present = True break # now, if one_mag_sem_present is True, then check if there are mag_sem in myPO2. If there are, set both_mag_sem_present to True. if one_mag_sem_present: for link in myPO2.mySemantics: if link.mySemantic.name == 'same' or link.mySemantic.name == 'different' or link.mySemantic.name == 'more' or link.mySemantic.name == 'less': if link.weight > .7: both_mag_sem_present = True break else: # otherwise (i.e., one_mag_sem_present is False), then check if there are mag_sem in myPO2. If there are, set one_mag_sem_present to True (i.e., there are no mag_sem in myPO, but there are in myPO2). for link in myPO2.mySemantics: if link.mySemantic.name == 'same' or link.mySemantic.name == 'different' or link.mySemantic.name == 'more' or link.mySemantic.name == 'less': if link.weight > .7: one_mag_sem_present = True break # third, find the dimension of highest over-lap. That is, find the semantic that codes the 'value' for each dimension in intersect_dim with the highest weight. 
    # for each PO find the 'state' and 'value' semantics for each dimension in intersect_dim.
    high_dim = None
    high_dim_weight = 0.0
    for dim in intersect_dim:
        # add the weights of the 'value' semantics for the current dim for both myPO and myPO2.
        current_weight = None
        for link in myPO.mySemantics:
            if link.mySemantic.dimension == dim and link.mySemantic.ont_status == 'value':
                for link2 in myPO2.mySemantics:
                    if link2.mySemantic.dimension == dim and link2.mySemantic.ont_status == 'value':
                        current_weight = link.weight + link2.weight
                        break
                break
        # only consider dimensions for which a 'value' semantic was found on both POs
        # (current_weight stays None otherwise; guarding here also avoids comparing None
        # against a float).
        if current_weight is not None:
            if current_weight > high_dim_weight:
                high_dim = dim
                high_dim_weight = current_weight
            elif current_weight == high_dim_weight:
                # flip a coin to select the dimension.
                value = random.random()
                if value > .5:
                    high_dim = dim
                    high_dim_weight = current_weight
    # set intersect_dim to a list containing only high_dim.
    if high_dim:
        intersect_dim = [high_dim]
    else:
        intersect_dim = []
    # return the intersect_dim and the mag_sem_present flags.
    return intersect_dim, one_mag_sem_present, both_mag_sem_present

# function to do basic energy/entropy based magnitude comparison when no magnitude semantics are present.
def basic_en_based_mag_comparison(myPO, myPO2, intersect_dim, memory, mag_decimal_precision=0):
    # find the semantic links connecting to the absolute dimensional value.
    sem_link_PO = [link for link in myPO.mySemantics if (link.mySemantic.dimension == intersect_dim[0]) and link.mySemantic.ont_status == 'value']
    sem_link_PO2 = [link for link in myPO2.mySemantics if (link.mySemantic.dimension == intersect_dim[0]) and link.mySemantic.ont_status == 'value']
    # if the dimension is numeric (e.g., height-10), then get the average value of all
    # dimensional values in sem_link_PO and sem_link_PO2 and assign these to extent1 and
    # extent2 respectively.
    if isinstance(sem_link_PO[0].mySemantic.amount, numbers.Number):
        extent1 = sum([link.mySemantic.amount for link in sem_link_PO])/float(len(sem_link_PO))
        extent2 = sum([link.mySemantic.amount for link in sem_link_PO2])/float(len(sem_link_PO2))
    else:
        # otherwise, given that the dimension is non-numeric (e.g., colour-red), set extent1
        # and extent2 to the respective values (e.g., red, green) of the 'value' semantics
        # for the compared POs (indexing the first link in each list; the original indexed
        # the lists themselves).
        extent1 = sem_link_PO[0].mySemantic.amount
        extent2 = sem_link_PO2[0].mySemantic.amount
    # compute ent_magnitudeMoreLessSame().
    more, less, same_flag, iterations = ent_magnitudeMoreLessSame(extent1, extent2, mag_decimal_precision)
    # find any other dimensional semantics with high weights so that the weights can be
    # reduced by the entropy process.
    other_sem_links_PO = [link for link in myPO.mySemantics if (link.mySemantic.dimension != None) and (link.mySemantic.dimension != intersect_dim[0])]
    other_sem_links_PO2 = [link for link in myPO2.mySemantics if (link.mySemantic.dimension != None) and (link.mySemantic.dimension != intersect_dim[0])]
    sem_link_PO += other_sem_links_PO
    sem_link_PO2 += other_sem_links_PO2
    # connect the two POs to the appropriate relative magnitude semantics (based on the
    # invariant patterns detected just above). NOTE: ent_magnitudeMoreLessSame() returns the
    # whole-number transformed codes of the extents, so extent2 is transformed the same way
    # before checking which PO coded 'more' (the original compared the transformed code
    # against the raw extent, which could never match).
    if more == round(extent2*(pow(100, mag_decimal_precision)))+1:
        # call attach_mag_semantics() with myPO2 as firstPO and myPO as secondPO.
        memory = attach_mag_semantics(same_flag, myPO2, myPO, sem_link_PO2, sem_link_PO, memory)
    else:
        # call attach_mag_semantics() with myPO as firstPO and myPO2 as secondPO.
        memory = attach_mag_semantics(same_flag, myPO, myPO2, sem_link_PO, sem_link_PO2, memory)
    # return memory.
    return memory
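# Added illustration (not part of the model): how ent_magnitudeMoreLessSame() is invoked by
# the comparison routines above. The extents here are hypothetical values on a shared
# dimension (e.g., size 3 vs. size 5); the function returns which extent won the
# entropy-based competition ('more'), which lost ('less'), a same_flag, and the number of
# iterations the network took to settle.
def _example_magnitude_comparison():
    more, less, same_flag, iterations = ent_magnitudeMoreLessSame(3.0, 5.0)
    # with extents 3 and 5, 'more' should be the (whole-number transformed) code of 5,
    # 'less' the code of 3, and same_flag False.
    return more, less, same_flag, iterations

# function to do basic energy/entropy based magnitude refinement when magnitude semantics are already present.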
def basic_en_based_mag_refinement(myPO, myPO2, memory):
    # if there are magnitude semantics present, and there are some matching dimensions, then
    # activate the appropriate magnitude semantics and matching dimensions, and adjust
    # weights as appropriate (i.e., turn on the appropriate magnitude semantics for each PO,
    # and adjust weights accordingly).
    # find the dimension on which they match, if there is one.
    match_dim = list(set([x.mySemantic.dimension for x in myPO.mySemantics if x.mySemantic.dimension and x.mySemantic.ont_status == 'state' and x.weight > .95]).intersection([y.mySemantic.dimension for y in myPO2.mySemantics if y.mySemantic.dimension and y.mySemantic.ont_status == 'state' and y.weight > .95]))
    # find the magnitude semantic each PO is most strongly connected to, collecting its other
    # magnitude links (and weakly-weighted 'state' links on non-matching dimensions) so their
    # weights can be reduced below.
    def find_mag_links(the_PO):
        mag_link = None
        other_mags = []
        for mylink in the_PO.mySemantics:
            if mylink.mySemantic.name in ('same', 'different', 'more', 'less'):
                if mag_link:
                    # keep whichever magnitude link has the higher weight as the main link.
                    if mylink.weight > mag_link.weight:
                        other_mags.append(mag_link)
                        mag_link = mylink
                    else:
                        other_mags.append(mylink)
                else:
                    # mylink is the first magnitude link for this PO.
                    mag_link = mylink
            elif mylink.mySemantic.ont_status == 'state':
                # NOTE: the original tested (mylink not in match_dim), which is vacuously
                # True because match_dim holds dimension names, not links; the link's
                # dimension is compared instead.
                if mylink.mySemantic.dimension not in match_dim and mylink.weight < .7:
                    other_mags.append(mylink)
        return mag_link, other_mags
    myPO1_mag_link, myPO1_other_mags = find_mag_links(myPO)
    myPO2_mag_link, myPO2_other_mags = find_mag_links(myPO2)
    #######################################################################################
    # NOTE: DEBUG... for some reason I got a weird behaviour where there was no
    # myPO1_mag_link. We shouldn't be entering the refinement function if that's the case.
    # This code will force debug entry if we get that situation, so we can trace the problem...
    if myPO1_mag_link == None or myPO2_mag_link == None:
        pdb.set_trace()
    #######################################################################################
    # if either PO is attached to 'more', activate it and update weights, then activate the
    # other PO and the 'less' semantic and update weights. Otherwise, update the 'same' or
    # 'different' connections of both POs.
    if myPO1_mag_link.mySemantic.name == 'more':
        # update the weight to myPO1_mag_link.
        myPO1_mag_link.weight = 1.0
        # update the weight to other mag_links.
        for link in myPO1_other_mags:
            link.weight /= 2
        # update the weight to myPO2_mag_links.
        if myPO2_mag_link.mySemantic.name == 'less':
            # update the weight to myPO2_mag_link.
            myPO2_mag_link.weight = 1.0
            # update the weight to other mag_links.
            for link in myPO2_other_mags:
                link.weight /= 2
        else:
            myPO2_other_mags.append(myPO2_mag_link)
            # see if myPO2 is connected to the "less" semantic, and, if it is not, connect it
            # to the "less" semantic. (NOTE: the original searched for "more" here and
            # assigned from the wrong list; the comment heading this block and the symmetric
            # branch below both indicate that the other PO takes "less".)
            myPO2_mag_link = [link for link in myPO2_other_mags if link.mySemantic.name == 'less']
            if len(myPO2_mag_link) > 0:
                myPO2_mag_link = myPO2_mag_link[0]
            else:
                # create a link to the "less" semantic and register it with the PO, the
                # semantic, and memory (mirroring attach_mag_semantics()).
                less_semantic = [semantic for semantic in memory.semantics if semantic.name == 'less'][0]
                less_link = dataTypes.Link(myPO2, [], less_semantic, 1.0)
                myPO2.mySemantics.append(less_link)
                less_semantic.myPOs.append(less_link)
                memory.Links.append(less_link)
                myPO2_mag_link = less_link
            # update the weight to myPO2_mag_link.
            myPO2_mag_link.weight = 1.0
            # update the weight to other mag_links (sparing the main magnitude link just set).
            for link in myPO2_other_mags:
                if link is not myPO2_mag_link:
                    link.weight /= 2
    elif myPO2_mag_link.mySemantic.name == 'more':
        # update the weight to myPO2_mag_link.
        myPO2_mag_link.weight = 1.0
        # update the weight to other mag_links.
        for link in myPO2_other_mags:
            link.weight /= 2
        # update the weight to myPO1_mag_links.
        if myPO1_mag_link.mySemantic.name == 'less':
            # update the weight to myPO1_mag_link.
            myPO1_mag_link.weight = 1.0
            # update the weight to other mag_links.
            for link in myPO1_other_mags:
                link.weight /= 2
        else:
            myPO1_other_mags.append(myPO1_mag_link)
            # see if myPO1 is connected to the "less" semantic, and, if it is not, connect it
            # to the "less" semantic.
            myPO1_mag_link = [link for link in myPO1_other_mags if link.mySemantic.name == 'less']
            if len(myPO1_mag_link) > 0:
                myPO1_mag_link = myPO1_mag_link[0]
            else:
                # create a link to the "less" semantic and register it with the PO, the
                # semantic, and memory (mirroring attach_mag_semantics()).
                less_semantic = [semantic for semantic in memory.semantics if semantic.name == 'less'][0]
                less_link = dataTypes.Link(myPO, [], less_semantic, 1.0)
                myPO.mySemantics.append(less_link)
                less_semantic.myPOs.append(less_link)
                memory.Links.append(less_link)
                myPO1_mag_link = less_link
            # update the weight to myPO1_mag_link.
            myPO1_mag_link.weight = 1.0
            # update the weight to other mag_links (sparing the main magnitude link just set).
            for link in myPO1_other_mags:
                if link is not myPO1_mag_link:
                    link.weight /= 2
    else:
        # neither PO codes 'more': update the weight to myPO1_mag_link.
        myPO1_mag_link.weight = 1.0
        # update the weight to other mag_links.
        for link in myPO1_other_mags:
            link.weight /= 2
        # update the weight to myPO2_mag_link.
        myPO2_mag_link.weight = 1.0
        # update the weight to other mag_links.
        for link in myPO2_other_mags:
            link.weight /= 2
    # return memory.
    return memory

# function to calculate more/less/same from two codes of extent based on entropy and competition.
def ent_magnitudeMoreLessSame(extent1, extent2, mag_decimal_precision=0):
    # convert extents to whole numbers using the mag_decimal_precision variable, rounding,
    # and adding 1 (mag_decimal_precision and rounding make decimal values into whole
    # numbers, and adding 1 accounts for the possibility that someone has used 0 values for
    # magnitudes).
    extent1 = round(extent1*(pow(100, mag_decimal_precision)))+1
    extent2 = round(extent2*(pow(100, mag_decimal_precision)))+1
    # take the two representations of extent, and have them compete.
    # first build a simple entropyNet with the extents as lower-level nodes.
    entropyNet = dataTypes.entropyNet()
    for i in range(max(int(extent1), int(extent2))):
        new_sem = dataTypes.basicEntNode(False, True, [])
        entropyNet.inputs.append(new_sem)
    # and now make an object attached to each extent as a higher-level (output) node.
    # first make the nodes.
    extent_node1 = dataTypes.basicEntNode(True, False, [])
    extent_node2 = dataTypes.basicEntNode(True, False, [])
    entropyNet.outputs = [extent_node1, extent_node2]
    # connect each node to the correct extent semantics.
    for i in range(int(extent1)):
        # create a link between the ith input unit and extent_node1.
new_connection = dataTypes.basicLink(extent_node1, entropyNet.inputs[i], 1.0) entropyNet.connections.append(new_connection) # add the connection to the higher and lower nodes it links. extent_node1.connections.append(new_connection) entropyNet.inputs[i].connections.append(new_connection) for i in range(int(extent2)): # create a link between the ith input unit and extent_node2. new_connection = dataTypes.basicLink(extent_node2, entropyNet.inputs[i], 1.0) entropyNet.connections.append(new_connection) # add the connection to the higher and lower nodes it links. extent_node2.connections.append(new_connection) entropyNet.inputs[i].connections.append(new_connection) # set activations of all extent nodes to 1.0. for node in entropyNet.inputs: node.act = 1.0 # until the network settles (i.e., only one output node is active for 3 iterations), keep running. unsettled = 0 iterations = 0 # set gamma and delta. gamma, delta = 0.3, 0.1 delta_outputs_previous = 0.0 settled = 0 while settled < 3: # update the inputs to the output units. for node in entropyNet.outputs: node.clear_input() node.update_input(entropyNet) # update the activations of the output units. for node in entropyNet.outputs: node.update_act(gamma, delta) # FOR DEBUGGING: print inputs and outputs of all nodes. #print 'iteration = ', iterations #print 'INPUTS' #for node in entropyNet.inputs: # print node.input, ', ', node.act #print 'OUTPUTS' #for node in entropyNet.outputs: # print node.input, ', ', node.act # check for settling. if the delta_outputs has not changed, add 1 to settled, otherwise, clear unsettled. delta_outputs = entropyNet.outputs[0].act-entropyNet.outputs[1].act #print delta_outputs == delta_outputs_previous #print settled #print '' if delta_outputs == delta_outputs_previous: settled += 1 else: settled = 0 delta_outputs_previous = delta_outputs iterations += 1 # the active output node is 'more', and the inactive output node is 'less', or the two extents are equal. if entropyNet.outputs[0].act > entropyNet.outputs[1].act: more = extent1 less = extent2 same_flag = False elif entropyNet.outputs[0].act < entropyNet.outputs[1].act: more = extent2 less = extent1 same_flag = False else: # the two extents are equal. more = 'NONE' less = 'NONE' same_flag = True # return more, less, a flag indicating whether the values are the same (called same_flag), and the number of iterations to settling. return more, less, same_flag, iterations # Function to attache magnitude semantics to POs for use with entropy_ops. def attach_mag_semantics(same_flag, firstPO, secondPO, sem_link_PO, sem_link_PO2, memory): # NOTE: In this function, firstPO is the larger and secondPO is the smaller (unless same_flag is True, in which case the two are equal). # I think that the following bit of code is redundent, but it's here just in case to make sure that no magnitude semantics are attached to the POs if either of them are already attached to a magnitude semantic. Check if either PO is attached to 'more' or 'less' or 'same', and if so, set attach_mag_sem_flag to False. attach_mag_sem_flag = True for semantic in firstPO.mySemantics: if semantic.mySemantic.name == 'more' or semantic.mySemantic.name == 'less' or semantic.mySemantic.name == 'same': attach_mag_sem_flag = False # if attach_mag_sem_flag is True, then go ahead and attach magnitude semantics. if attach_mag_sem_flag: if same_flag == True: # connect both POs to the 'same' semantic. # find the 'same' semantic (if it does not exist, create it). 
sem_exist_flag = False for semantic in memory.semantics: if semantic.name == 'same': # connect the semantic to both POs. new_link1 = dataTypes.Link(firstPO, [], semantic, 1.0) firstPO.mySemantics.append(new_link1) semantic.myPOs.append(new_link1) memory.Links.append(new_link1) new_link2 = dataTypes.Link(secondPO, [], semantic, 1.0) secondPO.mySemantics.append(new_link2) semantic.myPOs.append(new_link2) memory.Links.append(new_link2) # set sem_exist_flag to True and break. sem_exist_flag = True break # if the 'same semantic does not exist, make it and connect it to the two POs. if not sem_exist_flag: # you have the create the semantics. same_semantic = dataTypes.Semantic('same', 'nil', None, 'SDM') memory.semantics.append(same_semantic) # create links between POs and 'same'. PO1_link = dataTypes.Link(firstPO, [], same_semantic, 1.0) firstPO.mySemantics.append(PO1_link) same_semantic.myPOs.append(PO1_link) memory.Links.append(PO1_link) PO2_link = dataTypes.Link(secondPO, [], same_semantic, 1.0) secondPO.mySemantics.append(PO2_link) same_semantic.myPOs.append(PO2_link) memory.Links.append(PO2_link) else: # connect firstPO to 'more' and secondPO to 'less'. # find the 'more' and 'less' semantics (if they do not exist, create them). sem_exist_flag = False sem_added = 0 for semantic in memory.semantics: if semantic.name == 'more': # connect the samentic to firstPO. new_link = dataTypes.Link(firstPO, [], semantic, 1.0) firstPO.mySemantics.append(new_link) semantic.myPOs.append(new_link) memory.Links.append(new_link) # set the sem_exist flag to True, and check if you've connected both 'more' and 'less' semantics (i.e., sem_added == 2). If yes, then break. sem_exist_flag = True sem_added += 1 if sem_added == 2: break elif semantic.name == 'less': # connect the samentic to secondPO. new_link = dataTypes.Link(secondPO, [], semantic, 1.0) secondPO.mySemantics.append(new_link) semantic.myPOs.append(new_link) memory.Links.append(new_link) # set the sem_exist flag to True, and check if you've connected both 'more' and 'less' semantics (i.e., sem_added == 2). If yes, then break. sem_exist_flag = True sem_added += 1 if sem_added == 2: break if not sem_exist_flag: # you have the create the semantics. more_semantic = dataTypes.Semantic('more', 'nil', None, 'SDM') less_semantic = dataTypes.Semantic('less', 'nil', None, 'SDM') memory.semantics.append(more_semantic) memory.semantics.append(less_semantic) # create links between firstPO and 'more'. more_link = dataTypes.Link(firstPO, [], more_semantic, 1.0) firstPO.mySemantics.append(more_link) more_semantic.myPOs.append(more_link) memory.Links.append(more_link) # create links between secondPO and 'less'. less_link = dataTypes.Link(secondPO, [], less_semantic, 1.0) secondPO.mySemantics.append(less_link) less_semantic.myPOs.append(less_link) memory.Links.append(less_link) # reduce weight to absolute value semantics to .5 (as this process constitutes a comparison). for link in sem_link_PO: link.weight /= 2 for link in sem_link_PO2: link.weight /= 2 # return memory. return memory # function to calculate over-all same/diff from entropy. def ent_overall_same_diff(semantic_array): # check semantic array and calculate a similarity score as ratio of unshared to total features. error_array = [] act_array = [] for semantic in semantic_array: # if activation is greater than 0.1, add activation to act_array and add 1.0 to error_array. if semantic.act > 0.1: act_array.append(semantic.act) error_array.append(1.0) # calcuate the error by subtracting act_array from error_array. 
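    # Added note: with act_array holding the activations of all semantics active above 0.1,
    # and error_array holding a 1.0 for each such semantic, the difference ratio computed
    # below is sum_i (1 - act_i) / sum_i act_i, i.e., the ratio of unshared (error) signal
    # to shared (activation) signal; a value near 0.0 means the compared items drive their
    # shared semantics almost perfectly.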
# NOTE: You can do this operation either with numpy (turning the lists into arrays) or with map and operator. #a_error_array = numpy.array(error_array) #a_act_array = numpy.array(act_array) #diff_array = a_error_array - a_act_array diff_array = list(map(operator.sub, error_array, act_array)) sum_diff = sum(diff_array) sum_act = sum(act_array) # make sure that you're not dividing by 0 (which can happen if you've tried to compute entropy for empty objects). if sum_act > 0: difference_ratio = float(sum_diff)/float(sum_act) else: difference_ratio = 'undefined' # return the difference_ratio. return difference_ratio # prediction routine. def predication_routine(memory, made_new_pred, gamma): # if you have made a new pred (i.e., made_new_pred == True), then learn connections between that pred and active semantics. Otherwise, check if the most active recipient PO meets predication requirements, and if so, infer a new pred. if made_new_pred: # for each active semantic, learn connections between that semantic and the new pred (NOTE: the new pred is the last PO in memory.POs). for semantic in memory.semantics: connected_to_newPO = False # check all the semantic's Links. If any of the semantic Links are to the new PO, set connected_to_newPO to True (i.e., don't bother making a Link for the current semantic and the new PO because one already exits), and update the connection between the new PO and the current semantic. for Link in semantic.myPOs: if memory.newSet.POs[-1] == Link.myPO: # update the connection weight. Link.weight += (1*(Link.mySemantic.act-Link.weight)*gamma) connected_to_newPO = True # if not connected_to_newPO, then learn a connection if semantic.act > 0. if (not connected_to_newPO) and (semantic.act > 0): # infer a new Link for new pred and active semantic. new_Link = dataTypes.Link(memory.newSet.POs[-1], 'nil', semantic, 0.0) # update the weight of the Link. new_Link.weight = 1*(semantic.act-0)*gamma # connect new Link to semantic and new pred and add Link to memory.Links. memory.newSet.POs[-1].mySemantics.append(new_Link) semantic.myPOs.append(new_Link) memory.Links.append(new_Link) else: # get the most active recipient PO. active_rec_PO = get_most_active_unit(memory.recipient.POs) # make sure that there is actually and active_rec_PO (i.e., active_rec_PO != None), of you'll have a crash when trying to check the active_rec_PO.predOrObj. if active_rec_PO != None: # if the most active unit is an object and is active above threshold(=.6), and the unit I map to most in the driver is also active above threshold, and our mapping connection is above .75, then infer new pred and RB and connect new pred to new RB, and new RB to active recipient object, and set made_new_pred to True. if active_rec_PO.predOrObj == 0 and active_rec_PO.act > .6: mapping_connection = get_my_max_map(active_rec_PO) if mapping_connection != 'null': if (mapping_connection.driverToken.act) > .6 and (mapping_connection.weight > .75): # copy the recipient object into the newSet. # make a new object as a copy of active_rec_PO, and fill in .my_maker_unit and .my_made_unit fields for the new object and for active_rec_PO. new_obj = dataTypes.POUnit(active_rec_PO.name, 'newSet', 'null', True, 'null', 0) new_obj.my_maker_unit = active_rec_PO active_rec_PO.my_made_unit = new_obj # fill in the semantics. 
                        for link in active_rec_PO.mySemantics:
                            # create a new link between link.mySemantic and new_obj.
                            new_link = dataTypes.Link(new_obj, None, link.mySemantic, link.weight)
                            # add the new_link to memory, to new_obj, and to link.mySemantic.
                            memory.Links.append(new_link)
                            new_obj.mySemantics.append(new_link)
                            link.mySemantic.myPOs.append(new_link)
                        # add the new_obj to memory and to newSet.
                        memory.POs.append(new_obj)
                        memory.newSet.POs.append(new_obj)
                        # infer a new pred and a new RB.
                        # give the new PO the name 'nil' + len(memory.POs)+1.
                        new_PO_name = 'nil' + str(len(memory.POs)+1)
                        new_pred = dataTypes.POUnit(new_PO_name, 'newSet', 'null', True, 'null', 1)
                        new_RB_name = new_PO_name+'+'+new_obj.name
                        new_RB = dataTypes.RBUnit(new_RB_name, 'newSet', 'null', True, 'null')
                        # connect new_pred to the new RB and vice versa.
                        new_pred.myRBs.append(new_RB)
                        new_RB.myPred.append(new_pred)
                        # connect new_obj to the new RB and vice versa.
                        new_RB.myObj.append(new_obj)
                        new_obj.myRBs.append(new_RB)
                        # add new_pred and new_RB to memory, and to newSet.
                        # NOTE: you are NOT connecting these units to an analog now. They will
                        # be connected to a new analog at the end of predication.
                        memory.POs.append(new_pred)
                        memory.RBs.append(new_RB)
                        memory.newSet.POs.append(new_pred)
                        memory.newSet.RBs.append(new_RB)
                        # update the made_new_pred flag to True.
                        made_new_pred = True
    # done.
    return memory, made_new_pred

# form a new relation (P unit).
def rel_form_routine(memory, inferred_new_P):
    # check to see if a new P has been inferred (i.e., inferred_new_P == True). If not, AND
    # there are no other active Ps in the recipient, infer a new P unit in the recipient.
    # Connect the new P to recipient RBs active above threshold(=.8).
    if inferred_new_P:
        # the new P is the last inferred P, so the last unit in recipient.Ps.
        # connect the new P to active RBs above threshold that are not already connected to
        # the P unit (and vice versa).
        for myRB in memory.recipient.RBs:
            if (myRB.act >= .8) and (myRB not in memory.recipient.Ps[-1].myRBs):
                # connect the RB and the new P unit, and update the P unit's .myanalog field
                # if the field is empty.
                memory.recipient.Ps[-1].myRBs.append(myRB)
                myRB.myParentPs.append(memory.recipient.Ps[-1])
                if not memory.recipient.Ps[-1].myanalog:
                    # set the new P's .myanalog to the current RB's analog.
                    memory.recipient.Ps[-1].myanalog = myRB.myanalog
                    # add the new P to the analog's .myPs list.
                    myRB.myanalog.myPs.append(memory.recipient.Ps[-1])
    else:
        # the name of the new P should be RB1+RB2+...RBx. For now leave it blank and name it
        # after the phase set.
        my_name = ''
        new_P = dataTypes.PUnit(my_name, 'recipient', None, True, None)
        # NOTE: the .myanalog field is left blank for now and updated if the new P sticks
        # around (i.e., if it connects to multiple RBs). The updating is done in the
        # .do_rel_form() function above.
        # (my_name, my_set, analog, inferred_now, myanalog)
        memory.Ps.append(new_P)
        memory.recipient.Ps.append(new_P)
        # set inferred_new_P to True.
        inferred_new_P = True
    # done.
    return memory, inferred_new_P

# schematization routine.
def schematization_routine(memory, gamma, phase_set_iterator):
    # for each driver token unit, if that unit is the most active unit of its type (e.g., the
    # most active P) and maps to a recipient unit, check whether it has caused a unit to be
    # inferred in newSet. If it has, set that newSet unit's activation to 1.0 and update its
    # connections to other newSet units (Ps to RBs, RBs to Ps and POs, POs to RBs) and, for
    # POs, to semantic units. If it has not, cause a new unit to be inferred.
    # find the most active P.
    # do this first for parent Ps.
    most_active_P = get_most_active_Punit(memory.driver.Ps, 'parent')
    # make sure I've returned a unit.
    if most_active_P:
        # if most_active_P has caused a unit to be inferred in newSet, update the newSet
        # unit; otherwise, infer a newSet unit corresponding to most_active_P.
        if most_active_P.my_made_unit:
            # if I am active above threshold, update my newSet unit (set activation to 1.0,
            # connect to active newSet RBs).
            if most_active_P.act > .4:
                most_active_P.my_made_unit.act = 1.0
                for myRB in memory.newSet.RBs:
                    if myRB.act > .5:
                        # connect any active newSet RB and most_active_P's inferred unit as parent P.
                        # connect as parent if the units are not already connected.
                        if not myRB in most_active_P.my_made_unit.myRBs:
                            most_active_P.my_made_unit.myRBs.append(myRB)
                            myRB.myParentPs.append(most_active_P.my_made_unit)
        else:
            # I have not caused a unit to be inferred.
            # check if I am active above threshold (=.4), and I map to a recipient unit above threshold (=.75).
            if (most_active_P.act >= .4) and (most_active_P.max_map >= .75):
                # infer a newSet P unit (with activation 1.0) and add it to memory. Set the
                # value of the .myanalog field to 'null', as you will create an analog to
                # house all newSet units at the end of the .doSchematization() routine above.
                newSet_new_P = dataTypes.PUnit('nil', 'newSet', 0, True, 'null')
                newSet_new_P.mode = most_active_P.mode
                newSet_new_P.act = 1.0
                newSet_new_P.my_maker_unit = most_active_P
                most_active_P.my_made_unit = newSet_new_P
                memory.Ps.append(newSet_new_P)
                memory.newSet.Ps.append(newSet_new_P)
    # second, find the most active P among child Ps.
    most_active_P = get_most_active_Punit(memory.driver.Ps, 'child')
    # make sure I've returned a unit.
    if most_active_P:
        # if most_active_P has caused a unit to be inferred in newSet, update the newSet
        # unit; otherwise, infer a newSet unit corresponding to most_active_P.
        if most_active_P.my_made_unit:
            # if I am active above threshold, update my newSet unit (set activation to 1.0,
            # connect to active newSet RBs).
            if most_active_P.act > .4:
                most_active_P.my_made_unit.act = 1.0
                for myRB in memory.newSet.RBs:
                    if myRB.act > .5:
                        # connect any active newSet RB and most_active_P's inferred unit as child P.
                        # connect as child if the units are not already connected (NOTE: the
                        # original checked membership in .myRBs while appending to
                        # .myParentRBs; the check now matches the list being extended).
                        if not myRB in most_active_P.my_made_unit.myParentRBs:
                            most_active_P.my_made_unit.myParentRBs.append(myRB)
                            myRB.myChildP.append(most_active_P.my_made_unit)
        else:
            # I have not caused a unit to be inferred.
            # check if I am active above threshold (=.4), and I map to a recipient unit above threshold (=.75).
            if (most_active_P.act >= .4) and (most_active_P.max_map >= .75):
                # infer a newSet P unit (with activation 1.0) and add it to memory. Set the
                # value of the .myanalog field to 'null', as you will create an analog to
                # house all newSet units at the end of the .doSchematization() routine above.
                newSet_new_P = dataTypes.PUnit('nil', 'newSet', 0, True, 'null')
                newSet_new_P.mode = most_active_P.mode
                newSet_new_P.act = 1.0
                newSet_new_P.my_maker_unit = most_active_P
                most_active_P.my_made_unit = newSet_new_P
                memory.Ps.append(newSet_new_P)
                memory.newSet.Ps.append(newSet_new_P)
    # find the most active RB.
    most_active_RB = get_most_active_unit(memory.driver.RBs)
    # if most_active_RB has caused a unit to be inferred in newSet, update the newSet unit;
    # otherwise, infer a newSet unit corresponding to most_active_RB.
    if most_active_RB:
        if most_active_RB.my_made_unit:
            # update my newSet unit (set activation to 1.0, connect to active newSet POs).
most_active_RB.my_made_unit.act = 1.0 # I don't think you need to do learning for Ps as you do it for Ps above. #for PO in memory.newSet.POs: for myPO in memory.newSet.POs: if myPO.act > .5: # connect as pred or object if not already connected. if myPO.predOrObj == 1: if not myPO in most_active_RB.my_made_unit.myPred: most_active_RB.my_made_unit.myPred.append(myPO) myPO.myRBs.append(most_active_RB.my_made_unit) elif myPO.predOrObj == 0: if not myPO in most_active_RB.my_made_unit.myObj: most_active_RB.my_made_unit.myObj.append(myPO) myPO.myRBs.append(most_active_RB.my_made_unit) else: # I have not caused a unit to be inferred. # check if I am active above threshold (=.4), and I map to a recipient unit above threshold (=.75). if (most_active_RB.act >= .4) and (most_active_RB.max_map >= .75): # infer a newSet RB unit (with activation 1.0) and add it to memory. Set the value of the .myanalog field to 'null', as you will create an analog to house all newSet units at the end of the .doSchematization() routine above. newSet_new_RB = dataTypes.RBUnit('nil', 'newSet', 0, True, 'null') newSet_new_RB.act = 1.0 newSet_new_RB.my_maker_unit = most_active_RB most_active_RB.my_made_unit = newSet_new_RB memory.RBs.append(newSet_new_RB) memory.newSet.RBs.append(newSet_new_RB) # find most active myPO. most_active_PO = get_most_active_unit(memory.driver.POs) # if most_active_PO has caused a unit to be inferred in newSet, update the newSet unit, otherwise, infer a newSet unit corresponding to most_active_PO. if most_active_PO: if most_active_PO.my_made_unit: # update my newSet unit (set activation to 1.0, connect to active semantics). most_active_PO.my_made_unit.act = 1.0 for semantic in memory.semantics: # check if I am connected to the newSet myPO. If yes, update my connection based on semantic activation. If not, and I am active, infer a connection. connected_to_newSetPO = False # check all the semantic's Links. If any of the semantic Links are to the newSet_PO, set connected_to_newSetPO to True (i.e., don't bother making a Link for the current semantic and the newSet_PO because one already exits), and update the connection between the newSet_PO and the current semantic. for Link in semantic.myPOs: if most_active_PO.my_made_unit == Link.myPO: # update the connection weight. Link.weight += (1*(Link.mySemantic.act-Link.weight)*gamma) connected_to_newSetPO = True # if not connected_to_newPO, then learn a connection if semantic.act > 0. if (not connected_to_newSetPO) and (semantic.act > 0): # infer a new Link for new PO and active semantic. new_Link = dataTypes.Link(most_active_PO.my_made_unit, 'nil', semantic, 0.0) # update the weight of the Link. new_Link.weight = 1*(semantic.act-0)*gamma # connect new Link to semantic and new pred and add Link to memory.Links. most_active_PO.my_made_unit.mySemantics.append(new_Link) semantic.myPOs.append(new_Link) memory.Links.append(new_Link) else: # I have not caused a unit to be inferred. # check if I am active above threshold (=.4), and I map to a recipient unit above threshold (=.75). if (most_active_PO.act >= .4) and (most_active_PO.max_map >= .75): # infer a newSet PO unit (with activation 1.0) and add it to memory. Set the value of the .myanalog field to 'null', as you will create an analog to house all newSet units at the end of the .doSchematization() routine above. # give the new PO the name 'nil' + the len(memory.POs)+1. 
new_PO_name = 'nil' + str(len(memory.POs)+1) newSet_new_PO = dataTypes.POUnit(new_PO_name, 'newSet', 0, True, 'null', most_active_PO.predOrObj) newSet_new_PO.act = 1.0 newSet_new_PO.my_maker_unit = most_active_PO most_active_PO.my_made_unit = newSet_new_PO memory.POs.append(newSet_new_PO) memory.newSet.POs.append(newSet_new_PO) # done. return memory # function to perform relational generalization. def rel_gen_routine(memory, gamma, recip_analog): # for relational generalization: Recall that only RBs in the driver from the analog that contains mapped tokens are firing. If active driver unit maps to no unit in the recipient, then infer a unit in recipient, set it's activation to 1, and attach it to active units above and below itself (e.g., connect new RBs to Ps and POs, new POs to RBs and semantics, etc.) to which it is NOT already connected. Make sure to mark that the new unit is inferred from the active driver unit (i.e., update .my_made_unit of driver unit and .my_maker_unit of new recipient unit). Newly inferred tokens become part of the same analog as the items in the recipient that map to the analog in the driver that contains the tokens that are currently driving relational generalisation. For example, if the tokens in the driver are part of analog 1, and analog 1 elements map to items in the recipient that are in analog 3, the newly inferred units in the recipient become part of analog 3. # start with the PO units. Find the most active driver PO, and make sure that it is both active above .5 and does not map to anything. active_PO = get_most_active_unit(memory.driver.POs) if active_PO is not None: if active_PO.act >= .5 and active_PO.max_map == 0: # if the active unit has not made a unit (i.e., my_made_unit is empty), then make a new unit in recipient to correspond to active unit. Otherwise (i.e., if active unit HAS made a new unit), update the connections between the active unit's made unit and other active recipient units. if active_PO.my_made_unit is None: # infer a new PO in recipient of same type as active driver PO, and set activation to 1. mytype = active_PO.predOrObj # give the new PO the name 'nil' + the len(memory.POs)+1. new_PO_name = 'nil' + str(len(memory.POs)+1) new_PO = dataTypes.POUnit(new_PO_name, 'recipient', None, True, recip_analog, mytype) new_PO.act = 1 # update the .my_made_unit for driver unit and .my_maker_unit for new recipient unit. active_PO.my_made_unit = new_PO new_PO.my_maker_unit = active_PO # add new PO to memory and memory.recipient. memory.POs.append(new_PO) memory.recipient.POs.append(new_PO) else: # update connections between the new unit and below units. # new unit is a PO, so update connections to semantics. # set activation of new PO to 1. active_PO.my_made_unit.act = 1 # update connections between the new recipient unit and active semantics. # for each active semantic, learn connections between that semantic and the new myPO. for semantic in memory.semantics: connected_to_newPO = False # check all the semantic's Links. If any of the semantic Links are to the new PO, set connected_to_newPO to True (i.e., don't bother making a Link for the current semantic and the new PO because one already exits), and update the connection between the new PO and the current semantic. for Link in semantic.myPOs: if active_PO.my_made_unit == Link.myPO: # update the connection weight. 
Link.weight += (1*(Link.mySemantic.act-Link.weight)*gamma) connected_to_newPO = True #print Link.mySemantic.name #print Link.weight # if not connected_to_newPO, then learn a connection if semantic.act > 0. if (not connected_to_newPO) and (semantic.act > 0): # infer a new Link for new PO and active semantic. new_Link = dataTypes.Link(active_PO.my_made_unit, 'nil', semantic, 0.0) # update the weight of the Link. new_Link.weight = 1*(semantic.act-0)*gamma # connect new Link to semantic and new pred and add Link to memory.Links. active_PO.my_made_unit.mySemantics.append(new_Link) semantic.myPOs.append(new_Link) memory.Links.append(new_Link) # move to the RB units. Find the most active driver RB, and make sure that it is both active above .5 and does not map to anything. active_RB = get_most_active_unit(memory.driver.RBs) if active_RB is not None: if active_RB.act >= .5 and active_RB.max_map == 0: # if the active unit has not made a unit (i.e., my_made_unit is empty), then make a new unit in recipient to correspond to active unit. Otherwise (i.e., if active unit HAS made a new unit), update the connections between the active unit's made unit and other active recipient units. if active_RB.my_made_unit is None: # infer a new RB in recipient, and set activation to 1. new_RB = dataTypes.RBUnit('nil', 'recipient', None, True, recip_analog) new_RB.act = 1 # update the .my_made_unit for driver unit and .my_maker_unit for new recipient unit. active_RB.my_made_unit = new_RB new_RB.my_maker_unit = active_RB # add new RB to memory and memory.recipient. memory.RBs.append(new_RB) memory.recipient.RBs.append(new_RB) else: # update connections between the new unit and other recipient units below. # new unit is a RB, so update connections to POs. # set activation of new RB to 1. active_RB.my_made_unit.act = 1 # Find the most active PO and connect to the new unit if not already connected, and PO is active above 0.7. most_active_PO = get_most_active_unit(memory.recipient.POs) if most_active_PO: if most_active_PO.act >= .7: if (not most_active_PO in active_RB.my_made_unit.myPred) and most_active_PO.predOrObj == 1: most_active_PO.myRBs.append(active_RB.my_made_unit) active_RB.my_made_unit.myPred.append(most_active_PO) elif (not most_active_PO in active_RB.my_made_unit.myObj) and most_active_PO.predOrObj == 0: most_active_PO.myRBs.append(active_RB.my_made_unit) active_RB.my_made_unit.myObj.append(most_active_PO) # finally, the P units. # first, the P units in child mode. Find the most active driver P in child mode, and make sure that it is both active above .5 and does not map to anything. active_P = get_most_active_Punit(memory.driver.Ps, 'child') # make sure that the returned active P is actually there (i.e., there is an active P in child mode). if active_P: if active_P.act >= .5 and active_P.max_map == 0: # if the active unit has not made a unit (i.e., my_made_unit is empty), then make a new unit in recipient to correspond to active unit. Otherwise (i.e., if active unit HAS made a new unit), update the connections between the active unit's made unit and other active recipient units. if active_P.my_made_unit is None: # infer a new P in recipient, make sure it is in child mode, and set its activation to 1. new_P = dataTypes.PUnit('nil', 'recipient', None, True, recip_analog) new_P.mode = -1 new_P.act = 1 # update the .my_made_unit for driver unit and .my_maker_unit for new recipient unit. active_P.my_made_unit = new_P new_P.my_maker_unit = active_P # add new P to memory and memory.recipient. 
                memory.Ps.append(new_P)
                memory.recipient.Ps.append(new_P)
            else:
                # update connections between the new unit and other recipient units.
                # set activation of the new P to 1.
                active_P.my_made_unit.act = 1
                # new unit is a P in child mode, so update connections to RBs above. Find the
                # most active RB and connect it to the new unit if it is not already connected
                # and is active above 0.7 (checking first that there is an active RB at all,
                # since get_most_active_unit() can return None).
                most_active_RB = get_most_active_unit(memory.recipient.RBs)
                if most_active_RB:
                    if most_active_RB.act >= .7 and (not most_active_RB in active_P.my_made_unit.myParentRBs):
                        active_P.my_made_unit.myParentRBs.append(most_active_RB)
                        most_active_RB.myChildP.append(active_P.my_made_unit)
    # now for the P units in parent mode. Find the most active driver P in parent mode, and
    # make sure that it is both active above .5 and does not map to anything.
    active_P = get_most_active_Punit(memory.driver.Ps, 'parent')
    # make sure that the returned active P is actually there (i.e., there is an active P in parent mode).
    if active_P:
        if active_P.act >= .5 and active_P.max_map == 0:
            # if the active unit has not made a unit (i.e., my_made_unit is empty), then make
            # a new unit in recipient to correspond to the active unit. Otherwise (i.e., if
            # the active unit HAS made a new unit), update the connections between the active
            # unit's made unit and other active recipient units.
            if active_P.my_made_unit is None:
                # infer a new P in recipient, make sure it is in parent mode, and set its activation to 1.
                new_P = dataTypes.PUnit('nil', 'recipient', None, True, recip_analog)
                new_P.mode = 1
                new_P.act = 1
                # update the .my_made_unit for the driver unit and .my_maker_unit for the new recipient unit.
                active_P.my_made_unit = new_P
                new_P.my_maker_unit = active_P
                # add the new P to memory and memory.recipient.
                memory.Ps.append(new_P)
                memory.recipient.Ps.append(new_P)
            else:
                # update connections between the new unit and other recipient units.
                # set activation of the new P unit to 1.
                active_P.my_made_unit.act = 1
                # new unit is a P in parent mode, so update connections to RBs below. Find the
                # most active RB and connect it to the new unit if it is not already
                # connected, is active above 0.5, and is not already connected to another P
                # unit (again checking first that there is an active RB at all).
                most_active_RB = get_most_active_unit(memory.recipient.RBs)
                if most_active_RB:
                    if most_active_RB.act >= .5 and (not most_active_RB in active_P.my_made_unit.myRBs) and len(most_active_RB.myParentPs) < 1:
                        active_P.my_made_unit.myRBs.append(most_active_RB)
                        most_active_RB.myParentPs.append(active_P.my_made_unit)
    # done.
    return memory

# function to find the analog in the recipient that contains all the mapped recipient units.
# Currently for use only with the rel_gen_routine() function.
def find_recip_analog(memory):
    # search through the POs in the recipient and find their analog. (You only need to search
    # the POs because all recipient units that map have already been compiled into a single
    # analog, and all analogs contain at least POs.)
    for myPO in memory.recipient.POs:
        if myPO.max_map > 0.0:
            recip_analog = myPO.myanalog
            break
    # done.
    return recip_analog

# function to find the analog in the driver that contains all the mapped driver units.
# Currently for use only with the do_rel_gen() routine from the runDORA object.
def find_driver_analog_rel_gen(memory):
    # search through the POs in the driver and find their analog. (You only need to search
    # the POs because all driver units that map are from a single analog, and all analogs
    # contain at least POs.)
    for myPO in memory.driver.POs:
        if myPO.max_map > 0.0:
            driver_analog = myPO.myanalog
            break
    # done.
    return driver_analog
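# Added illustration (not part of the model): how the analog-finding helper above is
# typically paired with rel_gen_routine() when running relational generalization. The
# memory object is assumed to already contain mapped driver and recipient tokens, and gamma
# is the usual activation growth parameter.
def _example_rel_gen_setup(memory, gamma):
    # find the recipient analog that the mapped driver analog corresponds to, then run one
    # pass of relational generalization into that analog.
    recip_analog = find_recip_analog(memory)
    memory = rel_gen_routine(memory, gamma, recip_analog)
    return memory

# function to fix high weight of predicate to 1.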
# function to fix high weight of predicate to 1. This function updates the weights of the most strongly connected semantic(s) of the pred to 1.0.
def calibrate_weight(memory):
    for myPO in memory.driver.POs:
        max_weight_links = []
        max_weight = 0.0
        for link in myPO.mySemantics:
            if link.weight > max_weight:
                # a new maximum: start the list of maximally weighted links over.
                max_weight_links = [link]
                max_weight = link.weight
            elif link.weight == max_weight:
                max_weight_links.append(link)
        # if the highest weight is less than 1, then recalibrate weights.
        if max_weight < 1:
            for link in max_weight_links:
                link.weight = 1.0
    # return memory.
    return memory

# function to update names for all token units in memory.
def update_Names_all(memory):
    for myPO in memory.POs:
        # name me after my most weighted semantics.
        myPO.name = ''
        most_weighted_sem = 0.0
        for Link in myPO.mySemantics:
            if Link.weight > most_weighted_sem:
                most_weighted_sem = Link.weight
        for Link in myPO.mySemantics:
            if Link.weight == most_weighted_sem:
                myPO.name += '&'
                myPO.name += Link.mySemantic.name
    for myRB in memory.RBs:
        RB_name = myRB.myPred[0].name + '+' + myRB.myObj[0].name
        myRB.name = RB_name
    for myP in memory.Ps:
        P_name = ''
        for myRB in myP.myRBs:
            P_name += '+'
            P_name += myRB.name
        myP.name = P_name
    # done.
    return memory

# function to update names for all token units in memory with name 'nil'.
def update_Names_nil(memory):
    for myPO in memory.POs:
        if myPO.name == 'nil' or 'nil' in myPO.name:
            myPO.name = ''
            # name me after my most weighted semantics.
            most_weighted_sem = 0.0
            for Link in myPO.mySemantics:
                if Link.weight > most_weighted_sem:
                    most_weighted_sem = Link.weight
            for Link in myPO.mySemantics:
                if Link.weight == most_weighted_sem:
                    myPO.name += '&'
                    myPO.name += Link.mySemantic.name
    for myRB in memory.RBs:
        if myRB.name == 'nil' or 'nil' in myRB.name:
            if len(myRB.myObj) > 0:
                RB_name = myRB.myPred[0].name + '+' + myRB.myObj[0].name
            elif len(myRB.myChildP) > 0:
                RB_name = myRB.myPred[0].name + '+' + myRB.myChildP[0].name
            else:
                RB_name = 'somethingWrongWithThisRB'
            myRB.name = RB_name
    for myP in memory.Ps:
        if myP.name == 'nil' or 'nil' in myP.name:
            P_name = ''
            for myRB in myP.myRBs:
                P_name += '+'
                P_name += myRB.name
            myP.name = P_name
    # done.
    return memory

# function to give names to newSet Ps, RBs, and POs after inference.
def give_Names_inferred(memory):
    for myPO in memory.newSet.POs:
        # name me after my most weighted semantics.
        myPO.name = ''
        most_weighted_sem = 0.0
        for Link in myPO.mySemantics:
            if Link.weight > most_weighted_sem:
                most_weighted_sem = Link.weight
        for Link in myPO.mySemantics:
            if Link.weight == most_weighted_sem:
                myPO.name += '&'
                myPO.name += Link.mySemantic.name
    for myRB in memory.newSet.RBs:
        RB_name = myRB.myPred[0].name + '+' + myRB.myObj[0].name
        myRB.name = RB_name
    for myP in memory.newSet.Ps:
        P_name = ''
        for myRB in myP.myRBs:
            P_name += '+'
            P_name += myRB.name
        myP.name = P_name
    for Group in memory.newSet.Ps:
        Group_name = ''
        for myP in Group.myRBs:
            Group_name += '+'
            Group_name += myP.name
        Group.name = Group_name
    # done.
    return memory
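
# Illustration (not part of the model code): the naming scheme used by the
# update_Names functions above renames a PO after all of its maximally
# weighted semantics, prefixing each with '&'. A sketch with plain
# (name, weight) pairs standing in for Link objects:
def _demo_po_name(sem_weights):
    top = max(w for _, w in sem_weights)
    return ''.join('&' + name for name, w in sem_weights if w == top)
# _demo_po_name([('red', 1.0), ('round', 1.0), ('big', 0.4)]) -> '&red&round'
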
# function to clear the set field of every token in memory (i.e., to clear WM).
def clearTokenSet(memory):
    # for each P, RB, and PO, clear the set field.
    for Group in memory.Groups:
        Group.set = 'memory'
    for myP in memory.Ps:
        myP.set = 'memory'
    for myRB in memory.RBs:
        myRB.set = 'memory'
    for myPO in memory.POs:
        myPO.set = 'memory'
    # done.
    return memory

# function to clear the driver.
def clearDriverSet(memory):
    # for each P, RB, and PO, clear the set field.
    for Group in memory.driver.Groups:
        Group.set = 'memory'
    for myP in memory.driver.Ps:
        myP.set = 'memory'
    for myRB in memory.driver.RBs:
        myRB.set = 'memory'
    for myPO in memory.driver.POs:
        myPO.set = 'memory'
    # now clear the memory.driver fields.
    memory.driver.Ps = []
    memory.driver.RBs = []
    memory.driver.POs = []
    # done.
    return memory

# function to clear the recipient.
def clearRecipientSet(memory):
    # for each P, RB, and PO, clear the set field.
    for Group in memory.recipient.Groups:
        Group.set = 'memory'
    for myP in memory.recipient.Ps:
        myP.set = 'memory'
    for myRB in memory.recipient.RBs:
        myRB.set = 'memory'
    for myPO in memory.recipient.POs:
        myPO.set = 'memory'
    # now clear the memory.recipient fields.
    memory.recipient.Ps = []
    memory.recipient.RBs = []
    memory.recipient.POs = []
    # done.
    return memory

# reset the .inferred, .my_made_unit, and .my_maker_unit fields of all tokens.
def reset_inferences(memory):
    for Group in memory.Groups:
        Group.inferred = False
        Group.my_made_unit = None
        Group.my_maker_unit = None
    for myP in memory.Ps:
        myP.inferred = False
        myP.my_made_unit = None
        myP.my_maker_unit = None
    for myRB in memory.RBs:
        myRB.inferred = False
        myRB.my_made_unit = None
        myRB.my_maker_unit = None
    for myPO in memory.POs:
        myPO.inferred = False
        myPO.my_made_unit = None
        myPO.my_maker_unit = None
    # done.
    return memory

# function to clear the my_maker_unit and my_made_unit of all tokens in memory. Used after learning is done and WM is cleared.
def reset_maker_made_units(memory):
    for Group in memory.Groups:
        Group.my_maker_unit = None
        Group.my_made_unit = None
    for myP in memory.Ps:
        myP.my_maker_unit = None
        myP.my_made_unit = None
    for myRB in memory.RBs:
        myRB.my_maker_unit = None
        myRB.my_made_unit = None
    for myPO in memory.POs:
        myPO.my_maker_unit = None
        myPO.my_made_unit = None
    # done.
    return memory

# function to add a token and all its child tokens to driver or recipient.
def add_tokens_to_set(memory, token_num, token_type, the_set):
    # put the token and all tokens under it into the set.
    # if the token is a P, then add its RBs, and each RB's args, and if the arg is a P, then repeat the process for that P's RBs and the RBs' args.
    if token_type == 'analog':
        # add all the analog's P units.
        for myP in memory.analogs[token_num].myPs:
            myP.set = the_set
        # add all the analog's RB units.
        for myRB in memory.analogs[token_num].myRBs:
            myRB.set = the_set
        # add all the analog's PO units.
        for myPO in memory.analogs[token_num].myPOs:
            myPO.set = the_set
    elif token_type == 'P':
        # add the P.
        memory.Ps[token_num].set = the_set
        # add my RBs.
        for myRB in memory.Ps[token_num].myRBs:
            # add the myRB.
            myRB.set = the_set
            myRB.myPred[0].set = the_set
            # add the RB's argument.
            if len(myRB.myObj) > 0:
                myRB.myObj[0].set = the_set
            else:
                myRB.myChildP[0].get_index(memory)
                new_token_num = myRB.myChildP[0].my_index
                memory = add_tokens_to_set(memory, new_token_num, 'P', the_set)
    elif token_type == 'RB':
        # add the myRB.
        memory.RBs[token_num].set = the_set
        # add the RB's pred.
        # edited for DEBUGGING.
        if len(memory.RBs[token_num].myPred[0].set) < 1:
            pdb.set_trace()
        memory.RBs[token_num].myPred[0].set = the_set
        # add the RB's argument.
        if len(memory.RBs[token_num].myObj) > 0:
            memory.RBs[token_num].myObj[0].set = the_set
        else:
            memory.RBs[token_num].myChildP[0].get_index(memory)
            new_token_num = memory.RBs[token_num].myChildP[0].my_index
            memory = add_tokens_to_set(memory, new_token_num, 'P', the_set)
    elif token_type == 'PO':
        # add the PO.
        memory.POs[token_num].set = the_set
    # done.
    return memory
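
# Illustration (not part of the model code): add_tokens_to_set() above walks
# the token hierarchy top-down -- a P recruits its RBs, each RB recruits its
# pred and either its object or, for a higher-order relation, a child P, on
# which it recurses. The same traversal over a toy nested-dict hierarchy:
def _demo_mark_tree(token, the_set):
    token['set'] = the_set
    for child in token.get('children', []):
        _demo_mark_tree(child, the_set)  # recurse, as the child-P branch does
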
# function implementing the kludgey comparitor (used in Doumas et al., 2008; adopted from Hummel & Biederman, 1992).
def kludgey_comparitor(PO1, PO2, memory):
    # this comparitor is based on Hummel & Biederman, 1992. When two predicates are compared, it looks for any
    # semantics they share that correspond to a dimension. If it finds none, it does nothing. If it finds some,
    # then it does a literal comparison of their values. If they are the same, then it attaches the semantics
    # 'same' and 'dimension_name' (where 'dimension_name' is a variable corresponding to the name of the dimension
    # upon which the comparitor performed the comparison). If they are different, then it attaches the semantics
    # 'more' and 'dimension_name' to the PO unit with the semantic coding the larger value on the dimension, and
    # 'less' and 'dimension_name' to the PO with the semantic coding the smaller value on the dimension.
    # find the largest semantic connection weight for both PO1 and PO2.
    PO1.get_max_semantic_weight()
    PO2.get_max_semantic_weight()
    # find the semantics more/less/same, or make them if they do not exist.
    more, less, same = None, None, None
    for semantic in memory.semantics:
        if semantic.name == 'more':
            more = semantic
        elif semantic.name == 'less':
            less = semantic
        elif semantic.name == 'same':
            same = semantic
    if not more:
        # make a 'more' semantic.
        more = dataTypes.Semantic('more', dimension='comparative')
        memory.semantics.append(more)
    if not less:
        # make a 'less' semantic.
        less = dataTypes.Semantic('less', dimension='comparative')
        memory.semantics.append(less)
    if not same:
        # make a 'same' semantic.
        same = dataTypes.Semantic('same', dimension='comparative')
        memory.semantics.append(same)
    # check for a common dimension in the most strongly connected semantics of PO1 and PO2.
    for link1 in PO1.mySemantics:
        # don't operate on comparative semantics (i.e., 'more', 'less', 'same').
        if (link1.mySemantic.name != 'more') and (link1.mySemantic.name != 'less') and (link1.mySemantic.name != 'same'):
            for link2 in PO2.mySemantics:
                # don't operate on comparative semantics (i.e., 'more', 'less', 'same').
                if (link2.mySemantic.name != 'more') and (link2.mySemantic.name != 'less') and (link2.mySemantic.name != 'same'):
                    if (link1.weight == PO1.max_sem_weight) and (link2.weight == PO2.max_sem_weight):
                        if (link1.mySemantic.dimension == link2.mySemantic.dimension) and (link1.mySemantic.dimension != 'nil'):
                            # run the simple comparitor.
                            if link1.mySemantic.amount > link2.mySemantic.amount:
                                # connect PO1 to 'more' and PO2 to 'less'.
                                new_link_more = dataTypes.Link(PO1, None, more, 1.0)
                                PO1.mySemantics.append(new_link_more)
                                more.myPOs.append(new_link_more)
                                memory.Links.append(new_link_more)
                                new_link_less = dataTypes.Link(PO2, None, less, 1.0)
                                PO2.mySemantics.append(new_link_less)
                                less.myPOs.append(new_link_less)
                                memory.Links.append(new_link_less)
                            elif link1.mySemantic.amount < link2.mySemantic.amount:
                                # connect PO1 to 'less' and PO2 to 'more'.
                                new_link_less = dataTypes.Link(PO1, None, less, 1.0)
                                PO1.mySemantics.append(new_link_less)
                                less.myPOs.append(new_link_less)
                                memory.Links.append(new_link_less)
                                new_link_more = dataTypes.Link(PO2, None, more, 1.0)
                                PO2.mySemantics.append(new_link_more)
                                more.myPOs.append(new_link_more)
                                memory.Links.append(new_link_more)
                            elif link1.mySemantic.amount == link2.mySemantic.amount:
                                # they are equal, connect both PO1 and PO2 to 'same'.
                                new_link_same1 = dataTypes.Link(PO1, None, same, 1.0)
                                PO1.mySemantics.append(new_link_same1)
                                same.myPOs.append(new_link_same1)
                                memory.Links.append(new_link_same1)
                                new_link_same2 = dataTypes.Link(PO2, None, same, 1.0)
                                PO2.mySemantics.append(new_link_same2)
                                same.myPOs.append(new_link_same2)
                                memory.Links.append(new_link_same2)
    # reset the .max_sem_weight of the POs to None.
    PO1.max_sem_weight = None
    PO2.max_sem_weight = None
    # done.
    return memory
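
# Illustration (not part of the model code): the core comparison performed by
# kludgey_comparitor() once a shared dimension is found. A sketch over plain
# (dimension, amount) tuples; the real function attaches 'more'/'less'/'same'
# semantics via Link objects rather than returning labels:
def _demo_compare(sem1, sem2):
    if sem1[0] != sem2[0] or sem1[0] == 'nil':
        return None  # no shared dimension -> the comparitor does nothing
    if sem1[1] > sem2[1]:
        return ('more', 'less')
    elif sem1[1] < sem2[1]:
        return ('less', 'more')
    return ('same', 'same')
# _demo_compare(('size', 3), ('size', 1)) -> ('more', 'less')
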
# function to switch the contents of driver and recipient.
def swap_driverRecipient(memory):
    memory.driver, memory.recipient = memory.recipient, memory.driver
    # update the set information for units in the new driver/recipient.
    for Group in memory.driver.Groups:
        Group.set = 'driver'
    for myP in memory.driver.Ps:
        myP.set = 'driver'
    for myRB in memory.driver.RBs:
        myRB.set = 'driver'
    for myPO in memory.driver.POs:
        myPO.set = 'driver'
    for Group in memory.recipient.Groups:
        Group.set = 'recipient'
    for myP in memory.recipient.Ps:
        myP.set = 'recipient'
    for myRB in memory.recipient.RBs:
        myRB.set = 'recipient'
    for myPO in memory.recipient.POs:
        myPO.set = 'recipient'
    # done.
    return memory

# function to make sure that the .myanalog data in all tokens is consistent.
def check_analog_consistency(memory):
    # go through each analog and make sure that all tokens in that analog have that analog in their .myanalog field. If yes, fine. Otherwise, ...
    # done.
    return memory
# function to write memory state to a sym file for storage. Takes as arguments the current memory object, and a file_name, which is the name of the file to which the memory state should be written.
def write_memory_to_symfile(memory, file_name):
    # create an array of dicts.
    sym_dicts = []
    # for each analog.
    analog_counter = 0
    for analog in memory.analogs:
        # for each P in the analog, make a sym file entry for that P and all its connected tokens, and write that sym entry to the open text file.
        for myP in analog.myPs:
            # as long as the current P is NOT part of a higher-order relation (its .myParentRBs is empty), then make a sym entry for it. Otherwise, don't bother making a sym entry for it, as it will get made when the higher-order P it is part of has its sym entry made.
            if len(myP.myParentRBs) == 0:
                # make a sym_dict with the current myP.
                new_sym_dicts = create_dict_P(myP, analog_counter)
                # for higher-order Ps, you might get back multiple dicts in new_sym_dicts, so add them all to sym_dicts.
                for sym_dict in new_sym_dicts:
                    sym_dicts.append(sym_dict)
        # for each RB in the analog that has no parentP, make a sym file entry for that RB and all its connected tokens, and write that sym entry to the open text file.
        for myRB in analog.myRBs:
            # if myRB.myParentPs is empty, then make a sym_dict from myRB.
            if len(myRB.myParentPs) == 0:
                sym_dict = create_dict_RB(myRB, analog_counter)
                # add the new sym_dict to sym_dicts.
                sym_dicts.append(sym_dict)
        # for each PO in the analog that has no RBs, make a sym file entry for that PO, and write that sym entry to the open text file.
        for myPO in analog.myPOs:
            # if myPO.myRBs is empty, then make a sym_dict from myPO.
            if len(myPO.myRBs) == 0:
                sym_dict = create_dict_PO(myPO, analog_counter)
                # add the new sym_dict to sym_dicts.
                sym_dicts.append(sym_dict)
        # update the analog_counter, so that the next analog in memory has a new number associated with it in the 'analog' field of the sym file.
        analog_counter += 1
    # write all of sym_dicts to a text file called file_name using json.
    json.dump(sym_dicts, open(file_name, 'w'))
    # now prepend "simType='sym_file' symProps = ".
    # NOTE: This process is clunky, because you have to write all the json information to a text file first, as the json.dump() function requires a second argument (the open() component), and thus writes over the content of the text file, and consequently does not allow prepended text information. Prepending information to a text file requires rewriting the text file.
    with open(file_name, 'r+') as f:
        old_text = f.read()  # read all the contents of f into a new variable.
        f.seek(0)  # go back to the start of f.
        f.write('simType=\'json_sym\' \n' + old_text)

# function to create a sym_dict from a P unit.
def create_dict_P(myP, analog_counter):
    # create an array of the RB dicts made from the current P's RBs.
    new_sym_dicts = []
    RBs = []
    for myRB in myP.myRBs:
        # note: analog_counter is threaded through create_RB_dict so that any nested (higher-order) P dicts carry the right analog number.
        new_RB_dict, p_dicts = create_RB_dict(myRB, analog_counter)
        RBs.append(new_RB_dict)
        if p_dicts:
            # add the dicts for any child P to the array of new_sym_dicts.
            new_sym_dicts.extend(p_dicts)
    # create the new sym_dict.
    new_sym_dict = {'name': myP.name, 'RBs': RBs, 'set': 'memory', 'analog': analog_counter}
    new_sym_dicts.append(new_sym_dict)
    # return the new_sym_dicts.
    return new_sym_dicts

# function to create a sym_dict from a RB unit.
def create_dict_RB(myRB, analog_counter):
    # make a sym_dict from the current RB.
    RB_dict, p_dict = create_RB_dict(myRB, analog_counter)
    # make a new sym_dict using the new RB_dict.
    new_sym_dict = {'name': 'non_exist', 'RBs': [RB_dict], 'set': 'memory', 'analog': analog_counter}
    # return new_sym_dict.
    return new_sym_dict

# function to create a sym_dict from a PO unit.
def create_dict_PO(myPO, analog_counter):
    # first get an array of obj semantics.
    obj_sems = []
    for link in myPO.mySemantics:
        # capture both the name of the semantic and the weight of the semantic to the PO in an array.
        # also, if the semantic codes for a dimension, then encode that information in sem_info.
        if link.mySemantic.dimension == 'nil':
            sem_info = [link.mySemantic.name, link.weight]
        else:
            sem_info = [link.mySemantic.name, link.weight, link.mySemantic.dimension, link.mySemantic.amount]
        obj_sems.append(sem_info)
    # make new sym_dict.
    new_sym_dict = {'name': 'non_exist', 'RBs': [{'pred_name': 'non_exist', 'pred_sem': [], 'higher_order': False, 'object_name': myPO.name, 'object_sem': obj_sems, 'P': 'non_exist'}], 'set': 'memory', 'analog': analog_counter}
    # return new_sym_dict.
    return new_sym_dict

# function to create the RB_dict section of the sym_dict.
def create_RB_dict(myRB, analog_counter):
    # get the pred semantics.
    pred_sems = []
    for link in myRB.myPred[0].mySemantics:
        # capture both the name of the semantic and the weight of the semantic to the PO in an array.
        # also, if the semantic codes for a dimension, then encode that information in sem_info.
        if link.mySemantic.dimension == 'nil':
            sem_info = [link.mySemantic.name, link.weight, None, None, None]
        else:
            sem_info = [link.mySemantic.name, link.weight, link.mySemantic.dimension, link.mySemantic.amount, link.mySemantic.ont_status]
        pred_sems.append(sem_info)
    # check if the RB has a higher-order argument.
    # if it has a higher-order argument, then set higher_order to True, and create an empty object.
    # else, set higher_order to False, and get the object semantics.
    if len(myRB.myObj) == 0:
        higher_order = True
        object_name = 'non_exist'
        object_sems = []  # there are no semantics.
        # if there is a child P, then create a new sym_dict with that P unit, and set P_name to the name of that P unit.
        p_dicts = create_dict_P(myRB.myChildP[0], analog_counter)
        P_name = myRB.myChildP[0].name
    else:
        higher_order = False
        object_name = myRB.myObj[0].name
        object_sems = []
        for link in myRB.myObj[0].mySemantics:
            # capture both the name of the semantic and the weight of the semantic to the PO in an array.
            # also, if the semantic codes for a dimension, then encode that information in sem_info.
            if link.mySemantic.dimension == 'nil':
                sem_info = [link.mySemantic.name, link.weight]
            else:
                sem_info = [link.mySemantic.name, link.weight, link.mySemantic.dimension, link.mySemantic.amount]
            object_sems.append(sem_info)
        P_name = 'non_exist'
        # create an empty p_dicts to pass back (it should be immediately deleted by the function calling this (create_RB_dict) function).
        p_dicts = None
    # now make the RB_dict.
    RB_dict = {'pred_name': myRB.myPred[0].name, 'pred_sem': pred_sems, 'higher_order': higher_order, 'object_name': object_name, 'object_sem': object_sems, 'P': P_name}
    # and return the RB_dict--and, if necessary, the p_dicts.
    return RB_dict, p_dicts
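
# Illustration (not part of the model code): the shape of one sym_dict row as
# produced by create_dict_PO() above for a lone object PO with two semantics
# (all names, weights, and amounts here are made up):
_example_sym_dict = {
    'name': 'non_exist',
    'RBs': [{'pred_name': 'non_exist', 'pred_sem': [],
             'higher_order': False,
             'object_name': 'ball',
             'object_sem': [['round', 1.0], ['size', 0.8, 'size', 2]],
             'P': 'non_exist'}],
    'set': 'memory',
    'analog': 0,
}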
vamsi3/IITB-Operating-Systems
Lab Quiz 3/160050064-labquiz3/part1/xv6/sem_test2.c
#include "types.h"
#include "stat.h"
#include "user.h"

#define sem1 0
#define NPROCESS 10

void delay(int ticks) {
    int endtime;
    endtime = uptime() + ticks;
    while(uptime() < endtime);
}

int main(int argc, char *argv[]) {
    sem_init(sem1, 5);
    for(int i=0; i<NPROCESS; i++) {
        if(fork() > 0)
            continue;
        sem_down(sem1);
        exit();
    }
    printf(1, "[%d] Parent: Waiting for 10 seconds \n", getpid());
    delay(10 * 100);
    for(int i=0; i<NPROCESS; i++) {
        sem_up(sem1);
        delay(2);
    }
    for(int i=0; i<NPROCESS; i++)
        wait();
    exit();
}
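For readers without an xv6 build, here is a rough user-space analogue of what sem_test2.c exercises, using Python's threading.Semaphore in place of the xv6 sem_init/sem_down/sem_up syscalls (an illustration only; threads stand in for forked processes, and the timings are shortened):

import threading
import time

sem = threading.Semaphore(5)                 # counterpart of sem_init(sem1, 5)

def child(i):
    sem.acquire()                            # counterpart of sem_down(sem1)
    print(f"child {i} got the semaphore")

threads = [threading.Thread(target=child, args=(i,)) for i in range(10)]
for t in threads:
    t.start()
time.sleep(1)                                # stand-in for the parent's delay
for _ in range(10):
    sem.release()                            # counterpart of sem_up(sem1)
    time.sleep(0.02)
for t in threads:
    t.join()                                 # counterpart of the wait() loop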
Mario-Kart-Felix/hotkeynet-project
hotkeynet/projects/warmane/constant/credentials.py
# -*- coding: utf-8 -*-

"""
This module enumerates all account usernames and passwords, so that sensitive
information is kept out of the source code and can be referenced safely from
Hotkeynet scripts.
"""

import json
import typing

import attr
from pathlib_mate import PathCls as Path
from enum import Enum

# put credentials.json file in the git repo root folder
credentials_file = Path(__file__).parent.parent.parent.parent.change(new_basename="credentials.json")
assert credentials_file.parent.basename == "hotkeynet-project"
credentials_data = json.loads(credentials_file.read_text(encoding="utf-8"))


@attr.s
class Credential:
    username = attr.ib()
    password = attr.ib()


# Enumerate all username / password data objects for later reference
class Credentials(Enum):
    cred_fatmulti1 = Credential(username="fatmulti1", password=credentials_data["<PASSWORD>"])
    cred_fatmulti2 = Credential(username="fatmulti2", password=credentials_data["<PASSWORD>"])
    cred_fatmulti3 = Credential(username="fatmulti3", password=credentials_data["<PASSWORD>"])
    cred_fatmulti4 = Credential(username="fatmulti4", password=credentials_data["<PASSWORD>"])
    cred_fatmulti5 = Credential(username="fatmulti5", password=credentials_data["<PASSWORD>multi5"])
    cred_fitsheep = Credential(username="fitsheep", password=credentials_data["<PASSWORD>"])
    cred_fatmulti6 = Credential(username="fatmulti6", password=credentials_data["<PASSWORD>"])
    cred_fatmulti8 = Credential(username="fatmulti8", password=credentials_data["<PASSWORD>"])
    cred_fatmulti9 = Credential(username="fatmulti9", password=credentials_data["<PASSWORD>"])
    cred_fatmulti10 = Credential(username="fatmulti10", password=credentials_data["<PASSWORD>"])
    cred_makun7551 = Credential(username="makun7551", password=credentials_data["<PASSWORD>"])
    cred_monkey130 = Credential(username="monkey<PASSWORD>", password=credentials_data["<PASSWORD>"])
    cred_freiliheng = Credential(username="freiliheng", password=credentials_data["freiliheng"])
    cred_fatmulti11 = Credential(username="fatmulti11", password=credentials_data["fatmulti11"])
    cred_fatmulti12 = Credential(username="fatmulti12", password=credentials_data["fatmulti12"])
    cred_fatmulti13 = Credential(username="fatmulti13", password=credentials_data["<PASSWORD>"])
    cred_fatmulti14 = Credential(username="fatmulti14", password=credentials_data["fatmulti14"])
    cred_fatmulti15 = Credential(username="fatmulti15", password=credentials_data["<PASSWORD>"])
    cred_fatmulti16 = Credential(username="fatmulti16", password=credentials_data["<PASSWORD>"])
    cred_fatmulti17 = Credential(username="fatmulti17", password=credentials_data["<PASSWORD>"])
    cred_fatmulti18 = Credential(username="fatmulti18", password=credentials_data["<PASSWORD>multi18"])
    cred_fatmulti19 = Credential(username="fatmulti19", password=credentials_data["<PASSWORD>"])
    cred_fatmulti20 = Credential(username="fatmulti20", password=credentials_data["<PASSWORD>"])
    cred_fatmulti21 = Credential(username="fatmulti21", password=credentials_data["fatmulti21"])
    cred_fatmulti22 = Credential(username="fatmulti22", password=credentials_data["<PASSWORD>"])
    cred_fatmulti23 = Credential(username="fatmulti23", password=credentials_data["fatmulti23"])
    cred_fatmulti24 = Credential(username="fatmulti24", password=credentials_data["fatmulti24"])
    cred_fatmulti25 = Credential(username="fatmulti25", password=credentials_data["fatmulti25"])
    cred_fatmulti26 = Credential(username="fatmulti26", password=credentials_data["fatmulti26"])
    cred_fatmulti27 = Credential(username="fatmulti27", password=credentials_data["fatmulti27"])
    cred_fatmulti28 = Credential(username="fatmulti28", password=credentials_data["<PASSWORD>"])
    cred_fatmulti29 = Credential(username="fatmulti29", password=credentials_data["<PASSWORD>"])
duely/BuildingGadgets
src/main/java/com/direwolf20/buildinggadgets/common/items/gadgets/ModeGadget.java
package com.direwolf20.buildinggadgets.common.items.gadgets;

import com.direwolf20.buildinggadgets.common.network.packets.PacketRotateMirror.Operation;
import com.direwolf20.buildinggadgets.common.util.GadgetUtils;
import net.minecraft.entity.player.PlayerEntity;
import net.minecraft.item.ItemStack;
import net.minecraft.util.ResourceLocation;

import java.util.function.IntSupplier;

public abstract class ModeGadget extends AbstractGadget {
    public ModeGadget(Properties builder, IntSupplier undoLengthSupplier, String undoName, ResourceLocation whiteListTag, ResourceLocation blackListTag) {
        super(builder, undoLengthSupplier, undoName, whiteListTag, blackListTag);
    }

    @Override
    public boolean performRotate(ItemStack stack, PlayerEntity player) {
        GadgetUtils.rotateOrMirrorToolBlock(stack, player, Operation.ROTATE);
        return true;
    }

    @Override
    public boolean performMirror(ItemStack stack, PlayerEntity player) {
        GadgetUtils.rotateOrMirrorToolBlock(stack, player, Operation.MIRROR);
        return true;
    }
}
BortoBoy/jabref2
src/main/java/org/jabref/model/search/matchers/NotMatcher.java
package org.jabref.model.search.matchers;

import java.util.Objects;

import org.jabref.model.entry.BibEntry;
import org.jabref.model.search.SearchMatcher;

/**
 * Inverts the search result.
 * <p>
 * Example:
 * false --> true
 * true --> false
 */
public class NotMatcher implements SearchMatcher {

    private final SearchMatcher otherMatcher;

    public NotMatcher(SearchMatcher otherMatcher) {
        this.otherMatcher = Objects.requireNonNull(otherMatcher);
    }

    @Override
    public boolean isMatch(BibEntry entry) {
        return !otherMatcher.isMatch(entry);
    }
}
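The same inversion pattern is easy to sketch outside Java; a hypothetical Python counterpart (names invented here, not part of JabRef) wraps any entry predicate and negates its result:

def not_matcher(matcher):
    # return a predicate that inverts the wrapped matcher's result
    return lambda entry: not matcher(entry)

is_book = lambda entry: entry.get('type') == 'book'
not_book = not_matcher(is_book)
# not_book({'type': 'article'}) -> True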
AlirezaIvaz/OneUI-Design-Library
yanndroid/oneui/src/main/java/androidx/reflect/provider/SeslSystemReflector.java
package androidx.reflect.provider;

import android.os.Build;
import android.provider.Settings;

import androidx.reflect.SeslBaseReflector;

import java.lang.reflect.Field;
import java.lang.reflect.Method;

public class SeslSystemReflector {
    public static final Class<?> mClass = Settings.System.class;

    public static String getField_SEM_PEN_HOVERING() {
        int i = Build.VERSION.SDK_INT;
        Object obj = null;
        if (i >= 29) {
            Method declaredMethod = SeslBaseReflector.getDeclaredMethod(mClass, "hidden_SEM_PEN_HOVERING", new Class[0]);
            if (declaredMethod != null) {
                obj = SeslBaseReflector.invoke(null, declaredMethod, new Object[0]);
            }
        } else {
            Field field = SeslBaseReflector.getField(mClass, i >= 24 ? "SEM_PEN_HOVERING" : "PEN_HOVERING");
            if (field != null) {
                obj = SeslBaseReflector.get(null, field);
            }
        }
        return obj instanceof String ? (String) obj : "pen_hovering";
    }

    public static String getField_SEM_ACCESSIBILITY_REDUCE_TRANSPARENCY() {
        Method declaredMethod;
        Object obj = null;
        if (Build.VERSION.SDK_INT >= 31 && (declaredMethod = SeslBaseReflector.getDeclaredMethod(mClass, "hidden_SEM_ACCESSIBILITY_REDUCE_TRANSPARENCY", new Class[0])) != null) {
            obj = SeslBaseReflector.invoke(null, declaredMethod, new Object[0]);
        }
        return obj instanceof String ? (String) obj : "not_supported";
    }
}
denis-mludek/SupSMS
SupSMS-ejb/src/java/com/supsms/webservice/SmsWebService.java
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package com.supsms.webservice;

import com.supsms.entity.Conversation;
import com.supsms.entity.Sms;
import com.supsms.service.ConversationService;
import com.supsms.service.SmsService;
import com.supsms.service.UserService;
import javax.ejb.EJB;
import javax.ejb.Stateless;
import javax.jws.WebMethod;
import javax.jws.WebParam;
import javax.jws.WebService;
import java.util.ArrayList;
import java.util.List;

/**
 * @author EPTR
 */
@WebService(serviceName = "SmsWebService")
@Stateless
public class SmsWebService {

    @EJB
    private SmsService smsService;
    @EJB
    private UserService userService;
    @EJB
    private ConversationService conversationService;

    // App -> Client
    @WebMethod(operationName = "getAllSmsForUser")
    public List<Sms> getAllSmsForUser(@WebParam(name = "userPhoneNumber") String phone) {
        List<Conversation> listConv = conversationService.getConversationByUser(userService.getUserByPhoneNumber(phone));
        List<Sms> listSms = new ArrayList();
        for (Conversation c : listConv) {
            for (Sms s : c.getListSms()) {
                listSms.add(s);
            }
        }
        return listSms;
    }

    // Client -> App
    /*
    @WebMethod(operationName = "updateListSms")
    @Oneway
    public void updateListSms(@WebParam(name = "listSms") List<Sms> listSms, @WebParam(name = "userPhoneNumber") String phoneUser) {
        User user = userService.getUserByPhoneNumber(phoneUser);
        List<String> phones;
        for (Sms s : listSms) {
            phones = s.getConversation().getPhoneNumber();
            conversationService.getConversationByPhoneNumbers(phones.get(0), phones.get(1));
        }
    }
        user.setListContacts(listContacts);
        userService.updateUser(user);
    }
    */
}
nguyenduylinh0201/tttn
src/Components/Dashboard/TopMenu.js
import React from 'react';
import logoutImg from '../../img/logout.svg';

// note: this flag shadows the global Boolean constructor; kept as in the original.
var Boolean = false;

function contentDb(left, width) {
    document.querySelector('.menu-dashboard').style.display = left;
    for (let i = 0; i < document.getElementsByClassName("content-dashboard").length; i++) {
        document.getElementsByClassName("content-dashboard")[i].style.flex = '0 0 ' + width;
        document.getElementsByClassName("content-dashboard")[i].style.maxWidth = width;
    }
    Boolean = !Boolean
}

function showAdmin() {
    if (window.innerWidth > 768) {
        if (Boolean) {
            contentDb("none", "100%")
        } else {
            contentDb("block", "80%")
        }
    } else {
        if (Boolean) {
            contentDb("none", "100%")
        } else {
            contentDb("block", "100%")
        }
    }
}

const logOut = () => {
    sessionStorage.removeItem('userData')
    window.location.pathname = ('/')
}

const TopMenu = () => {
    return (
        <header className="header-dashboard">
            <span className="header-dashboard__logo">
                <a href="#" onClick={(e) => { e.preventDefault(); showAdmin(); }}>Admin</a>
            </span>
            <span className="header-dashboard__logout">
                <img src={logoutImg} alt="logo" onClick={logOut} />
            </span>
        </header>
    )
}

export default TopMenu;
relwell/cw-app-rewrite
app/containers/AuthHeader/messages.js
/*
 * AuthHeader Messages
 *
 * This contains all the text for the AuthHeader component.
 */
import { defineMessages } from 'react-intl';

export default defineMessages({
  header: {
    id: 'app.containers.AuthHeader.header',
    defaultMessage: 'This is AuthHeader container !',
  },
});
shinesolutions/cloudmanager-api-clients
clients/scala-finch/generated/src/main/scala/org/openapitools/apis/EnvironmentsApi.scala
package org.openapitools.apis import java.io._ import org.openapitools._ import org.openapitools.models._ import org.openapitools.models.BadRequestError import org.openapitools.models.Environment import org.openapitools.models.EnvironmentList import org.openapitools.models.EnvironmentLogs import io.finch.circe._ import io.circe.generic.semiauto._ import com.twitter.concurrent.AsyncStream import com.twitter.finagle.Service import com.twitter.finagle.Http import com.twitter.finagle.http.{Request, Response} import com.twitter.finagle.http.exp.Multipart.{FileUpload, InMemoryFileUpload, OnDiskFileUpload} import com.twitter.util.Future import com.twitter.io.Buf import io.finch._, items._ import java.io.File import java.time._ object EnvironmentsApi { /** * Compiles all service endpoints. * @return Bundled compilation of all service endpoints. */ def endpoints(da: DataAccessor) = deleteEnvironment(da) :+: downloadLogs(da) :+: getEnvironment(da) :+: getEnvironmentLogs(da) :+: getEnvironments(da) private def checkError(e: CommonError) = e match { case InvalidInput(_) => BadRequest(e) case MissingIdentifier(_) => BadRequest(e) case RecordNotFound(_) => NotFound(e) case _ => InternalServerError(e) } implicit class StringOps(s: String) { import java.time.format.DateTimeFormatter lazy val localformatter: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd") lazy val datetimeformatter: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ") def toLocalDateTime: LocalDateTime = LocalDateTime.parse(s,localformatter) def toZonedDateTime: ZonedDateTime = ZonedDateTime.parse(s, datetimeformatter) } /** * * @return An endpoint representing a Environment */ private def deleteEnvironment(da: DataAccessor): Endpoint[Environment] = delete("api" :: "program" :: string :: "environment" :: string :: header("x-gw-ims-org-id") :: header("Authorization") :: header("x-api-key")) { (programId: String, environmentId: String, xGwImsOrgId: String, authorization: String, xApiKey: String) => da.Environments_deleteEnvironment(programId, environmentId, xGwImsOrgId, authorization, xApiKey) match { case Left(error) => checkError(error) case Right(data) => Ok(data) } } handle { case e: Exception => BadRequest(e) } /** * * @return An endpoint representing a Unit */ private def downloadLogs(da: DataAccessor): Endpoint[Unit] = get("api" :: "program" :: string :: "environment" :: string :: "logs" :: "download" :: param("service") :: param("name") :: param("date") :: header("x-gw-ims-org-id") :: header("Authorization") :: header("x-api-key") :: headerOption("Accept")) { (programId: String, environmentId: String, service: String, name: String, date: String, xGwImsOrgId: String, authorization: String, xApiKey: String, accept: Option[String]) => da.Environments_downloadLogs(programId, environmentId, service, name, date, xGwImsOrgId, authorization, xApiKey, accept) match { case Left(error) => checkError(error) case Right(data) => Ok(data) } } handle { case e: Exception => BadRequest(e) } /** * * @return An endpoint representing a Environment */ private def getEnvironment(da: DataAccessor): Endpoint[Environment] = get("api" :: "program" :: string :: "environment" :: string :: header("x-gw-ims-org-id") :: header("Authorization") :: header("x-api-key")) { (programId: String, environmentId: String, xGwImsOrgId: String, authorization: String, xApiKey: String) => da.Environments_getEnvironment(programId, environmentId, xGwImsOrgId, authorization, xApiKey) match { case Left(error) => checkError(error) case 
Right(data) => Ok(data) } } handle { case e: Exception => BadRequest(e) } /** * * @return An endpoint representing a EnvironmentLogs */ private def getEnvironmentLogs(da: DataAccessor): Endpoint[EnvironmentLogs] = get("api" :: "program" :: string :: "environment" :: string :: "logs" :: param("days").map(_.toInt) :: header("x-gw-ims-org-id") :: header("Authorization") :: header("x-api-key") :: params("service") :: params("name")) { (programId: String, environmentId: String, days: Int, xGwImsOrgId: String, authorization: String, xApiKey: String, service: Seq[String], name: Seq[String]) => da.Environments_getEnvironmentLogs(programId, environmentId, days, xGwImsOrgId, authorization, xApiKey, service, name) match { case Left(error) => checkError(error) case Right(data) => Ok(data) } } handle { case e: Exception => BadRequest(e) } /** * * @return An endpoint representing a EnvironmentList */ private def getEnvironments(da: DataAccessor): Endpoint[EnvironmentList] = get("api" :: "program" :: string :: "environments" :: header("x-gw-ims-org-id") :: header("Authorization") :: header("x-api-key") :: paramOption("type")) { (programId: String, xGwImsOrgId: String, authorization: String, xApiKey: String, _type: Option[String]) => da.Environments_getEnvironments(programId, xGwImsOrgId, authorization, xApiKey, _type) match { case Left(error) => checkError(error) case Right(data) => Ok(data) } } handle { case e: Exception => BadRequest(e) } implicit private def fileUploadToFile(fileUpload: FileUpload) : File = { fileUpload match { case upload: InMemoryFileUpload => bytesToFile(Buf.ByteArray.Owned.extract(upload.content)) case upload: OnDiskFileUpload => upload.content case _ => null } } private def bytesToFile(input: Array[Byte]): java.io.File = { val file = File.createTempFile("tmpEnvironmentsApi", null) val output = new FileOutputStream(file) output.write(input) file } // This assists in params(string) application (which must be Seq[A] in parameter list) when the param is used as a List[A] elsewhere. implicit def seqList[A](input: Seq[A]): List[A] = input.toList }
hakkisabah/tsentiment-services
oAuth/controllers/userpanel.js
const { getRedis } = require('../helpers/redis')

const logInfo = { servicename: 'oauth', file: 'controllers/userpanel.js' }

exports.userPanel = async (request, response) => {
  try {
    const userAppToken = await getRedis(`${request.currentAuthData.user_id}_token`)
    if (userAppToken) {
      logInfo.line = 11
      logInfo.clientInfo = { user: request.currentAuthData.user_id, ip: request.clientIp, agent: request.useragent }
      logInfo.logdata = `${request.currentAuthData.user_id} user found`
      logInfo.type = 'info'
      return response.sendData({ token: userAppToken }, logInfo)
    }
    logInfo.line = 17
    logInfo.clientInfo = { user: request.currentAuthData.user_id, ip: request.clientIp, agent: request.useragent }
    logInfo.logdata = `user App token not found on redis`
    logInfo.type = 'error'
    return response.sendData({ token: 'error (refresh your token)', errors: 'token not found!' }, logInfo)
  } catch (e) {
    logInfo.line = 23
    logInfo.clientInfo = request.currentAuthData.user_id
    logInfo.logdata = e
    return response.sendError('Panel error', 500, logInfo)
  }
}
ONSdigital/ledr-dashboard
src/main/app/src/navigation/SubNavigationBar.js
import React, {Component} from 'react'
import {Link, withRouter} from 'react-router-dom';
import {Menu} from "semantic-ui-react";
import {ROUTER_PATH} from "../utils/Constants";

/**
 * This class renders the top navigation bar for the application
 */
class SubNavBar extends Component {

  handleItemClick = (e, {id}) => {
    this.setState({activeItem: id});
  };

  render() {
    const activeItem = this.props.location.pathname;
    console.log(activeItem);
    return (
      <Menu borderless id='sub-menu'>
        <Link to={ROUTER_PATH.HOME}>
          <Menu.Item id={ROUTER_PATH.HOME} as='span' active={activeItem === ROUTER_PATH.HOME}
                     onClick={this.handleItemClick}>Home</Menu.Item>
        </Link>
        <Link to={ROUTER_PATH.DATA_QUALITY}>
          <Menu.Item id={ROUTER_PATH.DATA_QUALITY} as='span' active={activeItem === ROUTER_PATH.DATA_QUALITY}
                     onClick={this.handleItemClick}>Data Quality</Menu.Item>
        </Link>
        <Link to={ROUTER_PATH.CODING}>
          <Menu.Item id={ROUTER_PATH.CODING} as='span' active={activeItem === ROUTER_PATH.CODING}
                     onClick={this.handleItemClick}>Coding</Menu.Item>
        </Link>
        <Link to={ROUTER_PATH.DASHBOARD}>
          <Menu.Item id={ROUTER_PATH.DASHBOARD} as='span' active={activeItem === ROUTER_PATH.DASHBOARD}
                     onClick={this.handleItemClick}>Dashboard</Menu.Item>
        </Link>
      </Menu>
    );
  }
}

const SubNavigationBar = withRouter(SubNavBar);

export default SubNavigationBar
DigitEgal/one
src/fireedge/src/client/models/ProvisionTemplate.js
export const isValidProvisionTemplate = ({
  defaults,
  hosts,
  name,
  provider,
  provision_type: provisionType
}) => {
  const providerName =
    defaults?.provision?.provider_name ?? hosts?.[0]?.provision.provider_name

  return !(
    providerName === undefined ||
    [name, provisionType, provider].includes(undefined)
  )
}
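The validation rule above -- a provider name resolved from defaults first, then from the first host, plus three required top-level fields -- can be sketched in Python for clarity (a hypothetical approximation over plain dicts, not the FireEdge API):

def is_valid_provision_template(template):
    defaults = template.get('defaults') or {}
    hosts = template.get('hosts') or [{}]
    provider_name = ((defaults.get('provision') or {}).get('provider_name')
                     or (hosts[0].get('provision') or {}).get('provider_name'))
    required = [template.get('name'), template.get('provision_type'),
                template.get('provider')]
    return provider_name is not None and all(v is not None for v in required)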
guileschool/BEAGLEBONE-tutorials
BBB-firmware/u-boot-v2018.05-rc2/include/configs/MPC8641HPCN.h
/* * Copyright 2006, 2010-2011 Freescale Semiconductor. * * <NAME> (<EMAIL>) * * SPDX-License-Identifier: GPL-2.0+ */ /* * MPC8641HPCN board configuration file * * Make sure you change the MAC address and other network params first, * search for CONFIG_SERVERIP, etc. in this file. */ #ifndef __CONFIG_H #define __CONFIG_H /* High Level Configuration Options */ #define CONFIG_MP 1 /* support multiple processors */ #define CONFIG_LINUX_RESET_VEC 0x100 /* Reset vector used by Linux */ #define CONFIG_ADDR_MAP 1 /* Use addr map */ /* * default CCSRBAR is at 0xff700000 * assume U-Boot is less than 0.5MB */ #ifdef RUN_DIAG #define CONFIG_SYS_DIAG_ADDR CONFIG_SYS_FLASH_BASE #endif /* * virtual address to be used for temporary mappings. There * should be 128k free at this VA. */ #define CONFIG_SYS_SCRATCH_VA 0xe0000000 #define CONFIG_SYS_SRIO #define CONFIG_SRIO1 /* SRIO port 1 */ #define CONFIG_PCIE1 1 /* PCIE controller 1 (ULI bridge) */ #define CONFIG_PCIE2 1 /* PCIE controller 2 (slot) */ #define CONFIG_FSL_PCI_INIT 1 /* Use common FSL init code */ #define CONFIG_SYS_PCI_64BIT 1 /* enable 64-bit PCI resources */ #define CONFIG_ENV_OVERWRITE #define CONFIG_BAT_RW 1 /* Use common BAT rw code */ #define CONFIG_HIGH_BATS 1 /* High BATs supported and enabled */ #define CONFIG_SYS_NUM_ADDR_MAP 8 /* Number of addr map slots = 8 dbats */ #define CONFIG_ALTIVEC 1 /* * L2CR setup -- make sure this is right for your board! */ #define CONFIG_SYS_L2 #define L2_INIT 0 #define L2_ENABLE (L2CR_L2E) #ifndef CONFIG_SYS_CLK_FREQ #ifndef __ASSEMBLY__ extern unsigned long get_board_sys_clk(unsigned long dummy); #endif #define CONFIG_SYS_CLK_FREQ get_board_sys_clk(0) #endif #define CONFIG_SYS_MEMTEST_START 0x00200000 /* memtest region */ #define CONFIG_SYS_MEMTEST_END 0x00400000 /* * With the exception of PCI Memory and Rapid IO, most devices will simply * add CONFIG_SYS_PHYS_ADDR_HIGH to the front of the 32-bit VA to get the PA * when 36-bit is enabled. When 36-bit is not enabled, these bits are 0. */ #ifdef CONFIG_PHYS_64BIT #define CONFIG_SYS_PHYS_ADDR_HIGH 0x0000000f #else #define CONFIG_SYS_PHYS_ADDR_HIGH 0x00000000 #endif /* * Base addresses -- Note these are effective addresses where the * actual resources get mapped (not physical addresses) */ #define CONFIG_SYS_CCSRBAR 0xffe00000 /* relocated CCSRBAR */ #define CONFIG_SYS_IMMR CONFIG_SYS_CCSRBAR /* PQII uses CONFIG_SYS_IMMR */ /* Physical addresses */ #define CONFIG_SYS_CCSRBAR_PHYS_LOW CONFIG_SYS_CCSRBAR #define CONFIG_SYS_CCSRBAR_PHYS_HIGH CONFIG_SYS_PHYS_ADDR_HIGH #define CONFIG_SYS_CCSRBAR_PHYS \ PAIRED_PHYS_TO_PHYS(CONFIG_SYS_CCSRBAR_PHYS_LOW, \ CONFIG_SYS_CCSRBAR_PHYS_HIGH) #define CONFIG_HWCONFIG /* use hwconfig to control memory interleaving */ /* * DDR Setup */ #define CONFIG_FSL_DDR_INTERACTIVE #define CONFIG_SPD_EEPROM /* Use SPD EEPROM for DDR setup */ #define CONFIG_DDR_SPD #define CONFIG_ECC_INIT_VIA_DDRCONTROLLER /* DDR controller or DMA? 
*/ #define CONFIG_MEM_INIT_VALUE 0xDeadBeef #define CONFIG_SYS_DDR_SDRAM_BASE 0x00000000 /* DDR is system memory*/ #define CONFIG_SYS_SDRAM_BASE CONFIG_SYS_DDR_SDRAM_BASE #define CONFIG_SYS_MAX_DDR_BAT_SIZE 0x80000000 /* BAT mapping size */ #define CONFIG_VERY_BIG_RAM #define CONFIG_DIMM_SLOTS_PER_CTLR 2 #define CONFIG_CHIP_SELECTS_PER_CTRL (2 * CONFIG_DIMM_SLOTS_PER_CTLR) /* * I2C addresses of SPD EEPROMs */ #define SPD_EEPROM_ADDRESS1 0x51 /* CTLR 0 DIMM 0 */ #define SPD_EEPROM_ADDRESS2 0x52 /* CTLR 0 DIMM 1 */ #define SPD_EEPROM_ADDRESS3 0x53 /* CTLR 1 DIMM 0 */ #define SPD_EEPROM_ADDRESS4 0x54 /* CTLR 1 DIMM 1 */ /* * These are used when DDR doesn't use SPD. */ #define CONFIG_SYS_SDRAM_SIZE 256 /* DDR is 256MB */ #define CONFIG_SYS_DDR_CS0_BNDS 0x0000000F #define CONFIG_SYS_DDR_CS0_CONFIG 0x80010102 /* Enable, no interleaving */ #define CONFIG_SYS_DDR_TIMING_3 0x00000000 #define CONFIG_SYS_DDR_TIMING_0 0x00260802 #define CONFIG_SYS_DDR_TIMING_1 0x39357322 #define CONFIG_SYS_DDR_TIMING_2 0x14904cc8 #define CONFIG_SYS_DDR_MODE_1 0x00480432 #define CONFIG_SYS_DDR_MODE_2 0x00000000 #define CONFIG_SYS_DDR_INTERVAL 0x06090100 #define CONFIG_SYS_DDR_DATA_INIT 0xdeadbeef #define CONFIG_SYS_DDR_CLK_CTRL 0x03800000 #define CONFIG_SYS_DDR_OCD_CTRL 0x00000000 #define CONFIG_SYS_DDR_OCD_STATUS 0x00000000 #define CONFIG_SYS_DDR_CONTROL 0xe3008000 /* Type = DDR2 */ #define CONFIG_SYS_DDR_CONTROL2 0x04400000 #define CONFIG_ID_EEPROM #define CONFIG_SYS_I2C_EEPROM_NXID #define CONFIG_ID_EEPROM #define CONFIG_SYS_I2C_EEPROM_ADDR 0x57 #define CONFIG_SYS_I2C_EEPROM_ADDR_LEN 1 #define CONFIG_SYS_FLASH_BASE 0xef800000 /* start of FLASH 8M */ #define CONFIG_SYS_FLASH_BASE_PHYS_LOW CONFIG_SYS_FLASH_BASE #define CONFIG_SYS_FLASH_BASE_PHYS \ PAIRED_PHYS_TO_PHYS(CONFIG_SYS_FLASH_BASE_PHYS_LOW, \ CONFIG_SYS_PHYS_ADDR_HIGH) #define CONFIG_SYS_FLASH_BANKS_LIST {CONFIG_SYS_FLASH_BASE_PHYS} #define CONFIG_SYS_BR0_PRELIM (BR_PHYS_ADDR(CONFIG_SYS_FLASH_BASE_PHYS) \ | 0x00001001) /* port size 16bit */ #define CONFIG_SYS_OR0_PRELIM 0xff806ff7 /* 8MB Boot Flash area*/ #define CONFIG_SYS_BR2_PRELIM (BR_PHYS_ADDR(CF_BASE_PHYS) \ | 0x00001001) /* port size 16bit */ #define CONFIG_SYS_OR2_PRELIM 0xffffeff7 /* 32k Compact Flash */ #define CONFIG_SYS_BR3_PRELIM (BR_PHYS_ADDR(PIXIS_BASE_PHYS) \ | 0x00000801) /* port size 8bit */ #define CONFIG_SYS_OR3_PRELIM 0xffffeff7 /* 32k PIXIS area*/ /* * The LBC_BASE is the base of the region that contains the PIXIS and the CF. * The PIXIS and CF by themselves aren't large enough to take up the 128k * required for the smallest BAT mapping, so there's a 64k hole. 
*/ #define CONFIG_SYS_LBC_BASE 0xffde0000 #define CONFIG_SYS_LBC_BASE_PHYS_LOW CONFIG_SYS_LBC_BASE #define CONFIG_FSL_PIXIS 1 /* use common PIXIS code */ #define PIXIS_BASE (CONFIG_SYS_LBC_BASE + 0x00010000) #define PIXIS_BASE_PHYS_LOW (CONFIG_SYS_LBC_BASE_PHYS_LOW + 0x00010000) #define PIXIS_BASE_PHYS PAIRED_PHYS_TO_PHYS(PIXIS_BASE_PHYS_LOW, \ CONFIG_SYS_PHYS_ADDR_HIGH) #define PIXIS_SIZE 0x00008000 /* 32k */ #define PIXIS_ID 0x0 /* Board ID at offset 0 */ #define PIXIS_VER 0x1 /* Board version at offset 1 */ #define PIXIS_PVER 0x2 /* PIXIS FPGA version at offset 2 */ #define PIXIS_RST 0x4 /* PIXIS Reset Control register */ #define PIXIS_AUX 0x6 /* PIXIS Auxiliary register; Scratch register */ #define PIXIS_SPD 0x7 /* Register for SYSCLK speed */ #define PIXIS_VCTL 0x10 /* VELA Control Register */ #define PIXIS_VCFGEN0 0x12 /* VELA Config Enable 0 */ #define PIXIS_VCFGEN1 0x13 /* VELA Config Enable 1 */ #define PIXIS_VBOOT 0x16 /* VELA VBOOT Register */ #define PIXIS_VBOOT_FMAP 0x80 /* VBOOT - CFG_FLASHMAP */ #define PIXIS_VBOOT_FBANK 0x40 /* VBOOT - CFG_FLASHBANK */ #define PIXIS_VSPEED0 0x17 /* VELA VSpeed 0 */ #define PIXIS_VSPEED1 0x18 /* VELA VSpeed 1 */ #define PIXIS_VCLKH 0x19 /* VELA VCLKH register */ #define PIXIS_VCLKL 0x1A /* VELA VCLKL register */ #define CONFIG_SYS_PIXIS_VBOOT_MASK 0x40 /* Reset altbank mask*/ /* Compact flash shares a BAT with PIXIS; make sure they're contiguous */ #define CF_BASE (PIXIS_BASE + PIXIS_SIZE) #define CF_BASE_PHYS (PIXIS_BASE_PHYS + PIXIS_SIZE) #define CONFIG_SYS_MAX_FLASH_BANKS 1 /* number of banks */ #define CONFIG_SYS_MAX_FLASH_SECT 128 /* sectors per device */ #undef CONFIG_SYS_FLASH_CHECKSUM #define CONFIG_SYS_FLASH_ERASE_TOUT 60000 /* Flash Erase Timeout (ms) */ #define CONFIG_SYS_FLASH_WRITE_TOUT 500 /* Flash Write Timeout (ms) */ #define CONFIG_SYS_MONITOR_BASE CONFIG_SYS_TEXT_BASE /* start of monitor */ #define CONFIG_SYS_MONITOR_BASE_EARLY 0xfff00000 /* early monitor loc */ #define CONFIG_FLASH_CFI_DRIVER #define CONFIG_SYS_FLASH_CFI #define CONFIG_SYS_FLASH_EMPTY_INFO #if (CONFIG_SYS_MONITOR_BASE < CONFIG_SYS_FLASH_BASE) #define CONFIG_SYS_RAMBOOT #else #undef CONFIG_SYS_RAMBOOT #endif #if defined(CONFIG_SYS_RAMBOOT) #undef CONFIG_SPD_EEPROM #define CONFIG_SYS_SDRAM_SIZE 256 #endif #undef CONFIG_CLOCKS_IN_MHZ #define CONFIG_SYS_INIT_RAM_LOCK 1 #ifndef CONFIG_SYS_INIT_RAM_LOCK #define CONFIG_SYS_INIT_RAM_ADDR 0x0fd00000 /* Initial RAM address */ #else #define CONFIG_SYS_INIT_RAM_ADDR 0xf8400000 /* Initial RAM address */ #endif #define CONFIG_SYS_INIT_RAM_SIZE 0x4000 /* Size of used area in RAM */ #define CONFIG_SYS_GBL_DATA_OFFSET (CONFIG_SYS_INIT_RAM_SIZE - GENERATED_GBL_DATA_SIZE) #define CONFIG_SYS_INIT_SP_OFFSET CONFIG_SYS_GBL_DATA_OFFSET #define CONFIG_SYS_MONITOR_LEN (512 * 1024) /* Reserve 512 kB for Mon */ #define CONFIG_SYS_MALLOC_LEN (1024 * 1024) /* Reserved for malloc */ /* Serial Port */ #define CONFIG_SYS_NS16550_SERIAL #define CONFIG_SYS_NS16550_REG_SIZE 1 #define CONFIG_SYS_NS16550_CLK get_bus_freq(0) #define CONFIG_SYS_BAUDRATE_TABLE \ {300, 600, 1200, 2400, 4800, 9600, 19200, 38400,115200} #define CONFIG_SYS_NS16550_COM1 (CONFIG_SYS_CCSRBAR+0x4500) #define CONFIG_SYS_NS16550_COM2 (CONFIG_SYS_CCSRBAR+0x4600) /* * I2C */ #define CONFIG_SYS_I2C #define CONFIG_SYS_I2C_FSL #define CONFIG_SYS_FSL_I2C_SPEED 400000 #define CONFIG_SYS_FSL_I2C_SLAVE 0x7F #define CONFIG_SYS_FSL_I2C_OFFSET 0x3100 #define CONFIG_SYS_I2C_NOPROBES { {0, 0x69} } /* * RapidIO MMU */ #define CONFIG_SYS_SRIO1_MEM_BASE 0x80000000 /* base address */ 
#ifdef CONFIG_PHYS_64BIT #define CONFIG_SYS_SRIO1_MEM_PHYS_LOW 0x00000000 #define CONFIG_SYS_SRIO1_MEM_PHYS_HIGH 0x0000000c #else #define CONFIG_SYS_SRIO1_MEM_PHYS_LOW CONFIG_SYS_SRIO1_MEM_BASE #define CONFIG_SYS_SRIO1_MEM_PHYS_HIGH 0x00000000 #endif #define CONFIG_SYS_SRIO1_MEM_PHYS \ PAIRED_PHYS_TO_PHYS(CONFIG_SYS_SRIO1_MEM_PHYS_LOW, \ CONFIG_SYS_SRIO1_MEM_PHYS_HIGH) #define CONFIG_SYS_SRIO1_MEM_SIZE 0x20000000 /* 128M */ /* * General PCI * Addresses are mapped 1-1. */ #define CONFIG_SYS_PCIE1_NAME "ULI" #define CONFIG_SYS_PCIE1_MEM_VIRT 0x80000000 #ifdef CONFIG_PHYS_64BIT #define CONFIG_SYS_PCIE1_MEM_BUS 0xe0000000 #define CONFIG_SYS_PCIE1_MEM_PHYS_LOW 0x00000000 #define CONFIG_SYS_PCIE1_MEM_PHYS_HIGH 0x0000000c #else #define CONFIG_SYS_PCIE1_MEM_BUS CONFIG_SYS_PCIE1_MEM_VIRT #define CONFIG_SYS_PCIE1_MEM_PHYS_LOW CONFIG_SYS_PCIE1_MEM_VIRT #define CONFIG_SYS_PCIE1_MEM_PHYS_HIGH 0x00000000 #endif #define CONFIG_SYS_PCIE1_MEM_PHYS \ PAIRED_PHYS_TO_PHYS(CONFIG_SYS_PCIE1_MEM_PHYS_LOW, \ CONFIG_SYS_PCIE1_MEM_PHYS_HIGH) #define CONFIG_SYS_PCIE1_MEM_SIZE 0x20000000 /* 512M */ #define CONFIG_SYS_PCIE1_IO_BUS 0x00000000 #define CONFIG_SYS_PCIE1_IO_VIRT 0xffc00000 #define CONFIG_SYS_PCIE1_IO_PHYS_LOW CONFIG_SYS_PCIE1_IO_VIRT #define CONFIG_SYS_PCIE1_IO_PHYS \ PAIRED_PHYS_TO_PHYS(CONFIG_SYS_PCIE1_IO_PHYS_LOW, \ CONFIG_SYS_PHYS_ADDR_HIGH) #define CONFIG_SYS_PCIE1_IO_SIZE 0x00010000 /* 64K */ #ifdef CONFIG_PHYS_64BIT /* * Use the same PCI bus address on PCIE1 and PCIE2 if we have PHYS_64BIT. * This will increase the amount of PCI address space available for * for mapping RAM. */ #define CONFIG_SYS_PCIE2_MEM_BUS CONFIG_SYS_PCIE1_MEM_BUS #else #define CONFIG_SYS_PCIE2_MEM_BUS (CONFIG_SYS_PCIE1_MEM_BUS \ + CONFIG_SYS_PCIE1_MEM_SIZE) #endif #define CONFIG_SYS_PCIE2_MEM_VIRT (CONFIG_SYS_PCIE1_MEM_VIRT \ + CONFIG_SYS_PCIE1_MEM_SIZE) #define CONFIG_SYS_PCIE2_MEM_PHYS_LOW (CONFIG_SYS_PCIE1_MEM_PHYS_LOW \ + CONFIG_SYS_PCIE1_MEM_SIZE) #define CONFIG_SYS_PCIE2_MEM_PHYS_HIGH CONFIG_SYS_PCIE1_MEM_PHYS_HIGH #define CONFIG_SYS_PCIE2_MEM_PHYS (CONFIG_SYS_PCIE1_MEM_PHYS \ + CONFIG_SYS_PCIE1_MEM_SIZE) #define CONFIG_SYS_PCIE2_MEM_SIZE 0x20000000 /* 512M */ #define CONFIG_SYS_PCIE2_IO_BUS 0x00000000 #define CONFIG_SYS_PCIE2_IO_VIRT (CONFIG_SYS_PCIE1_IO_VIRT \ + CONFIG_SYS_PCIE1_IO_SIZE) #define CONFIG_SYS_PCIE2_IO_PHYS_LOW (CONFIG_SYS_PCIE1_IO_PHYS_LOW \ + CONFIG_SYS_PCIE1_IO_SIZE) #define CONFIG_SYS_PCIE2_IO_PHYS (CONFIG_SYS_PCIE1_IO_PHYS \ + CONFIG_SYS_PCIE1_IO_SIZE) #define CONFIG_SYS_PCIE2_IO_SIZE CONFIG_SYS_PCIE1_IO_SIZE #if defined(CONFIG_PCI) #define CONFIG_PCI_SCAN_SHOW /* show pci devices on startup */ #undef CONFIG_EEPRO100 #undef CONFIG_TULIP /************************************************************ * USB support ************************************************************/ #define CONFIG_PCI_OHCI 1 #define CONFIG_USB_OHCI_NEW 1 #define CONFIG_SYS_USB_OHCI_SLOT_NAME "ohci_pci" #define CONFIG_SYS_USB_OHCI_MAX_ROOT_PORTS 15 #define CONFIG_SYS_OHCI_SWAP_REG_ACCESS 1 /*PCIE video card used*/ #define VIDEO_IO_OFFSET CONFIG_SYS_PCIE2_IO_VIRT /*PCI video card used*/ /*#define VIDEO_IO_OFFSET CONFIG_SYS_PCIE1_IO_VIRT*/ /* video */ #if defined(CONFIG_VIDEO) #define CONFIG_BIOSEMU #define CONFIG_ATI_RADEON_FB #define CONFIG_VIDEO_LOGO #define CONFIG_SYS_ISA_IO_BASE_ADDRESS CONFIG_SYS_PCIE2_IO_VIRT #endif #undef CONFIG_PCI_SCAN_SHOW /* show pci devices on startup */ #ifdef CONFIG_SCSI_AHCI #define CONFIG_SATA_ULI5288 #define CONFIG_SYS_SCSI_MAX_SCSI_ID 4 #define CONFIG_SYS_SCSI_MAX_LUN 1 #define 
CONFIG_SYS_SCSI_MAX_DEVICE (CONFIG_SYS_SCSI_MAX_SCSI_ID * CONFIG_SYS_SCSI_MAX_LUN) #define CONFIG_SYS_SCSI_MAXDEVICE CONFIG_SYS_SCSI_MAX_DEVICE #endif #endif /* CONFIG_PCI */ #if defined(CONFIG_TSEC_ENET) #define CONFIG_MII 1 /* MII PHY management */ #define CONFIG_TSEC1 1 #define CONFIG_TSEC1_NAME "eTSEC1" #define CONFIG_TSEC2 1 #define CONFIG_TSEC2_NAME "eTSEC2" #define CONFIG_TSEC3 1 #define CONFIG_TSEC3_NAME "eTSEC3" #define CONFIG_TSEC4 1 #define CONFIG_TSEC4_NAME "eTSEC4" #define TSEC1_PHY_ADDR 0 #define TSEC2_PHY_ADDR 1 #define TSEC3_PHY_ADDR 2 #define TSEC4_PHY_ADDR 3 #define TSEC1_PHYIDX 0 #define TSEC2_PHYIDX 0 #define TSEC3_PHYIDX 0 #define TSEC4_PHYIDX 0 #define TSEC1_FLAGS (TSEC_GIGABIT | TSEC_REDUCED) #define TSEC2_FLAGS (TSEC_GIGABIT | TSEC_REDUCED) #define TSEC3_FLAGS (TSEC_GIGABIT | TSEC_REDUCED) #define TSEC4_FLAGS (TSEC_GIGABIT | TSEC_REDUCED) #define CONFIG_ETHPRIME "eTSEC1" #endif /* CONFIG_TSEC_ENET */ #ifdef CONFIG_PHYS_64BIT #define PHYS_HIGH_TO_BXPN(x) ((x & 0x0000000e) << 8) #define PHYS_HIGH_TO_BX(x) ((x & 0x00000001) << 2) /* Put physical address into the BAT format */ #define BAT_PHYS_ADDR(low, high) \ (low | PHYS_HIGH_TO_BXPN(high) | PHYS_HIGH_TO_BX(high)) /* Convert high/low pairs to actual 64-bit value */ #define PAIRED_PHYS_TO_PHYS(low, high) (low | ((u64)high << 32)) #else /* 32-bit systems just ignore the "high" bits */ #define BAT_PHYS_ADDR(low, high) (low) #define PAIRED_PHYS_TO_PHYS(low, high) (low) #endif /* * BAT0 DDR */ #define CONFIG_SYS_DBAT0L (BATL_PP_RW | BATL_MEMCOHERENCE) #define CONFIG_SYS_IBAT0L (BATL_PP_RW | BATL_MEMCOHERENCE) /* * BAT1 LBC (PIXIS/CF) */ #define CONFIG_SYS_DBAT1L (BAT_PHYS_ADDR(CONFIG_SYS_LBC_BASE_PHYS_LOW, \ CONFIG_SYS_PHYS_ADDR_HIGH) \ | BATL_PP_RW | BATL_CACHEINHIBIT | \ BATL_GUARDEDSTORAGE) #define CONFIG_SYS_DBAT1U (CONFIG_SYS_LBC_BASE | BATU_BL_128K \ | BATU_VS | BATU_VP) #define CONFIG_SYS_IBAT1L (BAT_PHYS_ADDR(CONFIG_SYS_LBC_BASE_PHYS_LOW, \ CONFIG_SYS_PHYS_ADDR_HIGH) \ | BATL_PP_RW | BATL_MEMCOHERENCE) #define CONFIG_SYS_IBAT1U CONFIG_SYS_DBAT1U /* if CONFIG_PCI: * BAT2 PCIE1 and PCIE1 MEM * if CONFIG_RIO * BAT2 Rapidio Memory */ #ifdef CONFIG_PCI #define CONFIG_PCI_INDIRECT_BRIDGE #define CONFIG_SYS_DBAT2L (BAT_PHYS_ADDR(CONFIG_SYS_PCIE1_MEM_PHYS_LOW, \ CONFIG_SYS_PCIE1_MEM_PHYS_HIGH) \ | BATL_PP_RW | BATL_CACHEINHIBIT \ | BATL_GUARDEDSTORAGE) #define CONFIG_SYS_DBAT2U (CONFIG_SYS_PCIE1_MEM_VIRT | BATU_BL_1G \ | BATU_VS | BATU_VP) #define CONFIG_SYS_IBAT2L (BAT_PHYS_ADDR(CONFIG_SYS_PCIE1_MEM_PHYS_LOW, \ CONFIG_SYS_PCIE1_MEM_PHYS_HIGH) \ | BATL_PP_RW | BATL_CACHEINHIBIT) #define CONFIG_SYS_IBAT2U CONFIG_SYS_DBAT2U #else /* CONFIG_RIO */ #define CONFIG_SYS_DBAT2L (BAT_PHYS_ADDR(CONFIG_SYS_SRIO1_MEM_PHYS_LOW, \ CONFIG_SYS_SRIO1_MEM_PHYS_HIGH) \ | BATL_PP_RW | BATL_CACHEINHIBIT | \ BATL_GUARDEDSTORAGE) #define CONFIG_SYS_DBAT2U (CONFIG_SYS_SRIO1_MEM_BASE | BATU_BL_512M \ | BATU_VS | BATU_VP) #define CONFIG_SYS_IBAT2L (BAT_PHYS_ADDR(CONFIG_SYS_SRIO1_MEM_PHYS_LOW, \ CONFIG_SYS_SRIO1_MEM_PHYS_HIGH) \ | BATL_PP_RW | BATL_CACHEINHIBIT) #define CONFIG_SYS_IBAT2U CONFIG_SYS_DBAT2U #endif /* * BAT3 CCSR Space */ #define CONFIG_SYS_DBAT3L (BAT_PHYS_ADDR(CONFIG_SYS_CCSRBAR_PHYS_LOW, \ CONFIG_SYS_CCSRBAR_PHYS_HIGH) \ | BATL_PP_RW | BATL_CACHEINHIBIT \ | BATL_GUARDEDSTORAGE) #define CONFIG_SYS_DBAT3U (CONFIG_SYS_CCSRBAR | BATU_BL_1M | BATU_VS \ | BATU_VP) #define CONFIG_SYS_IBAT3L (BAT_PHYS_ADDR(CONFIG_SYS_CCSRBAR_PHYS_LOW, \ CONFIG_SYS_CCSRBAR_PHYS_HIGH) \ | BATL_PP_RW | BATL_CACHEINHIBIT) #define CONFIG_SYS_IBAT3U 
CONFIG_SYS_DBAT3U #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR) #define CONFIG_SYS_CCSR_DEFAULT_DBATL (CONFIG_SYS_CCSRBAR_DEFAULT \ | BATL_PP_RW | BATL_CACHEINHIBIT \ | BATL_GUARDEDSTORAGE) #define CONFIG_SYS_CCSR_DEFAULT_DBATU (CONFIG_SYS_CCSRBAR_DEFAULT \ | BATU_BL_1M | BATU_VS | BATU_VP) #define CONFIG_SYS_CCSR_DEFAULT_IBATL (CONFIG_SYS_CCSRBAR_DEFAULT \ | BATL_PP_RW | BATL_CACHEINHIBIT) #define CONFIG_SYS_CCSR_DEFAULT_IBATU CONFIG_SYS_CCSR_DEFAULT_DBATU #endif /* * BAT4 PCIE1_IO and PCIE2_IO */ #define CONFIG_SYS_DBAT4L (BAT_PHYS_ADDR(CONFIG_SYS_PCIE1_IO_PHYS_LOW, \ CONFIG_SYS_PHYS_ADDR_HIGH) \ | BATL_PP_RW | BATL_CACHEINHIBIT \ | BATL_GUARDEDSTORAGE) #define CONFIG_SYS_DBAT4U (CONFIG_SYS_PCIE1_IO_VIRT | BATU_BL_128K \ | BATU_VS | BATU_VP) #define CONFIG_SYS_IBAT4L (BAT_PHYS_ADDR(CONFIG_SYS_PCIE1_IO_PHYS_LOW, \ CONFIG_SYS_PHYS_ADDR_HIGH) \ | BATL_PP_RW | BATL_CACHEINHIBIT) #define CONFIG_SYS_IBAT4U CONFIG_SYS_DBAT4U /* * BAT5 Init RAM for stack in the CPU DCache (no backing memory) */ #define CONFIG_SYS_DBAT5L (CONFIG_SYS_INIT_RAM_ADDR | BATL_PP_RW | BATL_MEMCOHERENCE) #define CONFIG_SYS_DBAT5U (CONFIG_SYS_INIT_RAM_ADDR | BATU_BL_128K | BATU_VS | BATU_VP) #define CONFIG_SYS_IBAT5L CONFIG_SYS_DBAT5L #define CONFIG_SYS_IBAT5U CONFIG_SYS_DBAT5U /* * BAT6 FLASH */ #define CONFIG_SYS_DBAT6L (BAT_PHYS_ADDR(CONFIG_SYS_FLASH_BASE_PHYS_LOW, \ CONFIG_SYS_PHYS_ADDR_HIGH) \ | BATL_PP_RW | BATL_CACHEINHIBIT \ | BATL_GUARDEDSTORAGE) #define CONFIG_SYS_DBAT6U (CONFIG_SYS_FLASH_BASE | BATU_BL_8M | BATU_VS \ | BATU_VP) #define CONFIG_SYS_IBAT6L (BAT_PHYS_ADDR(CONFIG_SYS_FLASH_BASE_PHYS_LOW, \ CONFIG_SYS_PHYS_ADDR_HIGH) \ | BATL_PP_RW | BATL_MEMCOHERENCE) #define CONFIG_SYS_IBAT6U CONFIG_SYS_DBAT6U /* Map the last 1M of flash where we're running from reset */ #define CONFIG_SYS_DBAT6L_EARLY (CONFIG_SYS_MONITOR_BASE_EARLY | BATL_PP_RW \ | BATL_CACHEINHIBIT | BATL_GUARDEDSTORAGE) #define CONFIG_SYS_DBAT6U_EARLY (CONFIG_SYS_TEXT_BASE | BATU_BL_1M | BATU_VS | BATU_VP) #define CONFIG_SYS_IBAT6L_EARLY (CONFIG_SYS_MONITOR_BASE_EARLY | BATL_PP_RW \ | BATL_MEMCOHERENCE) #define CONFIG_SYS_IBAT6U_EARLY CONFIG_SYS_DBAT6U_EARLY /* * BAT7 FREE - used later for tmp mappings */ #define CONFIG_SYS_DBAT7L 0x00000000 #define CONFIG_SYS_DBAT7U 0x00000000 #define CONFIG_SYS_IBAT7L 0x00000000 #define CONFIG_SYS_IBAT7U 0x00000000 /* * Environment */ #ifndef CONFIG_SYS_RAMBOOT #define CONFIG_ENV_ADDR \ (CONFIG_SYS_MONITOR_BASE + CONFIG_SYS_MONITOR_LEN) #define CONFIG_ENV_SECT_SIZE 0x10000 /* 64K(one sector) for env */ #else #define CONFIG_ENV_ADDR (CONFIG_SYS_MONITOR_BASE - 0x1000) #endif #define CONFIG_ENV_SIZE 0x2000 #define CONFIG_LOADS_ECHO 1 /* echo on for serial download */ #define CONFIG_SYS_LOADS_BAUD_CHANGE 1 /* allow baudrate change */ /* * BOOTP options */ #define CONFIG_BOOTP_BOOTFILESIZE #undef CONFIG_WATCHDOG /* watchdog disabled */ /* * Miscellaneous configurable options */ #define CONFIG_SYS_LOAD_ADDR 0x2000000 /* default load address */ /* * For booting Linux, the board info and command line data * have to be in the first 8 MB of memory, since this is * the maximum mapped by the Linux kernel during initialization. 
*/ #define CONFIG_SYS_BOOTMAPSZ (256 << 20) /* Initial Memory map for Linux*/ #define CONFIG_SYS_BOOTM_LEN (256 << 20) /* Increase max gunzip size */ #if defined(CONFIG_CMD_KGDB) #define CONFIG_KGDB_BAUDRATE 230400 /* speed to run kgdb serial port */ #endif /* * Environment Configuration */ #define CONFIG_HAS_ETH0 1 #define CONFIG_HAS_ETH1 1 #define CONFIG_HAS_ETH2 1 #define CONFIG_HAS_ETH3 1 #define CONFIG_IPADDR 192.168.1.100 #define CONFIG_HOSTNAME "unknown" #define CONFIG_ROOTPATH "/opt/nfsroot" #define CONFIG_BOOTFILE "uImage" #define CONFIG_UBOOTPATH u-boot.bin /* U-Boot image on TFTP server */ #define CONFIG_SERVERIP 192.168.1.1 #define CONFIG_GATEWAYIP 192.168.1.1 #define CONFIG_NETMASK 255.255.255.0 /* default location for tftp and bootm */ #define CONFIG_LOADADDR 0x10000000 #define CONFIG_EXTRA_ENV_SETTINGS \ "netdev=eth0\0" \ "uboot=" __stringify(CONFIG_UBOOTPATH) "\0" \ "tftpflash=tftpboot $loadaddr $uboot; " \ "protect off " __stringify(CONFIG_SYS_TEXT_BASE) \ " +$filesize; " \ "erase " __stringify(CONFIG_SYS_TEXT_BASE) \ " +$filesize; " \ "cp.b $loadaddr " __stringify(CONFIG_SYS_TEXT_BASE) \ " $filesize; " \ "protect on " __stringify(CONFIG_SYS_TEXT_BASE) \ " +$filesize; " \ "cmp.b $loadaddr " __stringify(CONFIG_SYS_TEXT_BASE) \ " $filesize\0" \ "consoledev=ttyS0\0" \ "ramdiskaddr=0x18000000\0" \ "ramdiskfile=your.ramdisk.u-boot\0" \ "fdtaddr=0x17c00000\0" \ "fdtfile=mpc8641_hpcn.dtb\0" \ "en-wd=mw.b ffdf0010 0x08; echo -expect:- 08; md.b ffdf0010 1\0" \ "dis-wd=mw.b ffdf0010 0x00; echo -expect:- 00; md.b ffdf0010 1\0" \ "maxcpus=2" #define CONFIG_NFSBOOTCOMMAND \ "setenv bootargs root=/dev/nfs rw " \ "nfsroot=$serverip:$rootpath " \ "ip=$ipaddr:$serverip:$gatewayip:$netmask:$hostname:$netdev:off " \ "console=$consoledev,$baudrate $othbootargs;" \ "tftp $loadaddr $bootfile;" \ "tftp $fdtaddr $fdtfile;" \ "bootm $loadaddr - $fdtaddr" #define CONFIG_RAMBOOTCOMMAND \ "setenv bootargs root=/dev/ram rw " \ "console=$consoledev,$baudrate $othbootargs;" \ "tftp $ramdiskaddr $ramdiskfile;" \ "tftp $loadaddr $bootfile;" \ "tftp $fdtaddr $fdtfile;" \ "bootm $loadaddr $ramdiskaddr $fdtaddr" #define CONFIG_BOOTCOMMAND CONFIG_NFSBOOTCOMMAND #endif /* __CONFIG_H */
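/*
 * Illustrative sketch (not part of the original board config): how the
 * BAT_PHYS_ADDR and PAIRED_PHYS_TO_PHYS helpers above pack a 36-bit
 * physical address when CONFIG_PHYS_64BIT is set. This is standalone C
 * with the macros restated locally and a made-up sample address, so it
 * builds without the U-Boot tree.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define PHYS_HIGH_TO_BXPN(x)	((x & 0x0000000e) << 8)
#define PHYS_HIGH_TO_BX(x)	((x & 0x00000001) << 2)
#define BAT_PHYS_ADDR(low, high) \
	(low | PHYS_HIGH_TO_BXPN(high) | PHYS_HIGH_TO_BX(high))
#define PAIRED_PHYS_TO_PHYS(low, high)	(low | ((u64)high << 32))

int main(void)
{
	/* sample 36-bit physical address 0xf_e0000000, split into 32-bit halves */
	uint32_t low = 0xe0000000;
	uint32_t high = 0xf;

	printf("BAT word:       0x%08x\n", BAT_PHYS_ADDR(low, high));	/* 0xe0000e04 */
	printf("64-bit address: 0x%llx\n",
	       (unsigned long long)PAIRED_PHYS_TO_PHYS(low, high));	/* 0xfe0000000 */
	return 0;
}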
l2dy/machg
Classes/FilesView/FSViewerPaneCell.h
<gh_stars>0
//
// FSBrowserCell.h
//
// Copyright (c) 2001-2007, Apple Inc. All rights reserved.
//
// FSBrowserCell knows how to display file system info obtained from an FSNodeInfo object.
//
// Extensively modified by <NAME>.
// Copyright 2010 <NAME>. All rights reserved.
// This software is licensed under the "New BSD License". The full license text is given in the file License.txt
//

#import <Cocoa/Cocoa.h>
#import "Common.h"

#define ICON_INSET_VERT     2.0  // The size of empty space between the icon and the top/bottom of the cell
#define ICON_SIZE          16.0  // Our icons are ICON_SIZE x ICON_SIZE
#define ICON_INSET_HORIZ    4.0  // Distance to inset the icon from the left edge.
#define ICON_INTERSPACING   5.0  // Distance between the status icons and the file icon if the file icon is present.
#define ICON_TEXT_SPACING   4.0  // Distance between the end of the icon and the text part
#define DISCLOSURE_SIZE     7.0  // The space to reserve for the disclosure arrow if we are in an outline cell
#define DISCLOSURE_SPACING  3.0  // The space on either side to reserve for the disclosure arrow if we are in an outline cell

@interface FSViewerPaneCell : NSTextFieldCell
@property FSNodeInfo* nodeInfo;
@property FSNodeInfo* parentNodeInfo;
+ (NSSize) iconRowSize:(FSNodeInfo*)parentNodeInfo;  // The maximum size needed for any child's icon row
- (void) loadCellContents;
@end

@interface FSViewerPaneIconedCell : FSViewerPaneCell
@property NSImage* fileIcon;
@end

@interface FSViewerOutlinePaneIconedCell : FSViewerPaneIconedCell
@end
rdkcmf/rdkc-rms
sources/thelib/src/protocols/rawmedia/rawmediaprotocol.cpp
/** ########################################################################## # If not stated otherwise in this file or this component's LICENSE # file the following copyright and licenses apply: # # Copyright 2019 RDK Management # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ########################################################################## **/ #ifdef HAS_PROTOCOL_RAWMEDIA #include "application/baseclientapplication.h" #include "protocols/rawmedia/rawmediaprotocol.h" #include "protocols/rawmedia/streaming/innetrawstream.h" #include "streaming/streamstypes.h" #include "streaming/codectypes.h" #define MAX_BUFFER_THRESHOLD 1024 * 128 #define FLAG_PACKET_TYPE_CONFIG 0x80 #define FLAG_STREAM_CONFIG 0x40 #define FLAG_MEDIA_TYPE_VIDEO 0x0 #define FLAG_MEDIA_TYPE_AUDIO 0x1 //#define FLAG_PACKET_TYPE_DATA 0x02 #define FLAG_PACKET_TYPE_DATA 0x00 //#define FLAG_CONFIG (FLAG_PACKET_TYPE_CONFIG | FLAG_STREAM_CONFIG | FLAG_MEDIA_TYPE_VIDEO | FLAG_MEDIA_TYPE_AUDIO | FLAG_PACKET_TYPE_DATA) #define FLAG_CONFIG (FLAG_PACKET_TYPE_CONFIG | FLAG_STREAM_CONFIG) #define RESP_OK 0x00 #define RESP_STREAMNAME_TAKEN 0x01 #define FLAG_AUDIO_TYPE_CONFIG 0x06 #define FLAG_AUDIO_TYPE_AAC 0x00 #define FLAG_AUDIO_TYPE_G711 0x02 RawMediaProtocol::RawMediaProtocol() :BaseProtocol(PT_RAW_MEDIA){ _pStream = NULL; } RawMediaProtocol::~RawMediaProtocol() { if(_pStream != NULL) { INFO("Deleting _pStream\n"); delete _pStream; _pStream = NULL; } } bool RawMediaProtocol::AllowFarProtocol(uint64_t type) { return (type == PT_TCP) || (type == PT_UDP) || (type == PT_UDS) || (type == PT_API_INTEGRATION); } bool RawMediaProtocol::AllowNearProtocol(uint64_t type) { return false; } IOBuffer * RawMediaProtocol::GetOutputBuffer() { if (GETAVAILABLEBYTESCOUNT(_outputBuffer) > 0) return &_outputBuffer; return NULL; } bool RawMediaProtocol::SignalInputData(int32_t recvAmount) { NYIR; } bool RawMediaProtocol::SignalInputData(IOBuffer &buffer) { uint8_t *pData = GETIBPOINTER(buffer); uint32_t dataLength = GETAVAILABLEBYTESCOUNT(buffer) - 4; // Get the expected length and see if we need to wait for the rest uint32_t expectedLength = ENTOHLP(pData); if (expectedLength > dataLength) { return true; } dataLength = expectedLength; // only read what is expected pData += 4; // skip the expected length uint8_t configType = (pData[0] & FLAG_CONFIG); //#ifdef UDS_DEBUG // FATAL("pdata: %"PRIu8", configType: %"PRIu8, pData[0], configType); //#endif //FINE("RawMediaProtocol::SignalInputData pData[0]: %x configType: %x FLAG_CONFIG %x\n", pData[0], configType, FLAG_CONFIG); /*if ( (configType == FLAG_PACKET_TYPE_DATA) || (configType == FLAG_MEDIA_TYPE_VIDEO) || (configType == FLAG_MEDIA_TYPE_AUDIO) ) {*/ if ( (configType == FLAG_PACKET_TYPE_DATA) || (pData[0] == FLAG_MEDIA_TYPE_AUDIO)) { if (_pStream == NULL) { // no stream yet WARN("No existing stream yet"); buffer.IgnoreAll(); return true; } // audio or video ? 
bool isAudio = (pData[0] & FLAG_MEDIA_TYPE_AUDIO); //FINE("RawMediaProtocol::SignalInputData pData[0]: %d isAudio: %d\n", pData[0], isAudio); pData++; dataLength--; //#ifdef UDS_DEBUG // FATAL("[Debug] Got %s data", isAudio ? "audio" : "video"); //#endif // get timestamps (value will be in microseconds) uint64_t rawTs = 0; for (uint32_t i = 0; i < 8; i++) { rawTs = rawTs << 8; rawTs |= pData[i]; } double ts = (double) rawTs / 1000; // normalize to milliseconds //#ifdef UDS_DEBUG // FATAL("[Debug] ts = %.3f", ts); //#endif pData += 8; dataLength -= 8; _pStream->FeedData(pData, dataLength, ts, isAudio); } else if (configType == FLAG_PACKET_TYPE_CONFIG) { if (_pStream == NULL) { // no stream yet WARN("No existing stream yet"); buffer.IgnoreAll(); return true; } bool isAudio = (pData[0] & FLAG_MEDIA_TYPE_AUDIO); #ifdef UDS_DEBUG FATAL("[Debug] Got %s config", isAudio ? "audio" : "video"); #endif if (isAudio) { // 0x00 - AAC // 0x02 - G711 uint64_t codec = CODEC_AUDIO_AAC; if ((pData[0] & FLAG_AUDIO_TYPE_CONFIG) == FLAG_AUDIO_TYPE_G711) { codec = CODEC_AUDIO_G711; } pData++; dataLength--; INFO("RawMediaProtocol::SignalInputData codec--------> %d dataLength-----------> %d\n", codec, dataLength); _pStream->SetAudioConfig(codec, pData, dataLength); } else { WARN("Video config not handled."); } } else if (configType == FLAG_STREAM_CONFIG) { // stream config flag is on // check _pStream if (_pStream != NULL) { // already existing WARN("Protocol has an existing stream. Ignoring config"); buffer.IgnoreAll(); return true; } // gather streamname pData++; dataLength--; string streamname((char *) pData, dataLength); #ifdef UDS_DEBUG FATAL("streamName: %s", STR(streamname)); #endif // check with streams manager BaseClientApplication *pApp = GetApplication(); if (pApp == NULL){ buffer.IgnoreAll(); return false; } StreamsManager *pSM = pApp->GetStreamsManager(); if (pSM == NULL) { buffer.IgnoreAll(); return true; } uint8_t responseCode = RESP_OK; map<uint32_t, BaseStream *> streamResults = pSM->FindByName(streamname); if (streamResults.size() > 0) { // already existing FOR_MAP(streamResults, uint32_t, BaseStream *, i) { if (TAG_KIND_OF(MAP_VAL(i)->GetType(), ST_IN)) { WARN("Stream name already exist. Cannot create stream"); responseCode = RESP_STREAMNAME_TAKEN; break; } } } else { // create stream _pStream = new InNetRawStream(this, streamname); _pStream->SetStreamsManager(pSM); } _outputBuffer.ReadFromByte(responseCode); #ifdef UDS_DEBUG FATAL("Response code: %"PRIu8, responseCode); #endif buffer.Ignore(expectedLength + 4); // ignore the read data and length header return EnqueueForOutbound(); } else { INFO("Unknown packets"); // return false; } buffer.Ignore(expectedLength + 4); // ignore the read data and length header return true; } #endif /* HAS_PROTOCOL_RAWMEDIA */
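/*
 * Illustrative sketch (not part of the protocol source above): the wire
 * format SignalInputData() parses is a 4-byte big-endian payload length,
 * a flag byte, and, for data packets, an 8-byte big-endian timestamp in
 * microseconds. Standalone C++ showing the same timestamp decode.
 */
#include <cstdint>
#include <cstdio>

// Big-endian 64-bit read, the same loop as the timestamp code in SignalInputData().
static uint64_t ReadBE64(const uint8_t *p) {
	uint64_t v = 0;
	for (int i = 0; i < 8; i++) {
		v = (v << 8) | p[i];
	}
	return v;
}

int main() {
	// 1,000,000 microseconds (0x0F4240) encoded big-endian
	const uint8_t ts[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0x40};
	double ms = (double) ReadBE64(ts) / 1000; // normalize to milliseconds
	std::printf("%.3f ms\n", ms);             // prints 1000.000 ms
	return 0;
}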
tobiasraabe/respy_for_ma
respy/python/simulate/simulate_auxiliary.py
<filename>respy/python/simulate/simulate_auxiliary.py import copy import os import numpy as np import pandas as pd from respy.pre_processing.data_checking import check_estimation_dataset from respy.python.shared.shared_auxiliary import dist_class_attributes from respy.python.shared.shared_auxiliary import get_conditional_probabilities def construct_transition_matrix(base_df): """ This method constructs the transition matrix. """ df = base_df.copy(deep=True) df["Choice_Next"] = df.groupby(level="Identifier")["Choice"].shift(-1) args = [] for label in ["Choice", "Choice_Next"]: args += [pd.Categorical(df[label], categories=range(1, 5))] tm = pd.crosstab(*args, normalize="index", dropna=False).to_numpy() return tm def get_final_education(agent): """ This method construct the final level of schooling for each individual. """ edu_final = agent["Years_Schooling"].iloc[0] + agent["Choice"].eq(3).sum() # As a little test, we just ensure that the final level of education is equal or # less the level the agent entered the final period. valid = [agent["Years_Schooling"].iloc[-1], agent["Years_Schooling"].iloc[-1] + 1] np.testing.assert_equal(edu_final in valid, True) return edu_final def write_info(respy_obj, data_frame): """ Write information about the simulated economy. """ # Distribute class attributes optim_paras, num_types, file_sim, seed_sim, edu_spec = dist_class_attributes( respy_obj, "optim_paras", "num_types", "file_sim", "seed_sim", "edu_spec" ) # Get basic information num_agents_sim = data_frame["Identifier"].unique().shape[0] num_periods = data_frame["Period"].unique().shape[0] # Write information to file with open(file_sim + ".respy.info", "w") as file_: file_.write("\n Simulated Economy\n\n") file_.write(" Number of Agents: " + str(num_agents_sim) + "\n\n") file_.write(" Number of Periods: " + str(num_periods) + "\n\n") file_.write(" Seed: " + str(seed_sim) + "\n\n\n") file_.write(" Choices\n\n") fmt_ = "{:>10}" + "{:>14}" * 4 + "\n\n" labels = ["Period", "Work A", "Work B", "School", "Home"] file_.write(fmt_.format(*labels)) choices = data_frame["Choice"] for t in range(num_periods): args = [] for decision in [1, 2, 3, 4]: args += [choices.loc[slice(None), t].eq(decision).sum()] args = [x / float(num_agents_sim) for x in args] fmt_ = "{:>10}" + "{:14.4f}" * 4 + "\n" file_.write(fmt_.format((t + 1), *args)) # We also print out the transition matrix as it provides some insights about the # persistence of choices. However, we can only compute this transition matrix if # the number of periods is larger than one. if num_periods > 1: file_.write("\n\n") file_.write(" Transition Matrix\n\n") fmt_ = "{:>10}" + "{:>14}" * 4 + "\n\n" labels = ["Work A", "Work B", "School", "Home"] file_.write(fmt_.format(*[""] + labels)) tb = construct_transition_matrix(data_frame) for i in range(4): fmt_ = " {:6}" + "{:14.4f}" * 4 + "\n" line = [labels[i]] + tb[i, :].tolist() # In contrast to the official documentation, the crosstab command omits # categories in the current pandas release when they are not part of the # data. We suspect this will be ironed out in the next releases. try: file_.write(fmt_.format(*line)) except IndexError: pass file_.write("\n\n") # Now we can turn to the outcome information. file_.write(" Outcomes\n\n") for j, label in enumerate(["A", "B"]): file_.write(" Occupation " + label + "\n\n") fmt_ = "{:>10}" + "{:>14}" * 6 + "\n\n" labels = [] labels += [" Period", "Counts", "Mean", "S.-Dev.", "2. Decile"] labels += ["5. Decile", "8. 
Decile"] file_.write(fmt_.format(*labels)) for t in range(num_periods): is_working = choices.loc[slice(None), t] == j + 1 wages = data_frame["Wage"].loc[slice(None), t][is_working] count = wages.count() if count > 0: mean, sd = np.mean(wages), np.sqrt(np.var(wages)) percentiles = np.percentile(wages, [20, 50, 80]).tolist() else: mean, sd = "---", "---" percentiles = ["---", "---", "---"] values = [t + 1] values += [count, mean, sd] values += percentiles fmt_ = "{:>10} " + "{:>10} " * 6 + "\n" if count > 0: fmt_ = "{:>10} {:>10}" + "{:14.4f}" * 5 + "\n" file_.write(fmt_.format(*values)) file_.write("\n") file_.write("\n") # Additional information about the simulated economy fmt_ = " {:<16}" + " {:15.5f}\n" file_.write(" Additional Information\n\n") stat = data_frame["Choice"].eq(1).sum() / float(num_agents_sim) file_.write(fmt_.format(*["Average Work A", stat])) stat = data_frame["Choice"].eq(2).sum() / float(num_agents_sim) file_.write(fmt_.format(*["Average Work B", stat])) # The calculation of years of schooling is a little more difficult to determine # as we need to account for the different levels of initial schooling. The # column on Years_Schooling only contains information on the level of schooling # attainment going in the period, thus is not identical to the final level of # schooling for individuals that enroll in school in the very last period. stat = data_frame.groupby(level="Identifier").apply(get_final_education).mean() file_.write(fmt_.format(*["Average School", stat])) stat = data_frame["Choice"].eq(4).sum() / float(num_agents_sim) file_.write(fmt_.format(*["Average Home", stat])) file_.write("\n") file_.write("\n\n Schooling by Type\n\n") cat_schl = pd.Categorical( data_frame["Years_Schooling"][:, 0], categories=edu_spec["start"] ) cat_type = pd.Categorical(data_frame["Type"][:, 0], categories=range(num_types)) for normalize in ["all", "columns", "index"]: if normalize == "columns": file_.write("\n ... by Type \n\n") num_columns = num_types + 1 elif normalize == "index": file_.write("\n ... by Schooling \n\n") num_columns = num_types else: file_.write("\n ... jointly \n\n") num_columns = num_types info = pd.crosstab( cat_schl, cat_type, normalize=normalize, dropna=False, margins=True ).to_numpy() fmt_ = " {:>10} " + "{:>25}" * num_columns + "\n\n" line = ["Schooling"] for i in range(num_types): line += ["Type " + str(i)] if num_columns == num_types + 1: line += ["All"] file_.write(fmt_.format(*line)) fmt_ = " {:>10} " + "{:25.5f}" * num_columns + "\n" for i, start in enumerate(edu_spec["start"]): line = [start] + info[i, :].tolist() file_.write(fmt_.format(*line)) if normalize == "index": fmt_ = " {:>10} " + "{:25.5f}" * num_columns + "\n" line = ["All"] + info[-1, :].tolist() file_.write(fmt_.format(*line)) # We want to provide information on the value of the lagged activity when # entering the model based on the level of initial education. 
cat_1 = pd.Categorical( data_frame["Years_Schooling"][:, 0], categories=edu_spec["start"] ) cat_2 = pd.Categorical(data_frame["Lagged_Choice"][:, 0], categories=[3, 4]) info = pd.crosstab(cat_1, cat_2, normalize=normalize, dropna=False).to_numpy() file_.write("\n\n Initial Lagged Activity by Schooling\n\n") fmt_ = "\n {:>10}" + " {:>25}" + "{:>25}\n\n" file_.write(fmt_.format(*["Schooling", "Lagged Schooling", "Lagged Home"])) for i, edu_start in enumerate(edu_spec["start"]): fmt_ = " {:>10}" + " {:25.5f}" + "{:25.5f}\n" file_.write(fmt_.format(*[edu_start] + info[i].tolist())) file_.write("\n\n Economic Parameters\n\n") fmt_ = "\n {0:>10}" + " {1:>25}\n\n" file_.write(fmt_.format(*["Identifier", "Value"])) vector = get_estimation_vector(optim_paras) fmt_ = " {:>10}" + " {:25.5f}\n" for i, stat in enumerate(vector): file_.write(fmt_.format(*[i, stat])) def write_out(respy_obj, data_frame): """ Write dataset to file. """ # Distribute class attributes file_sim = respy_obj.get_attr("file_sim") # We maintain several versions of the file. with open(file_sim + ".respy.dat", "w") as file_: data_frame.to_string(file_, index=False, header=True, na_rep=".") data_frame.to_pickle(file_sim + ".respy.pkl") def format_float(x): """ Pretty formatting for floats """ if pd.isnull(x): return " ." else: return "{0:10.2f}".format(x) def format_integer(x): """ Pretty formatting for integers. """ if pd.isnull(x): return " ." else: return "{0:<5}".format(int(x)) def get_estimation_vector(optim_paras): """ Construct the vector estimation arguments. """ # Auxiliary objects num_types = int(len(optim_paras["type_shares"]) / 2) # Collect parameters vector = [] vector += optim_paras["delta"].tolist() vector += optim_paras["coeffs_a"].tolist() vector += optim_paras["coeffs_b"].tolist() vector += optim_paras["coeffs_edu"].tolist() vector += optim_paras["coeffs_home"].tolist() vector += optim_paras["shocks_cholesky"][0, :1].tolist() vector += optim_paras["shocks_cholesky"][1, :2].tolist() vector += optim_paras["shocks_cholesky"][2, :3].tolist() vector += optim_paras["shocks_cholesky"][3, :4].tolist() vector += optim_paras["type_shares"].tolist() for i in range(1, num_types): vector += optim_paras["type_shifts"][i, :].tolist() # Type conversion vector = np.array(vector) # Finishing return vector def check_dataset_sim(data_frame, respy_obj): """ This routine runs some consistency checks on the simulated dataset. Some more restrictions are imposed on the simulated dataset than the observed data. """ # Distribute class attributes num_agents = respy_obj.get_attr("num_agents_sim") num_periods = respy_obj.get_attr("num_periods") num_types = respy_obj.get_attr("num_types") # Some auxiliary functions for later def check_check_time_constant(group): np.testing.assert_equal(group["Type"].nunique(), 1) def check_number_periods(group): np.testing.assert_equal(group["Period"].count(), num_periods) # So, we run all checks on the observed dataset. check_estimation_dataset(data_frame, respy_obj) # Checks for PERIODS dat = data_frame["Period"] np.testing.assert_equal(dat.max(), num_periods - 1) # Checks for IDENTIFIER dat = data_frame["Identifier"] np.testing.assert_equal(dat.max(), num_agents - 1) # Checks for TYPES dat = data_frame["Type"] np.testing.assert_equal(dat.max() <= num_types - 1, True) np.testing.assert_equal(dat.isnull().any(), False) data_frame.groupby(level="Identifier").apply(check_check_time_constant) # Check that there are not missing wage observations if an agent is working. 
Also, # we check that if an agent is not working, there also is no wage observation. is_working = data_frame["Choice"].isin([1, 2]) dat = data_frame["Wage"][is_working] np.testing.assert_equal(dat.isnull().any(), False) dat = data_frame["Wage"][~is_working] np.testing.assert_equal(dat.isnull().all(), True) # Check that there are no missing observations and we follow an agent each period. data_frame.groupby(level="Identifier").apply(check_number_periods) def sort_type_info(optim_paras, num_types): """ We fix an order for the sampling of the types. """ type_info = {} # We simply fix the order by the size of the intercepts. type_info["order"] = np.argsort(optim_paras["type_shares"].tolist()[0::2]) # We need to reorder the coefficients determining the type probabilities # accordingly. type_shares = [] for i in range(num_types): lower, upper = i * 2, (i + 1) * 2 type_shares += [optim_paras["type_shares"][lower:upper].tolist()] type_info["shares"] = np.array( [type_shares[i] for i in type_info["order"]] ).flatten() return type_info def sort_edu_spec(edu_spec): """ This function sorts the dictionary that provides the information about initial education. It adjusts the order of the shares accordingly. """ edu_start_ordered = sorted(edu_spec["start"]) edu_share_ordered = [] edu_lag_ordered = [] for start in edu_start_ordered: idx = edu_spec["start"].index(start) edu_share_ordered += [edu_spec["share"][idx]] edu_lag_ordered += [edu_spec["lagged"][idx]] edu_spec_ordered = copy.deepcopy(edu_spec) edu_spec_ordered["start"] = edu_start_ordered edu_spec_ordered["share"] = edu_share_ordered edu_spec_ordered["lagged"] = edu_lag_ordered return edu_spec_ordered def get_random_types(num_types, optim_paras, num_agents_sim, edu_start, is_debug): """ This function provides random draws for the types, or reads them in from a file. """ # We want to ensure that the order of types in the initialization file does not # matter for the simulated sample. type_info = sort_type_info(optim_paras, num_types) if is_debug and os.path.exists(".types.respy.test"): types = np.genfromtxt(".types.respy.test") else: types = [] for i in range(num_agents_sim): probs = get_conditional_probabilities( type_info["shares"], np.array([edu_start[i]]) ) types += np.random.choice(type_info["order"], p=probs, size=1).tolist() # If we only have one individual, we need to ensure that types are a vector. types = np.array(types, ndmin=1) return types def get_random_edu_start(edu_spec, num_agents_sim, is_debug): """ This function provides random draws for the initial schooling level, or reads them in from a file. """ # We want to ensure that the order of initial schooling levels in the initialization # files does not matter for the simulated sample. That is why we create an ordered # version for this function. edu_spec_ordered = sort_edu_spec(edu_spec) if is_debug and os.path.exists(".initial_schooling.respy.test"): edu_start = np.genfromtxt(".initial_schooling.respy.test") else: # As we do not want to be too strict at the user-level the sum of edu_spec might # be slightly larger than one. This needs to be corrected here. probs = edu_spec_ordered["share"] / np.sum(edu_spec_ordered["share"]) edu_start = np.random.choice( edu_spec_ordered["start"], p=probs, size=num_agents_sim ) # If we only have one individual, we need to ensure that types are a vector. 
edu_start = np.array(edu_start, ndmin=1) return edu_start def get_random_choice_lagged_start(edu_spec, num_agents_sim, edu_start, is_debug): """ This function provides values for the initial lagged choice. The values are random draws or read in from a file. """ if is_debug and os.path.exists(".initial_lagged.respy.test"): lagged_start = np.genfromtxt(".initial_lagged.respy.test") else: lagged_start = [] for i in range(num_agents_sim): idx = edu_spec["start"].index(edu_start[i]) probs = edu_spec["lagged"][idx], 1 - edu_spec["lagged"][idx] lagged_start += np.random.choice([3, 4], p=probs, size=1).tolist() # If we only have one individual, we need to ensure that activities are a vector. lagged_start = np.array(lagged_start, ndmin=1) return lagged_start
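# Illustrative usage sketch (not part of the original module): feeding
# construct_transition_matrix() a toy panel indexed by "Identifier" and
# "Period", with choices coded 1-4 as in the functions above. The data
# values are made up for the example.
if __name__ == "__main__":
    toy = pd.DataFrame(
        {"Choice": [1, 1, 3, 3, 4, 4]},
        index=pd.MultiIndex.from_product(
            [[0, 1], [0, 1, 2]], names=["Identifier", "Period"]
        ),
    )
    # Rows are normalized to sum to one. As the note in write_info() above
    # says, some pandas releases drop never-occurring choice categories
    # despite dropna=False, so the result may have fewer than four rows.
    print(construct_transition_matrix(toy))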
visp-streaming/pathfinder
src/test/java/net/knasmueller/pathfinder/unit_tests/RuleBasedNexusTests.java
<gh_stars>1-10 package net.knasmueller.pathfinder.unit_tests; import net.knasmueller.pathfinder.entities.operator_statistics.SingleOperatorStatistics; import net.knasmueller.pathfinder.exceptions.InvalidCircuitBreakerTransition; import net.knasmueller.pathfinder.service.CircuitBreaker; import net.knasmueller.pathfinder.service.nexus.INexus; import net.knasmueller.pathfinder.service.nexus.RuleBasedNexus; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; @RunWith(SpringJUnit4ClassRunner.class) public class RuleBasedNexusTests { RuleBasedNexus ruleBasedNexus; @Before public void init() { ruleBasedNexus = new RuleBasedNexus(); } @Test public void test_lowSourceConsumption_isFailed() { SingleOperatorStatistics statistics = SingleOperatorStatistics.fromDefault(); statistics.setIncomingRate(0.0); statistics.setItemsWaiting(150); statistics.setDeliveryRate(0.0); assert(ruleBasedNexus.predict(statistics).equals(INexus.OperatorClassification.FAILED)); } @Test public void test_lowCpuLowRam_isFailed() { SingleOperatorStatistics statistics = SingleOperatorStatistics.fromDefault(); statistics.setActualMemory(0); statistics.setActualCpuCores(0.0); assert(ruleBasedNexus.predict(statistics).equals(INexus.OperatorClassification.FAILED)); } @Test public void test_defaultOperator_isWorking() { SingleOperatorStatistics statistics = SingleOperatorStatistics.fromDefault(); assert(ruleBasedNexus.predict(statistics).equals(INexus.OperatorClassification.WORKING)); } }
madhuriattarde/Laravel-Book-Service
public/js/NotesController.js
app.controller('NotesController', function($state, $http, $rootScope, $scope,$auth, $filter, $location, $window, $uibModal) { $scope.noteAdd= $scope.reply = {}; $scope.messages={}; $scope.utcTime = ' UTC'; $scope.submitted = false; var url = $location.path().split('/'); $scope.quote_id = url[url.length - 1]; if(url.indexOf('quoteview') > -1){ $scope.noteAdd.note_type = $scope.reply.note_type = 'External'; $scope.noteAdd.assign_to = '1'; } if($auth.isAuthenticated()){ var $user = JSON.parse(localStorage.getItem('user')); $scope.user_id = $user.id; } $scope.preval='Internal'; $scope.ShowConfirm = function () { if (confirm("Please confirm to show this message to customer?")) { $scope.preval = $scope.noteAdd.note_type; } else { $scope.noteAdd.note_type = $scope.preval; } } $scope.getDepartments = function(){ $http.get('/api/notes/get_dept_list'). success(function(data, status, headers, config) { $scope.dept = data; }); } $scope.getSubjects = function(){ $http.get('/api/notes/get_subject_list'). success(function(data, status, headers, config) { $scope.subject = data; }); } $scope.getNotes = function(value){ $http({ url: '/api/notes', method: "GET", params: { quote_id: value}, headers: {'Content-Type': 'text/json'} }).success(function (data,status) { if(data.error_msg) { $scope.messages = data.error_msg; } else { $scope.notes = data; $scope.notes.lastNote = $scope.notes[$scope.notes.length - 1]; } }); } $scope.save = function(value){ $scope.submitted = true; if ($scope.notesForm.$valid && angular.isNumber(+$scope.quote_id)) { if(value=='reject'){ $scope.noteAdd.subject =10; $scope.noteAdd = angular.extend($scope.noteAdd, { 'quote_status' :5 }); } $scope.noteAdd = angular.extend($scope.noteAdd, { 'quote_id' :$scope.quote_id, 'subject_id':$scope.noteAdd.subject, 'department_id':$scope.noteAdd.assign_to, 'user_id':$scope.user_id, 'created_by':$scope.user_id, 'service_type':$scope.selectedService.service_name }); $http.post('/api/notes',$scope.noteAdd).success(function (data) { $('#NotesFormWrapper').modal('hide'); $scope.disabled= false; $scope.messages = data.succ_msg; $window.location.reload(); }) .error(function (data, status){ console.log("Error status : " + status); }), function(error){ $scope.error = error; }; } }; $scope.replyNotes = function(parentNote) { if ($scope.NotesReplyForm.$valid) { $scope.reply = angular.extend($scope.reply, { 'department_id':parentNote.department_id, 'subject_id':parentNote.subject_id, 'quote_id':parentNote.quote_id, 'parent_id':parentNote.id, 'user_id':parentNote.user_id, 'created_by':$scope.user_id, 'service_type':$scope.selectedService.service_name }); $http.post('/api/notes',$scope.reply).success(function (data) { $('#NotesReply').modal('hide'); $scope.messages = data.succ_msg; $window.location.reload(); }) .error(function (data, status){ console.log("Error status : " + status); }); } }; $scope.sendReply = function(parentNote){ $scope.replyNote = parentNote; }; $scope.setQuoteStatus = function(quoteStatus){ $rootScope.quoteReject = ''; if(quoteStatus!='') { $rootScope.quoteReject = quoteStatus; } }; $scope.changeStatus = function(noteDetail){ $scope.statusNote = noteDetail; }; $scope.update = function(noteDetail){ if ($scope.noteStatusForm.$valid){ $scope.statusNote = angular.extend($scope.statusNote, { 'quote_id':noteDetail.quote_id, 'user_id':noteDetail.user_id, 'created_by':noteDetail.user_id, 'status': $scope.statusNote.statusvalue }); $http.put('/api/notes/'+ noteDetail.id, $scope.statusNote).success(function(data){ if(data.error_msg) $scope.messages = 
data.error_msg; else { $scope.messages = data.succ_msg; $('#NotesStatusWrapper').modal('hide'); } }) .error(function (data, status){ console.log("Error status : " + status); }); } }; });//end
mirunix/libelektra
src/libs/tools/include/merging/mergetask.hpp
/** * @file * * @brief Models a merge task * * @copyright BSD License (see LICENSE.md or https://www.libelektra.org) * */ #ifndef MERGETASK_HPP_ #define MERGETASK_HPP_ #include <kdb.h> namespace kdb { namespace tools { namespace merging { class MergeKeys { public: const KeySet keys; const Key parent; protected: MergeKeys (const KeySet & _keys, const Key & _parentKey) : keys (_keys), parent (_parentKey) { } }; class BaseMergeKeys : public MergeKeys { public: BaseMergeKeys (const KeySet & _keys, const Key & _parentKey) : MergeKeys (_keys, _parentKey) { } }; class TheirMergeKeys : public MergeKeys { public: TheirMergeKeys (const KeySet & _keys, const Key & _parentKey) : MergeKeys (_keys, _parentKey) { } }; class OurMergeKeys : public MergeKeys { public: OurMergeKeys (const KeySet & _keys, const Key & _parentKey) : MergeKeys (_keys, _parentKey) { } }; class MergeTask { public: KeySet base; KeySet ours; KeySet theirs; Key baseParent; Key ourParent; Key theirParent; Key mergeRoot; /** * @param _base the KeySet containing the base keys and the base parentKey * @param _ours the KeySet containing our keys and our parentKey * @param _theirs the KeySet containing their keys and their parentKey * @param _mergeRoot the parentKey for the merged keys */ MergeTask (const BaseMergeKeys & _base, const OurMergeKeys & _ours, const TheirMergeKeys & _theirs, const Key & _mergeRoot) : base (_base.keys), ours (_ours.keys), theirs (_theirs.keys), baseParent (_base.parent), ourParent (_ours.parent), theirParent (_theirs.parent), mergeRoot (_mergeRoot) { } ~MergeTask () { } MergeTask reverse () const { return MergeTask (BaseMergeKeys (base, baseParent), OurMergeKeys (theirs, theirParent), TheirMergeKeys (ours, ourParent), mergeRoot); } }; } // namespace merging } // namespace tools } // namespace kdb #endif /* MERGETASK_HPP_ */
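/*
 * Illustrative usage sketch (not part of this header): the role-specific
 * wrapper types exist so that base, ours and theirs cannot be handed to
 * MergeTask in the wrong order; mixing them up is a compile error rather
 * than a silent merge bug. Assuming the KeySets and parent Keys are
 * already at hand:
 *
 *   using namespace kdb::tools::merging;
 *
 *   MergeTask task (BaseMergeKeys (base, baseParent),
 *                   OurMergeKeys (ours, ourParent),
 *                   TheirMergeKeys (theirs, theirParent),
 *                   mergeRoot);
 *
 *   MergeTask swapped = task.reverse (); // ours and theirs exchange roles
 */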
ifiri/react-spa
assets/scripts/Api/Middlewares/ChunkUploader.js
import RequestFactory from '../RequestFactory'; import RequestExecuter from '../RequestExecuter'; import CommonUtils from '../../Utils/Common'; export default class ChunkUploader { constructor(request, executer) { const data = request.params.body; this.request = request; this.executer = executer; this.file = (data && data.file) || null; if(this.file) { this.chunkSize = this.getChunkSize(); this.total = this.getTotalChunksCountFor(this.file); this.fileSize = this.getFilesizeInKilobytes(this.file); this.fileName = this.generateFilename(this.file); this.fileExt = this.getExtensionByMimeFor(this.file); this.currentChunkIndex = 0; //this.progressbar = document.querySelector('[data-progress-for="' + request.progressbar + '"]'); //this.submitButton = this.progressbar.closest('form').querySelector('[type="submit"]'); } } apply() { if(!this.file) { return this.request; } this.updateProgress(0); const RequestFactoryInstance = new RequestFactory; const Request = this.request; const type = Request.type; let MutatedRequest; switch(type) { case 'chunkuploader/init': MutatedRequest = RequestFactoryInstance.createRequestWith(Request.alias, Request.data, Request.params); MutatedRequest.setBody({ ...Request.body, 'chunks': this.total, 'extention': this.fileExt, 'filename': this.fileName, }); break; case 'chunkuploader/upload': const initResponse = Request.currentResponse.responses['chunkuploader/init']; this.uploadQueueFor(initResponse.id); // this.executer.requests[this.executer.requests.length - 1].canExecuting = false; // Return dummy request MutatedRequest = RequestFactoryInstance.createRequestWith(Request.alias, Request.data, Request.params); MutatedRequest.setBody(null); break; case 'chunkuploader/chunk': // break; } return MutatedRequest || Request; } uploadQueueFor(uploadId) { const Request = this.request; const RequestFactoryInstance = new RequestFactory; const offset = this.request.offset; for(let i = 0; i < this.total; i++) { const UploadRequest = RequestFactoryInstance.createRequestWith(Request.alias, Request.data, { ...Request.params, map: { video: uploadId, current: this.currentChunkIndex, } }); delete UploadRequest.middleware; delete UploadRequest.type; delete UploadRequest.data.type; delete UploadRequest.data.middleware; UploadRequest.onComplete = this.updateProgress.bind(this, this.currentChunkIndex); // UploadRequest.data.type = 'chunkuploader/chunk'; UploadRequest.setBody({ blob: this.getCurrentChunkBy(this.currentChunkIndex) }); // console.log('uploadddd'); // console.log(UploadRequest); this.executer.requests.splice(this.executer.requests.length - offset, 0, UploadRequest); this.currentChunkIndex = i + 1; } this.executer.requests.splice(this.executer.requestIndex, 0); // this.executer.requestIndex--; console.log(':: REQUEEEEEEEEEEEEEEEEEEESTS'); console.log(this.executer.requests); // this.uploadChunkWith(UploadRequest); // if(!this.submitButton.disabled) { // CommonUtils.toggleSubmitButton(this.submitButton, true); // } else { // CommonUtils.lockSubmitButton(this.submitButton); // } } uploadChunkWith(Request) { const RequestExecuterInstance = new RequestExecuter; Request.setBody({ blob: this.getCurrentChunkBy(this.currentChunkIndex) }); RequestExecuterInstance.execute(Request).then(response => { if(response.successfull || response.video_url) { // if media upload or company model // if(request.success) { // this._ApiHandler[request.success](response, form, this._type); // } this.resetProgress(); //CommonUtils.toggleSubmitButton(this.submitButton, true); } else { 
this.currentChunkIndex++; if(this.currentChunkIndex < this.total) { this.uploadChunkWith(Request); if(this.currentChunkIndex + 1 !== this.total) { this.updateProgress(this.currentChunkIndex, this.total); } else { this.updateProgressFieldWith('Видео загружено, идет обработка...'); } } } }); } getChunksQueue() { const total_chunks = this.total; const queue = []; for(let chunkNumber = 0; chunkNumber < total_chunks; chunkNumber++) { const chunk = this.getCurrentChunkBy(chunkNumber); queue.push({ blob: chunk }); } return queue; } getCurrentChunkBy(chunkNumber) { const total_chunks = this.total; const chunk_size = this.getChunkSize(); const chunk_start_at = chunkNumber * chunk_size; const chunk_end_at = chunk_start_at + chunk_size; const current_chunk = this.file.slice(chunk_start_at, chunk_end_at, this.file.type); return current_chunk; } getChunkSize() { return 1024 * (1024 / 8); } getTotalChunksCountFor(file) { const chunkSize = this.getChunkSize(); // in bytes const totalChunks = Math.ceil(file.size / chunkSize); return totalChunks - 1; } getFilesizeInKilobytes(file) { let filesize = file.size / 1024; // in kb return filesize; } getExtensionByMimeFor(file) { let extension = null; switch(file.type) { case 'video/mp4': extension = 'mp4'; break; case 'image/jpeg': case 'image/jpg': extension = 'jpg'; break; case 'image/png': extension = 'png'; break; } return extension; } generateFilename() { const letters = 'qwertyuiopasdfghjklzxcvbnm1234567890'; let filename = ''; while(filename.length < 50) { const letter = Math.floor(Math.random() * letters.length - 1) + 1; filename += letters[letter]; } return filename; } resetProgress() { this.updateProgressFieldWith(''); } updateProgress(chunkIndex, response = null) { // console.log('::: ON COMPLETE'); // console.log(response); // console.log(chunkIndex); let current_progress = 0; if(chunkIndex) { const total_chunks = this.total; current_progress = Math.floor((chunkIndex / (total_chunks - 1)) * 100); } this.updateProgressFieldWith(current_progress + '%'); } updateProgressFieldWith(value) { //const progress_field = this.progressbar; const progress_field = $('#uploadLine'); // console.log('::: PROGREEEEEEEEEEEEEES FIELD'); if(progress_field) { $('#uploadPlaceholder').hide(); $('#uploadLine').show(); $('#uploadInfo').text(value); $('#uploadLine').find('.ready').css('width',value); } } }
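// Illustrative sketch (not part of the uploader above): the slicing math
// behind getChunkSize() and getCurrentChunkBy(), runnable in plain Node
// without a real File object. The 1000000-byte size is a made-up sample;
// note that getTotalChunksCountFor() itself returns this count minus one.
const CHUNK_SIZE = 1024 * (1024 / 8); // 128 KiB, as in getChunkSize()
const fileSize = 1000000;
const totalChunks = Math.ceil(fileSize / CHUNK_SIZE); // 8 chunks
for (let i = 0; i < totalChunks; i++) {
	const start = i * CHUNK_SIZE;
	const end = Math.min(start + CHUNK_SIZE, fileSize);
	console.log(`chunk ${i}: bytes ${start}-${end - 1}`);
}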
xiang12835/JavaLearning
BasicSyntax/src/WuZiQiHuaQiPan.java
public class WuZiQiHuaQiPan {
    public static void main(String[] args) {
        // Represents the different roles
        int black = 0;
        int white = 1;
        int empty = 2;
        // The piece character used by each role
        char[] qizi = new char[3];
        qizi[0] = '●';
        qizi[1] = '○';
        qizi[2] = ' ';
        // The piece that was just placed is highlighted with a different glyph.
        // This uses an alternative array-creation syntax: the size is not given
        // explicitly; instead, every element is listed in order inside a pair of
        // braces, so creation and assignment are combined into one step. An array
        // created this way still has a fixed size, so the result is the same.
        char[] qiziJustMove = new char[]{'■', '□', ' '};
        int size = 20;
        int qipan[][] = new int[size][size];
        int roleJustMove = 1;
        int justMoveLine = size / 2;
        int justMoveColumn = size / 2;
        for (int i = 0; i < size; i++) {
            for (int j = 0; j < size; j++) {
                if (i == justMoveLine && j == justMoveColumn) {
                    qipan[i][j] = black;
                } else {
                    qipan[i][j] = (i + j) % qizi.length;
                }
            }
        }
        // String form = "└┘┌┐├┤│┬┴ ┼";
        String header = "\t";
        for (int j = 0; j < size; j++) {
            header += (j + 1) + "\t";
        }
        System.out.println(header);
        for (int i = 0; i < size; i++) {
            String line = "" + (i + 1) + "\t";
            for (int j = 0; j < size; j++) {
                char[] arrayToUse =
                        // Java's ternary operator: before the ? is a boolean expression;
                        // if it evaluates to true, the whole expression takes the value
                        // before the colon, otherwise the value after the colon.
                        (i == justMoveLine && j == justMoveColumn) ? qiziJustMove : qizi;
                // The expression above is equivalent to:
                // char[] arrayToUse;
                // if (i == justMoveLine && j == justMoveColumn) {
                //     arrayToUse = qiziJustMove;
                // } else {
                //     arrayToUse = qizi;
                // }
                line += arrayToUse[qipan[i][j]] + "\t";
            }
            System.out.println(line + (i + 1));
        }
        System.out.println(header);
    }
}
watilde/web-platform-tests
geolocation-API/support.js
<filename>geolocation-API/support.js<gh_stars>0
var geo;
setup(function() {
  geo = navigator.geolocation;
}, {explicit_done: true});

// The spec states that an implementation SHOULD acquire user permission before
// beginning the position acquisition steps. If an implementation follows this
// advice, set the following flag to aid debugging.
var isUsingPreemptivePermission = false;

var dummyFunction = function() {};

var positionToString = function(pos) {
  var c = pos.coords;
  return '[lat: ' + c.latitude + ', lon: ' + c.longitude + ', acc: ' + c.accuracy + ']';
};

var errorToString = function(err) {
  var codeString;
  switch(err.code) {
    case err.UNKNOWN_ERROR:
      codeString = 'UNKNOWN_ERROR';
      break;
    case err.PERMISSION_DENIED:
      codeString = 'PERMISSION_DENIED';
      break;
    case err.POSITION_UNAVAILABLE:
      codeString = 'POSITION_UNAVAILABLE';
      break;
    case err.TIMEOUT:
      codeString = 'TIMEOUT';
      break;
    default:
      codeString = 'undefined error code';
      break;
  }
  return '[code: ' + codeString + ' (' + err.code + '), message: ' + (err.message ? err.message : '(empty)') + ']';
};
ScalablyTyped/SlinkyTyped
g/googleapis/src/main/scala/typingsSlinky/googleapis/v33Mod/dfareportingV33/SchemaCrossDimensionReachReportCompatibleFields.scala
<gh_stars>10-100 package typingsSlinky.googleapis.v33Mod.dfareportingV33 import org.scalablytyped.runtime.StObject import scala.scalajs.js import scala.scalajs.js.`|` import scala.scalajs.js.annotation.{JSGlobalScope, JSGlobal, JSImport, JSName, JSBracketAccess} /** * Represents fields that are compatible to be selected for a report of type * &quot;CROSS_DIMENSION_REACH&quot;. */ @js.native trait SchemaCrossDimensionReachReportCompatibleFields extends StObject { /** * Dimensions which are compatible to be selected in the * &quot;breakdown&quot; section of the report. */ var breakdown: js.UndefOr[js.Array[SchemaDimension]] = js.native /** * Dimensions which are compatible to be selected in the * &quot;dimensionFilters&quot; section of the report. */ var dimensionFilters: js.UndefOr[js.Array[SchemaDimension]] = js.native /** * The kind of resource this is, in this case * dfareporting#crossDimensionReachReportCompatibleFields. */ var kind: js.UndefOr[String] = js.native /** * Metrics which are compatible to be selected in the * &quot;metricNames&quot; section of the report. */ var metrics: js.UndefOr[js.Array[SchemaMetric]] = js.native /** * Metrics which are compatible to be selected in the * &quot;overlapMetricNames&quot; section of the report. */ var overlapMetrics: js.UndefOr[js.Array[SchemaMetric]] = js.native } object SchemaCrossDimensionReachReportCompatibleFields { @scala.inline def apply(): SchemaCrossDimensionReachReportCompatibleFields = { val __obj = js.Dynamic.literal() __obj.asInstanceOf[SchemaCrossDimensionReachReportCompatibleFields] } @scala.inline implicit class SchemaCrossDimensionReachReportCompatibleFieldsMutableBuilder[Self <: SchemaCrossDimensionReachReportCompatibleFields] (val x: Self) extends AnyVal { @scala.inline def setBreakdown(value: js.Array[SchemaDimension]): Self = StObject.set(x, "breakdown", value.asInstanceOf[js.Any]) @scala.inline def setBreakdownUndefined: Self = StObject.set(x, "breakdown", js.undefined) @scala.inline def setBreakdownVarargs(value: SchemaDimension*): Self = StObject.set(x, "breakdown", js.Array(value :_*)) @scala.inline def setDimensionFilters(value: js.Array[SchemaDimension]): Self = StObject.set(x, "dimensionFilters", value.asInstanceOf[js.Any]) @scala.inline def setDimensionFiltersUndefined: Self = StObject.set(x, "dimensionFilters", js.undefined) @scala.inline def setDimensionFiltersVarargs(value: SchemaDimension*): Self = StObject.set(x, "dimensionFilters", js.Array(value :_*)) @scala.inline def setKind(value: String): Self = StObject.set(x, "kind", value.asInstanceOf[js.Any]) @scala.inline def setKindUndefined: Self = StObject.set(x, "kind", js.undefined) @scala.inline def setMetrics(value: js.Array[SchemaMetric]): Self = StObject.set(x, "metrics", value.asInstanceOf[js.Any]) @scala.inline def setMetricsUndefined: Self = StObject.set(x, "metrics", js.undefined) @scala.inline def setMetricsVarargs(value: SchemaMetric*): Self = StObject.set(x, "metrics", js.Array(value :_*)) @scala.inline def setOverlapMetrics(value: js.Array[SchemaMetric]): Self = StObject.set(x, "overlapMetrics", value.asInstanceOf[js.Any]) @scala.inline def setOverlapMetricsUndefined: Self = StObject.set(x, "overlapMetrics", js.undefined) @scala.inline def setOverlapMetricsVarargs(value: SchemaMetric*): Self = StObject.set(x, "overlapMetrics", js.Array(value :_*)) } }
benja-M-1/homebrew-cask
Casks/aladin.rb
<reponame>benja-M-1/homebrew-cask cask "aladin" do version :latest sha256 :no_check url "https://aladin.u-strasbg.fr/java/download/Aladin.dmg" name "Aladin Desktop" homepage "https://aladin.u-strasbg.fr/AladinDesktop/" app "Aladin.app" end
yantarou/midonet
nsdb/src/main/java/org/midonet/cluster/backend/zookeeper/SessionUnawareConnectionWatcher.java
<reponame>yantarou/midonet<filename>nsdb/src/main/java/org/midonet/cluster/backend/zookeeper/SessionUnawareConnectionWatcher.java<gh_stars>0 /* * Copyright 2015 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.midonet.cluster.backend.zookeeper; import org.apache.commons.lang.NotImplementedException; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.Watcher; import org.slf4j.Logger; import static org.slf4j.LoggerFactory.getLogger; /** * Zookeeper connection watcher for the API server. */ public class SessionUnawareConnectionWatcher implements ZkConnectionAwareWatcher { private final static Logger log = getLogger(SessionUnawareConnectionWatcher.class); private ZkConnection conn; @Override synchronized public void process(WatchedEvent watchedEvent) { log.debug("ZookeeperConnWatcher.process: Entered with event {}", watchedEvent.getState()); // The ZK client re-connects automatically. However, after it // successfully reconnects, if the session had expired, we need to // create a new session. if (watchedEvent.getState() == Watcher.Event.KeeperState.Expired && conn != null) { log.info("Session expired, reconnecting to ZK with a new session"); try { conn.reopen(); } catch (Exception e) { throw new RuntimeException("Zookeeper could not be " + "restarted", e); } } log.debug("ZookeeperConnWatcher.process: Exiting"); } @Override public void setZkConnection(ZkConnection conn) { this.conn = conn; } @Override public void scheduleOnReconnect(Runnable runnable) { throw new NotImplementedException(); } @Override public void scheduleOnDisconnect(Runnable runnable) { throw new NotImplementedException(); } @Override public void handleError(String operationDesc, Runnable retry, KeeperException e) { log.warn("handleError(): ignoring: {}", e); } @Override public void handleError(String operationDesc, Runnable retry, StateAccessException e) { log.warn("handleError(): ignoring: {}", e); } @Override public void handleTimeout(Runnable runnable) { // do nothing } @Override public void handleDisconnect(Runnable runnable) { // do nothing } }
evgenyboxer/near-wallet
src/components/common/Disclaimer.js
import React from 'react' import { Grid } from 'semantic-ui-react' import { Translate } from 'react-localize-redux' import styled from 'styled-components' const DisclaimerGrid = styled(Grid)` && .disclaimer { margin-top: 100px; border-top: 2px solid #f8f8f8; > .column { padding-left: 0px; padding-right: 0px; } } .disclaimer-info { font-weight: 500; letter-spacing: 2px; } @media screen and (max-width: 767px) { && .disclaimer { margin-top: 50px; font-size: 12px; margin-left: 1rem; margin-right: 1rem; } } ` const Disclaimer = () => ( <DisclaimerGrid> <Grid.Row className='disclaimer'> <Grid.Column computer={16} tablet={16} mobile={16}> <span className='disclaimer-info'><Translate id='disclaimer.title' />: </span> <Translate id='disclaimer.text' /> <a href='http://nearprotocol.com'>nearprotocol.com</a> </Grid.Column> </Grid.Row> </DisclaimerGrid> ) export default Disclaimer
willroberts/duelyst
app/view/nodes/reward/MysteryCrateNode.js
<reponame>willroberts/duelyst //pragma PKGS: mystery_crate_node var CONFIG = require("app/common/config"); var EventBus = require('app/common/eventbus'); var EVENTS = require('app/common/event_types'); var RSX = require("app/data/resources"); var PKGS = require("app/data/packages"); var BaseSprite = require('app/view/nodes/BaseSprite'); var GlowSprite = require('app/view/nodes/GlowSprite'); var LootCrateNode = require('./LootCrateNode'); var CrateManager = require("app/ui/managers/crate_manager"); var Promise = require("bluebird"); /**************************************************************************** MysteryCrateNode - abstract base class for mystery loot crates (do not use this class directly) ****************************************************************************/ var MysteryCrateNode = LootCrateNode.extend({ _lootCrateKeySprite: null, _showKeyPromise: null, _stopShowingKeyPromise: null, /* region GETTERS / SETTERS */ _getLootCrateKeySpriteIdentifier: function () { return null; }, getCrateCount: function () { return CrateManager.getInstance().getCosmeticChestCountForType(this.getCrateType()); }, getCrateKeyCount: function () { return CrateManager.getInstance().getCosmeticChestKeyCountForType(this.getCrateType()); }, getCrateCountLabelBasePosition: function () { var position = this.getCrateTypeLabelBasePosition(); position.x += -15.0; position.y += -40.0; return position; }, getCrateMaxCountLabelBasePosition: function () { var position = this.getCrateCountLabelBasePosition(); position.x += 31.0; return position; }, getUsesKeys: function () { return false; }, /* endregion GETTERS / SETTERS */ /* region LABELS */ showCrateMaxCountLabel: function () { LootCrateNode.prototype.showCrateMaxCountLabel.apply(this, arguments); this._crateMaxCountLabel.setString("/ 5"); }, /* endregion LABELS */ /* region REWARDS */ /** * Shows key for crate. * @param {Number} [duration=0.0] * @returns {Promise} */ showKey: function (duration) { if (this._showKeyPromise == null) { // cancel hiding key if (this._stopShowingKeyPromise != null) { this._stopShowingKeyPromise.cancel(); this._stopShowingKeyPromise = null; } // create/show key this._showKeyPromise = this.whenRequiredResourcesReady().then(function (requestId) { if (!this.getAreResourcesValid(requestId)) return; // load invalidated or resources changed return new Promise(function (resolve) { this._showKeyPromise = null; // key sprite if (this._lootCrateKeySprite == null) { this._lootCrateKeySprite = BaseSprite.create(this._getLootCrateKeySpriteIdentifier()); this._lootCrateKeySprite.setVisible(false); this._lootCrateKeySprite.setRotation(90.0); this.addChild(this._lootCrateKeySprite, this._zOrderBehindCrate); } // animate key in var contentSize = this.getContentSize(); var centerPosition = this.getCenterPosition(); this._lootCrateKeySprite.setPosition(centerPosition.x, centerPosition.y - contentSize.height * 0.5 - this._lootCrateKeySprite.getContentSize().width * 0.5 - 30.0); this._lootCrateKeySprite.fadeTo(duration, 255.0, function () { resolve(); }); }.bind(this)) .catch(function (error) { EventBus.getInstance().trigger(EVENTS.error, error); }); }.bind(this)) .cancellable() .catch(Promise.CancellationError, function () { Logger.module("APPLICATION").log("MysteryCrateNode -> key show promise chain cancelled"); }); } return this._showKeyPromise; }, /** * Stops showing key. 
* @param {Number} [duration=0.0] * @returns {Promise} */ stopShowingKey: function (duration) { // cancel showing key if (this._showKeyPromise != null) { this._showKeyPromise.cancel(); this._showKeyPromise = null; } // hide key if (this._lootCrateKeySprite != null) { if (duration == null) { duration = 0.0; } this._stopShowingKeyPromise = this.whenRequiredResourcesReady().then(function (requestId) { if (!this.getAreResourcesValid(requestId)) return; // load invalidated or resources changed return new Promise(function (resolve) { this._stopShowingKeyPromise = null; this._lootCrateKeySprite.fadeToInvisible(duration, function () { resolve(); }); }.bind(this)) .catch(function (error) { EventBus.getInstance().trigger(EVENTS.error, error); }); }.bind(this)) .cancellable() .catch(Promise.CancellationError, function () { Logger.module("APPLICATION").log("MysteryCrateNode -> key hide promise chain cancelled"); }); } return this._stopShowingKeyPromise; }, showOpeningAndRewards: function () { return this.showKey(CONFIG.ANIMATE_MEDIUM_DURATION).then(function () { // show unlock return new Promise(function (resolve) { // show crate as static but preserve fx this.showStaticState(CONFIG.ANIMATE_FAST_DURATION, true); // animate key into box var contentSize = this.getContentSize(); var centerPosition = this.getCenterPosition(); this._lootCrateKeySprite.runAction(cc.sequence( cc.spawn( cc.rotateTo(CONFIG.ANIMATE_SLOW_DURATION, 0.0).easing(cc.easeCubicActionInOut()), cc.moveBy(CONFIG.ANIMATE_SLOW_DURATION, 0.0, -60.0).easing(cc.easeCubicActionInOut()) ), cc.moveTo(CONFIG.ANIMATE_MEDIUM_DURATION, centerPosition.x, centerPosition.y - contentSize.height * 0.5).easing(cc.easeBackIn()), cc.spawn( cc.sequence( cc.fadeOut(CONFIG.ANIMATE_FAST_DURATION), cc.hide() ), cc.callFunc(function () { resolve(); }.bind(this)) ) )); }.bind(this)) .catch(function (error) { EventBus.getInstance().trigger(EVENTS.error, error); }); }.bind(this)).then(function () { // show actual opening return LootCrateNode.prototype.showOpeningAndRewards.call(this); }.bind(this)); }, /* endregion REWARDS */ }); MysteryCrateNode.create = function(node) { return LootCrateNode.create(node || new MysteryCrateNode()); }; module.exports = MysteryCrateNode;
MaceM8/validarium
packages/intl/src/utils/unlessIsNil.js
import { unless, isNil } from 'ramda'; export default unless(isNil);
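// Illustrative usage sketch (not part of the module): unlessIsNil(fn)
// yields a function that applies fn only when its argument is neither
// null nor undefined, passing nil values through untouched.
//
//   const double = unlessIsNil(x => x * 2);
//   double(21);   // 42
//   double(null); // null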
tbepler/JProbe
src/jprobe/services/Debug.java
<filename>src/jprobe/services/Debug.java package jprobe.services; public enum Debug { OFF, LOG, FULL; private static volatile Debug debugLevel; public static void setLevel(Debug level){ Debug.debugLevel = level; } public static Debug getLevel(){ return debugLevel; } public static Debug fromString(String s){ return fromInt(Integer.parseInt(s)); } public static Debug fromInt(int i){ if(i == 0){ return OFF; } if(i == 1){ return LOG; } if(i == 2){ return FULL; } return LOG; } @Override public String toString(){ switch(this){ case OFF: return "0"; case LOG: return "1"; case FULL: return "2"; default: return "1"; } } }
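/*
 * Illustrative usage sketch (not part of the original class): the
 * string <-> enum round trip defined by fromString() and toString().
 *
 *   Debug.setLevel(Debug.fromString("2")); // FULL
 *   Debug level = Debug.getLevel();        // Debug.FULL
 *   String persisted = level.toString();   // "2"
 */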
std-freejia/hymni.study
Code/20200824_Hide_and_Seek_LDH.c
<reponame>std-freejia/hymni.study
#include <stdio.h>

int main(int argc, char* argv[], char* env[])
{
	int positionOfSubin = 0;  // Subin's position
	int positionOfSister = 0; // the younger sibling's position
	int Q[250000] = {0, };
	int visit[100001] = {0, };
	int front = -1;
	int back = -1;
	int d[100001] = {0, };

	// Read Subin's position and the sibling's position
	scanf("%d %d", &positionOfSubin, &positionOfSister);

	// If the two positions are already equal, we are done
	if(positionOfSubin == positionOfSister)
	{
		printf("0");
		return 0;
	}

	Q[++back] = positionOfSubin;
	visit[positionOfSubin] = 1; // mark the start as visited so it is never re-enqueued

	while(front != back)
	{
		front++;

		// Three possible moves: x-1, x+1 and x*2. The bounds are checked
		// before visit[] is read, so the index can never run outside the array.

		// Move to x*2
		if((Q[front] * 2) >= 0 && (Q[front] * 2) < 100001 && visit[Q[front] * 2] == 0)
		{
			Q[++back] = Q[front] * 2;
			d[back] = d[front] + 1;
			visit[Q[back]] = 1;
		}
		// Found the sibling
		if(Q[back] == positionOfSister)
		{
			break;
		}

		// Move to x-1
		if((Q[front] - 1) >= 0 && (Q[front] - 1) < 100001 && visit[Q[front] - 1] == 0)
		{
			Q[++back] = Q[front] - 1;
			d[back] = d[front] + 1;
			visit[Q[back]] = 1;
		}
		// Found the sibling
		if(Q[back] == positionOfSister)
		{
			break;
		}

		// Move to x+1
		if((Q[front] + 1) >= 0 && (Q[front] + 1) < 100001 && visit[Q[front] + 1] == 0)
		{
			Q[++back] = Q[front] + 1;
			d[back] = d[front] + 1;
			visit[Q[back]] = 1;
		}
		// Found the sibling
		if(Q[back] == positionOfSister)
		{
			break;
		}
	}

	printf("%d", d[back]);
	return 0;
}