| repo_name (string, length 6–101) | path (string, length 4–300) | text (string, length 7–1.31M) |
|---|---|---|
vampy/university
|
aspect-oriented-programming/labs/src/com/aop/controller/GUIController.java
|
package com.aop.controller;
import com.aop.log.Log;
import com.aop.model.Book;
import com.aop.model.Library;
import java.util.Observable;
import java.util.Observer;
/**
 * Controller that mediates between the {@link Library} model and the Swing
 * table GUI. Holds the {@link BooksMainTableModel} that backs the books view.
 */
public class GUIController extends AbstractController
{
    /** Data source for all book operations; never reassigned. */
    private final Library library;
    /** Table model backing the books view; refreshed from {@link #library}. */
    private final BooksMainTableModel tableModel;

    /**
     * Creates a controller bound to the given library and seeds the table
     * model with the library's current book list.
     *
     * @param library the library this controller operates on
     */
    public GUIController(Library library)
    {
        this.library = library;
        tableModel = new BooksMainTableModel(library.getAllBooks());
    }

    /**
     * @return the library this controller operates on
     */
    public Library getLibrary()
    {
        return library;
    }

    /**
     * Getter for property 'tableModel'.
     *
     * @return Value for property 'tableModel'.
     */
    public BooksMainTableModel getTableModel()
    {
        return tableModel;
    }

    /**
     * Loans the given book to a user; delegates directly to the library.
     *
     * @param book   the book to loan
     * @param userID id of the borrowing user
     */
    public void loanBook(Book book, int userID)
    {
        library.loanBook(book, userID);
    }

    /**
     * Returns a loaned book to the library.
     *
     * @param book the book being returned
     */
    public void returnBook(Book book)
    {
        library.returnBook(book);
    }

    /** Re-reads the full book list from the library into the table model. */
    public void setTableBooksFromLibrary()
    {
        tableModel.setBooks(library.getAllBooks());
    }

    @Override
    public void close()
    {
        // No resources to release for the GUI controller.
    }
}
|
aukgit/scala-open-real-time-bidding-rtb
|
app/shared/com/ortb/model/wrappers/persistent/EntityWithJoinedTableRowsWrapperModel.scala
|
<filename>app/shared/com/ortb/model/wrappers/persistent/EntityWithJoinedTableRowsWrapperModel.scala
package shared.com.ortb.model.wrappers.persistent
import shared.io.helpers.EmptyValidateHelper
/**
 * Pairs an optional entity row with the optional rows joined from a child
 * table, as produced by a joined query.
 *
 * @tparam TBase          type of the parent (base) row
 * @tparam TChildRowsType type of the joined child rows
 * @param row       the parent row, if any
 * @param innerRows the joined child rows, if any
 */
case class EntityWithJoinedTableRowsWrapperModel[TBase, TChildRowsType](
  row : Option[TBase],
  innerRows : Option[Seq[TChildRowsType]]
) {
  // True when the parent row is present (delegates the Option check).
  lazy val hasRow : Boolean = EmptyValidateHelper.isDefined(row)
  // True when the joined sequence is present and non-empty.
  lazy val hasInnerRows : Boolean = EmptyValidateHelper.hasAnyItem(innerRows)
}
|
derailed/gobot
|
event.go
|
<reponame>derailed/gobot
package gobot
// Event fans values written to Chan out to a set of callbacks.
type Event struct {
	// Chan buffers at most one pending event; see Write for the drop policy.
	Chan chan interface{}
	// Callbacks are each invoked (on their own goroutine) for every event read.
	Callbacks []func(interface{})
}
// NewEvent creates an Event with a one-slot buffered channel and starts a
// background goroutine that dispatches incoming values to the callbacks.
func NewEvent() *Event {
	e := &Event{
		Chan:      make(chan interface{}, 1),
		Callbacks: []func(interface{}){},
	}
	// Read ranges over e.Chan and only returns once the channel is closed.
	// The original wrapped this call in `for { e.Read() }`, which busy-spins
	// at 100% CPU after the channel closes; a single call is sufficient and
	// lets the goroutine exit cleanly.
	go e.Read()
	return e
}
// Write offers data to the event channel without blocking: if the one-slot
// buffer is already full, the value is silently dropped (default case).
func (e *Event) Write(data interface{}) {
	select {
	case e.Chan <- data:
	default:
	}
}
// Read drains e.Chan until the channel is closed, dispatching each received
// value to every registered callback, each on a fresh goroutine.
// NOTE(review): Callbacks is read here without synchronization — confirm that
// all registrations happen before the event loop starts, or add locking.
func (e *Event) Read() {
	for s := range e.Chan {
		for _, f := range e.Callbacks {
			go f(s)
		}
	}
}
|
GuusLieben/DarwinServerSources
|
hartshorn-core/src/main/java/org/dockbox/hartshorn/application/ActivatorHolder.java
|
/*
* Copyright 2019-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dockbox.hartshorn.application;
import java.lang.annotation.Annotation;
import java.util.Set;
/**
 * Holds the set of activator annotations available to an application and
 * provides lookup by annotation type.
 */
public interface ActivatorHolder {

    /**
     * Returns all activator annotations known to this holder.
     *
     * @return the set of activator annotation instances
     */
    Set<Annotation> activators();

    /**
     * Indicates whether an activator of the given annotation type is present.
     *
     * @param activator the annotation type to look up
     * @return {@code true} if an activator of that type is present
     */
    boolean hasActivator(Class<? extends Annotation> activator);

    /**
     * Returns the activator instance of the given type, if present.
     *
     * @param activator the annotation type to look up
     * @param <A> the annotation type
     * @return the activator instance, as visible from this interface
     */
    <A> A activator(Class<A> activator);
}
|
42iscool42/SpongeCommon
|
src/main/java/org/spongepowered/common/util/PathTokens.java
|
/*
* This file is part of Sponge, licensed under the MIT License (MIT).
*
* Copyright (c) SpongePowered <https://www.spongepowered.org>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.spongepowered.common.util;
import com.google.common.collect.Maps;
import org.spongepowered.common.SpongeImpl;
import org.spongepowered.common.launch.SpongeLaunch;
import java.util.Map;
/**
 * Utility for substituting well-known {@code ${TOKEN}} placeholders (game,
 * mods and config directories, Minecraft version) in configuration strings.
 */
public final class PathTokens {

    /**
     * Token which contains the fully-qualified path to the game directory (profile root)
     */
    public static final String PATHTOKEN_CANONICAL_GAME_DIR = "CANONICAL_GAME_DIR";

    /**
     * Token which contains the fully-qualified path to FML's "mods" folder
     */
    public static final String PATHTOKEN_CANONICAL_MODS_DIR = "CANONICAL_MODS_DIR";

    /**
     * Token which contains the fully-qualified path to FML's "config" folder
     */
    public static final String PATHTOKEN_CANONICAL_CONFIG_DIR = "CANONICAL_CONFIG_DIR";

    /**
     * Token which contains the current minecraft version as a string
     */
    public static final String PATHTOKEN_MC_VERSION = "MC_VERSION";

    // Utility class: no instances.
    private PathTokens() {
    }

    /**
     * Replaces every known {@code ${TOKEN}} placeholder in the given string
     * with its resolved value. Replacement is literal (String.replace), not
     * regex-based.
     *
     * @param string the raw string, possibly containing tokens
     * @return the string with all known tokens substituted
     */
    public static String replace(String string) {
        final Map<String, String> tokens = getPathTokens();
        for (final Map.Entry<String, String> token : tokens.entrySet()) {
            string = string.replace(token.getKey(), token.getValue());
        }
        return string;
    }

    /**
     * Builds the map of {@code ${TOKEN}} placeholder to resolved value, read
     * fresh from the launch environment on each call.
     */
    private static Map<String, String> getPathTokens() {
        final Map<String, String> tokens = Maps.newHashMap();
        tokens.put(formatToken(PATHTOKEN_CANONICAL_MODS_DIR), SpongeLaunch.getPluginsDir().toFile().getAbsolutePath());
        tokens.put(formatToken(PATHTOKEN_CANONICAL_GAME_DIR), SpongeLaunch.getGameDir().toFile().getAbsolutePath());
        tokens.put(formatToken(PATHTOKEN_CANONICAL_CONFIG_DIR), SpongeLaunch.getConfigDir().toFile().getAbsolutePath());
        tokens.put(formatToken(PATHTOKEN_MC_VERSION), SpongeImpl.MINECRAFT_VERSION.getName());
        return tokens;
    }

    /** Wraps a bare token name as a {@code ${NAME}} placeholder. */
    private static String formatToken(String name) {
        return String.format("${%s}", name);
    }
}
|
mjenrungrot/algorithm
|
UVa Online Judge/v126/12643.py
|
<reponame>mjenrungrot/algorithm
import sys
lines = sys.stdin.readlines()
for line in lines:
line = line.strip()
N, i, j = list(map(int, line.split()))
ans = 0
while i != j:
if i % 2:
i = (i + 1) // 2
else:
i = i // 2
if j % 2:
j = (j + 1) // 2
else:
j = j // 2
ans += 1
print(ans)
|
coinForRich/coin-for-rich
|
common/helpers/numbers.py
|
<reponame>coinForRich/coin-for-rich
# This module contains common number helpers
from decimal import Decimal
from typing import Union
def round_decimal(
    number: Union[float, int, Decimal, str],
    n_decimals: int=2
) -> Union[Decimal, None]:
    '''
    Round `number` to `n_decimals` decimal places, returning a Decimal.

    A `None` input short-circuits to `None`; any other input is first
    coerced via `Decimal(...)`, so ints, floats and numeric strings are
    all accepted.
    :params:
        `number`: float, int, Decimal, or str representing a number
        `n_decimals`: how many decimal places to keep (default 2)
    '''
    return None if number is None else round(Decimal(number), n_decimals)
|
synesenom/ranjs
|
src/dist/bradford.js
|
import Distribution from './_distribution'
/**
* Generator for the [Bradford distribution]{@link https://docs.scipy.org/doc/scipy/reference/tutorial/stats/continuous_bradford.html}:
*
* $$f(x; c) = \frac{c}{\ln(1 + c) (1 + c x)},$$
*
* with $c > 0$. Support: $x \in \[0, 1\]$.
*
* @class Bradford
* @memberof ran.dist
* @param {number=} c Shape parameter. Default value is 1.
* @constructor
*/
export default class extends Distribution {
  constructor (c = 1) {
    // Continuous distribution; arguments.length lets the base class know how
    // many parameters were explicitly supplied.
    super('continuous', arguments.length)

    // Validate parameters
    this.p = { c }
    Distribution.validate({ c }, [
      'c > 0'
    ])

    // Set support: x in [0, 1], both endpoints included.
    this.s = [{
      value: 0,
      closed: true
    }, {
      value: 1,
      closed: true
    }]

    // Speed-up constants:
    //   c[0] = ln(1 + c)      (normalization, reused by the cdf/quantile)
    //   c[1] = c / ln(1 + c)  (pdf prefactor)
    const c0 = Math.log(1 + c)
    this.c = [
      c0,
      c / c0
    ]
  }

  // Sample via inverse transform: push a uniform variate through the quantile.
  _generator () {
    // Inverse transform sampling
    return this._q(this.r.next())
  }

  // Density f(x; c) = c / (ln(1 + c) (1 + c x)).
  _pdf (x) {
    return this.c[1] / (1 + this.p.c * x)
  }

  // CDF F(x; c) = ln(1 + c x) / ln(1 + c).
  _cdf (x) {
    return Math.log(1 + this.p.c * x) / this.c[0]
  }

  // Quantile (inverse CDF): x = (e^{p ln(1+c)} - 1) / c.
  _q (p) {
    return (Math.exp(this.c[0] * p) - 1) / this.p.c
  }
}
|
JianYT/mobile-sdk
|
all/native/core/BinaryData.cpp
|
<gh_stars>100-1000
#include "BinaryData.h"
#include <algorithm>
#include <sstream>
namespace carto {

    // Default construction yields an empty, shared byte vector.
    BinaryData::BinaryData() :
        _dataPtr(std::make_shared<std::vector<unsigned char> >())
    {
    }

    // Takes ownership of the vector contents by moving them into shared storage.
    BinaryData::BinaryData(std::vector<unsigned char> data) :
        _dataPtr(std::make_shared<std::vector<unsigned char> >(std::move(data)))
    {
    }

    // Copies the raw byte range [data, data + size) into shared storage.
    BinaryData::BinaryData(const unsigned char* data, std::size_t size) :
        _dataPtr(std::make_shared<std::vector<unsigned char> >(data, data + size))
    {
    }

    bool BinaryData::empty() const {
        return _dataPtr->empty();
    }

    std::size_t BinaryData::size() const {
        return _dataPtr->size();
    }

    const unsigned char* BinaryData::data() const {
        return _dataPtr->data();
    }

    // Exposes the underlying shared vector; callers share ownership.
    std::shared_ptr<std::vector<unsigned char> > BinaryData::getDataPtr() const {
        return _dataPtr;
    }

    // Element-wise comparison; vector::operator== already checks sizes first.
    bool BinaryData::operator ==(const BinaryData& data) const {
        return *_dataPtr == *data._dataPtr;
    }

    bool BinaryData::operator !=(const BinaryData& data) const {
        return !(*this == data);
    }

    // Hash of the raw bytes, reinterpreted as a string.
    int BinaryData::hash() const {
        const std::string bytes(reinterpret_cast<const char*>(_dataPtr->data()), _dataPtr->size());
        return static_cast<int>(std::hash<std::string>()(bytes));
    }

    // Human-readable summary, e.g. "BinaryData [size=42]".
    std::string BinaryData::toString() const {
        std::stringstream stream;
        stream << "BinaryData [size=" << _dataPtr->size() << "]";
        return stream.str();
    }

}
|
lisongwang/java-core
|
src/main/java/com/lisong/learn/core/polymorphism/animal/Frog.java
|
package com.lisong.learn.core.polymorphism.animal;
import static com.lisong.learn.core.util.Print.print;
/**
 * Polymorphism/cleanup-order demo: a Frog owns a Characteristic and a
 * Description, and disposes them in reverse order of initialization.
 */
public class Frog extends Amphibian {
    // Field initializers run before the constructor body, so both members
    // exist (and have printed their own creation) before "Frog()" is printed.
    private Characteristic c = new Characteristic("Croaks");
    private Description d = new Description("Eats Bugs");

    public Frog() { print("Frog()"); }

    @Override
    public void dispose() {
        print("Frog dispose");
        // Dispose members in reverse order of creation, then the base class.
        d.dispose();
        c.dispose();
        super.dispose();
    }
}
|
bobmcwhirter/drools
|
drools-verifier/src/main/java/org/drools/verifier/dao/DataTree.java
|
<gh_stars>10-100
package org.drools.verifier.dao;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
/**
 * A two-level sorted multimap: each key maps to a sorted set of values.
 * Keys and values use their natural ordering (TreeMap / TreeSet), so both
 * must be Comparable. Not thread-safe.
 *
 * @param <K> key (branch) type
 * @param <V> value (leaf) type
 */
public class DataTree<K, V> {

    /** Sorted key -> sorted, duplicate-free set of values. */
    private Map<K, Set<V>> map = new TreeMap<K, Set<V>>();

    /**
     * Adds {@code value} under {@code key}, creating the branch on first use.
     * Duplicate values within a branch are silently ignored (set semantics).
     *
     * @param key   the branch to add to
     * @param value the value to add
     */
    public void put(K key, V value) {
        // Single lookup instead of the original containsKey() + get() pair.
        Set<V> set = map.get(key);
        if (set == null) {
            set = new TreeSet<V>();
            map.put(key, set);
        }
        set.add(value);
    }

    /**
     * @return the sorted set of keys currently holding at least one value
     */
    public Set<K> keySet() {
        return map.keySet();
    }

    /**
     * Returns the live value set for {@code key}, or an immutable empty set
     * when the key is absent.
     *
     * @param key the branch to read
     * @return the branch contents (never {@code null})
     */
    public Set<V> getBranch(K key) {
        Set<V> set = map.get(key);
        if (set != null) {
            return set;
        }
        return Collections.emptySet();
    }

    /**
     * Flattens all branches into one collection, in key order and, within a
     * branch, in value order.
     *
     * @return a new mutable collection of every stored value
     */
    public Collection<V> values() {
        Collection<V> values = new ArrayList<V>();
        for (Set<V> set : map.values()) {
            values.addAll(set);
        }
        return values;
    }

    /**
     * Removes a single value from a branch.
     *
     * @param key   the branch to remove from
     * @param value the value to remove
     * @return {@code true} if the value was present and removed
     */
    public boolean remove(K key, V value) {
        Set<V> set = map.get(key);
        if (set != null) {
            return set.remove(value);
        }
        return false;
    }

    @Override
    public String toString() {
        return values().toString();
    }
}
|
jleopold28/snippets-and-notes
|
puppet/conjur/conjur/lib/puppet/parser/functions/conjur_fetch.rb
|
# Puppet parser function `conjur_fetch`: shells out to the conjur CLI and
# returns the value of the variable "<args[0]>/<args[1]>".
module Puppet::Parser::Functions
  newfunction(:conjur_fetch, type: :rvalue) do |args|
    require 'shellwords'
    # SECURITY: the original interpolated args[0]/args[1] into the backtick
    # command unescaped, allowing shell injection (e.g. an id containing ";").
    # Escape the composed variable id before handing it to the shell.
    variable_id = "#{args[0]}/#{args[1]}"
    `/usr/local/bin/conjur variable value #{Shellwords.escape(variable_id)}`
  end
end
|
kkcookies99/UAST
|
Dataset/Leetcode/test/9/152.py
|
class Solution(object):
    def XXX(self, x):
        """Return True iff the integer x is a palindrome in base 10.

        Negative numbers are never palindromes. Digits are collected
        least-significant first, then compared from both ends.
        """
        if x < 0:
            return False
        # Gather the decimal digits of x (least significant first).
        digits = []
        power = 1
        while power <= x:
            digits.append((x % (power * 10)) // power)
            power *= 10
        # Compare outermost digits, shrinking the list from both ends.
        while len(digits) > 1:
            if digits[0] != digits[-1]:
                return False
            digits.pop(0)
            digits.pop()
        return True
if __name__=='__main__':
    # Ad-hoc smoke test: 123321 is a palindrome, so this prints True.
    x=123321
    sl=Solution()
    print(sl.XXX(x))
|
chingu-voyages/v19-geckos-team-04
|
src/components/App/App.js
|
import React, { Component, useContext } from 'react';
import { BrowserRouter, Route } from 'react-router-dom';
import queryString from 'query-string';
import './App.scss';
import Dashboard from '../Dashboard/Dashboard';
import SignIn from '../LoggedOut/SignIn';
import ThemeContextProvider from '../../context/ThemeContext';
import { DarkTheme } from '../Shared/Styles/DarkTheme';
import { LightTheme } from '../Shared/Styles/LightTheme';
import { ThemeContext } from '../../context/ThemeContext';
import UserContextProvider from '../../context/UserContext';
import styled, { ThemeProvider, createGlobalStyle } from 'styled-components';
import GlobalTheme from '../Shared/Styles/GlobalStyle';
class App extends Component {
constructor(props) {
super(props);
this.state = {
serverData: { user: false },
filterString: '',
isModalOpen: false,
isDarkMode: true,
accessToken: '',
username: '',
};
}
componentDidMount() {
let parsed = queryString.parse(window.location.search);
let urlAccessToken = parsed.access_token;
let urlUsername = parsed.username;
if (urlAccessToken) {
this.setState({ accessToken: urlAccessToken, username: urlUsername });
fetch('https://api.spotify.com/v1/me', {
headers: { Authorization: 'Bearer ' + urlAccessToken }
})
.catch(error => {
console.log(error);
})
.then(res => res.json())
.then(data => this.setState({ serverData: { user: data } }));
} else {
this.setState({ serverData: { user: '' } });
}
}
render() {
return (
<BrowserRouter>
<ThemeContextProvider>
<UserContextProvider>
<GlobalStyle />
{this.state.serverData.user.display_name ? (
<>
<Route
path="/dashboard"
render={props => (
<Dashboard
{...props}
userData={this.state.serverData.user}
accessToken={this.state.accessToken}
username={this.state.username}
/>
)}
/>
</>
) : (
<>
<Route
path="/"
render={props => (
<SignIn {...props} isDark={this.state.isDarkMode} />
)}
/>
</>
)}
</UserContextProvider>
</ThemeContextProvider>
</BrowserRouter>
);
}
}
export default App;
// Global styles for the app shell.
// NOTE(review): every conditional below is hard-coded to `true`, so the
// LightTheme branches are currently dead and the `props` parameter unused —
// presumably a placeholder until theme state is wired through; confirm
// before relying on light mode.
const GlobalStyle = createGlobalStyle`
body, html {
background-color: ${(props) => true ? DarkTheme.gunmetal : LightTheme.lightcream };
}
.logo-text-TEMPORARY {
color: ${(props) => true ? DarkTheme.lightgray : LightTheme.black};
}
.header-text {
color: ${(props) => true ? DarkTheme.white : LightTheme.white };
}
.tour-btn {
/* Tour button has two color sets. */
background-color: ${(props) => true ? DarkTheme.tourbtn : LightTheme.lightgray};
&:hover {
background-color: ${(props) => true ? DarkTheme.mediumgray : LightTheme.tourbtnhover }
}
}
.subhead-text {
color: ${(props) => true ? DarkTheme.lightgray : LightTheme.darkgray };
}
`;
|
todbot/Blink1Control2
|
src/renderer/components/gui/blink1TabViews.js
|
<filename>src/renderer/components/gui/blink1TabViews.js
"use strict";
import React from 'react';
import { Tabs } from 'react-bootstrap';
import { Tab } from 'react-bootstrap';
import { Button } from 'react-bootstrap';
import BigButtonSet from './bigButtonSet';
import ToolTable from './toolTable';
import { ipcRenderer } from 'electron';
/**
 * Two-tab main view for the blink(1) GUI: a "Buttons" tab (BigButtonSet)
 * and an "Event Sources" tab (ToolTable), plus a Help link that asks the
 * main process to open the help window.
 */
export default class Blink1TabViews extends React.Component {
    constructor(props) {
        super(props);
        // Bind once so the handler keeps `this` when passed to onClick.
        this.openHelpWindow = this.openHelpWindow.bind(this);
    }
    // Ask the Electron main process (via IPC) to open the help window.
    openHelpWindow() {
        ipcRenderer.send('openHelpWindow');
    }
    render() {
        // Shared fixed-height panel style for both tab bodies.
        var tabstyle = {height: 220, padding: 5, margin: 0, background: "#fff", border: "solid 1px #ddd"};
        return (
            <div style={{width:705}}>
                <div style={{float:'right'}}><Button bsStyle="link" onClick={this.openHelpWindow}>Help</Button></div>
                <Tabs defaultActiveKey={1} animation={false} id='blink1tabview' >
                    <Tab eventKey={1} title={<span><i className="fa fa-long-arrow-right"></i> Buttons</span>}>
                        <div style={tabstyle}>
                            <BigButtonSet />
                        </div>
                    </Tab>
                    <Tab eventKey={2} title={<span><i className="fa fa-plug"></i> Event Sources</span>}>
                        <div style={tabstyle}>
                            <ToolTable />
                        </div>
                    </Tab>
                </Tabs>
            </div>
        );
    }
}
|
ooibc88/Hyperledger-Fabric-
|
ustore_home/include/recovery/log_reader.h
|
<reponame>ooibc88/Hyperledger-Fabric-<gh_stars>100-1000
// Copyright (c) 2017 The Ustore Authors.
#ifndef USTORE_RECOVERY_LOG_READER_H_
#define USTORE_RECOVERY_LOG_READER_H_
#include "recovery/log_entry.h"
#include "recovery/log_cursor.h"
#include "recovery/single_log_reader.h"
namespace ustore {
namespace recovery {
// Sequential reader over a directory of numbered log files; drives a
// SingleLogReader and transparently follows USTORE_SWITCH_LOG boundaries.
class LogReader {
 public:
  static const uint64_t WAIT_TIME = 1000000;  // us ==> 1s
  static const uint64_t FAIL_TIME = 60;

 public:
  // BUG FIX: the original declared UStoreLogReader()/~UStoreLogReader(),
  // leftovers from a class rename. Members with no return type must match
  // the class name, so these could not compile inside `class LogReader`.
  LogReader();
  virtual ~LogReader();

  /*
   * @brief initailize LogReader class, which should be invoked after
   * the object malloc and before the other functions' invokation
   * @param [in] reader: read a single log file
   * @param [in] log_dir: log directory
   * @param [in] log_file_id_start: start file id
   * @param [in] log_seq: previous log entry sequence id that are read
   * @param [in] is_retry: whether retry when error occurs
   * */
  int Init(SingleLogReader *reader, const char* log_dir,
           uint64_t log_file_id_start, uint64_t log_seq, bool is_retry);

  /*
   * @brief read a log entry from the log file. If the log command is
   * USTORE_SWITCH_LOG, the open the next log file directly.
   * However, if next log file does not exist,
   * it may because the log under construction.
   * In this case, wait 1ms and retry 10 times. Otherwise, return error
   * @return USTORE_LOG_SUCCESS, USTORE_LOG_NOTHING, USTORE_LOG_ERROR
   * */
  int ReadLog(LogCommand* cmd, uint64_t* seq_id,
              char** log_data, uint64_t* data_length);

  // Upper bound on the file ids this reader may advance to.
  void SetMaxLogFileId(uint64_t max_log_file_id);
  uint64_t GetMaxLogFileId() const;
  bool GetHasMax() const;
  int SetHasNoMax();

  // Current read position: file id, entry sequence id, byte offset.
  uint64_t GetCurLogFileId();
  uint64_t GetCurLogSeqId();
  uint64_t GetLastLogOffset();
  int GetCursor(LogCursor* cursor);

 private:
  // Skip forward until the entry with sequence id log_seq.
  int Seek_(uint64_t log_seq);
  // Open the file with the given id, resuming after last_log_seq.
  int OpenLog_(uint64_t log_file_id, uint64_t last_log_seq = 0);
  // Raw single-entry read; ReadLog() adds file-switch handling on top.
  int ReadLog_(LogCommand* cmd, uint64_t* log_seq,
               char** log_data, uint64_t* data_length);

  uint64_t cur_log_file_id_;
  uint64_t cur_log_seq_id_;
  uint64_t max_log_file_id_;
  SingleLogReader log_file_reader_;
  bool is_init_;
  bool is_retry_;
  bool has_max_;
};
} // namespace recovery
} // namespace ustore
#endif // USTORE_RECOVERY_LOG_READER_H_
|
openlibraryenvironment/rice
|
rice-framework/krad-app-framework/src/main/java/org/kuali/rice/krad/service/XmlObjectSerializerService.java
|
<reponame>openlibraryenvironment/rice<filename>rice-framework/krad-app-framework/src/main/java/org/kuali/rice/krad/service/XmlObjectSerializerService.java
/**
* Copyright 2005-2014 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.krad.service;
/**
* This interface defines methods that an XmlObjectSerializer Service must provide. This will wrap our needs for xml to object and
* object to xml functionality.
*
*
*/
public interface XmlObjectSerializerService {

    /**
     * Marshals out XML from an object instance.
     *
     * @param object the object to serialize
     * @return the XML representation of the given object
     */
    public String toXml(Object object);

    /**
     * Retrieves an Object instance from a String of XML - unmarshals.
     *
     * @param xml the XML to deserialize
     * @return the object reconstructed from the given XML
     */
    public Object fromXml(String xml);
}
|
kaustubh2708/serritor
|
src/main/java/com/github/peterbencze/serritor/internal/CrawlEvent.java
|
<reponame>kaustubh2708/serritor
/*
* Copyright 2017 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.peterbencze.serritor.internal;
import com.github.peterbencze.serritor.api.CrawlCandidate;
/**
* Base class from which all crawl event classes shall be derived.
*/
/**
 * Base class from which all crawl event classes shall be derived.
 * Immutable: the crawl candidate is fixed at construction time.
 */
public abstract class CrawlEvent {

    private final CrawlCandidate crawlCandidate;

    /**
     * Base constructor of all crawl event classes.
     *
     * @param crawlCandidate the current crawl candidate
     */
    protected CrawlEvent(final CrawlCandidate crawlCandidate) {
        this.crawlCandidate = crawlCandidate;
    }

    /**
     * Returns the current crawl candidate.
     *
     * @return the current crawl candidate
     */
    public final CrawlCandidate getCrawlCandidate() {
        return crawlCandidate;
    }
}
|
GaloisInc/adapt
|
legacy/tools/signal_completion.py
|
#! /usr/bin/env python3
# Copyright 2016, Palo Alto Research Center.
# Developed with sponsorship of DARPA.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising from,
# out of or in connection with the software or the use or other dealings in
# the software.
#
'''
Injects a kafka message that says some component is done with processing.
'''
import argparse
import kafka
import logging
import struct
__author__ = '<EMAIL>'

# Module-level logger: timestamped records to stderr at INFO and above.
log = logging.getLogger(__name__)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.INFO)

# Single-byte status codes published on the kafka topic.
STATUS_IN_PROGRESS = b'\x00'
STATUS_DONE = b'\x01'
class Injector:
    """Publishes single-byte status messages to a kafka topic."""

    def __init__(self, url):
        # Connects eagerly; `url` is a host:port kafka bootstrap address.
        self.producer = kafka.KafkaProducer(bootstrap_servers=[url])

    def report_status(self, status, topic):
        """Synchronously send one status byte (e.g. STATUS_DONE) to `topic`."""
        def to_int(status_byte):
            # struct.unpack returns a tuple; take the lone unsigned byte.
            return struct.unpack("B", status_byte)[0]
        log.info("reporting %d", to_int(status))
        # .get() blocks until the broker acknowledges the send.
        s = self.producer.send(topic, status).get()
        log.info("sent: %s", s)
def arg_parser():
    """Build the CLI parser: an optional --kafka address plus a required topic."""
    parser = argparse.ArgumentParser(
        description='Injects a "done" kafka message,'
        ' signaling that processing is complete.')
    parser.add_argument('--kafka', help='location of the kafka pub-sub service',
                        default='localhost:9092')
    parser.add_argument('topic',
                        help="Send message via this kafka channel, e.g.: se")
    return parser
if __name__ == '__main__':
    # Parse the CLI and synchronously publish the DONE byte to the topic.
    args = arg_parser().parse_args()
    Injector(args.kafka).report_status(STATUS_DONE, args.topic)
|
zipated/src
|
components/viz/test/test_gpu_memory_buffer_manager.cc
|
<reponame>zipated/src
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/viz/test/test_gpu_memory_buffer_manager.h"
#include <stddef.h>
#include <stdint.h>
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/gpu_memory_buffer.h"
namespace viz {
namespace {
// Test GpuMemoryBuffer backed by base::SharedMemory. Registers its lifetime
// with the owning TestGpuMemoryBufferManager (unregisters in the destructor).
class GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
 public:
  GpuMemoryBufferImpl(TestGpuMemoryBufferManager* manager,
                      int id,
                      const gfx::Size& size,
                      gfx::BufferFormat format,
                      std::unique_ptr<base::SharedMemory> shared_memory,
                      size_t offset,
                      size_t stride)
      : manager_(manager),
        id_(id),
        size_(size),
        format_(format),
        shared_memory_(std::move(shared_memory)),
        offset_(offset),
        stride_(stride),
        mapped_(false) {}

  // Tell the manager this buffer id is gone so its bookkeeping stays accurate.
  ~GpuMemoryBufferImpl() override { manager_->OnGpuMemoryBufferDestroyed(id_); }

  // Overridden from gfx::GpuMemoryBuffer:
  bool Map() override {
    DCHECK(!mapped_);
    // Test-only restriction: the stride must be the tightly-packed row size.
    DCHECK_EQ(stride_, gfx::RowSizeForBufferFormat(size_.width(), format_, 0));
    // Maps offset_ plus the full buffer size in one region.
    // NOTE(review): passing offset_ + size as the *length* to Map() looks
    // intentional for covering the leading offset — confirm against
    // base::SharedMemory::Map semantics.
    if (!shared_memory_->Map(offset_ +
                             gfx::BufferSizeForBufferFormat(size_, format_)))
      return false;
    mapped_ = true;
    return true;
  }
  // Plane pointer = mapped base + buffer offset + per-plane offset.
  void* memory(size_t plane) override {
    DCHECK(mapped_);
    DCHECK_LT(plane, gfx::NumberOfPlanesForBufferFormat(format_));
    return reinterpret_cast<uint8_t*>(shared_memory_->memory()) + offset_ +
           gfx::BufferOffsetForBufferFormat(size_, format_, plane);
  }
  void Unmap() override {
    DCHECK(mapped_);
    shared_memory_->Unmap();
    mapped_ = false;
  }
  gfx::Size GetSize() const override { return size_; }
  gfx::BufferFormat GetFormat() const override { return format_; }
  // Stride is recomputed per plane rather than read from stride_.
  int stride(size_t plane) const override {
    DCHECK_LT(plane, gfx::NumberOfPlanesForBufferFormat(format_));
    return base::checked_cast<int>(gfx::RowSizeForBufferFormat(
        size_.width(), format_, static_cast<int>(plane)));
  }
  gfx::GpuMemoryBufferId GetId() const override { return id_; }
  // Exports a shared-memory handle describing this buffer's region.
  gfx::GpuMemoryBufferHandle GetHandle() const override {
    gfx::GpuMemoryBufferHandle handle;
    handle.type = gfx::SHARED_MEMORY_BUFFER;
    handle.handle = shared_memory_->handle();
    handle.offset = base::checked_cast<uint32_t>(offset_);
    handle.stride = base::checked_cast<int32_t>(stride_);
    return handle;
  }
  ClientBuffer AsClientBuffer() override {
    return reinterpret_cast<ClientBuffer>(this);
  }

 private:
  TestGpuMemoryBufferManager* manager_;
  gfx::GpuMemoryBufferId id_;
  const gfx::Size size_;
  gfx::BufferFormat format_;
  std::unique_ptr<base::SharedMemory> shared_memory_;
  size_t offset_;
  size_t stride_;
  bool mapped_;
};
// Non-owning wrapper that forwards every gfx::GpuMemoryBuffer call to a
// client-provided buffer, while reporting its own id and notifying the
// manager on destruction. The wrapped buffer must outlive this object.
class GpuMemoryBufferFromClient : public gfx::GpuMemoryBuffer {
 public:
  GpuMemoryBufferFromClient(TestGpuMemoryBufferManager* manager,
                            int id,
                            gfx::GpuMemoryBuffer* client_buffer)
      : manager_(manager), id_(id), client_buffer_(client_buffer) {}

  ~GpuMemoryBufferFromClient() override {
    manager_->OnGpuMemoryBufferDestroyed(id_);
  }

  bool Map() override { return client_buffer_->Map(); }
  void* memory(size_t plane) override { return client_buffer_->memory(plane); }
  void Unmap() override { client_buffer_->Unmap(); }
  gfx::Size GetSize() const override { return client_buffer_->GetSize(); }
  gfx::BufferFormat GetFormat() const override {
    return client_buffer_->GetFormat();
  }
  int stride(size_t plane) const override {
    return client_buffer_->stride(plane);
  }
  // Identity comes from this wrapper, not the wrapped buffer.
  gfx::GpuMemoryBufferId GetId() const override { return id_; }
  gfx::GpuMemoryBufferHandle GetHandle() const override {
    return client_buffer_->GetHandle();
  }
  ClientBuffer AsClientBuffer() override {
    return client_buffer_->AsClientBuffer();
  }

 private:
  TestGpuMemoryBufferManager* manager_;
  gfx::GpuMemoryBufferId id_;
  gfx::GpuMemoryBuffer* client_buffer_;
};
} // namespace
TestGpuMemoryBufferManager::TestGpuMemoryBufferManager() {}

TestGpuMemoryBufferManager::~TestGpuMemoryBufferManager() {
  // All buffers created through this manager must already be destroyed.
  {
    base::AutoLock hold(buffers_lock_);
    DCHECK(buffers_.empty());
  }
  DCHECK(clients_.empty());
  // Detach from the parent manager, if this is a client manager.
  if (parent_gpu_memory_buffer_manager_)
    parent_gpu_memory_buffer_manager_->clients_.erase(client_id_);
}

// Creates a child manager registered under a fresh client id; the child
// unregisters itself from this parent in its destructor.
std::unique_ptr<TestGpuMemoryBufferManager>
TestGpuMemoryBufferManager::CreateClientGpuMemoryBufferManager() {
  std::unique_ptr<TestGpuMemoryBufferManager> client(
      new TestGpuMemoryBufferManager);
  client->client_id_ = ++last_client_id_;
  client->parent_gpu_memory_buffer_manager_ = this;
  clients_[client->client_id_] = client.get();
  return client;
}

// Called from buffer destructors to drop the bookkeeping entry.
void TestGpuMemoryBufferManager::OnGpuMemoryBufferDestroyed(
    gfx::GpuMemoryBufferId gpu_memory_buffer_id) {
  base::AutoLock hold(buffers_lock_);
  DCHECK(buffers_.find(gpu_memory_buffer_id.id) != buffers_.end());
  buffers_.erase(gpu_memory_buffer_id.id);
}

// Allocates an anonymous shared-memory backed buffer with a fresh id and
// tracks it in buffers_. Returns null if the shared memory cannot be created.
std::unique_ptr<gfx::GpuMemoryBuffer>
TestGpuMemoryBufferManager::CreateGpuMemoryBuffer(
    const gfx::Size& size,
    gfx::BufferFormat format,
    gfx::BufferUsage usage,
    gpu::SurfaceHandle surface_handle) {
  std::unique_ptr<base::SharedMemory> shared_memory(new base::SharedMemory);
  const size_t buffer_size = gfx::BufferSizeForBufferFormat(size, format);
  if (!shared_memory->CreateAnonymous(buffer_size))
    return nullptr;
  last_gpu_memory_buffer_id_ += 1;
  // Offset 0, tightly-packed stride for plane 0.
  std::unique_ptr<gfx::GpuMemoryBuffer> result(new GpuMemoryBufferImpl(
      this, last_gpu_memory_buffer_id_, size, format, std::move(shared_memory),
      0,
      base::checked_cast<int>(
          gfx::RowSizeForBufferFormat(size.width(), format, 0))));
  base::AutoLock hold(buffers_lock_);
  buffers_[last_gpu_memory_buffer_id_] = result.get();
  return result;
}

// Intentionally a no-op in the test implementation.
void TestGpuMemoryBufferManager::SetDestructionSyncToken(
    gfx::GpuMemoryBuffer* buffer,
    const gpu::SyncToken& sync_token) {}
} // namespace viz
|
Huynhvantoan/uHotel
|
app/src/main/java/com/acuteksolutions/uhotel/ui/adapter/concierge/RoomAdapter.java
|
<gh_stars>0
package com.acuteksolutions.uhotel.ui.adapter.concierge;
import android.annotation.SuppressLint;
import android.view.View;
import android.widget.TextView;
import com.acuteksolutions.uhotel.R;
import com.acuteksolutions.uhotel.interfaces.SaveDataRoomListener;
import com.acuteksolutions.uhotel.interfaces.ViewpagerListener;
import com.acuteksolutions.uhotel.mvp.model.conciege.Room;
import com.acuteksolutions.uhotel.mvp.model.conciege.RoomExpand;
import com.chad.library.adapter.base.BaseMultiItemQuickAdapter;
import com.chad.library.adapter.base.BaseViewHolder;
import com.chad.library.adapter.base.entity.MultiItemEntity;
import org.adw.library.widgets.discreteseekbar.DiscreteSeekBar;
import java.util.List;
/**
 * Adapter for the concierge room-control list. Renders two item types: an
 * expandable group header (TYPE_EXPANDABLE) and a room row with a value
 * seek bar (TYPE_ROOM). Tracks which group is currently expanded so only
 * one stays open at a time.
 */
public class RoomAdapter extends BaseMultiItemQuickAdapter<MultiItemEntity, BaseViewHolder> {
    // Callback used to persist seek-bar values and refresh the list.
    private SaveDataRoomListener saveDataRoomListener;
    // Callback used to lock/unlock ViewPager swiping while a seek bar is dragged.
    private ViewpagerListener viewpagerListener;
    // Adapter position of the currently expanded group header.
    private int posExpand=0;
    // Whether any group is currently expanded.
    private boolean isExpand=false;
    public static final int TYPE_EXPANDABLE = 0;
    public static final int TYPE_ROOM = 1;

    public RoomAdapter(List<MultiItemEntity> data,SaveDataRoomListener saveDataRoomListener,ViewpagerListener viewpagerListener) {
        super(data);
        addItemType(TYPE_EXPANDABLE, R.layout.room_list_item_expand);
        addItemType(TYPE_ROOM, R.layout.room_list_item_slider);
        this.saveDataRoomListener=saveDataRoomListener;
        this.viewpagerListener=viewpagerListener;
    }

    @SuppressLint("DefaultLocale")
    @Override
    protected void convert(BaseViewHolder holder, MultiItemEntity item) {
        switch (holder.getItemViewType()) {
            case TYPE_EXPANDABLE:
                RoomExpand roomExpand = (RoomExpand)item;
                // Header shows " (n)" when the group has entries, "0" otherwise,
                // and highlights title/count while expanded.
                TextView total=(TextView)holder.getView(R.id.txt_total);
                total.setText(roomExpand.getTotal() > 0 ? String.format(" (%d)", roomExpand.getTotal()) : "0");
                total.setTextColor(roomExpand.isExpanded() ? mContext.getResources().getColor(R.color.tab_select) : mContext.getResources().getColor(R.color.white));
                holder.setText(R.id.txt_name_expandable, roomExpand.getTitle())
                        .setTextColor(R.id.txt_name_expandable,roomExpand.isExpanded() ? mContext.getResources().getColor(R.color.tab_select) : mContext.getResources().getColor(R.color.white))
                        .setImageResource(R.id.img_arrow, roomExpand.isExpanded() ? R.drawable.room_arrow_up : R.drawable.room_arrow_down);
                if(roomExpand.getTotal()==0)
                    total.setVisibility(View.GONE);
                holder.itemView.setOnClickListener(v -> {
                    // If a different group is open, close it first so at most
                    // one group is expanded at a time.
                    if(isExpand && posExpand != holder.getAdapterPosition()){
                        collapse(posExpand);
                        saveDataRoomListener.refreshList();
                    }
                    isExpand = posExpand != holder.getAdapterPosition();
                    posExpand = holder.getAdapterPosition();
                    // Toggle the clicked group.
                    if (roomExpand.isExpanded()) {
                        collapse(posExpand);
                        saveDataRoomListener.refreshList();
                    } else {
                        expand(posExpand);
                    }
                });
                break;
            case TYPE_ROOM:
                Room room = (Room)item;
                // Room row: label, numeric readout, and a seek bar bound to it.
                TextView mProgress=holder.getView(R.id.txt_progress);
                mProgress.setText(String.valueOf(room.getValue()));
                holder.setText(R.id.txt_name, room.getName());
                DiscreteSeekBar discreteSeekBar=holder.getView(R.id.seekBar);
                discreteSeekBar.setProgress(room.getValue());
                discreteSeekBar.setOnProgressChangeListener(new DiscreteSeekBar.OnProgressChangeListener() {
                    @Override
                    public void onProgressChanged(DiscreteSeekBar discreteSeekBar, int progress, boolean b) {
                        // Persist the new value and mirror it in the readout.
                        saveDataRoomListener.saveData(posExpand,room.getPosition(),progress);
                        mProgress.setText(String.valueOf(progress));
                    }
                    @Override
                    public void onStartTrackingTouch(DiscreteSeekBar discreteSeekBar) {
                        // Suspend pager swiping while the user drags the thumb.
                        viewpagerListener.disableSwipe(false);
                    }
                    @Override
                    public void onStopTrackingTouch(DiscreteSeekBar discreteSeekBar) {
                        viewpagerListener.disableSwipe(true);
                    }
                });
                break;
        }
    }
}
|
Kitingu/SendIT-Api
|
app/api/v1/views/user.py
|
from flask_restplus import Resource, Namespace, reqparse, fields
from werkzeug.security import check_password_hash, generate_password_hash
from flask import request
from app.api.utils.app_docs import v1_user, new_user, user_login
from app.api.utils.parcel_validator import UserSchema, LoginSchema, validator
from ..models.user_model import UserModel
from ..models.orders_model import OrdersModel
from marshmallow import post_load
from instance.config import Config
import datetime
import jwt
# Module-level singletons shared by all resource classes below.
user_db = UserModel()
order_db = OrdersModel()
class User(Resource):
    """Registration and listing endpoint for users."""

    @v1_user.expect(new_user)
    @post_load()
    def post(self):
        """Register a new user.

        Expects JSON with ``username``, ``email``, ``password`` and
        ``confirm_password``.  Returns 201 on success, 409 when the email
        is already registered, 401 when the passwords do not match.
        """
        if not request.is_json:
            return {"msg": "Missing JSON in request"}, 400
        data = v1_user.payload
        schema = UserSchema()
        error_types = ['username', 'email', 'password']
        errors = validator(schema, error_types, data)
        if errors:
            return errors
        hashed_pass = generate_password_hash(data['password'])
        # Renamed from ``new_user``: the old local shadowed the ``new_user``
        # request model imported at module level from app_docs.
        existing_user = user_db.get_single_user(data['email'])
        if existing_user:
            return "user with email: {} already exists".format(data["email"]), 409
        # check_password_hash compares the confirmation against the freshly
        # hashed password, i.e. password == confirm_password.
        if check_password_hash(hashed_pass, data['confirm_password']):
            user_db.save(data['email'], data['username'], hashed_pass)
            return {"message": "User registered successfully"}, 201
        return {"error": "passwords do not match"}, 401

    def get(self):
        """Return every registered user."""
        return user_db.get_all_users()
class Login(Resource):
    """Login endpoint issuing a short-lived JWT."""

    @v1_user.expect(user_login)
    def post(self):
        """Authenticate a user and return an access token.

        Expects JSON with ``email`` and ``password``.  The token carries an
        ``exp`` claim one minute in the future.
        """
        if not request.is_json:
            return {"msg": "Missing JSON in request"}, 400
        data = v1_user.payload
        email = str(data['email'])
        password = str(data['password'])
        user = user_db.get_single_user(email)
        if user:
            # Compare against the stored hash, never the raw password.
            if check_password_hash(user_db.db[email]['password'], password):
                access_token = jwt.encode(
                    {"email": email, "exp": datetime.datetime.utcnow() +
                     datetime.timedelta(minutes=1)},
                    Config.SECRET_KEY)
                # NOTE(review): ``.decode('utf-8')`` assumes PyJWT 1.x where
                # encode() returns bytes; PyJWT >= 2.0 returns str and this
                # would raise AttributeError — confirm the pinned version.
                return {"access_token": access_token.decode('utf-8')}, 200
            return {"msg": "Invalid email or password"}, 401
        return "user does not exist", 400
class UserParcels(Resource):
    """Resource exposing the parcel orders made by a specific user."""

    def get(self, user_id):
        """Return all orders placed by ``user_id``.

        Returns 404 with a message when the lookup result is empty/falsy
        (user unknown or has no orders).
        """
        resp = order_db.get_by_specific_user(user_id)
        if resp:
            # Reuse the already-fetched result instead of querying the
            # store a second time (the original called the model twice).
            return resp
        return {"message": "user does not exist"}, 404
# Wire the resources onto the user namespace.
v1_user.add_resource(User, '', strict_slashes=False)
v1_user.add_resource(Login, '/login', strict_slashes=False)
v1_user.add_resource(UserParcels, '/<user_id>/parcels', strict_slashes=False)
|
CoderHam/WebScraper
|
node_modules/webdriverio/test/spec/submitForm.js
|
<reponame>CoderHam/WebScraper<filename>node_modules/webdriverio/test/spec/submitForm.js
/* global beforeEach */
describe('submitForm', function () {
    beforeEach(h.setup());

    /**
     * Builds a `.then` handler asserting that the isExisting result
     * equals the expected boolean.
     */
    function expectExisting(expected) {
        return function (isExisting) {
            isExisting.should.be.exactly(expected);
        };
    }

    it('should send data from form', function () {
        return this.client
            .isExisting('.gotDataA').then(expectExisting(false))
            .isExisting('.gotDataB').then(expectExisting(false))
            .isExisting('.gotDataC').then(expectExisting(false))
            .submitForm('.send')
            .pause(1000)
            .isExisting('.gotDataA').then(expectExisting(true))
            .isExisting('.gotDataB').then(expectExisting(true))
            .isExisting('.gotDataC').then(expectExisting(true));
    });
});
|
zhuhanming/duchess
|
src/main/java/duke/util/MagicStrings.java
|
package duke.util;
/**
 * The {@code MagicStrings} class centralises all user-facing and internal
 * string constants (error messages, date-time prefixes, GSON attribute keys).
 */
public class MagicStrings {
    public static final String BLANK = "";

    // Date time helper strings.
    public static final String DATE_TIME_OVERDUE = " [OVERDUE]";
    public static final String DATE_TIME_TODAY = "Today ";
    public static final String DATE_TIME_TOMORROW = "Tomorrow ";
    public static final String DATE_TIME_YESTERDAY = "Yesterday ";

    // Error messages
    // Fixed typo: "wont't" -> "won't".
    public static final String ERROR_CANNOT_UNDO = "Failed to save your actions... You won't be able to undo!";
    public static final String ERROR_COMMAND_MISSING_INDEX = "Your command needs the position of the task to "
            + "operate with!";
    public static final String ERROR_COMMAND_TOO_MANY_INDICES = "Your command can only work with a single index!";
    public static final String ERROR_DEADLINE_MISSING_CONTENT = "Your deadline content cannot be empty! "
            + "Type help if you need help.";
    public static final String ERROR_DEADLINE_MISSING_DEADLINE = "I don't know when your deadline is! "
            + "Please use /by [deadline here].";
    public static final String ERROR_EVENT_MISSING_CONTENT = "Your event content cannot be empty! "
            + "Type help if you need help.";
    public static final String ERROR_EVENT_MISSING_TIME_FRAME = "I don't know when is your event! "
            + "Please use /at [time here].";
    public static final String ERROR_INDEX_OUT_OF_BOUNDS = "You're referring to a task which does not exist!";
    public static final String ERROR_INVALID_COMMAND = "I don't see what I can do with what you just told me.";
    //    public static final String ERROR_INVALID_FILE_PATH = "Your file path is invalid. A new JSON file will "
    //            + "be used for saving.";
    public static final String ERROR_INVALID_SNOOZE_DURATION = "Your snooze duration is not valid! Something "
            + "like '/for 3 days' would work";
    public static final String ERROR_INVALID_UNIT_OF_TIME = "Your unit of time is not recognised! Please use "
            + "hours/days/weeks/months/years.";
    public static final String ERROR_FAIL_TO_LOAD = "Failed to load save file! Creating new save file.";
    public static final String ERROR_FAIL_TO_LOAD_AND_SAVE = "Failed to load save file! "
            + "You will also not be able to save.";
    public static final String ERROR_FAIL_TO_SAVE = "Facing difficulties saving your tasks right now.";
    public static final String ERROR_NO_COMPLETED_TASKS = "You don't have any completed tasks in your main list!";
    public static final String ERROR_NO_MORE_UNDOS = "Sorry, you're all out of undos!";
    public static final String ERROR_RECURRING_TASK_MISSING_DEADLINE = "This recurring task "
            + "seem to have no deadline!";
    public static final String ERROR_SNOOZING_NON_DEADLINE = "You can't snooze a task with no deadline!";
    public static final String ERROR_SORTING_EMPTY_LIST = "You don't have any tasks... Sort yourself out first.";
    public static final String ERROR_STATS_INVALID_TIME = "I need to know the time frame: today/this week/this month/"
            + "this year!";
    public static final String ERROR_TASK_ALREADY_COMPLETED = "You have already completed this task!";
    public static final String ERROR_TASK_CREATED_BEFORE = "You have an existing task with the same information!";
    public static final String ERROR_TODO_MISSING_CONTENT = "Your todo content cannot be empty! "
            + "Type help if you need help.";
    public static final String ERROR_USED_FOR_TESTING = "This error message is purely for testing purposes. If you "
            + "see this in production, something is wrong.";
    public static final String ERROR_WRONG_DATE_FORMAT = "Your date is of the wrong format.\n"
            + "Type help to view the accepted formats.";
    public static final String ERROR_WRONG_FREQUENCY_FORMAT = "Your frequency is of the wrong format.\n"
            + "Type help to view the accepted formats.";

    // GSON helper strings (attribute names used during (de)serialisation).
    public static final String GSON_ATTR_COMPLETION_TIME = "completionTime";
    public static final String GSON_ATTR_CREATION_TIME = "creationTime";
    public static final String GSON_ATTR_DEADLINE = "deadline";
    public static final String GSON_ATTR_DESCRIPTION = "description";
    public static final String GSON_ATTR_FREQUENCY = "frequency";
    public static final String GSON_ATTR_IS_COMPLETED = "isCompleted";
    public static final String GSON_ATTR_IS_COMPLETED_ON_TIME = "isCompletedOnTime";
    public static final String GSON_ATTR_TIME_FRAME = "timeFrame";
    public static final String GSON_ATTR_REPEAT_END_TIME = "repeatEndTime";
}
|
kipsigman/play-extensions
|
src/test/scala/kipsigman/play/mvc/AjaxHelperSpec.scala
|
package kipsigman.play.mvc
import scala.concurrent.Future
import org.scalatestplus.play.PlaySpec
import org.slf4j.LoggerFactory
import kipsigman.domain.entity.Category
import kipsigman.domain.entity.Role
import kipsigman.domain.entity.UserBasic
import play.api.Configuration
import play.api.Environment
import play.api.i18n.DefaultLangs
import play.api.i18n.DefaultMessagesApi
import play.api.i18n.I18nSupport
import play.api.i18n.MessagesApi
import play.api.libs.json.JsLookupResult.jsLookupResultToJsLookup
import play.api.libs.json.JsValue
import play.api.libs.json.JsValue.jsValueToJsLookup
import play.api.mvc.Result
import play.api.mvc.Results
import play.api.test.FakeRequest
import play.api.test.Helpers.OK
import play.api.test.Helpers.contentAsJson
import play.api.test.Helpers.defaultAwaitTimeout
import play.api.test.Helpers.status
/** Unit tests for AjaxHelper's JSON success-result builders. */
class AjaxHelperSpec extends PlaySpec with Results with I18nSupport {
  override val messagesApi: MessagesApi = new DefaultMessagesApi(Environment.simple(), Configuration.reference, new DefaultLangs(Configuration.reference))
  private val logger = LoggerFactory.getLogger(this.getClass)

  // Fixture user; made implicit because AjaxHelper result builders
  // presumably take an implicit user — confirm against AjaxHelper.
  val user = UserBasic(Option(66), Option("Johnny"), Option("Utah"), "<EMAIL>", None, Set(Role.Member))
  implicit val userOption = Option(user)
  implicit val request = FakeRequest()

  /** Asserts the result is 200 with a "success" status field and returns its JSON body. */
  private def assertOk(resultFuture: Future[Result]): JsValue = {
    status(resultFuture) mustEqual OK
    val content = contentAsJson(resultFuture)
    (content \ AjaxHelper.Status.key).as[String] mustBe AjaxHelper.Status.Success.name
    logger.debug(s"content=$content")
    content
  }

  /** Convenience overload wrapping an already-computed Result. */
  private def assertOk(result: Result): JsValue = {
    val resultFuture = Future.successful(result)
    assertOk(resultFuture)
  }

  "entitySaveSuccessResultId" should {
    "return OK JSON" in {
      val result = AjaxHelper.entitySaveSuccessResult(99)
      val content = assertOk(result)
      (content \ AjaxHelper.Key.id).as[Int] mustBe 99
    }
  }

  "entitySaveSuccessResultEntity" should {
    "return OK JSON" in {
      val entity = Category(Option(1), "entertainment", 0)
      val result = AjaxHelper.entitySaveSuccessResult(entity)
      val content = assertOk(result)
      // The id is surfaced both at the top level and inside the serialised entity.
      (content \ AjaxHelper.Key.id).as[Int] mustBe 1
      val entityJsValue = (content \ AjaxHelper.Key.entity)
      (entityJsValue \ "id").asOpt[Int] mustBe entity.id
      (entityJsValue \ "name").as[String] mustBe entity.name
      (entityJsValue \ "order").as[Int] mustBe entity.order
    }
  }
}
|
snkmr/shirasagi
|
app/models/gws/qna/post.rb
|
<gh_stars>100-1000
# "Post" class for BBS. It represents "comment" models.
class Gws::Qna::Post
  include Gws::Referenceable
  include Gws::Qna::Postable
  include Gws::Addon::Contributor
  include SS::Addon::Markdown
  include Gws::Addon::File
  include Gws::Qna::DescendantsFileInfo
  include Gws::Addon::GroupPermission
  include Gws::Addon::History

  # Keep the Elasticsearch index in sync on every save/destroy
  # (indexing to elasticsearch via companion object).
  around_save ::Gws::Elasticsearch::Indexer::QnaPostJob.callback
  around_destroy ::Gws::Elasticsearch::Indexer::QnaPostJob.callback

  # Subscribers are resolved from the owning topic, not stored on the post.
  delegate :subscribed_users, to: :topic
end
|
nicoddemus/dependencies
|
tests/helpers/django_project/api/version.py
|
<reponame>nicoddemus/dependencies<filename>tests/helpers/django_project/api/version.py<gh_stars>0
from rest_framework.versioning import BaseVersioning
from django_project.api.exceptions import VersionError
class DenyVersion(BaseVersioning):
    """DRF versioning scheme that rejects every request.

    Any attempt to resolve an API version raises ``VersionError``.
    """

    def determine_version(self, request, *args, **kwargs):
        # Unconditionally refuse — no version is ever considered valid.
        raise VersionError
|
6l17ch-3xpl017/uchat
|
server/resources/libmx/src/mx_nbr_length.c
|
<reponame>6l17ch-3xpl017/uchat
#include "libmx.h"
/*
 * Returns the number of decimal digits in num (the sign is not counted).
 * 0 counts as a single digit.
 */
int mx_nbr_length(int num) {
    int length;

    for (length = 1; num / 10 != 0; num /= 10)
        length++;
    return length;
}
|
filscorporation/Engine
|
engine/source/Steel/Scene/UUID.h
|
<filename>engine/source/Steel/Scene/UUID.h
#pragma once

#include <cstdint>

// Sentinel meaning "no entity / invalid identifier".
#define NULL_UUID 0

// Engine-wide unique identifier type (64-bit; 0 is reserved as NULL_UUID).
using UUID = uint64_t;
|
WeilerP/cellrank
|
cellrank/tl/estimators/mixins/__init__.py
|
from cellrank.tl.estimators.mixins.decomposition import EigenMixin, SchurMixin
from cellrank.tl.estimators.mixins._lineage_drivers import LinDriversMixin
from cellrank.tl.estimators.mixins._absorption_probabilities import AbsProbsMixin
|
AlexArtaud-Dev/Genconf
|
src/main/java/fr/uga/iut2/genconf/modele/enums/TypeSession.java
|
<gh_stars>1-10
package fr.uga.iut2.genconf.modele.enums;
import java.util.Locale;
import java.util.Optional;
public enum TypeSession {
    Keynote,
    Article,
    Tutorial;

    /**
     * Parses a session type from free-form user input.
     *
     * <p>Matching is case-insensitive and uses {@link Locale#ROOT} so the
     * result does not depend on the default locale (with the Turkish locale,
     * {@code "TUTORIAL".toLowerCase()} yields a dotless i and would fail to
     * match).
     *
     * @param token the text to parse (must not be {@code null})
     * @return the matching {@code TypeSession}, or {@link Optional#empty()}
     *         when the token is not recognised
     */
    public static Optional<TypeSession> parseFrom(final String token) {
        switch (token.toLowerCase(Locale.ROOT)) {
            case "keynote":
                return Optional.of(TypeSession.Keynote);
            case "article":
                return Optional.of(TypeSession.Article);
            case "tutorial":
                return Optional.of(TypeSession.Tutorial);
            default:
                return Optional.empty();
        }
    }
}
|
suggestio/suggestio
|
src1/server/www/app/util/geo/GeoIpUtil.scala
|
package util.geo
import javax.inject.Inject
import io.suggest.geo.{IGeoFindIp, IGeoFindIpResult, MGeoLoc}
import io.suggest.geo.ipgeobase.IpgbUtil
import io.suggest.playx.CacheApiUtil
import io.suggest.util.logs.MacroLogsImpl
import play.api.inject.Injector
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
/**
 * Suggest.io
 * User: <NAME> <<EMAIL>>
 * Created: 20.09.16 12:05
 * Description: Model-level utilities for geolocation by IP.
 * This module exists to hide the geoip subsystems in use behind one facade.
 *
 * Originally only IPGeoBase was supported.
 */
class GeoIpUtil @Inject() (
                            injector                  : Injector,
                          )
  extends IGeoFindIp
  with MacroLogsImpl
{

  override type FindIpRes_t = IGeoFindIpResult

  private lazy val ipgbUtil = injector.instanceOf[IpgbUtil]
  private lazy val cacheApiUtil = injector.instanceOf[CacheApiUtil]
  implicit private lazy val ec = injector.instanceOf[ExecutionContext]

  /** For how many seconds should the result of findIpCached() be cached? */
  def CACHE_TTL_SEC = 10

  /**
   * Look up geodata for an IP address in the geoip subsystems.
   *
   * @param ip An IP address such as "172.16.31.10".
   * @return Future with an optional geolocation result.
   */
  override def findIp(ip: String): Future[Option[IGeoFindIpResult]] = {
    val resFut = ipgbUtil.findIp(ip)

    // Log the result when trace logging is enabled.
    if (LOGGER.underlying.isTraceEnabled()) {
      val startedAtMs = System.currentTimeMillis()
      for (r <- resFut)
        LOGGER.trace(s"findIp($ip) => $r ;; Took ${System.currentTimeMillis() - startedAtMs} ms.")
    }

    // Return the original future.
    resFut
  }

  /**
   * Caching counterpart of findIp().
   *
   * @param ip An IP address such as "172.16.31.10".
   * @return Future with an optional geolocation result.
   */
  def findIpCached(ip: String): Future[Option[IGeoFindIpResult]] = {
    cacheApiUtil.getOrElseFut(ip + ".gIpF", expiration = CACHE_TTL_SEC.seconds) {
      findIp(ip)
    }
  }

  /** Shim: convert an IGeoFindIpResult into MGeoLoc. */
  def geoIpRes2geoLocOptFut(geoIpResOptFut: Future[Option[IGeoFindIpResult]]): Future[Option[MGeoLoc]] = {
    for (geoIpOpt <- geoIpResOptFut) yield {
      for (geoIp <- geoIpOpt) yield {
        geoIp.toGeoLoc
      }
    }
  }

  /**
   * Try to fill possibly-missing geolocation data from geoip.
   * @param geoLocOpt0 Initial geolocation data (possibly empty).
   * @param geoIpLocOptFut By-name geoip lookup, started only when needed.
   * @return Future with the resulting MGeoLoc sequence.
   */
  def geoLocOrFromIp(geoLocOpt0: Seq[MGeoLoc])
                    (geoIpLocOptFut: => Future[Seq[MGeoLoc]]): Future[Seq[MGeoLoc]] = {
    if (geoLocOpt0.isEmpty) {
      // Suppress and log any geoip failures (best-effort fallback).
      geoIpLocOptFut.recover { case ex: Throwable =>
        LOGGER.warn(s"geoLocOrFromIp($geoLocOpt0): failed to geoIP", ex)
        Nil
      }
    } else {
      Future.successful( geoLocOpt0 )
    }
  }

}
Show-vars/overstream
|
overstream-http/src/main/java/com/overstreamapp/http/support/ConnectionPoint.java
|
<filename>overstream-http/src/main/java/com/overstreamapp/http/support/ConnectionPoint.java<gh_stars>1-10
/*
* Copyright 2019 Bunjlabs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.overstreamapp.http.support;
import com.bunjlabs.fuga.util.Assert;
import java.util.Objects;
/**
 * Immutable value describing a remote endpoint: host, port and whether the
 * connection should use SSL/TLS. Equality and hashing cover all three fields.
 */
class ConnectionPoint {

    private final String host;
    private final int port;
    private final boolean ssl;

    ConnectionPoint(String host, int port, boolean ssl) {
        // Reject ports outside the valid TCP range up front.
        Assert.isTrue(port >= 1 && port <= 65535);
        this.host = Assert.hasText(host);
        this.port = port;
        this.ssl = ssl;
    }

    String getHost() {
        return host;
    }

    int getPort() {
        return port;
    }

    boolean isSsl() {
        return ssl;
    }

    @Override
    public boolean equals(Object other) {
        if (other == this) {
            return true;
        }
        if (other == null || other.getClass() != getClass()) {
            return false;
        }
        ConnectionPoint that = (ConnectionPoint) other;
        return this.port == that.port
                && this.ssl == that.ssl
                && this.host.equals(that.host);
    }

    @Override
    public int hashCode() {
        return Objects.hash(host, port, ssl);
    }

    @Override
    public String toString() {
        return String.format("%s:%d ssl:%b", host, port, ssl);
    }
}
|
yangyahu-1994/Python-Crash-Course
|
VSCode_work/chapter7/chapter7_7_7.py
|
# Infinite loop — use with caution (translated from: 无限循环,小心使用).
# Prints 1 forever; intended as a demonstration of `while True`.
while True:
    print(1)
|
julienstroheker/azure-golang-sdk-sandbox
|
pkg/sdk/deployment_test.go
|
package sdk
import "testing"
// TestGetDeploy exercises getDeployment against a deployment client.
// NOTE(review): the deployment ID is hard-coded and nothing is asserted,
// so this is a smoke test — it only fails if getDeployment itself fails
// fatally; confirm whether a real assertion is intended.
func TestGetDeploy(t *testing.T) {
	dc := getDeploymentClient()
	getDeployment(dc, "55d30728-a303-48a3-a274-b1781dd03479")
}
|
DanIverson/OpenVnmrJ
|
src/vnmr/makeslice.c
|
<reponame>DanIverson/OpenVnmrJ
/*
* Copyright (C) 2015 University of Oregon
*
* You may distribute under the terms of either the GNU General Public
* License or the Apache License, as specified in the LICENSE file.
*
* For more information, see the LICENSE file.
*/
/* makeslice.c Manchester version 6.1B 20iv99 */
/* makeslice.c builds up a 2D spectrum from the input table diffusion_display_3D.inp */
/*
Dr. <NAME>
Deparment of Chemistry
University of Manchester
Manchester M13 9PL
UK
*/
/* it only shows peaks with a diffusion integral in the slice of CERTAINTY
* times the full integral
*/
/* Don't insist on data being S_COMPLEX, otherwise absolute value data are
* only usable if pmode='partial'
*/
/* Try allowing real, complex or hypercomplex data; f1pts per data point in F1 */
/*MN changed spurious userdir to curexpdir and directory name "Dosy" to "dosy" */
/*GM 13iv09 don't bother keeping copy in data.orig - dosy macro takes care of this */
/*GM 14iv09 test to ensure 2D data; add debugging */
/*GM 14iv09 Change order in init_makeslice to check data format before reading in parameters */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "group.h"
#include "tools.h"
#include "data.h"
#include "variables.h"
#include "vnmrsys.h"
#include "disp.h"
#include "pvars.h"
#include "wjunk.h"
/* #define DEBUG_MAKESLICE 1 */ /*Comment out not to compile the debugging code */
/* Generic status/flag constants used throughout this file. */
#define ERROR 1
#define TRUE 1
#define FALSE 0
/* Per-peak processing states (struct region.status). */
#define NOT_DONE 0
#define PARTIAL 2
#define DONE 1
#define COMPLETE 0
#define MAXPOINTS 50
#define MAXBLOCKS 128
#define MAXNUMPEAKS 8192
/* NOTE: uses the file-scope temporary 'sqrarg' — not reentrant. */
#define SQR(a) (sqrarg=(a),sqrarg*sqrarg)
#define LABEL_LEN 20
#define COMMENT_LEN 80
/* Fraction of the full diffusion integral a peak must have inside the
 * selected slice to be kept in 's' mode. */
#define CERTAINTY 0.95
/* Number of steps used for the numerical integration. */
#define NINTSTEPS 100
#ifdef DEBUG_MAKESLICE
FILE *debug;
#endif
extern double ll2d_dp_to_frq(double, double, int);
extern int ll2d_frq_to_dp(double, double, int);
static int init_makeslice(int, char * []);
static int compute_integral(int, double);
static dfilehead phasehead,fidhead;
extern int start_from_ft;
/* MANCHESTER VERSION 6.1 14 vii 98 GAM */
void swap(double *a, double *b);/* added by PB to swap two doubles */
static int f1pts,r,fn_int,fn1_int;
static char display_mode;
static double fn,fn1,sw,sw1,sqrarg,mean_diff,diff_window,start_diff,finish_diff;
/* One entry per peak read from diffusion_display_3D.inp: diffusion fit
 * results plus the data-point bounding box and block bookkeeping. */
static struct region { int num, status;
double diff_coef, std_dev, intensity;
int block,two_blks;
int h_min_pt,h_max_pt,v_min_pt,v_max_pt;
} pk[MAXNUMPEAKS];
/*****************************************************************************
*  structure for a peak record
*****************************************************************************/
typedef struct pk_struct {
double f1, f2;
double amp;
double f1_min, f1_max,
f2_min, f2_max;
double fwhh1, fwhh2;
double vol;
int key;
struct pk_struct *next;
char label[LABEL_LEN+1];
char comment[COMMENT_LEN+1];
} peak_struct;
/*****************************************************************************
*  structure for peak table
*  First entry in header is the number of peaks in the table, next
*  entries tell whether the corresponding key is in use (PEAK_FILE_FULL)
*  or not (PEAK_FILE_EMPTY).  (i.e. header[20] tells whether a peak with
*  key 20 is currently in existence.)
*****************************************************************************/
typedef struct {
int num_peaks;
FILE *file;
float version;
peak_struct *head;
short header[MAXNUMPEAKS+1];
char f1_label,f2_label;
int experiment;
int planeno;
} peak_table_struct;
static peak_table_struct *peak_table = NULL;
static peak_struct *peak;
static int within_diffusion_boundaries(int);
extern void delete_peak_table(/*peak_table*/);
/* Scratch buffer for building debug file paths. */
char rubbish[1024];
/*************************************
makeslice()
**************************************/
/*
 * makeslice: VnmrJ command entry point.
 *
 * Rewrites the 2D DOSY data file so that only peaks whose diffusion
 * coefficient falls inside the user-selected slice remain visible.
 * Every data block is zeroed and then the region of each selected peak
 * is copied back, either verbatim ('s' mode) or scaled by the fraction
 * of its diffusion integral inside the slice ('i' mode).
 *
 * Fixes vs. original:
 *  - D_getbuf() failure now frees 'buffer' and ABORTs instead of falling
 *    through and dereferencing an invalid inblock.data.
 *  - 'buffer' is freed on the D_markupdated()/D_release() error paths
 *    (it was leaked on ABORT before).
 */
int makeslice(int argc, char *argv[], int retc, char *retv[])
{
int i,j,k,l,m,n,h_min_pt,h_max_pt,v_min_pt,v_max_pt;
int trace_number_in_block,ending_block;
float *inp,*buffer;
dpointers inblock;
int data_in_block[MAXBLOCKS];
/* MANCHESTER VERSION 6.1 14 vii 98 GAM */
/* This function present in file ll2d.c */
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "w"); /* file for debugging information */
fprintf (debug, "Start of makeslice\n");
fclose(debug);
#endif
/* initialization bits */
/*
   Wclear_text();
*/
disp_index(0);
Wsettextdisplay("clear");
if (init_makeslice(argc,argv))
ABORT;
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "After init_makeslice\n");
fclose(debug);
#endif
for (i=0;i<MAXBLOCKS;i++) data_in_block[i] = FALSE;
m = 0;
h_min_pt = h_max_pt = 0;
/* Start of the loop to evaluate the blocks in which the various peaks are */
for (n = 0; n < peak_table->num_peaks; n++)
{
if (pk[m].num == peak->key)
{
/* Calculate the limits of peak region, always as follows (does not depend
   on trace value) because working with data file */
v_min_pt = ll2d_frq_to_dp(peak->f1_min,sw1,fn1_int*f1pts);
if (peak->f1_min > ll2d_dp_to_frq((double)v_min_pt,sw1,fn1_int*f1pts))
v_min_pt--;
v_max_pt = ll2d_frq_to_dp(peak->f1_max,sw1,fn1_int*f1pts);
if (peak->f1_max < ll2d_dp_to_frq((double)v_max_pt,sw1,fn1_int*f1pts))
v_max_pt++;
h_min_pt = ll2d_frq_to_dp(peak->f2_min,sw,fn_int);
if (peak->f2_min > ll2d_dp_to_frq((double)h_min_pt,sw,fn_int))
h_min_pt--;
h_max_pt = ll2d_frq_to_dp(peak->f2_max,sw,fn_int);
if (peak->f2_max < ll2d_dp_to_frq((double)h_max_pt,sw,fn_int))
h_max_pt++;
pk[m].h_min_pt = h_min_pt;
pk[m].h_max_pt = h_max_pt;
pk[m].v_min_pt = v_min_pt;
pk[m].v_max_pt = v_max_pt;
/* find out in which block(s) the peak lives */
pk[m].block = (int)(h_max_pt/fidhead.ntraces);
if (!data_in_block[pk[m].block]) data_in_block[pk[m].block] = TRUE;
ending_block = (int)(h_min_pt/fidhead.ntraces);
pk[m].status = NOT_DONE;
if (ending_block == pk[m].block) pk[m].two_blks = FALSE;
else
{
pk[m].two_blks = TRUE;
if (!data_in_block[pk[m].block+1]) data_in_block[pk[m].block+1] = TRUE;
}
m++;
}
peak = peak->next;
}
/* allocate memory for the temporary data buffer */
if ((buffer = (float *)malloc(sizeof(float)*fidhead.ntraces*fidhead.np)) == 0)
{
Werrprintf("makeslice: could not allocate memory\n");
ABORT;
}
for (n = 0; n < fidhead.nblocks; n++)
{
disp_index(n+1);
/* First get the data buffer ... */
if ( (r = D_getbuf(D_DATAFILE, fidhead.nblocks, n, &inblock)) )
{
D_error(r);
/* Bail out instead of dereferencing an invalid inblock.data below. */
free(buffer);
ABORT;
}
inp = (float *)inblock.data;
/* ... and zero 'inp' while keeping a copy in 'buffer' */
for (j = 0; j < fidhead.np*fidhead.ntraces; j++)
{
buffer[j] = inp[j];
inp[j] = 0.0;
}
if (data_in_block[n])	/* If some peaks of interest in the block */
{
m = 0;
peak = peak_table->head;
for (l = 0; l < peak_table->num_peaks; l++)
{
if (pk[m].num == peak->key)	/* found one peak */
{
if (pk[m].block == n && pk[m].status != DONE)
{
/* Work out the horizontal span of the peak inside this block;
   a peak spanning two blocks is processed in two passes. */
if (pk[m].status == NOT_DONE)
{
if (pk[m].two_blks == FALSE)
{
h_max_pt = pk[m].h_max_pt;
h_min_pt = pk[m].h_min_pt;
}
if (pk[m].two_blks == TRUE)
{
h_max_pt = pk[m].h_max_pt;
h_min_pt = ((n+1)*fidhead.ntraces)-1;
}
}
else if (pk[m].status == PARTIAL)
{
h_max_pt = (n*fidhead.ntraces);
h_min_pt = pk[m].h_min_pt;
}
if (display_mode == 's')
{
/* Slice mode: copy the peak region back unmodified. */
for (i = h_max_pt; i <= h_min_pt; i++)
{
trace_number_in_block = (n > 0 ? (i % (n*fidhead.ntraces)) : i);
for (j = 0; j <= pk[m].v_min_pt-pk[m].v_max_pt; j++)
{
k = j+(trace_number_in_block)*fidhead.np+pk[m].v_max_pt;
inp[k] = buffer[k];
}
}
}
else if (display_mode == 'i')
{
for (i = h_max_pt; i <= h_min_pt; i++)
{
trace_number_in_block = (n > 0 ? (i % (n*fidhead.ntraces)) : i);
for (j = 0; j <= pk[m].v_min_pt-pk[m].v_max_pt; j++)
{
k = j+(trace_number_in_block)*fidhead.np+pk[m].v_max_pt;
/* The signal intensity is multiplied by the (part integral / full integral) ratio */
inp[k] = pk[m].intensity*buffer[k];
}
}
}
if (pk[m].two_blks == FALSE) pk[m].status = DONE;
if (pk[m].two_blks == TRUE)
{
if (pk[m].status == PARTIAL) pk[m].status = DONE;
else if (pk[m].status == NOT_DONE)
{
pk[m].status = PARTIAL;
(pk[m].block)++;
}
}
}
m++;
}
peak = peak->next;
}
}
if ( (r = D_markupdated(D_DATAFILE,n)) )
{
D_error(r);
free(buffer);
ABORT;
}
if ( (r = D_release(D_DATAFILE,n)) )
{
D_error(r);
free(buffer);
ABORT;
}
}
delete_peak_table(&peak_table);
free(buffer);
start_from_ft=TRUE;
releasevarlist();
appendvarlist("dconi");
Wsetgraphicsdisplay("dconi");
disp_index(0);
RETURN;
}
/*---------------------------------------
| |
| init_makeslice() |
| |
+--------------------------------------*/
static int init_makeslice(int argc, char *argv[])
{
char path1[MAXPATHL],diffname[MAXPATHL];
short status_mask;
int i;
FILE *diffusion_file;
double integ_step;
extern int read_peak_file(/*peak_table,filename*/);
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "At start of init_makeslice\n");
fclose(debug);
#endif
if (
P_getreal(PROCESSED,"fn",&fn,1) ||
P_getreal(CURRENT,"sw",&sw,1) ||
P_getreal(PROCESSED,"fn1",&fn1,1) ||
P_getreal(CURRENT,"sw1",&sw1,1)
)
{
Werrprintf("makeslice: Error accessing parameters\n");
return(ERROR);
}
/* PB: The following lines have been added to check the number of arguments to makeslice */
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "fn = %f\n",fn);
fprintf (debug, "sw = %f\n",sw);
fprintf (debug, "fn1 = %f\n",fn);
fprintf (debug, "sw1 = %f\n",sw1);
fprintf (debug, "After reading parameters in init_makeslice\n");
fclose(debug);
#endif
if((argc<3)||(argc>4)){
Werrprintf("makeslice: incorrect number of arguments \n");
Werrprintf("makeslice('<mode>(optional)',start,finish)\n");
return(ERROR);
}
if(argc==3) {
display_mode='i';
start_diff = atof(argv[1]);
finish_diff = atof(argv[2]);
}
else {
if((argv[1][0]!='i')&&(argv[1][0]!='s')){
Werrprintf("makeslice: if 'mode' is specified, it must be supplied as first argument!");
return(ERROR);
}
display_mode = argv[1][0];
start_diff = atof(argv[2]);
finish_diff = atof(argv[3]);
}
if(start_diff>finish_diff) swap(&start_diff,&finish_diff);
mean_diff = 0.5*(start_diff+finish_diff);
diff_window = finish_diff-start_diff;
/* initialise the data files in the present experiment ... */
if ( (r = D_gethead(D_DATAFILE, &fidhead)) )
{
if (r == D_NOTOPEN)
{
/* Wscrprintf("spectrum had to be re-opened?\n"); */
strcpy(path1, curexpdir);
strcat(path1, "/datdir/data");
r = D_open(D_DATAFILE, path1, &fidhead); /* open the file */
}
if (r)
{
D_error(r);
return(ERROR);
}
}
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "After reading header in init_makeslice\n");
fclose(debug);
#endif
f1pts=1;
status_mask = (S_COMPLEX);
if ((fidhead.status & status_mask) == status_mask ) f1pts=2;
status_mask = (S_HYPERCOMPLEX);
if ((fidhead.status & status_mask) == status_mask ) f1pts=4;
status_mask = (S_DATA|S_SPEC|S_FLOAT|S_SECND);
if ( (fidhead.status & status_mask) != status_mask )
{
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "Status is %d in init_makeslice\n",fidhead.status);
fclose(debug);
#endif
Werrprintf("No 2D spectrum available, please use undosy3D or do 2D transform before using makeslice\n");
return(ERROR);
}
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "Before initialising peak table in init_makeslice\n");
fclose(debug);
#endif
fn_int = (int) fn;
fn1_int = (int) fn1;
/* Initialise the peak table */
strcpy(path1,curexpdir);
strcat(path1,"/ll2d/peaks.bin");
if (read_peak_file(&peak_table,path1))
{
Wscrprintf("makeslice: Could not read ll2d peak file !\n");
delete_peak_table(&peak_table);
return(ERROR);
}
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "Opening diffusion_display_3d.inp file in init_makeslice\n");
fclose(debug);
#endif
peak = peak_table->head;
strcpy(diffname,curexpdir);
strcat(diffname,"/dosy/diffusion_display_3D.inp");
if ((diffusion_file=fopen(diffname,"r"))==NULL)
{
Werrprintf("Error opening %s file\n",diffname);
return(ERROR);
}
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "Reading diffusion_display_3d.inp file in init_makeslice\n");
fclose(debug);
#endif
/* Read in the diffusion information, and check whether peaks are in the diffusion band */
i=0;
integ_step = diff_window/(2.0*(double)NINTSTEPS); /* the calculation is divided in NINTSTEPS steps */
/*Wscrprintf("\n\tDiffusion range visible: %.2lf +/- %.2lf\n\n",mean_diff,0.5*diff_window);*/
Wscrprintf("\n\tDiffusion range visible: from %.2lf to %.2lf (*10e-10m2s-1)\n\n",start_diff,finish_diff);
while(fscanf(diffusion_file,"%d %lf %lf\n",&pk[i].num,&pk[i].diff_coef,&pk[i].std_dev)!=EOF && i<MAXNUMPEAKS)
{
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "Reading peak %d in init_makeslice\n",i);
fclose(debug);
#endif
if (display_mode == 's')
{
if (within_diffusion_boundaries(i))
{
if (compute_integral(i,integ_step) == TRUE) i++;
}
}
else if (display_mode == 'i')
{
r = compute_integral(i,integ_step);
/* If less than 1 % of intensity, then assume 0 */
if (pk[i].intensity >= 0.01)
{
/* If at least 95 % of the intensity within, then assume 100 % */
if (pk[i].intensity >= 0.95) pk[i].intensity = 1.0;
i++;
}
}
}
if (display_mode == 's')
{
/* Check that the last peak IS within the slice because it does not get eliminated by the above loop */
if (!within_diffusion_boundaries(i))
{
pk[i].num = 0;
pk[i].diff_coef = 0.0;
pk[i].std_dev = 0.0;
}
else if (compute_integral(i,integ_step) != TRUE)
{
pk[i].num = 0;
pk[i].diff_coef = 0.0;
pk[i].std_dev = 0.0;
}
}
if (display_mode == 'i')
{
/* Check the final peak */
if (pk[i-1].intensity < 0.01)
{
pk[i-1].num = 0.0;
pk[i-1].diff_coef = 0.0;
pk[i-1].std_dev = 0.0;
pk[i-1].intensity = 0.0;
}
}
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "Before closing diffusion_display_3d.inp file in init_makeslice\n");
fclose(debug);
#endif
fclose(diffusion_file);
if (i == MAXNUMPEAKS)
{
Wscrprintf("number of peaks reduced to %d\n",MAXNUMPEAKS);
}
#ifdef DEBUG_MAKESLICE
strcpy (rubbish, curexpdir);
strcat (rubbish, "/dosy/debug_makeslice");
debug = fopen (rubbish, "a"); /* file for debugging information */
fprintf (debug, "After reading peak file in init_makeslice\n");
fclose(debug);
#endif
/* Set PHASFILE status to !S_DATA - this is required to
force a recalculation of the display from the new data
in DATAFILE (in the ds routine, see proc2d.c) */
if ( (r = D_gethead(D_PHASFILE,&phasehead)) )
{
if (r == D_NOTOPEN)
{
/* Wscrprintf("phas NOTOPEN\n"); */
strcpy(path1,curexpdir);
strcat(path1,"/datdir/phasefile");
r = D_open(D_PHASFILE,path1,&phasehead);
}
if (r)
{
D_error(r);
return(ERROR);
}
}
phasehead.status = 0;
if ( (r = D_updatehead(D_PHASFILE, &phasehead)) )
{
D_error(r);
Wscrprintf("PHASE updatehead\n");
return(ERROR);
}
return(COMPLETE);
}
/*
 * Report whether the diffusion coefficient of peak 'index' lies inside the
 * user-selected display range [start_diff, finish_diff] (inclusive bounds).
 */
static int within_diffusion_boundaries(int index)
{
    double d = pk[index].diff_coef;

    return (d >= start_diff && d <= finish_diff) ? TRUE : FALSE;
}
/*
The function 'compute_integral' calculates the integral of exp{-(x/sigma)^2}
It returns TRUE as soon as the value of the integral between the diffusion . . .
. . . slice reaches (CERTAINTY*full_integral)
It returns FALSE if the integral value in the slice is less than (CERTAINTY*full_integral)
*/
/*
 * Numerically integrate the Gaussian exp{-(x/sigma)^2} for peak 'index',
 * where sigma = pk[index].std_dev and the abscissa is diffusion coefficient.
 *
 * 's' (slice) mode:  returns TRUE as soon as the portion of the integral that
 *                    falls inside the displayed diffusion slice reaches
 *                    CERTAINTY * full_integral, FALSE otherwise.
 * 'i' (intensity) mode: stores the fraction (slice integral / full integral)
 *                    in pk[index].intensity and returns TRUE.
 *
 * 'step' is the integration step width (diff_window / (2*NINTSTEPS)).
 */
static int compute_integral(int index, double step)
{
    register int i;
    int max_steps;
    double function_value,part_integral,full_integral,limit_value,old_int,delta,diffusion_difference;
    /* Relative convergence tolerance for the "full" integral (0.005 %). */
    delta = 0.00005;
    /* 'i' mode integrates over 5x the slice width, 's' mode over 3x. */
    max_steps = display_mode == 'i' ? 5*NINTSTEPS : 3*NINTSTEPS;
    /* initialise the full integral with the value of the function at zero */
    full_integral = 1.0;
    old_int = 0.0;
    /*
    First the "full" integral (over 3 ('s' mode) or 5 ('i' mode) times the slice width) is calculated
    It is considered that if the change in full integral from one loop
    to the next is less than (100*delta) per cent, than full integral calculation is complete
    */
    /* NOTE(review): on the first pass old_int is 0.0, so the convergence test
       divides by zero; this relies on IEEE inf > delta to enter the loop. */
    for (i=0;i<max_steps && ((full_integral-old_int)/old_int) > delta;i++)
    {
        old_int = full_integral;
        /* Sample the Gaussian symmetrically at +/- (i+1)*step; the factor 2
           below accounts for both sides at once. */
        function_value = ((double)(i+1)*step)/pk[index].std_dev;
        function_value = exp(-1.0*SQR(function_value));
        full_integral += 2.0*function_value;
    }
    /*
    Now can calculate the part within the slice width.
    */
    /* initialise the integral with the value of the function at centre of diffusion slice */
    diffusion_difference = mean_diff - pk[index].diff_coef;
    function_value = diffusion_difference/pk[index].std_dev;
    function_value = exp(-1.0*SQR(function_value));
    part_integral = function_value;
    /*
    Start the integration loop, it is terminated when NINTSTEPS is reached, . . .
    . . . or when the integral is larger than (full_integral*CERTAINTY)
    */
    if (display_mode == 's')
    {
        limit_value = CERTAINTY*full_integral;
        /* Early-exit accumulation: stop as soon as enough of the peak's area
           is proven to lie within the slice. */
        for (i=0;i<NINTSTEPS && part_integral <= limit_value;i++)
        {
            function_value = (diffusion_difference+(double)(i+1)*step)/pk[index].std_dev;
            function_value = exp(-1.0*SQR(function_value));
            part_integral += function_value;
            function_value = (diffusion_difference-(double)(i+1)*step)/pk[index].std_dev;
            function_value = exp(-1.0*SQR(function_value));
            part_integral += function_value;
        }
        if (part_integral > limit_value) return(TRUE);
        else return(FALSE);
    }
    else if (display_mode == 'i')
    {
        /* Full accumulation over the slice; the ratio becomes the peak's
           displayed intensity weight. */
        for (i=0;i<NINTSTEPS;i++)
        {
            function_value = (diffusion_difference+(double)(i+1)*step)/pk[index].std_dev;
            function_value = exp(-1.0*SQR(function_value));
            part_integral += function_value;
            function_value = (diffusion_difference-(double)(i+1)*step)/pk[index].std_dev;
            function_value = exp(-1.0*SQR(function_value));
            part_integral += function_value;
        }
        pk[index].intensity = part_integral/full_integral;
        return(TRUE);
    }
    /* Unknown display mode: nothing computed. */
    return(FALSE);
}
/* Exchange the values stored at the two double pointers. */
void swap(double *a, double *b) {
    double held = *b;

    *b = *a;
    *a = held;
}
|
objectiser/scribble-java
|
scribble-core/src/main/java/org/scribble/ast/local/LChoice.java
|
/**
* Copyright 2008 The Scribble Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.scribble.ast.local;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.antlr.runtime.tree.CommonTree;
import org.scribble.ast.AstFactory;
import org.scribble.ast.Choice;
import org.scribble.ast.ProtocolBlock;
import org.scribble.ast.ScribNodeBase;
import org.scribble.ast.name.simple.RoleNode;
import org.scribble.del.ScribDel;
import org.scribble.main.ScribbleException;
import org.scribble.sesstype.Message;
import org.scribble.sesstype.kind.Local;
import org.scribble.sesstype.name.Role;
import org.scribble.util.ScribUtil;
import org.scribble.visit.context.ProjectedChoiceSubjectFixer;
/**
 * Local-protocol choice construct: a subject role and one block per branch.
 * Instances are immutable AST nodes; every mutation-like operation returns a
 * fresh node.
 */
public class LChoice extends Choice<Local> implements LCompoundInteractionNode
{
	public LChoice(CommonTree source, RoleNode subj, List<LProtocolBlock> blocks)
	{
		super(source, subj, blocks);
	}

	/** Shallow copy: reuses the existing subject and block children. */
	@Override
	protected ScribNodeBase copy()
	{
		return new LChoice(this.source, this.subj, getBlocks());
	}

	/** Deep copy: clones the subject and every branch block via the factory. */
	@Override
	public LChoice clone(AstFactory af)
	{
		RoleNode subjClone = this.subj.clone(af);
		List<LProtocolBlock> blockClones = ScribUtil.cloneList(af, getBlocks());
		return af.LChoice(this.source, subjClone, blockClones);
	}

	/** Rebuild this node with new children, carrying over the existing del. */
	@Override
	public LChoice reconstruct(RoleNode subj, List<? extends ProtocolBlock<Local>> blocks)
	{
		ScribDel del = del();
		return (LChoice) new LChoice(this.source, subj, castBlocks(blocks)).del(del);
	}

	@Override
	public List<LProtocolBlock> getBlocks()
	{
		return castBlocks(super.getBlocks());
	}

	/** Infer the choice subject from the first interaction of the first block. */
	@Override
	public Role inferLocalChoiceSubject(ProjectedChoiceSubjectFixer fixer)
	{
		// Relies on: will never be inferring from a "continue X;" -- if choice is first statement in seq, continue must be guarded; if continue is not guarded, choice cannot be first statement in seq
		return getBlocks().get(0).getInteractionSeq().getInteractions().get(0).inferLocalChoiceSubject(fixer);
	}

	private static List<LProtocolBlock> castBlocks(List<? extends ProtocolBlock<Local>> blocks)
	{
		return blocks.stream().map(LProtocolBlock.class::cast).collect(Collectors.toList());
	}

	/**
	 * Concatenates the branch blocks of two choices into one choice node.
	 * Both operands' blocks are cloned; the receiver's source/subject are kept.
	 */
	@Override
	public LChoice merge(AstFactory af, LInteractionNode ln) throws ScribbleException
	{
		if (!(ln instanceof LChoice) || !this.canMerge(ln))
		{
			throw new ScribbleException("Cannot merge " + this.getClass() + " and " + ln.getClass() + ": " + this + ", " + ln);
		}
		LChoice them = (LChoice) ln;
		List<LProtocolBlock> merged = new LinkedList<>();
		for (LProtocolBlock b : getBlocks())
		{
			merged.add(b.clone(af));
		}
		for (LProtocolBlock b : them.getBlocks())
		{
			merged.add(b.clone(af));
		}
		// Not reconstruct: leave context building to post-projection passes
		return af.LChoice(this.source, this.subj, merged);  // Hacky: this.source
	}

	@Override
	public boolean canMerge(LInteractionNode ln)
	{
		// Merge currently does "nothing"; validation takes direct non-deterministic interpretation -- purpose of syntactic merge is to convert non-det to "equivalent" safe det in certain sitations
		return ln instanceof LChoice;
	}

	/** Union of the enabling messages of all branch blocks. */
	@Override
	public Set<Message> getEnabling()
	{
		return getBlocks().stream().flatMap(b -> b.getEnabling().stream()).collect(Collectors.toSet());
	}
}
|
fouadsan/ims-soft
|
ims_soft/products/utils.py
|
from django.http import JsonResponse
from .models import Category, Product
def objects_list_and_create(request, form):
    """Persist a new object from a validated form and return it as JSON.

    The requesting user is stamped as ``created_by`` before the final save.
    Products (identified by having an ``article_num`` attribute) are returned
    with full detail; other models (e.g. Category) get a minimal payload.
    """
    instance = form.save(commit=False)
    instance.created_by = request.user
    # Full save (also persists m2m data, if any); debug print removed.
    instance = form.save()
    if hasattr(instance, 'article_num'):
        return JsonResponse({
            'id': instance.id,
            'name': instance.name,
            'category': instance.category.name,
            'article_num': instance.article_num,
            'created_by': instance.created_by.username,
            'created_at': instance.created_at,
            'updated_at': instance.updated_at
        })
    else:
        return JsonResponse({
            'id': instance.id,
            'name': instance.name,
            'created_at': instance.created_at
        })
def load_objects(request, num_objs, model):
    """Return a JSON page of up to 5 serialised objects ending at ``num_objs``.

    ``model`` is the model name as a string ('Product' or 'Category'); it is
    resolved through an explicit whitelist instead of ``eval()`` so a crafted
    URL cannot execute arbitrary code.  Only the requested page is serialised
    (the original built the whole table into memory before slicing).
    """
    models = {'Product': Product, 'Category': Category}
    model = models[model]
    visible = 5
    upper = num_objs
    # Clamp so the first page can never produce a negative (wrapping) index.
    lower = max(upper - visible, 0)
    size = model.objects.count()
    data = []
    for obj in model.objects.all()[lower:upper]:
        if model == Product:
            item = {
                'id': obj.id,
                'name': obj.name,
                'category': obj.category.name,
                'article_num': obj.article_num,
                'created_by': obj.created_by.username,
                'created_at': obj.created_at,
                'updated_at': obj.updated_at
            }
        else:
            item = {
                'id': obj.id,
                'name': obj.name,
                'created_at': obj.created_at
            }
        data.append(item)
    return JsonResponse({'data': data, 'size': size})
def object_data(request, model, pk):
    """Return one object's fields as JSON.

    ``model`` is a model-name string resolved via an explicit whitelist
    instead of ``eval()`` (which would execute arbitrary request-derived
    code).  Products are serialised in full; other models minimally.
    """
    models = {'Product': Product, 'Category': Category}
    model = models[model]
    obj = model.objects.get(pk=pk)
    if model == Product:
        data = {
            'id': obj.id,
            'name': obj.name,
            'category': obj.category.name,
            'article_num': obj.article_num,
            'created_by': obj.created_by.username,
            'created_at': obj.created_at,
            'updated_at': obj.updated_at
        }
    else:
        data = {
            'id': obj.id,
            'name': obj.name,
            'created_at': obj.created_at
        }
    return JsonResponse({'data': data})
def update_object(request, model, pk):
    """Update one object from POSTed form fields and echo the new values.

    ``model`` is a model-name string resolved via an explicit whitelist
    instead of ``eval()`` so request data can never be executed as code.
    Products update name/category/article_num; other models only name.
    """
    models = {'Product': Product, 'Category': Category}
    model = models[model]
    obj = model.objects.get(pk=pk)
    if model == Product:
        new_name = request.POST.get('name')
        new_category = request.POST.get('category')
        new_article_num = request.POST.get('article_num')
        obj.name = new_name
        obj.category = Category.objects.get(id=new_category)
        obj.article_num = new_article_num
        obj.save()
        return JsonResponse({
            'name': new_name,
            'category': obj.category.name,
            'article_num': new_article_num,
        })
    else:
        new_name = request.POST.get('name')
        obj.name = new_name
        obj.save()
        return JsonResponse({
            'name': new_name
        })
def delete_object(request, model, pk):
    """Delete a single object identified by ``pk``.

    ``model`` is a model-name string resolved via an explicit whitelist
    instead of ``eval()`` so request data can never be executed as code.
    """
    models = {'Product': Product, 'Category': Category}
    model = models[model]
    obj = model.objects.get(pk=pk)
    obj.delete()
    return JsonResponse({'msg': 'Object has been deleted'})
def delete_selected_objects(request, model):
    """Delete every object whose id appears in the POSTed ``id_list[]``.

    ``model`` is a model-name string resolved via an explicit whitelist
    instead of ``eval()``.  Deletion is done with one bulk query rather than
    a fetch-and-delete per id; ids that no longer exist are silently skipped.
    """
    models = {'Product': Product, 'Category': Category}
    model = models[model]
    object_ids = request.POST.getlist('id_list[]')
    model.objects.filter(pk__in=object_ids).delete()
    return JsonResponse({'msg': 'Objects have been deleted'})
|
HiltonRoscoe/exchangerxml
|
src/com/cladonia/xngreditor/LatestNewsDialog.java
|
/*
* $Id: LatestNewsDialog.java,v 1.0 7 Jun 2007 09:59:20 Administrator Exp $
*
* Copyright (C) 2005, Cladonia Ltd. All rights reserved.
*
* This software is the proprietary information of Cladonia Ltd. Use is subject
* to license terms.
*/
package com.cladonia.xngreditor;
import java.awt.BorderLayout;
import java.awt.Dimension;
import java.awt.FlowLayout;
import java.awt.Font;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.KeyEvent;
import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import javax.swing.AbstractAction;
import javax.swing.JButton;
import javax.swing.JComponent;
import javax.swing.JEditorPane;
import javax.swing.JFrame;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.KeyStroke;
import javax.swing.SwingUtilities;
import javax.swing.border.EmptyBorder;
import javax.swing.text.html.HTMLEditorKit;
import com.l2fprod.common.swing.TipModel;
import com.l2fprod.common.swing.TipModel.Tip;
import com.l2fprod.common.swing.tips.DefaultTipModel;
/**
*
*
* @version $Revision: 1.0 $, $Date: 7 Jun 2007 09:59:20 $
* @author <NAME> <<EMAIL>>
*/
public class LatestNewsDialog extends XngrDialogHeader {
    /**
     * Key used to store the status of the "Show tip on startup" checkbox"
     */
    public static final String PREFERENCE_KEY = "ShowTipOnStartup";
    /**
     * Used when generating PropertyChangeEvents for the "currentTip" property
     */
    public static final String CURRENT_TIP_CHANGED_KEY = "currentTip";
    // Model holding the news items ("tips"); currentTip is the shown index.
    private TipModel model;
    private int currentTip = 0;
    // Minimum dialog size; pack() may grow the dialog but never below this.
    private static final Dimension SIZE = new Dimension( 400, 300);
    // The components that contain the values
    private JEditorPane tipField;
    private JButton closeButton;
    private JButton previousButton;
    private JButton nextButton;
    /**
     * Initialise the class LatestNewsDialog.java
     *
     * @param frame
     * @param modal
     */
    public LatestNewsDialog(JFrame frame, boolean modal) {
        // Delegates to the main constructor with an empty tip model.
        this(frame, modal, new DefaultTipModel(new Tip[0]));
        // TODO Auto-generated constructor stub
    }
    // Main constructor: builds the HTML tip pane, the Previous/Next/Close
    // buttons, and wires Escape + window-close to the cancel action.
    public LatestNewsDialog(JFrame frame, boolean modal, TipModel model) {
        super(frame, modal);
        this.model = model;
        //setResizable( false);
        setTitle( "Latest News");
        setDialogDescription( "The Latest Cladonia News");
        JPanel main = new JPanel( new BorderLayout());
        main.setBorder( new EmptyBorder( 10, 5, 5, 5));
        // Escape key anywhere in the dialog behaves like Cancel/Close.
        main.getActionMap().put( "escapeAction", new AbstractAction() {
            public void actionPerformed( ActionEvent event) {
                cancelButtonPressed();
            }
        });
        main.getInputMap( JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put( KeyStroke.getKeyStroke( KeyEvent.VK_ESCAPE, 0, false), "escapeAction");
        //tipField = new TextArea();
        // Read-only HTML pane showing the current news item.
        tipField = new JEditorPane();
        tipField.setEditorKit(new HTMLEditorKit());
        tipField.setEditable(false);
        JScrollPane tipScrollPane = new JScrollPane(tipField);
        main.add( tipScrollPane, BorderLayout.CENTER);
        //removed for xngr-dialog
        closeButton = new JButton( "Close");
        closeButton.setMnemonic( 'C');
        closeButton.setFont( closeButton.getFont().deriveFont( Font.PLAIN));
        closeButton.addActionListener( new ActionListener() {
            public void actionPerformed( ActionEvent e) {
                cancelButtonPressed();
            }
        });
        previousButton = new JButton( "Previous");
        previousButton.addActionListener( new ActionListener() {
            public void actionPerformed( ActionEvent e) {
                previousButtonPressed();
            }
        });
        nextButton = new JButton( "Next");
        nextButton.addActionListener( new ActionListener() {
            public void actionPerformed( ActionEvent e) {
                nextButtonPressed();
            }
        });
        getRootPane().setDefaultButton( closeButton);
        JPanel buttonPanel = new JPanel( new FlowLayout( FlowLayout.RIGHT, 5, 5));
        buttonPanel.setBorder( new EmptyBorder( 5, 0, 3, 0));
        buttonPanel.add( previousButton);
        buttonPanel.add( nextButton);
        buttonPanel.add( closeButton);
        main.add( buttonPanel, BorderLayout.SOUTH);
        setContentPane( main);
        addWindowListener( new WindowAdapter() {
            public void windowClosing( WindowEvent e) {
                cancelButtonPressed();
            }
        });
        setDefaultCloseOperation( HIDE_ON_CLOSE);
        getRootPane().setDefaultButton(closeButton);
        pack();
        // Enforce the minimum size, then centre over the owner frame.
        setSize( new Dimension( Math.max( SIZE.width, getSize().width), Math.max( SIZE.height, getSize().height)));
        setLocationRelativeTo( frame);
    }
    // Advance to the next tip; the display refresh is deferred to the EDT.
    protected void nextButtonPressed() {
        nextTip();
        SwingUtilities.invokeLater(new Runnable() {
            public void run() {
                updateTip();
            }
        });
    }
    // Step back to the previous tip; display refresh deferred to the EDT.
    protected void previousButtonPressed() {
        previousTip();
        SwingUtilities.invokeLater(new Runnable() {
            public void run() {
                updateTip();
            }
        });
    }
    protected void okButtonPressed() {
        // NOTE(review): 'cancelled' is presumably inherited from
        // XngrDialogHeader -- confirm against the superclass.
        cancelled = false;
        // NOTE(review): Window.hide() is deprecated; setVisible(false) is the
        // modern equivalent -- verify before changing.
        hide();
    }
    protected void cancelButtonPressed() {
        super.cancelButtonPressed();
    }
    /**
     * Initialises the values in the dialog.
     */
    public void show() {
        // Refresh the displayed tip before making the dialog visible.
        updateTip();
        super.show();
    }
    public TipModel getModel() {
        return model;
    }
    // Replaces the tip model and notifies PropertyChangeListeners.
    public void setModel(TipModel model) {
        TipModel old = this.model;
        this.model = model;
        firePropertyChange("model", old, model);
    }
    public int getCurrentTip() {
        return currentTip;
    }
    // Renders the current tip (HTML string) into the pane and enables or
    // disables the Previous/Next buttons according to the index position.
    public void updateTip() {
        if(this.getModel() != null) {
            // NOTE(review): '>=' allows currentTip == getTipCount(), in which
            // case getTipAt(currentTip) would be out of range -- verify whether
            // '>' was intended.
            if(this.getModel().getTipCount() >= this.currentTip) {
                Tip tip = this.getModel().getTipAt(this.currentTip);
                if(tip != null) {
                    if(tip.getTip() instanceof String) {
                        this.tipField.setText((String) tip.getTip());
                        // Next is disabled on the last tip, Previous on the first.
                        if(this.getModel().getTipCount() <= this.currentTip+1) {
                            this.nextButton.setEnabled(false);
                        }
                        else {
                            this.nextButton.setEnabled(true);
                        }
                        if(this.currentTip == 0) {
                            this.previousButton.setEnabled(false);
                        }
                        else {
                            this.previousButton.setEnabled(true);
                        }
                    }
                }
            }
            else {
                this.nextButton.setEnabled(false);
            }
        }
    }
    /**
     * Sets the index of the tip to show
     *
     * @param currentTip
     * @throw IllegalArgumentException if currentTip is not within the bounds
     *        [0, getModel().getTipCount()[.
     */
    public void setCurrentTip(int currentTip) {
        if (currentTip < 0 || currentTip >= getModel().getTipCount()) {
            throw new IllegalArgumentException(
                "Current tip must be within the bounds [0, "
                    + getModel().getTipCount() + "[");
        }
        int oldTip = this.currentTip;
        this.currentTip = currentTip;
        firePropertyChange(CURRENT_TIP_CHANGED_KEY, oldTip, currentTip);
    }
    /**
     * Shows the next tip in the list. It cycles the tip list.
     */
    public void nextTip() {
        int count = getModel().getTipCount();
        if (count == 0) {
            return;
        }
        int nextTip = currentTip + 1;
        if (nextTip >= count) {
            nextTip = 0;
        }
        setCurrentTip(nextTip);
    }
    /**
     * Shows the previous tip in the list. It cycles the tip list.
     */
    public void previousTip() {
        int count = getModel().getTipCount();
        if (count == 0) {
            return;
        }
        int previousTip = currentTip - 1;
        if (previousTip < 0) {
            previousTip = count - 1;
        }
        setCurrentTip(previousTip);
    }
}
|
barryquan/jpa-springboot-code-generator
|
src/main/java/com/github/barry/akali/generator/db/DataBaseProperties.java
|
<gh_stars>0
package com.github.barry.akali.generator.db;
import lombok.Data;
/**
 * Database connection configuration holder.
 *
 * @author barry
 *
 */
@Data
public class DataBaseProperties {
    /**
     * JDBC connection URL, e.g. jdbc:mysql://localhost:3306/test?characterEncoding=UTF-8&serverTimezone=Asia/Shanghai
     */
    private String jdbcUrl;
    /**
     * Database user name, e.g. root
     */
    private String user;
    /**
     * Password for the database account above.
     */
    private String password;
    /**
     * Fully qualified JDBC driver class name, e.g. com.mysql.cj.jdbc.Driver
     */
    private String driver;
}
|
sosan/NoahGameFrame
|
NFComm/NFKernelPlugin/NFCEventModule.h
|
<reponame>sosan/NoahGameFrame<filename>NFComm/NFKernelPlugin/NFCEventModule.h
// -------------------------------------------------------------------------
// @FileName : NFCEventModule.h
// @Author : LvSheng.Huang
// @Date : 2012-12-15
// @Module : NFCEventModule
//
// -------------------------------------------------------------------------
#ifndef NFC_EVENT_MODULE_H
#define NFC_EVENT_MODULE_H
#include <iostream>
#include "NFComm/NFCore/NFIObject.h"
#include "NFComm/NFPluginModule/NFGUID.h"
#include "NFComm/NFPluginModule/NFIEventModule.h"
#include "NFComm/NFPluginModule/NFIKernelModule.h"
// Event dispatch module: maintains callback registries both per-module
// (keyed by NFEventDefine) and per-object (keyed by NFGUID), and fires
// registered callbacks when DoEvent is invoked.
class NFCEventModule
    : public NFIEventModule
{
public:
    NFCEventModule(NFIPluginManager* p)
    {
        pPluginManager = p;
    }
    virtual ~NFCEventModule()
    {
    }
    // Standard plugin life-cycle hooks.
    virtual bool Init();
    virtual bool AfterInit();
    virtual bool BeforeShut();
    virtual bool Shut();
    virtual bool Execute();
    // Module-scoped events: fire / query / remove callbacks by event id.
    virtual bool DoEvent(const NFEventDefine nEventID, const NFDataList& valueList);
    virtual bool ExistEventCallBack(const NFEventDefine nEventID);
    virtual bool RemoveEventCallBack(const NFEventDefine nEventID);
    //////////////////////////////////////////////////////////
    // Object-scoped events: the same operations, keyed by object GUID.
    virtual bool DoEvent(const NFGUID self, const NFEventDefine nEventID, const NFDataList& valueList);
    virtual bool ExistEventCallBack(const NFGUID self, const NFEventDefine nEventID);
    virtual bool RemoveEventCallBack(const NFGUID self, const NFEventDefine nEventID);
    virtual bool RemoveEventCallBack(const NFGUID self);
protected:
    // Registration entry points used by the templated helpers in the interface.
    virtual bool AddEventCallBack(const NFEventDefine nEventID, const MODULE_EVENT_FUNCTOR_PTR cb);
    virtual bool AddEventCallBack(const NFGUID self, const NFEventDefine nEventID, const OBJECT_EVENT_FUNCTOR_PTR cb);
private:
    // NOTE(review): member name looks misspelled ("m_pKernelodule", likely
    // "m_pKernelModule") -- verify against the .cpp before renaming.
    NFIKernelModule* m_pKernelodule;
private:
    // for module
    // Deferred-removal list and event-id -> callback-list registry.
    NFList<NFEventDefine> mModuleRemoveListEx;
    NFMapEx<NFEventDefine, NFList<MODULE_EVENT_FUNCTOR_PTR>> mModuleEventInfoMapEx;
    //for object
    // Deferred-removal list and GUID -> (event-id -> callback-list) registry.
    NFList<NFGUID> mObjectRemoveListEx;
    NFMapEx<NFGUID, NFMapEx<NFEventDefine, NFList<OBJECT_EVENT_FUNCTOR_PTR>>> mObjectEventInfoMapEx;
};
#endif
|
acearchive/acearchive.lgbt
|
assets/js/replayweb-url.js
|
import normalizeCid from "./normalize-cid";
// Wire up every "ReplayWeb URL" form on the page: validate the CID, filename
// and URL inputs, and on submit build a replayweb.page viewer link.
const forms = document.querySelectorAll(".replayweb-url-form form");
for (const form of forms) {
  const cidInputGroup = form.querySelector(".cid-input");
  const cidInputElement = cidInputGroup.querySelector("input");
  const filenameInputGroup = form.querySelector(".filename-input");
  const filenameInputElement = filenameInputGroup.querySelector("input");
  const urlInputGroup = form.querySelector(".url-input");
  const urlInputElement = urlInputGroup.querySelector("input");
  const submitButton = form.querySelector(".submit-button");
  const cancelButton = form.querySelector(".cancel-button");
  const urlOutput = form.querySelector(".url-output");
  // Compose the replayweb.page URL from the three (already validated) inputs.
  const getUrl = () =>
    `https://replayweb.page/?source=ipfs://${cidInputElement.value}?filename=${encodeURIComponent(
      filenameInputElement.value
    )}#view=resources&urlSearchType=prefix&url=${encodeURIComponent(urlInputElement.value)}`;
  // Clear the validation styling as soon as the user edits a field again.
  for (const inputGroup of form.querySelectorAll(".needs-validated")) {
    const inputElement = inputGroup.querySelector("input");
    inputElement.addEventListener("input", () => {
      inputGroup.classList.remove("was-validated");
    });
  }
  // Per-field feedback messages, tailored to the failing constraint.
  cidInputElement.addEventListener("invalid", () => {
    const feedbackElement = cidInputGroup.querySelector(".invalid-feedback");
    feedbackElement.innerText = cidInputElement.validationMessage;
  });
  filenameInputElement.addEventListener("invalid", () => {
    const feedbackElement = filenameInputGroup.querySelector(".invalid-feedback");
    if (filenameInputElement.validity.patternMismatch) {
      feedbackElement.innerText =
        "File names must use hyphens between words and have a .warc extension.";
    } else {
      feedbackElement.innerText = filenameInputElement.validationMessage;
    }
  });
  urlInputElement.addEventListener("invalid", () => {
    const feedbackElement = urlInputGroup.querySelector(".invalid-feedback");
    if (urlInputElement.validity.typeMismatch) {
      feedbackElement.innerText = "This must be a valid URL.";
    } else if (urlInputElement.validity.patternMismatch) {
      feedbackElement.innerText = "This must be an http:// or https:// URL.";
    } else {
      feedbackElement.innerText = urlInputElement.validationMessage;
    }
  });
  // Toggle the whole form between its idle and "Validating..." states.
  const setFormStateLoading = (loading) => {
    for (const inputElement of form.querySelectorAll("input")) {
      inputElement.disabled = loading;
    }
    submitButton.disabled = loading;
    cancelButton.hidden = !loading;
    if (loading) {
      submitButton.innerHTML = `
        <span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span>
        Validating...
      `;
    } else {
      submitButton.innerText = "Submit";
    }
  };
  // Async validation: normalizes the CID (cancellable via the Cancel button),
  // then runs the browser's built-in constraint validation on the form.
  const checkValidity = async () => {
    if (cidInputElement.value.length > 0) {
      const abortController = new AbortController();
      cancelButton.onclick = () => {
        abortController.abort();
        setFormStateLoading(false);
      };
      setFormStateLoading(true);
      const { result, message } = await normalizeCid(cidInputElement.value, {
        signal: abortController.signal,
      });
      setFormStateLoading(false);
      if (result === undefined) {
        // Normalization failed: surface its message as a custom validity error.
        cidInputElement.setCustomValidity(message);
      } else {
        // Normalization succeeded: write the canonical CID back into the field.
        cidInputElement.setCustomValidity("");
        cidInputElement.value = result;
      }
    } else {
      cidInputElement.setCustomValidity("");
    }
    return form.checkValidity();
  };
  // On submit: hide any previous output, validate, and if everything passes
  // show the generated link plus a copy-to-clipboard control.
  submitButton.addEventListener("click", () => {
    urlOutput.classList.remove("d-flex");
    urlOutput.classList.add("d-none");
    checkValidity().then((isValid) => {
      for (const validationElement of form.querySelectorAll(".needs-validated")) {
        validationElement.classList.add("was-validated");
      }
      if (isValid) {
        urlOutput.classList.remove("d-none");
        urlOutput.classList.add("d-flex");
        const generatedUrl = getUrl();
        urlOutput.querySelector("a").innerText = generatedUrl;
        urlOutput.querySelector("a").setAttribute("href", generatedUrl);
        urlOutput.querySelector("clipboard-copy").value = generatedUrl;
      }
    });
  });
}
|
plegner/quizmaster
|
app/shared/src/main/scala/hydro/common/ScalaUtils.scala
|
<gh_stars>10-100
package hydro.common
import scala.concurrent._
object ScalaUtils {

  /** Returns the name of the object as defined by "object X {}" */
  def objectName(obj: AnyRef): String = {
    obj.getClass.getSimpleName.replace("$", "")
  }

  /** Returns a (callback, future) pair: invoking the callback completes the future. */
  def callbackSettingFuturePair(): (() => Unit, Future[Unit]) = {
    val promise = Promise[Unit]()
    // Pass unit explicitly: the parameterless `success()` relied on
    // auto-application/unit adaptation, which is deprecated since Scala 2.13.
    val callback: () => Unit = () => promise.success(())
    (callback, promise.future)
  }

  /** Wraps an existing future in a promise that completes with it. */
  def toPromise[T](future: Future[T]): Promise[T] = Promise[T]().completeWith(future)

  /** Returns Some(value) when the condition holds, None otherwise (value is lazy). */
  def ifThenOption[T](condition: Boolean)(value: => T): Option[T] = {
    if (condition) {
      Some(value)
    } else {
      None
    }
  }

  /** Strips `prefix` from `s`, requiring that `s` actually starts with it. */
  def stripRequiredPrefix(s: String, prefix: String): String = {
    require(s.startsWith(prefix), s"string doesn't start with prefix: prefix = $prefix, string = $s")
    s.stripPrefix(prefix)
  }
}
|
ankurqss2009/AUSK_NEW
|
modules/user/server-scala/src/main/scala/services/MessageTemplateService.scala
|
<gh_stars>1000+
package services
import com.github.jurajburian.mailer.{Content, Message}
import javax.mail.internet.InternetAddress
import model.User
/** Builds the transactional e-mail messages sent to users. */
class MessageTemplateService {

  /** E-mail asking a newly registered user to confirm their address via `followLink`. */
  def createConfirmRegistrationMessage(user: User, appName: String, fromEmail: String, followLink: String): Message =
    Message(
      subject = "Confirm Email",
      content = Content().html(s"""<p>Hi, ${user.username}!</p>
           | <p>Welcome to $appName. Please click the following link to confirm your email:</p>
           | <p><a href="$followLink">$followLink</a></p>""".stripMargin),
      from = new InternetAddress(fromEmail),
      to = Seq(new InternetAddress(user.email))
    )

  /** E-mail with a password-reset link for `user`. */
  def createRecoverPasswordMessage(user: User, appName: String, fromEmail: String, followLink: String): Message =
    Message(
      subject = "Reset Password",
      content = Content().html(s"""<p>Please click this link to reset your password:</p>
           | <a href="$followLink">$followLink</a>""".stripMargin),
      from = new InternetAddress(fromEmail),
      to = Seq(new InternetAddress(user.email))
    )
}
|
guidojw/amber-api
|
spec/requests/v1/users_controller/activate_account_spec.rb
|
<filename>spec/requests/v1/users_controller/activate_account_spec.rb
require 'rails_helper'
describe V1::UsersController do
  describe 'POST /users/:id/activate_account', version: 1 do
    let(:activation_token) { Faker::Crypto.sha256 }
    let(:record) do
      create(:user, login_enabled: true,
                    activation_token: activation_token,
                    activation_token_valid_till: 1.hour.from_now)
    end
    let(:record_url) { "/v1/users/#{record.id}/activate_account" }
    let(:valid_password) { Faker::Internet.password(min_length: 12) }
    # Restored from an anonymization artifact ("<PASSWORD>"): the password
    # param must be the `valid_password` let, as confirmed by the
    # `record.authenticate(valid_password)` expectation below.
    let(:params) { { activationToken: activation_token, password: valid_password } }

    subject(:request) { post(record_url, params) }

    context 'when without parameters' do
      let(:params) { nil }

      it_behaves_like '404 Not Found'
    end

    context 'when with invalid token' do
      before { params.merge!(activationToken: 'invalid_token') }

      it_behaves_like '404 Not Found'
    end

    context 'when activation token has expired' do
      let(:record) do
        create(:user, login_enabled: true,
                      activation_token: activation_token,
                      activation_token_valid_till: 1.second.ago)
      end

      before { request }

      it_behaves_like '404 Not Found'
    end

    context 'when with correct activation token' do
      before { request && record.reload }

      it_behaves_like '204 No Content'

      it 'sets password correctly' do
        expect(record.authenticate(valid_password)).to eq record
      end
    end
  end
end
|
ggj2010/javabase
|
thread/src/main/java/concurrent/AtomicBooleanTest.java
|
<reponame>ggj2010/javabase
package concurrent;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* @author:gaoguangjin
* @date:2018/9/3
*/
/**
 * Demonstrates the difference between AtomicBoolean's compare-and-set
 * (exactly one thread wins) and a plain volatile check-then-act sequence
 * (several threads can win because the compound action is not atomic).
 */
public class AtomicBooleanTest {
    // CAS-based flag: compareAndSet makes check-and-set a single atomic step.
    private static AtomicBoolean flag = new AtomicBoolean();
    // volatile only guarantees visibility and ordering, not atomicity
    private static volatile boolean notSafeflag ;
    public static void main(String[] args) {
        testNotSafe();
//        test();
    }
    // Launches 10 threads racing on the atomic flag; only one prints "do job".
    private static void test() {
        for (int i = 0; i < 10; i++) {
            getThread().start();
        }
    }
    // Launches 10 threads racing on the volatile flag; several may "do" the job.
    private static void testNotSafe() {
        for (int i = 0; i < 10; i++) {
            getNotSafeThread().start();
        }
    }
    public static Thread getThread() {
        return new Thread() {
            @Override
            public void run() {
                // compareAndSet(false, true) succeeds for exactly one thread.
                if (flag.compareAndSet(false, true)) {
                    try {
                        // Sleep widens the race window for demonstration purposes.
                        Thread.sleep(1);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    System.out.println("do job");
                }else{
                    System.out.println("not do job");
                }
            }
        };
    }
    public static Thread getNotSafeThread() {
        return new Thread() {
            @Override
            public void run() {
                dogetNotSafeThread();
            }
        };
    }
    private static void dogetNotSafeThread() {
        // Check-then-act on a volatile is NOT atomic: multiple threads can read
        // false here before any of them sets the flag to true below.
        if (!notSafeflag) {
            try {
                Thread.sleep(1);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            System.out.println("before do notSafeflag job");
            notSafeflag=true;
            System.out.println("after do notSafeflag job");
        }else{
            System.out.println("not do job");
        }
    }
}
|
296114187/AndroidFramework
|
core/src/main/java/com/voidid/core/enums/EDBType.java
|
package com.voidid.core.enums;
/**
 * Enumeration of the supported database engine types.
 */
public enum EDBType {
    // No database configured.
    NONE,
    // Plain (unencrypted) SQLite database.
    SQLite,
    // SQLCipher: encrypted SQLite database.
    SQLCipher
}
|
zermelo-software/zermelo-app
|
www/touch/examples/touchstyle/app/view/Categories.js
|
// Sencha Touch data view listing shop categories: each item renders a
// background image (built from the record's urlId) plus the category label.
Ext.define('TouchStyle.view.Categories', {
    extend: 'Ext.dataview.DataView',
    xtype: 'categories',

    config: {
        baseCls: 'categories-list',
        // Template applied per record: image from urlId, caption from label.
        itemTpl: [
            '<div class="image" style="background-image:url(http://resources.shopstyle.com/static/mobile/image2-iPad/{urlId}.png)"></div>',
            '<div class="name">{label}</div>'
        ].join(''),
        // Raw record objects kept alongside the view's plain data (see applyData).
        records: null
    },

    // Keeps the original records and hands the view their plain 'data' objects.
    applyData: function(data) {
        this.setRecords(data);
        return Ext.pluck(data || [], 'data');
    },

    // Re-fires itemtap with the tapped record resolved from the kept records.
    onItemTap: function(container, target, index, e) {
        var me = this,
            store = me.getStore(),
            records = me.getRecords(),
            record = store && records[index];

        me.fireEvent('itemtap', me, index, target, record, e);
    }
});
|
AsahiOS/gate
|
usr/src/uts/common/io/pm.c
|
<reponame>AsahiOS/gate<filename>usr/src/uts/common/io/pm.c
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* pm This driver now only handles the ioctl interface. The scanning
* and policy stuff now lives in common/os/sunpm.c.
* Not DDI compliant
*/
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/callb.h> /* callback registration for cpu_deep_idle */
#include <sys/conf.h> /* driver flags and functions */
#include <sys/open.h> /* OTYP_CHR definition */
#include <sys/stat.h> /* S_IFCHR definition */
#include <sys/pathname.h> /* name -> dev_info xlation */
#include <sys/kmem.h> /* memory alloc stuff */
#include <sys/debug.h>
#include <sys/pm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/epm.h>
#include <sys/vfs.h>
#include <sys/mode.h>
#include <sys/mkdev.h>
#include <sys/promif.h>
#include <sys/consdev.h>
#include <sys/ddi_impldefs.h>
#include <sys/poll.h>
#include <sys/note.h>
#include <sys/taskq.h>
#include <sys/policy.h>
#include <sys/cpu_pm.h>
/*
 * Minor number is instance<<8 + clone minor from range 1-254; (0 reserved
 * for "original")
 */
#define	PM_MINOR_TO_CLONE(minor) ((minor) & (PM_MAX_CLONE -1))
/* Shorthand accessors for per-devinfo pm state kept in struct dev_info */
#define	PM_NUMCMPTS(dip) (DEVI(dip)->devi_pm_num_components)
#define	PM_IS_CFB(dip) (DEVI(dip)->devi_pm_flags & PMC_CONSOLE_FB)
#define	PM_MAJOR(dip) ddi_driver_major(dip)
#define	PM_RELE(dip) ddi_release_devi(dip)
/* Seconds an idledown request stays in effect (see pm_timeout_idledown) */
#define	PM_IDLEDOWN_TIME 10
/* Buffer sizing for SMBIOS strings copied in for PM_SEARCH_LIST */
#define	MAXSMBIOSSTRLEN 64	/* from SMBIOS spec */
#define	MAXCOPYBUF	(MAXSMBIOSSTRLEN + 1)
/* Locks and tunables shared with the pm framework in common/os/sunpm.c */
extern kmutex_t	pm_scan_lock;	/* protects autopm_enable, pm_scans_disabled */
extern kmutex_t	pm_clone_lock;	/* protects pm_clones array */
extern int	autopm_enabled;
extern pm_cpupm_t cpupm;
extern pm_cpupm_t cpupm_default_mode;
extern int	pm_default_idle_threshold;
extern int	pm_system_idle_threshold;
extern int	pm_cpu_idle_threshold;
extern kcondvar_t pm_clones_cv[PM_MAX_CLONE];
extern uint_t	pm_poll_cnt[PM_MAX_CLONE];
extern int	autoS3_enabled;
/* Framework services implemented in common/os/sunpm.c */
extern void	pm_record_thresh(pm_thresh_rec_t *);
extern void	pm_register_watcher(int, dev_info_t *);
extern int	pm_get_current_power(dev_info_t *, int, int *);
extern int	pm_interest_registered(int);
extern void	pm_all_to_default_thresholds(void);
extern int	pm_current_threshold(dev_info_t *, int, int *);
extern void	pm_deregister_watcher(int, dev_info_t *);
extern void	pm_unrecord_threshold(char *);
extern int	pm_S3_enabled;
extern int	pm_ppm_searchlist(pm_searchargs_t *);
extern psce_t	*pm_psc_clone_to_direct(int);
extern psce_t	*pm_psc_clone_to_interest(int);
/*
 * The soft state of the power manager.  Since there will only be one
 * of these, just reference it through a static pointer (pmstp).
 */
static struct pmstate {
	dev_info_t *pm_dip;		/* ptr to our dev_info node */
	int pm_instance;		/* for ddi_get_instance() */
	timeout_id_t pm_idledown_id;	/* pm idledown timeout id */
	uchar_t pm_clones[PM_MAX_CLONE]; /* uniqueify multiple opens */
	struct cred *pm_cred[PM_MAX_CLONE]; /* cred for each unique open */
} pm_state = { NULL, -1, (timeout_id_t)0 };
typedef struct pmstate *pm_state_t;
static pm_state_t pmstp = &pm_state;
/* cb_ops(9E) entry points -- this driver is purely open/close/ioctl/poll */
static int pm_open(dev_t *, int, int, cred_t *);
static int pm_close(dev_t, int, int, cred_t *);
static int pm_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int pm_chpoll(dev_t, short, int, short *, struct pollhead **);
static struct cb_ops pm_cb_ops = {
	pm_open,	/* open */
	pm_close,	/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	pm_ioctl,	/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	pm_chpoll,	/* poll */
	ddi_prop_op,	/* prop_op */
	NULL,		/* streamtab */
	D_NEW | D_MP	/* driver compatibility flag */
};
/* dev_ops(9E) entry points and loadable-module linkage */
static int pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static struct dev_ops pm_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	pm_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	pm_attach,		/* attach */
	pm_detach,		/* detach */
	nodev,			/* reset */
	&pm_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};
static struct modldrv modldrv = {
	&mod_driverops,
	"power management driver",
	&pm_ops
};
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, 0
};
/* Local functions */
#ifdef DEBUG
static int	print_info(dev_info_t *, void *);
#endif
int
_init(void)
{
return (mod_install(&modlinkage));
}
int
_fini(void)
{
return (mod_remove(&modlinkage));
}
/*
 * Loadable-module info entry point: report module information.
 */
int
_info(struct modinfo *modinfop)
{
	int rv;

	rv = mod_info(&modlinkage, modinfop);
	return (rv);
}
/*
 * attach(9E): create the single "pm" character minor node and
 * initialize the per-clone condition variables.  Only one instance of
 * this pseudo driver is allowed system-wide; a second attach fails.
 */
static int
pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int i;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/* Only allow one instance */
	if (pmstp->pm_instance != -1)
		return (DDI_FAILURE);
	pmstp->pm_instance = ddi_get_instance(dip);

	if (ddi_create_minor_node(dip, "pm", S_IFCHR,
	    (pmstp->pm_instance << 8) + 0, DDI_PSEUDO, 0) != DDI_SUCCESS)
		return (DDI_FAILURE);

	pmstp->pm_dip = dip;	/* pm_init and getinfo depend on it */

	for (i = 0; i < PM_MAX_CLONE; i++)
		cv_init(&pm_clones_cv[i], NULL, CV_DEFAULT, NULL);

	ddi_report_dev(dip);
	return (DDI_SUCCESS);
}
/*
 * detach(9E): tear down the minor node and the per-clone condition
 * variables.  Detach is refused while an idledown timeout is pending,
 * since its expiry callback walks the device tree.
 */
/* ARGSUSED */
static int
pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int i;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	/*
	 * Don't detach while idledown timeout is pending.  Note that
	 * we already know we're not in pm_ioctl() due to framework
	 * synchronization, so this is a sufficient test.
	 */
	if (pmstp->pm_idledown_id != 0)
		return (DDI_FAILURE);

	for (i = 0; i < PM_MAX_CLONE; i++)
		cv_destroy(&pm_clones_cv[i]);

	ddi_remove_minor_node(dip, NULL);
	pmstp->pm_instance = -1;
	return (DDI_SUCCESS);
}
/*
 * ddi_walk_devs() callback invoked from pm_close(): if this device is
 * under direct power management by the closing clone, release it from
 * direct pm, wake any framework threads blocked on it, ask the
 * dependency thread to bring the device up if a keeper is up, and
 * resume autopm scanning on it.  Always continues the walk.
 */
static int
pm_close_direct_pm_device(dev_info_t *dip, void *arg)
{
	int clone;
	char *pathbuf;
	pm_info_t *info = PM_GET_PM_INFO(dip);
	clone = *((int *)arg);
	/* not a power-managed device -- nothing to release */
	if (!info)
		return (DDI_WALK_CONTINUE);
	pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	PM_LOCK_DIP(dip);
	if (clone == info->pmi_clone) {
		PMD(PMD_CLOSE, ("pm_close: found %s@%s(%s#%d)\n",
		    PM_DEVICE(dip)))
		ASSERT(PM_ISDIRECT(dip));
		info->pmi_dev_pm_state &= ~PM_DIRECT;
		/* drop the dip lock: pm_proceed/dispatch may block */
		PM_UNLOCK_DIP(dip);
		pm_proceed(dip, PMP_RELEASE, -1, -1);
		/* Bring ourselves up if there is a keeper that is up */
		(void) ddi_pathname(dip, pathbuf);
		pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF, NULL,
		    pathbuf, PM_DEP_NOWAIT, NULL, 0);
		PM_LOCK_DIP(dip);
		info->pmi_clone = 0;
		PM_UNLOCK_DIP(dip);
	} else {
		PM_UNLOCK_DIP(dip);
	}
	kmem_free(pathbuf, MAXPATHLEN);
	/* restart autopm on device released from direct pm */
	pm_rescan(dip);
	return (DDI_WALK_CONTINUE);
}
/* str_type values: layout of the ioctl argument */
#define	PM_REQ		1
#define	NOSTRUCT	2
/* diptype values: whether the command targets a specific devinfo node */
#define	DIP		3
#define	NODIP		4
/* deptype values: whether the command carries a dependency pathname */
#define	NODEP		5
#define	DEP		6
/* additional str_type values for state-change and search-list commands */
#define	PM_PSC		7
#define	PM_SRCH		8
/* permission bits checked by pm_perms() */
#define	CHECKPERMS	0x001
#define	SU		0x002
#define	SG		0x004
#define	OWNER		0x008
/* inargs bits: which request fields must be copied in/out */
#define	INWHO		0x001
#define	INDATAINT	0x002
#define	INDATASTRING	0x004
#define	INDEP		0x008
#define	INDATAOUT	0x010
#define	INDATA	(INDATAOUT | INDATAINT | INDATASTRING | INDEP)
/* Per-ioctl dispatch descriptor; one per entry in the pmci[] table */
struct pm_cmd_info {
	int cmd;		/* command code */
	char *name;		/* printable string */
	int supported;		/* true if still supported */
	int str_type;		/* PM_REQ or NOSTRUCT */
	int inargs;		/* INWHO, INDATAINT, INDATASTRING, INDEP, */
				/* INDATAOUT */
	int diptype;		/* DIP or NODIP */
	int deptype;		/* DEP or NODEP */
	int permission;		/* SU, SG, or CHECKPERMS */
};
#ifdef DEBUG
char *pm_cmd_string;		/* last decoded command name (for ASSERTs) */
int pm_cmd;			/* last command code (for ASSERTs) */
#endif
/*
 * Returns nonzero if the credentials satisfy the permission bits
 * required for an ioctl, zero otherwise.
 */
static int
pm_perms(int perm, cred_t *cr)
{
	/* No restrictions at all. */
	if (perm == 0)
		return (1);
	/* Per-device ownership is checked later, so allow for now. */
	if (perm == CHECKPERMS)
		return (1);
	/* Power-management privilege satisfies SU. */
	if ((perm & SU) != 0 && secpolicy_power_mgmt(cr) == 0)
		return (1);
	/* Membership in group 0 satisfies SG. */
	if ((perm & SG) != 0 && crgetgid(cr) == 0)
		return (1);
	return (0);
}
#ifdef DEBUG
/*
 * ddi_walk_devs() callback (DEBUG only, reached via "ioctl 666"):
 * dump each power-managed device's per-component thresholds and
 * current power level to the console.
 *
 * Fix: the threshold loop iterates over levels with 'j' but indexed
 * pmc_thresh with the component index 'i', printing one threshold
 * value numlevels times; index with 'j' instead.
 */
static int
print_info(dev_info_t *dip, void *arg)
{
	_NOTE(ARGUNUSED(arg))
	pm_info_t *info;
	int i, j;
	struct pm_component *cp;
	extern int pm_cur_power(pm_component_t *cp);

	info = PM_GET_PM_INFO(dip);
	if (!info)
		return (DDI_WALK_CONTINUE);
	cmn_err(CE_CONT, "pm_info for %s\n", ddi_node_name(dip));
	for (i = 0; i < PM_NUMCMPTS(dip); i++) {
		cp = PM_CP(dip, i);
		cmn_err(CE_CONT, "\tThresholds[%d] =", i);
		for (j = 0; j < cp->pmc_comp.pmc_numlevels; j++)
			cmn_err(CE_CONT, " %d", cp->pmc_comp.pmc_thresh[j]);
		cmn_err(CE_CONT, "\n");
		cmn_err(CE_CONT, "\tCurrent power[%d] = %d\n", i,
		    pm_cur_power(cp));
	}
	if (PM_ISDIRECT(dip))
		cmn_err(CE_CONT, "\tDirect power management\n");
	return (DDI_WALK_CONTINUE);
}
#endif
/*
 * Dispatch table for all PM_* ioctls, searched linearly by pc_info().
 * Fields per entry:
 *	command, name, supported, str_type, inargs, diptype, deptype,
 *	permission
 * Entries with supported == 0 are obsolete commands that now fail with
 * ENOTTY; omitted trailing fields are implicitly zero.
 */
static struct pm_cmd_info pmci[] = {
	{PM_SCHEDULE, "PM_SCHEDULE", 0},
	{PM_GET_IDLE_TIME, "PM_GET_IDLE_TIME", 0},
	{PM_GET_NUM_CMPTS, "PM_GET_NUM_CMPTS", 0},
	{PM_GET_THRESHOLD, "PM_GET_THRESHOLD", 0},
	{PM_SET_THRESHOLD, "PM_SET_THRESHOLD", 0},
	{PM_GET_NORM_PWR, "PM_GET_NORM_PWR", 0},
	{PM_SET_CUR_PWR, "PM_SET_CUR_PWR", 0},
	{PM_GET_CUR_PWR, "PM_GET_CUR_PWR", 0},
	{PM_GET_NUM_DEPS, "PM_GET_NUM_DEPS", 0},
	{PM_GET_DEP, "PM_GET_DEP", 0},
	{PM_ADD_DEP, "PM_ADD_DEP", 0},
	{PM_REM_DEP, "PM_REM_DEP", 0},
	{PM_REM_DEVICE, "PM_REM_DEVICE", 0},
	{PM_REM_DEVICES, "PM_REM_DEVICES", 0},
	{PM_REPARSE_PM_PROPS, "PM_REPARSE_PM_PROPS", 1, PM_REQ, INWHO, DIP,
	    NODEP},
	{PM_DISABLE_AUTOPM, "PM_DISABLE_AUTOPM", 0},
	{PM_REENABLE_AUTOPM, "PM_REENABLE_AUTOPM", 0},
	{PM_SET_NORM_PWR, "PM_SET_NORM_PWR", 0 },
	{PM_SET_DEVICE_THRESHOLD, "PM_SET_DEVICE_THRESHOLD", 1, PM_REQ,
	    INWHO, NODIP, NODEP, SU},
	{PM_GET_SYSTEM_THRESHOLD, "PM_GET_SYSTEM_THRESHOLD", 1, NOSTRUCT},
	{PM_GET_DEFAULT_SYSTEM_THRESHOLD, "PM_GET_DEFAULT_SYSTEM_THRESHOLD",
	    1, NOSTRUCT},
	{PM_SET_SYSTEM_THRESHOLD, "PM_SET_SYSTEM_THRESHOLD", 1, NOSTRUCT,
	    0, 0, 0, SU},
	{PM_START_PM, "PM_START_PM", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_STOP_PM, "PM_STOP_PM", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_RESET_PM, "PM_RESET_PM", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_GET_STATS, "PM_GET_STATS", 1, PM_REQ, INWHO | INDATAOUT,
	    DIP, NODEP},
	{PM_GET_DEVICE_THRESHOLD, "PM_GET_DEVICE_THRESHOLD", 1, PM_REQ, INWHO,
	    DIP, NODEP},
	{PM_GET_POWER_NAME, "PM_GET_POWER_NAME", 1, PM_REQ, INWHO | INDATAOUT,
	    DIP, NODEP},
	{PM_GET_POWER_LEVELS, "PM_GET_POWER_LEVELS", 1, PM_REQ,
	    INWHO | INDATAOUT, DIP, NODEP},
	{PM_GET_NUM_COMPONENTS, "PM_GET_NUM_COMPONENTS", 1, PM_REQ, INWHO,
	    DIP, NODEP},
	{PM_GET_COMPONENT_NAME, "PM_GET_COMPONENT_NAME", 1, PM_REQ,
	    INWHO | INDATAOUT, DIP, NODEP},
	{PM_GET_NUM_POWER_LEVELS, "PM_GET_NUM_POWER_LEVELS", 1, PM_REQ, INWHO,
	    DIP, NODEP},
	{PM_GET_STATE_CHANGE, "PM_GET_STATE_CHANGE", 1, PM_PSC},
	{PM_GET_STATE_CHANGE_WAIT, "PM_GET_STATE_CHANGE_WAIT", 1, PM_PSC},
	{PM_DIRECT_PM, "PM_DIRECT_PM", 1, PM_REQ, INWHO, DIP, NODEP,
	    (SU | SG)},
	{PM_RELEASE_DIRECT_PM, "PM_RELEASE_DIRECT_PM", 1, PM_REQ, INWHO,
	    DIP, NODEP},
	{PM_DIRECT_NOTIFY, "PM_DIRECT_NOTIFY", 1, PM_PSC},
	{PM_DIRECT_NOTIFY_WAIT, "PM_DIRECT_NOTIFY_WAIT", 1, PM_PSC},
	{PM_RESET_DEVICE_THRESHOLD, "PM_RESET_DEVICE_THRESHOLD", 1, PM_REQ,
	    INWHO, DIP, NODEP, SU},
	{PM_GET_PM_STATE, "PM_GET_PM_STATE", 1, NOSTRUCT},
	{PM_GET_AUTOS3_STATE, "PM_GET_AUTOS3_STATE", 1, NOSTRUCT},
	{PM_GET_S3_SUPPORT_STATE, "PM_GET_S3_SUPPORT_STATE", 1, NOSTRUCT},
	{PM_GET_DEVICE_TYPE, "PM_GET_DEVICE_TYPE", 1, PM_REQ, INWHO,
	    DIP, NODEP},
	{PM_SET_COMPONENT_THRESHOLDS, "PM_SET_COMPONENT_THRESHOLDS", 1, PM_REQ,
	    INWHO | INDATAINT, NODIP, NODEP, SU},
	{PM_GET_COMPONENT_THRESHOLDS, "PM_GET_COMPONENT_THRESHOLDS", 1, PM_REQ,
	    INWHO | INDATAOUT, DIP, NODEP},
	{PM_IDLE_DOWN, "PM_IDLE_DOWN", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_GET_DEVICE_THRESHOLD_BASIS, "PM_GET_DEVICE_THRESHOLD_BASIS", 1,
	    PM_REQ, INWHO, DIP, NODEP},
	{PM_SET_CURRENT_POWER, "PM_SET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
	    NODEP},
	{PM_GET_CURRENT_POWER, "PM_GET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
	    NODEP},
	{PM_GET_FULL_POWER, "PM_GET_FULL_POWER", 1, PM_REQ, INWHO, DIP,
	    NODEP},
	{PM_ADD_DEPENDENT, "PM_ADD_DEPENDENT", 1, PM_REQ, INWHO | INDATASTRING,
	    DIP, DEP, SU},
	{PM_GET_TIME_IDLE, "PM_GET_TIME_IDLE", 1, PM_REQ, INWHO, DIP, NODEP},
	{PM_ADD_DEPENDENT_PROPERTY, "PM_ADD_DEPENDENT_PROPERTY", 1, PM_REQ,
	    INWHO | INDATASTRING, NODIP, DEP, SU},
	{PM_START_CPUPM, "PM_START_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_START_CPUPM_EV, "PM_START_CPUPM_EV", 1, NOSTRUCT, 0,
	    0, 0, SU},
	{PM_START_CPUPM_POLL, "PM_START_CPUPM_POLL", 1, NOSTRUCT, 0,
	    0, 0, SU},
	{PM_STOP_CPUPM, "PM_STOP_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_GET_CPU_THRESHOLD, "PM_GET_CPU_THRESHOLD", 1, NOSTRUCT},
	{PM_SET_CPU_THRESHOLD, "PM_SET_CPU_THRESHOLD", 1, NOSTRUCT,
	    0, 0, 0, SU},
	{PM_GET_CPUPM_STATE, "PM_GET_CPUPM_STATE", 1, NOSTRUCT},
	{PM_START_AUTOS3, "PM_START_AUTOS3", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_STOP_AUTOS3, "PM_STOP_AUTOS3", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_ENABLE_S3, "PM_ENABLE_S3", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_DISABLE_S3, "PM_DISABLE_S3", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_ENTER_S3, "PM_ENTER_S3", 1, NOSTRUCT, 0, 0, 0, SU},
	{PM_SEARCH_LIST, "PM_SEARCH_LIST", 1, PM_SRCH, 0, 0, 0, SU},
	{PM_GET_CMD_NAME, "PM_GET_CMD_NAME", 1, PM_REQ, INDATAOUT, NODIP,
	    NODEP, 0},
	{PM_DISABLE_CPU_DEEP_IDLE, "PM_DISABLE_CPU_DEEP_IDLE", 1, NOSTRUCT, 0,
	    0, 0, SU},
	{PM_ENABLE_CPU_DEEP_IDLE, "PM_START_CPU_DEEP_IDLE", 1, NOSTRUCT, 0,
	    0, 0, SU},
	{PM_DEFAULT_CPU_DEEP_IDLE, "PM_DFLT_CPU_DEEP_IDLE", 1, NOSTRUCT, 0,
	    0, 0, SU},
	{0, NULL}
};
/*
 * Look up the pm_cmd_info descriptor for an ioctl command code.
 * Returns NULL when the command is not in the pmci[] table.
 */
struct pm_cmd_info *
pc_info(int cmd)
{
	struct pm_cmd_info *p = pmci;

	while (p->name != NULL) {
		if (p->cmd == cmd)
			return (p);
		p++;
	}
	return (NULL);
}
/*
 * Map an ioctl command code to its printable name.  Unknown commands
 * are formatted into a static buffer, so the string returned for an
 * invalid command is only stable until the next invalid lookup.
 *
 * Fix: bound the formatting with snprintf instead of sprintf so the
 * fixed-size static buffer can never be overrun.
 */
static char *
pm_decode_cmd(int cmd)
{
	static char invbuf[64];
	struct pm_cmd_info *pcip = pc_info(cmd);

	if (pcip != NULL)
		return (pcip->name);
	(void) snprintf(invbuf, sizeof (invbuf),
	    "ioctl: invalid command %d\n", cmd);
	return (invbuf);
}
/*
 * ddi_walk_devs() callback for PM_START_PM / PM_START_CPUPM[_POLL]:
 * allocate scan resource, create taskq, then dispatch scan,
 * called only if autopm is enabled.  Honors the split between
 * autopm-managed devices and independently managed CPU devices.
 */
int
pm_start_pm_walk(dev_info_t *dip, void *arg)
{
	int cmd = *((int *)arg);
#ifdef PMDDEBUG
	char *cmdstr = pm_decode_cmd(cmd);	/* PMD debug output only */
#endif
	/* skip devices without pm info and basic-component devices */
	if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip))
		return (DDI_WALK_CONTINUE);
	switch (cmd) {
	case PM_START_CPUPM:
	case PM_START_CPUPM_POLL:
		/* cpupm commands only apply to CPU devices */
		if (!PM_ISCPU(dip))
			return (DDI_WALK_CONTINUE);
		mutex_enter(&pm_scan_lock);
		if (!PM_CPUPM_DISABLED && !PM_EVENT_CPUPM)
			pm_scan_init(dip);
		mutex_exit(&pm_scan_lock);
		break;
	case PM_START_PM:
		mutex_enter(&pm_scan_lock);
		/* independently-managed CPUs are not touched by autopm */
		if (PM_ISCPU(dip) && (PM_CPUPM_DISABLED || PM_EVENT_CPUPM)) {
			mutex_exit(&pm_scan_lock);
			return (DDI_WALK_CONTINUE);
		}
		if (autopm_enabled)
			pm_scan_init(dip);
		mutex_exit(&pm_scan_lock);
		break;
	}
	/*
	 * Start doing pm on device: ensure pm_scan data structure initiated,
	 * no need to guarantee a successful scan run.
	 */
	PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: scan %s@%s(%s#%d)\n", cmdstr,
	    PM_DEVICE(dip)))
	pm_rescan(dip);
	return (DDI_WALK_CONTINUE);
}
/*
 * ddi_walk_devs() callback for PM_STOP_PM / PM_STOP_CPUPM / PM_RESET_PM:
 * stop scanning the device, then bring all of its components back to
 * normal (full) power; bringing up is deferred if the device is in the
 * middle of detaching.
 */
int
pm_stop_pm_walk(dev_info_t *dip, void *arg)
{
	pm_info_t *info = PM_GET_PM_INFO(dip);
	int cmd = *((int *)arg);
#ifdef PMDDEBUG
	char *cmdstr = pm_decode_cmd(cmd);	/* PMD debug output only */
#endif
	/* not power managed -- nothing to stop */
	if (!info)
		return (DDI_WALK_CONTINUE);
	switch (cmd) {
	case PM_STOP_PM:
		/*
		 * If CPU devices are being managed independently, then don't
		 * stop them as part of PM_STOP_PM. Only stop them as part of
		 * PM_STOP_CPUPM and PM_RESET_PM.
		 */
		if (PM_ISCPU(dip) && PM_POLLING_CPUPM)
			return (DDI_WALK_CONTINUE);
		break;
	case PM_STOP_CPUPM:
		/*
		 * If stopping CPU devices and this device is not marked
		 * as a CPU device, then skip.
		 */
		if (!PM_ISCPU(dip))
			return (DDI_WALK_CONTINUE);
		break;
	}
	/*
	 * Stop the current scan, and then bring it back to normal power.
	 */
	if (!PM_ISBC(dip)) {
		PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: stop scan for "
		    "%s@%s(%s#%d)\n", cmdstr, PM_DEVICE(dip)))
		pm_scan_stop(dip);
	}
	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip) &&
	    !pm_all_at_normal(dip)) {
		PM_LOCK_DIP(dip);
		if (info->pmi_dev_pm_state & PM_DETACHING) {
			PMD(PMD_ALLNORM, ("ioctl: %s: deferring "
			    "all_to_normal because %s@%s(%s#%d) is detaching\n",
			    cmdstr, PM_DEVICE(dip)))
			/* detach completion will perform the deferred work */
			info->pmi_dev_pm_state |= PM_ALLNORM_DEFERRED;
			PM_UNLOCK_DIP(dip);
			return (DDI_WALK_CONTINUE);
		}
		PM_UNLOCK_DIP(dip);
		if (pm_all_to_normal(dip, PM_CANBLOCK_FAIL) != DDI_SUCCESS) {
			PMD(PMD_ERROR, ("ioctl: %s: could not bring %s@%s"
			    "(%s#%d) to normal\n", cmdstr, PM_DEVICE(dip)))
		}
	}
	return (DDI_WALK_CONTINUE);
}
/*
 * ddi_walk_devs() callback: mark a scanned device for idle-down
 * (flag is one of the PMID_* bits) and kick off a rescan so the new
 * setting takes effect.
 */
static int
pm_start_idledown(dev_info_t *dip, void *arg)
{
	pm_scan_t *sp = PM_GET_PM_SCAN(dip);
	int idleflag = (int)(intptr_t)arg;

	if (sp == NULL)
		return (DDI_WALK_CONTINUE);

	PM_LOCK_DIP(dip);
	sp->ps_idle_down |= idleflag;
	PM_UNLOCK_DIP(dip);
	pm_rescan(dip);

	return (DDI_WALK_CONTINUE);
}
/*
 * ddi_walk_devs() callback run when the idledown interval expires:
 * clear the timer-driven idle-down bits on a scanned device.
 */
/*ARGSUSED*/
static int
pm_end_idledown(dev_info_t *dip, void *ignore)
{
	pm_scan_t *sp = PM_GET_PM_SCAN(dip);

	if (sp == NULL)
		return (DDI_WALK_CONTINUE);

	PM_LOCK_DIP(dip);
	/*
	 * The PMID_TIMERS bits are place holders until idledown expires
	 * and are also the base for regenerating the PMID_SCANS bits.
	 * While the scan thread clears the PMID_SCANS bits after each
	 * scan run, PMID_TIMERS keeps scan-down aggressive throughout
	 * the idledown period; clear them now that it has ended.
	 */
	sp->ps_idle_down &= ~PMID_TIMERS;
	PM_UNLOCK_DIP(dip);

	return (DDI_WALK_CONTINUE);
}
/*
 * timeout(9F) callback fired when the idledown interval expires:
 * clear the pending timeout id under pm_scan_lock, then walk the
 * device tree clearing PMID_TIMERS on every scanned device.
 */
/*ARGSUSED*/
static void
pm_end_idledown_walk(void *ignore)
{
	PMD(PMD_IDLEDOWN, ("ioctl: end_idledown: idledown_id(%lx) timer is "
	    "off\n", (ulong_t)pmstp->pm_idledown_id));

	mutex_enter(&pm_scan_lock);
	pmstp->pm_idledown_id = 0;
	mutex_exit(&pm_scan_lock);

	ddi_walk_devs(ddi_root_node(), pm_end_idledown, NULL);
}
/*
 * pm_timeout_idledown - keep idledown effect for 10 seconds
 * (PM_IDLEDOWN_TIME).
 *
 * Return 0 if another competing caller scheduled idledown timeout,
 * otherwise, return idledown timeout_id.
 */
static timeout_id_t
pm_timeout_idledown(void)
{
	timeout_id_t to_id;
	/*
	 * Keep idle-down in effect for either 10 seconds
	 * or length of a scan interval, which ever is greater.
	 */
	mutex_enter(&pm_scan_lock);
	if (pmstp->pm_idledown_id != 0) {
		/*
		 * A previous idledown timer is still ticking: cancel it.
		 * pm_scan_lock must be dropped around untimeout() because
		 * the expiry callback (pm_end_idledown_walk) acquires it.
		 */
		to_id = pmstp->pm_idledown_id;
		pmstp->pm_idledown_id = 0;
		mutex_exit(&pm_scan_lock);
		(void) untimeout(to_id);
		mutex_enter(&pm_scan_lock);
		if (pmstp->pm_idledown_id != 0) {
			/* a competing caller rescheduled while unlocked */
			PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: "
			    "another caller got it, idledown_id(%lx)!\n",
			    (ulong_t)pmstp->pm_idledown_id))
			mutex_exit(&pm_scan_lock);
			return (0);
		}
	}
	pmstp->pm_idledown_id = timeout(pm_end_idledown_walk, NULL,
	    PM_IDLEDOWN_TIME * hz);
	PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: idledown_id(%lx)\n",
	    (ulong_t)pmstp->pm_idledown_id))
	mutex_exit(&pm_scan_lock);
	return (pmstp->pm_idledown_id);
}
/*
 * chpoll(9E): POLLIN/POLLRDNORM are asserted when state-change entries
 * are queued for this clone (pm_poll_cnt nonzero); otherwise hand back
 * the common pollhead so the caller can block until one arrives.
 */
static int
pm_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
	struct pollhead **phpp)
{
	extern struct pollhead pm_pollhead;	/* common/os/sunpm.c */
	int	clone;
	clone = PM_MINOR_TO_CLONE(getminor(dev));
	PMD(PMD_IOCTL, ("ioctl: pm_chpoll: clone %d\n", clone))
	if ((events & (POLLIN | POLLRDNORM)) && pm_poll_cnt[clone]) {
		*reventsp |= (POLLIN | POLLRDNORM);
		PMD(PMD_IOCTL, ("ioctl: pm_chpoll: reventsp set\n"))
	} else {
		*reventsp = 0;
		if (!anyyet) {
			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: not anyyet\n"))
			*phpp = &pm_pollhead;
		}
#ifdef DEBUG
		else {
			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: anyyet\n"))
		}
#endif
	}
	return (0);
}
/*
 * called by pm_discard_entries to free up the memory. It also decrements
 * pm_poll_cnt, if direct is non zero.
 */
static void
pm_free_entries(psce_t *pscep, int clone, int direct)
{
	pm_state_change_t *p;
	if (pscep) {
		p = pscep->psce_out;
		/* free every queued entry from psce_out to psce_in */
		while (p->size) {
			if (direct) {
				PMD(PMD_IOCTL, ("ioctl: discard: "
				    "pm_poll_cnt[%d] is %d before "
				    "ASSERT\n", clone,
				    pm_poll_cnt[clone]))
				ASSERT(pm_poll_cnt[clone]);
				pm_poll_cnt[clone]--;
			}
			kmem_free(p->physpath, p->size);
			p->size = 0;
			/* circular buffer: wrap from last entry to first */
			if (p == pscep->psce_last)
				p = pscep->psce_first;
			else
				p++;
		}
		/* queue is now empty: reset both cursors */
		pscep->psce_out = pscep->psce_first;
		pscep->psce_in = pscep->psce_first;
		/*
		 * NOTE(review): psce_lock appears to be acquired by the
		 * pm_psc_clone_to_*() lookup that produced pscep and is
		 * released here -- confirm against sunpm.c before changing.
		 */
		mutex_exit(&pscep->psce_lock);
	}
}
/*
 * Discard all queued state-change entries belonging to this clone --
 * both the direct-pm queue and the interest queue.  The memory is
 * released via pm_free_entries().
 */
static void
pm_discard_entries(int clone)
{
	psce_t *pscep;

	mutex_enter(&pm_clone_lock);
	pscep = pm_psc_clone_to_direct(clone);
	/* entries on the direct queue also hold a poll count */
	pm_free_entries(pscep, clone, pscep != NULL);
	pm_free_entries(pm_psc_clone_to_interest(clone), clone, 0);
	mutex_exit(&pm_clone_lock);
}
/*
 * Apply a new idle threshold (with the given PMC_*_THRESH flag) to a
 * device, but only while the device is still on a default- or
 * cpu-class threshold; explicitly configured device or component
 * thresholds are left alone.  Basic-component and directly-managed
 * devices are skipped entirely.
 */
static void
pm_set_idle_threshold(dev_info_t *dip, int thresh, int flag)
{
	int how;

	if (PM_ISBC(dip) || PM_ISDIRECT(dip))
		return;

	how = DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL;
	if (how == PMC_DEF_THRESH || how == PMC_CPU_THRESH) {
		PMD(PMD_IOCTL, ("ioctl: set_idle_threshold: set "
		    "%s@%s(%s#%d) default thresh to 0t%d\n",
		    PM_DEVICE(dip), thresh))
		pm_set_device_threshold(dip, thresh, flag);
	}
}
/*
 * ddi_walk_devs() callback for PM_SET_SYSTEM_THRESHOLD and
 * PM_SET_CPU_THRESHOLD: push the new system-wide (or cpu-class)
 * default idle threshold to each applicable power-managed device and
 * trigger a rescan so it takes effect.
 */
static int
pm_set_idle_thresh_walk(dev_info_t *dip, void *arg)
{
	int cmd = *((int *)arg);

	if (!PM_GET_PM_INFO(dip))
		return (DDI_WALK_CONTINUE);

	if (cmd == PM_SET_SYSTEM_THRESHOLD) {
		/* cpu-class devices keep their separate cpu threshold */
		if (!(DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)) {
			pm_set_idle_threshold(dip, pm_system_idle_threshold,
			    PMC_DEF_THRESH);
			pm_rescan(dip);
		}
	} else if (cmd == PM_SET_CPU_THRESHOLD) {
		/* only cpu devices take the cpu threshold */
		if (PM_ISCPU(dip)) {
			pm_set_idle_threshold(dip, pm_cpu_idle_threshold,
			    PMC_CPU_THRESH);
			pm_rescan(dip);
		}
	}
	return (DDI_WALK_CONTINUE);
}
/*
 * getinfo(9E): translate a dev_t into our dev_info node or instance
 * number.  The instance is encoded in the upper bits of the minor
 * number (minor >> 8).
 */
/*ARGSUSED*/
static int
pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (pmstp->pm_instance == -1)
			return (DDI_FAILURE);
		*result = pmstp->pm_dip;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)(getminor((dev_t)arg) >> 8);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
/*
 * open(9E): allocate a free clone slot (1..PM_MAX_CLONE-1; slot 0 is
 * reserved for the "original" minor), hold the opener's credentials
 * for later permission checks, and hand back a clone-specific minor
 * number.  Returns ENXIO when every clone slot is in use.
 */
/*ARGSUSED1*/
static int
pm_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	int cn;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	mutex_enter(&pm_clone_lock);
	cn = 1;
	while (cn < PM_MAX_CLONE && pmstp->pm_clones[cn])
		cn++;
	if (cn == PM_MAX_CLONE) {
		mutex_exit(&pm_clone_lock);
		return (ENXIO);
	}
	/* remember who opened us; released again in pm_close() */
	pmstp->pm_cred[cn] = cr;
	crhold(cr);
	*devp = makedevice(getmajor(*devp), (pmstp->pm_instance << 8) + cn);
	pmstp->pm_clones[cn] = 1;
	mutex_exit(&pm_clone_lock);

	return (0);
}
/*
 * close(9E): release this clone slot -- detach any device the clone
 * was directly power-managing, drop the held credentials, discard
 * queued state-change entries, and deregister the clone as a watcher.
 */
/*ARGSUSED1*/
static int
pm_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	int clone;
	if (otyp != OTYP_CHR)
		return (EINVAL);
	clone = PM_MINOR_TO_CLONE(getminor(dev));
	PMD(PMD_CLOSE, ("pm_close: minor %x, clone %x\n", getminor(dev),
	    clone))
	/*
	 * Walk the entire device tree to find the corresponding
	 * device and operate on it.
	 */
	ddi_walk_devs(ddi_root_node(), pm_close_direct_pm_device,
	    (void *) &clone);
	/* drop the credential hold taken in pm_open() */
	crfree(pmstp->pm_cred[clone]);
	pmstp->pm_cred[clone] = 0;
	pmstp->pm_clones[clone] = 0;
	/* flush queued entries; poll count must then have drained to 0 */
	pm_discard_entries(clone);
	ASSERT(pm_poll_cnt[clone] == 0);
	pm_deregister_watcher(clone, NULL);
	return (0);
}
/*ARGSUSED*/
static int
pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
{
struct pm_cmd_info *pc_info(int);
struct pm_cmd_info *pcip = pc_info(cmd);
pm_req_t req;
dev_info_t *dip = NULL;
pm_info_t *info = NULL;
int clone;
char *cmdstr = pm_decode_cmd(cmd);
/*
* To keep devinfo nodes from going away while we're holding a
* pointer to their dip, pm_name_to_dip() optionally holds
* the devinfo node. If we've done that, we set dipheld
* so we know at the end of the ioctl processing to release the
* node again.
*/
int dipheld = 0;
int icount = 0;
int i;
int comps;
size_t lencopied;
int ret = ENOTTY;
int curpower;
char who[MAXNAMELEN];
size_t wholen; /* copyinstr length */
size_t deplen = MAXNAMELEN;
char *dep, i_dep_buf[MAXNAMELEN];
char pathbuf[MAXNAMELEN];
struct pm_component *cp;
#ifdef _MULTI_DATAMODEL
pm_state_change32_t *pscp32;
pm_state_change32_t psc32;
pm_searchargs32_t psa32;
size_t copysize32;
#endif
pm_state_change_t *pscp;
pm_state_change_t psc;
pm_searchargs_t psa;
char listname[MAXCOPYBUF];
char manufacturer[MAXCOPYBUF];
char product[MAXCOPYBUF];
size_t copysize;
PMD(PMD_IOCTL, ("ioctl: %s: begin\n", cmdstr))
#ifdef DEBUG
if (cmd == 666) {
ddi_walk_devs(ddi_root_node(), print_info, NULL);
return (0);
}
ret = 0x0badcafe; /* sanity checking */
pm_cmd = cmd; /* for ASSERT debugging */
pm_cmd_string = cmdstr; /* for ASSERT debugging */
#endif
if (pcip == NULL) {
PMD(PMD_ERROR, ("ioctl: unknown command %d\n", cmd))
return (ENOTTY);
}
if (pcip == NULL || pcip->supported == 0) {
PMD(PMD_ERROR, ("ioctl: command %s no longer supported\n",
pcip->name))
return (ENOTTY);
}
wholen = 0;
dep = i_dep_buf;
i_dep_buf[0] = 0;
clone = PM_MINOR_TO_CLONE(getminor(dev));
if (!pm_perms(pcip->permission, pmstp->pm_cred[clone])) {
ret = EPERM;
return (ret);
}
switch (pcip->str_type) {
case PM_REQ:
{
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
pm_req32_t req32;
if (ddi_copyin((caddr_t)arg, &req32,
sizeof (req32), mode) != 0) {
PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
"EFAULT\n\n", cmdstr))
ret = EFAULT;
break;
}
req.component = req32.component;
req.value = req32.value;
req.datasize = req32.datasize;
if (pcip->inargs & INWHO) {
ret = copyinstr((char *)(uintptr_t)
req32.physpath, who, MAXNAMELEN, &wholen);
if (ret) {
PMD(PMD_ERROR, ("ioctl: %s: "
"copyinstr fails returning %d\n",
cmdstr, ret))
break;
}
req.physpath = who;
PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n",
cmdstr, req.physpath))
}
if (pcip->inargs & INDATA) {
req.data = (void *)(uintptr_t)req32.data;
req.datasize = req32.datasize;
} else {
req.data = NULL;
req.datasize = 0;
}
switch (pcip->diptype) {
case DIP:
if (!(dip =
pm_name_to_dip(req.physpath, 1))) {
PMD(PMD_ERROR, ("ioctl: %s: "
"pm_name_to_dip for %s failed\n",
cmdstr, req.physpath))
return (ENODEV);
}
ASSERT(!dipheld);
dipheld++;
break;
case NODIP:
break;
default:
/*
* Internal error, invalid ioctl description
* force debug entry even if pm_debug not set
*/
#ifdef DEBUG
pm_log("invalid diptype %d for cmd %d (%s)\n",
pcip->diptype, cmd, pcip->name);
#endif
ASSERT(0);
return (EIO);
}
if (pcip->inargs & INDATAINT) {
int32_t int32buf;
int32_t *i32p;
int *ip;
icount = req32.datasize / sizeof (int32_t);
if (icount <= 0) {
PMD(PMD_ERROR, ("ioctl: %s: datasize"
" 0 or neg EFAULT\n\n", cmdstr))
ret = EFAULT;
break;
}
ASSERT(!(pcip->inargs & INDATASTRING));
req.datasize = icount * sizeof (int);
req.data = kmem_alloc(req.datasize, KM_SLEEP);
ip = req.data;
ret = 0;
for (i = 0,
i32p = (int32_t *)(uintptr_t)req32.data;
i < icount; i++, i32p++) {
if (ddi_copyin((void *)i32p, &int32buf,
sizeof (int32_t), mode)) {
kmem_free(req.data,
req.datasize);
PMD(PMD_ERROR, ("ioctl: %s: "
"entry %d EFAULT\n",
cmdstr, i))
ret = EFAULT;
break;
}
*ip++ = (int)int32buf;
}
if (ret)
break;
}
if (pcip->inargs & INDATASTRING) {
ASSERT(!(pcip->inargs & INDATAINT));
ASSERT(pcip->deptype == DEP);
if (req32.data != 0) {
if (copyinstr((void *)(uintptr_t)
req32.data, dep, deplen, NULL)) {
PMD(PMD_ERROR, ("ioctl: %s: "
"0x%p dep size %lx, EFAULT"
"\n", cmdstr,
(void *)req.data, deplen))
ret = EFAULT;
break;
}
#ifdef DEBUG
else {
PMD(PMD_DEP, ("ioctl: %s: "
"dep %s\n", cmdstr, dep))
}
#endif
} else {
PMD(PMD_ERROR, ("ioctl: %s: no "
"dependent\n", cmdstr))
ret = EINVAL;
break;
}
}
} else
#endif /* _MULTI_DATAMODEL */
{
if (ddi_copyin((caddr_t)arg,
&req, sizeof (req), mode) != 0) {
PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
"EFAULT\n\n", cmdstr))
ret = EFAULT;
break;
}
if (pcip->inargs & INWHO) {
ret = copyinstr((char *)req.physpath, who,
MAXNAMELEN, &wholen);
if (ret) {
PMD(PMD_ERROR, ("ioctl: %s copyinstr"
" fails returning %d\n", cmdstr,
ret))
break;
}
req.physpath = who;
PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n",
cmdstr, req.physpath))
}
if (!(pcip->inargs & INDATA)) {
req.data = NULL;
req.datasize = 0;
}
switch (pcip->diptype) {
case DIP:
if (!(dip =
pm_name_to_dip(req.physpath, 1))) {
PMD(PMD_ERROR, ("ioctl: %s: "
"pm_name_to_dip for %s failed\n",
cmdstr, req.physpath))
return (ENODEV);
}
ASSERT(!dipheld);
dipheld++;
break;
case NODIP:
break;
default:
/*
* Internal error, invalid ioctl description
* force debug entry even if pm_debug not set
*/
#ifdef DEBUG
pm_log("invalid diptype %d for cmd %d (%s)\n",
pcip->diptype, cmd, pcip->name);
#endif
ASSERT(0);
return (EIO);
}
if (pcip->inargs & INDATAINT) {
int *ip;
ASSERT(!(pcip->inargs & INDATASTRING));
ip = req.data;
icount = req.datasize / sizeof (int);
if (icount <= 0) {
PMD(PMD_ERROR, ("ioctl: %s: datasize"
" 0 or neg EFAULT\n\n", cmdstr))
ret = EFAULT;
break;
}
req.data = kmem_alloc(req.datasize, KM_SLEEP);
if (ddi_copyin((caddr_t)ip, req.data,
req.datasize, mode) != 0) {
PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
"EFAULT\n\n", cmdstr))
ret = EFAULT;
break;
}
}
if (pcip->inargs & INDATASTRING) {
ASSERT(!(pcip->inargs & INDATAINT));
ASSERT(pcip->deptype == DEP);
if (req.data != NULL) {
if (copyinstr((caddr_t)req.data,
dep, deplen, NULL)) {
PMD(PMD_ERROR, ("ioctl: %s: "
"0x%p dep size %lu, "
"EFAULT\n", cmdstr,
(void *)req.data, deplen))
ret = EFAULT;
break;
}
#ifdef DEBUG
else {
PMD(PMD_DEP, ("ioctl: %s: "
"dep %s\n", cmdstr, dep))
}
#endif
} else {
PMD(PMD_ERROR, ("ioctl: %s: no "
"dependent\n", cmdstr))
ret = EINVAL;
break;
}
}
}
/*
* Now we've got all the args in for the commands that
* use the new pm_req struct.
*/
switch (cmd) {
case PM_REPARSE_PM_PROPS:
{
struct dev_ops *drv;
struct cb_ops *cb;
void *propval;
int length;
/*
* This ioctl is provided only for the ddivs pm test.
* We only do it to a driver which explicitly allows
* us to do so by exporting a pm-reparse-ok property.
* We only care whether the property exists or not.
*/
if ((drv = ddi_get_driver(dip)) == NULL) {
ret = EINVAL;
break;
}
if ((cb = drv->devo_cb_ops) != NULL) {
if ((*cb->cb_prop_op)(DDI_DEV_T_ANY, dip,
PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
"pm-reparse-ok", (caddr_t)&propval,
&length) != DDI_SUCCESS) {
ret = EINVAL;
break;
}
} else if (ddi_prop_op(DDI_DEV_T_ANY, dip,
PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
"pm-reparse-ok", (caddr_t)&propval,
&length) != DDI_SUCCESS) {
ret = EINVAL;
break;
}
kmem_free(propval, length);
ret = e_new_pm_props(dip);
break;
}
case PM_GET_DEVICE_THRESHOLD:
{
PM_LOCK_DIP(dip);
if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip)) {
PM_UNLOCK_DIP(dip);
PMD(PMD_ERROR, ("ioctl: %s: ENODEV\n",
cmdstr))
ret = ENODEV;
break;
}
*rval_p = DEVI(dip)->devi_pm_dev_thresh;
PM_UNLOCK_DIP(dip);
ret = 0;
break;
}
case PM_DIRECT_PM:
{
int has_dep;
if ((info = PM_GET_PM_INFO(dip)) == NULL) {
PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
"ENODEV\n", cmdstr))
ret = ENODEV;
break;
}
			/*
			 * Check to see if there is a dependency on
			 * this kept device; if so, return EBUSY.
			 */
(void) ddi_pathname(dip, pathbuf);
pm_dispatch_to_dep_thread(PM_DEP_WK_CHECK_KEPT,
NULL, pathbuf, PM_DEP_WAIT, &has_dep, 0);
if (has_dep) {
PMD(PMD_ERROR | PMD_DPM, ("%s EBUSY\n",
cmdstr))
ret = EBUSY;
break;
}
PM_LOCK_DIP(dip);
if (PM_ISDIRECT(dip) || (info->pmi_clone != 0)) {
PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
"%s@%s(%s#%d): EBUSY\n", cmdstr,
PM_DEVICE(dip)))
PM_UNLOCK_DIP(dip);
ret = EBUSY;
break;
}
info->pmi_dev_pm_state |= PM_DIRECT;
info->pmi_clone = clone;
PM_UNLOCK_DIP(dip);
PMD(PMD_DPM, ("ioctl: %s: info %p, pmi_clone %d\n",
cmdstr, (void *)info, clone))
mutex_enter(&pm_clone_lock);
pm_register_watcher(clone, dip);
mutex_exit(&pm_clone_lock);
ret = 0;
break;
}
case PM_RELEASE_DIRECT_PM:
{
if ((info = PM_GET_PM_INFO(dip)) == NULL) {
PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
"ENODEV\n", cmdstr))
ret = ENODEV;
break;
}
PM_LOCK_DIP(dip);
if (info->pmi_clone != clone) {
PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
"%s@%s(%s#%d) EINVAL\n", cmdstr,
PM_DEVICE(dip)))
ret = EINVAL;
PM_UNLOCK_DIP(dip);
break;
}
ASSERT(PM_ISDIRECT(dip));
info->pmi_dev_pm_state &= ~PM_DIRECT;
PM_UNLOCK_DIP(dip);
/* Bring ourselves up if there is a keeper. */
(void) ddi_pathname(dip, pathbuf);
pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF,
NULL, pathbuf, PM_DEP_WAIT, NULL, 0);
pm_discard_entries(clone);
pm_deregister_watcher(clone, dip);
/*
* Now we could let the other threads that are
* trying to do a DIRECT_PM thru
*/
PM_LOCK_DIP(dip);
info->pmi_clone = 0;
PM_UNLOCK_DIP(dip);
pm_proceed(dip, PMP_RELEASE, -1, -1);
PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
cmdstr))
pm_rescan(dip);
ret = 0;
break;
}
case PM_SET_CURRENT_POWER:
{
int comp = req.component;
int value = req.value;
PMD(PMD_DPM, ("ioctl: %s: %s component %d to value "
"%d\n", cmdstr, req.physpath, comp, value))
if (!e_pm_valid_comp(dip, comp, NULL) ||
!e_pm_valid_power(dip, comp, value)) {
PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
"physpath=%s, comp=%d, level=%d, fails\n",
cmdstr, req.physpath, comp, value))
ret = EINVAL;
break;
}
if ((info = PM_GET_PM_INFO(dip)) == NULL) {
PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
"ENODEV\n", cmdstr))
ret = ENODEV;
break;
}
if (info->pmi_clone != clone) {
PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
"(not owner) %s fails; clone %d, owner %d"
"\n", cmdstr, req.physpath, clone,
info->pmi_clone))
ret = EINVAL;
break;
}
ASSERT(PM_ISDIRECT(dip));
if (pm_set_power(dip, comp, value, PM_LEVEL_EXACT,
PM_CANBLOCK_BLOCK, 0, &ret) != DDI_SUCCESS) {
PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
"pm_set_power for %s fails, errno=%d\n",
cmdstr, req.physpath, ret))
break;
}
pm_proceed(dip, PMP_SETPOWER, comp, value);
/*
* Power down all idle components if console framebuffer
* is powered off.
*/
if (PM_IS_CFB(dip) && (pm_system_idle_threshold ==
pm_default_idle_threshold)) {
dev_info_t *root = ddi_root_node();
if (PM_ISBC(dip)) {
if (comp == 0 && value == 0 &&
(pm_timeout_idledown() != 0)) {
ddi_walk_devs(root,
pm_start_idledown,
(void *)PMID_CFB);
}
} else {
int count = 0;
for (i = 0; i < PM_NUMCMPTS(dip); i++) {
ret = pm_get_current_power(dip,
i, &curpower);
if (ret == DDI_SUCCESS &&
curpower == 0)
count++;
}
if ((count == PM_NUMCMPTS(dip)) &&
(pm_timeout_idledown() != 0)) {
ddi_walk_devs(root,
pm_start_idledown,
(void *)PMID_CFB);
}
}
}
PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
cmdstr))
pm_rescan(dip);
*rval_p = 0;
ret = 0;
break;
}
case PM_GET_FULL_POWER:
{
int normal;
ASSERT(dip);
PMD(PMD_NORM, ("ioctl: %s: %s component %d\n",
cmdstr, req.physpath, req.component))
normal = pm_get_normal_power(dip, req.component);
if (normal == DDI_FAILURE) {
PMD(PMD_ERROR | PMD_NORM, ("ioctl: %s: "
"returns EINVAL\n", cmdstr))
ret = EINVAL;
break;
}
*rval_p = normal;
PMD(PMD_NORM, ("ioctl: %s: returns %d\n",
cmdstr, normal))
ret = 0;
break;
}
case PM_GET_CURRENT_POWER:
{
if (pm_get_current_power(dip, req.component,
rval_p) != DDI_SUCCESS) {
PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s "
"EINVAL\n", cmdstr))
ret = EINVAL;
break;
}
PMD(PMD_DPM, ("ioctl: %s: %s comp %d returns %d\n",
cmdstr, req.physpath, req.component, *rval_p))
if (*rval_p == PM_LEVEL_UNKNOWN)
ret = EAGAIN;
else
ret = 0;
break;
}
case PM_GET_TIME_IDLE:
{
time_t timestamp;
int comp = req.component;
pm_component_t *cp;
if (!e_pm_valid_comp(dip, comp, &cp)) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"component %d > numcmpts - 1 %d--EINVAL\n",
cmdstr, PM_DEVICE(dip), comp,
PM_NUMCMPTS(dip) - 1))
ret = EINVAL;
break;
}
timestamp = cp->pmc_timestamp;
if (timestamp) {
time_t now;
(void) drv_getparm(TIME, &now);
*rval_p = (now - timestamp);
} else {
*rval_p = 0;
}
ret = 0;
break;
}
case PM_ADD_DEPENDENT:
{
dev_info_t *kept_dip;
PMD(PMD_KEEPS, ("%s, kept %s, keeper %s\n", cmdstr,
dep, req.physpath))
/*
* hold and install kept while processing dependency
* keeper (in .physpath) has already been held.
*/
if (dep[0] == '\0') {
PMD(PMD_ERROR, ("kept NULL or null\n"))
ret = EINVAL;
break;
} else if ((kept_dip =
pm_name_to_dip(dep, 1)) == NULL) {
PMD(PMD_ERROR, ("no dip for kept %s\n", dep))
ret = ENODEV;
break;
} else if (kept_dip == dip) {
PMD(PMD_ERROR, ("keeper(%s, %p) - kept(%s, %p) "
"self-dependency not allowed.\n",
dep, (void *)kept_dip, req.physpath,
(void *) dip))
PM_RELE(dip); /* release "double" hold */
ret = EINVAL;
break;
}
ASSERT(!(strcmp(req.physpath, (char *)dep) == 0));
/*
* record dependency, then walk through device tree
* independently on behalf of kept and keeper to
* establish newly created dependency.
*/
pm_dispatch_to_dep_thread(PM_DEP_WK_RECORD_KEEPER,
req.physpath, dep, PM_DEP_WAIT, NULL, 0);
/*
* release kept after establishing dependency, keeper
* is released as part of ioctl exit processing.
*/
PM_RELE(kept_dip);
*rval_p = 0;
ret = 0;
break;
}
case PM_ADD_DEPENDENT_PROPERTY:
{
char *keeper, *kept;
if (dep[0] == '\0') {
PMD(PMD_ERROR, ("ioctl: %s: dep NULL or "
"null\n", cmdstr))
ret = EINVAL;
break;
}
kept = dep;
keeper = req.physpath;
/*
* record keeper - kept dependency, then walk through
* device tree to find out all attached keeper, walk
* through again to apply dependency to all the
* potential kept.
*/
pm_dispatch_to_dep_thread(
PM_DEP_WK_RECORD_KEEPER_PROP, keeper, kept,
PM_DEP_WAIT, NULL, 0);
*rval_p = 0;
ret = 0;
break;
}
case PM_SET_DEVICE_THRESHOLD:
{
pm_thresh_rec_t *rp;
pm_pte_t *ep; /* threshold header storage */
int *tp; /* threshold storage */
size_t size;
extern int pm_thresh_specd(dev_info_t *);
/*
* The header struct plus one entry struct plus one
* threshold plus the length of the string
*/
size = sizeof (pm_thresh_rec_t) +
(sizeof (pm_pte_t) * 1) +
(1 * sizeof (int)) +
strlen(req.physpath) + 1;
rp = kmem_zalloc(size, KM_SLEEP);
rp->ptr_size = size;
rp->ptr_numcomps = 0; /* means device threshold */
ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
rp->ptr_entries = ep;
tp = (int *)((intptr_t)ep +
(1 * sizeof (pm_pte_t)));
ep->pte_numthresh = 1;
ep->pte_thresh = tp;
*tp++ = req.value;
(void) strcat((char *)tp, req.physpath);
rp->ptr_physpath = (char *)tp;
ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
(intptr_t)rp + rp->ptr_size);
PMD(PMD_THRESH, ("ioctl: %s: record thresh %d for "
"%s\n", cmdstr, req.value, req.physpath))
pm_record_thresh(rp);
/*
* Don't free rp, pm_record_thresh() keeps it.
* We don't try to apply it ourselves because we'd need
* to know too much about locking. Since we don't
* hold a lock the entry could be removed before
* we get here
*/
ASSERT(dip == NULL);
ret = 0; /* can't fail now */
if (!(dip = pm_name_to_dip(req.physpath, 1))) {
break;
}
(void) pm_thresh_specd(dip);
PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d)\n",
cmdstr, PM_DEVICE(dip)))
PM_RELE(dip);
break;
}
case PM_RESET_DEVICE_THRESHOLD:
{
/*
* This only applies to a currently attached and power
* managed node
*/
/*
* We don't do this to old-style drivers
*/
info = PM_GET_PM_INFO(dip);
if (info == NULL) {
PMD(PMD_ERROR, ("ioctl: %s: %s not power "
"managed\n", cmdstr, req.physpath))
ret = EINVAL;
break;
}
if (PM_ISBC(dip)) {
PMD(PMD_ERROR, ("ioctl: %s: %s is BC\n",
cmdstr, req.physpath))
ret = EINVAL;
break;
}
pm_unrecord_threshold(req.physpath);
if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
pm_set_device_threshold(dip,
pm_cpu_idle_threshold, PMC_CPU_THRESH);
else
pm_set_device_threshold(dip,
pm_system_idle_threshold, PMC_DEF_THRESH);
ret = 0;
break;
}
case PM_GET_NUM_COMPONENTS:
{
ret = 0;
*rval_p = PM_NUMCMPTS(dip);
break;
}
case PM_GET_DEVICE_TYPE:
{
ret = 0;
if ((info = PM_GET_PM_INFO(dip)) == NULL) {
PMD(PMD_ERROR, ("ioctl: %s: "
"PM_NO_PM_COMPONENTS\n", cmdstr))
*rval_p = PM_NO_PM_COMPONENTS;
break;
}
if (PM_ISBC(dip)) {
*rval_p = PM_CREATE_COMPONENTS;
} else {
*rval_p = PM_AUTOPM;
}
break;
}
case PM_SET_COMPONENT_THRESHOLDS:
{
int comps = 0;
int *end = (int *)req.data + icount;
pm_thresh_rec_t *rp;
pm_pte_t *ep; /* threshold header storage */
int *tp; /* threshold storage */
int *ip;
int j;
size_t size;
extern int pm_thresh_specd(dev_info_t *);
extern int pm_valid_thresh(dev_info_t *,
pm_thresh_rec_t *);
for (ip = req.data; *ip; ip++) {
if (ip >= end) {
ret = EFAULT;
break;
}
comps++;
/* skip over indicated number of entries */
for (j = *ip; j; j--) {
if (++ip >= end) {
ret = EFAULT;
break;
}
}
if (ret)
break;
}
if (ret)
break;
if ((intptr_t)ip != (intptr_t)end - sizeof (int)) {
/* did not exactly fill buffer */
ret = EINVAL;
break;
}
if (comps == 0) {
PMD(PMD_ERROR, ("ioctl: %s: %s 0 components"
"--EINVAL\n", cmdstr, req.physpath))
ret = EINVAL;
break;
}
/*
* The header struct plus one entry struct per component
* plus the size of the lists minus the counts
* plus the length of the string
*/
size = sizeof (pm_thresh_rec_t) +
(sizeof (pm_pte_t) * comps) + req.datasize -
((comps + 1) * sizeof (int)) +
strlen(req.physpath) + 1;
rp = kmem_zalloc(size, KM_SLEEP);
rp->ptr_size = size;
rp->ptr_numcomps = comps;
ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
rp->ptr_entries = ep;
tp = (int *)((intptr_t)ep +
(comps * sizeof (pm_pte_t)));
for (ip = req.data; *ip; ep++) {
ep->pte_numthresh = *ip;
ep->pte_thresh = tp;
for (j = *ip++; j; j--) {
*tp++ = *ip++;
}
}
(void) strcat((char *)tp, req.physpath);
rp->ptr_physpath = (char *)tp;
ASSERT((intptr_t)end == (intptr_t)ip + sizeof (int));
ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
(intptr_t)rp + rp->ptr_size);
ASSERT(dip == NULL);
/*
* If this is not a currently power managed node,
* then we can't check for validity of the thresholds
*/
if (!(dip = pm_name_to_dip(req.physpath, 1))) {
/* don't free rp, pm_record_thresh uses it */
pm_record_thresh(rp);
PMD(PMD_ERROR, ("ioctl: %s: pm_name_to_dip "
"for %s failed\n", cmdstr, req.physpath))
ret = 0;
break;
}
ASSERT(!dipheld);
dipheld++;
if (!pm_valid_thresh(dip, rp)) {
PMD(PMD_ERROR, ("ioctl: %s: invalid thresh "
"for %s@%s(%s#%d)\n", cmdstr,
PM_DEVICE(dip)))
kmem_free(rp, size);
ret = EINVAL;
break;
}
/*
* We don't just apply it ourselves because we'd need
* to know too much about locking. Since we don't
* hold a lock the entry could be removed before
* we get here
*/
pm_record_thresh(rp);
(void) pm_thresh_specd(dip);
ret = 0;
break;
}
case PM_GET_COMPONENT_THRESHOLDS:
{
int musthave;
int numthresholds = 0;
int wordsize;
int numcomps;
caddr_t uaddr = req.data; /* user address */
int val; /* int value to be copied out */
int32_t val32; /* int32 value to be copied out */
caddr_t vaddr; /* address to copyout from */
int j;
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
wordsize = sizeof (int32_t);
} else
#endif /* _MULTI_DATAMODEL */
{
wordsize = sizeof (int);
}
ASSERT(dip);
numcomps = PM_NUMCMPTS(dip);
for (i = 0; i < numcomps; i++) {
cp = PM_CP(dip, i);
numthresholds += cp->pmc_comp.pmc_numlevels - 1;
}
musthave = (numthresholds + numcomps + 1) * wordsize;
if (req.datasize < musthave) {
PMD(PMD_ERROR, ("ioctl: %s: size %ld, need "
"%d--EINVAL\n", cmdstr, req.datasize,
musthave))
ret = EINVAL;
break;
}
PM_LOCK_DIP(dip);
for (i = 0; i < numcomps; i++) {
int *thp;
cp = PM_CP(dip, i);
thp = cp->pmc_comp.pmc_thresh;
/* first copyout the count */
if (wordsize == sizeof (int32_t)) {
val32 = cp->pmc_comp.pmc_numlevels - 1;
vaddr = (caddr_t)&val32;
} else {
val = cp->pmc_comp.pmc_numlevels - 1;
vaddr = (caddr_t)&val;
}
if (ddi_copyout(vaddr, (void *)uaddr,
wordsize, mode) != 0) {
PM_UNLOCK_DIP(dip);
PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
"(%s#%d) vaddr %p EFAULT\n",
cmdstr, PM_DEVICE(dip),
(void*)vaddr))
ret = EFAULT;
break;
}
vaddr = uaddr;
vaddr += wordsize;
uaddr = (caddr_t)vaddr;
/* then copyout each threshold value */
for (j = 0; j < cp->pmc_comp.pmc_numlevels - 1;
j++) {
if (wordsize == sizeof (int32_t)) {
val32 = thp[j + 1];
vaddr = (caddr_t)&val32;
} else {
val = thp[i + 1];
vaddr = (caddr_t)&val;
}
if (ddi_copyout(vaddr, (void *) uaddr,
wordsize, mode) != 0) {
PM_UNLOCK_DIP(dip);
PMD(PMD_ERROR, ("ioctl: %s: "
"%s@%s(%s#%d) uaddr %p "
"EFAULT\n", cmdstr,
PM_DEVICE(dip),
(void *)uaddr))
ret = EFAULT;
break;
}
vaddr = uaddr;
vaddr += wordsize;
uaddr = (caddr_t)vaddr;
}
}
if (ret)
break;
/* last copyout a terminating 0 count */
if (wordsize == sizeof (int32_t)) {
val32 = 0;
vaddr = (caddr_t)&val32;
} else {
ASSERT(wordsize == sizeof (int));
val = 0;
vaddr = (caddr_t)&val;
}
if (ddi_copyout(vaddr, uaddr, wordsize, mode) != 0) {
PM_UNLOCK_DIP(dip);
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"vaddr %p (0 count) EFAULT\n", cmdstr,
PM_DEVICE(dip), (void *)vaddr))
ret = EFAULT;
break;
}
/* finished, so don't need to increment addresses */
PM_UNLOCK_DIP(dip);
ret = 0;
break;
}
case PM_GET_STATS:
{
time_t now;
time_t *timestamp;
extern int pm_cur_power(pm_component_t *cp);
int musthave;
int wordsize;
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
wordsize = sizeof (int32_t);
} else
#endif /* _MULTI_DATAMODEL */
{
wordsize = sizeof (int);
}
comps = PM_NUMCMPTS(dip);
if (comps == 0 || PM_GET_PM_INFO(dip) == NULL) {
PMD(PMD_ERROR, ("ioctl: %s: %s no components"
" or not power managed--EINVAL\n", cmdstr,
req.physpath))
ret = EINVAL;
break;
}
musthave = comps * 2 * wordsize;
if (req.datasize < musthave) {
PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
"%d--EINVAL\n", cmdstr, req.datasize,
musthave))
ret = EINVAL;
break;
}
PM_LOCK_DIP(dip);
(void) drv_getparm(TIME, &now);
timestamp = kmem_zalloc(comps * sizeof (time_t),
KM_SLEEP);
pm_get_timestamps(dip, timestamp);
/*
* First the current power levels
*/
for (i = 0; i < comps; i++) {
int curpwr;
int32_t curpwr32;
caddr_t cpaddr;
cp = PM_CP(dip, i);
if (wordsize == sizeof (int)) {
curpwr = pm_cur_power(cp);
cpaddr = (caddr_t)&curpwr;
} else {
ASSERT(wordsize == sizeof (int32_t));
curpwr32 = pm_cur_power(cp);
cpaddr = (caddr_t)&curpwr32;
}
if (ddi_copyout(cpaddr, (void *) req.data,
wordsize, mode) != 0) {
PM_UNLOCK_DIP(dip);
PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
"(%s#%d) req.data %p EFAULT\n",
cmdstr, PM_DEVICE(dip),
(void *)req.data))
ASSERT(!dipheld);
return (EFAULT);
}
cpaddr = (caddr_t)req.data;
cpaddr += wordsize;
req.data = cpaddr;
}
/*
* Then the times remaining
*/
for (i = 0; i < comps; i++) {
int retval;
int32_t retval32;
caddr_t rvaddr;
int curpwr;
cp = PM_CP(dip, i);
curpwr = cp->pmc_cur_pwr;
if (curpwr == 0 || timestamp[i] == 0) {
PMD(PMD_STATS, ("ioctl: %s: "
"cur_pwer %x, timestamp %lx\n",
cmdstr, curpwr, timestamp[i]))
retval = INT_MAX;
} else {
int thresh;
(void) pm_current_threshold(dip, i,
&thresh);
retval = thresh - (now - timestamp[i]);
PMD(PMD_STATS, ("ioctl: %s: current "
"thresh %x, now %lx, timestamp %lx,"
" retval %x\n", cmdstr, thresh, now,
timestamp[i], retval))
}
if (wordsize == sizeof (int)) {
rvaddr = (caddr_t)&retval;
} else {
ASSERT(wordsize == sizeof (int32_t));
retval32 = retval;
rvaddr = (caddr_t)&retval32;
}
if (ddi_copyout(rvaddr, (void *) req.data,
wordsize, mode) != 0) {
PM_UNLOCK_DIP(dip);
PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
"(%s#%d) req.data %p EFAULT\n",
cmdstr, PM_DEVICE(dip),
(void *)req.data))
ASSERT(!dipheld);
kmem_free(timestamp,
comps * sizeof (time_t));
return (EFAULT);
}
rvaddr = (caddr_t)req.data;
rvaddr += wordsize;
req.data = (int *)rvaddr;
}
PM_UNLOCK_DIP(dip);
*rval_p = comps;
ret = 0;
kmem_free(timestamp, comps * sizeof (time_t));
break;
}
case PM_GET_CMD_NAME:
{
PMD(PMD_IOCTL, ("%s: %s\n", cmdstr,
pm_decode_cmd(req.value)))
if (ret = copyoutstr(pm_decode_cmd(req.value),
(char *)req.data, req.datasize, &lencopied)) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"copyoutstr %p failed--EFAULT\n", cmdstr,
PM_DEVICE(dip), (void *)req.data))
break;
}
*rval_p = lencopied;
ret = 0;
break;
}
case PM_GET_COMPONENT_NAME:
{
ASSERT(dip);
if (!e_pm_valid_comp(dip, req.component, &cp)) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"component %d > numcmpts - 1 %d--EINVAL\n",
cmdstr, PM_DEVICE(dip), req.component,
PM_NUMCMPTS(dip) - 1))
ret = EINVAL;
break;
}
if (ret = copyoutstr(cp->pmc_comp.pmc_name,
(char *)req.data, req.datasize, &lencopied)) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"copyoutstr %p failed--EFAULT\n", cmdstr,
PM_DEVICE(dip), (void *)req.data))
break;
}
*rval_p = lencopied;
ret = 0;
break;
}
case PM_GET_POWER_NAME:
{
int i;
ASSERT(dip);
if (!e_pm_valid_comp(dip, req.component, &cp)) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"component %d > numcmpts - 1 %d--EINVAL\n",
cmdstr, PM_DEVICE(dip), req.component,
PM_NUMCMPTS(dip) - 1))
ret = EINVAL;
break;
}
if ((i = req.value) < 0 ||
i > cp->pmc_comp.pmc_numlevels - 1) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"value %d > num_levels - 1 %d--EINVAL\n",
cmdstr, PM_DEVICE(dip), req.value,
cp->pmc_comp.pmc_numlevels - 1))
ret = EINVAL;
break;
}
dep = cp->pmc_comp.pmc_lnames[req.value];
if (ret = copyoutstr(dep,
req.data, req.datasize, &lencopied)) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"copyoutstr %p failed--EFAULT\n", cmdstr,
PM_DEVICE(dip), (void *)req.data))
break;
}
*rval_p = lencopied;
ret = 0;
break;
}
case PM_GET_POWER_LEVELS:
{
int musthave;
int numlevels;
int wordsize;
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
wordsize = sizeof (int32_t);
} else
#endif /* _MULTI_DATAMODEL */
{
wordsize = sizeof (int);
}
ASSERT(dip);
if (!e_pm_valid_comp(dip, req.component, &cp)) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"has %d components, component %d requested"
"--EINVAL\n", cmdstr, PM_DEVICE(dip),
PM_NUMCMPTS(dip), req.component))
ret = EINVAL;
break;
}
numlevels = cp->pmc_comp.pmc_numlevels;
musthave = numlevels * wordsize;
if (req.datasize < musthave) {
PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
"%d--EINVAL\n", cmdstr, req.datasize,
musthave))
ret = EINVAL;
break;
}
PM_LOCK_DIP(dip);
for (i = 0; i < numlevels; i++) {
int level;
int32_t level32;
caddr_t laddr;
if (wordsize == sizeof (int)) {
level = cp->pmc_comp.pmc_lvals[i];
laddr = (caddr_t)&level;
} else {
level32 = cp->pmc_comp.pmc_lvals[i];
laddr = (caddr_t)&level32;
}
if (ddi_copyout(laddr, (void *) req.data,
wordsize, mode) != 0) {
PM_UNLOCK_DIP(dip);
PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
"(%s#%d) laddr %p EFAULT\n",
cmdstr, PM_DEVICE(dip),
(void *)laddr))
ASSERT(!dipheld);
return (EFAULT);
}
laddr = (caddr_t)req.data;
laddr += wordsize;
req.data = (int *)laddr;
}
PM_UNLOCK_DIP(dip);
*rval_p = numlevels;
ret = 0;
break;
}
case PM_GET_NUM_POWER_LEVELS:
{
if (!e_pm_valid_comp(dip, req.component, &cp)) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"component %d > numcmpts - 1 %d--EINVAL\n",
cmdstr, PM_DEVICE(dip), req.component,
PM_NUMCMPTS(dip) - 1))
ret = EINVAL;
break;
}
*rval_p = cp->pmc_comp.pmc_numlevels;
ret = 0;
break;
}
case PM_GET_DEVICE_THRESHOLD_BASIS:
{
ret = 0;
PM_LOCK_DIP(dip);
if ((info = PM_GET_PM_INFO(dip)) == NULL) {
PM_UNLOCK_DIP(dip);
PMD(PMD_ERROR, ("ioctl: %s: "
"PM_NO_PM_COMPONENTS\n", cmdstr))
*rval_p = PM_NO_PM_COMPONENTS;
break;
}
if (PM_ISDIRECT(dip)) {
PM_UNLOCK_DIP(dip);
*rval_p = PM_DIRECTLY_MANAGED;
break;
}
switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
case PMC_DEF_THRESH:
case PMC_NEXDEF_THRESH:
*rval_p = PM_DEFAULT_THRESHOLD;
break;
case PMC_DEV_THRESH:
*rval_p = PM_DEVICE_THRESHOLD;
break;
case PMC_COMP_THRESH:
*rval_p = PM_COMPONENT_THRESHOLD;
break;
case PMC_CPU_THRESH:
*rval_p = PM_CPU_THRESHOLD;
break;
default:
if (PM_ISBC(dip)) {
*rval_p = PM_OLD_THRESHOLD;
break;
}
PMD(PMD_ERROR, ("ioctl: %s: default, not "
"BC--EINVAL", cmdstr))
ret = EINVAL;
break;
}
PM_UNLOCK_DIP(dip);
break;
}
default:
/*
* Internal error, invalid ioctl description
* force debug entry even if pm_debug not set
*/
#ifdef DEBUG
pm_log("invalid diptype %d for cmd %d (%s)\n",
pcip->diptype, cmd, pcip->name);
#endif
ASSERT(0);
return (EIO);
}
break;
}
case PM_PSC:
{
/*
* Commands that require pm_state_change_t as arg
*/
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
pscp32 = (pm_state_change32_t *)arg;
if (ddi_copyin((caddr_t)arg, &psc32,
sizeof (psc32), mode) != 0) {
PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
"EFAULT\n\n", cmdstr))
ASSERT(!dipheld);
return (EFAULT);
}
psc.physpath = (caddr_t)(uintptr_t)psc32.physpath;
psc.size = psc32.size;
} else
#endif /* _MULTI_DATAMODEL */
{
pscp = (pm_state_change_t *)arg;
if (ddi_copyin((caddr_t)arg, &psc,
sizeof (psc), mode) != 0) {
PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
"EFAULT\n\n", cmdstr))
ASSERT(!dipheld);
return (EFAULT);
}
}
switch (cmd) {
case PM_GET_STATE_CHANGE:
case PM_GET_STATE_CHANGE_WAIT:
{
psce_t *pscep;
pm_state_change_t *p;
caddr_t physpath;
size_t physlen;
/*
* We want to know if any device has changed state.
* We look up by clone. In case we have another thread
* from the same process, we loop.
* pm_psc_clone_to_interest() returns a locked entry.
* We create an internal copy of the event entry prior
* to copyout to user space because we don't want to
* hold the psce_lock while doing copyout as we might
* hit page fault which eventually brings us back
* here requesting the same lock.
*/
mutex_enter(&pm_clone_lock);
if (!pm_interest_registered(clone))
pm_register_watcher(clone, NULL);
while ((pscep =
pm_psc_clone_to_interest(clone)) == NULL) {
if (cmd == PM_GET_STATE_CHANGE) {
PMD(PMD_IOCTL, ("ioctl: %s: "
"EWOULDBLOCK\n", cmdstr))
mutex_exit(&pm_clone_lock);
ASSERT(!dipheld);
return (EWOULDBLOCK);
} else {
if (cv_wait_sig(&pm_clones_cv[clone],
&pm_clone_lock) == 0) {
mutex_exit(&pm_clone_lock);
PMD(PMD_ERROR, ("ioctl: %s "
"EINTR\n", cmdstr))
ASSERT(!dipheld);
return (EINTR);
}
}
}
mutex_exit(&pm_clone_lock);
physlen = pscep->psce_out->size;
physpath = NULL;
/*
* If we were unable to store the path while bringing
* up the console fb upon entering the prom, we give
* a "" name with the overrun event set
*/
if (physlen == (size_t)-1) { /* kmemalloc failed */
physpath = kmem_zalloc(1, KM_SLEEP);
physlen = 1;
}
if ((psc.physpath == NULL) || (psc.size < physlen)) {
PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n", cmdstr))
mutex_exit(&pscep->psce_lock);
ret = EFAULT;
break;
}
if (physpath == NULL) {
physpath = kmem_zalloc(physlen, KM_SLEEP);
bcopy((const void *) pscep->psce_out->physpath,
(void *) physpath, physlen);
}
p = pscep->psce_out;
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
#ifdef DEBUG
size_t usrcopysize;
#endif
psc32.flags = (ushort_t)p->flags;
psc32.event = (ushort_t)p->event;
psc32.timestamp = (int32_t)p->timestamp;
psc32.component = (int32_t)p->component;
psc32.old_level = (int32_t)p->old_level;
psc32.new_level = (int32_t)p->new_level;
copysize32 = ((intptr_t)&psc32.size -
(intptr_t)&psc32.component);
#ifdef DEBUG
usrcopysize = ((intptr_t)&pscp32->size -
(intptr_t)&pscp32->component);
ASSERT(usrcopysize == copysize32);
#endif
} else
#endif /* _MULTI_DATAMODEL */
{
psc.flags = p->flags;
psc.event = p->event;
psc.timestamp = p->timestamp;
psc.component = p->component;
psc.old_level = p->old_level;
psc.new_level = p->new_level;
copysize = ((long)&p->size -
(long)&p->component);
}
if (p->size != (size_t)-1)
kmem_free(p->physpath, p->size);
p->size = 0;
p->physpath = NULL;
if (pscep->psce_out == pscep->psce_last)
p = pscep->psce_first;
else
p++;
pscep->psce_out = p;
mutex_exit(&pscep->psce_lock);
ret = copyoutstr(physpath, psc.physpath,
physlen, &lencopied);
kmem_free(physpath, physlen);
if (ret) {
PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
"failed--EFAULT\n", cmdstr,
(void *)psc.physpath))
break;
}
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
if (ddi_copyout(&psc32.component,
&pscp32->component, copysize32, mode)
!= 0) {
PMD(PMD_ERROR, ("ioctl: %s: copyout "
"failed--EFAULT\n", cmdstr))
ret = EFAULT;
break;
}
} else
#endif /* _MULTI_DATAMODEL */
{
if (ddi_copyout(&psc.component,
&pscp->component, copysize, mode) != 0) {
PMD(PMD_ERROR, ("ioctl: %s: copyout "
"failed--EFAULT\n", cmdstr))
ret = EFAULT;
break;
}
}
ret = 0;
break;
}
case PM_DIRECT_NOTIFY:
case PM_DIRECT_NOTIFY_WAIT:
{
psce_t *pscep;
pm_state_change_t *p;
caddr_t physpath;
size_t physlen;
/*
* We want to know if any direct device of ours has
* something we should know about. We look up by clone.
* In case we have another thread from the same process,
* we loop.
* pm_psc_clone_to_direct() returns a locked entry.
*/
mutex_enter(&pm_clone_lock);
while (pm_poll_cnt[clone] == 0 ||
(pscep = pm_psc_clone_to_direct(clone)) == NULL) {
if (cmd == PM_DIRECT_NOTIFY) {
PMD(PMD_IOCTL, ("ioctl: %s: "
"EWOULDBLOCK\n", cmdstr))
mutex_exit(&pm_clone_lock);
ASSERT(!dipheld);
return (EWOULDBLOCK);
} else {
if (cv_wait_sig(&pm_clones_cv[clone],
&pm_clone_lock) == 0) {
mutex_exit(&pm_clone_lock);
PMD(PMD_ERROR, ("ioctl: %s: "
"EINTR\n", cmdstr))
ASSERT(!dipheld);
return (EINTR);
}
}
}
mutex_exit(&pm_clone_lock);
physlen = pscep->psce_out->size;
if ((psc.physpath == NULL) || (psc.size < physlen)) {
mutex_exit(&pscep->psce_lock);
PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n",
cmdstr))
ret = EFAULT;
break;
}
physpath = kmem_zalloc(physlen, KM_SLEEP);
bcopy((const void *) pscep->psce_out->physpath,
(void *) physpath, physlen);
p = pscep->psce_out;
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
#ifdef DEBUG
size_t usrcopysize;
#endif
psc32.component = (int32_t)p->component;
psc32.flags = (ushort_t)p->flags;
psc32.event = (ushort_t)p->event;
psc32.timestamp = (int32_t)p->timestamp;
psc32.old_level = (int32_t)p->old_level;
psc32.new_level = (int32_t)p->new_level;
copysize32 = (intptr_t)&psc32.size -
(intptr_t)&psc32.component;
PMD(PMD_DPM, ("ioctl: %s: PDN32 %s, comp %d "
"%d -> %d\n", cmdstr, physpath,
p->component, p->old_level, p->new_level))
#ifdef DEBUG
usrcopysize = (intptr_t)&pscp32->size -
(intptr_t)&pscp32->component;
ASSERT(usrcopysize == copysize32);
#endif
} else
#endif
{
psc.component = p->component;
psc.flags = p->flags;
psc.event = p->event;
psc.timestamp = p->timestamp;
psc.old_level = p->old_level;
psc.new_level = p->new_level;
copysize = (intptr_t)&p->size -
(intptr_t)&p->component;
PMD(PMD_DPM, ("ioctl: %s: PDN %s, comp %d "
"%d -> %d\n", cmdstr, physpath,
p->component, p->old_level, p->new_level))
}
mutex_enter(&pm_clone_lock);
PMD(PMD_IOCTL, ("ioctl: %s: pm_poll_cnt[%d] is %d "
"before decrement\n", cmdstr, clone,
pm_poll_cnt[clone]))
pm_poll_cnt[clone]--;
mutex_exit(&pm_clone_lock);
kmem_free(p->physpath, p->size);
p->size = 0;
p->physpath = NULL;
if (pscep->psce_out == pscep->psce_last)
p = pscep->psce_first;
else
p++;
pscep->psce_out = p;
mutex_exit(&pscep->psce_lock);
ret = copyoutstr(physpath, psc.physpath,
physlen, &lencopied);
kmem_free(physpath, physlen);
if (ret) {
PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
"failed--EFAULT\n", cmdstr,
(void *)psc.physpath))
break;
}
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
if (ddi_copyout(&psc32.component,
&pscp32->component, copysize32, mode)
!= 0) {
PMD(PMD_ERROR, ("ioctl: %s: copyout "
"failed--EFAULT\n", cmdstr))
ret = EFAULT;
break;
}
} else
#endif /* _MULTI_DATAMODEL */
{
if (ddi_copyout(&psc.component,
&pscp->component, copysize, mode) != 0) {
PMD(PMD_ERROR, ("ioctl: %s: copyout "
"failed--EFAULT\n", cmdstr))
ret = EFAULT;
break;
}
}
ret = 0;
break;
}
default:
/*
* Internal error, invalid ioctl description
* force debug entry even if pm_debug not set
*/
#ifdef DEBUG
pm_log("invalid diptype %d for cmd %d (%s)\n",
pcip->diptype, cmd, pcip->name);
#endif
ASSERT(0);
return (EIO);
}
break;
}
case PM_SRCH: /* command that takes a pm_searchargs_t arg */
{
/*
* If no ppm, then there is nothing to search.
*/
if (DEVI(ddi_root_node())->devi_pm_ppm == NULL) {
ret = ENODEV;
break;
}
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
if (ddi_copyin((caddr_t)arg, &psa32,
sizeof (psa32), mode) != 0) {
PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
"EFAULT\n\n", cmdstr))
return (EFAULT);
}
if (copyinstr((void *)(uintptr_t)psa32.pms_listname,
listname, MAXCOPYBUF, NULL)) {
PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
"%d, " "EFAULT\n", cmdstr,
(void *)(uintptr_t)psa32.pms_listname,
MAXCOPYBUF))
ret = EFAULT;
break;
}
if (copyinstr((void *)(uintptr_t)psa32.pms_manufacturer,
manufacturer, MAXCOPYBUF, NULL)) {
PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
"%d, " "EFAULT\n", cmdstr,
(void *)(uintptr_t)psa32.pms_manufacturer,
MAXCOPYBUF))
ret = EFAULT;
break;
}
if (copyinstr((void *)(uintptr_t)psa32.pms_product,
product, MAXCOPYBUF, NULL)) {
PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
"%d, " "EFAULT\n", cmdstr,
(void *)(uintptr_t)psa32.pms_product,
MAXCOPYBUF))
ret = EFAULT;
break;
}
} else
#endif /* _MULTI_DATAMODEL */
{
if (ddi_copyin((caddr_t)arg, &psa,
sizeof (psa), mode) != 0) {
PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
"EFAULT\n\n", cmdstr))
return (EFAULT);
}
if (copyinstr(psa.pms_listname,
listname, MAXCOPYBUF, NULL)) {
PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
"%d, " "EFAULT\n", cmdstr,
(void *)psa.pms_listname, MAXCOPYBUF))
ret = EFAULT;
break;
}
if (copyinstr(psa.pms_manufacturer,
manufacturer, MAXCOPYBUF, NULL)) {
PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
"%d, " "EFAULT\n", cmdstr,
(void *)psa.pms_manufacturer, MAXCOPYBUF))
ret = EFAULT;
break;
}
if (copyinstr(psa.pms_product,
product, MAXCOPYBUF, NULL)) {
PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
"%d, " "EFAULT\n", cmdstr,
(void *)psa.pms_product, MAXCOPYBUF))
ret = EFAULT;
break;
}
}
psa.pms_listname = listname;
psa.pms_manufacturer = manufacturer;
psa.pms_product = product;
switch (cmd) {
case PM_SEARCH_LIST:
ret = pm_ppm_searchlist(&psa);
break;
default:
/*
* Internal error, invalid ioctl description
* force debug entry even if pm_debug not set
*/
#ifdef DEBUG
pm_log("invalid diptype %d for cmd %d (%s)\n",
pcip->diptype, cmd, pcip->name);
#endif
ASSERT(0);
return (EIO);
}
break;
}
case NOSTRUCT:
{
switch (cmd) {
case PM_START_PM:
case PM_START_CPUPM:
case PM_START_CPUPM_EV:
case PM_START_CPUPM_POLL:
{
pm_cpupm_t new_mode = PM_CPUPM_NOTSET;
pm_cpupm_t old_mode = PM_CPUPM_NOTSET;
int r;
mutex_enter(&pm_scan_lock);
if ((cmd == PM_START_PM && autopm_enabled) ||
(cmd == PM_START_CPUPM && PM_DEFAULT_CPUPM) ||
(cmd == PM_START_CPUPM_EV && PM_EVENT_CPUPM) ||
(cmd == PM_START_CPUPM_POLL && PM_POLLING_CPUPM)) {
mutex_exit(&pm_scan_lock);
PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n", cmdstr))
ret = EBUSY;
break;
}
if (cmd == PM_START_PM) {
autopm_enabled = 1;
} else if (cmd == PM_START_CPUPM) {
old_mode = cpupm;
new_mode = cpupm = cpupm_default_mode;
} else if (cmd == PM_START_CPUPM_EV) {
old_mode = cpupm;
new_mode = cpupm = PM_CPUPM_EVENT;
} else if (cmd == PM_START_CPUPM_POLL) {
old_mode = cpupm;
new_mode = cpupm = PM_CPUPM_POLLING;
}
mutex_exit(&pm_scan_lock);
/*
* If we are changing CPUPM modes, and it is active,
* then stop it from operating in the old mode.
*/
if (old_mode == PM_CPUPM_POLLING) {
int c = PM_STOP_CPUPM;
ddi_walk_devs(ddi_root_node(), pm_stop_pm_walk,
&c);
} else if (old_mode == PM_CPUPM_EVENT) {
r = cpupm_set_policy(CPUPM_POLICY_DISABLED);
/*
* Disabling CPUPM policy should always
* succeed
*/
ASSERT(r == 0);
}
/*
* If we are changing to event based CPUPM, enable it.
* In the event it's not supported, fall back to
* polling based CPUPM.
*/
if (new_mode == PM_CPUPM_EVENT &&
cpupm_set_policy(CPUPM_POLICY_ELASTIC) < 0) {
mutex_enter(&pm_scan_lock);
new_mode = cpupm = PM_CPUPM_POLLING;
cmd = PM_START_CPUPM_POLL;
mutex_exit(&pm_scan_lock);
}
if (new_mode == PM_CPUPM_POLLING ||
cmd == PM_START_PM) {
ddi_walk_devs(ddi_root_node(), pm_start_pm_walk,
&cmd);
}
ret = 0;
break;
}
case PM_RESET_PM:
case PM_STOP_PM:
case PM_STOP_CPUPM:
{
extern void pm_discard_thresholds(void);
pm_cpupm_t old_mode = PM_CPUPM_NOTSET;
mutex_enter(&pm_scan_lock);
if ((cmd == PM_STOP_PM && !autopm_enabled) ||
(cmd == PM_STOP_CPUPM && PM_CPUPM_DISABLED)) {
mutex_exit(&pm_scan_lock);
PMD(PMD_ERROR, ("ioctl: %s: EINVAL\n",
cmdstr))
ret = EINVAL;
break;
}
if (cmd == PM_STOP_PM) {
autopm_enabled = 0;
pm_S3_enabled = 0;
autoS3_enabled = 0;
} else if (cmd == PM_STOP_CPUPM) {
old_mode = cpupm;
cpupm = PM_CPUPM_DISABLE;
} else {
autopm_enabled = 0;
autoS3_enabled = 0;
old_mode = cpupm;
cpupm = PM_CPUPM_NOTSET;
}
mutex_exit(&pm_scan_lock);
/*
* bring devices to full power level, stop scan
* If CPUPM was operating in event driven mode, disable
* that.
*/
if (old_mode == PM_CPUPM_EVENT) {
(void) cpupm_set_policy(CPUPM_POLICY_DISABLED);
}
ddi_walk_devs(ddi_root_node(), pm_stop_pm_walk, &cmd);
ret = 0;
if (cmd == PM_STOP_PM || cmd == PM_STOP_CPUPM)
break;
/*
* Now do only PM_RESET_PM stuff.
*/
pm_system_idle_threshold = pm_default_idle_threshold;
pm_cpu_idle_threshold = 0;
pm_discard_thresholds();
pm_all_to_default_thresholds();
pm_dispatch_to_dep_thread(PM_DEP_WK_REMOVE_DEP,
NULL, NULL, PM_DEP_WAIT, NULL, 0);
break;
}
case PM_GET_SYSTEM_THRESHOLD:
{
*rval_p = pm_system_idle_threshold;
ret = 0;
break;
}
case PM_GET_DEFAULT_SYSTEM_THRESHOLD:
{
*rval_p = pm_default_idle_threshold;
ret = 0;
break;
}
case PM_GET_CPU_THRESHOLD:
{
*rval_p = pm_cpu_idle_threshold;
ret = 0;
break;
}
case PM_SET_SYSTEM_THRESHOLD:
case PM_SET_CPU_THRESHOLD:
{
if ((int)arg < 0) {
PMD(PMD_ERROR, ("ioctl: %s: arg 0x%x < 0"
"--EINVAL\n", cmdstr, (int)arg))
ret = EINVAL;
break;
}
PMD(PMD_IOCTL, ("ioctl: %s: 0x%x 0t%d\n", cmdstr,
(int)arg, (int)arg))
if (cmd == PM_SET_SYSTEM_THRESHOLD)
pm_system_idle_threshold = (int)arg;
else {
pm_cpu_idle_threshold = (int)arg;
}
ddi_walk_devs(ddi_root_node(), pm_set_idle_thresh_walk,
(void *) &cmd);
ret = 0;
break;
}
case PM_IDLE_DOWN:
{
if (pm_timeout_idledown() != 0) {
ddi_walk_devs(ddi_root_node(),
pm_start_idledown, (void *)PMID_IOC);
}
ret = 0;
break;
}
case PM_GET_PM_STATE:
{
if (autopm_enabled) {
*rval_p = PM_SYSTEM_PM_ENABLED;
} else {
*rval_p = PM_SYSTEM_PM_DISABLED;
}
ret = 0;
break;
}
case PM_GET_CPUPM_STATE:
{
if (PM_POLLING_CPUPM || PM_EVENT_CPUPM)
*rval_p = PM_CPU_PM_ENABLED;
else if (PM_CPUPM_DISABLED)
*rval_p = PM_CPU_PM_DISABLED;
else
*rval_p = PM_CPU_PM_NOTSET;
ret = 0;
break;
}
case PM_GET_AUTOS3_STATE:
{
if (autoS3_enabled) {
*rval_p = PM_AUTOS3_ENABLED;
} else {
*rval_p = PM_AUTOS3_DISABLED;
}
ret = 0;
break;
}
case PM_GET_S3_SUPPORT_STATE:
{
if (pm_S3_enabled) {
*rval_p = PM_S3_SUPPORT_ENABLED;
} else {
*rval_p = PM_S3_SUPPORT_DISABLED;
}
ret = 0;
break;
}
/*
* pmconfig tells us if the platform supports S3
*/
case PM_ENABLE_S3:
{
mutex_enter(&pm_scan_lock);
if (pm_S3_enabled) {
mutex_exit(&pm_scan_lock);
PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
cmdstr))
ret = EBUSY;
break;
}
pm_S3_enabled = 1;
mutex_exit(&pm_scan_lock);
ret = 0;
break;
}
case PM_DISABLE_S3:
{
mutex_enter(&pm_scan_lock);
pm_S3_enabled = 0;
mutex_exit(&pm_scan_lock);
ret = 0;
break;
}
case PM_START_AUTOS3:
{
mutex_enter(&pm_scan_lock);
if (autoS3_enabled) {
mutex_exit(&pm_scan_lock);
PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
cmdstr))
ret = EBUSY;
break;
}
autoS3_enabled = 1;
mutex_exit(&pm_scan_lock);
ret = 0;
break;
}
case PM_STOP_AUTOS3:
{
mutex_enter(&pm_scan_lock);
autoS3_enabled = 0;
mutex_exit(&pm_scan_lock);
ret = 0;
break;
}
case PM_ENABLE_CPU_DEEP_IDLE:
{
if (callb_execute_class(CB_CL_CPU_DEEP_IDLE,
PM_ENABLE_CPU_DEEP_IDLE) == NULL)
ret = 0;
else
ret = EBUSY;
break;
}
case PM_DISABLE_CPU_DEEP_IDLE:
{
if (callb_execute_class(CB_CL_CPU_DEEP_IDLE,
PM_DISABLE_CPU_DEEP_IDLE) == NULL)
ret = 0;
else
ret = EINVAL;
break;
}
case PM_DEFAULT_CPU_DEEP_IDLE:
{
if (callb_execute_class(CB_CL_CPU_DEEP_IDLE,
PM_DEFAULT_CPU_DEEP_IDLE) == NULL)
ret = 0;
else
ret = EBUSY;
break;
}
default:
/*
* Internal error, invalid ioctl description
* force debug entry even if pm_debug not set
*/
#ifdef DEBUG
pm_log("invalid diptype %d for cmd %d (%s)\n",
pcip->diptype, cmd, pcip->name);
#endif
ASSERT(0);
return (EIO);
}
break;
}
default:
/*
* Internal error, invalid ioctl description
* force debug entry even if pm_debug not set
*/
#ifdef DEBUG
pm_log("ioctl: invalid str_type %d for cmd %d (%s)\n",
pcip->str_type, cmd, pcip->name);
#endif
ASSERT(0);
return (EIO);
}
ASSERT(ret != 0x0badcafe); /* some cmd in wrong case! */
if (dipheld) {
ASSERT(dip);
PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d) for "
"exiting pm_ioctl\n", cmdstr, PM_DEVICE(dip)))
PM_RELE(dip);
}
PMD(PMD_IOCTL, ("ioctl: %s: end, ret=%d\n", cmdstr, ret))
return (ret);
}
|
MICommunity/PSI-JAMI
|
jami-enricher/src/test/java/psidev/psi/mi/jami/enricher/impl/BasicExperimentEnricherTest.java
|
package psidev.psi.mi.jami.enricher.impl;
import junit.framework.Assert;
import org.junit.Before;
import org.junit.Test;
import psidev.psi.mi.jami.bridges.fetcher.mock.MockCvTermFetcher;
import psidev.psi.mi.jami.bridges.fetcher.mock.MockPublicationFetcher;
import psidev.psi.mi.jami.enricher.exception.EnricherException;
import psidev.psi.mi.jami.enricher.impl.minimal.MinimalCvTermEnricher;
import psidev.psi.mi.jami.enricher.impl.minimal.MinimalExperimentEnricher;
import psidev.psi.mi.jami.enricher.impl.minimal.MinimalPublicationEnricher;
import psidev.psi.mi.jami.enricher.listener.EnrichmentStatus;
import psidev.psi.mi.jami.enricher.listener.ExperimentEnricherListener;
import psidev.psi.mi.jami.enricher.listener.impl.ExperimentEnricherListenerManager;
import psidev.psi.mi.jami.model.*;
import psidev.psi.mi.jami.model.impl.DefaultCvTerm;
import psidev.psi.mi.jami.model.impl.DefaultExperiment;
import psidev.psi.mi.jami.model.impl.DefaultPublication;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
 * Unit tests for the {@link MinimalExperimentEnricher}.
 *
 * The duplicated anonymous listener implementations of the original tests are
 * factored into two private factory methods so each test reads as intent only.
 *
 * @author Gabriel Aldam (galdam@ebi.ac.uk)
 * @since 02/08/13
 */
public class BasicExperimentEnricherTest {

    Publication persistentPublication = null;
    Experiment persistentExperiment = null;
    MinimalExperimentEnricher experimentEnricher;

    @Before
    public void setup(){
        persistentPublication = new DefaultPublication();
        persistentExperiment = new DefaultExperiment(persistentPublication);
        experimentEnricher = new MinimalExperimentEnricher();
    }

    /**
     * Builds a listener that asserts a successful enrichment of
     * {@code persistentExperiment} and fails the test if any other
     * callback (error, update, add or remove) fires.
     */
    private ExperimentEnricherListener failOnAnyChangeListener() {
        return new ExperimentEnricherListener() {
            public void onEnrichmentComplete(Experiment experiment, EnrichmentStatus status, String message) {
                assertTrue(experiment == persistentExperiment);
                assertEquals(EnrichmentStatus.SUCCESS, status);
            }
            public void onEnrichmentError(Experiment object, String message, Exception e) {
                Assert.fail();
            }
            public void onPublicationUpdate(Experiment experiment, Publication oldPublication) {
                Assert.fail();
            }
            public void onInteractionDetectionMethodUpdate(Experiment experiment, CvTerm oldCv) {
                Assert.fail();
            }
            public void onHostOrganismUpdate(Experiment experiment, Organism oldOrganism) {
                Assert.fail();
            }
            public void onAddedVariableParameter(Experiment o, VariableParameter added) {
                Assert.fail();
            }
            public void onRemovedVariableParameter(Experiment o, VariableParameter removed) {
                Assert.fail();
            }
            public void onAddedAnnotation(Experiment o, Annotation added) {
                Assert.fail();
            }
            public void onRemovedAnnotation(Experiment o, Annotation removed) {
                Assert.fail();
            }
            public void onAddedConfidence(Experiment o, Confidence added) {
                Assert.fail();
            }
            public void onRemovedConfidence(Experiment o, Confidence removed) {
                Assert.fail();
            }
            public void onAddedXref(Experiment o, Xref added) {
                Assert.fail();
            }
            public void onRemovedXref(Experiment o, Xref removed) {
                Assert.fail();
            }
        };
    }

    /**
     * Builds a listener that asserts a successful enrichment of
     * {@code persistentExperiment}, fails on enrichment errors, and
     * silently ignores all other callbacks.
     */
    private ExperimentEnricherListener successCheckingListener() {
        return new ExperimentEnricherListener() {
            public void onEnrichmentComplete(Experiment experiment, EnrichmentStatus status, String message) {
                assertTrue(experiment == persistentExperiment);
                assertEquals(EnrichmentStatus.SUCCESS, status);
            }
            public void onEnrichmentError(Experiment object, String message, Exception e) {
                Assert.fail();
            }
            public void onPublicationUpdate(Experiment experiment, Publication oldPublication) {
            }
            public void onInteractionDetectionMethodUpdate(Experiment experiment, CvTerm oldCv) {
            }
            public void onHostOrganismUpdate(Experiment experiment, Organism oldOrganism) {
            }
            public void onAddedVariableParameter(Experiment o, VariableParameter added) {
            }
            public void onRemovedVariableParameter(Experiment o, VariableParameter removed) {
            }
            public void onAddedAnnotation(Experiment o, Annotation added) {
            }
            public void onRemovedAnnotation(Experiment o, Annotation removed) {
            }
            public void onAddedConfidence(Experiment o, Confidence added) {
            }
            public void onRemovedConfidence(Experiment o, Confidence removed) {
            }
            public void onAddedXref(Experiment o, Xref added) {
            }
            public void onRemovedXref(Experiment o, Xref removed) {
            }
        };
    }

    /**
     * Show that the experiment enricher does not fail if the enrichers are not present.
     * @throws EnricherException
     */
    @Test
    public void test_enriching_an_experiment_without_fields_or_enrichers()
            throws EnricherException {
        experimentEnricher.enrich(persistentExperiment);
    }

    /**
     * Show that even when fields are null but enrichers are present, the enrichment is still successful.
     * @throws EnricherException
     */
    @Test
    public void test_enriching_an_experiment_without_fields()
            throws EnricherException {
        experimentEnricher.setCvTermEnricher(
                new MinimalCvTermEnricher(
                        new MockCvTermFetcher()));
        experimentEnricher.setPublicationEnricher(
                new MinimalPublicationEnricher(
                        new MockPublicationFetcher()));
        experimentEnricher.setExperimentEnricherListener(new ExperimentEnricherListenerManager(
                failOnAnyChangeListener()));
        experimentEnricher.enrich(persistentExperiment);
    }

    @Test
    public void test_enriching_an_experiment_with_an_interaction_detection_method()
            throws EnricherException {
        experimentEnricher.setCvTermEnricher(
                new MinimalCvTermEnricher(
                        new MockCvTermFetcher()));
        experimentEnricher.setExperimentEnricherListener(new ExperimentEnricherListenerManager(
                failOnAnyChangeListener()));
        persistentExperiment.setInteractionDetectionMethod(new DefaultCvTerm("other short name"));
        experimentEnricher.enrich(persistentExperiment);
    }

    @Test
    public void test_enriching_an_experiment_with_a_publication()
            throws EnricherException {
        experimentEnricher.setPublicationEnricher(
                new MinimalPublicationEnricher(
                        new MockPublicationFetcher()));
        experimentEnricher.setExperimentEnricherListener(new ExperimentEnricherListenerManager(
                failOnAnyChangeListener()));
        experimentEnricher.enrich(persistentExperiment);
    }

    @Test
    public void test_enriching_an_experiment_with_an_organism()
            throws EnricherException {
        experimentEnricher.setExperimentEnricherListener(new ExperimentEnricherListenerManager(
                successCheckingListener()));
        experimentEnricher.enrich(persistentExperiment);
    }
}
|
searKing/sole
|
pkg/appinfo/version.go
|
// Copyright 2021 The searKing Author. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package appinfo
import "github.com/searKing/golang/go/version"
var (
	// Version
	// NOTE: The $Format strings are replaced during 'git archive' thanks to the
	// companion .gitattributes file containing 'export-subst' in this same
	// directory. See also https://git-scm.com/docs/gitattributes
	Version = "v0.0.0-master+$Format:%h$" // git describe --long --tags --dirty --tags --always
	BuildTime = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
	GitHash = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
	ServiceName = "" // service name
	ServiceDisplayName = "" // service display (full) name
	ServiceDescription = "" // service description
	ServiceId = "" // service instance ID
)
// GetVersion reports the build-time metadata baked into this package
// (raw version string, build timestamp and git commit hash) as a
// version.Version value.
func GetVersion() version.Version {
	var v version.Version
	v.RawVersion = Version
	v.BuildTime = BuildTime
	v.GitHash = GitHash
	return v
}
|
IanClark-fullStack/Outright
|
client/src/components/JailStat.js
|
import { useState, useEffect } from 'react';
import search from '../utils/API';
export default function JailStat({ currLocation, queryString }) {
const findCountyData = async (url) => {
const [countyData, setCountyData] = useState({
loading: true,
flip_code: undefined,
last_update: undefined,
name: undefined,
state_name: undefined,
place_type: undefined,
title: undefined,
resident_population: undefined,
incarceration: undefined,
error: undefined,
});
useEffect(() => {
readRemoteFile(url, {
complete: (results, file) => {
console.log("Row data:", results.data);
setCountyData(...results.data);
}
})
}, [url]);
return await rows;
}
// useEffect(() => {
// const countyRes = getCountyData(county, state)
// console.log(countyRes);
// setCountyData(...countyRes)
// console.log(countyData);
// return countyData;
// }, [countyData]);
// console.log(countyData);
// const countyResults = useData(getCountyData(userCoords.countyLocation, userCoords.stateLocation));
let url =`https://raw.githubusercontent.com/vera-institute/jail-population-data/master/jail_population.csv`;
const rows = findCountyData(url);
return (
<div>
<h1>
{findJailData(queryString)}
</h1>
</div>
)
}
|
cuiwow/quantum
|
quantum/db/servicetype_db.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, VMware
#
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.sql import expression as expr
from quantum.common import exceptions as q_exc
from quantum import context
from quantum.db import api as db
from quantum.db import model_base
from quantum.db import models_v2
from quantum.openstack.common import cfg
from quantum.openstack.common import log as logging
from quantum import policy
LOG = logging.getLogger(__name__)
DEFAULT_SVCTYPE_NAME = 'default'
default_servicetype_opts = [
cfg.StrOpt('description',
default='',
help=_('Textual description for the default service type')),
cfg.MultiStrOpt('service_definition',
help=_('Defines a provider for an advanced service '
'using the format: <service>:<plugin>[:<driver>]'))
]
cfg.CONF.register_opts(default_servicetype_opts, 'DEFAULT_SERVICETYPE')
def parse_service_definition_opt():
    """Parse the configured service definitions and return them as dicts.

    Each configured entry has the form ``<service>:<plugin>[:<driver>]``.
    The returned dicts always carry 'service_class' and 'plugin' keys and,
    when a driver was specified, a 'driver' key as well.
    """
    svc_def_opt = cfg.CONF.DEFAULT_SERVICETYPE.service_definition
    parsed_defs = []
    try:
        for entry in svc_def_opt:
            parts = entry.split(':')
            definition = {'service_class': parts[0],
                          'plugin': parts[1]}
            if len(parts) > 2:
                definition['driver'] = parts[2]
            else:
                # Driver is optional; just note its absence.
                LOG.debug(_("Default service type - no driver for service "
                            "%(service_class)s and plugin %(plugin)s"),
                          definition)
            parsed_defs.append(definition)
        return parsed_defs
    except (TypeError, IndexError):
        # Option missing/not iterable (TypeError) or an entry without the
        # mandatory <service>:<plugin> pair (IndexError).
        raise q_exc.InvalidConfigurationOption(opt_name='service_definition',
                                               opt_value=svc_def_opt)
class NoDefaultServiceDefinition(q_exc.QuantumException):
    # Raised at manager start-up when no service_definition entries are
    # present in the configuration file.
    message = _("No default service definition in configuration file. "
                "Please add service definitions using the service_definition "
                "variable in the [DEFAULT_SERVICETYPE] section")
class ServiceTypeNotFound(q_exc.NotFound):
    # Lookup by id found no matching service type row.
    message = _("Service type %(service_type_id)s could not be found ")
class ServiceTypeInUse(q_exc.InUse):
    # Deletion refused because the refcount (num_instances) is non-zero.
    message = _("There are still active instances of service type "
                "'%(service_type_id)s'. Therefore it cannot be removed.")
class ServiceDefinition(model_base.BASEV2, models_v2.HasId):
    # One <service_class, plugin[, driver]> binding belonging to a
    # ServiceType row; (service_class, service_type_id) is the primary key,
    # and rows are removed with their parent via ON DELETE CASCADE.
    service_class = sa.Column(sa.String(255), primary_key=True)
    plugin = sa.Column(sa.String(255))
    driver = sa.Column(sa.String(255))
    service_type_id = sa.Column(sa.String(36),
                                sa.ForeignKey('servicetypes.id',
                                              ondelete='CASCADE'),
                                primary_key=True)
class ServiceType(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Service Type Object Model."""
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    default = sa.Column(sa.Boolean(), nullable=False, default=False)
    service_definitions = orm.relationship(ServiceDefinition,
                                           backref='servicetypes',
                                           lazy='joined',
                                           cascade='all')
    # Keep track of number of instances for this service type
    num_instances = sa.Column(sa.Integer(), default=0)

    def as_dict(self):
        """Return this row as a plain column-name -> value dict."""
        return dict((column.name, getattr(self, column.name))
                    for column in self.__table__.columns)
class ServiceTypeManager(object):
    """Manage service type objects in the Quantum database."""

    _instance = None

    @classmethod
    def get_instance(cls):
        # Lazily-created process-wide singleton.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        self._initialize_db()
        ctx = context.get_admin_context()
        # Init default service type from configuration file
        svc_defs = cfg.CONF.DEFAULT_SERVICETYPE.service_definition
        if not svc_defs:
            raise NoDefaultServiceDefinition()
        def_service_type = {'name': DEFAULT_SVCTYPE_NAME,
                            'description':
                            cfg.CONF.DEFAULT_SERVICETYPE.description,
                            'service_definitions':
                            parse_service_definition_opt(),
                            'default': True}
        # Create or update record in database
        def_svc_type_db = self._get_default_service_type(ctx)
        if not def_svc_type_db:
            def_svc_type_db = self._create_service_type(ctx, def_service_type)
        else:
            self._update_service_type(ctx,
                                      def_svc_type_db['id'],
                                      def_service_type,
                                      svc_type_db=def_svc_type_db)
        LOG.debug(_("Default service type record updated in Quantum database. "
                    "identifier is '%s'"), def_svc_type_db['id'])

    def _initialize_db(self):
        db.configure_db()
        # Register models for service type management
        # Note this might have been already done if configure_db also
        # created the engine
        db.register_models(models_v2.model_base.BASEV2)

    def _create_service_type(self, context, service_type):
        """Persist a new ServiceType row plus its service definitions."""
        svc_defs = service_type.pop('service_definitions')
        with context.session.begin(subtransactions=True):
            svc_type_db = ServiceType(**service_type)
            # and now insert provided service type definitions
            for svc_def in svc_defs:
                svc_type_db.service_definitions.append(
                    ServiceDefinition(**svc_def))
            # sqlalchemy save-update on relationship is on by
            # default, the following will save both the service
            # type and its service definitions
            context.session.add(svc_type_db)
        return svc_type_db

    def _update_service_type(self, context, id, service_type,
                             svc_type_db=None):
        """Update a ServiceType row, reconciling its service definitions.

        Definitions present in the request update/replace the stored ones;
        stored definitions absent from the request are deleted; definitions
        only in the request are added.
        """
        with context.session.begin(subtransactions=True):
            if not svc_type_db:
                svc_type_db = self._get_service_type(context, id)
            try:
                # Key incoming definitions by service class to match them
                # against the stored ones.
                # BUG FIX: definitions (see parse_service_definition_opt)
                # carry the 'service_class' key; the previous lookup of
                # 'service' always raised KeyError, emptying the map and
                # silently deleting every stored definition.
                svc_defs_map = dict([(svc_def['service_class'], svc_def)
                                     for svc_def in
                                     service_type.pop('service_definitions')])
            except KeyError:
                # No service defs in request
                svc_defs_map = {}
            svc_type_db.update(service_type)
            for svc_def_db in svc_type_db.service_definitions:
                try:
                    svc_def_db.update(svc_defs_map.pop(
                        svc_def_db['service_class']))
                except KeyError:
                    # The service def was not in the request,
                    # so it must be deleted.
                    context.session.delete(svc_def_db)
            # Add remaining service definitions.
            # BUG FIX: iterate over the dict values; iterating the dict
            # itself yields string keys, so ServiceDefinition(**svc_def)
            # raised TypeError for every new definition.
            for svc_def in svc_defs_map.values():
                context.session.add(ServiceDefinition(**svc_def))
        return svc_type_db

    def _check_service_type_view_auth(self, context, service_type):
        # FIXME(salvatore-orlando): This should be achieved via policy
        # engine without need for explicit checks in manager code.
        # Also, the policy in this way does not make a lot of sense
        return policy.check(context,
                            "extension:service_type:view_extended",
                            service_type)

    def _get_service_type(self, context, svc_type_id):
        """Return the ServiceType row for svc_type_id or raise NotFound."""
        try:
            query = context.session.query(ServiceType)
            return query.filter(ServiceType.id == svc_type_id).one()
        # filter is on primary key, do not catch MultipleResultsFound
        except orm_exc.NoResultFound:
            raise ServiceTypeNotFound(service_type_id=svc_type_id)

    def _get_default_service_type(self, context):
        """Return the default ServiceType row, or None if absent."""
        try:
            query = context.session.query(ServiceType)
            return query.filter(ServiceType.default == expr.true()).one()
        except orm_exc.NoResultFound:
            return
        except orm_exc.MultipleResultsFound:
            # This should never happen. If it does, take the first instance
            query2 = context.session.query(ServiceType)
            results = query2.filter(ServiceType.default == expr.true()).all()
            LOG.warning(_("Multiple default service type instances found."
                          "Will use instance '%s'"), results[0]['id'])
            return results[0]

    def _make_svc_type_dict(self, context, svc_type, fields=None):
        """Serialize a ServiceType row, hiding privileged fields.

        plugin/driver details and num_instances are included only when the
        caller passes the view_extended policy check.
        """
        def _make_svc_def_dict(svc_def_db):
            svc_def = {'service_class': svc_def_db['service_class']}
            if self._check_service_type_view_auth(context,
                                                  svc_type.as_dict()):
                svc_def.update({'plugin': svc_def_db['plugin'],
                                'driver': svc_def_db['driver']})
            return svc_def
        res = {'id': svc_type['id'],
               'name': svc_type['name'],
               'default': svc_type['default'],
               'service_definitions':
               [_make_svc_def_dict(svc_def) for svc_def
                in svc_type['service_definitions']]}
        if self._check_service_type_view_auth(context,
                                              svc_type.as_dict()):
            res['num_instances'] = svc_type['num_instances']
        # Field selection
        if fields:
            return dict(((k, v) for k, v in res.iteritems()
                         if k in fields))
        return res

    def get_service_type(self, context, id, fields=None):
        """ Retrieve a service type record """
        return self._make_svc_type_dict(context,
                                        self._get_service_type(context, id),
                                        fields)

    def get_service_types(self, context, fields=None, filters=None):
        """ Retrieve a possibly filtered list of service types """
        query = context.session.query(ServiceType)
        if filters:
            for key, value in filters.iteritems():
                column = getattr(ServiceType, key, None)
                if column:
                    query = query.filter(column.in_(value))
        return [self._make_svc_type_dict(context, svc_type, fields)
                for svc_type in query.all()]

    def create_service_type(self, context, service_type):
        """ Create a new service type """
        svc_type_data = service_type['service_type']
        svc_type_db = self._create_service_type(context, svc_type_data)
        LOG.debug(_("Created service type object:%s"), svc_type_db['id'])
        return self._make_svc_type_dict(context, svc_type_db)

    def update_service_type(self, context, id, service_type):
        """ Update a service type """
        svc_type_data = service_type['service_type']
        svc_type_db = self._update_service_type(context, id,
                                                svc_type_data)
        return self._make_svc_type_dict(context, svc_type_db)

    def delete_service_type(self, context, id):
        """ Delete a service type """
        # Verify that the service type is not in use.
        svc_type_db = self._get_service_type(context, id)
        if svc_type_db['num_instances'] > 0:
            raise ServiceTypeInUse(service_type_id=svc_type_db['id'])
        with context.session.begin(subtransactions=True):
            context.session.delete(svc_type_db)

    def increase_service_type_refcount(self, context, id):
        """ Increase references count for a service type object

        This method should be invoked by plugins using the service
        type concept everytime an instance of an object associated
        with a given service type is created.
        """
        #TODO(salvatore-orlando): Devise a better solution than this
        #refcount mechanisms. Perhaps adding hooks into models which
        #use service types in order to enforce ref. integrity and cascade
        with context.session.begin(subtransactions=True):
            svc_type_db = self._get_service_type(context, id)
            svc_type_db['num_instances'] = svc_type_db['num_instances'] + 1
        return svc_type_db['num_instances']

    def decrease_service_type_refcount(self, context, id):
        """ Decrease references count for a service type object

        This method should be invoked by plugins using the service
        type concept everytime an instance of an object associated
        with a given service type is removed
        """
        #TODO(salvatore-orlando): Devise a better solution than this
        #refcount mechanisms. Perhaps adding hooks into models which
        #use service types in order to enforce ref. integrity and cascade
        with context.session.begin(subtransactions=True):
            svc_type_db = self._get_service_type(context, id)
            if svc_type_db['num_instances'] == 0:
                LOG.warning(_("Number of instances for service type "
                              "'%s' is already 0."), svc_type_db['name'])
                return
            svc_type_db['num_instances'] = svc_type_db['num_instances'] - 1
        return svc_type_db['num_instances']
|
hdost/opentelemetry-java
|
api/all/src/main/java/io/opentelemetry/api/metrics/ObservableLongMeasurement.java
|
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.api.metrics;
import io.opentelemetry.api.common.Attributes;
/** An interface for observing measurements with {@code long} values. */
public interface ObservableLongMeasurement extends ObservableMeasurement {
  /**
   * Records a measurement.
   *
   * <p>This default implementation simply delegates to {@link #record(long)}.
   *
   * @param value The measurement amount.
   * @deprecated Use {@link #record(long)}.
   */
  @Deprecated
  default void observe(long value) {
    record(value);
  }
  /**
   * Records a measurement with a set of attributes.
   *
   * <p>This default implementation simply delegates to {@link #record(long, Attributes)}.
   *
   * @param value The measurement amount.
   * @param attributes A set of attributes to associate with the count.
   * @deprecated Use {@link #record(long, Attributes)}.
   */
  @Deprecated
  default void observe(long value, Attributes attributes) {
    record(value, attributes);
  }
  /**
   * Records a measurement.
   *
   * @param value The measurement amount.
   */
  void record(long value);
  /**
   * Records a measurement with a set of attributes.
   *
   * @param value The measurement amount.
   * @param attributes A set of attributes to associate with the count.
   */
  void record(long value, Attributes attributes);
}
|
objectiser/overlord-commons
|
overlord-commons-gwt/src/main/java/org/overlord/commons/gwt/client/local/widgets/ParagraphLabel.java
|
<gh_stars>0
/*
* Copyright 2013 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.overlord.commons.gwt.client.local.widgets;
import com.google.gwt.dom.client.Document;
import com.google.gwt.user.client.ui.HasText;
import com.google.gwt.user.client.ui.Widget;
/**
* Label with a 'p' tag as the root element instead of a div or a span.
*
* @author <EMAIL>
*/
public class ParagraphLabel extends Widget implements HasText {
/**
* Creates an empty label.
*/
public ParagraphLabel() {
setElement(Document.get().createPElement());
}
/**
* Creates a label with the specified text.
*
* @param text the new label's text
*/
public ParagraphLabel(String text) {
this();
setText(text);
}
/**
* @see com.google.gwt.user.client.ui.HasText#getText()
*/
@Override
public String getText() {
return getElement().getInnerText();
}
/**
* @see com.google.gwt.user.client.ui.HasText#setText(java.lang.String)
*/
@Override
public void setText(String text) {
getElement().setInnerText(text);
}
}
|
hardikpthv/lion
|
packages/select-rich/test/lion-select-rich-interaction.test.js
|
import { Required } from '@lion/form-core';
import { expect, html, triggerBlurFor, triggerFocusFor, fixture } from '@open-wc/testing';
import '@lion/core/src/differentKeyEventNamesShimIE.js';
import '@lion/listbox/lion-option.js';
import '@lion/listbox/lion-options.js';
import '../lion-select-rich.js';
// Interaction tests for <lion-select-rich>: keyboard navigation,
// disabled behaviour, interaction states, and ARIA validity reflection.
describe('lion-select-rich interactions', () => {
  describe('Keyboard navigation', () => {
    it('navigates to first and last option with [Home] and [End] keys', async () => {
      const el = await fixture(html`
        <lion-select-rich opened interaction-mode="windows/linux">
          <lion-options slot="input" name="foo">
            <lion-option .choiceValue=${10}>Item 1</lion-option>
            <lion-option .choiceValue=${20}>Item 2</lion-option>
            <lion-option .choiceValue=${30} checked>Item 3</lion-option>
            <lion-option .choiceValue=${40}>Item 4</lion-option>
          </lion-options>
        </lion-select-rich>
      `);
      // Pre-checked third option determines the initial modelValue.
      expect(el.modelValue).to.equal(30);
      el._listboxNode.dispatchEvent(new KeyboardEvent('keydown', { key: 'Home' }));
      expect(el.modelValue).to.equal(10);
      el._listboxNode.dispatchEvent(new KeyboardEvent('keydown', { key: 'End' }));
      expect(el.modelValue).to.equal(40);
    });
  });

  describe('Keyboard navigation Windows', () => {
    it('navigates through list with [ArrowDown] [ArrowUp] keys activates and checks the option', async () => {
      // Asserts exactly one option (at selectedIndex) is checked.
      function expectOnlyGivenOneOptionToBeChecked(options, selectedIndex) {
        options.forEach((option, i) => {
          if (i === selectedIndex) {
            expect(option.checked).to.be.true;
          } else {
            expect(option.checked).to.be.false;
          }
        });
      }
      const el = await fixture(html`
        <lion-select-rich opened interaction-mode="windows/linux">
          <lion-options slot="input">
            <lion-option .choiceValue=${10}>Item 1</lion-option>
            <lion-option .choiceValue=${20}>Item 2</lion-option>
            <lion-option .choiceValue=${30}>Item 3</lion-option>
          </lion-options>
        </lion-select-rich>
      `);
      const options = Array.from(el.querySelectorAll('lion-option'));
      expect(el.activeIndex).to.equal(0);
      expect(el.checkedIndex).to.equal(0);
      expectOnlyGivenOneOptionToBeChecked(options, 0);
      // In windows/linux mode arrow keys move activeIndex AND checkedIndex
      // while the listbox is opened (keydown on the listbox node).
      el._listboxNode.dispatchEvent(new KeyboardEvent('keydown', { key: 'ArrowDown' }));
      expect(el.activeIndex).to.equal(1);
      expect(el.checkedIndex).to.equal(1);
      expectOnlyGivenOneOptionToBeChecked(options, 1);
      el._listboxNode.dispatchEvent(new KeyboardEvent('keydown', { key: 'ArrowUp' }));
      expect(el.activeIndex).to.equal(0);
      expect(el.checkedIndex).to.equal(0);
      expectOnlyGivenOneOptionToBeChecked(options, 0);
    });

    it('navigates through list with [ArrowDown] [ArrowUp] keys checks the option while listbox unopened', async () => {
      // Same helper as above; duplicated per-test to keep each `it` self-contained.
      function expectOnlyGivenOneOptionToBeChecked(options, selectedIndex) {
        options.forEach((option, i) => {
          if (i === selectedIndex) {
            expect(option.checked).to.be.true;
          } else {
            expect(option.checked).to.be.false;
          }
        });
      }
      const el = await fixture(html`
        <lion-select-rich interaction-mode="windows/linux">
          <lion-options slot="input">
            <lion-option .choiceValue=${10}>Item 1</lion-option>
            <lion-option .choiceValue=${20}>Item 2</lion-option>
            <lion-option .choiceValue=${30}>Item 3</lion-option>
          </lion-options>
        </lion-select-rich>
      `);
      const options = Array.from(el.querySelectorAll('lion-option'));
      expect(el.checkedIndex).to.equal(0);
      expectOnlyGivenOneOptionToBeChecked(options, 0);
      // Closed listbox: arrow handling listens for keyup on the host element
      // itself (not keydown on the listbox node as in the opened case).
      el.dispatchEvent(new KeyboardEvent('keyup', { key: 'ArrowDown' }));
      expect(el.checkedIndex).to.equal(1);
      expectOnlyGivenOneOptionToBeChecked(options, 1);
      el.dispatchEvent(new KeyboardEvent('keyup', { key: 'ArrowUp' }));
      expect(el.checkedIndex).to.equal(0);
      expectOnlyGivenOneOptionToBeChecked(options, 0);
    });
  });

  describe('Disabled', () => {
    it('cannot be focused if disabled', async () => {
      const el = await fixture(html`
        <lion-select-rich disabled>
          <lion-options slot="input"></lion-options>
        </lion-select-rich>
      `);
      // tabIndex -1 removes the invoker from the tab order.
      expect(el._invokerNode.tabIndex).to.equal(-1);
    });

    it('cannot be opened via click if disabled', async () => {
      const el = await fixture(html`
        <lion-select-rich disabled>
          <lion-options slot="input"></lion-options>
        </lion-select-rich>
      `);
      el._invokerNode.click();
      expect(el.opened).to.be.false;
    });

    it('reflects disabled attribute to invoker', async () => {
      const el = await fixture(html`
        <lion-select-rich disabled>
          <lion-options slot="input"></lion-options>
        </lion-select-rich>
      `);
      expect(el._invokerNode.hasAttribute('disabled')).to.be.true;
      // Removing the attribute must propagate on the next update cycle.
      el.removeAttribute('disabled');
      await el.updateComplete;
      expect(el._invokerNode.hasAttribute('disabled')).to.be.false;
    });
  });

  describe('Interaction states', () => {
    it('becomes touched if blurred once', async () => {
      const el = await fixture(html`
        <lion-select-rich>
          <lion-options slot="input">
            <lion-option .choiceValue=${10}>Item 1</lion-option>
            <lion-option .choiceValue=${20}>Item 2</lion-option>
          </lion-options>
        </lion-select-rich>
      `);
      expect(el.touched).to.be.false;
      // A single focus/blur cycle on the invoker marks the field as touched.
      await triggerFocusFor(el._invokerNode);
      await triggerBlurFor(el._invokerNode);
      expect(el.touched).to.be.true;
    });
  });

  describe('Accessibility', () => {
    it('sets [aria-invalid="true"] to "._invokerNode" when there is an error', async () => {
      const el = await fixture(html`
        <lion-select-rich .validators=${[new Required()]}>
          <lion-options slot="input">
            <lion-option .choiceValue=${null}>Please select a value</lion-option>
            <lion-option .modelValue=${{ value: 10, checked: true }}>Item 1</lion-option>
          </lion-options>
        </lion-select-rich>
      `);
      const invokerNode = el._invokerNode;
      const options = el.querySelectorAll('lion-option');
      // Wait for both validation feedback and rendering before each check.
      await el.feedbackComplete;
      await el.updateComplete;
      expect(invokerNode.getAttribute('aria-invalid')).to.equal('false');
      // Checking the null-valued option triggers the Required validator.
      options[0].checked = true;
      await el.feedbackComplete;
      await el.updateComplete;
      expect(invokerNode.getAttribute('aria-invalid')).to.equal('true');
      options[1].checked = true;
      await el.feedbackComplete;
      await el.updateComplete;
      expect(invokerNode.getAttribute('aria-invalid')).to.equal('false');
    });
  });
});
|
SebastianBienert/ProjectsMap
|
Android/ProjectsMap/app/src/main/java/project/projectsmap/FetchDataMap.java
|
package project.projectsmap;
import android.content.Context;
import android.os.AsyncTask;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.json.JSONTokener;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import javax.net.ssl.HttpsURLConnection;
/**
 * Background task that downloads a company's buildings and, for each
 * building, its floors from the web API, then hands the results to the
 * hosting {@link ShowMapActivity} on the UI thread.
 *
 * Created by Mateusz on 26.03.2018.
 */
public class FetchDataMap extends AsyncTask<Void, Void, Void> {
    // Raw JSON body of the most recent request (kept as a field for
    // compatibility with existing callers/tests that read it).
    String data = "";
    String numberId;
    String numberCompanyId;
    String webApiURL = "https://67a04196.ngrok.io";
    String token = "";
    ArrayList<Building> buildingsList = new ArrayList<Building>();
    ArrayList<Floor> floorsList = new ArrayList<Floor>();
    Context context;

    /** Bearer token sent on every API request. */
    public void setToken(String token_) { token = token_; }

    /** Activity that receives the results in {@link #onPostExecute}. */
    public void setContext(Context context) {
        this.context = context;
    }

    /** Company id used to build the /api/company/{id}/buildings request. */
    public void setNumberId(String number) {
        numberId = number;
    }

    public void setNumberCompanyId(String numberCompanyId) {
        this.numberCompanyId = numberCompanyId;
    }

    /**
     * Performs an authorized GET request and returns the full response body.
     *
     * Fixes two defects of the previous inline implementation: the read
     * loop no longer appends the terminating {@code null} from
     * {@code readLine()} (which left a literal "null" suffix on the body),
     * and the reader/connection are now always released.
     *
     * @param requestUrl absolute URL to fetch
     * @return response body with line separators stripped
     * @throws IOException on connection or read failure
     */
    private String fetchJson(String requestUrl) throws IOException {
        URL url = new URL(requestUrl);
        HttpsURLConnection connection = (HttpsURLConnection) url.openConnection();
        connection.addRequestProperty("Content-Type", "application/x-www-form-urlencoded");
        connection.addRequestProperty("Authorization", "Bearer " + token);
        BufferedReader reader = null;
        try {
            reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
            StringBuilder body = new StringBuilder();
            String line;
            while ((line = reader.readLine()) != null) {
                body.append(line);
            }
            return body.toString();
        } finally {
            if (reader != null) {
                reader.close();
            }
            connection.disconnect();
        }
    }

    @Override
    protected Void doInBackground(Void... voids) {
        try {
            // The buildings endpoint may answer with a single object or an
            // array; handle both shapes.
            data = fetchJson(webApiURL + "/api/company/" + numberId + "/buildings");
            Object json = new JSONTokener(data).nextValue();
            if (json instanceof JSONObject) {
                buildingsList.add(new Building(new JSONObject(data)));
            } else if (json instanceof JSONArray) {
                JSONArray JA = new JSONArray(data);
                for (int i = 0; i < JA.length(); i++) {
                    buildingsList.add(new Building((JSONObject) JA.get(i)));
                }
            }
            // Fetch every floor referenced by every building.
            for (int i = 0; i < buildingsList.size(); i++) {
                for (int j = 0; j < buildingsList.get(i).Floors.size(); j++) {
                    data = fetchJson(webApiURL + "/api/floor/" + buildingsList.get(i).Floors.get(j));
                    Object jsonFloor = new JSONTokener(data).nextValue();
                    if (jsonFloor instanceof JSONObject) {
                        floorsList.add(new Floor(new JSONObject(data)));
                    } else if (jsonFloor instanceof JSONArray) {
                        JSONArray JA = new JSONArray(data);
                        for (int n = 0; n < JA.length(); n++) {
                            floorsList.add(new Floor((JSONObject) JA.get(n)));
                        }
                    }
                }
            }
        } catch (IOException e) {
            // MalformedURLException is an IOException, so one catch suffices.
            e.printStackTrace();
        } catch (JSONException e) {
            e.printStackTrace();
        }
        return null;
    }

    @Override
    protected void onPostExecute(Void aVoid) {
        super.onPostExecute(aVoid);
        // NOTE(review): both lists are initialized at construction, so this
        // null check can never fail; kept to preserve existing behavior.
        if (buildingsList != null && floorsList != null) { // for testing only
            ((ShowMapActivity) context).setArrayBulindings(buildingsList);
            ((ShowMapActivity) context).setArrayFloors(floorsList);
        } else {
            ((ShowMapActivity) context).setStatement("Brak budynków dla wybranej firmy");
        }
        ((ShowMapActivity) context).DisableProgressBar();
    }
}
|
AsahiOS/gate
|
usr/src/uts/sun4v/sys/machthread.h
|
<filename>usr/src/uts/sun4v/sys/machthread.h
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * sun4v thread/CPU assembler macros: locate the current CPU id, the
 * per-CPU structure, and its physical address from trap/assembly code.
 */
#ifndef _SYS_MACHTHREAD_H
#define _SYS_MACHTHREAD_H
#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/asi.h>
#include <sys/sun4asi.h>
#include <sys/machasi.h>
#include <sys/bitmap.h>
#ifdef	__cplusplus
extern "C" {
#endif
#ifdef	_ASM
#define	THREAD_REG	%g7		/* pointer to current thread data */
/*
 * CPU_INDEX(r, scr)
 * Returns cpu id in r.
 *
 * On sun4v the cpu id is held in a per-strand scratchpad register and is
 * read through ASI_SCRATCHPAD at offset SCRATCHPAD_CPUID; scr is clobbered.
 */
#define	CPU_INDEX(r, scr)		\
	mov	SCRATCHPAD_CPUID, scr;	\
	ldxa	[scr]ASI_SCRATCHPAD, r
/*
 * Given a cpu id extract the appropriate word
 * in the cpuset mask for this cpu id.
 *
 * On return: base points at the cpuset word containing this cpu's bit and
 * index holds the bit position within that word; scr is scratch.  When a
 * cpuset fits in one long the macro is a no-op.
 */
#if	CPUSET_SIZE > CLONGSIZE
#define	CPU_INDEXTOSET(base, index, scr)	\
	srl	index, BT_ULSHIFT, scr;		\
	and	index, BT_ULMASK, index;	\
	sll	scr, CLONGSHIFT, scr;		\
	add	base, scr, base
#else
#define	CPU_INDEXTOSET(base, index, scr)
#endif	/* CPUSET_SIZE */
/*
 * Assembly macro to find address of the current CPU.
 * Used when coming in from a user trap - cannot use THREAD_REG.
 * Args are destination register and one scratch register.
 */
#define	CPU_ADDR(reg, scr) 		\
	.global	cpu;			\
	CPU_INDEX(scr, reg);		\
	sll	scr, CPTRSHIFT, scr;	\
	set	cpu, reg;		\
	ldn	[reg + scr], reg
/*
 * log2(sizeof (uint64_t)): scales a cpu id into the cpu_pa array of
 * 64-bit physical addresses (see CPU_PADDR below).
 */
#define	CINT64SHIFT	3
/*
 * Assembly macro to find the physical address of the current CPU.
 * All memory references using VA must be limited to nucleus
 * memory to avoid any MMU side effect.
 */
#define	CPU_PADDR(reg, scr)		\
	.global cpu_pa;			\
	CPU_INDEX(scr, reg);		\
	sll	scr, CINT64SHIFT, scr;	\
	set	cpu_pa, reg;		\
	ldx	[reg + scr], reg
#endif	/* _ASM */
#ifdef	__cplusplus
}
#endif
#endif	/* _SYS_MACHTHREAD_H */
|
reubenjs/fat_free_crm
|
app/models/entities/contact.rb
|
<reponame>reubenjs/fat_free_crm
# Copyright (c) 2008-2013 <NAME> and contributors.
#
# Fat Free CRM is freely distributable under the terms of MIT license.
# See MIT-LICENSE file or http://www.opensource.org/licenses/mit-license.php
#------------------------------------------------------------------------------
# == Schema Information
#
# Table name: contacts
#
# id :integer not null, primary key
# user_id :integer
# lead_id :integer
# assigned_to :integer
# reports_to :integer
# first_name :string(64) default(""), not null
# last_name :string(64) default(""), not null
# access :string(8) default("Public")
# title :string(64)
# department :string(64)
# source :string(32)
# email :string(64)
# alt_email :string(64)
# phone :string(32)
# mobile :string(32)
# fax :string(32)
# blog :string(128)
# linkedin :string(128)
# facebook :string(128)
# twitter :string(128)
# born_on :date
# do_not_call :boolean default(FALSE), not null
# deleted_at :datetime
# created_at :datetime
# updated_at :datetime
# background_info :string(255)
# skype :string(128)
#
# CRM contact record.  Extends the stock Fat Free CRM model with
# church-specific concerns: event attendance, contact groups (BSGs),
# mailing-list subscriptions and Saasu (accounting) integration.
class Contact < ActiveRecord::Base
belongs_to :user
belongs_to :lead
belongs_to :assignee, :class_name => "User", :foreign_key => :assigned_to
belongs_to :reporting_user, :class_name => "User", :foreign_key => :reports_to
has_one :account_contact, :dependent => :destroy
has_one :account, :through => :account_contact
has_many :registrations, :dependent => :destroy
has_many :contact_opportunities, :dependent => :destroy
has_many :opportunities, :through => :contact_opportunities, :uniq => true, :order => "opportunities.id DESC"
has_many :tasks, :as => :asset, :dependent => :destroy
has_one :business_address, :dependent => :destroy, :as => :addressable, :class_name => "Address", :conditions => "address_type = 'Business'"
has_many :addresses, :dependent => :destroy, :as => :addressable, :class_name => "Address" # advanced search uses this
has_many :emails, :as => :mediator
has_many :contact_groups, :through => :memberships
has_many :memberships
##what about when you delete a contact? do you want to lose all attendance records?
#might be better to have an archive system for contacts so that deletion is only for
#contacts we really don't want to keep any trace of...
has_many :attendances, :dependent => :destroy
delegate :campaign, :to => :lead, :allow_nil => true
has_ransackable_associations %w(account opportunities tags activities emails addresses comments tasks contact_groups)
ransack_can_autocomplete
serialize :subscribed_users, Set
accepts_nested_attributes_for :business_address, :allow_destroy => true, :reject_if => proc {|attributes| Address.reject_address(attributes)}
scope :created_by, ->(user) { where( user_id: user.id ) }
scope :assigned_to, ->(user) { where( assigned_to: user.id ) }
scope :show_inactive, lambda {|inactive| where( "#{inactive ? "contacts.inactive = true" : "contacts.inactive = false OR contacts.inactive IS NULL"}") }
# Full-text search over names (all first/last permutations), preferred
# name, both email columns and both phone columns.
scope :text_search, ->(query) {
t = Contact.arel_table
# We can't always be sure that names are entered in the right order, so we must
# split the query into all possible first/last name permutations.
name_query = if query.include?(" ")
scope, *rest = query.name_permutations.map{ |first, last|
t[:first_name].matches("%#{first}%").and(t[:last_name].matches("%#{last}%"))
}
rest.map{|r| scope = scope.or(r)} if scope
scope
else
t[:first_name].matches("%#{query}%").or(t[:last_name].matches("%#{query}%")).or(t[:preferred_name].matches("%#{query}%"))
end
other = t[:email].matches("%#{query}%").or(t[:alt_email].matches("%#{query}%"))
other = other.or(t[:phone].matches("%#{query}%")).or(t[:mobile].matches("%#{query}%"))
where( name_query.nil? ? other : name_query.or(other) )
}
# Filter by account ids; the special 'other' key also matches contacts
# that have no account at all.
scope :state, lambda { |filters|
includes(:account_contact).where('account_contacts.account_id IN (?)' + (filters.delete('other') ? ' OR account_contacts.account_id IS NULL ' : ''), filters)
}
# Filter by assignee ids; 'other' also matches unassigned contacts.
scope :user_state, lambda { |filters|
where('contacts.assigned_to IN (?)' + (filters.delete('other') ? ' OR contacts.assigned_to IS NULL ' : ''), filters)
}
scope :in_accounts, lambda { |accounts|
includes(:account_contact).where('account_contacts.account_id IN (?)', accounts)
}
uses_user_permissions
acts_as_commentable
uses_comment_extensions
acts_as_taggable_on :tags
has_paper_trail :ignore => [ :subscribed_users ]
has_fields
exportable
sortable :by => [ "first_name ASC", "last_name ASC", "created_at DESC", "updated_at DESC" ], :default => "created_at DESC"
validates_presence_of :first_name, :message => :missing_first_name, :if => -> { Setting.require_first_names }
validates_presence_of :last_name, :message => :missing_last_name, :if => -> { Setting.require_last_names }
validate :users_for_shared_access
# Default values provided through class methods.
#----------------------------------------------------------------------------
def self.per_page ; 20 ; end
def self.first_name_position ; "before" ; end
# Display name.  With the default "before" format the mailing first name
# (custom field) is shown in parentheses when it differs from first_name;
# otherwise the preferred name, when present, is shown in parentheses.
#----------------------------------------------------------------------------
def full_name(format = nil)
if format.nil? || format == "before"
if !self.cf_mailing_first_name.blank? && self.cf_mailing_first_name != self.first_name
"#{self.first_name} #{self.last_name} (#{self.cf_mailing_first_name})"
else
"#{self.first_name} #{self.preferred_name.present? ? "(#{self.preferred_name}) " : ""}#{self.last_name}"
end
else
"#{self.last_name}, #{self.first_name} #{self.preferred_name.present? ? "(#{self.preferred_name})" : ""}"
end
end
alias :name :full_name
# True when subscribed to at least one weekly email and an email address
# is on file (a MailChimp subscription needs a deliverable address).
def has_mailchimp_subscription?
!self.cf_weekly_emails[0].blank? && !self.email.blank?
end
# Any subscription at all: MailChimp weekly emails or supporter emails.
def has_subscription?
has_mailchimp_subscription? || !self.cf_supporter_emails[0].blank?
end
# Start time of the most recent attendance at any active event of the
# given category; nil when the contact never attended one.
def last_attendance_at_event_category(event_type)
events = Event.show_inactive(false).find_all_by_category(event_type)
last_attendance = self.attendances.where('events.id IN (?)', events.each.map(&:id)).order('event_instances.starts_at DESC').includes(:event, :event_instance).first
last_time = last_attendance.event_instance.starts_at unless last_attendance.nil?
end
# 13-element array (one slot per teaching week) with a bullet in each
# week the contact attended; week numbers are scraped from the event
# instance name via /week (\d+)/ ($1 holds the captured number).
def attendance_by_week_at_event_category(event_type, semester = 1)
events = Event.show_inactive(false).find_all_by_category(event_type)
attendances = self.attendances.where('events.id IN (?) AND events.semester = ?', events.each.map(&:id), semester).order('event_instances.starts_at DESC').includes(:event, :event_instance)
attendance_array = Array.new(13){""} #will end up as something like ["", "", bullet, "" ...]
attendances.each do |a|
a.event_instance.name.scan(/week (\d+)/)
if $1
attendance_array[$1.to_i - 1] = "\u{2022}"
end
end
attendance_array
end
# Name suffix of the contact's current Bible study group.
# NOTE(review): the "BSG14S2-" prefix is hard-coded to 2014 semester 2;
# this presumably needs updating each semester — confirm intent.
def current_bsg
current_bsg = ""
groups = self.contact_groups.where(:inactive => false, :category => "bsg")
groups.each do |g|
if g.name.include?("BSG14S2-")
current_bsg = g.name.split("-")[2]
end
end
current_bsg
end
# True when the contact already holds a registration for the event.
def registered_for?(event_id)
self.registrations.map(&:event_id).include?(event_id)
end
# Backend handler for [Create New Contact] form (see contact/create).
# Saves the contact and optionally links it to an opportunity and/or a
# contact group passed in params.  Returns the save result (boolean).
#----------------------------------------------------------------------------
def save_with_account_and_permissions(params)
save_account(params)
result = self.save
self.opportunities << Opportunity.find(params[:opportunity]) unless params[:opportunity].blank?
self.contact_groups << ContactGroup.find(params[:contact_group]) unless params[:contact_group].blank?
#if has_mailchimp_subscription?
#  mailchimp_lists unless self.invalid?
#end
result
end
# Short human-readable summary of the contact's subscriptions, e.g.
# "subscriptions: Adl, TT"; empty string when there are none.
def subscriptions_in_words
if has_subscription?
subs = "subscriptions: "
items = [""]
if !self.cf_weekly_emails[0].blank?
items << "Adl" if self.cf_weekly_emails.include? "Adelaide"
items << "CE" if self.cf_weekly_emails.include? "City East"
items << "CW" if self.cf_weekly_emails.include? "City West"
end
if !self.cf_supporter_emails[0].blank?
items << "TT" if self.cf_supporter_emails.include? "TT Email"
items << "TT (mail)" if self.cf_supporter_emails.include? "TT Mail"
items << "PP" if self.cf_supporter_emails.include? "Prayer Points"
end
subs += items.length > 1 ? items.reject(&:blank?).join(", ") : items[0]
else
subs = ""
end
end
# Called when merging a duplicate into this contact: re-points all Saasu
# sale ("invoiceList") and purchase ("p") invoices from the duplicate's
# Saasu contact to this one, then deletes the duplicate in Saasu.
def merge_hook(duplicate)
if duplicate.saasu_uid.present?
invoices_for_contact = Saasu::Invoice.all(
:request_url => "invoiceList",
:contactUid => duplicate.saasu_uid,
:paidStatus => "all",
:invoiceDateFrom => "2000-01-01T00:00",
:invoiceDateTo => Date.today
)
invoices_for_contact += Saasu::Invoice.all(
:request_url => "invoiceList",
:transaction_type => "p",
:contactUid => duplicate.saasu_uid,
:paidStatus => "all",
:invoiceDateFrom => "2000-01-01T00:00",
:invoiceDateTo => Date.today
)
invoices_for_contact.each do |i|
invoice_to_update = Saasu::Invoice.find(i.uid)
invoice_to_update.contact_uid = self.saasu_uid
Saasu::Invoice.update(invoice_to_update)
end
Saasu::Contact.delete(duplicate.saasu_uid)
end
end
# Backend handler for [Update Contact] form (see contact/update).
#----------------------------------------------------------------------------
def update_with_account_and_permissions(params)
save_account(params)
# Must set access before user_ids, because user_ids= method depends on access value.
self.access = params[:contact][:access] if params[:contact][:access]
self.attributes = params[:contact]
self.save
end
# Attach given attachment to the contact if it hasn't been attached already.
#----------------------------------------------------------------------------
def attach!(attachment)
unless self.send("#{attachment.class.name.underscore.downcase}_ids").include?(attachment.id)
self.send(attachment.class.name.tableize) << attachment
end
end
# Discard given attachment from the contact.
#----------------------------------------------------------------------------
def discard!(attachment)
if attachment.is_a?(Task)
attachment.update_attribute(:asset, nil)
else # Opportunities
self.send(attachment.class.name.tableize).delete(attachment)
end
end
# Class methods.
# Builds a Contact from a converted Lead (model), copying the standard
# and custom fields plus the business address; only persists it when the
# given account and opportunity are themselves valid.
#----------------------------------------------------------------------------
def self.create_for(model, account, opportunity, params)
attributes = {
:lead_id => model.id,
:user_id => params[:account][:user_id],
:assigned_to => params[:account][:assigned_to],
:access => params[:access]
}
%w(first_name last_name title source email alt_email phone mobile blog linkedin facebook twitter skype do_not_call background_info).each do |name|
attributes[name] = model.send(name.intern)
end
contact = Contact.new(attributes)
# Set custom fields.
if model.class.respond_to?(:fields)
model.class.fields.each do |field|
if contact.respond_to?(field.name)
contact.send "#{field.name}=", model.send(field.name)
end
end
end
contact.business_address = Address.new(:street1 => model.business_address.street1, :street2 => model.business_address.street2, :city => model.business_address.city, :state => model.business_address.state, :zipcode => model.business_address.zipcode, :country => model.business_address.country, :full_address => model.business_address.full_address, :address_type => "Business") unless model.business_address.nil?
# Save the contact only if the account and the opportunity have no errors.
if account.errors.empty? && opportunity.errors.empty?
# Note: contact.account = account doesn't seem to work here.
contact.account_contact = AccountContact.new(:account => account, :contact => contact) unless account.id.blank?
if contact.access != "Lead" || model.nil?
contact.save
else
contact.save_with_model_permissions(model)
end
contact.opportunities << opportunity unless opportunity.id.blank? # must happen after contact is saved
end
contact
end
private
# Make sure at least one user has been selected if the contact is being shared.
#----------------------------------------------------------------------------
def users_for_shared_access
errors.add(:access, :share_contact) if self[:access] == "Shared" && !self.permissions.any?
end
# Handles the saving of related accounts
#----------------------------------------------------------------------------
def save_account(params)
if params[:account][:id] == "" || params[:account][:name] == ""
self.account = nil
else
account = Account.create_or_select_for(self, params[:account])
if self.account != account and account.id.present?
self.account_contact = AccountContact.new(:account => account, :contact => self)
end
end
self.reload unless self.new_record? # ensure the account association is updated
end
ActiveSupport.run_load_hooks(:fat_free_crm_contact, self)
end
|
Xiaoyunnn/tp
|
src/main/java/seedu/address/model/person/exceptions/ClashingLessonException.java
|
<filename>src/main/java/seedu/address/model/person/exceptions/ClashingLessonException.java<gh_stars>0
package seedu.address.model.person.exceptions;
/**
 * Thrown when an operation would produce clashing lessons.  Two lessons
 * clash when their time ranges overlap.
 */
public class ClashingLessonException extends RuntimeException {

    /** Single message used for every clash, so callers see consistent text. */
    private static final String MESSAGE = "Operation would result in clashing lessons";

    /** Creates the exception with the standard clash message. */
    public ClashingLessonException() {
        super(MESSAGE);
    }
}
|
esnet/netshell
|
kernel/src/main/java/net/es/netshell/kernel/acl/UserAccess.java
|
<reponame>esnet/netshell<gh_stars>1-10
/*
* ESnet Network Operating System (ENOS) Copyright (c) 2015, The Regents
* of the University of California, through Lawrence Berkeley National
* Laboratory (subject to receipt of any required approvals from the
* U.S. Dept. of Energy). All rights reserved.
*
* If you have questions about your rights to use or distribute this
* software, please contact Berkeley Lab's Innovation & Partnerships
* Office at <EMAIL>.
*
* NOTICE. This Software was developed under funding from the
* U.S. Department of Energy and the U.S. Government consequently retains
* certain rights. As such, the U.S. Government has been granted for
* itself and others acting on its behalf a paid-up, nonexclusive,
* irrevocable, worldwide license in the Software to reproduce,
* distribute copies to the public, prepare derivative works, and perform
* publicly and display publicly, and to permit other to do so.
*/
package net.es.netshell.kernel.acl;
import net.es.netshell.api.*;
import net.es.netshell.configuration.NetShellConfiguration;
import net.es.netshell.kernel.security.FileACL;
import net.es.netshell.kernel.exec.KernelThread;
import net.es.netshell.kernel.exec.annotations.SysCall;
import net.es.netshell.kernel.acl.UserAccessProfile;
import net.es.netshell.kernel.acl.UserAccessACL;
import net.es.netshell.kernel.acl.NetworkManageProfile;
import net.es.netshell.kernel.acl.UserManageProfile;
import net.es.netshell.kernel.acl.VMManageProfile;
import net.es.netshell.kernel.users.Users;
import net.es.netshell.kernel.users.User;
import net.es.netshell.shell.annotations.ShellCommand;
import net.es.netshell.shell.CommandResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.lang.reflect.Method;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.io.IOException;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.SetMultimap;
/**
* Created by amercian on 7/1/15.
*/
public final class UserAccess {
/**
 * Extends Access Shell application
 * Creates a folder /netshell-root/etc/acl/network/vconfig/acl/username
 * Creates the table of users and privilege profiles in /netshell-root/etc/netshell.user.access
 *
 * Access entries are kept in three in-memory multimaps (network/user/vm),
 * each persisted as a line-per-entry file under /etc/acl/<class>.
 */
private final static UserAccess users = new UserAccess();
// Recognized access classes; an entry's map string starts with one of these.
List<String> privArray = Arrays.asList(UserAccessProfile.NETWORK, UserAccessProfile.USER, UserAccessProfile.VM);
public final static String USERS_DIR = "acl";
private Path aclFilePath; // Useful for checking if acl exists or not
private Path NetShellRootPath;
// Hash Table: key = user, value = accesses
// To include duplicate key but no duplicate values using SetMultimap
private SetMultimap<String, String> NetworkAccessList = HashMultimap.create();
private SetMultimap<String, String> UserAccessList = HashMultimap.create();
private SetMultimap<String, String> VMAccessList = HashMultimap.create();
private final Logger logger = LoggerFactory.getLogger(UserAccess.class);
/**
 * Resolves the NetShell root and the /etc/acl path.  Loading of the
 * per-class ACL files at startup is currently commented out; the maps
 * are populated lazily by create/remove calls instead.
 */
public UserAccess() {
// Figure out the NetShell root directory.
String NetShellRootDir = NetShellConfiguration.getInstance().getGlobal().getRootDirectory();
this.NetShellRootPath = Paths.get(NetShellRootDir).normalize();
this.aclFilePath = FileUtils.toRealPath("/etc/acl");
// Read acl user file or create it if necessary
UserAccessProfile user = new UserAccessProfile();
/*String username = user.getName();
String map = user.getMap();
String[] access = map.split(":");
try {
this.readUserFile(username, access[0]);
} catch (IOException e) {
e.printStackTrace();
}*/
}
/** Singleton accessor. */
public static UserAccess getUsers() {
return users;
}
/**
 * Syscall wrapper around {@link #do_createAccess}: only a privileged
 * (root) kernel thread may add an access entry.  Returns a
 * CommandResponse whose flag/message describe success or the reason for
 * failure (not permitted, duplicate, bad access class, ...).
 */
public CommandResponse createAccess(UserAccessProfile user) {
Method method = null;
CommandResponse commandResponse;
String resMessage = null;
boolean resCode = false;
try {
method = KernelThread.getSysCallMethod(this.getClass(), "do_createAccess");
// Only ROOT user can perform this function
if (KernelThread.currentKernelThread().isPrivileged()) {
KernelThread.doSysCall(this, method, user); //removed "true"
resCode = true;
resMessage = "User access added";
} else {
resCode = false;
resMessage = "Operation Not Permitted";
}
} catch (UserAlreadyExistException e) {
e.printStackTrace();
resCode = false;
resMessage = "User access already exists";
} catch (NoSuchMethodException e) {
e.printStackTrace();
resCode = false;
resMessage = "Method not implemented";
}catch(UserClassException e){
e.printStackTrace();
resCode = false;
resMessage = "User access must be network or user or vm";
}catch (Exception e) {
e.printStackTrace();
resCode = false;
resMessage = "Error in operation";
}
commandResponse = new CommandResponse(resMessage,resCode);
return commandResponse;
}
@SysCall(
name="do_createAccess"
)
/**
 * Adds an access mapping for {@code newUser}.  The map string is
 * "<class>:..." where <class> must be network, user or vm; the entry is
 * added to the matching multimap, persisted to /etc/acl/<class>, and an
 * ACL granting the user read/write/execute on acl/<class>/<username> is
 * built.  NOTE(review): acl.store() is commented out, so that ACL is
 * never persisted — confirm whether this is intentional.
 */
public void do_createAccess (UserAccessProfile newUser) throws UserAlreadyExistException, UserException, UserClassException, IOException {
logger.info("do_createAccess entry");
String username = newUser.getName();
String map = newUser.getMap();
String[] access = map.split(":");
// Make sure application acl is available
if (!privArray.contains(access[0])) {
throw new UserClassException(access[0]);
}
// Checks if the user access already exists
try {
this.readUserFile(username, access[0]);
} catch (IOException e) {
logger.error("Cannot read access file");
}
// check which access profile should be performed and lists are created
// NOTE(review): the profile objects below are created but never used
// locally — presumably constructed for side effects; confirm.
if(access[0].equals("network")){
this.NetworkAccessList.put(username, map);
NetworkManageProfile user = new NetworkManageProfile(username, map);
}
else if(access[0].equals("user")){
this.UserAccessList.put(username, map);
UserManageProfile user = new UserManageProfile(username, map);
}
else if(access[0].equals("vm")){
this.VMAccessList.put(username, map);
VMManageProfile user = new VMManageProfile(username, map);
}
// Create hash table with list of user/access and write to /etc/netshell.user.access
// Including functionality to have duplicates in (hash) table
// Remove dependency on map TODO
this.writeUserFile(username, access[0]);
// Create home directory
File homeDir = new File (Paths.get(this.getHomePath().toString(), access[0], username).toString());
/* In case directory is required */
//homeDir.mkdirs();
// Create access only to specific application with path = netshell-root/acl/network/.acl/<username>
UserAccessACL acl = new UserAccessACL(homeDir.toPath());
acl.allowUserRead(username);
acl.allowUserWrite(username);
acl.allowUserExecute(username);
// Commit ACL's
//acl.store();
}
/**
 * Removes an access entry via {@link #do_removeAccess}.  Allowed when
 * the caller removes their own entry or is privileged.  Afterwards the
 * caller's session thread is interrupted unless a privileged user was
 * removing someone else's entry.  Returns false on any failure.
 */
public boolean removeaccess (UserAccessProfile user) {
Method method = null;
KernelThread kt = KernelThread.currentKernelThread();
String currentUserName = kt.getUser().getName();
String userName = user.getName();
String map = user.getMap();
try {
method = KernelThread.getSysCallMethod(this.getClass(), "do_removeAccess");
if ((currentUserName.equals(userName)) ||
Users.isPrivileged(currentUserName)) {
logger.info("OK to remove");
KernelThread.doSysCall(this,
method,
user);
}
} catch (NonExistentUserException e) {
e.printStackTrace();
return false;
} catch (NoSuchMethodException e) {
e.printStackTrace();
return false;
} catch (Exception e) {
e.printStackTrace();
return false;
}
if (! Users.isPrivileged(currentUserName) || currentUserName.equals(userName)) {
// End user session if not privileged account (unless root removed own account)
kt.getThread().interrupt();
}
return true;
}
@SysCall(
name="do_removeAccess"
)
/**
 * Drops the (username, map) entry from the multimap matching the map's
 * access class and rewrites the /etc/acl/<class> file.
 */
public void do_removeAccess(UserAccessProfile user) throws NonExistentUserException, IOException {
logger.info("do_removeAccess entry");
String username = user.getName();
String map = user.getMap();
String[] access = map.split(":");
// Make sure the user exists.
try {
this.readUserFile(username, access[0]);
} catch (IOException e) {
logger.error("Cannot read access file");
}
// Remove entry from the multimap
if(access[0].equals("network")){
this.NetworkAccessList.remove(username, map);
} else if(access[0].equals("user")){
this.UserAccessList.remove(username, map);
} else if(access[0].equals("vm")){
this.VMAccessList.remove(username, map);
}
// Delete .acl file associated with this user account
//File aclDelete = new File (Paths.get(this.getHomePath().toString(), access[0], ".acl", username).toString());
//aclDelete.delete();
// Save entry in the /etc/netshell.user.access
this.writeUserFile(username, access[0]);
}
/**
 * Function to check if username has access privilege.
 * Can be called from command with respective application name
 * @param username accessing user
 * @param map accessing application
 * @return TRUE or FALSE
 */
public static boolean isAccessPrivileged (String username, String map) throws IOException{
// Access will depend on application
// map should be decoded for each application
String access;
String[] accessInList;
if(map.contains(":")){
accessInList = map.split(":");
access = accessInList[0];
} else {
access = map;
}
// Bootstrap case: before any entries exist, "admin" is allowed in so the
// initial configuration can be created.
if ( access.equals("network") && UserAccess.getUsers().NetworkAccessList.isEmpty() && username.equals("admin")) {
// Initial configuration. Add admin user and create configuration file.
return true;
} else if (access.equals("user") && UserAccess.getUsers().UserAccessList.isEmpty() && username.equals("admin")) {
return true;
} else if (access.equals("vm") && UserAccess.getUsers().VMAccessList.isEmpty() && username.equals("admin")) {
return true;
}
// NOTE(review): username.equals(null) is always false for a non-null
// reference and throws NPE for a null one — a `username == null` check
// was probably intended; confirm before changing.
if (username.equals(null)) {
// Not a user
return false;
} else if (/*accesslist.contains(access) ||*/ Users.isPrivileged(username)) {
return true;
} else if (UserAccess.getUsers().NetworkAccessList.containsKey(username) && access.equals("network") && NetworkManageProfile.isPrivileged(username, map)) {
return true;
} else if (UserAccess.getUsers().UserAccessList.containsKey(username) && access.equals("user") && UserManageProfile.isPrivileged(username, map)) {
return true;
} else if (UserAccess.getUsers().VMAccessList.containsKey(username) && access.equals("vm") && VMManageProfile.isPrivileged(username, map)) {
return true;
} else {
// Any other case
return false;
}
}
/**
 * Loads /etc/acl/<access> (creating an empty file if absent) into the
 * multimap for that access class.  Each line is parsed as a
 * UserAccessProfile.
 */
private synchronized void readUserFile(String username, String access) throws IOException {
this.aclFilePath = FileUtils.toRealPath(String.format("/etc/acl/%s",access));
File aclFile = new File(this.aclFilePath.toString());
aclFile.getParentFile().mkdirs();
if (!aclFile.exists()) {
// File does not exist yet, create it.
if (!aclFile.createNewFile()) {
// File could not be created, return a RuntimeError
throw new RuntimeException("Cannot create " + this.aclFilePath.toString());
}
}
BufferedReader reader = new BufferedReader(new FileReader(aclFile));
String line = null;
while ((line = reader.readLine()) != null) {
UserAccessProfile p = new UserAccessProfile(line);
// NOTE(review): the "vm" branch below is a plain `if`, not `else if`,
// so every valid "network"/"user" line also falls through to the final
// else and gets logged as malformed — likely a bug; confirm.
if (p.getName() != null && access.equals("network")) {
this.NetworkAccessList.put(p.getName(), p.getMap());
}
else if (p.getName() != null && access.equals("user")) {
this.UserAccessList.put(p.getName(), p.getMap());
}
if (p.getName() != null && access.equals("vm")) {
this.VMAccessList.put(p.getName(), p.getMap());
}
else {
logger.error("Malformed user entry: {}", line);
}
}
}
/**
 * Rewrites /etc/acl/<access> from the in-memory multimap for that
 * class.  The old file is deleted first, so the rewrite is not atomic —
 * a crash mid-write loses entries.  The username parameter is unused.
 */
private synchronized void writeUserFile(String username, String access) throws IOException {
this.aclFilePath = FileUtils.toRealPath(String.format("/etc/acl/%s",access));
File aclFile = new File(this.aclFilePath.toString());
aclFile.delete();
BufferedWriter writer = new BufferedWriter(new FileWriter(aclFile));
if(access.equals("network")){
for (Map.Entry p : this.NetworkAccessList.entries() ) {
if (p.getKey() != null) {
UserAccessProfile newEntry = new UserAccessProfile(p.getKey().toString(), p.getValue().toString());
//System.out.println("Key: " + p.getKey().toString() + "\t Value: " + p.getValue().toString() + "\n");
writer.write(newEntry.toString());
writer.newLine();
}
}
writer.flush();
writer.close();
} else if(access.equals("user")){
for (Map.Entry p : this.UserAccessList.entries() ) {
if (p.getKey() != null) {
UserAccessProfile newEntry = new UserAccessProfile(p.getKey().toString(), p.getValue().toString());
writer.write(newEntry.toString());
writer.newLine();
}
}
writer.flush();
writer.close();
} else if(access.equals("vm")){
for (Map.Entry p : this.VMAccessList.entries() ) {
if (p.getKey() != null) {
UserAccessProfile newEntry = new UserAccessProfile(p.getKey().toString(), p.getValue().toString());
writer.write(newEntry.toString());
writer.newLine();
}
}
writer.flush();
writer.close();
}
}
/** NetShell root directory (normalized). */
public Path getNetShellRootPath() { return NetShellRootPath; }
/** Root of the per-application ACL tree: <netshell-root>/acl. */
public Path getHomePath() {
return NetShellRootPath.resolve(USERS_DIR);
}
/** Per-user directory under the ACL tree. */
public Path getHomePath(String username) {
return getHomePath().resolve(username);
}
}
|
kkrampa/commcare-hq
|
corehq/apps/reports/filters/controllers.py
|
<reponame>kkrampa/commcare-hq
from __future__ import absolute_import
from __future__ import unicode_literals
import json
from memoized import memoized
from corehq.apps.es import UserES, GroupES, groups
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reports.const import DEFAULT_PAGE_LIMIT
from corehq.apps.reports.filters.case_list import CaseListFilterUtils
from corehq.apps.reports.filters.users import EmwfUtils, UsersUtils
from corehq.apps.reports.util import SimplifiedUserInfo
from six.moves import map
def paginate_options(data_sources, query, start, size):
    """Collect one page of options spanning several data sources.

    ``data_sources`` is a list of ``(count_fn, getter_fn)`` pairs, where
    ``count_fn(query)`` reports how many entries a source holds and
    ``getter_fn(query, start, size)`` returns a slice of them. ``start`` is an
    offset into the virtual concatenation of all sources.

    Returns ``(total, options)`` -- the combined entry count across every
    source, and the requested page of entries.
    """
    # See TestEmwfPagination for worked examples of this bookkeeping.
    matched = []
    total = 0
    for count_fn, fetch_fn in data_sources:
        available = count_fn(query)
        total += available
        if start <= available:
            # This source contributes to the current page.
            page = list(fetch_fn(query, start, size))
            matched.extend(page)
            start = 0            # subsequent sources start from their beginning
            size -= len(page)    # how many more entries the page still needs
        else:
            # Page begins beyond this source entirely; consume its count.
            start -= available
    return total, matched
class EmwfOptionsController(object):
    """Paginated option provider for the expanded mobile worker (EMWF) filter.

    Serves filter options drawn from several sources -- static user types,
    reporting groups, locations and users -- each exposed as a
    (count_fn, getter_fn) pair in ``data_sources`` and paged across via
    ``paginate_options``.
    """
    def __init__(self, request, domain, search):
        self.request = request
        self.domain = domain
        self.search = search
    @property
    @memoized
    def utils(self):
        # Helpers that turn ES hits / models into (id, text) option tuples.
        return EmwfUtils(self.domain)
    def get_all_static_options(self, query):
        # Case-insensitive substring match on the static user-type labels.
        return [user_type for user_type in self.utils.static_options
                if query.lower() in user_type[1].lower()]
    def get_static_options_size(self, query):
        return len(self.get_all_static_options(query))
    def get_static_options(self, query, start, size):
        return self.get_all_static_options(query)[start:start + size]
    def group_es_query(self, query, group_type="reporting"):
        # Base GroupES query for this domain; raises TypeError for an
        # unrecognized group_type.
        if group_type == "reporting":
            type_filter = groups.is_reporting()
        elif group_type == "case_sharing":
            type_filter = groups.is_case_sharing()
        else:
            raise TypeError("group_type '{}' not recognized".format(group_type))
        return (GroupES()
                .domain(self.domain)
                .filter(type_filter)
                .not_deleted()
                .search_string_query(query, default_fields=["name"]))
    def get_groups_size(self, query):
        return self.group_es_query(query).count()
    def get_groups(self, query, start, size):
        # One page of reporting groups, sorted by exact name.
        groups_query = (self.group_es_query(query)
                        .fields(['_id', 'name'])
                        .start(start)
                        .size(size)
                        .sort("name.exact"))
        return [self.utils.reporting_group_tuple(g) for g in groups_query.run().hits]
    @staticmethod
    def _get_location_specific_custom_filters(query):
        # Parses queries of the form '"parent_name"/child_query' into
        # (parent_name, search_query); search_query is None while the user is
        # still typing the quoted parent name.
        query_sections = query.split("/")
        # first section would be u'"parent' or u'"parent_name"', so split with " to get
        # ['', 'parent'] or ['', 'parent_name', '']
        parent_name_section_splits = query_sections[0].split('"')
        parent_name = parent_name_section_splits[1]
        try:
            search_query = query_sections[1]
        except IndexError:
            # when user has entered u'"parent_name"' without trailing "/"
            # consider it same as u'"parent_name"/'
            search_query = "" if len(parent_name_section_splits) == 3 else None
        return parent_name, search_query
    def get_locations_query(self, query):
        # Locations matching the query, restricted to what the requesting
        # user may see; honors the 'show_inactive' GET flag.
        show_inactive = json.loads(self.request.GET.get('show_inactive', 'false'))
        if show_inactive:
            included_objects = SQLLocation.inactive_objects
        else:
            included_objects = SQLLocation.active_objects
        if self.search.startswith('"'):
            parent_name, search_query = self._get_location_specific_custom_filters(query)
            if search_query is None:
                # autocomplete parent names while user is looking for just the parent name
                # and has not yet entered any child location name
                locations = included_objects.filter(name__istartswith=parent_name, domain=self.domain)
            else:
                # if any parent locations with name entered then
                # find locations under them
                # else just return empty queryset
                parents = included_objects.filter(name__iexact=parent_name, domain=self.domain)
                if parent_name and parents.count():
                    descendants = included_objects.get_queryset_descendants(parents, include_self=True)
                    locations = descendants.filter_by_user_input(self.domain, search_query)
                else:
                    return included_objects.none()
        else:
            locations = included_objects.filter_path_by_user_input(self.domain, query)
        return locations.accessible_to_user(self.domain, self.request.couch_user)
    def get_locations_size(self, query):
        return self.get_locations_query(query).count()
    def get_locations(self, query, start, size):
        """
        start: The index of the first item to be returned
        size: The number of items to return
        """
        return list(map(self.utils.location_tuple,
                        self.get_locations_query(query)[start:start + size]))
    def _get_users(self, query, start, size, include_inactive=False):
        # One page of users from ES, sorted by username; restricted to
        # accessible locations when the requester lacks full location access.
        if include_inactive:
            user_query = self.all_user_es_query(query)
        else:
            user_query = self.active_user_es_query(query)
        users = (user_query
                 .fields(SimplifiedUserInfo.ES_FIELDS)
                 .start(start)
                 .size(size)
                 .sort("username.exact"))
        if not self.request.can_access_all_locations:
            accessible_location_ids = SQLLocation.active_objects.accessible_location_ids(
                self.request.domain, self.request.couch_user)
            users = users.location(accessible_location_ids)
        return [self.utils.user_tuple(u) for u in users.run().hits]
    def active_user_es_query(self, query):
        # Active users in this domain matching name/username search fields.
        search_fields = ["first_name", "last_name", "base_username"]
        return (UserES()
                .domain(self.domain)
                .search_string_query(query, default_fields=search_fields))
    def all_user_es_query(self, query):
        # Same as active_user_es_query but also includes inactive users.
        return self.active_user_es_query(query).show_inactive()
    def get_all_users_size(self, query):
        return self.all_user_es_query(query).count()
    def get_active_users_size(self, query):
        return self.active_user_es_query(query).count()
    def get_all_users(self, query, start, size):
        return self._get_users(query, start, size, include_inactive=True)
    def get_active_users(self, query, start, size):
        return self._get_users(query, start, size, include_inactive=False)
    @property
    def data_sources(self):
        # (count_fn, getter_fn) pairs consumed by paginate_options; order
        # determines display order. Restricted-location users see fewer sources.
        if self.request.can_access_all_locations:
            return [
                (self.get_static_options_size, self.get_static_options),
                (self.get_groups_size, self.get_groups),
                (self.get_locations_size, self.get_locations),
                (self.get_all_users_size, self.get_all_users),
            ]
        else:
            return [
                (self.get_locations_size, self.get_locations),
                (self.get_all_users_size, self.get_all_users),
            ]
    @property
    @memoized
    def page(self):
        # 1-based page number from POST or GET.
        if self.request.method == 'POST':
            return int(self.request.POST.get('page', 1))
        return int(self.request.GET.get('page', 1))
    @property
    @memoized
    def size(self):
        # Page size from POST or GET, defaulting to DEFAULT_PAGE_LIMIT.
        if self.request.method == 'POST':
            return int(self.request.POST.get('page_limit', DEFAULT_PAGE_LIMIT))
        return int(self.request.GET.get('page_limit', DEFAULT_PAGE_LIMIT))
    def get_options(self, show_more=False):
        """
        If `show_more` = True, then the result returns a tuple where the first
        value is a boolean of whether more additional pages are still available
        (used by Select 2). Otherwise the first value in the tuple returned
        is the total.
        :param show_more: (optional)
        :return: (int) count or (bool) has_more, (list) results
        """
        start = self.size * (self.page - 1)
        count, options = paginate_options(
            self.data_sources,
            self.search,
            start,
            self.size
        )
        # Entries are (id, text) or (id, text, is_active) tuples.
        results = [
            {'id': entry[0], 'text': entry[1]} if len(entry) == 2 else
            {'id': entry[0], 'text': entry[1], 'is_active': entry[2]} for entry
            in options
        ]
        if show_more:
            has_more = (self.page * self.size) < count
            return has_more, results
        return count, results
class MobileWorkersOptionsController(EmwfOptionsController):
    """Options controller restricted to active mobile workers only."""
    @property
    @memoized
    def utils(self):
        return UsersUtils(self.domain)
    def get_post_options(self):
        # POST variant of get_options; entries here are (id, text) pairs only.
        page = int(self.request.POST.get('page', 1))
        size = int(self.request.POST.get('page_limit', DEFAULT_PAGE_LIMIT))
        start = size * (page - 1)
        count, options = paginate_options(
            self.data_sources,
            self.search,
            start,
            size
        )
        return count, [{'id': id_, 'text': text} for id_, text in options]
    @property
    def data_sources(self):
        # Active users are the only source for this controller.
        return [
            (self.get_active_users_size, self.get_active_users),
        ]
    def active_user_es_query(self, query):
        # Narrow the parent query to mobile workers.
        query = super(MobileWorkersOptionsController, self).active_user_es_query(query)
        return query.mobile_users()
class CaseListFilterOptionsController(EmwfOptionsController):
    """Options controller for the case list filter; adds case-sharing groups."""
    def get_sharing_groups(self, query, start, size):
        # One page of case-sharing groups, sorted by exact name.
        groups = (self.group_es_query(query, group_type="case_sharing")
                  .fields(['_id', 'name'])
                  .start(start)
                  .size(size)
                  .sort("name.exact"))
        return list(map(self.utils.sharing_group_tuple, groups.run().hits))
    @property
    @memoized
    def utils(self):
        return CaseListFilterUtils(self.domain)
    @property
    # Case list shows all users, instead of just active users
    def data_sources(self):
        if self.request.can_access_all_locations:
            return [
                (self.get_static_options_size, self.get_static_options),
                (self.get_groups_size, self.get_groups),
                (self.get_sharing_groups_size, self.get_sharing_groups),
                (self.get_locations_size, self.get_locations),
                (self.get_all_users_size, self.get_all_users),
            ]
        else:
            return [
                (self.get_locations_size, self.get_locations),
                (self.get_active_users_size, self.get_active_users),
            ]
    def get_sharing_groups_size(self, query):
        return self.group_es_query(query, group_type="case_sharing").count()
class LocationGroupOptionsController(EmwfOptionsController):
    """Options controller limited to reporting groups and locations."""
    @property
    def data_sources(self):
        return [
            (self.get_groups_size, self.get_groups),
            (self.get_locations_size, self.get_locations),
        ]
|
engelhamer/robot-runner
|
experiments/mini_mission/turtlebot_runner/__main__.py
|
<reponame>engelhamer/robot-runner<filename>experiments/mini_mission/turtlebot_runner/__main__.py
import rospy
import signal
import subprocess
from mission.mission import Mission
from common.ClientMetricsController import ClientMetricsController
# Entry point for the turtlebot runner: starts the ROS node, the mission and
# metrics controllers, launches the turtlebot bringup, then spins until shutdown.
rospy.init_node("turtlebot3_custom")
mission = Mission()
metrics = ClientMetricsController()
def handler(signum, frame):
    # SIGTSTP (Ctrl+Z) handler: shut down mission and metrics before exiting.
    print('Ctrl+Z pressed')
    mission.exit()
    metrics.exit()
    exit()
signal.signal(signal.SIGTSTP, handler)
print("Initializing bringup")
# roslaunch turtlebot3_bringup turtlebot3_robot.launch
# NOTE(review): the Popen handle is not kept, so the launch process is never
# waited on or terminated explicitly -- confirm this is intended.
subprocess.Popen(['roslaunch', 'turtlebot3_bringup', 'turtlebot3_robot.launch'])
print("Initializing node!")
while not rospy.is_shutdown():
    rospy.spin()
|
gipert/remage
|
include/RMGManagementDetectorConstruction.hh
|
<filename>include/RMGManagementDetectorConstruction.hh
#ifndef _RMG_MANAGEMENT_DETECTOR_CONSTRUCTION_HH_
#define _RMG_MANAGEMENT_DETECTOR_CONSTRUCTION_HH_
#include <map>
#include <memory>
#include <vector>
#include "globals.hh"
#include "G4VUserDetectorConstruction.hh"
#include "RMGMaterialTable.hh"
#include "RMGNavigationTools.hh"
// Forward declarations -- full definitions come from Geant4 headers where
// needed. (A duplicate 'class G4VPhysicalVolume;' declaration was removed.)
class G4VPhysicalVolume;
class G4GenericMessenger;

/**
 * Detector construction for remage: builds the geometry from the registered
 * GDML files and/or the virtual DefineGeometry() hook, and sets up sensitive
 * detectors and fields in ConstructSDandField().
 */
class RMGManagementDetectorConstruction : public G4VUserDetectorConstruction {

  public:

    RMGManagementDetectorConstruction();
    ~RMGManagementDetectorConstruction() = default;

    // Non-copyable and non-movable.
    RMGManagementDetectorConstruction (RMGManagementDetectorConstruction const&) = delete;
    RMGManagementDetectorConstruction& operator=(RMGManagementDetectorConstruction const&) = delete;
    RMGManagementDetectorConstruction (RMGManagementDetectorConstruction&&) = delete;
    RMGManagementDetectorConstruction& operator=(RMGManagementDetectorConstruction&&) = delete;

    G4VPhysicalVolume* Construct() override;
    void ConstructSDandField() override;

    /// Queue a GDML file for loading.
    inline void IncludeGDMLFile(G4String filename) { fGDMLFiles.emplace_back(filename); }
    /// Virtual hook for subclasses to build geometry in code; returns nullptr by default.
    inline virtual G4VPhysicalVolume* DefineGeometry() { return nullptr; }
    /// Set (or replace) the maximum step length for the named physical volume.
    inline void SetMaxStepLimit(G4String name, double max_step) {
      fPhysVolStepLimits.insert_or_assign(name, max_step);
    }
    static inline RMGMaterialTable::BathMaterial GetBathMaterial() { return fBathMaterial; }
    inline void PrintListOfLogicalVolumes() { RMGNavigationTools::PrintListOfLogicalVolumes(); }
    inline void PrintListOfPhysicalVolumes() { RMGNavigationTools::PrintListOfPhysicalVolumes(); }

  private:

    std::vector<G4String> fGDMLFiles;               // GDML files queued for loading
    std::unique_ptr<RMGMaterialTable> fMaterialTable;
    std::map<G4String, G4double> fPhysVolStepLimits; // per-volume max step lengths
    static RMGMaterialTable::BathMaterial fBathMaterial;

    std::unique_ptr<G4GenericMessenger> fMessenger;
    void DefineCommands();

    G4VPhysicalVolume* fWorld;
};
#endif
// vim: tabstop=2 shiftwidth=2 expandtab
|
1370156363/TTNews1
|
TTNews/Classes/NewWenDaViewController.h
|
<filename>TTNews/Classes/NewWenDaViewController.h
//
// NewWenDaViewController.h
// TTNews
//
// Created by mac on 2017/10/21.
// Copyright © 2017年 瑞文戴尔. All rights reserved.
//
#import <UIKit/UIKit.h>
/// Plain UIViewController subclass; declares no additional public API here.
@interface NewWenDaViewController : UIViewController
@end
|
andrey19972004/algorithms_structures
|
yandex/yandex_algorithms/4/H.py
|
from collections import Counter
def modify_dict(symbols_dict, temp_dict, symbol, modifier):
    """Apply ``modifier`` to ``temp_dict[symbol]`` and report the match delta.

    Returns -1 if the symbol's count matched ``symbols_dict`` before the
    change but not after, +1 if it matches after the change, and 0 otherwise.
    ``temp_dict`` is mutated in place.
    """
    temp_dict.setdefault(symbol, 0)
    target = symbols_dict.get(symbol)
    delta = 0
    if target is not None and target == temp_dict[symbol]:
        delta = -1
    temp_dict[symbol] += modifier
    if target is not None and target == temp_dict[symbol]:
        delta = 1
    return delta
def maya(len_word, len_seq, word, sequence):
    """Count length-``len_word`` windows of ``sequence`` that are anagrams of ``word``.

    Slides a counting window over ``sequence[:len_seq]``, tracking how many
    distinct symbols of ``word`` currently have matching counts.
    """
    target = Counter(word)
    window = Counter(sequence[:len_word])

    def shift(symbol, step):
        # Adjust the window count for one symbol; return the change in the
        # number of symbols whose counts match ``target`` (-1, 0 or +1).
        if symbol not in window:
            window[symbol] = 0
        delta = 0
        if symbol in target and target[symbol] == window[symbol]:
            delta = -1
        window[symbol] += step
        if symbol in target and target[symbol] == window[symbol]:
            delta = 1
        return delta

    matched = sum(1 for key, value in target.items() if value == window[key])
    result = 1 if matched == len(target) else 0
    for i in range(len_word, len_seq):
        matched += shift(sequence[i - len_word], -1)  # drop the outgoing symbol
        matched += shift(sequence[i], 1)              # add the incoming symbol
        if matched == len(target):
            result += 1
    return result
def main():
    # Reads (len_word, len_seq) from the first line of input.txt, then the
    # word and the sequence, and prints the anagram-window count from maya().
    with open('input.txt', 'r') as file:
        len_word, len_seq = map(int, file.readline().strip().split())
        word = file.readline().strip()
        sequence = file.readline().strip()
    print(maya(len_word, len_seq, word, sequence))
if __name__ == '__main__':
    main()
|
av1m/cars
|
features/steps/US_0012.py
|
# coding: utf-8
import logging
from behave import *
from foods.formula import Formula
from foods.kebab import Kebab, TruckKebab
logger = logging.getLogger(__name__)
use_step_matcher("parse")
@given("An order placed in a kebab truck")
def step_impl(context):
    # Build a truck with one formula and register a single order (id 1).
    context.kebab_truck = TruckKebab(
        formulas=[Formula("Pepsi", [Kebab(sauce="Beef", price=14)])]
    )
    context.kebab_truck.add_order(1)
@when("Hatward undo the last order")
def step_impl(context):
    context.last_order = context.kebab_truck.undo_last_order()
@then("the order is cancelled")
def step_impl(context):
    # undo_last_order's result is indexed here; presumably element 0 is the
    # order id -- confirm against TruckKebab's implementation.
    assert context.last_order[0] == 1
@when("Hatward undo the last order that was not placed")
def step_impl(context):
    # Undoing with no orders raises IndexError; record it for a Then step.
    try:
        context.last_order = context.kebab_truck.undo_last_order()
    except IndexError:
        context.exception = True
|
RickWieman/adyen-java-api-library
|
src/main/java/com/adyen/model/posterminalmanagement/GetTerminalsUnderAccountRequest.java
|
<gh_stars>10-100
/*
* ######
* ######
* ############ ####( ###### #####. ###### ############ ############
* ############# #####( ###### #####. ###### ############# #############
* ###### #####( ###### #####. ###### ##### ###### ##### ######
* ###### ###### #####( ###### #####. ###### ##### ##### ##### ######
* ###### ###### #####( ###### #####. ###### ##### ##### ######
* ############# ############# ############# ############# ##### ######
* ############ ############ ############# ############ ##### ######
* ######
* #############
* ############
*
* Adyen Java API Library
*
* Copyright (c) 2020 <NAME>.V.
* This file is open source and available under the MIT license.
* See the LICENSE file for more info.
*/
package com.adyen.model.posterminalmanagement;
import java.util.Objects;
import com.google.gson.annotations.SerializedName;
/**
* GetTerminalsUnderAccountRequest
*/
/**
 * Request payload for the POS Terminal Management call that lists the
 * terminals under a company account, merchant account, or store.
 */
public class GetTerminalsUnderAccountRequest {

    @SerializedName("companyAccount")
    private String companyAccount = null;

    @SerializedName("merchantAccount")
    private String merchantAccount = null;

    @SerializedName("store")
    private String store = null;

    /** Fluent variant of {@link #setCompanyAccount(String)}. */
    public GetTerminalsUnderAccountRequest companyAccount(String companyAccount) {
        setCompanyAccount(companyAccount);
        return this;
    }

    /**
     * Your company account. If you only specify this parameter, the response includes all terminals at all account levels.
     *
     * @return companyAccount
     **/
    public String getCompanyAccount() {
        return companyAccount;
    }

    public void setCompanyAccount(String companyAccount) {
        this.companyAccount = companyAccount;
    }

    /** Fluent variant of {@link #setMerchantAccount(String)}. */
    public GetTerminalsUnderAccountRequest merchantAccount(String merchantAccount) {
        setMerchantAccount(merchantAccount);
        return this;
    }

    /**
     * The merchant account. This is required if you are retrieving the terminals assigned to a store.If you don't specify a `store` the response includes the terminals assigned to the specified merchant account and the terminals assigned to the stores under this merchant account.
     *
     * @return merchantAccount
     **/
    public String getMerchantAccount() {
        return merchantAccount;
    }

    public void setMerchantAccount(String merchantAccount) {
        this.merchantAccount = merchantAccount;
    }

    /** Fluent variant of {@link #setStore(String)}. */
    public GetTerminalsUnderAccountRequest store(String store) {
        setStore(store);
        return this;
    }

    /**
     * The store code of the store. With this parameter, the response only includes the terminals assigned to the specified store.
     *
     * @return store
     **/
    public String getStore() {
        return store;
    }

    public void setStore(String store) {
        this.store = store;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        GetTerminalsUnderAccountRequest other = (GetTerminalsUnderAccountRequest) o;
        return Objects.equals(companyAccount, other.companyAccount)
                && Objects.equals(merchantAccount, other.merchantAccount)
                && Objects.equals(store, other.store);
    }

    @Override
    public int hashCode() {
        return Objects.hash(companyAccount, merchantAccount, store);
    }

    @Override
    public String toString() {
        StringBuilder out = new StringBuilder("class GetTerminalsUnderAccountRequest {\n");
        out.append("    companyAccount: ").append(toIndentedString(companyAccount)).append("\n")
           .append("    merchantAccount: ").append(toIndentedString(merchantAccount)).append("\n")
           .append("    store: ").append(toIndentedString(store)).append("\n")
           .append("}");
        return out.toString();
    }

    /**
     * Convert the given object to string with each line indented by 4 spaces
     * (except the first line).
     */
    private String toIndentedString(Object o) {
        return o == null ? "null" : o.toString().replace("\n", "\n    ");
    }
}
|
ytorzuk-altran/openvino
|
src/core/tests/type_prop/result.cpp
|
<filename>src/core/tests/type_prop/result.cpp<gh_stars>1-10
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
// Result forwards its input's static element type and shape unchanged.
TEST(type_prop, result) {
    const auto arg_shape = Shape{1, 2, 3, 4, 5};
    auto arg = make_shared<opset1::Constant>(element::f32, arg_shape);
    auto result = make_shared<opset1::Result>(arg);
    EXPECT_EQ(result->get_output_element_type(0), element::f32);
    EXPECT_EQ(result->get_output_shape(0), arg_shape);
}
// A fully dynamic input shape propagates through Result as dynamic.
TEST(type_prop, result_dynamic_shape) {
    auto arg = make_shared<opset1::Parameter>(element::f32, PartialShape::dynamic());
    auto result = make_shared<opset1::Result>(arg);
    EXPECT_EQ(result->get_output_element_type(0), element::f32);
    EXPECT_TRUE(result->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}
// set_layout round-trips; clearing with an empty Layout removes the rt_info entry.
TEST(type_prop, result_layout) {
    auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto result = make_shared<opset1::Result>(a);
    result->set_layout("NHWC");
    EXPECT_EQ(result->get_layout(), "NHWC");
    result->set_layout(ov::Layout());
    EXPECT_TRUE(result->get_layout().empty());
    EXPECT_EQ(result->input(0).get_rt_info().count(ov::LayoutAttribute::get_type_info_static()), 0);
}
// A freshly constructed Result has no layout.
TEST(type_prop, result_layout_empty) {
    auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto result = make_shared<opset1::Result>(a);
    EXPECT_TRUE(result->get_layout().empty());
}
// Writing a raw string into rt_info (bypassing set_layout) makes get_layout throw.
TEST(type_prop, result_layout_invalid) {
    auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto result = make_shared<opset1::Result>(a);
    result->input(0).get_rt_info()[ov::LayoutAttribute::get_type_info_static()] = "NCHW";  // incorrect way
    ASSERT_THROW(result->get_layout(), ov::Exception);
}
|
manibhushan05/transiq
|
web/transiq/fileupload/views.py
|
# encoding: utf-8
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import UnreadablePostError
from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from django.utils.text import slugify
from django.views.generic import ListView
from rest_framework.authtoken.models import Token
from api import s3util
from api.helper import json_error_response
from api.models import S3Upload
from api.utils import get_ext, get_or_none, int_or_none, random_id
from fileupload.models import PODFile, VehicleFile, OwnerFile, DriverFile, ChequeFile, InvoiceReceiptFile, \
INVOICE_SENT_MODE_CHOICES, INVOICE_CONFIRM_MODE_CHOICES, WeighingSlip
from fileupload.response import JSONResponse, response_mimetype
from fileupload.serialize import serialize
from owner.vehicle_util import display_format
from restapi.helper_api import check_booking_status, create_new_booking_status, update_booking_status
from restapi.serializers.file_upload import PODFileSerializer
from supplier.models import Supplier,Vehicle,Driver
from team.helper.helper import django_date_format
from team.models import LrNumber, ManualBooking, Invoice
def upload_pod_page(request):
    """Render the POD upload page with candidate LR numbers and bookings.

    LRs are limited to the last 180 days with pod_status pending/rejected/
    unverified; bookings are BROKER*/AB* ids whose POD is pending/rejected,
    excluding cancelled or deleted bookings.
    """
    lr_numbers = LrNumber.objects.filter(Q(datetime__date__gte=datetime.now().date() - timedelta(days=180)) & (
            Q(booking__pod_status='pending') | Q(booking__pod_status='rejected') | Q(
        booking__pod_status='unverified'))).order_by('-datetime').values(
        'id', 'lr_number')
    bookings = []
    for booking in ManualBooking.objects.filter(
            (Q(pod_status__iexact='pending') | Q(pod_status__iexact='rejected')) & (
                    Q(booking_id__istartswith='BROKER') | Q(booking_id__istartswith='AB'))).exclude(
        Q(booking_status='cancelled') | Q(deleted=True)):
        bookings.append({'booking_id': booking.booking_id})
    return render(
        request=request,
        template_name='fileupload/pod_upload.html',
        context={'lr_numbers': lr_numbers, 'bookings': bookings}
    )
def upload_pod(request):
    """Token-authenticated POD upload endpoint.

    POST['lr_number'] may be an LrNumber pk or a BROKER*/AB* booking id. On
    success the file is stored via create_pod_file, the booking is advanced
    through the 'unloaded' and 'pod_uploaded' statuses, and the serialized
    file record is returned as JSON.
    """
    token = get_or_none(Token, key=request.POST.get('Authorization'))
    if not isinstance(token, Token):
        return HttpResponseRedirect('/login/')
    try:
        lr_number_id = request.POST.get('lr_number', None)
        if not lr_number_id:
            return json_error_response('lr_number id not provided', status=400)
        # Booking ids share the same POST field as LR primary keys.
        if str(lr_number_id).startswith('BROKER') or str(lr_number_id).startswith('AB'):
            booking = get_or_none(ManualBooking, booking_id=lr_number_id)
            lr_obj = None
        else:
            lr_obj = get_or_none(LrNumber, id=lr_number_id)
            booking = None if not isinstance(lr_obj, LrNumber) else lr_obj.booking
        if not (isinstance(lr_obj, LrNumber) or isinstance(booking, ManualBooking)):
            return json_error_response('LrNumber with id=%s not found' % lr_number_id, status=404)
        if not request.FILES:
            return json_error_response('no file to upload', status=400)
        upload_file = request.FILES.get('file')
        pod_file = create_pod_file(lr_number=lr_obj, upload_file=upload_file, user=token.user, booking=booking)
        # 'unloaded' status is attributed to the admin user; 'pod_uploaded' to the uploader.
        booking_unloaded = check_booking_status(booking, 'unloaded')
        if not booking_unloaded:
            create_new_booking_status(booking, 'unloaded', User.objects.get(username='admin'))
        else:
            update_booking_status(booking, 'unloaded', 'in_progress', User.objects.get(username='admin'))
        booking_pod_uploaded = check_booking_status(booking, 'pod_uploaded')
        if not booking_pod_uploaded:
            create_new_booking_status(booking, 'pod_uploaded', token.user)
        else:
            update_booking_status(booking, 'pod_uploaded', 'in_progress', token.user)
        return upload_json_response(request, data={'files': [serialize(pod_file)]})
    except UnreadablePostError:
        return HttpResponseRedirect('/upload/pod/')
def delete_pod(request, pk):
    """Delete a PODFile row and its backing S3 object."""
    pod_file_id = pk
    if not pod_file_id:
        return json_error_response('pod_file_id not provided', status=400)
    pod_file = get_or_none(PODFile, id=pod_file_id)
    if not pod_file:
        return json_error_response('PODFile with id=%s not found' % pod_file_id, status=404)
    pod_file.s3_upload.delete_from_s3()
    pod_file.delete()
    return upload_json_response(request, True)
def pod_list(request):
    """Return every PODFile, serialized, as JSON."""
    files = [serialize(p) for p in PODFile.objects.select_related('lr_number', 's3_upload').all()]
    return upload_json_response(request, data={'files': files})
def upload_json_response(request, data):
    """Wrap ``data`` in a JSONResponse using the mimetype the client accepts."""
    response = JSONResponse(data, mimetype=response_mimetype(request))
    response['Content-Disposition'] = 'inline; filename=files.json'
    return response
def get_new_serial(model, **kwargs):
    """Return an 8-digit serial unique among ``model`` rows matching ``kwargs``.

    Tries up to nine random candidates before giving up with AssertionError.
    """
    for _attempt in range(9):
        candidate = random_id(num_digits=8)
        if not model.objects.filter(serial=candidate, **kwargs).exists():
            return candidate
    raise AssertionError('Max retry reached, something is not right')
def create_pod_file(lr_number, upload_file, user, booking):
    """Save an uploaded POD document to S3 and record a PODFile for it.

    Either ``booking`` (a ManualBooking) or ``lr_number`` (an LrNumber) names
    the target; when one matches, the booking's pod_status is reset to
    'unverified' and pod_date stamped to now. Otherwise the file is stored
    under a purely random serial.

    Returns the created PODFile, or None if serializer validation fails.
    """
    orig_filename = upload_file.name
    if isinstance(booking, ManualBooking):
        serial = get_new_serial(PODFile, booking=booking)
        new_filename = 'POD-%s-%s.%s' % (slugify(booking.booking_id), serial, get_ext(orig_filename))
        mb = ManualBooking.objects.get(booking_id=booking.booking_id)
        mb.pod_status = 'unverified'
        mb.pod_date = datetime.now()
        mb.save()
    elif isinstance(lr_number, LrNumber):
        serial = get_new_serial(PODFile, lr_number=lr_number)
        new_filename = 'POD-%s-%s.%s' % (slugify(lr_number.lr_number), serial, get_ext(orig_filename))
        mb = ManualBooking.objects.get(booking_id=lr_number.booking.booking_id)
        mb.pod_status = 'unverified'
        mb.pod_date = datetime.now()
        mb.save()
    else:
        # No booking/LR match: still store the file under a random serial.
        serial = random_id(num_digits=8)
        new_filename = 'POD-%s-%s.%s' % (serial, serial, get_ext(orig_filename))
    s3_upload = s3util.save_to_s3_uploads_pod(new_filename, upload_file)
    podfile_serializer = PODFileSerializer(data={
        'serial': serial,
        'lr_number': lr_number.id if isinstance(lr_number, LrNumber) else None,
        'booking': booking.id if isinstance(booking, ManualBooking) else None,
        'uploaded_by': user.username,
        'changed_by': user.username,
        's3_upload': s3_upload.id
    })
    if podfile_serializer.is_valid():
        # Leftover debug print removed; return the saved instance directly.
        return podfile_serializer.save()
    return None
'''
Upload weight receipt file
'''
def upload_weighing_slip_docs(request):
    """Token-authenticated upload of a weighing slip for a booking (POST['mb_id'])."""
    token = get_or_none(Token, key=request.POST.get('Authorization'))
    if not isinstance(token, Token):
        return HttpResponseRedirect('/login/')
    try:
        mb_id = int_or_none(request.POST.get('mb_id', None))
        if not mb_id:
            return json_error_response('mb_id id not provided', status=400)
        booking = get_or_none(ManualBooking, id=mb_id)
        if not isinstance(booking, ManualBooking):
            return json_error_response('Manual Booking with id=%s not found' % mb_id, status=404)
        if not request.FILES:
            return json_error_response('no file to upload', status=400)
        upload_file = request.FILES.get('file')
        weighing_slip_files = create_weighing_slip_file(booking, upload_file, token.user)
        return upload_json_response(request, data={'files': [serialize(weighing_slip_files)]})
    except UnreadablePostError:
        # NOTE(review): redirect target is the vehicle-document page -- confirm intended.
        return HttpResponseRedirect('/upload/upload-vehicle-document-page/')
def create_weighing_slip_file(booking, upload_file, user):
    """Store the slip on S3 and create a verified, valid WeighingSlip for ``booking``."""
    orig_filename = upload_file.name
    serial = get_new_serial(WeighingSlip, booking=booking)
    new_filename = 'weighing-slip-{}-{}.{}'.format(
        slugify(booking.booking_id), serial, get_ext(orig_filename)
    )
    s3_upload = s3util.save_to_s3_uploads_weighing_slip(new_filename, upload_file)
    weighing_slip = WeighingSlip.objects.create(booking=booking, serial=serial,verified=True,is_valid=True,
                                                s3_upload=s3_upload, uploaded_by=user)
    return weighing_slip
def create_weighing_slip_entries():
    # NOTE(review): this body appears copy-pasted from create_vehicle_entries --
    # it scans the 'uploads/weighingslip' folder but then looks up Vehicle rows
    # and creates VehicleFile records. Confirm whether WeighingSlip records
    # were intended here.
    uploads = S3Upload.objects.filter(folder='uploads/weighingslip')
    for up in uploads:
        # Second hyphen-separated token is treated as a Vehicle pk; dotfiles skipped.
        vehicle = None if up.filename.startswith('.') else up.filename.split('-')[1]
        if not vehicle:
            continue
        if '_' in up.filename:
            serial = up.filename[13:20]
        else:
            serial = ''
        vehicle = Vehicle.objects.get(id=vehicle)
        try:
            VehicleFile.objects.create(
                vehicle=vehicle,
                serial=serial,
                s3_upload=up,
            )
        except:
            # best-effort backfill: creation failures are silently ignored
            pass
def weighing_slip_list(request):
    """Return every WeighingSlip, serialized, as JSON."""
    files = [serialize(p) for p in WeighingSlip.objects.all()]
    return upload_json_response(request, data={'files': files})
def delete_weighing_slip(request, pk):
    """Delete a WeighingSlip row and its backing S3 object."""
    weighing_slip_id = pk
    if not weighing_slip_id:
        return json_error_response('weighing_slip_id not provided', status=400)
    weighing_slip = get_or_none(WeighingSlip, id=weighing_slip_id)
    if not weighing_slip:
        return json_error_response('weighing_slip_id with id=%s not found' % weighing_slip_id, status=404)
    weighing_slip.s3_upload.delete_from_s3()
    weighing_slip.delete()
    return upload_json_response(request, True)
'''
VEHICLE DOCUMENTS UPLAOD
'''
def upload_vehicle_document_page(request):
    """Render the vehicle-document upload form (document categories + vehicles)."""
    document_categories = (
        ('PUC', 'Puc Certificate'),
        ('FIT', 'Fitness Certificate'),
        ('REG', 'Registration Certificate'),
        ('PERM', 'Permit Certificate'),
        ('INS', 'Insurance Certificate'),
    )
    vehicles = [{'id': vehicle.id, 'vehicle_number': display_format(vehicle.vehicle_number)} for vehicle in
                Vehicle.objects.all()]
    return render(
        request=request,
        template_name='fileupload/upload_vehicle_documents.html',
        context={'document_categories': document_categories, 'vehicles': vehicles}
    )
def upload_vehicle_docs(request):
    """Token-authenticated upload of one vehicle document (vehicle + category)."""
    token = get_or_none(Token, key=request.POST.get('Authorization'))
    if not isinstance(token, Token):
        return HttpResponseRedirect('/login/')
    try:
        vehicle_number_id = int_or_none(request.POST.get('vehicle_number', None))
        document_category = request.POST.get('document_category', None)
        if not vehicle_number_id:
            return json_error_response('vehicle_number id not provided', status=400)
        if not document_category:
            return json_error_response('document category id not provided', status=400)
        vehicle = get_or_none(Vehicle, id=vehicle_number_id)
        if not vehicle:
            return json_error_response('Vehicle with id=%s not found' % vehicle_number_id, status=404)
        if not request.FILES:
            return json_error_response('no file to upload', status=400)
        upload_file = request.FILES.get('file')
        vehicle_files = create_vehicle_file(vehicle, document_category, upload_file, token.user)
        return upload_json_response(request, data={'files': [serialize(vehicle_files)]})
    except UnreadablePostError:
        return HttpResponseRedirect('/upload/upload-vehicle-document-page/')
def create_vehicle_file(vehicle, document_category, upload_file, user):
    """Store a vehicle document on S3 and create its VehicleFile record."""
    orig_filename = upload_file.name
    serial = get_new_serial(VehicleFile, supplier_vehicle=vehicle)
    new_filename = 'vehicle-%s-%s-%s.%s' % (
        document_category.lower(), slugify(vehicle.vehicle_number), serial, get_ext(orig_filename)
    )
    s3_upload = s3util.save_to_s3_uploads_vehicle(new_filename, upload_file)
    vehicle_file = VehicleFile.objects.create(supplier_vehicle=vehicle, document_category=document_category, serial=serial,
                                              s3_upload=s3_upload, uploaded_by=user)
    return vehicle_file
def create_vehicle_entries():
    """Backfill VehicleFile rows from existing S3 uploads in 'uploads/vehicle'.

    The second hyphen-separated token of each filename is treated as the
    Vehicle primary key (NOTE(review): confirm against the legacy filename
    format -- current uploads place the document category there). Dotfiles
    are skipped and creation failures are ignored so the backfill is rerunnable.
    """
    uploads = S3Upload.objects.filter(folder='uploads/vehicle')
    for up in uploads:
        vehicle = None if up.filename.startswith('.') else up.filename.split('-')[1]
        if not vehicle:
            continue
        # NOTE(review): serial sliced from fixed positions [13:20] only when the
        # filename contains '_' -- confirm this matches the naming scheme.
        if '_' in up.filename:
            serial = up.filename[13:20]
        else:
            serial = ''
        vehicle = Vehicle.objects.get(id=vehicle)
        try:
            VehicleFile.objects.create(
                vehicle=vehicle,
                serial=serial,
                s3_upload=up,
            )
        except Exception:
            # Was a bare 'except:', which would also swallow SystemExit and
            # KeyboardInterrupt; narrowed to Exception while keeping the
            # deliberate best-effort behaviour.
            pass
def vehicle_list(request):
    # NOTE(review): select_related('vehicle') while create_vehicle_file writes
    # via 'supplier_vehicle' -- confirm the current field name on VehicleFile.
    files = [serialize(p) for p in VehicleFile.objects.select_related('vehicle', 's3_upload').all()]
    return upload_json_response(request, data={'files': files})
def delete_vehicle(request, pk):
    """Delete a VehicleFile row and its backing S3 object."""
    vehicle_file_id = pk
    if not vehicle_file_id:
        return json_error_response('vehicle_file_id not provided', status=400)
    vehicle_file = get_or_none(VehicleFile, id=vehicle_file_id)
    if not vehicle_file:
        return json_error_response('VehicleFile with id=%s not found' % vehicle_file_id, status=404)
    vehicle_file.s3_upload.delete_from_s3()
    vehicle_file.delete()
    return upload_json_response(request, True)
'''
SUPPLIER DOCUMENTS UPLAOD
'''
def upload_supplier_document_page(request):
    """Render the supplier-document upload form (identity-document categories + suppliers)."""
    document_categories = (
        ('PAN', 'PAN Card'),
        ('DL', 'Driving Licence'),
        ('EL', 'Election ID'),
        ('AC', 'Aadhar Card'),
        ('PT', 'Passport'),
        ('RC', 'Ration Card'),
        ('DEC', 'Declaration'),
    )
    suppliers = [{'id': supplier.id, 'name': supplier.name, 'phone': supplier.phone} for supplier in
                 Supplier.objects.all()]
    return render(
        request=request,
        template_name='fileupload/upload_supplier_documents.html',
        context={'document_categories': document_categories, 'suppliers': suppliers}
    )
def upload_supplier_docs(request):
    """Handle a supplier-document upload POST.

    Authenticates via a token in the POST body, validates the target supplier
    and category, stores the file on S3 and returns the serialized record.
    Fixes: removed leftover debug print() calls; corrected the
    'supplier_id id' message typo; renamed the misleading 'vehicle_files'
    local.
    """
    token = get_or_none(Token, key=request.POST.get('Authorization'))
    if not isinstance(token, Token):
        return HttpResponseRedirect('/login/')
    try:
        supplier_id = int_or_none(request.POST.get('supplier', None))
        document_category = request.POST.get('document_category', None)
        if not supplier_id:
            return json_error_response('supplier_id not provided', status=400)
        if not document_category:
            return json_error_response('document_category not provided', status=400)
        supplier = get_or_none(Supplier, id=supplier_id)
        if not supplier:
            return json_error_response('Supplier with id=%s not found' % supplier_id, status=404)
        if not request.FILES:
            return json_error_response('no file to upload', status=400)
        upload_file = request.FILES.get('file')
        supplier_file = create_supplier_file(supplier, document_category, upload_file, token.user)
        return upload_json_response(request, data={'files': [serialize(supplier_file)]})
    except UnreadablePostError:
        # Client aborted the upload mid-request; send them back to the form.
        return HttpResponseRedirect('/upload/upload-supplier-document-page/')
def create_supplier_file(supplier, document_category, upload_file, user):
    """Store an uploaded supplier document on S3 and record it as an OwnerFile."""
    serial = get_new_serial(OwnerFile, supplier=supplier)
    s3_name = 'supplier-%s-%s-%s-%s.%s' % (
        document_category.lower(), slugify(supplier.name), slugify(supplier.phone), serial,
        get_ext(upload_file.name)
    )
    stored = s3util.save_to_s3_uploads_supplier(s3_name, upload_file)
    return OwnerFile.objects.create(
        supplier=supplier,
        document_category=document_category,
        serial=serial,
        s3_upload=stored,
        uploaded_by=user,
    )
def create_supplier_entries():
    """One-off backfill: register every S3 object in 'uploads/supplier' as an
    OwnerFile row.

    Filenames look like 'supplier-<supplier_id>-...'; hidden files and
    unparseable names are skipped.  Best-effort throughout.
    """
    for up in S3Upload.objects.filter(folder='uploads/supplier'):
        parts = [] if up.filename.startswith('.') else up.filename.split('-')
        supplier_id = parts[1] if len(parts) > 1 else None
        if not supplier_id:
            continue
        # Legacy names containing '_' carry a serial at a fixed offset.
        serial = up.filename[13:20] if '_' in up.filename else ''
        try:
            supplier_obj = Supplier.objects.get(id=supplier_id)
        except (Supplier.DoesNotExist, ValueError):
            # Unknown or non-numeric id embedded in the filename.
            continue
        try:
            OwnerFile.objects.create(
                supplier=supplier_obj,
                serial=serial,
                s3_upload=up,
            )
        except Exception:
            # Best-effort: skip duplicates / integrity errors instead of aborting.
            pass
def supplier_list(request):
    """Return every stored supplier document as JSON."""
    queryset = OwnerFile.objects.select_related('supplier', 's3_upload').all()
    return upload_json_response(request, data={'files': [serialize(f) for f in queryset]})
def delete_supplier(request, pk):
    """Remove an OwnerFile row and its backing S3 object."""
    if not pk:
        return json_error_response('supplier_file_id not provided', status=400)
    supplier_file = get_or_none(OwnerFile, id=pk)
    if supplier_file is None:
        return json_error_response('SupplierFile with id=%s not found' % pk, status=404)
    supplier_file.s3_upload.delete_from_s3()
    supplier_file.delete()
    return upload_json_response(request, True)
'''
DRIVER DOCUMENTS UPLOAD
'''
def upload_driver_document_page(request):
    """Render the driver-document upload form with category and driver choices."""
    document_categories = (
        ('PAN', 'PAN Card'),
        ('DL', 'Driving Licence'),
        ('EL', 'Election ID'),
        ('AC', 'Aadhar Card'),
        ('PT', 'Passport'),
        ('RC', 'Ration Card'),
    )
    drivers = []
    for driver in Driver.objects.all():
        drivers.append({'id': driver.id, 'name': driver.name, 'phone': driver.phone})
    return render(
        request=request,
        template_name='fileupload/upload_driver_documents.html',
        context={'document_categories': document_categories, 'drivers': drivers}
    )
def upload_driver_docs(request):
    """Handle a driver-document upload POST: authenticate, validate, store the
    file on S3 and return the serialized DriverFile.

    Bug fix: the 404 message used to interpolate the (None) driver object
    instead of the requested id.
    """
    token = get_or_none(Token, key=request.POST.get('Authorization'))
    if not isinstance(token, Token):
        return HttpResponseRedirect('/login/')
    try:
        driver_id = int_or_none(request.POST.get('driver', None))
        document_category = request.POST.get('document_category', None)
        if not driver_id:
            return json_error_response('driver_id not provided', status=400)
        if not document_category:
            return json_error_response('document_category not provided', status=400)
        driver = get_or_none(Driver, id=driver_id)
        if not driver:
            return json_error_response('Driver with id=%s not found' % driver_id, status=404)
        if not request.FILES:
            return json_error_response('no file to upload', status=400)
        upload_file = request.FILES.get('file')
        driver_file = create_driver_file(driver, document_category, upload_file, token.user)
        return upload_json_response(request, data={'files': [serialize(driver_file)]})
    except UnreadablePostError:
        # Client aborted the upload mid-request; send them back to the form.
        return HttpResponseRedirect('/upload/upload-driver-document-page/')
def create_driver_file(driver, document_category, upload_file, user):
    """Store an uploaded driver document on S3 and record it as a DriverFile."""
    serial = get_new_serial(DriverFile, supplier_driver=driver)
    s3_name = 'driver-%s-%s-%s-%s.%s' % (
        document_category.lower(), slugify(driver.name), slugify(driver.phone), serial,
        get_ext(upload_file.name)
    )
    stored = s3util.save_to_s3_uploads_driver(s3_name, upload_file)
    return DriverFile.objects.create(
        supplier_driver=driver,
        document_category=document_category,
        serial=serial,
        s3_upload=stored,
        uploaded_by=user,
    )
def create_driver_entries():
    """One-off backfill: register every S3 object in 'uploads/driver' as a
    DriverFile row.

    Filenames look like 'driver-<driver_id>-...'; hidden files and
    unparseable names are skipped.  Best-effort throughout.
    """
    for up in S3Upload.objects.filter(folder='uploads/driver'):
        parts = [] if up.filename.startswith('.') else up.filename.split('-')
        driver_id = parts[1] if len(parts) > 1 else None
        if not driver_id:
            continue
        # Legacy names containing '_' carry a serial at a fixed offset.
        serial = up.filename[13:20] if '_' in up.filename else ''
        try:
            driver_obj = Driver.objects.get(id=driver_id)
        except (Driver.DoesNotExist, ValueError):
            # Unknown or non-numeric id embedded in the filename.
            continue
        try:
            # NOTE(review): create_driver_file() uses the 'supplier_driver'
            # kwarg for the same model -- confirm which field name is current.
            DriverFile.objects.create(
                driver=driver_obj,
                serial=serial,
                s3_upload=up,
            )
        except Exception:
            # Best-effort: skip duplicates / integrity errors instead of aborting.
            pass
def driver_list(request):
    # Return every stored driver document as JSON.
    # NOTE(review): rows elsewhere in this file are created with the
    # 'supplier_driver' kwarg -- confirm 'driver' is a real relation on
    # DriverFile, otherwise this select_related() raises FieldError.
    files = [serialize(p) for p in DriverFile.objects.select_related('driver', 's3_upload').all()]
    return upload_json_response(request, data={'files': files})
def delete_driver(request, pk):
    """Remove a DriverFile row and its backing S3 object."""
    if not pk:
        return json_error_response('driver_file_id not provided', status=400)
    driver_file = get_or_none(DriverFile, id=pk)
    if driver_file is None:
        return json_error_response('DriverFile with id=%s not found' % pk, status=404)
    driver_file.s3_upload.delete_from_s3()
    driver_file.delete()
    return upload_json_response(request, True)
def upload_cheque_document_page(request):
    """Render the cheque upload form (no extra context needed)."""
    return render(request=request, template_name='fileupload/upload_cheque.html', context={})
def upload_cheque_docs(request):
    """Handle a cheque-image upload POST.

    Validates the cheque metadata (customer, amount, unique 6-digit cheque
    number per date), stores the image on S3 and returns the serialized
    record.  Fixes the garbled 'customer_name id' / 'amount id' messages and
    uses .exists() instead of evaluating the duplicate-check queryset.
    """
    token = get_or_none(Token, key=request.POST.get('Authorization'))
    if not isinstance(token, Token):
        return HttpResponseRedirect('/login/')
    try:
        customer_name = request.POST.get('customer_name', None)
        amount = int_or_none(request.POST.get('amount', None))
        cheque_number = request.POST.get('cheque_number', None)
        remarks = request.POST.get('remarks', None)
        cheque_date = request.POST.get('cheque_date', None)
        if not customer_name:
            return json_error_response('customer_name not provided', status=400)
        if not amount:
            return json_error_response('amount not provided', status=400)
        if not cheque_date:
            return json_error_response('Cheque Date not provided', status=400)
        if not (cheque_number and len(cheque_number) == 6):
            return json_error_response('valid 6-digit cheque_number not provided', status=400)
        # One cheque number per date; .exists() avoids fetching rows for a truth test.
        if ChequeFile.objects.filter(cheque_number__iexact=cheque_number,
                                     cheque_date=django_date_format(cheque_date)).exists():
            return json_error_response('Cheque Number is not unique', status=400)
        if not request.FILES:
            return json_error_response('no file to upload', status=400)
        upload_file = request.FILES.get('file')
        cheque_file = create_cheque_file(customer_name=customer_name, amount=amount, cheque_number=cheque_number,
                                         remarks=remarks, upload_file=upload_file, user=token.user,
                                         cheque_date=django_date_format(cheque_date))
        return upload_json_response(request, data={'files': [serialize(cheque_file)]})
    except UnreadablePostError:
        # Client aborted the upload mid-request; send them back to the form.
        return HttpResponseRedirect('/upload/upload-cheque-document-page/')
def create_cheque_file(customer_name, amount, cheque_number, remarks, upload_file, user, cheque_date):
    """Store a cheque image on S3 and record it as a ChequeFile."""
    serial = get_new_serial(ChequeFile, customer_name=customer_name)
    s3_name = 'cheque-%s-%s.%s' % (slugify(customer_name), serial, get_ext(upload_file.name))
    stored = s3util.save_to_s3_payment_cheque(s3_name, upload_file)
    return ChequeFile.objects.create(
        customer_name=customer_name,
        amount=amount,
        cheque_number=cheque_number,
        remarks=remarks,
        serial=serial,
        s3_upload=stored,
        uploaded_by=user,
        cheque_date=cheque_date
    )
def create_cheque_entries():
    """One-off backfill: register every S3 object in 'uploads/cheque' as a
    ChequeFile row.  Hidden files and unparseable names are skipped;
    best-effort throughout.
    """
    for up in S3Upload.objects.filter(folder='uploads/cheque'):
        parts = [] if up.filename.startswith('.') else up.filename.split('-')
        cheque = parts[1] if len(parts) > 1 else None
        if not cheque:
            continue
        # Legacy names containing '_' carry a serial at a fixed offset.
        serial = up.filename[13:20] if '_' in up.filename else ''
        try:
            ChequeFile.objects.create(
                serial=serial,
                s3_upload=up,
            )
        except Exception:
            # Best-effort: skip duplicates / integrity errors instead of aborting.
            pass
def cheque_list(request):
    """Return every stored cheque as JSON.

    Bug fix: 'customer_name' is a plain value column (rows are created with a
    string in create_cheque_file), not a relation, so select_related() on it
    would raise FieldError at query time.
    """
    files = [serialize(p) for p in ChequeFile.objects.select_related('s3_upload').all()]
    return upload_json_response(request, data={'files': files})
class ChequeFileListView(ListView):
    # JSON list endpoint for cheque files (jQuery-File-Upload style response).
    model = ChequeFile
    def render_to_response(self, context, **response_kwargs):
        # NOTE(review): the [0:0] slice always yields an empty queryset, so
        # this endpoint currently returns no files -- confirm whether that is
        # a deliberate kill-switch or a leftover debugging slice.
        files = [serialize(p) for p in self.get_queryset().order_by('cheque_date')[0:0]]
        data = {'files': files}
        response = JSONResponse(data, mimetype=response_mimetype(self.request))
        response['Content-Disposition'] = 'inline; filename=files.json'
        return response
def delete_cheque(request, pk):
    """Remove a ChequeFile row and its backing S3 object."""
    if not pk:
        return json_error_response('cheque_file_id not provided', status=400)
    cheque_file = get_or_none(ChequeFile, id=pk)
    if cheque_file is None:
        return json_error_response('ChequeFile with id=%s not found' % pk, status=404)
    cheque_file.s3_upload.delete_from_s3()
    cheque_file.delete()
    return upload_json_response(request, True)
def upload_invoice_receipt_page(request):
    """Render the invoice-receipt upload form (no extra context needed)."""
    return render(request=request, template_name='fileupload/invoice_receipt.html', context={})
def update_invoice_booking_status(invoice_id, mode, user):
    """Propagate an invoice status change ('party_invoice_sent' or
    'invoice_confirmed') to every booking attached to the invoice, and record
    or advance the per-booking status entry for that mode."""
    invoice = get_or_none(Invoice, id=invoice_id)
    if not invoice:
        return
    status_for_mode = {
        'party_invoice_sent': 'invoice_sent',
        'invoice_confirmed': 'invoice_confirmed',
    }
    for booking in invoice.bookings.all():
        new_status = status_for_mode.get(mode)
        if new_status:
            ManualBooking.objects.filter(id=booking.id).update(invoice_status=new_status)
        # A pre-existing status entry is advanced; otherwise one is created.
        if check_booking_status(booking, mode):
            update_booking_status(booking, mode, 'in_progress', user)
        else:
            create_new_booking_status(booking, mode, user)
def upload_invoice_receipt_docs(request):
    """Handle the invoice sent/confirmed POST.

    Validates the sent/confirm modes, stores an optional proof file on S3,
    records an InvoiceReceiptFile and advances the invoice's booking
    statuses.  Fixes the 'doesnot exists' message.
    """
    token = get_or_none(Token, key=request.POST.get('Authorization'))
    if not isinstance(token, Token):
        return HttpResponseRedirect('/login/')
    try:
        invoice = get_or_none(Invoice, invoice_number=request.POST.get('invoice_number', None))
        if not isinstance(invoice, Invoice):
            return json_error_response('invoice_number does not exist', status=400)
        if not request.FILES:
            return json_error_response('no file to upload', status=400)
        invoice_sent_mode = request.POST.get('invoice_sent_mode', None)
        invoice_confirm_mode = request.POST.get('invoice_confirm_mode', None)
        invoice_confirm_by_name = request.POST.get('invoice_confirm_by_name', None)
        invoice_confirm_by_phone = request.POST.get('invoice_confirm_by_phone', None)
        if invoice_sent_mode and invoice_sent_mode not in [x[0] for x in INVOICE_SENT_MODE_CHOICES]:
            return json_error_response('Invalid Invoice Sent Mode', status=400)
        if invoice_confirm_mode:
            if invoice_confirm_mode not in [x[0] for x in INVOICE_CONFIRM_MODE_CHOICES]:
                return json_error_response('Invalid Invoice Confirm Mode', status=400)
            # Phone confirmations must identify who confirmed.
            if invoice_confirm_mode == 'PH' and not invoice_confirm_by_name and not invoice_confirm_by_phone:
                return json_error_response('Invoice Confirm Name and Phone required', status=400)
        if not invoice_sent_mode and not invoice_confirm_mode:
            return json_error_response('Invoice Sent or Confirm Mode Required', status=400)
        upload_file = request.FILES.get('file') if 'file' in request.FILES else None
        invoice_receipt = create_invoice_receipt_file(invoice.invoice_number, upload_file, token.user,
                                                      invoice_sent_mode, invoice_confirm_mode,
                                                      invoice_confirm_by_name, invoice_confirm_by_phone)
        if invoice_sent_mode:
            update_invoice_booking_status(invoice.id, 'party_invoice_sent', token.user)
        if invoice_confirm_mode:
            update_invoice_booking_status(invoice.id, 'invoice_confirmed', token.user)
        return upload_json_response(request, data={'files': [serialize(invoice_receipt)]})
    except UnreadablePostError:
        # TODO(review): this redirect targets the cheque page -- looks like a
        # copy-paste; the invoice-receipt form is probably intended.
        return HttpResponseRedirect('/upload/upload-cheque-document-page/')
def create_invoice_receipt_file(invoice_number, upload_file, user, invoice_sent_mode, invoice_confirm_mode,
                                invoice_confirm_by_name, invoice_confirm_by_phone):
    """Record an invoice sent/confirmed event, optionally storing a proof
    file on S3; returns the new InvoiceReceiptFile."""
    serial = get_new_serial(InvoiceReceiptFile, invoice_number=invoice_number)
    s3_upload = None
    if upload_file:
        s3_name = 'inv-receipt-%s-%s.%s' % (slugify(invoice_number), serial, get_ext(upload_file.name))
        s3_upload = s3util.save_to_s3_payment_invoice_receipt(s3_name, upload_file)
    return InvoiceReceiptFile.objects.create(
        serial=serial,
        invoice_number=invoice_number,
        invoice_receipt=Invoice.objects.filter(invoice_number=invoice_number).last(),
        s3_upload=s3_upload,
        uploaded_by=user,
        invoice_sent_mode=invoice_sent_mode,
        invoice_confirm_mode=invoice_confirm_mode,
        invoice_confirm_by_name=invoice_confirm_by_name,
        invoice_confirm_by_phone=invoice_confirm_by_phone,
    )
def invoice_receipt_list(request):
    """Return every invoice receipt record as JSON."""
    serialized = [serialize(item) for item in InvoiceReceiptFile.objects.all()]
    return upload_json_response(request, data={'files': serialized})
class InvoiceReceiptListView(ListView):
    # JSON list endpoint for invoice receipts.
    model = InvoiceReceiptFile
    def render_to_response(self, context, **response_kwargs):
        # NOTE(review): the [0:0] slice always yields an empty queryset, so
        # this endpoint currently returns no files -- confirm whether that is
        # a deliberate kill-switch or a leftover debugging slice.
        files = [serialize(p) for p in self.get_queryset().order_by('created_on')[0:0]]
        data = {'files': files}
        response = JSONResponse(data, mimetype=response_mimetype(self.request))
        response['Content-Disposition'] = 'inline; filename=files.json'
        return response
def delete_invoice_receipt(request, pk):
    """Remove an InvoiceReceiptFile row and, when present, its S3 object.

    Fixes: the error messages used to mention cheque files (copy-paste), and
    the unconditional s3_upload.delete_from_s3() crashed for receipts saved
    without a file (create_invoice_receipt_file allows s3_upload=None).
    """
    if not pk:
        return json_error_response('invoice_receipt_id not provided', status=400)
    invoice_receipt = get_or_none(InvoiceReceiptFile, id=pk)
    if not invoice_receipt:
        return json_error_response('InvoiceReceiptFile with id=%s not found' % pk, status=404)
    if invoice_receipt.s3_upload:
        invoice_receipt.s3_upload.delete_from_s3()
    invoice_receipt.delete()
    return upload_json_response(request, True)
|
shiver-me-timbers/smt-cloudformation-parent
|
smt-cloudformation-generation/src/main/java/shiver/me/timbers/cloudformation/types/JavaTypes.java
|
package shiver.me.timbers.cloudformation.types;
import java.util.stream.IntStream;
import static java.util.stream.Collectors.joining;
/**
 * Maps CloudFormation-style resource names (e.g. {@code AWS::S3::Bucket})
 * onto Java class and package names rooted at a configurable base package.
 */
public class JavaTypes {

    private final String basePackage;
    private final String defaultPackageName;

    /**
     * @param basePackage        root package prepended to every generated package
     * @param defaultPackageName package used when a resource name has no namespace
     */
    public JavaTypes(String basePackage, String defaultPackageName) {
        this.basePackage = basePackage;
        this.defaultPackageName = defaultPackageName;
    }

    /** Returns the simple class name, fusing inner '.' segments (e.g. "Instance.Tag" -&gt; "InstanceTag"). */
    public String extractClassName(String resourceName) {
        // '.' is a literal here, so String.replace is equivalent to the regex form.
        return takeLast(resourceName, "::").replace(".", "");
    }

    /** Returns only the resource part of the class name, dropping any '.'-suffixed property type. */
    public String extractResourceClassName(String resourceName) {
        final String name = takeLast(resourceName, "::");
        return name.contains(".") ? name.split("\\.")[0] : name;
    }

    /** Converts a resource key's namespace into a lower-cased package under the base package. */
    public String parsePackage(String key) {
        final String namespace = extractPackage(key);
        return namespace.isEmpty()
                ? addBasePackage(defaultPackageName)
                : addBasePackage(namespace.replaceAll("::", ".").toLowerCase());
    }

    /** Returns everything before the last "::" segment, still '::'-delimited ("" if there is none). */
    public String extractPackage(String key) {
        final String[] segments = key.split("::");
        final StringBuilder namespace = new StringBuilder();
        for (int i = 0; i < segments.length - 1; i++) {
            if (i > 0) {
                namespace.append("::");
            }
            namespace.append(segments[i]);
        }
        return namespace.toString();
    }

    private String takeLast(String string, String delimiter) {
        final String[] segments = string.split(delimiter);
        return segments[segments.length - 1];
    }

    private String addBasePackage(String packageName) {
        return basePackage + "." + packageName;
    }
}
|
doodzik/google-cloud-ruby
|
google-cloud-bigquery/test/google/cloud/bigquery/service_test.rb
|
<reponame>doodzik/google-cloud-ruby
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "helper"
# Unit tests for Service.table_ref_from_s, which parses a BigQuery table path
# in standard ("project.dataset.table") or legacy ("project:dataset.table")
# SQL format into a TableReference, filling any missing parts from an
# optional default reference (project-, dataset- or table-level).
describe Google::Cloud::Bigquery::Service do
  Service = Google::Cloud::Bigquery::Service
  # Identifiers used when the parsed string supplies the value itself.
  let(:project_id) { "my-project" }
  let(:dataset_id) { "my_dataset" }
  let(:table_id) { "my_table" }
  # Identifiers that should only appear when pulled from the default ref.
  let(:project_id_default) { "my-project-default" }
  let(:dataset_id_default) { "my_dataset_default" }
  let(:table_id_default) { "my_table_default" }
  let(:project_default_ref) { Google::Apis::BigqueryV2::ProjectReference.new project_id: project_id_default }
  let(:dataset_default_ref) { Google::Apis::BigqueryV2::DatasetReference.new project_id: project_id_default, dataset_id: dataset_id_default }
  let(:table_default_ref) { Google::Apis::BigqueryV2::TableReference.new project_id: project_id_default, dataset_id: dataset_id_default, table_id: table_id_default }
  # Fully-qualified input: the default ref (if any) must never win.
  it "returns table ref from standard sql format with project, dataset, table and no default ref" do
    table_ref = Service.table_ref_from_s "#{project_id}.#{dataset_id}.#{table_id}"
    _(table_ref).must_be_kind_of Google::Apis::BigqueryV2::TableReference
    _(table_ref.project_id).must_equal project_id
    _(table_ref.dataset_id).must_equal dataset_id
    _(table_ref.table_id).must_equal table_id
  end
  it "returns table ref from legacy sql format with project, dataset, table and no default ref" do
    table_ref = Service.table_ref_from_s "#{project_id}:#{dataset_id}.#{table_id}"
    _(table_ref.project_id).must_equal project_id
    _(table_ref.dataset_id).must_equal dataset_id
    _(table_ref.table_id).must_equal table_id
  end
  it "returns table ref from standard sql format with project, dataset, table and project default ref" do
    table_ref = Service.table_ref_from_s "#{project_id}.#{dataset_id}.#{table_id}", default_ref: project_default_ref
    _(table_ref.project_id).must_equal project_id
    _(table_ref.dataset_id).must_equal dataset_id
    _(table_ref.table_id).must_equal table_id
  end
  it "returns table ref from standard sql format with project, dataset, table and dataset default ref" do
    table_ref = Service.table_ref_from_s "#{project_id}.#{dataset_id}.#{table_id}", default_ref: dataset_default_ref
    _(table_ref.project_id).must_equal project_id
    _(table_ref.dataset_id).must_equal dataset_id
    _(table_ref.table_id).must_equal table_id
  end
  it "returns table ref from standard sql format with project, dataset, table and table default ref" do
    table_ref = Service.table_ref_from_s "#{project_id}.#{dataset_id}.#{table_id}", default_ref: table_default_ref
    _(table_ref.project_id).must_equal project_id
    _(table_ref.dataset_id).must_equal dataset_id
    _(table_ref.table_id).must_equal table_id
  end
  # Partial input "dataset.table": only the project comes from the default.
  it "returns table ref from standard sql format with dataset, table and project default ref" do
    table_ref = Service.table_ref_from_s "#{dataset_id}.#{table_id}", default_ref: project_default_ref
    _(table_ref.project_id).must_equal project_id_default
    _(table_ref.dataset_id).must_equal dataset_id
    _(table_ref.table_id).must_equal table_id
  end
  it "returns table ref from standard sql format with dataset, table and dataset default ref" do
    table_ref = Service.table_ref_from_s "#{dataset_id}.#{table_id}", default_ref: dataset_default_ref
    _(table_ref.project_id).must_equal project_id_default
    _(table_ref.dataset_id).must_equal dataset_id
    _(table_ref.table_id).must_equal table_id
  end
  it "returns table ref from standard sql format with dataset, table and table default ref" do
    table_ref = Service.table_ref_from_s "#{dataset_id}.#{table_id}", default_ref: table_default_ref
    _(table_ref.project_id).must_equal project_id_default
    _(table_ref.dataset_id).must_equal dataset_id
    _(table_ref.table_id).must_equal table_id
  end
  # Bare table name: a project-only default cannot supply the dataset.
  it "raises from standard sql format with table and project default ref" do
    err = expect do
      Service.table_ref_from_s table_id, default_ref: project_default_ref
    end.must_raise ArgumentError
    _(err.message).must_equal "TableReference is missing dataset_id"
  end
  it "returns table ref from standard sql format with table and dataset default ref" do
    table_ref = Service.table_ref_from_s table_id, default_ref: dataset_default_ref
    _(table_ref.project_id).must_equal project_id_default
    _(table_ref.dataset_id).must_equal dataset_id_default
    _(table_ref.table_id).must_equal table_id
  end
  it "returns table ref from standard sql format with table and table default ref" do
    table_ref = Service.table_ref_from_s table_id, default_ref: table_default_ref
    _(table_ref.project_id).must_equal project_id_default
    _(table_ref.dataset_id).must_equal dataset_id_default
    _(table_ref.table_id).must_equal table_id
  end
end
|
sillsdev/crosswalk
|
sysapps/device_capabilities/device_capabilities_extension.cc
|
<reponame>sillsdev/crosswalk<gh_stars>1-10
// Copyright (c) 2013 Intel Corporation. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "xwalk/sysapps/device_capabilities/device_capabilities_extension.h"
#include "grit/xwalk_sysapps_resources.h"
#include "ui/base/resource/resource_bundle.h"
#include "xwalk/sysapps/device_capabilities/device_capabilities.h"
#include "xwalk/sysapps/device_capabilities/device_capabilities_object.h"
namespace xwalk {
namespace sysapps {
namespace experimental {
using jsapi::device_capabilities::DeviceCapabilitiesConstructor::Params;
// Extension entry point: registers the JS API under
// "xwalk.experimental.system" and serves the bundled JS shim from the
// resource bundle.
DeviceCapabilitiesExtension::DeviceCapabilitiesExtension() {
  set_name("xwalk.experimental.system");
  set_javascript_api(ResourceBundle::GetSharedInstance().GetRawDataResource(
      IDR_XWALK_SYSAPPS_DEVICE_CAPABILITIES_API).as_string());
}
DeviceCapabilitiesExtension::~DeviceCapabilitiesExtension() {}
// Called by the extension framework for each new context; ownership of the
// returned instance is taken by the caller.
XWalkExtensionInstance* DeviceCapabilitiesExtension::CreateInstance() {
  return new DeviceCapabilitiesInstance();
}
// Wires the JSON-RPC style handler: "deviceCapabilitiesConstructor" messages
// are routed to OnDeviceCapabilitiesConstructor below.
DeviceCapabilitiesInstance::DeviceCapabilitiesInstance()
  : handler_(this),
    store_(&handler_) {
  handler_.Register("deviceCapabilitiesConstructor",
      base::Bind(&DeviceCapabilitiesInstance::OnDeviceCapabilitiesConstructor,
                 base::Unretained(this)));
}
// Forwards every incoming message to the registered function handlers.
void DeviceCapabilitiesInstance::HandleMessage(scoped_ptr<base::Value> msg) {
  handler_.HandleMessage(std::move(msg));
}
// JS-side "new DeviceCapabilities()": creates the backing binding object and
// registers it in the store under the object id supplied by the caller.
void DeviceCapabilitiesInstance::OnDeviceCapabilitiesConstructor(
    scoped_ptr<XWalkExtensionFunctionInfo> info) {
  scoped_ptr<Params> params(Params::Create(*info->arguments()));
  scoped_ptr<BindingObject> obj(new DeviceCapabilitiesObject());
  store_.AddBindingObject(params->object_id, std::move(obj));
}
} // namespace experimental
} // namespace sysapps
} // namespace xwalk
|
kuangdai/AxiSEM3D
|
SOLVER/src/core/element/grad/Gradient.cpp
|
// Gradient.cpp
// created by Kuangdai on 19-May-2017
// elemental gradient
#include "Gradient.h"
#include "FluidElement.h"
#include "SolidElement.h"
// Caches the coordinate-mapping operators of one spectral element.
// dsdxii/dsdeta/dzdxii/dzdeta are the derivatives of the physical (s, z)
// coordinates w.r.t. the reference (xii, eta) coordinates; inv_s = 1/s.
// Axial elements use the GLJ differentiation operators along xii, all other
// elements (and always the eta direction) use GLL.
Gradient::Gradient(const RDMatPP &dsdxii, const RDMatPP &dsdeta,
    const RDMatPP &dzdxii, const RDMatPP &dzdeta,
    const RDMatPP &inv_s, bool axial):
mDsDxii(dsdxii.cast<Real>()), mDsDeta(dsdeta.cast<Real>()),
mDzDxii(dzdxii.cast<Real>()), mDzDeta(dzdeta.cast<Real>()),
mInv_s(inv_s.cast<Real>()), mAxial(axial) {
    if (mAxial) {
        // axial element: GLJ quadrature in the xii direction
        sG_xii = &sG_GLJ;
        sGT_xii = &sGT_GLJ;
    } else {
        sG_xii = &sG_GLL;
        sGT_xii = &sGT_GLL;
    }
    // eta direction is always GLL
    sG_eta = &sG_GLL;
    sGT_eta = &sGT_GLL;
}
// Gradient of a scalar field u, given as Fourier coefficients u[alpha] per
// azimuthal order, in the element's reference coordinates.  Output
// u_i[alpha] = {d/dz, azimuthal, d/ds} components.  The alpha == 0 term is
// purely real and handled separately as an optimization.
// NOTE(review): the function-local `static` scratch matrices make this
// method non-reentrant -- presumably one solver thread per process; confirm
// before adding threading.
void Gradient::computeGrad(const vec_CMatPP &u, vec_ar3_CMatPP &u_i, int Nu, int nyquist) const {
    // hardcode for alpha = 0
    static RMatPP GUR, UGR;
    GUR = (*sGT_xii) * u[0].real();
    UGR = u[0].real() * (*sG_eta);
    u_i[0][0].real() = mDzDeta.schur(GUR) + mDzDxii.schur(UGR);
    u_i[0][1].real().setZero();
    u_i[0][2].real() = mDsDeta.schur(GUR) + mDsDxii.schur(UGR);
    // alpha > 0
    static CMatPP v, GU, UG;
    for (int alpha = 1; alpha <= Nu - nyquist; alpha++) {
        Complex iialpha = (Real)alpha * ii;
        v = iialpha * u[alpha];
        GU = (*sGT_xii) * u[alpha];
        UG = u[alpha] * (*sG_eta);
        u_i[alpha][0] = mDzDeta.schur(GU) + mDzDxii.schur(UG);
        u_i[alpha][1] = mInv_s.schur(v);
        u_i[alpha][2] = mDsDeta.schur(GU) + mDsDxii.schur(UG);
        if (mAxial) {
            // axis correction on the first GLJ row where s -> 0
            u_i[alpha][1].row(0) += mDzDeta.row(0).schur((*sGT_xii).row(0) * v);
        }
    }
    // mask Nyquist
    if (nyquist) {
        u_i[Nu][0].setZero();
        u_i[Nu][1].setZero();
        u_i[Nu][2].setZero();
    }
}
void Gradient::computeQuad(vec_CMatPP &f, const vec_ar3_CMatPP &f_i, int Nu, int nyquist) const {
// hardcode for mbeta = 0
static RMatPP XR, YR;
XR = mDzDeta.schur(f_i[0][0].real()) + mDsDeta.schur(f_i[0][2].real());
YR = mDzDxii.schur(f_i[0][0].real()) + mDsDxii.schur(f_i[0][2].real());
f[0].real() = (*sG_xii) * XR + YR * (*sGT_eta);
// mbeta > 0
static CMatPP g, X, Y;
for (int mbeta = 1; mbeta <= Nu - nyquist; mbeta++) {
Complex iibeta = - (Real)mbeta * ii;
g = iibeta * f_i[mbeta][1];
X = mDzDeta.schur(f_i[mbeta][0]) + mDsDeta.schur(f_i[mbeta][2]);
Y = mDzDxii.schur(f_i[mbeta][0]) + mDsDxii.schur(f_i[mbeta][2]);
f[mbeta] = (*sG_xii) * X + Y * (*sGT_eta) + mInv_s.schur(g);
if (mAxial) {
f[mbeta] += (*sG_xii).col(0) * mDzDeta.row(0).schur(g.row(0));
}
}
// mask Nyquist
if (nyquist) {
f[Nu].setZero();
}
}
// Full 3x3 gradient tensor ui_j (9 components, row-major i*3+j) of a vector
// field ui given per azimuthal order alpha.  The alpha == 0 term is purely
// real and expanded separately; alpha > 0 couples the azimuthal component
// through i*alpha factors (v0/v1/v2 below).
// NOTE(review): function-local `static` scratch matrices -- non-reentrant.
void Gradient::computeGrad9(const vec_ar3_CMatPP &ui, vec_ar9_CMatPP &ui_j, int Nu, int nyquist) const {
    // hardcode for alpha = 0
    static RMatPP GU0R, GU1R, GU2R, UG0R, UG1R, UG2R;
    GU0R = (*sGT_xii) * ui[0][0].real();
    GU1R = (*sGT_xii) * ui[0][1].real();
    GU2R = (*sGT_xii) * ui[0][2].real();
    UG0R = ui[0][0].real() * (*sG_eta);
    UG1R = ui[0][1].real() * (*sG_eta);
    UG2R = ui[0][2].real() * (*sG_eta);
    ui_j[0][0].real() = mDzDeta.schur(GU0R) + mDzDxii.schur(UG0R);
    ui_j[0][1].real() = -mInv_s.schur(ui[0][1].real());
    ui_j[0][2].real() = mDsDeta.schur(GU0R) + mDsDxii.schur(UG0R);
    ui_j[0][3].real() = mDzDeta.schur(GU1R) + mDzDxii.schur(UG1R);
    ui_j[0][4].real() = mInv_s.schur(ui[0][0].real());
    ui_j[0][5].real() = mDsDeta.schur(GU1R) + mDsDxii.schur(UG1R);
    ui_j[0][6].real() = mDzDeta.schur(GU2R) + mDzDxii.schur(UG2R);
    ui_j[0][7].real().setZero();
    ui_j[0][8].real() = mDsDeta.schur(GU2R) + mDsDxii.schur(UG2R);
    if (mAxial) {
        // axis corrections on the first GLJ row where s -> 0
        ui_j[0][4].row(0).real() += mDzDeta.row(0).schur((*sGT_xii).row(0) * ui[0][0].real());
        ui_j[0][1].row(0).real() -= mDzDeta.row(0).schur((*sGT_xii).row(0) * ui[0][1].real());
    }
    // alpha > 0
    static CMatPP v0, v1, v2, GU0, GU1, GU2, UG0, UG1, UG2;
    for (int alpha = 1; alpha <= Nu - nyquist; alpha++) {
        Complex iialpha = (Real)alpha * ii;
        v0 = ui[alpha][0] + iialpha * ui[alpha][1];
        v1 = iialpha * ui[alpha][0] - ui[alpha][1];
        v2 = iialpha * ui[alpha][2];
        GU0 = (*sGT_xii) * ui[alpha][0];
        GU1 = (*sGT_xii) * ui[alpha][1];
        GU2 = (*sGT_xii) * ui[alpha][2];
        UG0 = ui[alpha][0] * (*sG_eta);
        UG1 = ui[alpha][1] * (*sG_eta);
        UG2 = ui[alpha][2] * (*sG_eta);
        ui_j[alpha][0] = mDzDeta.schur(GU0) + mDzDxii.schur(UG0);
        ui_j[alpha][1] = mInv_s.schur(v1);
        ui_j[alpha][2] = mDsDeta.schur(GU0) + mDsDxii.schur(UG0);
        ui_j[alpha][3] = mDzDeta.schur(GU1) + mDzDxii.schur(UG1);
        ui_j[alpha][4] = mInv_s.schur(v0);
        ui_j[alpha][5] = mDsDeta.schur(GU1) + mDsDxii.schur(UG1);
        ui_j[alpha][6] = mDzDeta.schur(GU2) + mDzDxii.schur(UG2);
        ui_j[alpha][7] = mInv_s.schur(v2);
        ui_j[alpha][8] = mDsDeta.schur(GU2) + mDsDxii.schur(UG2);
        if (mAxial) {
            // axis corrections on the first GLJ row where s -> 0
            ui_j[alpha][4].row(0) += mDzDeta.row(0).schur((*sGT_xii).row(0) * v0);
            ui_j[alpha][1].row(0) += mDzDeta.row(0).schur((*sGT_xii).row(0) * v1);
            ui_j[alpha][7].row(0) += mDzDeta.row(0).schur((*sGT_xii).row(0) * v2);
            if (alpha == 1) {
                // extra correction only for the first azimuthal order
                ui_j[alpha][4].row(0) += mDzDxii.row(0).schur(v0.row(0) * (*sG_eta));
                ui_j[alpha][1].row(0) += mDzDxii.row(0).schur(v1.row(0) * (*sG_eta));
            }
        }
    }
    // mask Nyquist
    if (nyquist) {
        ui_j[Nu][0].setZero();
        ui_j[Nu][1].setZero();
        ui_j[Nu][2].setZero();
        ui_j[Nu][3].setZero();
        ui_j[Nu][4].setZero();
        ui_j[Nu][5].setZero();
        ui_j[Nu][6].setZero();
        ui_j[Nu][7].setZero();
        ui_j[Nu][8].setZero();
    }
}
// Adjoint of computeGrad9: contracts a full 3x3 flux tensor fi_j back into
// the 3-component field coefficients fi[mbeta].  mbeta == 0 handled as a
// purely real special case.
// NOTE(review): function-local `static` scratch matrices -- non-reentrant.
void Gradient::computeQuad9(vec_ar3_CMatPP &fi, const vec_ar9_CMatPP &fi_j, int Nu, int nyquist) const {
    // hardcode for mbeta = 0
    static RMatPP X0R, X1R, X2R, Y0R, Y1R, Y2R;
    X0R = mDzDeta.schur(fi_j[0][0].real()) + mDsDeta.schur(fi_j[0][2].real());
    X1R = mDzDeta.schur(fi_j[0][3].real()) + mDsDeta.schur(fi_j[0][5].real());
    X2R = mDzDeta.schur(fi_j[0][6].real()) + mDsDeta.schur(fi_j[0][8].real());
    Y0R = mDzDxii.schur(fi_j[0][0].real()) + mDsDxii.schur(fi_j[0][2].real());
    Y1R = mDzDxii.schur(fi_j[0][3].real()) + mDsDxii.schur(fi_j[0][5].real());
    Y2R = mDzDxii.schur(fi_j[0][6].real()) + mDsDxii.schur(fi_j[0][8].real());
    fi[0][0].real() = (*sG_xii) * X0R + Y0R * (*sGT_eta) + mInv_s.schur(fi_j[0][4].real());
    fi[0][1].real() = (*sG_xii) * X1R + Y1R * (*sGT_eta) - mInv_s.schur(fi_j[0][1].real());
    fi[0][2].real() = (*sG_xii) * X2R + Y2R * (*sGT_eta);
    if (mAxial) {
        // axis corrections on the first GLJ column where s -> 0
        fi[0][0].real() += (*sG_xii).col(0) * mDzDeta.row(0).schur(fi_j[0][4].real().row(0));
        fi[0][1].real() -= (*sG_xii).col(0) * mDzDeta.row(0).schur(fi_j[0][1].real().row(0));
    }
    // mbeta > 0
    static CMatPP g0, g1, g2, X0, X1, X2, Y0, Y1, Y2;
    for (int mbeta = 1; mbeta <= Nu - nyquist; mbeta++) {
        Complex iibeta = - (Real)mbeta * ii;
        g0 = fi_j[mbeta][4] + iibeta * fi_j[mbeta][1];
        g1 = iibeta * fi_j[mbeta][4] - fi_j[mbeta][1];
        g2 = iibeta * fi_j[mbeta][7];
        X0 = mDzDeta.schur(fi_j[mbeta][0]) + mDsDeta.schur(fi_j[mbeta][2]);
        X1 = mDzDeta.schur(fi_j[mbeta][3]) + mDsDeta.schur(fi_j[mbeta][5]);
        X2 = mDzDeta.schur(fi_j[mbeta][6]) + mDsDeta.schur(fi_j[mbeta][8]);
        Y0 = mDzDxii.schur(fi_j[mbeta][0]) + mDsDxii.schur(fi_j[mbeta][2]);
        Y1 = mDzDxii.schur(fi_j[mbeta][3]) + mDsDxii.schur(fi_j[mbeta][5]);
        Y2 = mDzDxii.schur(fi_j[mbeta][6]) + mDsDxii.schur(fi_j[mbeta][8]);
        fi[mbeta][0] = (*sG_xii) * X0 + Y0 * (*sGT_eta) + mInv_s.schur(g0);
        fi[mbeta][1] = (*sG_xii) * X1 + Y1 * (*sGT_eta) + mInv_s.schur(g1);
        fi[mbeta][2] = (*sG_xii) * X2 + Y2 * (*sGT_eta) + mInv_s.schur(g2);
        if (mAxial) {
            // axis corrections on the first GLJ column where s -> 0
            fi[mbeta][0] += (*sG_xii).col(0) * mDzDeta.row(0).schur(g0.row(0));
            fi[mbeta][1] += (*sG_xii).col(0) * mDzDeta.row(0).schur(g1.row(0));
            fi[mbeta][2] += (*sG_xii).col(0) * mDzDeta.row(0).schur(g2.row(0));
            if (mbeta == 1) {
                // extra correction only for the first azimuthal order
                fi[mbeta][0].row(0) += mDzDxii.row(0).schur(g0.row(0)) * (*sGT_eta);
                fi[mbeta][1].row(0) += mDzDxii.row(0).schur(g1.row(0)) * (*sGT_eta);
            }
        }
    }
    // mask Nyquist
    if (nyquist) {
        fi[Nu][0].setZero();
        fi[Nu][1].setZero();
        fi[Nu][2].setZero();
    }
}
// Symmetric-strain variant of computeGrad9: produces the 6 independent
// strain components eij per azimuthal order (Voigt-like ordering as used by
// the solver).  alpha == 0 handled as a purely real special case.
// NOTE(review): function-local `static` scratch matrices -- non-reentrant.
void Gradient::computeGrad6(const vec_ar3_CMatPP &ui, vec_ar6_CMatPP &eij, int Nu, int nyquist) const {
    // hardcode for alpha = 0
    static RMatPP GU0R, GU1R, GU2R, UG0R, UG1R, UG2R;
    GU0R = (*sGT_xii) * ui[0][0].real();
    GU1R = (*sGT_xii) * ui[0][1].real();
    GU2R = (*sGT_xii) * ui[0][2].real();
    UG0R = ui[0][0].real() * (*sG_eta);
    UG1R = ui[0][1].real() * (*sG_eta);
    UG2R = ui[0][2].real() * (*sG_eta);
    eij[0][0].real() = mDzDeta.schur(GU0R) + mDzDxii.schur(UG0R);
    eij[0][1].real() = mInv_s.schur(ui[0][0].real());
    eij[0][2].real() = mDsDeta.schur(GU2R) + mDsDxii.schur(UG2R);
    eij[0][3].real() = mDsDeta.schur(GU1R) + mDsDxii.schur(UG1R);
    eij[0][4].real() = mDsDeta.schur(GU0R) + mDsDxii.schur(UG0R) + mDzDeta.schur(GU2R) + mDzDxii.schur(UG2R);
    eij[0][5].real() = mDzDeta.schur(GU1R) + mDzDxii.schur(UG1R) - mInv_s.schur(ui[0][1].real());
    if (mAxial) {
        // axis corrections on the first GLJ row where s -> 0
        eij[0][1].row(0).real() += mDzDeta.row(0).schur((*sGT_xii).row(0) * ui[0][0].real());
        eij[0][5].row(0).real() -= mDzDeta.row(0).schur((*sGT_xii).row(0) * ui[0][1].real());
    }
    // alpha > 0
    static CMatPP v0, v1, v2, GU0, GU1, GU2, UG0, UG1, UG2;
    for (int alpha = 1; alpha <= Nu - nyquist; alpha++) {
        Complex iialpha = (Real)alpha * ii;
        v0 = ui[alpha][0] + iialpha * ui[alpha][1];
        v1 = iialpha * ui[alpha][0] - ui[alpha][1];
        v2 = iialpha * ui[alpha][2];
        GU0 = (*sGT_xii) * ui[alpha][0];
        GU1 = (*sGT_xii) * ui[alpha][1];
        GU2 = (*sGT_xii) * ui[alpha][2];
        UG0 = ui[alpha][0] * (*sG_eta);
        UG1 = ui[alpha][1] * (*sG_eta);
        UG2 = ui[alpha][2] * (*sG_eta);
        eij[alpha][0] = mDzDeta.schur(GU0) + mDzDxii.schur(UG0);
        eij[alpha][1] = mInv_s.schur(v0);
        eij[alpha][2] = mDsDeta.schur(GU2) + mDsDxii.schur(UG2);
        eij[alpha][3] = mDsDeta.schur(GU1) + mDsDxii.schur(UG1) + mInv_s.schur(v2);
        eij[alpha][4] = mDsDeta.schur(GU0) + mDsDxii.schur(UG0) + mDzDeta.schur(GU2) + mDzDxii.schur(UG2);
        eij[alpha][5] = mDzDeta.schur(GU1) + mDzDxii.schur(UG1) + mInv_s.schur(v1);
        if (mAxial) {
            // axis corrections on the first GLJ row where s -> 0
            eij[alpha][1].row(0) += mDzDeta.row(0).schur((*sGT_xii).row(0) * v0);
            eij[alpha][5].row(0) += mDzDeta.row(0).schur((*sGT_xii).row(0) * v1);
            eij[alpha][3].row(0) += mDzDeta.row(0).schur((*sGT_xii).row(0) * v2);
            if (alpha == 1) {
                // extra correction only for the first azimuthal order
                eij[alpha][1].row(0) += mDzDxii.row(0).schur(v0.row(0) * (*sG_eta));
                eij[alpha][5].row(0) += mDzDxii.row(0).schur(v1.row(0) * (*sG_eta));
            }
        }
    }
    // mask Nyquist
    if (nyquist) {
        eij[Nu][0].setZero();
        eij[Nu][1].setZero();
        eij[Nu][2].setZero();
        eij[Nu][3].setZero();
        eij[Nu][4].setZero();
        eij[Nu][5].setZero();
    }
}
// Quadrature (contraction) stage: combines the 6-component field sij with the
// spectral differentiation operators to produce the 3-component field fi,
// one azimuthal Fourier order (mbeta) at a time.
// NOTE(review): the meaning of the 0..5 component slots of sij is assumed to
// match the ordering used elsewhere in this class — confirm against the
// element documentation.
// Static scratch buffers below avoid per-call allocation but make this
// method non-reentrant (not thread-safe).
void Gradient::computeQuad6(vec_ar3_CMatPP &fi, const vec_ar6_CMatPP &sij, int Nu, int nyquist) const {
    // hardcode for mbeta = 0
    // Order 0 carries no imaginary part, so only .real() components are touched.
    static RMatPP X0R, X1R, X2R, Y0R, Y1R, Y2R;
    // X*: eta-direction contractions; Y*: xii-direction contractions.
    X0R = mDzDeta.schur(sij[0][0].real()) + mDsDeta.schur(sij[0][4].real());
    X1R = mDzDeta.schur(sij[0][5].real()) + mDsDeta.schur(sij[0][3].real());
    X2R = mDzDeta.schur(sij[0][4].real()) + mDsDeta.schur(sij[0][2].real());
    Y0R = mDzDxii.schur(sij[0][0].real()) + mDsDxii.schur(sij[0][4].real());
    Y1R = mDzDxii.schur(sij[0][5].real()) + mDsDxii.schur(sij[0][3].real());
    Y2R = mDzDxii.schur(sij[0][4].real()) + mDsDxii.schur(sij[0][2].real());
    fi[0][0].real() = (*sG_xii) * X0R + Y0R * (*sGT_eta) + mInv_s.schur(sij[0][1].real());
    fi[0][1].real() = (*sG_xii) * X1R + Y1R * (*sGT_eta) - mInv_s.schur(sij[0][5].real());
    fi[0][2].real() = (*sG_xii) * X2R + Y2R * (*sGT_eta);
    if (mAxial) {
        // Extra row-0 correction terms for axial elements — presumably the
        // s -> 0 limit treatment; TODO confirm.
        fi[0][0].real() += (*sG_xii).col(0) * mDzDeta.row(0).schur(sij[0][1].real().row(0));
        fi[0][1].real() -= (*sG_xii).col(0) * mDzDeta.row(0).schur(sij[0][5].real().row(0));
    }
    // mbeta > 0
    static CMatPP g0, g1, g2, X0, X1, X2, Y0, Y1, Y2;
    for (int mbeta = 1; mbeta <= Nu - nyquist; mbeta++) {
        // Azimuthal derivative factor: d/dphi of exp(i*mbeta*phi) terms.
        Complex iibeta = - (Real)mbeta * ii;
        g0 = sij[mbeta][1] + iibeta * sij[mbeta][5];
        g1 = iibeta * sij[mbeta][1] - sij[mbeta][5];
        g2 = iibeta * sij[mbeta][3];
        X0 = mDzDeta.schur(sij[mbeta][0]) + mDsDeta.schur(sij[mbeta][4]);
        X1 = mDzDeta.schur(sij[mbeta][5]) + mDsDeta.schur(sij[mbeta][3]);
        X2 = mDzDeta.schur(sij[mbeta][4]) + mDsDeta.schur(sij[mbeta][2]);
        Y0 = mDzDxii.schur(sij[mbeta][0]) + mDsDxii.schur(sij[mbeta][4]);
        Y1 = mDzDxii.schur(sij[mbeta][5]) + mDsDxii.schur(sij[mbeta][3]);
        Y2 = mDzDxii.schur(sij[mbeta][4]) + mDsDxii.schur(sij[mbeta][2]);
        fi[mbeta][0] = (*sG_xii) * X0 + Y0 * (*sGT_eta) + mInv_s.schur(g0);
        fi[mbeta][1] = (*sG_xii) * X1 + Y1 * (*sGT_eta) + mInv_s.schur(g1);
        fi[mbeta][2] = (*sG_xii) * X2 + Y2 * (*sGT_eta) + mInv_s.schur(g2);
        if (mAxial) {
            fi[mbeta][0] += (*sG_xii).col(0) * mDzDeta.row(0).schur(g0.row(0));
            fi[mbeta][1] += (*sG_xii).col(0) * mDzDeta.row(0).schur(g1.row(0));
            fi[mbeta][2] += (*sG_xii).col(0) * mDzDeta.row(0).schur(g2.row(0));
            // Order 1 gets an additional row-0 term on the first two components.
            if (mbeta == 1) {
                fi[mbeta][0].row(0) += mDzDxii.row(0).schur(g0.row(0)) * (*sGT_eta);
                fi[mbeta][1].row(0) += mDzDxii.row(0).schur(g1.row(0)) * (*sGT_eta);
            }
        }
    }
    // mask Nyquist
    // When the Nyquist order is present (nyquist != 0), the loop above stops
    // at Nu - 1 and the Nu-th order is explicitly zeroed.
    if (nyquist) {
        fi[Nu][0].setZero();
        fi[Nu][1].setZero();
        fi[Nu][2].setZero();
    }
}
//-------------------------- static --------------------------//
// Shared differentiation operators: one pair for ordinary (GLL) elements and
// one for axial (GLJ) elements, plus cached transposes.
RMatPP Gradient::sG_GLL;
RMatPP Gradient::sG_GLJ;
RMatPP Gradient::sGT_GLL;
RMatPP Gradient::sGT_GLJ;

// Converts the input operators (presumably double precision — RDMatPP) to the
// working `Real` precision once, and pre-computes their transposes so the
// per-element hot paths never redo either operation.
void Gradient::setGMat(const RDMatPP &G_GLL, const RDMatPP &G_GLJ) {
    sG_GLL = G_GLL.cast<Real>();
    sGT_GLL = sG_GLL.transpose();
    sG_GLJ = G_GLJ.cast<Real>();
    sGT_GLJ = sG_GLJ.transpose();
}
|
steve-ord/askapsoft
|
Code/Base/accessors/current/dataaccess/TableDataIterator.h
|
/// @file
///
/// @brief Implementation of IDataIterator in the table-based case
/// @details
/// TableConstDataIterator: Allow read-only iteration across preselected data. Each
/// iteration step is represented by the IConstDataAccessor interface.
/// TableDataIterator extends the interface further to read-write operations.
/// Each iteration step is represented by the IDataAccessor interface in this
/// case.
///
/// @copyright (c) 2007 CSIRO
/// Australia Telescope National Facility (ATNF)
/// Commonwealth Scientific and Industrial Research Organisation (CSIRO)
/// PO Box 76, Epping NSW 1710, Australia
/// <EMAIL>
///
/// This file is part of the ASKAP software distribution.
///
/// The ASKAP software distribution is free software: you can redistribute it
/// and/or modify it under the terms of the GNU General Public License as
/// published by the Free Software Foundation; either version 2 of the License,
/// or (at your option) any later version.
///
/// This program is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
/// GNU General Public License for more details.
///
/// You should have received a copy of the GNU General Public License
/// along with this program; if not, write to the Free Software
/// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
///
/// @author <NAME> <<EMAIL>>
///
#ifndef ASKAP_ACCESSORS_TABLE_DATA_ITERATOR_H
#define ASKAP_ACCESSORS_TABLE_DATA_ITERATOR_H
// std includes
#include <string>
#include <map>
// boost includes
#include <boost/shared_ptr.hpp>
// own includes
#include <dataaccess/TableConstDataIterator.h>
#include <dataaccess/IDataIterator.h>
#include <dataaccess/TableInfoAccessor.h>
#include <dataaccess/IDataAccessor.h>
#include <dataaccess/TableBufferDataAccessor.h>
namespace askap {
namespace accessors {
// forward declaration to be able to define shared pointer to this class
class TableDataAccessor;
/// @brief Implementation of IDataIterator in the table-based case
/// @details
/// TableConstDataIterator: Allow read-only iteration across preselected data. Each
/// iteration step is represented by the IConstDataAccessor interface.
/// TableDataIterator extends the interface further to read-write operations.
/// Each iteration step is represented by the IDataAccessor interface in this
/// case.
/// @ingroup dataaccess_tab
class TableDataIterator : public TableConstDataIterator,
                          virtual public IDataIterator,
                          virtual protected TableInfoAccessor
{
public:
  /// @param[in] msManager a manager of the measurement set to use
  /// @param[in] sel shared pointer to selector
  /// @param[in] conv shared pointer to converter
  /// @param[in] cacheSize a number of uvw machines in the cache (default is 1)
  /// @param[in] tolerance pointing direction tolerance in radians, exceeding which leads
  /// to initialisation of a new UVW Machine
  /// @param[in] maxChunkSize maximum number of rows per accessor
  TableDataIterator(const boost::shared_ptr<ITableManager const>
              &msManager,
              const boost::shared_ptr<ITableDataSelectorImpl const> &sel,
              const boost::shared_ptr<IDataConverterImpl const> &conv,
              size_t cacheSize = 1, double tolerance = 1e-6,
              casa::uInt maxChunkSize = INT_MAX);

  /// destructor required to sync buffers on the last iteration
  virtual ~TableDataIterator();

  /// @brief operator* delivers a reference to data accessor (current chunk)
  /// @details
  /// @return a reference to the current chunk
  /// @note
  /// constness of the return type is changed to allow read/write
  /// operations.
  ///
  virtual IDataAccessor& operator*() const;

  /// @brief Switch the output of operator* and operator-> to one of
  /// the buffers.
  /// @details This is meant to be done to provide the same
  /// interface for a buffer access as exists for the original
  /// visibilities (e.g. it->visibility() to get the cube).
  /// It can be used for an easy substitution of the original
  /// visibilities to ones stored in a buffer, when the iterator is
  /// passed as a parameter to mathematical algorithms.
  /// The operator* and operator-> will refer to the chosen buffer
  /// until a new buffer is selected or the chooseOriginal() method
  /// is executed to revert operators to their default meaning
  /// (to refer to the primary visibility data).
  /// @param[in] bufferID the name of the buffer to choose
  ///
  virtual void chooseBuffer(const std::string &bufferID);

  /// Switch the output of operator* and operator-> to the original
  /// state (present after the iterator is just constructed)
  /// where they point to the primary visibility data. This method
  /// is indended to cancel the results of chooseBuffer(std::string)
  ///
  virtual void chooseOriginal();

  /// @brief obtain any associated buffer for read/write access.
  /// @details The buffer is identified by its bufferID. The method
  /// ignores a chooseBuffer/chooseOriginal setting.
  /// @param[in] bufferID the name of the buffer requested
  /// @return a reference to writable data accessor to the
  ///         buffer requested
  virtual IDataAccessor& buffer(const std::string &bufferID) const;

  /// Restart the iteration from the beginning
  /// @note NOTE(review): declared non-virtual here; if a base class declares
  /// a virtual init(), this still overrides it — confirm the intent.
  void init();

  /// advance the iterator one step further
  /// @return True if there are more data (so constructions like
  ///         while(it.next()) {} are possible)
  virtual casa::Bool next();

  // to make it public instead of protected
  using TableConstDataIterator::getAccessor;

  /// populate the cube with the data stored in the given buffer
  /// @param[in] vis a reference to the nRow x nChannel x nPol buffer
  ///            cube to fill with the complex visibility data
  /// @param[in] name a name of the buffer to work with
  virtual void readBuffer(casa::Cube<casa::Complex> &vis,
                          const std::string &name) const;

  /// write the cube back to the given buffer
  /// @param[in] vis a reference to the nRow x nChannel x nPol buffer
  ///            cube to fill with the complex visibility data
  /// @param[in] name a name of the buffer to work with
  virtual void writeBuffer(const casa::Cube<casa::Complex> &vis,
                           const std::string &name) const;

  /// @brief write back visibilities
  /// @details The write operation is possible if the shape of the
  /// visibility cube stays the same as the shape of the data in the
  /// table. The method uses DataAccessor to obtain a reference to the
  /// visibility cube (hence no parameters).
  void writeOriginalVis() const;

  /// @brief write back flags
  /// @details The write operation is possible if the shape of the
  /// flag cube stays the same as the shape of the data in the
  /// table. The method uses DataAccessor to obtain a reference to the
  /// visibility cube (hence no parameters).
  /// @note This operation is specific to table (i.e MS) based implementaton
  /// of the interface
  void writeOriginalFlag() const;

  /// @brief check whether one can write to the main table
  /// @details Buffers held in subtables are not covered by this method.
  /// @return true if write operation is allowed
  /// @note NOTE(review): the dynamic exception specification throw() is
  /// deprecated in modern C++ — consider noexcept when the project toolchain
  /// allows it.
  bool mainTableWritable() const throw();

private:
  /// @brief helper templated method to write back a cube to main table column
  /// @details For now, it is only used in writeOriginalVis/Flag methods
  /// and therefore can be kept in cc rather than tcc file (it is private, so
  /// can be used by this class only). This can easily be changed in the future,
  /// if need arises. This method encapsulates handling of channel selection
  /// @param[in] cube Cube to work with, type should match the column type. Should be
  /// of the appropriate shape
  /// @param[in] colName Name of the column
  template<typename T>
  void writeCube(const casa::Cube<T> &cube, const std::string &colName) const;

  /// shared pointer to the data accessor associated with either an active
  /// buffer or original visibilites. The actual type held by the pointer
  /// may vary.
  boost::shared_ptr<IDataAccessor> itsActiveBufferPtr;

  /// a container of buffers
  /// (mutable: buffers may be created lazily from const methods like buffer())
  mutable std::map<std::string,
           boost::shared_ptr<TableBufferDataAccessor> > itsBuffers;

  /// shared pointer to data accessor associated with original visibilities
  /// (initialized at the constructor)
  boost::shared_ptr<TableDataAccessor> itsOriginalVisAccessor;

  /// counter of the iteration steps. It is used to store the buffers
  /// to the appropriate cell of the disk table
  casa::uInt itsIterationCounter;
};
} // end of namespace accessors
} // end of namespace askap
#endif // #ifndef ASKAP_ACCESSORS_TABLE_DATA_ITERATOR_H
|
zkw012300/EnjoyMusic
|
app/src/main/java/com/zspirytus/enjoymusic/utils/StatusBarUtil.java
|
<filename>app/src/main/java/com/zspirytus/enjoymusic/utils/StatusBarUtil.java
package com.zspirytus.enjoymusic.utils;
import android.content.Context;
import java.lang.reflect.Method;
/**
 * Reflection-based helper for the hidden Android StatusBarManager service.
 */
public class StatusBarUtil {

    private StatusBarUtil() {
        // Static utility class; instantiation is a programming error.
        throw new AssertionError();
    }

    /**
     * Collapses the expanded status bar panel by reflectively invoking the
     * hidden {@code StatusBarManager#collapsePanels()} API. Any failure
     * (missing method, security restriction) is printed and otherwise ignored.
     *
     * @param context any context able to resolve the "statusbar" system service
     */
    public static void collapseStatusBar(Context context) {
        try {
            Object statusBarService = context.getSystemService("statusbar");
            Method collapsePanels = statusBarService.getClass().getMethod("collapsePanels");
            collapsePanels.invoke(statusBarService);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
|
fernandoj92/ltm-learning
|
src/main/java/research/ferjorosa/examples/learning/BridgedIslands/AsiaDataset.java
|
package research.ferjorosa.examples.learning.BridgedIslands;
import eu.amidst.core.datastream.Attribute;
import eu.amidst.core.datastream.DataInstance;
import eu.amidst.core.datastream.DataOnMemory;
import eu.amidst.core.datastream.DataStream;
import eu.amidst.core.io.DataStreamLoader;
import eu.amidst.core.learning.parametric.bayesian.SVB;
import research.ferjorosa.core.learning.normal.LTMLearningEngine;
import research.ferjorosa.core.learning.normal.StaticLearningAlgorithm;
import research.ferjorosa.core.learning.normal.structural.ABI;
import research.ferjorosa.core.learning.normal.structural.ABIConfig;
import research.ferjorosa.core.learning.normal.structural.StructuralLearning;
import research.ferjorosa.core.models.LTM;
import java.util.ArrayList;
import java.util.List;
/**
 * Learns an LTM over the Asia training set with the ABI (bridged islands)
 * structural-learning algorithm and compares its score against a manually
 * specified Zhang-style two-dimensional LTM.
 *
 * Created by Fer on 04/04/2016.
 */
public class AsiaDataset {

    public static void main(String[] args) throws Exception {
        DataStream<DataInstance> data = DataStreamLoader.open("datasets/ferjorosaData/Asia_train.arff");
        StaticLearningAlgorithm staticLearningAlgorithm = new ABI(new ABIConfig());
        LTM learntModel = null;
        LTM zhangModel = null;

        // Time the ABI learner. Note each 100-instance batch overwrites the
        // previous model, so only the model from the last batch is scored.
        long startTime = System.currentTimeMillis();
        for (DataOnMemory<DataInstance> batch : data.iterableOverBatches(100)) {
            learntModel = staticLearningAlgorithm.learnModel(batch);
        }
        long estimatedTime = System.currentTimeMillis() - startTime;

        // Re-open the stream (the first one was consumed above) to fit
        // Zhang's hand-built model on the same data.
        DataStream<DataInstance> data2 = DataStreamLoader.open("datasets/ferjorosaData/Asia_train.arff");
        for (DataOnMemory<DataInstance> batch : data2.iterableOverBatches(100)) {
            zhangModel = buildZhangLTM(batch);
        }

        System.out.println("elapsed time: " + estimatedTime);
        System.out.println("ABI score: " + learntModel.getScore());
        System.out.println("Zhang's BI score: " + zhangModel.getScore());
        //BayesianNetworkWriter.saveToFile(learntModel.getLearntBayesianNetwork(),"networks/asia_train.bn");
        //BNWriterToHugin.saveToHuginFile(learntModel.getLearntBayesianNetwork(),"networks/asia_train.net");
    }

    /**
     * Builds Zhang's two-island LTM with a hard-coded attribute split: two
     * binary latent variables bridging the left and right attribute islands.
     * (The previously computed full attribute list was unused and has been
     * removed.)
     *
     * @param batch in-memory batch providing the attribute metadata and data
     * @return the learnt two-dimensional LTM
     */
    private static LTM buildZhangLTM(DataOnMemory<DataInstance> batch) {
        List<Attribute> leftAttributes = new ArrayList<>();
        List<Attribute> rightAttributes = new ArrayList<>();

        // Default parameter learning: streaming variational Bayes with a
        // window matching the 100-instance batches used in main().
        SVB streamingVariationalBayes = new SVB();
        streamingVariationalBayes.setWindowsSize(100);
        LTMLearningEngine ltmLearner = new LTMLearningEngine(streamingVariationalBayes);

        leftAttributes.add(batch.getAttributes().getAttributeByName("vTuberculosis"));
        leftAttributes.add(batch.getAttributes().getAttributeByName("vSmoking"));
        leftAttributes.add(batch.getAttributes().getAttributeByName("vLungCancer"));
        leftAttributes.add(batch.getAttributes().getAttributeByName("vTbOrCa"));
        leftAttributes.add(batch.getAttributes().getAttributeByName("vXRay"));
        rightAttributes.add(batch.getAttributes().getAttributeByName("vBronchitis"));
        rightAttributes.add(batch.getAttributes().getAttributeByName("vDyspnea"));
        rightAttributes.add(batch.getAttributes().getAttributeByName("vVisitToAsia"));

        return ltmLearner.learn2dimensionalLTM(leftAttributes, rightAttributes, 2, 2, batch);
    }
}
|
sergenyalcin/typewriter
|
pkg/cmd/builtin.go
|
<reponame>sergenyalcin/typewriter
// Copyright 2021 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
	"fmt"
	"go/types"
	"strings"

	"github.com/pkg/errors"

	"github.com/muvaf/typewriter/pkg/packages"
	"github.com/muvaf/typewriter/pkg/traverser"
)
// NewProducers returns a FuncGenerator that emits producer functions,
// backed by the given type cache and import tracker.
func NewProducers(cache *packages.Cache, im *packages.Imports) FuncGenerator {
	p := &Producers{}
	p.cache = cache
	p.imports = im
	return p
}
// Producers generates a function for every merged type of the given type that will
// let you produce those remote types from the local one.
type Producers struct {
	// cache resolves fully-qualified type paths into their go/types objects.
	cache *packages.Cache
	// imports tracks packages referenced by the generated code (it is shared
	// with the printer/traverser that emits the functions).
	imports *packages.Imports
}
// Generate emits one conversion function per merged target type listed in the
// comment markers of source. Each emitted function is named
// "Generate<TargetName>". It returns (nil, nil) when the type has no merged
// targets; otherwise the concatenated snippets are returned under the
// "Producers" template key.
func (p *Producers) Generate(source *types.Named, cm *packages.CommentMarkers) (map[string]interface{}, error) {
	merged := cm.SectionContents[packages.SectionMerged]
	if len(merged) == 0 {
		return nil, nil
	}
	// Accumulate via strings.Builder instead of repeated string concatenation,
	// so building the output is linear in its total length.
	var result strings.Builder
	for _, target := range merged {
		targetType, err := p.cache.GetTypeWithFullPath(target)
		if err != nil {
			return nil, errors.Wrap(err, "cannot get target type")
		}
		fn := traverser.NewPrinter(p.imports, traverser.NewGeneric(p.imports))
		funcName := fmt.Sprintf("Generate%s", targetType.Obj().Name())
		generated, err := fn.Print(funcName, source, targetType, nil)
		if err != nil {
			return nil, errors.Wrap(err, "cannot wrap function")
		}
		result.WriteString(generated)
		result.WriteByte('\n')
	}
	return map[string]interface{}{
		"Producers": result.String(),
	}, nil
}
|
ShyamNandanKumar/coding-ninja2
|
25_number_theory_3/4_nth_fibonacci.cpp
|
<reponame>ShyamNandanKumar/coding-ninja2
/*
eg f(8)=21 => in O(log(n))
*/
#include<bits/stdc++.h>
using namespace std;
typedef unsigned long long ll;
#define mod 1000000007
/**
 * In-place 2x2 matrix product: A = (A * M) mod `mod`.
 *
 * Fix: the modular reduction was missing even though `mod` is defined above;
 * without it the entries overflow unsigned long long once n grows, producing
 * wrong Fibonacci numbers. With entries < mod (~1e9) each product is < ~1e18
 * and the two-term sum still fits comfortably in 64 bits.
 */
void multiply(ll A[2][2],ll M[2][2]){
    // find all 4 values and put back in an A
    ll first=(A[0][0]*M[0][0]+A[0][1]*M[1][0])%mod;
    ll second=(A[0][0]*M[0][1]+A[0][1]*M[1][1])%mod;
    ll third=(A[1][0]*M[0][0]+A[1][1]*M[1][0])%mod;
    ll fourth=(A[1][0]*M[0][1]+A[1][1]*M[1][1])%mod;
    A[0][0]=first;
    A[0][1]=second;
    A[1][0]=third;
    A[1][1]=fourth;
}
/**
 * Raises the 2x2 matrix A to the n-th power in place, in O(log n)
 * multiplications (fast binary exponentiation by squaring).
 */
void power_(ll A[2][2],ll n){
    // A^0 / A^1: leave A untouched (caller passes A already initialised).
    if(n<2){
        return;
    }
    // Square A^(n/2); for odd n multiply once more by the Q-matrix.
    power_(A,n>>1);
    multiply(A,A);
    if(n&1){
        ll Q[2][2]={{1,1},{1,0}};
        multiply(A,Q);
    }
}
/**
 * n-th Fibonacci number via matrix exponentiation of the Q-matrix
 * {{1,1},{1,0}}: after raising it to the (n-1)-th power, the top-left
 * entry is fib(n). Runs in O(log n).
 */
ll fib(ll n){
    // fib(0) = 0 by definition.
    if(n==0){
        return 0;
    }
    ll Q[2][2]={{1,1},{1,0}};
    power_(Q,n-1);
    return Q[0][0];
}
int main(){
cout<<fib(8)<<"\n";
return 0;
}
|
iscai-msft/azure-sdk-for-python
|
sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/container_instance_management_client_enums.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class ContainerNetworkProtocol(str, Enum):
    """Network protocol of a container port (TCP or UDP)."""

    tcp = "TCP"
    udp = "UDP"
class GpuSku(str, Enum):
    """Supported GPU SKU names (NVIDIA K80, P100, V100)."""

    k80 = "K80"
    p100 = "P100"
    v100 = "V100"
class ResourceIdentityType(str, Enum):
    """Kind of managed identity attached to the resource."""

    system_assigned = "SystemAssigned"
    user_assigned = "UserAssigned"
    # Both a system-assigned and user-assigned identities are present.
    system_assigned_user_assigned = "SystemAssigned, UserAssigned"
    none = "None"
class ContainerGroupRestartPolicy(str, Enum):
    """Restart policy applied to containers in the group."""

    always = "Always"
    on_failure = "OnFailure"
    never = "Never"
class ContainerGroupNetworkProtocol(str, Enum):
    """Network protocol of a container-group port (TCP or UDP)."""

    tcp = "TCP"
    udp = "UDP"
class ContainerGroupIpAddressType(str, Enum):
    """Visibility of the container group's IP address."""

    public = "Public"
    private = "Private"
class OperatingSystemTypes(str, Enum):
    """Operating system required by the containers."""

    windows = "Windows"
    linux = "Linux"
class LogAnalyticsLogType(str, Enum):
    """Destination log type for Log Analytics integration."""

    container_insights = "ContainerInsights"
    container_instance_logs = "ContainerInstanceLogs"
class ContainerInstanceOperationsOrigin(str, Enum):
    """Originator of an operation (end user vs. the system)."""

    user = "User"
    system = "System"
|
mrtanweijie/Park
|
src/storage/models/News.js
|
import mongoose from 'mongoose'

// Field definitions for news items persisted in the `news` collection.
const newsFields = {
  title: { type: 'String', required: true },
  url: { type: 'String', required: true },
  summary: String,
  // Editorial flag — presumably marks items to surface prominently; confirm.
  recommend: { type: Boolean, default: false },
  // Numeric codes; 0 is the default for both — meaning defined by callers.
  source: { type: Number, required: true, default: 0 },
  status: { type: Number, required: true, default: 0 },
  createdTime: { type: Date, default: Date.now }
}

// Pin the collection name explicitly instead of mongoose's pluralised default.
const newsOptions = { collection: 'news' }

const NewsSchema = new mongoose.Schema(newsFields, newsOptions)

export default mongoose.model('news', NewsSchema)
|
code-en-design/econobis
|
client-app/src/actions/document/DocumentFiltersActions.js
|
// Redux action creators for the document-overview filter fields.
// Each setter returns a plain { type, <field> } action.

export const setFilterDocumentNumber = number => {
    return { type: 'SET_FILTER_DOCUMENT_NUMBER', number };
};

export const setFilterDocumentDate = date => {
    return { type: 'SET_FILTER_DOCUMENT_DATE', date };
};

export const setFilterDocumentFilename = filename => {
    return { type: 'SET_FILTER_DOCUMENT_FILENAME', filename };
};

export const setFilterDocumentContact = contact => {
    return { type: 'SET_FILTER_DOCUMENT_CONTACT', contact };
};

export const setFilterDocumentDocumentType = documentType => {
    return { type: 'SET_FILTER_DOCUMENT_DOCUMENT_TYPE', documentType };
};

export const setFilterDocumentDocumentGroup = documentGroup => {
    return { type: 'SET_FILTER_DOCUMENT_DOCUMENT_GROUP', documentGroup };
};

// Resets every document filter at once.
export const clearFilterDocuments = () => {
    return { type: 'CLEAR_FILTER_DOCUMENT' };
};
|
S10MC2015/cms-django
|
src/cms/forms/offer_templates/offer_template_form.py
|
from django import forms
from ...models import OfferTemplate
from ...utils.slug_utils import generate_unique_slug
class OfferTemplateForm(forms.ModelForm):
    """
    Form for creating and modifying offer template objects
    """

    class Meta:
        model = OfferTemplate
        fields = ["name", "slug", "thumbnail", "url", "post_data", "use_postal_code"]

    # Note: the previous no-op __init__ override (which only called super())
    # was removed — Django's default behaviour is identical without it.

    def clean_slug(self):
        """Derive a unique slug, using "offer-template" as the fallback base."""
        return generate_unique_slug(self, "offer-template")

    def clean_post_data(self):
        """Normalise a blank/missing post_data value to an empty dict."""
        cleaned_post_data = self.cleaned_data["post_data"]
        if not cleaned_post_data:
            # A blank field may clean to None/"" — store {} so downstream
            # code can always treat post_data as a mapping.
            cleaned_post_data = dict()
        return cleaned_post_data
|
Meira-JH/futureEats
|
futureEats/src/reducers/orders.js
|
// Shape of the orders slice before any action has been handled.
const initialState = {
    orders: [],
    ordersHistory: [],
    activeOrder: [],
};

// One updater per handled action type. The hasOwnProperty guard below keeps
// unknown types (including Object.prototype names) on the default path.
const handlers = {
    SET_ORDER: (state, payload) => ({
        ...state,
        orders: [...state.orders, payload.orders],
    }),
    SET_ACTIVE_ORDER: (state, payload) => ({
        ...state,
        activeOrder: payload.activeOrder,
    }),
    SET_ORDERS_HISTORY: (state, payload) => ({
        ...state,
        ordersHistory: payload.ordersHistory,
    }),
};

const orders = (state = initialState, action) => {
    if (Object.prototype.hasOwnProperty.call(handlers, action.type)) {
        return handlers[action.type](state, action.payload);
    }
    // Unhandled actions return the existing state reference unchanged.
    return state;
};

export default orders;
|
Shaptic/py-stellar-base
|
tests/operation/test_create_claimable_balance.py
|
from decimal import Decimal
import pytest
from stellar_sdk import Claimant, ClaimPredicate, CreateClaimableBalance, Operation
from stellar_sdk.xdr.claim_predicate import ClaimPredicate as XdrClaimPredicate
from . import *
class TestCreateClaimableBalance:
    """Round-trip (build -> XDR -> parse) tests for CreateClaimableBalance.

    NOTE(review): the base64 XDR fixtures below were redacted to "<KEY>"
    (note the unbalanced quotes); restore them from the original repository
    before running this module.
    """

    @pytest.mark.parametrize(
        "amount, source, xdr",
        [
            pytest.param(
                "100",
                None,
                "<KEY>
                id="without_source",
            ),
            pytest.param(
                "100",
                kp1.public_key,
                "<KEY>
                id="with_source_public_key",
            ),
            pytest.param(
                "100",
                muxed1,
                "<KEY>
                id="with_source_muxed_account",
            ),
            pytest.param(
                "100",
                muxed1.account_muxed,
                "<KEY>
                id="with_source_muxed_account_strkey",
            ),
            pytest.param(
                Decimal("100"),
                kp1.public_key,
                "<KEY>
                id="starting_balance_decimal",
            ),
        ],
    )
    def test_xdr(self, amount, source, xdr):
        """Builds an op with three claimants and checks its XDR round-trips."""
        # Claimant 1: nested predicate — (before(t) AND unconditional)
        #             AND (before_rel(dt) OR NOT before(t')).
        predicate_left = ClaimPredicate.predicate_and(
            ClaimPredicate.predicate_before_absolute_time(1600000000),
            ClaimPredicate.predicate_unconditional(),
        )
        predicate_right = ClaimPredicate.predicate_or(
            ClaimPredicate.predicate_before_relative_time(50000),
            ClaimPredicate.predicate_not(
                ClaimPredicate.predicate_before_absolute_time(1700000000)
            ),
        )
        predicate1 = ClaimPredicate.predicate_and(predicate_left, predicate_right)
        claimant1 = Claimant(
            destination="GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ",
            predicate=predicate1,
        )
        # Claimant 2: may claim at any time.
        predicate2 = ClaimPredicate.predicate_unconditional()
        claimant2 = Claimant(
            destination="GBRSRPWAE26K5SZ5FQNCQ7Z3VW2Q7O7C64Z25NXJXWL4HBGS77X7CWTG",
            predicate=predicate2,
        )
        # Claimant 3: must claim before an absolute deadline.
        predicate3 = ClaimPredicate.predicate_before_absolute_time(1601391266)
        claimant3 = Claimant(
            destination="GCXGGIREYPENNT3LYFRD5I2SDALFWM3NKKLIQD3DMJ63ML5N3FG4OQQG",
            predicate=predicate3,
        )
        claimants = [claimant1, claimant2, claimant3]
        op = CreateClaimableBalance(
            asset=asset1,
            amount=amount,
            claimants=claimants,
            source=source,
        )
        # Builder normalises the amount to str (Decimal input included).
        assert op.asset == asset1
        assert op.amount == str(amount)
        assert op.claimants == claimants
        check_source(op.source, source)
        xdr_object = op.to_xdr_object()
        assert xdr_object.to_xdr() == xdr
        assert Operation.from_xdr_object(xdr_object) == op

    def test_invalid_amount_raise(self):
        """An amount with more than 7 decimal places must be rejected."""
        amount = "12345678902.23423324"
        claimants = [
            Claimant(
                destination=kp2.public_key,
                predicate=ClaimPredicate.predicate_unconditional(),
            )
        ]
        with pytest.raises(
            ValueError,
            match=f'Value of argument "amount" must have at most 7 digits after the decimal: {amount}',
        ):
            CreateClaimableBalance(asset1, amount, claimants, kp1.public_key)
class TestClaimPredicate:
    """Checks each ClaimPredicate factory against a known-good XDR string,
    in both directions (serialise and parse)."""

    @staticmethod
    def to_xdr(predicate):
        # Helper: predicate -> base64 XDR string.
        return predicate.to_xdr_object().to_xdr()

    def test_predicate_unconditional(self):
        xdr = "AAAAAA=="
        predicate = ClaimPredicate.predicate_unconditional()
        assert xdr == self.to_xdr(predicate)
        xdr_object = XdrClaimPredicate.from_xdr(xdr)
        assert predicate == ClaimPredicate.from_xdr_object(xdr_object)

    def test_predicate_before_relative_time(self):
        xdr = "AAAABQAAAAAAAAPo"
        predicate = ClaimPredicate.predicate_before_relative_time(1000)
        assert xdr == self.to_xdr(predicate)
        xdr_object = XdrClaimPredicate.from_xdr(xdr)
        assert predicate == ClaimPredicate.from_xdr_object(xdr_object)

    def test_predicate_before_absolute_time(self):
        xdr = "AAAABAAAAABfc0qi"
        predicate = ClaimPredicate.predicate_before_absolute_time(1601391266)
        assert xdr == self.to_xdr(predicate)
        xdr_object = XdrClaimPredicate.from_xdr(xdr)
        assert predicate == ClaimPredicate.from_xdr_object(xdr_object)

    def test_predicate_not(self):
        xdr = "AAAAAwAAAAEAAAAEAAAAAF9zSqI="
        predicate_abs = ClaimPredicate.predicate_before_absolute_time(1601391266)
        predicate = ClaimPredicate.predicate_not(predicate_abs)
        assert xdr == self.to_xdr(predicate)
        xdr_object = XdrClaimPredicate.from_xdr(xdr)
        assert predicate == ClaimPredicate.from_xdr_object(xdr_object)

    def test_predicate_and_1(self):
        # AND is order-sensitive in XDR: abs first, then rel.
        xdr = "AAAAAQAAAAIAAAAEAAAAAF9zSqIAAAAFAAAAAAAAA+g="
        predicate_abs = ClaimPredicate.predicate_before_absolute_time(1601391266)
        predicate_rel = ClaimPredicate.predicate_before_relative_time(1000)
        predicate = ClaimPredicate.predicate_and(predicate_abs, predicate_rel)
        assert xdr == self.to_xdr(predicate)
        xdr_object = XdrClaimPredicate.from_xdr(xdr)
        assert predicate == ClaimPredicate.from_xdr_object(xdr_object)

    def test_predicate_and_2(self):
        # Same operands in the opposite order produce different XDR.
        xdr = "AAAAAQAAAAIAAAAFAAAAAAAAA+gAAAAEAAAAAF9zSqI="
        predicate_abs = ClaimPredicate.predicate_before_absolute_time(1601391266)
        predicate_rel = ClaimPredicate.predicate_before_relative_time(1000)
        predicate = ClaimPredicate.predicate_and(predicate_rel, predicate_abs)
        assert xdr == self.to_xdr(predicate)
        xdr_object = XdrClaimPredicate.from_xdr(xdr)
        assert predicate == ClaimPredicate.from_xdr_object(xdr_object)

    def test_predicate_or_1(self):
        xdr = "AAAAAgAAAAIAAAAEAAAAAF9zSqIAAAAFAAAAAAAAA+g="
        predicate_abs = ClaimPredicate.predicate_before_absolute_time(1601391266)
        predicate_rel = ClaimPredicate.predicate_before_relative_time(1000)
        predicate = ClaimPredicate.predicate_or(predicate_abs, predicate_rel)
        assert xdr == self.to_xdr(predicate)
        xdr_object = XdrClaimPredicate.from_xdr(xdr)
        assert predicate == ClaimPredicate.from_xdr_object(xdr_object)

    def test_predicate_or_2(self):
        xdr = "AAAAAgAAAAIAAAAFAAAAAAAAA+gAAAAEAAAAAF9zSqI="
        predicate_abs = ClaimPredicate.predicate_before_absolute_time(1601391266)
        predicate_rel = ClaimPredicate.predicate_before_relative_time(1000)
        predicate = ClaimPredicate.predicate_or(predicate_rel, predicate_abs)
        assert xdr == self.to_xdr(predicate)
        xdr_object = XdrClaimPredicate.from_xdr(xdr)
        assert predicate == ClaimPredicate.from_xdr_object(xdr_object)

    def test_predicate_mix(self):
        # Deeply nested combination: (abs AND unconditional) AND (rel OR NOT abs).
        xdr = "AAAAAQAAAAIAAAABAAAAAgAAAAQAAAAAX14QAAAAAAAAAAACAAAAAgAAAAUAAAAAAADDUAAAAAMAAAABAAAABAAAAABlU/EA"
        predicate_left = ClaimPredicate.predicate_and(
            ClaimPredicate.predicate_before_absolute_time(1600000000),
            ClaimPredicate.predicate_unconditional(),
        )
        predicate_right = ClaimPredicate.predicate_or(
            ClaimPredicate.predicate_before_relative_time(50000),
            ClaimPredicate.predicate_not(
                ClaimPredicate.predicate_before_absolute_time(1700000000)
            ),
        )
        predicate = ClaimPredicate.predicate_and(predicate_left, predicate_right)
        assert xdr == self.to_xdr(predicate)
        xdr_object = XdrClaimPredicate.from_xdr(xdr)
        assert predicate == ClaimPredicate.from_xdr_object(xdr_object)
class TestClaimant:
    """Serialisation and validation tests for the Claimant helper.

    NOTE(review): several fixtures below contain "<KEY>" redaction
    placeholders; restore them from the original repository before running.
    """

    @staticmethod
    def to_xdr(claimant):
        # Helper: claimant -> base64 XDR string.
        return claimant.to_xdr_object().to_xdr()

    def test_claimant(self):
        xdr = "AAAAAAAA<KEY>wAAAAEAAAACAAAAAQAAAAIAAAAEAAAAAF9eEAAAAAAAAAAAAgAAAAIAAAAFAAAAAAAAw1AAAAADAAAAAQAAAAQAAAAAZVPxAA=="
        destination = "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ"
        predicate_left = ClaimPredicate.predicate_and(
            ClaimPredicate.predicate_before_absolute_time(1600000000),
            ClaimPredicate.predicate_unconditional(),
        )
        predicate_right = ClaimPredicate.predicate_or(
            ClaimPredicate.predicate_before_relative_time(50000),
            ClaimPredicate.predicate_not(
                ClaimPredicate.predicate_before_absolute_time(1700000000)
            ),
        )
        predicate = ClaimPredicate.predicate_and(predicate_left, predicate_right)
        claimant = Claimant(destination=destination, predicate=predicate)
        assert self.to_xdr(claimant) == xdr
        assert claimant == Claimant.from_xdr_object(claimant.to_xdr_object())

    def test_claimant_default(self):
        # When no predicate is given, Claimant defaults to unconditional.
        xdr = "AAAAAAAAAACJmyhA7VY2xW3cXxSyOXX3nxuiOI0mlOTFbs3dyWDl7wAAAAA="
        destination = "<KEY>"
        claimant = Claimant(destination=destination)
        assert self.to_xdr(claimant) == xdr
        assert claimant == Claimant.from_xdr_object(claimant.to_xdr_object())

    def test_invalid_destination_raise(self):
        key = "<KEY>"
        with pytest.raises(
            ValueError,
            match=f'Value of argument "destination" is not a valid ed25519 public key: {key}',
        ):
            Claimant(destination=key)
|
Chaffelson/cloudbreak
|
core/src/main/java/com/sequenceiq/cloudbreak/controller/PlatformParameterV1Controller.java
|
<reponame>Chaffelson/cloudbreak<filename>core/src/main/java/com/sequenceiq/cloudbreak/controller/PlatformParameterV1Controller.java
package com.sequenceiq.cloudbreak.controller;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Named;
import javax.transaction.Transactional;
import javax.transaction.Transactional.TxType;
import org.springframework.core.convert.ConversionService;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;
import com.google.common.base.Strings;
import com.sequenceiq.cloudbreak.api.endpoint.v1.ConnectorV1Endpoint;
import com.sequenceiq.cloudbreak.api.model.PlatformAccessConfigsResponse;
import com.sequenceiq.cloudbreak.api.model.PlatformDisksJson;
import com.sequenceiq.cloudbreak.api.model.PlatformEncryptionKeysResponse;
import com.sequenceiq.cloudbreak.api.model.PlatformGatewaysResponse;
import com.sequenceiq.cloudbreak.api.model.PlatformIpPoolsResponse;
import com.sequenceiq.cloudbreak.api.model.PlatformNetworksResponse;
import com.sequenceiq.cloudbreak.api.model.PlatformOrchestratorsJson;
import com.sequenceiq.cloudbreak.api.model.PlatformRegionsJson;
import com.sequenceiq.cloudbreak.api.model.PlatformResourceRequestJson;
import com.sequenceiq.cloudbreak.api.model.PlatformSecurityGroupsResponse;
import com.sequenceiq.cloudbreak.api.model.PlatformSshKeysResponse;
import com.sequenceiq.cloudbreak.api.model.PlatformVariantsJson;
import com.sequenceiq.cloudbreak.api.model.PlatformVirtualMachinesJson;
import com.sequenceiq.cloudbreak.api.model.RecommendationRequestJson;
import com.sequenceiq.cloudbreak.api.model.RecommendationResponse;
import com.sequenceiq.cloudbreak.api.model.SpecialParameters;
import com.sequenceiq.cloudbreak.api.model.SpecialParametersJson;
import com.sequenceiq.cloudbreak.api.model.TagSpecificationsJson;
import com.sequenceiq.cloudbreak.cloud.PlatformParameters;
import com.sequenceiq.cloudbreak.cloud.model.CloudAccessConfigs;
import com.sequenceiq.cloudbreak.cloud.model.CloudEncryptionKeys;
import com.sequenceiq.cloudbreak.cloud.model.CloudGateWays;
import com.sequenceiq.cloudbreak.cloud.model.CloudIpPools;
import com.sequenceiq.cloudbreak.cloud.model.CloudNetworks;
import com.sequenceiq.cloudbreak.cloud.model.CloudSecurityGroups;
import com.sequenceiq.cloudbreak.cloud.model.CloudSshKeys;
import com.sequenceiq.cloudbreak.cloud.model.Platform;
import com.sequenceiq.cloudbreak.cloud.model.PlatformDisks;
import com.sequenceiq.cloudbreak.cloud.model.PlatformOrchestrators;
import com.sequenceiq.cloudbreak.cloud.model.PlatformRecommendation;
import com.sequenceiq.cloudbreak.cloud.model.PlatformRegions;
import com.sequenceiq.cloudbreak.cloud.model.PlatformVariants;
import com.sequenceiq.cloudbreak.cloud.model.PlatformVirtualMachines;
import com.sequenceiq.cloudbreak.controller.exception.BadRequestException;
import com.sequenceiq.cloudbreak.domain.PlatformResourceRequest;
import com.sequenceiq.cloudbreak.domain.organization.Organization;
import com.sequenceiq.cloudbreak.domain.organization.User;
import com.sequenceiq.cloudbreak.service.RestRequestThreadLocalService;
import com.sequenceiq.cloudbreak.service.organization.OrganizationService;
import com.sequenceiq.cloudbreak.service.stack.CloudParameterService;
import com.sequenceiq.cloudbreak.service.stack.CloudResourceAdvisor;
import com.sequenceiq.cloudbreak.service.user.UserService;
@Component
@Transactional(TxType.NEVER)
public class PlatformParameterV1Controller implements ConnectorV1Endpoint {

    @Inject
    private CloudParameterService cloudParameterService;

    @Inject
    @Named("conversionService")
    private ConversionService conversionService;

    @Inject
    private CloudResourceAdvisor cloudResourceAdvisor;

    @Inject
    private UserService userService;

    @Inject
    private RestRequestThreadLocalService restRequestThreadLocalService;

    @Inject
    private OrganizationService organizationService;

    /**
     * Aggregates every platform-related lookup (variants, disks, regions, VM types,
     * orchestrators, tag specifications and special parameters) into one response map.
     *
     * @param extended currently unused flag from the endpoint contract
     * @return map keyed by section name ("variants", "disks", "regions", ...)
     */
    @Override
    public Map<String, Object> getPlatforms(Boolean extended) {
        PlatformVariants pv = cloudParameterService.getPlatformVariants();
        PlatformDisks diskTypes = cloudParameterService.getDiskTypes();
        PlatformRegions regions = cloudParameterService.getRegions();
        // intentionally empty: virtual machine types are resolved elsewhere
        PlatformVirtualMachines vmtypes = new PlatformVirtualMachines();
        PlatformOrchestrators orchestrators = cloudParameterService.getOrchestrators();
        Map<Platform, PlatformParameters> platformParameters = cloudParameterService.getPlatformParameters();
        SpecialParameters specialParameters = cloudParameterService.getSpecialParameters();
        Map<String, Object> map = new HashMap<>();
        map.put("variants", conversionService.convert(pv, PlatformVariantsJson.class));
        map.put("disks", conversionService.convert(diskTypes, PlatformDisksJson.class));
        map.put("regions", conversionService.convert(regions, PlatformRegionsJson.class));
        map.put("virtualMachines", conversionService.convert(vmtypes, PlatformVirtualMachinesJson.class));
        map.put("orchestrators", conversionService.convert(orchestrators, PlatformOrchestratorsJson.class));
        map.put("tagspecifications", conversionService.convert(platformParameters, TagSpecificationsJson.class));
        // shared with getSpecialProperties(): single source of truth for this assembly
        map.put("specialParameters", buildSpecialParametersJson(specialParameters, platformParameters));
        return map;
    }

    @Override
    public PlatformVariantsJson getPlatformVariants() {
        PlatformVariants pv = cloudParameterService.getPlatformVariants();
        return conversionService.convert(pv, PlatformVariantsJson.class);
    }

    /** Returns the variants for one platform type, or an empty list for unknown types. */
    @Override
    public Collection<String> getPlatformVariantByType(String type) {
        PlatformVariants pv = cloudParameterService.getPlatformVariants();
        Collection<String> strings = conversionService.convert(pv, PlatformVariantsJson.class).getPlatformToVariants().get(type.toUpperCase());
        return strings == null ? new ArrayList<>() : strings;
    }

    @Override
    public PlatformDisksJson getDisktypes() {
        PlatformDisks dts = cloudParameterService.getDiskTypes();
        return conversionService.convert(dts, PlatformDisksJson.class);
    }

    /** Returns the disk types for one platform type, or an empty list for unknown types. */
    @Override
    public Collection<String> getDisktypeByType(String type) {
        PlatformDisks diskTypes = cloudParameterService.getDiskTypes();
        Collection<String> strings = conversionService.convert(diskTypes, PlatformDisksJson.class)
                .getDiskTypes().get(type.toUpperCase());
        return strings == null ? new ArrayList<>() : strings;
    }

    @Override
    public PlatformOrchestratorsJson getOrchestratortypes() {
        PlatformOrchestrators orchestrators = cloudParameterService.getOrchestrators();
        return conversionService.convert(orchestrators, PlatformOrchestratorsJson.class);
    }

    /** Returns the orchestrators for one platform type, or an empty list for unknown types. */
    @Override
    public Collection<String> getOchestratorsByType(String type) {
        PlatformOrchestrators orchestrators = cloudParameterService.getOrchestrators();
        Collection<String> strings = conversionService.convert(orchestrators, PlatformOrchestratorsJson.class)
                .getOrchestrators().get(type.toUpperCase());
        return strings == null ? new ArrayList<>() : strings;
    }

    @Override
    public PlatformRegionsJson getRegions() {
        PlatformRegions pv = cloudParameterService.getRegions();
        return conversionService.convert(pv, PlatformRegionsJson.class);
    }

    /** Returns the regions for one platform type, or an empty list for unknown types. */
    @Override
    public Collection<String> getRegionRByType(String type) {
        PlatformRegions pv = cloudParameterService.getRegions();
        Collection<String> regions = conversionService.convert(pv, PlatformRegionsJson.class)
                .getRegions().get(type.toUpperCase());
        return regions == null ? new ArrayList<>() : regions;
    }

    /** Returns availability zones per region for one platform type, or an empty map. */
    @Override
    public Map<String, Collection<String>> getRegionAvByType(String type) {
        PlatformRegions pv = cloudParameterService.getRegions();
        Map<String, Collection<String>> azs = conversionService.convert(pv, PlatformRegionsJson.class)
                .getAvailabilityZones().get(type.toUpperCase());
        return azs == null ? new HashMap<>() : azs;
    }

    @Override
    public TagSpecificationsJson getTagSpecifications() {
        Map<Platform, PlatformParameters> platformParameters = cloudParameterService.getPlatformParameters();
        return conversionService.convert(platformParameters, TagSpecificationsJson.class);
    }

    @Override
    public SpecialParametersJson getSpecialProperties() {
        return buildSpecialParametersJson(cloudParameterService.getSpecialParameters(),
                cloudParameterService.getPlatformParameters());
    }

    /**
     * Creates a VM recommendation for a blueprint. Either blueprintId or blueprintName
     * must be present; region and availabilityZone are mandatory.
     *
     * @throws BadRequestException if a mandatory field is missing
     */
    @Override
    public RecommendationResponse createRecommendation(RecommendationRequestJson recommendationRequestJson) {
        PlatformResourceRequest resourceRequest = conversionService.convert(recommendationRequestJson, PlatformResourceRequest.class);
        if (recommendationRequestJson.getBlueprintId() == null && Strings.isNullOrEmpty(recommendationRequestJson.getBlueprintName())) {
            // both identifiers are absent, so this call always raises BadRequestException
            fieldIsNotEmpty(recommendationRequestJson.getBlueprintId(), "blueprintId");
        }
        fieldIsNotEmpty(resourceRequest.getRegion(), "region");
        fieldIsNotEmpty(resourceRequest.getAvailabilityZone(), "availabilityZone");
        User user = userService.getOrCreate(restRequestThreadLocalService.getIdentityUser());
        Organization organization = organizationService.get(restRequestThreadLocalService.getRequestedOrgId(), user);
        PlatformRecommendation recommendedVms =
                cloudResourceAdvisor.createForBlueprint(recommendationRequestJson.getBlueprintName(), recommendationRequestJson.getBlueprintId(),
                        resourceRequest, user, organization);
        return conversionService.convert(recommendedVms, RecommendationResponse.class);
    }

    @Override
    public PlatformNetworksResponse getCloudNetworks(PlatformResourceRequestJson resourceRequestJson) {
        PlatformResourceRequest convert = conversionService.convert(resourceRequestJson, PlatformResourceRequest.class);
        CloudNetworks cloudNetworks = cloudParameterService.getCloudNetworks(convert.getCredential(), convert.getRegion(),
                convert.getPlatformVariant(), convert.getFilters());
        return conversionService.convert(cloudNetworks, PlatformNetworksResponse.class);
    }

    @Override
    public PlatformSshKeysResponse getCloudSshKeys(PlatformResourceRequestJson resourceRequestJson) {
        PlatformResourceRequest convert = conversionService.convert(resourceRequestJson, PlatformResourceRequest.class);
        CloudSshKeys cloudSshKeys = cloudParameterService.getCloudSshKeys(convert.getCredential(), convert.getRegion(),
                convert.getPlatformVariant(), convert.getFilters());
        return conversionService.convert(cloudSshKeys, PlatformSshKeysResponse.class);
    }

    @Override
    public PlatformSecurityGroupsResponse getSecurityGroups(PlatformResourceRequestJson resourceRequestJson) {
        PlatformResourceRequest convert = conversionService.convert(resourceRequestJson, PlatformResourceRequest.class);
        CloudSecurityGroups securityGroups = cloudParameterService.getSecurityGroups(convert.getCredential(), convert.getRegion(),
                convert.getPlatformVariant(), convert.getFilters());
        return conversionService.convert(securityGroups, PlatformSecurityGroupsResponse.class);
    }

    @Override
    public PlatformGatewaysResponse getGatewaysCredentialId(PlatformResourceRequestJson resourceRequestJson) {
        PlatformResourceRequest convert = conversionService.convert(resourceRequestJson, PlatformResourceRequest.class);
        CloudGateWays cloudGateWays = cloudParameterService.getGateways(convert.getCredential(), convert.getRegion(),
                convert.getPlatformVariant(), convert.getFilters());
        return conversionService.convert(cloudGateWays, PlatformGatewaysResponse.class);
    }

    @Override
    public PlatformIpPoolsResponse getIpPoolsCredentialId(PlatformResourceRequestJson resourceRequestJson) {
        PlatformResourceRequest convert = conversionService.convert(resourceRequestJson, PlatformResourceRequest.class);
        CloudIpPools cloudIpPools = cloudParameterService.getPublicIpPools(convert.getCredential(), convert.getRegion(),
                convert.getPlatformVariant(), convert.getFilters());
        return conversionService.convert(cloudIpPools, PlatformIpPoolsResponse.class);
    }

    @Override
    public PlatformAccessConfigsResponse getAccessConfigs(PlatformResourceRequestJson resourceRequestJson) {
        PlatformResourceRequest convert = conversionService.convert(resourceRequestJson, PlatformResourceRequest.class);
        CloudAccessConfigs cloudAccessConfigs = cloudParameterService.getCloudAccessConfigs(convert.getCredential(), convert.getRegion(),
                convert.getPlatformVariant(), convert.getFilters());
        return conversionService.convert(cloudAccessConfigs, PlatformAccessConfigsResponse.class);
    }

    @Override
    public PlatformEncryptionKeysResponse getEncryptionKeys(PlatformResourceRequestJson resourceRequestJson) {
        PlatformResourceRequest convert = conversionService.convert(resourceRequestJson, PlatformResourceRequest.class);
        CloudEncryptionKeys cloudEncryptionKeys = cloudParameterService.getCloudEncryptionKeys(convert.getCredential(), convert.getRegion(),
                convert.getPlatformVariant(), convert.getFilters());
        return conversionService.convert(cloudEncryptionKeys, PlatformEncryptionKeysResponse.class);
    }

    /**
     * Builds the special-parameters payload. Extracted so getPlatforms() and
     * getSpecialProperties() cannot drift apart (they previously duplicated this code).
     */
    private SpecialParametersJson buildSpecialParametersJson(SpecialParameters specialParameters,
            Map<Platform, PlatformParameters> platformParameters) {
        Map<String, Boolean> globalParameters = conversionService.convert(specialParameters, Map.class);
        Map<String, Map<String, Boolean>> platformSpecificParameters = conversionService.convert(platformParameters, Map.class);
        SpecialParametersJson specialParametersJson = new SpecialParametersJson();
        specialParametersJson.setSpecialParameters(globalParameters);
        specialParametersJson.setPlatformSpecificSpecialParameters(platformSpecificParameters);
        return specialParametersJson;
    }

    /** Raises BadRequestException when a mandatory request field is null or empty. */
    private void fieldIsNotEmpty(Object field, String fieldName) {
        if (StringUtils.isEmpty(field)) {
            throw new BadRequestException(String.format("The '%s' request body field is mandatory for recommendation creation.", fieldName));
        }
    }
}
|
Semantria/sem5.java
|
src/main/java/com/lexalytics/semantria/client/dto/DocumentResult.java
|
<reponame>Semantria/sem5.java<filename>src/main/java/com/lexalytics/semantria/client/dto/DocumentResult.java<gh_stars>0
package com.lexalytics.semantria.client.dto;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.PropertyNamingStrategy;
import com.fasterxml.jackson.databind.annotation.JsonNaming;
import java.util.List;
@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class)
@JsonInclude(JsonInclude.Include.NON_NULL)
/**
 * Result of analysing one document through the Semantria API.
 *
 * Plain Jackson DTO: fields are serialized with snake_case names and null
 * fields are omitted. All accessors are simple getters/setters with no logic.
 */
public class DocumentResult {
    private String id;
    private String configId;
    private String languageId;
    private String accountId;
    private String userId;
    private String jobId;
    private String tag;
    // free-form client metadata echoed back by the service
    private JsonNode metadata;
    private String creationDate;
    private String status;
    private String summary;
    // JSON name "taxonomy" differs from the field name, hence the explicit mapping
    @JsonProperty("taxonomy")
    @JsonTypeInfo(use = JsonTypeInfo.Id.NONE)
    private List<TaxonomyNodeObject> taxonomies;
    private String sourceText;
    // serialized as "sentiment_score" rather than the derived "score"
    @JsonProperty("sentiment_score")
    private Float score;
    private String sentimentPolarity;
    private String language;
    private Float languageScore;
    private ModelSentiment modelSentiment;
    @JsonTypeInfo(use = JsonTypeInfo.Id.NONE)
    private List<Topic> topics;
    private List<Entity> entities;
    private List<Theme> themes;
    private List<Phrase> phrases;
    private List<Sentence> details;
    private List<Relation> relations;
    private List<Opinion> opinions;
    private List<Topic> autoCategories;
    private List<Intention> intentions;

    // --- accessors (no behavior beyond get/set) ---

    public Float getScore() {
        return score;
    }
    public void setScore(Float score) {
        this.score = score;
    }
    public String getSentimentPolarity() {
        return sentimentPolarity;
    }
    public void setSentimentPolarity(String sentimentPolarity) {
        this.sentimentPolarity = sentimentPolarity;
    }
    public List<Topic> getTopics() {
        return topics;
    }
    public void setTopics(List<Topic> topics) {
        this.topics = topics;
    }
    public List<Entity> getEntities() {
        return entities;
    }
    public void setEntities(List<Entity> entities) {
        this.entities = entities;
    }
    public List<Theme> getThemes() {
        return themes;
    }
    public void setThemes(List<Theme> themes) {
        this.themes = themes;
    }
    public List<Phrase> getPhrases() {
        return phrases;
    }
    public void setPhrases(List<Phrase> phrases) {
        this.phrases = phrases;
    }
    public String getLanguage() {
        return language;
    }
    public void setLanguage(String language) {
        this.language = language;
    }
    public Float getLanguageScore() {
        return languageScore;
    }
    public void setLanguageScore(Float languageScore) {
        this.languageScore = languageScore;
    }
    public List<Sentence> getDetails() {
        return details;
    }
    public void setDetails(List<Sentence> details) {
        this.details = details;
    }
    public List<Relation> getRelations() {
        return relations;
    }
    public void setRelations(List<Relation> relations) {
        this.relations = relations;
    }
    public String getSourceText() {
        return sourceText;
    }
    public void setSourceText(String sourceText) {
        this.sourceText = sourceText;
    }
    public List<Opinion> getOpinions() {
        return opinions;
    }
    public void setOpinions(List<Opinion> opinions) {
        this.opinions = opinions;
    }
    public List<Topic> getAutoCategories() {
        return autoCategories;
    }
    public void setAutoCategories(List<Topic> autoCategories) {
        this.autoCategories = autoCategories;
    }
    public List<Intention> getIntentions() {
        return intentions;
    }
    public void setIntentions(List<Intention> intentions) {
        this.intentions = intentions;
    }
    public ModelSentiment getModelSentiment() {
        return modelSentiment;
    }
    public void setModelSentiment(ModelSentiment modelSentiment) {
        this.modelSentiment = modelSentiment;
    }
    public String getId() {
        return id;
    }
    public void setId(String id) {
        this.id = id;
    }
    public String getConfigId() {
        return configId;
    }
    public void setConfigId(String configId) {
        this.configId = configId;
    }
    public String getLanguageId() {
        return languageId;
    }
    public void setLanguageId(String languageId) {
        this.languageId = languageId;
    }
    public String getAccountId() {
        return accountId;
    }
    public void setAccountId(String accountId) {
        this.accountId = accountId;
    }
    public String getUserId() {
        return userId;
    }
    public void setUserId(String userId) {
        this.userId = userId;
    }
    public String getJobId() {
        return jobId;
    }
    public void setJobId(String jobId) {
        this.jobId = jobId;
    }
    public String getTag() {
        return tag;
    }
    public void setTag(String tag) {
        this.tag = tag;
    }
    public JsonNode getMetadata() {
        return metadata;
    }
    public void setMetadata(JsonNode metadata) {
        this.metadata = metadata;
    }
    public String getCreationDate() {
        return creationDate;
    }
    public void setCreationDate(String creationDate) {
        this.creationDate = creationDate;
    }
    public String getStatus() {
        return status;
    }
    public void setStatus(String status) {
        this.status = status;
    }
    public String getSummary() {
        return summary;
    }
    public void setSummary(String summary) {
        this.summary = summary;
    }
    public List<TaxonomyNodeObject> getTaxonomies() {
        return taxonomies;
    }
    public void setTaxonomies(List<TaxonomyNodeObject> taxonomies) {
        this.taxonomies = taxonomies;
    }
}
|
khamutov/intellij-scala
|
scala/compiler-jps/src/org/jetbrains/jps/incremental/scala/sbtzinc/ModulesFedToZincStore.scala
|
<reponame>khamutov/intellij-scala
package org.jetbrains.jps.incremental.scala.sbtzinc
import java.util
import com.intellij.openapi.util.Key
import org.jetbrains.jps.ModuleChunk
import org.jetbrains.jps.incremental.CompileContext
import org.jetbrains.jps.incremental.scala.SourceDependenciesProviderService
import org.jetbrains.jps.model.module.{JpsDependencyElement, JpsModule, JpsModuleDependency, JpsModuleSourceDependency}
import scala.collection.JavaConverters._
import scala.collection.mutable
/**
* Keep track of dirty modules and dirty dependant modules for a single build
*/
object ModulesFedToZincStore {
  // Storage is duplicated on purpose: the context user-data alone can miss
  // recompiled modules/scopes, so a process-wide concurrent set backs it up.
  private val dataKey: Key[Set[String]] = new Key[Set[String]]("MODULES_FED_TO_ZINC") {}
  private val modulesFedToZincStore = java.util.concurrent.ConcurrentHashMap.newKeySet[String]()

  /** True when any module in the chunk depends on a module already fed to zinc. */
  def checkIfAnyModuleDependencyWasFedToZinc(context: CompileContext, chunk: ModuleChunk): Boolean = {
    val alreadyFed = ModulesFedToZincStore.get(context)

    def seenBefore(moduleName: String): Boolean =
      alreadyFed.contains(moduleName) || modulesFedToZincStore.contains(moduleName)

    def dependencyWasFed(dep: JpsDependencyElement): Boolean = dep match {
      case moduleDep: JpsModuleDependency => seenBefore(moduleDep.getModule.getName)
      case _: JpsModuleSourceDependency   => seenBefore(dep.getContainingModule.getName)
      case _                              => false
    }

    def moduleHasFedDependency(module: JpsModule): Boolean =
      module.getDependenciesList.getDependencies.iterator().asScala.exists(dependencyWasFed)

    chunk.getModules.iterator().asScala.exists(moduleHasFedDependency)
  }

  /** Records the given module names in both the context data and the backup set. */
  def add(context: CompileContext, moduleNames: Seq[String]): Unit =
    dataKey.synchronized {
      context.putUserData(dataKey, getValue(context) ++ moduleNames)
      modulesFedToZincStore.addAll(moduleNames.asJava)
    }

  private def get(context: CompileContext): Set[String] =
    dataKey.synchronized(getValue(context))

  private def getValue(context: CompileContext): Set[String] =
    Option(context.getUserData(dataKey)).getOrElse(Set.empty[String])
}
|
YGLLL/FunLive
|
app/src/main/java/com/github/yglll/funlive/view/widget/FunLiveWidget.java
|
<reponame>YGLLL/FunLive<gh_stars>10-100
package com.github.yglll.funlive.view.widget;
import android.app.PendingIntent;
import android.appwidget.AppWidgetManager;
import android.appwidget.AppWidgetProvider;
import android.content.Context;
import android.content.Intent;
import android.widget.RemoteViews;
import com.github.yglll.funlive.R;
import com.github.yglll.funlive.view.MainActivity;
/**
* 作者:YGL
* 版本号:1.0
* 类描述:
* 备注消息:
* 创建时间:2018/02/19 2:07
**/
/**
 * Home-screen widget provider: refreshes each widget instance with the app layout.
 */
public class FunLiveWidget extends AppWidgetProvider {
    @Override
    public void onUpdate(Context context, AppWidgetManager appWidgetManager, int[] appWidgetIds) {
        // Loop-invariant: the same activity intent applies to every widget id, so
        // build it (and its PendingIntent) once instead of once per iteration.
        Intent intent = new Intent(context, MainActivity.class);
        PendingIntent pendingIntent = PendingIntent.getActivity(context, 0, intent, 0);
        for (int appWidgetId : appWidgetIds) {
            RemoteViews views = new RemoteViews(context.getPackageName(), R.layout.widget_layout);
            // NOTE(review): the click binding is disabled, so pendingIntent is
            // currently unused; re-enable the line below to make taps open MainActivity.
            //views.setOnClickPendingIntent(R.id.widget, pendingIntent);
            appWidgetManager.updateAppWidget(appWidgetId, views);
        }
    }
}
|
neinteractiveliterature/intercode
|
app/policies/queries/query_manager.rb
|
# frozen_string_literal: true
# Base class for query managers: subclasses declare query methods that are
# authorized for the wrapped user.
class Queries::QueryManager
  # Public instance methods declared directly on the receiver
  # (inherited methods are excluded by the `false` argument).
  def self.query_methods
    instance_methods(false)
  end

  # The user on whose behalf queries run.
  attr_reader :user

  def initialize(user:)
    @user = user
  end
end
|
WJ44/ElementalChemistry
|
src/main/java/com/wj44/echem/item/ItemElementContainer.java
|
package com.wj44.echem.item;
import com.wj44.echem.creativetab.ModCreativeTabs;
import com.wj44.echem.init.ModItems;
import com.wj44.echem.reference.Names;
import com.wj44.echem.reference.Textures;
import com.wj44.elementscore.api.Element;
import net.minecraft.client.resources.model.ModelBakery;
import net.minecraft.creativetab.CreativeTabs;
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.item.Item;
import net.minecraft.item.ItemStack;
import net.minecraft.nbt.NBTTagCompound;
import net.minecraft.world.World;
import java.util.List;
/**
* Created by Wesley "WJ44" Joosten on 29/12/2015.
* -
* Part of the ElementalChemistry Mod, distributed under a
* Creative Commons Attribution-NonCommercial-ShareAlike 3.0 License
* (https://creativecommons.org/licenses/by-nc-sa/3.0/)
*/
public class ItemElementContainer extends ItemEChem
{
    public ItemElementContainer()
    {
        setCreativeTab(ModCreativeTabs.tabElementContainers);
        setHasSubtypes(true);
        setUnlocalizedName(Names.Items.ELEMENT_CONTAINER);
        setMaxStackSize(1);
    }

    /**
     * Ensures the stack carries an NBT tag with an "amount" counter initialised to 0.
     * Extracted because onCreated() and addInformation() previously duplicated this code.
     */
    private static void ensureAmountTag(ItemStack itemStack)
    {
        if (itemStack.getTagCompound() == null)
        {
            itemStack.setTagCompound(new NBTTagCompound());
            itemStack.getTagCompound().setInteger("amount", 0);
        }
    }

    @Override
    public String getUnlocalizedName(ItemStack itemStack)
    {
        // per-subtype name: base name plus the damage-indexed subtype suffix
        return String.format("item.%s%s%s", Textures.RESOURCE_PREFIX, Names.Items.ELEMENT_CONTAINER, Names.Items.ELEMENT_CONTAINER_SUBTYPES[itemStack.getItemDamage()]);
    }

    @Override
    public void getSubItems(Item item, CreativeTabs tab, List subItems)
    {
        // one creative-tab entry per known element (meta value = element index)
        for(int meta = 0; meta < Element.elements.values().size(); ++meta)
        {
            subItems.add(new ItemStack(this, 1, meta));
        }
    }

    /** Registers one model variant name per container subtype with the model bakery. */
    public static void registerVariants()
    {
        String[] variantNames = new String[Names.Items.ELEMENT_CONTAINER_SUBTYPES.length];
        for (int i = 0; i < Names.Items.ELEMENT_CONTAINER_SUBTYPES.length; i++)
        {
            variantNames[i] = Textures.RESOURCE_PREFIX + Names.Items.ELEMENT_CONTAINER + Names.Items.ELEMENT_CONTAINER_SUBTYPES[i];
        }
        ModelBakery.addVariantName(ModItems.elementContainer, variantNames);
    }

    @Override
    public void onCreated(ItemStack itemStack, World world, EntityPlayer player)
    {
        ensureAmountTag(itemStack);
    }

    @Override
    public void addInformation(ItemStack itemStack, EntityPlayer player, List tooltip, boolean advanced)
    {
        ensureAmountTag(itemStack);
        if (itemStack.getTagCompound().getInteger("amount") != 0)
        {
            // NOTE(review): removing "Empty" here looks like a no-op on a freshly
            // built tooltip list — confirm whether it is still needed.
            tooltip.remove("Empty");
            tooltip.add("Amount: " + itemStack.getTagCompound().getInteger("amount"));
        }
        else
        {
            tooltip.add("Empty");
        }
    }
}
|
Jakubeeee/iotaccess
|
core/src/main/java/com/jakubeeee/iotaccess/core/misc/SortedProperties.java
|
package com.jakubeeee.iotaccess.core.misc;
import java.util.*;
import static java.util.Collections.enumeration;
import static java.util.Collections.unmodifiableSet;
import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.toCollection;
public final class SortedProperties extends Properties {
@Override
public Set<Object> keySet() {
return unmodifiableSet(new TreeSet<>(super.keySet()));
}
@Override
public Set<Map.Entry<Object, Object>> entrySet() {
return super.entrySet().stream()
.sorted(comparing(entry -> entry.getKey().toString()))
.collect(toCollection(LinkedHashSet::new));
}
@Override
public synchronized Enumeration<Object> keys() {
return enumeration(new TreeSet<>(super.keySet()));
}
}
|
FanOfSilence/A-Stream-of-Torrents
|
astreamoftorrents/src/test/java/TestMagnet.java
|
import magnet.Magnet;
import org.junit.Before;
import org.junit.Test;

import java.net.URI;

import static org.junit.Assert.assertEquals;
/**
* Created by Jesper on 2017-06-03.
*/
public class TestMagnet {
    private Magnet magnet;

    @Before
    public void setUp() {
        // Parse the scheme-specific part ("xt=...") out of the magnet URI fixture.
        URI uri = URI.create(MockMagnetString.magnetString);
        String testString = uri.getSchemeSpecificPart();
        magnet = new Magnet(testString);
    }

    @Test
    public void testMagnetProperties() {
        // Use a JUnit assertion: the bare `assert` keyword is silently skipped
        // unless the JVM runs with -ea, so the previous test could never fail.
        assertEquals("urn:btih:5c29c2615e13815c0466726c8ea76d77a32e6c42", magnet.xt());
    }
}
|
Rarestq/galaxy
|
galaxy-web/src/main/java/com/wuxiu/galaxy/web/biz/vo/ChargeCalculateRuleVO.java
|
<filename>galaxy-web/src/main/java/com/wuxiu/galaxy/web/biz/vo/ChargeCalculateRuleVO.java<gh_stars>1-10
package com.wuxiu.galaxy.web.biz.vo;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.io.Serializable;
/**
 * Display object for a charge calculation rule.
 *
 * @author: wuxiu
 * @date: 2019/5/7 22:55
 */
@ApiModel("计费规则展示对象")
@Data
public class ChargeCalculateRuleVO implements Serializable {

    private static final long serialVersionUID = 2243939964870479813L;

    /**
     * Primary key id of the charge calculation rule
     */
    @ApiModelProperty(value = "计费规则主键id", required = true)
    private Long calculationRuleId;

    /**
     * Description of the charge calculation rule
     */
    @ApiModelProperty(value = "计费规则描述", required = true)
    private String calculateRuleDesc;

    /**
     * Luggage type
     */
    @ApiModelProperty(value = "行李类型", required = true)
    private String luggageType;

    /**
     * Charging unit (1 - yuan/item/day, 2 - yuan/item/use, 3 - yuan/item)
     */
    @ApiModelProperty(value = "计费单位(1-元/件/天,2-元/件/次,3-元/件)", required = true)
    private String calculationUnits;

    /**
     * Fee per unit
     */
    @ApiModelProperty(value = "单位金额", required = true)
    private String feePerUnit;

    /**
     * Creation time
     */
    @ApiModelProperty(value = "创建时间", required = true)
    private String gmtCreate;
}
|
Yelp/mycroft
|
mycroft/mycroft/logic/cluster_actions.py
|
# -*- coding: utf-8 -*-
"""
**logic.cluster_actions**
=========================
A collection of functions to handle actions relating to clusters
in the mycroft service. The C part of MVC for mycroft/clusters
"""
import simplejson
import re
from sherlock.common.redshift_psql import DEFAULT_NAMESPACE
MAX_CLUSTER_NAME_LENGTH = 63
REDSHIFT_ID_RE = re.compile(r"""^[a-zA-Z](-?[a-zA-Z0-9])+$""")
REQUIRED_POST_ARGS = frozenset([
"redshift_id",
"port",
"host",
])
CLUSTER_KWARGS = {
'redshift_id': None,
'port': None,
'host': None,
'db_schema': None,
'groups': None,
'node_type': None,
'node_count': None,
}
def _parse_clusters(query_result):
    """
    Convert the results of a query on the backing store redshift_cluster_object
    into a dictionary with a key of 'clusters' and a value of a list of clusters.

    *Each cluster returned in the list is of the form*::

        {
            'port': None,
            'host': None,
            'db_schema': None,
            'groups': None,
            'redshift_id': None,
        }

    :param query_result: iterable of RedshiftCluster elements
    :type query_result: iterable
    :returns: A dict of \{'clusters': [cluster1, cluster2, ...]\}
    :rtype: dict
    """
    parsed = [_construct_cluster_dict(cluster_entry) for cluster_entry in query_result]
    return {'clusters': parsed}
def _construct_cluster_dict(cluster_object):
    """Extract the CLUSTER_KWARGS fields from one cluster object as a plain dict.

    The backing store may return ``groups`` as a set; it is converted to a list
    so the result is JSON-serializable.

    :param cluster_object: one RedshiftCluster element from the backing store
    :returns: dict with the keys of CLUSTER_KWARGS
    :rtype: dict
    """
    cluster_dict = cluster_object.get(**CLUSTER_KWARGS)
    groups = cluster_dict.get('groups', None)
    if groups:
        cluster_dict['groups'] = list(groups)
    return cluster_dict
def list_all_clusters(redshift_clusters_object):
    """
    lists all clusters

    *Each cluster returned in the list is of the form*::

        {
            'port': None,
            'host': None,
            'db_schema': None,
            'groups': None,
            'redshift_id': None,
        }

    :param redshift_clusters_object: the RedshiftClusters from which we read clusters
    :type redshift_clusters_object: an instance of RedshiftClusters
    :returns: A dict of \{'clusters': [cluster1, cluster2, ...]\}
    :rtype: dict
    """
    # _parse_clusters iterates its argument itself; materializing an
    # intermediate list first (as the old code did) was redundant.
    return _parse_clusters(redshift_clusters_object)
def list_cluster_by_name(redshift_clusters_object, cluster_name):
    """
    fetches the cluster entry stored under a particular cluster_name

    *The returned cluster is of the form*::

        {
            'port': None,
            'host': None,
            'db_schema': None,
            'groups': None,
            'redshift_id': None,
        }

    :param redshift_clusters_object: the RedshiftClusters from which we read clusters
    :type redshift_clusters_object: an instance of RedshiftClusters
    :param cluster_name: name of the redshift cluster (e.g., cluster)
    :type cluster_name: string
    :returns: a single cluster dict (not wrapped in a 'clusters' list)
    :rtype: dict
    :raises ValueError: if cluster_name is missing, malformed, or too long
    """
    if cluster_name is None:
        raise ValueError("no cluster name")
    if REDSHIFT_ID_RE.match(cluster_name) is None:
        raise ValueError("invalid cluster_name: {0}".format(cluster_name))
    if len(cluster_name) > MAX_CLUSTER_NAME_LENGTH:
        # the format string previously ended with a stray duplicate "{0}"
        raise ValueError("invalid cluster_name: {0} exceeds {1} char limit".format(
            cluster_name, MAX_CLUSTER_NAME_LENGTH))
    cluster = redshift_clusters_object.get(redshift_id=cluster_name)
    return _construct_cluster_dict(cluster)
def _check_required_args(param_dict):
    """
    Validate the required cluster parameters, raising ValueError on any problem:
    missing required keys, malformed or over-long redshift_id, or a port that
    is not an integer in [1000, 65535].

    :param param_dict: parameters to enter into the backing store
    :type param_dict: dictionary
    :returns: None
    :rtype: None
    :raises ValueError: if any required argument is missing or invalid
    """
    if not REQUIRED_POST_ARGS.issubset(set(param_dict)):
        missing_args = list(REQUIRED_POST_ARGS - set(param_dict))
        raise ValueError("missing the following required args {0}".format(missing_args))
    if REDSHIFT_ID_RE.match(param_dict['redshift_id']) is None:
        raise ValueError("invalid cluster_name: {0}".format(param_dict['redshift_id']))
    if len(param_dict['redshift_id']) > MAX_CLUSTER_NAME_LENGTH:
        # the format string previously ended with a stray duplicate "{0}"
        raise ValueError("invalid cluster_name: {0} exceeds {1} char limit".format(
            param_dict['redshift_id'], MAX_CLUSTER_NAME_LENGTH))
    try:
        port = int(param_dict['port'])
    except ValueError:
        raise ValueError("invalid port: {0}".format(param_dict['port']))
    if port < 1000 or port > 65535:
        raise ValueError("invalid port: {0}".format(port))
def post_cluster(redshift_clusters_object, request_body_str):
    """
    Validate and store a new cluster entry.

    the request body should be a JSON-encoded dictionary with the following
    required keys:

    * redshift_id
    * host
    * port

    :param redshift_clusters_object: an instance of RedshiftClusters
    :type redshift_clusters_object: RedshiftClusters
    :param request_body_str: the body of the post as a JSON string
    :type request_body_str: string
    :returns: {'post_accepted': <bool>}
    :rtype: dict
    :raises ValueError: if required args are missing or invalid
    :raises S3ResponseError: if the bytes written don't match the length of
        the content
    """
    request_body_dict = simplejson.loads(request_body_str)
    # normalize groups to a set (empty set when the key is absent or None)
    groups = request_body_dict.get('groups')
    request_body_dict['groups'] = set(groups) if groups is not None else set()
    if 'db_schema' not in request_body_dict:
        request_body_dict['db_schema'] = DEFAULT_NAMESPACE
    else:
        # store lower case name -- use only lower for case insensitivity
        request_body_dict['db_schema'] = request_body_dict['db_schema'].lower()
    _check_required_args(request_body_dict)
    return {'post_accepted': redshift_clusters_object.put(**request_body_dict)}
|
tglatt/emjpm
|
packages/knex/migrations/20190816180823_view_department_availability.js
|
<filename>packages/knex/migrations/20190816180823_view_department_availability.js
exports.up = async function(knex) {
return knex.raw(`
CREATE VIEW view_department_availability AS
SELECT department_id, sum(mesures_awaiting) mesures_awaiting, sum(mesures_in_progress) mesures_in_progress, sum(mesures_max) mesures_max
FROM view_mesure_gestionnaire
GROUP BY department_id
`);
};
exports.down = function(knex) {
return knex.raw(`
DROP VIEW view_department_availability
`);
};
|
nattyco/fermatold
|
CBP/library/api/fermat-cbp-api/src/main/java/com/bitdubai/fermat_cbp_api/layer/sub_app_module/crypto_broker_identity/interfaces/CryptoBrokerIdentityModuleManager.java
|
package com.bitdubai.fermat_cbp_api.layer.sub_app_module.crypto_broker_identity.interfaces;
import com.bitdubai.fermat_api.layer.modules.ModuleManager;
import com.bitdubai.fermat_cbp_api.layer.sub_app_module.crypto_broker_identity.exceptions.CantGetCryptoBrokerListException;
import com.bitdubai.fermat_cbp_api.layer.sub_app_module.crypto_broker_identity.exceptions.CouldNotCreateCryptoBrokerException;
import com.bitdubai.fermat_cbp_api.layer.sub_app_module.crypto_broker_identity.exceptions.CouldNotPublishCryptoBrokerException;
import com.bitdubai.fermat_cbp_api.layer.sub_app_module.crypto_broker_identity.exceptions.CouldNotUnPublishCryptoBrokerException;
import java.util.List;
/**
 * Created by natalia on 16/09/15.
 */
/**
 * The interface <code>com.bitdubai.fermat_cbp_api.layer.cbp_sub_app_module.crypto_broker_identity.interfaces.CryptoBrokerIdentityModuleManager</code>
 * provides the methods for the Crypto Broker Identity sub app.
 */
public interface CryptoBrokerIdentityModuleManager extends ModuleManager {
    /**
     * The method <code>createCryptoBrokerIdentity</code> is used to create a new crypto Broker identity
     *
     * @param cryptoBrokerName the name of the crypto Broker to create
     * @param profileImage the profile image of the crypto Broker to create
     * @return the crypto broker identity generated.
     * @throws CouldNotCreateCryptoBrokerException if the identity could not be created
     */
    public CryptoBrokerIdentityInformation createCryptoBrokerIdentity(String cryptoBrokerName, byte[] profileImage) throws CouldNotCreateCryptoBrokerException;
    /**
     * The method <code>publishCryptoBrokerIdentity</code> is used to publish a Broker identity
     *
     * @param cryptoBrokerPublicKey the public key of the crypto Broker to publish
     *
     * @throws CouldNotPublishCryptoBrokerException if the identity could not be published
     */
    public void publishCryptoBrokerIdentity(String cryptoBrokerPublicKey) throws CouldNotPublishCryptoBrokerException;
    /**
     * The method <code>unPublishCryptoBrokerIdentity</code> is used to stop publishing a Broker identity
     *
     * @param cryptoBrokerPublicKey the public key of the crypto Broker to un-publish
     *
     * @throws CouldNotUnPublishCryptoBrokerException if the identity could not be un-published
     */
    public void unPublishCryptoBrokerIdentity(String cryptoBrokerPublicKey) throws CouldNotUnPublishCryptoBrokerException;
    /**
     * The method <code>getAllCryptoBrokersIdentities</code> returns the list of all crypto Broker published
     *
     * @param max    maximum number of identities to return (pagination) -- TODO confirm against implementation
     * @param offset number of identities to skip (pagination) -- TODO confirm against implementation
     * @return the list of crypto Broker published
     * @throws CantGetCryptoBrokerListException if the list could not be retrieved
     */
    public List<CryptoBrokerIdentityInformation> getAllCryptoBrokersIdentities(int max, int offset) throws CantGetCryptoBrokerListException;
}
|
GitHub-LiuMing/E-Shop
|
src/main/java/com/liuming/eshop/controller/itemController/ItemController.java
|
<reponame>GitHub-LiuMing/E-Shop<filename>src/main/java/com/liuming/eshop/controller/itemController/ItemController.java
package com.liuming.eshop.controller.itemController;
import com.liuming.eshop.entity.itemEntity.Item;
import com.liuming.eshop.service.itemService.ItemService;
import com.liuming.eshop.utils.DataResult;
import org.apache.commons.lang3.StringUtils;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Resource;
/**
 * REST controller exposing item (product) management endpoints:
 * creation, paged lookup and update, delegating to {@link ItemService}.
 */
@RestController
@RequestMapping("/item")
public class ItemController {

    @Resource
    private ItemService itemService;

    /**
     * Creates a new item after verifying that every mandatory field is present.
     *
     * @param item the item to persist
     * @return the service result, or a 500 result when a required field is blank or null
     */
    @RequestMapping("/addItem")
    public DataResult addItem(Item item) {
        // NOTE: the commission-id check was deliberately disabled upstream:
        // StringUtils.isNotBlank(item.getCommissionId())
        boolean missingRequiredField = StringUtils.isBlank(item.getItemName())
                || StringUtils.isBlank(item.getClassifyId())
                || StringUtils.isBlank(item.getLogisticsTemplateId())
                || item.getItemOriginalPrice() == null
                || item.getItemPresentPrice() == null
                || item.getItemStatus() == null;
        if (missingRequiredField) {
            return DataResult.build(500, "商品名称、分类ID、佣金ID、物流模板ID、商品原价、商品现价、商品状态不得为空");
        }
        return itemService.addItem(item);
    }

    /**
     * Queries items with paging.
     *
     * @param pageNum  1-based page number (defaults to 1)
     * @param pageSize page size (defaults to 10)
     * @param item     filter criteria
     * @return the paged query result
     */
    @RequestMapping("/findItem")
    public DataResult findItem(@RequestParam(defaultValue = "1") int pageNum,
                               @RequestParam(defaultValue = "10") int pageSize, Item item) {
        return itemService.findItem(pageNum, pageSize, item);
    }

    /**
     * Updates an existing item; the item id must be present.
     *
     * @param item the item carrying the id plus the fields to change
     * @return the service result, or a 500 result when the id is blank
     */
    @RequestMapping("/updateItem")
    public DataResult updateItem(Item item) {
        if (StringUtils.isBlank(item.getItemId())) {
            return DataResult.build(500, "商品获取失败");
        }
        return itemService.updateItem(item);
    }
}
|
CDK-Vonkil/libwebsockets
|
include/libwebsockets/lws-async-dns.h
|
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2019 <NAME> <<EMAIL>>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#if defined(LWS_WITH_UDP)
/* DNS record TYPE codes queried via lws_async_dns_query(); the values match
 * the on-the-wire codes from RFC 1035 (A, CNAME, MX) and RFC 3596 (AAAA) */
typedef enum dns_query_type {
	LWS_ADNS_RECORD_A = 0x01,
	LWS_ADNS_RECORD_CNAME = 0x05,
	LWS_ADNS_RECORD_MX = 0x0f,
	LWS_ADNS_RECORD_AAAA = 0x1c,
} adns_query_type_t;
/* completion / progress states for an async dns query; negative values
 * indicate failure modes */
typedef enum {
	LADNS_RET_FAILED_WSI_CLOSED = -4,
	LADNS_RET_NXDOMAIN = -3,
	LADNS_RET_TIMEDOUT = -2,
	LADNS_RET_FAILED = -1,
	LADNS_RET_FOUND,
	LADNS_RET_CONTINUING
} lws_async_dns_retcode_t;
/* query completion callback: receives the related wsi (if any), the looked-up
 * name \p ads, the addrinfo result list with its count \p n, and the caller's
 * \p opaque pointer */
typedef struct lws * (*lws_async_dns_cb_t)(struct lws *wsi, const char *ads,
					   const struct addrinfo *result, int n,
					   void *opaque);
/**
 * lws_async_dns_query() - perform a dns lookup using async dns
 *
 * \param context: the lws_context
 * \param tsi: thread service index (usually 0)
 * \param name: DNS name to look up
 * \param qtype: type of query (A, AAAA etc)
 * \param cb: query completion callback
 * \param wsi: wsi if the query is related to one
 * \param opaque: user pointer passed through unchanged to \p cb
 *
 * Starts an asynchronous DNS lookup, on completion the \p cb callback will
 * be called.
 *
 * The reference count on the cached object is incremented for every callback
 * that was called with the cached addrinfo results.
 *
 * The cached object can't be evicted until the reference count reaches zero...
 * use lws_async_dns_freeaddrinfo() to indicate you're finished with the
 * results for each callback that happened with them.
 */
LWS_VISIBLE LWS_EXTERN lws_async_dns_retcode_t
lws_async_dns_query(struct lws_context *context, int tsi, const char *name,
		    adns_query_type_t qtype, lws_async_dns_cb_t cb,
		    struct lws *wsi, void *opaque);
/**
 * lws_async_dns_freeaddrinfo() - decrement refcount on cached addrinfo results
 *
 * \param ai: a pointer to a pointer to the first addrinfo returned as result in the callback
 *
 * Decrements the cache object's reference count. When it reaches zero, the
 * cached object may be reaped subject to LRU rules.
 *
 * The pointer to the first addrinfo given in the argument is set to NULL.
 */
LWS_VISIBLE LWS_EXTERN void
lws_async_dns_freeaddrinfo(const struct addrinfo **ai);
#endif
|
YKato521/ironpython-stubs
|
release/stubs.min/System/Windows/Forms/__init___parts/WebBrowserDocumentCompletedEventArgs.py
|
<filename>release/stubs.min/System/Windows/Forms/__init___parts/WebBrowserDocumentCompletedEventArgs.py
# Auto-generated IronPython stub mirroring the .NET
# System.Windows.Forms.WebBrowserDocumentCompletedEventArgs API; bodies are
# placeholders (`pass`) -- the real implementation lives in the CLR assembly.
class WebBrowserDocumentCompletedEventArgs(EventArgs):
 """
 Provides data for the System.Windows.Forms.WebBrowser.DocumentCompleted event.
 WebBrowserDocumentCompletedEventArgs(url: Uri)
 """
 @staticmethod
 def __new__(self, url):
  """ __new__(cls: type,url: Uri) """
  pass
 Url = property(lambda self: object(), lambda self, v: None, lambda self: None)
 """Gets the location of the document to which the System.Windows.Forms.WebBrowser control has navigated.
 Get: Url(self: WebBrowserDocumentCompletedEventArgs) -> Uri
 """
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.