| prompt | completion |
|---|---|
<|file_name|>celeryconfig.py<|end_file_name|><|fim▁begin|># List of modules to import when celery starts.
# CELERY_IMPORTS = ('libcloud_sandbox.tasks.code_execute', )
# Result store settings.
CELERY_RESULT_BACKEND = 'database'
CELERY_RESULT_DBURI = 'sqlite:///mydatabase.db'
# Broker settings.
BROKER_TRANSPORT = 'sqlalchemy'
BROKER_HOST = 'sqlite:///tasks.db'
BROKER_PORT = 5672
BROKER_VHOST = '/'<|fim▁hole|>BROKER_USER = 'guest'
BROKER_PASSWORD = 'guest'
## Worker settings
CELERYD_CONCURRENCY = 1
CELERYD_TASK_TIME_LIMIT = 20
# CELERYD_LOG_FILE = 'celeryd.log'
CELERYD_LOG_LEVEL = 'INFO'<|fim▁end|> | |
<|file_name|>article.rs<|end_file_name|><|fim▁begin|>use std::collections::HashMap;
use iron::prelude::*;
use base::framework::{ResponseData, temp_response,
json_error_response, json_ok_response,<|fim▁hole|>use base::db::MyPool;
use form_checker::{Validator, Checker, Rule, Str, I64};
use base::framework::LoginUser;
use base::util::render_html;
use iron_login::User as U;
use persistent::Read;
use chrono::*;
use router::Router;
use mysql as my;
use base::model::{Article, User, Category, Comment};
use rustc_serialize::json::ToJson;
use base::util;
use base::util::gen_gravatar_url;
use base::constant;
pub fn new_load(req: &mut Request) -> IronResult<Response> {
let mut data = ResponseData::new(req);
data.insert("categories", util::gen_categories_json(None));
temp_response("article/new_load", &data)
}
pub fn new(req: &mut Request) -> IronResult<Response> {
let mut validator = Validator::new();
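// Validation rules: category must be one of the known CATEGORY values,
// title must be 3-64 chars, content must be at least 7 chars.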
validator
.check(Checker::new("category", "类别", I64)
.meet(Rule::Lambda(Box::new(|v| {
constant::CATEGORY::ALL.iter().any(
|c|*c as i64 == v.as_i64().unwrap())
}), None)))
.check(Checker::new("title", "标题", Str)
.meet(Rule::Min(3))
.meet(Rule::Max(64)))
.check(Checker::new("content", "内容", Str)
.meet(Rule::Min(7)));
validator.validate(&req.get::<UrlEncodedBody>().unwrap_or(HashMap::new()));
if !validator.is_valid() {
return json_error_response(&validator.get_some_error());
}
let category = validator.get_required("category").as_i64().unwrap();
let title = validator.get_required("title").as_str().unwrap();
let content = validator.get_required("content").as_str().unwrap();
let pool = req.get::<Read<MyPool>>().unwrap().value();
let login = LoginUser::get_login(req);
let user = login.get_user().unwrap();
let now = Local::now().naive_local();
let mut stmt = pool.prepare("INSERT INTO article(category, title, content, \
user_id, create_time, update_time) \
VALUES (?, ?, ?, ?, ?, ?)").unwrap();
let result = stmt.execute((category, title, content, user.id, now, now));
result.unwrap();
json_ok_response()
}
pub fn show(req: &mut Request) -> IronResult<Response> {
let article_id = try!(req.extensions.get::<Router>().unwrap()
.find("article_id").unwrap()
.parse::<u64>().map_err(|_| not_found_response().unwrap_err()));
let pool = req.get::<Read<MyPool>>().unwrap().value();
let mut result = pool.prep_exec(
"SELECT a.id, a.category, a.title, a.content, a.comments_count, \
a.create_time, u.id as user_id, u.username, u.email from article \
as a join user as u on a.user_id=u.id where a.id=? and a.status=?",
(&article_id, constant::ARTICLE::STATUS::NORMAL)).unwrap();
let raw_row = result.next();
if raw_row.is_none() {
return not_found_response();
}
let row = raw_row.unwrap().unwrap();
let (id, category, title, content, comments_count, create_time, user_id, username, email) = my::from_row::<(_,_,_,_,_,_,_,_,String)>(row);
let mut article = Article {
id: id,
category: Category::from_value(category),
title: title,
content: content,
comments_count: comments_count,
user: User{
id: user_id,
avatar: gen_gravatar_url(&email),
username: username,
email: email,
create_time: *constant::DEFAULT_DATETIME,
},
create_time: create_time,
update_time: *constant::DEFAULT_DATETIME,
flag: 0,
comments: Vec::new(),
};
article.content = render_html(&article.content);
let result = pool.prep_exec(
"SELECT c.id, c.content, c.create_time, u.id as user_id, \
u.username, u.email from comment \
as c join user as u on c.user_id=u.id where c.article_id=? \
order by c.create_time", (&article_id, )).unwrap();
article.comments = result.map(|x| x.unwrap()).map(|row|{
let (id, content, create_time, user_id, username, email) = my::from_row::<(_,String,_,_,_,String)>(row);
Comment {
id: id,
content: render_html(&content),
user: User {
id: user_id,
avatar: gen_gravatar_url(&email),
username: username,
email: email,
create_time: *constant::DEFAULT_DATETIME,
},
create_time: create_time,
article: None,
}
}).collect();
// judge whether this article is my own
let mut is_my_own = false;
let raw_login_user = LoginUser::get_login(req).get_user();
if let Some(login_user) = raw_login_user {
if login_user.id == article.user.id {
is_my_own = true;
}
}
let mut data = ResponseData::new(req);
data.insert("article", article.to_json());
data.insert("comments_count", article.comments.len().to_json());
let mentions: Vec<String> = article.comments.into_iter().map(|c|c.user.username).collect();
data.insert("mentions", mentions.to_json());
data.insert("is_my_own", is_my_own.to_json());
temp_response("article/show", &data)
}
pub fn edit_load(req: &mut Request) -> IronResult<Response> {
let login = LoginUser::get_login(req);
let user = login.get_user().unwrap();
let article_id = try!(req.extensions.get::<Router>().unwrap()
.find("article_id").unwrap()
.parse::<u64>().map_err(|_| not_found_response().unwrap_err()));
let pool = req.get::<Read<MyPool>>().unwrap().value();
let mut result = pool.prep_exec(
"SELECT a.id, a.category, a.title, a.content, a.comments_count, \
a.create_time, u.id as user_id, u.username, u.email from article \
as a join user as u on a.user_id=u.id where a.id=? and a.status=?",
(&article_id, constant::ARTICLE::STATUS::NORMAL)).unwrap();
let raw_row = result.next();
if raw_row.is_none() {
return not_found_response();
}
let row = raw_row.unwrap().unwrap();
let (id, category, title, content, comments_count,
create_time, user_id, username, email) = my::from_row::<
(_,_,_,_,_,_,_,_,String)>(row);
if user_id != user.id {
return not_found_response();
}
let article = Article {
id: id,
category: Category::from_value(category),
title: title,
content: content,
comments_count: comments_count,
user: User{
id: user_id,
avatar: gen_gravatar_url(&email),
username: username,
email: email,
create_time: *constant::DEFAULT_DATETIME,
},
create_time: create_time,
update_time: *constant::DEFAULT_DATETIME,
flag: 0,
comments: Vec::new(),
};
let mut data = ResponseData::new(req);
data.insert("categories", util::gen_categories_json(Some(category)));
data.insert("article", article.to_json());
temp_response("article/edit_load", &data)
}
pub fn edit(req: &mut Request) -> IronResult<Response> {
let login = LoginUser::get_login(req);
let user = login.get_user().unwrap();
let article_id = try!(req.extensions.get::<Router>().unwrap()
.find("article_id").unwrap()
.parse::<u64>().map_err(|_| not_found_response().unwrap_err()));
let mut validator = Validator::new();
validator
.check(Checker::new("category", "类别", I64)
.meet(Rule::Lambda(Box::new(|v| {
constant::CATEGORY::ALL.iter().any(
|c|*c as i64 == v.as_i64().unwrap())
}), None)))
.check(Checker::new("title", "标题", Str)
.meet(Rule::Min(3))
.meet(Rule::Max(64)))
.check(Checker::new("content", "内容", Str)
.meet(Rule::Min(7)));
validator.validate(&req.get::<UrlEncodedBody>().unwrap_or(HashMap::new()));
if !validator.is_valid() {
return json_error_response(&validator.get_some_error());
}
let category = validator.get_required("category").as_i64().unwrap();
let title = validator.get_required("title").as_str().unwrap();
let content = validator.get_required("content").as_str().unwrap();
let now = Local::now().naive_local();
let pool = req.get::<Read<MyPool>>().unwrap().value();
let mut trans = pool.start_transaction(false, None, None).unwrap();
{
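// Row-lock the article (SELECT ... FOR UPDATE) so its owner can't change before the UPDATE below.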
let mut result = trans.prep_exec(
"SELECT u.id as user_id from article \
as a join user as u on a.user_id=u.id \
where a.id=? and a.status=? for update",
(article_id, constant::ARTICLE::STATUS::NORMAL)).unwrap();
let raw_row = result.next();
if raw_row.is_none() {
return not_found_response();
}
let row = raw_row.unwrap().unwrap();
let user_id = my::from_row::<u64>(row);
if user_id != user.id {
return json_error_response("非法请求");
}
}
trans.prep_exec("UPDATE article set category=?, title=?, content=?, \
update_time=? where id=?",
(category, title, content, now, article_id)).unwrap();
trans.commit().unwrap();
json_ok_response()
}<|fim▁end|> | not_found_response};
use urlencoded::UrlEncodedBody; |
<|file_name|>AmazonImageSelectInput.spec.tsx<|end_file_name|><|fim▁begin|>import React from 'react';
import { mockHttpClient } from 'core/api/mock/jasmine';
import { mock } from 'angular';
import { ShallowWrapper, ReactWrapper, shallow, mount } from 'enzyme';
import { IAmazonImage } from 'amazon/image';
import { Application } from 'core/application';
import { REACT_MODULE } from 'core/reactShims';
import { AmazonImageSelectInput, IAmazonImageSelectorProps, IAmazonImageSelectorState } from './AmazonImageSelectInput';
const application = new Application('testapp', null, []);
const region = 'us-region-1';
const credentials = 'prodaccount';
const imageName = 'fancypackage-1.0.0-h005.6c8b5fe-x86_64-20181206030728-xenial-hvm-sriov-ebs';
const amiId = 'fake-abcd123';
describe('<AmazonImageSelectInput/>', () => {
let shallowComponent: ShallowWrapper<IAmazonImageSelectorProps, IAmazonImageSelectorState>;
let mountedComponent: ReactWrapper<IAmazonImageSelectorProps, IAmazonImageSelectorState>;
beforeEach(mock.module(REACT_MODULE));
beforeEach(mock.inject());
afterEach(() => {
shallowComponent && shallowComponent.unmount();
mountedComponent && mountedComponent.unmount();
});
const baseProps = {
application,
credentials,
region,
value: {} as IAmazonImage,
onChange: () => null as any,<|fim▁hole|> describe('fetches package images when mounted', () => {
it('using application name when no amiId is present in the initial value', async () => {
const http = mockHttpClient();
http.expectGET(`/images/find?q=testapp*`).respond(200, []);
shallowComponent = shallow(<AmazonImageSelectInput {...baseProps} />);
await http.flush();
});
it('and updates isLoadingPackageImages', async () => {
const http = mockHttpClient();
http.expectGET(`/images/find?q=testapp*`).respond(200, []);
shallowComponent = shallow(<AmazonImageSelectInput {...baseProps} />);
expect(shallowComponent.state().isLoadingPackageImages).toBe(true);
await http.flush();
expect(shallowComponent.state().isLoadingPackageImages).toBe(false);
});
it('using the image fetched by amiId and looking up via the imageName', async () => {
const http = mockHttpClient();
const value = AmazonImageSelectInput.makeFakeImage(imageName, amiId, region);
http.expectGET(`/images/${credentials}/${region}/${amiId}?provider=aws`).respond(200, [value]);
http.expectGET(`/images/find?q=fancypackage-*`).respond(200, []);
shallowComponent = shallow(<AmazonImageSelectInput {...baseProps} value={value} />);
await http.flush();
});
it('using application name when an amiId is present in the initial value, but the image was not found', async () => {
const http = mockHttpClient();
const value = AmazonImageSelectInput.makeFakeImage(imageName, amiId, region);
http.expectGET(`/images/${credentials}/${region}/${amiId}?provider=aws`).respond(200, null);
http.expectGET(`/images/find?q=${application.name}*`).respond(200, []);
shallowComponent = shallow(<AmazonImageSelectInput {...baseProps} value={value} />);
await http.flush();
});
});
it('calls onChange with the backend image when the package images are loaded', async () => {
const http = mockHttpClient();
const value = AmazonImageSelectInput.makeFakeImage(imageName, amiId, region);
const backendValue = AmazonImageSelectInput.makeFakeImage(imageName, amiId, region);
const onChange = jasmine.createSpy('onChange');
http.expectGET(`/images/${credentials}/${region}/${amiId}?provider=aws`).respond(200, [backendValue]);
http.expectGET(`/images/find?q=fancypackage-*`).respond(200, [backendValue]);
mountedComponent = mount(<AmazonImageSelectInput {...baseProps} onChange={onChange} value={value} />);
await http.flush();
expect(onChange).toHaveBeenCalledWith(backendValue);
});
it('calls onChange with null image when the image is not found in the package images', async () => {
const http = mockHttpClient();
const value = AmazonImageSelectInput.makeFakeImage(imageName, amiId, region);
const noResults = [] as IAmazonImage[];
http.expectGET(`/images/${credentials}/${region}/${amiId}?provider=aws`).respond(200, noResults);
http.expectGET(`/images/find?q=testapp*`).respond(200, noResults);
const onChange = jasmine.createSpy('onChange');
mountedComponent = mount(<AmazonImageSelectInput {...baseProps} onChange={onChange} value={value} />);
await http.flush();
expect(onChange).toHaveBeenCalledWith(undefined);
});
});<|fim▁end|> | };
|
<|file_name|>Minesweeper.py<|end_file_name|><|fim▁begin|># public class Solution {
# public char[][] updateBoard(char[][] board, int[] click) {
# int m = board.length, n = board[0].length;
# int row = click[0], col = click[1];
#
# if (board[row][col] == 'M') { // Mine
# board[row][col] = 'X';
# }
# else { // Empty
# // Get number of mines first.
# int count = 0;
# for (int i = -1; i < 2; i++) {
# for (int j = -1; j < 2; j++) {
# if (i == 0 && j == 0) continue;
# int r = row + i, c = col + j;
# if (r < 0 || r >= m || c < 0 || c >= n) continue;<|fim▁hole|># if (board[r][c] == 'M' || board[r][c] == 'X') count++;
# }
# }
#
# if (count > 0) { // If it is not a 'B', stop further DFS.
# board[row][col] = (char)(count + '0');
# }
# else { // Continue DFS to adjacent cells.
# board[row][col] = 'B';
# for (int i = -1; i < 2; i++) {
# for (int j = -1; j < 2; j++) {
# if (i == 0 && j == 0) continue;
# int r = row + i, c = col + j;
# if (r < 0 || r >= m || c < 0 || c >= n) continue;
# if (board[r][c] == 'E') updateBoard(board, new int[] {r, c});
# }
# }
# }
# }
#
# return board;
# }
# }
import collections
class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
if not board:
return
m, n = len(board), len(board[0])
queue = collections.deque()
queue.append((click[0], click[1]))
valid_neighbours = lambda (i, j): 0<=i<m and 0<=j<n
while queue:
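# Note: deque.pop() takes from the right, so this traversal is depth-first (stack order).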
x, y = queue.pop()
if board[x][y] == 'M':
board[x][y] = 'X'
else:
# Filter out the valid neighbours
neighbours = filter(valid_neighbours, [(x-1, y), (x+1, y),
(x, y-1), (x, y+1), (x-1, y-1), (x+1, y-1), (x-1, y+1), (x+1, y+1)])
# Count the number of mines amongst the neighbours
mine_count = sum([board[i][j]=='M' for i, j in neighbours])
# If at least one neighbour is a potential mine, store the mine count.
if mine_count > 0:
board[x][y] = str(mine_count)
# If no neighbour is a mine, then add all unvisited neighbours
# to the queue for future processing
else:
board[x][y] = 'B'
queue.extend([(i, j) for (i, j) in neighbours if board[i][j]=='E'])
return board<|fim▁end|> | |
<|file_name|>card.js<|end_file_name|><|fim▁begin|>/**
* @license Copyright (c) 2013, Viet Trinh All Rights Reserved.
* Available via MIT license.
*/
/**
* A card entity.
*/
define([ 'framework/entity/base_entity' ],
function(BaseEntity)
{
/**
* Constructor.
* @param rawObject (optional)
* The raw object to create the entity with.
*/
var Card = function(rawObject)
{
this.name = null;
this.color = null;
BaseEntity.call(this, rawObject);
return this;
};
Card.prototype = new BaseEntity();<|fim▁hole|> Card.RARITY.MYTHIC = 'M';
Card.RARITY.RARE = 'R';
Card.RARITY.UNCOMMON = 'U';
Card.RARITY.COMMON = 'C';
Card.RARITY.LAND = 'L';
return Card;
});<|fim▁end|> | BaseEntity.generateProperties(Card);
// The card rarities.
Card.RARITY = {}; |
<|file_name|>GuiDimletScrambler.java<|end_file_name|><|fim▁begin|>package mcjty.rftools.blocks.dimlets;
import mcjty.lib.container.GenericGuiContainer;
import mcjty.lib.gui.Window;
import mcjty.lib.gui.layout.PositionalLayout;
import mcjty.lib.gui.widgets.EnergyBar;
import mcjty.lib.gui.widgets.ImageLabel;
import mcjty.lib.gui.widgets.Panel;
import mcjty.lib.gui.widgets.Widget;
import mcjty.rftools.RFTools;
import mcjty.rftools.network.RFToolsMessages;
import net.minecraft.util.ResourceLocation;
import net.minecraftforge.common.util.ForgeDirection;
import java.awt.*;
public class GuiDimletScrambler extends GenericGuiContainer<DimletScramblerTileEntity> {
public static final int SCRAMBLER_WIDTH = 180;
public static final int SCRAMBLER_HEIGHT = 152;
private EnergyBar energyBar;
private ImageLabel progressIcon;
private static final ResourceLocation iconLocation = new ResourceLocation(RFTools.MODID, "textures/gui/dimletscrambler.png");
private static final ResourceLocation iconGuiElements = new ResourceLocation(RFTools.MODID, "textures/gui/guielements.png");
public GuiDimletScrambler(DimletScramblerTileEntity pearlInjectorTileEntity, DimletScramblerContainer container) {
super(RFTools.instance, RFToolsMessages.INSTANCE, pearlInjectorTileEntity, container, RFTools.GUI_MANUAL_DIMENSION, "scrambler");
pearlInjectorTileEntity.setCurrentRF(pearlInjectorTileEntity.getEnergyStored(ForgeDirection.DOWN));
xSize = SCRAMBLER_WIDTH;
ySize = SCRAMBLER_HEIGHT;
}
@Override
public void initGui() {
super.initGui();
int maxEnergyStored = tileEntity.getMaxEnergyStored(ForgeDirection.DOWN);
energyBar = new EnergyBar(mc, this).setVertical().setMaxValue(maxEnergyStored).setLayoutHint(new PositionalLayout.PositionalHint(10, 7, 8, 54)).setShowText(false);
energyBar.setValue(tileEntity.getCurrentRF());
progressIcon = new ImageLabel(mc, this).setImage(iconGuiElements, 4 * 16, 16);
progressIcon.setLayoutHint(new PositionalLayout.PositionalHint(64, 24, 16, 16));
Widget toplevel = new Panel(mc, this).setBackground(iconLocation).setLayout(new PositionalLayout()).addChild(energyBar).addChild(progressIcon);
toplevel.setBounds(new Rectangle(guiLeft, guiTop, xSize, ySize));
window = new Window(this, toplevel);
tileEntity.requestRfFromServer(RFToolsMessages.INSTANCE);
tileEntity.requestScramblingFromServer();
}
@Override
protected void drawGuiContainerBackgroundLayer(float v, int i, int i2) {
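// Animate the progress icon: while scrambling, cycle through four 16px frames on the guielements sheet.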
int scrambling = tileEntity.getScrambling();
if (scrambling == 0) {
progressIcon.setImage(iconGuiElements, 4 * 16, 16);
} else {
progressIcon.setImage(iconGuiElements, (scrambling % 4) * 16, 16);
}
drawWindow();
energyBar.setValue(tileEntity.getCurrentRF());
tileEntity.requestRfFromServer(RFToolsMessages.INSTANCE);<|fim▁hole|><|fim▁end|> | tileEntity.requestScramblingFromServer();
}
} |
<|file_name|>static_analysis.py<|end_file_name|><|fim▁begin|># Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from paddle.utils import gast
from .logging_utils import warn
from .utils import is_paddle_api, is_dygraph_api, is_numpy_api, index_in_list, ast_to_source_code
__all__ = ['AstNodeWrapper', 'NodeVarType', 'StaticAnalysisVisitor']
class NodeVarType(object):
"""
Enum class of python variable types. We have to know some variable types
during compile time to transfer AST. For example, a string variable and a
tensor variable in an if clause may lead to different conversions from dygraph
to static graph.
"""
ERROR = -1 # Returns when static analysis gets error
UNKNOWN = 0 # Reserve for AST nodes have not known the type
STATEMENT = 1 # For nodes representing statement (non-variable type)
CALLABLE = 2
# python data types
NONE = 100
BOOLEAN = 101
INT = 102
FLOAT = 103
STRING = 104
TENSOR = 105
NUMPY_NDARRAY = 106
# python collections
LIST = 200
SET = 201
DICT = 202
PADDLE_DYGRAPH_API = 300
PADDLE_CONTROL_IF = 301
PADDLE_CONTROL_WHILE = 302
PADDLE_CONTROL_FOR = 303
# Paddle API may not be visible to get source code.
# We use this enum value to denote the type return by a Paddle API
PADDLE_RETURN_TYPES = 304
# If node.node_var_type in TENSOR_TYPES, it can be considered as tensor-dependent.
TENSOR_TYPES = {TENSOR, PADDLE_RETURN_TYPES}
Annotation_map = {
"Tensor": TENSOR,
"paddle.Tensor": TENSOR,
"int": INT,
"float": FLOAT,
"bool": BOOLEAN,
"str": STRING
}
@staticmethod
def binary_op_output_type(in_type1, in_type2):
if in_type1 == in_type2:
return in_type1
if in_type1 == NodeVarType.UNKNOWN:
return in_type2
if in_type2 == NodeVarType.UNKNOWN:
return in_type1
supported_types = [
NodeVarType.BOOLEAN, NodeVarType.INT, NodeVarType.FLOAT,
NodeVarType.NUMPY_NDARRAY, NodeVarType.TENSOR,
NodeVarType.PADDLE_RETURN_TYPES
]
if in_type1 not in supported_types:
return NodeVarType.UNKNOWN
if in_type2 not in supported_types:
return NodeVarType.UNKNOWN
forbidden_types = [NodeVarType.NUMPY_NDARRAY, NodeVarType.TENSOR]
if in_type1 in forbidden_types and in_type2 in forbidden_types:
return NodeVarType.UNKNOWN
return max(in_type1, in_type2)
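# Example: mixing INT (102) and FLOAT (103) returns FLOAT, since both are
# supported scalar types and the larger enum value is the "wider" one:
#   binary_op_output_type(NodeVarType.INT, NodeVarType.FLOAT) == NodeVarType.FLOAT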
@staticmethod
def type_from_annotation(annotation):
annotation_str = ast_to_source_code(annotation).strip()
if annotation_str in NodeVarType.Annotation_map:
return NodeVarType.Annotation_map[annotation_str]
# raise warning if not found
warn("Currently we don't support annotation: %s" % annotation_str)
return NodeVarType.UNKNOWN
class AstNodeWrapper(object):
"""
Wrapper for python gast.node. We need a node wrapper because gast.node
doesn't store all required information when we are transforming AST.
We should collect additional information which the actual transformation
needs.
"""
def __init__(self, node):
self.node = node
self.parent = None
self.children = []
self.node_var_type = {NodeVarType.UNKNOWN}
class AstVarScope(object):
"""
AstVarScope is a class holding the map from current scope variable to its
type.
"""
SCOPE_TYPE_SCRIPT = 0
SCOPE_TYPE_FUNCTION = 1
SCOPE_TYPE_CLASS = 2
def __init__(self,
scope_name='',
scope_type=SCOPE_TYPE_SCRIPT,
parent_scope=None):
self.sub_scopes = []
self.name_to_id = {}
self.id_to_type = {}
self.cur_id = 0
self.scope_name = scope_name
self.scope_type = scope_type
self.parent_scope = parent_scope
if parent_scope is not None:
parent_scope.sub_scopes.append(self)
def add_var_type(self, var_name, node_var_type):
var_type = self.get_var_type(var_name)
if var_type == {NodeVarType.UNKNOWN}:
self.set_var_type(var_name, node_var_type)
else:
if isinstance(node_var_type, set):
var_type.update(node_var_type)
else:
var_type.add(node_var_type)
def set_var_type(self, var_name, node_var_type):
if var_name in self.name_to_id:
num_id = self.name_to_id[var_name]
else:
num_id = self.cur_id
self.cur_id += 1
self.name_to_id[var_name] = num_id
self.id_to_type[num_id] = node_var_type if isinstance(
node_var_type, set) else {node_var_type}
def get_var_type(self, var_name):
if var_name in self.name_to_id:
num_id = self.name_to_id[var_name]
return self.id_to_type[num_id]
if self.parent_scope is None:
return {NodeVarType.UNKNOWN}
return self.parent_scope.get_var_type(var_name)
class AstVarEnv(object):
"""
A class maintains scopes and mapping from name strings to type.
"""
def __init__(self):
self.cur_scope = AstVarScope()
def enter_scope(self, scope_name, scope_type):
self.cur_scope = AstVarScope(
scope_name, scope_type, parent_scope=self.cur_scope)
return self.cur_scope
def exit_scope(self):
assert self.cur_scope.parent_scope is not None, "Call exit_scope in "\
"AstVarEnv when current scope doesn't have parent scope."
self.cur_scope = self.cur_scope.parent_scope
return self.cur_scope
def get_parent_scope(self):
assert self.cur_scope.parent_scope is not None, "Call parent_scope in "\
"AstVarEnv when current scope doesn't have parent scope."
return self.cur_scope.parent_scope
def add_var_type(self, var_name, node_var_type):
self.cur_scope.add_var_type(var_name, node_var_type)
def set_var_type(self, var_name, node_var_type):
self.cur_scope.set_var_type(var_name, node_var_type)
def get_var_type(self, var_name):
return self.cur_scope.get_var_type(var_name)
def get_scope_var_type(self):
'''
Returns a dict mapping from variable name to type. Used for debug and
test.
'''
cur_scope_dict = {}
for name in self.cur_scope.name_to_id:
node_var_type = self.cur_scope.get_var_type(name)
cur_scope_dict[name] = node_var_type
return cur_scope_dict
class StaticAnalysisVisitor(object):
"""
A class that does static analysis
"""
def __init__(self, ast_root=None):
if ast_root is not None:
self.run(ast_root)
def run(self, ast_root):
self.node_wrapper_root = None
self.ancestor_wrappers = []
self.node_to_wrapper_map = {}
self.var_env = AstVarEnv()
self.dfs_visit(ast_root)
def dfs_visit(self, node):
# AST reuses some gast.nodes, such as Param node of expr_context
if node not in self.node_to_wrapper_map:
cur_wrapper = AstNodeWrapper(node)
self.node_to_wrapper_map[node] = cur_wrapper
else:
cur_wrapper = self.node_to_wrapper_map[node]
if self.node_wrapper_root is None:
self.node_wrapper_root = cur_wrapper
if len(self.ancestor_wrappers) != 0:
last_wrapper = self.ancestor_wrappers[-1]
last_wrapper.children.append(cur_wrapper)
cur_wrapper.parent = last_wrapper
self.ancestor_wrappers.append(cur_wrapper)
for child in gast.iter_child_nodes(node):
if isinstance(child, gast.FunctionDef) or isinstance(
child, gast.AsyncFunctionDef):
# TODO: current version is function name mapping to its type
# consider complex case involving parameters
self.var_env.enter_scope(child.name,
AstVarScope.SCOPE_TYPE_FUNCTION)
func_type = self.dfs_visit(child)
self.var_env.exit_scope()
else:
self.dfs_visit(child)
self.ancestor_wrappers.pop()
cur_wrapper.node_var_type = self._get_node_var_type(cur_wrapper)
return cur_wrapper.node_var_type
def get_node_wrapper_root(self):
return self.node_wrapper_root
def get_node_to_wrapper_map(self):
return self.node_to_wrapper_map
def get_var_env(self):
return self.var_env
def is_tensor_node(self, node):
tensor_types = {NodeVarType.TENSOR, NodeVarType.PADDLE_RETURN_TYPES}
node_wrapper = self.node_to_wrapper_map.get(node, None)
if node_wrapper is None:
return False
if node_wrapper.node_var_type & tensor_types:<|fim▁hole|> def _get_constant_node_type(self, node):
assert isinstance(node, gast.Constant), \
"Type of input node should be gast.Constant, but received %s" % type(node)
# singleton: None, True or False
if node.value is None:
return {NodeVarType.NONE}
if isinstance(node.value, bool):
return {NodeVarType.BOOLEAN}
if isinstance(node.value, int):
return {NodeVarType.INT}
if isinstance(node.value, float):
return {NodeVarType.FLOAT}
if isinstance(node.value, str):
return {NodeVarType.STRING}
return {NodeVarType.UNKNOWN}
def _get_node_var_type(self, cur_wrapper):
node = cur_wrapper.node
if isinstance(node, gast.Constant):
return self._get_constant_node_type(node)
if isinstance(node, gast.BoolOp):
return {NodeVarType.BOOLEAN}
if isinstance(node, gast.Compare):
return {NodeVarType.BOOLEAN}
if isinstance(node, gast.Dict):
return {NodeVarType.DICT}
if isinstance(node, gast.Set):
return {NodeVarType.SET}
if isinstance(node, gast.UnaryOp):
return self.node_to_wrapper_map[node.operand].node_var_type
if isinstance(node, gast.BinOp):
left_type = self.node_to_wrapper_map[node.left].node_var_type
right_type = self.node_to_wrapper_map[node.right].node_var_type
result_type = set()
for l in left_type:
for r in right_type:
result_type.add(NodeVarType.binary_op_output_type(l, r))
return result_type
if isinstance(node, gast.Assign):
ret_type = self.node_to_wrapper_map[node.value].node_var_type
for target in node.targets:
if isinstance(target, gast.Name):
self.node_to_wrapper_map[target].node_var_type = ret_type
self.var_env.set_var_type(target.id, ret_type)
return ret_type
if isinstance(node, gast.AnnAssign):
# TODO(0x45f): Determine whether we need to support assignment statements
# like `self.x: float = 2.1`.
ret_type = {NodeVarType.type_from_annotation(node.annotation)}
# if the annotation and the value (a Constant) have different types, use the value's type
if node.value:
ret_type = self.node_to_wrapper_map[node.value].node_var_type
if isinstance(node.target, gast.Name):
self.node_to_wrapper_map[node.target].node_var_type = ret_type
self.var_env.set_var_type(node.target.id, ret_type)
return ret_type
if isinstance(node, gast.Name):
if node.id == "None":
return {NodeVarType.NONE}
if node.id in {"True", "False"}:
return {NodeVarType.BOOLEAN}
# If node is child of functionDef.arguments
parent_node_wrapper = cur_wrapper.parent
if parent_node_wrapper and isinstance(parent_node_wrapper.node,
gast.arguments):
return self._get_func_argument_type(parent_node_wrapper, node)
return self.var_env.get_var_type(node.id)
if isinstance(node, gast.Return):
# If return nothing:
if node.value is None:
return {NodeVarType.NONE}
return_type = self.node_to_wrapper_map[node.value].node_var_type
assert self.var_env.cur_scope.scope_type == AstVarScope.SCOPE_TYPE_FUNCTION, "Return at non-function scope"
func_name = self.var_env.cur_scope.scope_name
parent_scope = self.var_env.get_parent_scope()
parent_scope.add_var_type(func_name, return_type)
return return_type
if isinstance(node, gast.Call):
if is_dygraph_api(node):
if isinstance(node.func, gast.Attribute):
if node.func.attr == "to_variable":
return {NodeVarType.TENSOR}
if is_paddle_api(node):
return {NodeVarType.PADDLE_RETURN_TYPES}
if is_numpy_api(node):
# In this simple version we assume numpy api returns nd-array
return {NodeVarType.NUMPY_NDARRAY}
if isinstance(node.func, gast.Name):
return self.var_env.get_var_type(node.func.id)
if isinstance(node, gast.Subscript):
if self.is_tensor_node(node.value):
return {NodeVarType.TENSOR}
return {NodeVarType.STATEMENT}
def _get_func_argument_type(self, parent_node_wrapper, node):
"""
Returns type information by parsing annotation or default values.
For example:
1. parse by default values.
foo(x, y=1, z='s') -> x: UNKNOWN, y: INT, z: STR
2. parse by Py3 type annotation.
foo(x: Tensor, y: int, z: str) -> x: Tensor, y: INT, z: STR
3. parse by type annotation and default values.
foo(x: Tensor, y: int, z: str = 'abc') -> x: Tensor, y: INT, z: STR
NOTE: Currently, we only support Tensor, int, bool, float, str et.al.
Other complicate types will be supported later.
"""
assert isinstance(node, gast.Name)
parent_node = parent_node_wrapper.node
var_type = {NodeVarType.UNKNOWN}
if node.annotation is not None:
var_type = {NodeVarType.type_from_annotation(node.annotation)}
self.var_env.set_var_type(node.id, var_type)
# if the annotation and the value (a Constant) have different types, use the value's type
if parent_node.defaults:
index = index_in_list(parent_node.args, node)
args_len = len(parent_node.args)
if index != -1 and args_len - index <= len(parent_node.defaults):
defaults_node = parent_node.defaults[index - args_len]
if isinstance(defaults_node, gast.Constant):
var_type = self._get_constant_node_type(defaults_node)
# Add node with identified type into cur_env.
self.var_env.set_var_type(node.id, var_type)
return var_type<|fim▁end|> | return True
|
<|file_name|>config.rs<|end_file_name|><|fim▁begin|>//! Utilities for managing the configuration of the website.
//!
//! The configuration should contain values that I expect to change. This way, I can edit them all
//! in a single human-readable file instead of having to track down the values in the code.
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use log::*;
use serde::Deserialize;
use serde_yaml;
use url::Url;
use url_serde;
use crate::errors::*;
/// Configuration values for the website.
#[derive(Debug, PartialEq, Deserialize)]
pub struct Config {
/// A link to a PDF copy of my Resume.
#[serde(with = "url_serde")]
pub resume_link: Url,
}
fn parse_config<R>(reader: R) -> Result<Config>
where
R: Read,<|fim▁hole|> Ok(config)
}
/// Load the website configuration from a path.
pub fn load<P>(config_path: P) -> Result<Config>
where
P: AsRef<Path>,
{
let path = config_path.as_ref().to_str().unwrap();
info!("loading configuration from {}", path);
let config_file = File::open(&config_path).chain_err(|| "error opening config file")?;
parse_config(config_file)
}
#[cfg(test)]
mod tests {
use super::*;
use url::Url;
#[test]
fn load_config() {
let test_config = String::from(
r#"
---
resume_link: http://google.com
"#,
);
let expected_config = Config {
resume_link: Url::parse("http://google.com").unwrap(),
};
assert_eq!(
expected_config,
super::parse_config(test_config.as_bytes()).unwrap()
);
}
}<|fim▁end|> | {
let config = serde_yaml::from_reader(reader)?; |
<|file_name|>bluetoothconfigview.py<|end_file_name|><|fim▁begin|>import kivy
kivy.require('1.9.1')
from kivy.app import Builder
from kivy.uix.gridlayout import GridLayout
from kivy.properties import ObjectProperty
from kivy.logger import Logger
from settingsview import SettingsView, SettingsSwitch, SettingsButton
from autosportlabs.widgets.separator import HLineSeparator
from autosportlabs.racecapture.views.util.alertview import editor_popup
from autosportlabs.racecapture.views.configuration.rcp.advancedbluetoothconfigview import AdvancedBluetoothConfigView
Builder.load_string('''
<BluetoothConfigView>
id: bluetooth
cols: 1
spacing: [0, dp(20)]
size_hint: [1, None]
height: self.minimum_height
HSeparator:
text: 'Bluetooth'
size_hint_y: None
SettingsView:
id: bt_enable
label_text: 'Bluetooth'
help_text: 'If the Bluetooth module is connected, enable it here'
SettingsView:
id: btconfig
label_text: 'Advanced configuration'
help_text: 'Change Bluetooth name and passkey. Firmware version 2.9.0 or greater required.'
''')
class BluetoothConfigView(GridLayout):
def __init__(self, config, **kwargs):
super(BluetoothConfigView, self).__init__(**kwargs)
self.config = None
self.register_event_type('on_modified')
self.config_updated(config)
self._bt_popup = None
self._bt_config_view = None
btConfig = self.ids.btconfig
btConfig.bind(on_setting=self.on_bt_configure)
btConfig.setControl(SettingsButton(text='Advanced'))
def on_bluetooth_enabled_change(self, instance, value):
if self.config:
self.config.connectivityConfig.bluetoothConfig.btEnabled = value
self.config.connectivityConfig.stale = True
self.dispatch('on_modified')
def config_updated(self, config):
self.config = config
value = self.config.connectivityConfig.bluetoothConfig.btEnabled
bluetooth_enabled = self.ids.bt_enable
bluetooth_enabled.setControl(SettingsSwitch(active=value))
bluetooth_enabled.control.bind(active=self.on_bluetooth_enabled_change)
def on_modified(self):
pass
def on_bt_configure(self, instance, value):
if not self._bt_popup:
content = AdvancedBluetoothConfigView(self.config.connectivityConfig)
popup = editor_popup(title="Configure Bluetooth", content=content,
answerCallback=self.on_bluetooth_popup_answer)
self._bt_popup = popup
self._bt_config_view = content
def on_bluetooth_popup_answer(self, instance, answer):
close = True
modified = False
# If the user clicked the checkbox to save, validate the view. If it's valid, close and save values to config.
# If invalid, leave it (view will show error messages)
if answer:
valid = self._bt_config_view.validate()
if valid:
bt_values = self._bt_config_view.values
if len(bt_values["name"]) > 0:
self.config.connectivityConfig.bluetoothConfig.name = bt_values["name"]<|fim▁hole|> modified = True
if len(bt_values["passkey"]) > 0:
self.config.connectivityConfig.bluetoothConfig.passKey = bt_values["passkey"]
modified = True
else:
close = False
if modified:
self.config.connectivityConfig.stale = True
self.dispatch('on_modified')
if close:
self._bt_popup.dismiss()
self._bt_popup = None
self._bt_config_view = None<|fim▁end|> | |
<|file_name|>irods_auth_object.cpp<|end_file_name|><|fim▁begin|>/* -*- mode: c++; fill-column: 132; c-basic-offset: 4; indent-tabs-mode: nil -*- */
#include "irods_auth_object.hpp"
namespace irods {
auth_object::auth_object(
rError_t* _r_error ) : r_error_( _r_error ) {
// TODO - stub
}
auth_object::~auth_object() {
// TODO - stub
}
auth_object::auth_object(
const auth_object& _rhs ) {
r_error_ = _rhs.r_error();
request_result_ = _rhs.request_result();
context_ = _rhs.context();
}
auth_object& auth_object::operator=(
const auth_object& _rhs ) {
r_error_ = _rhs.r_error();
request_result_ = _rhs.request_result();<|fim▁hole|> }
bool auth_object::operator==(
const auth_object& _rhs ) const {
// For the base class just always return true
return ( r_error_ == _rhs.r_error() &&
request_result_ == _rhs.request_result() &&
context_ == _rhs.context() );
}
}; // namespace irods<|fim▁end|> | context_ = _rhs.context();
return *this; |
<|file_name|>2016-12-16 tryout2.py<|end_file_name|><|fim▁begin|>"""
This script investigates how calculating phasic currents from voltage clamp
recordings may benefit from subtracting-out the "noise" determined from a
subset of the quietest pieces of the recording, rather than using smoothing
or curve fitting to guess a guassian-like RMS noise function.
"""
import os
import sys
sys.path.append("../")
sys.path.append("../../")
sys.path.append("../../../")
sys.path.append("../../../../")
import swhlab
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
POINTS_PER_SEC=20000
POINTS_PER_MS=int(POINTS_PER_SEC/1000)
CHUNK_POINTS=POINTS_PER_MS*10 # size of Y pieces to calculate variance from
PERCENT_STEP=10 # percentile steps to display
HIST_RESOLUTION=.1 # pA per bin
COLORMAP=plt.get_cmap('jet') # which color scheme do we want to use?
#COLORMAP=plt.get_cmap('winter') # which color scheme do we want to use?
def quietParts(data,percentile=10):
"""
Given some data, break it into chunks and return just the quiet ones.
Returns data where the variance for its chunk size is below the given percentile.
CHUNK_POINTS should be adjusted so it's about 10ms of data.
"""
nChunks=int(len(data)/CHUNK_POINTS)
chunks=np.reshape(data[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))
variances=np.var(chunks,axis=1)
percentiles=np.empty(len(variances))
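# rank each chunk's variance as a percentile (0-100) of all chunk variances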
for i,variance in enumerate(variances):
percentiles[i]=sorted(variances).index(variance)/len(variances)*100
selected=chunks[np.where(percentiles<=percentile)[0]].flatten()
return selected
def ndist(data,Xs):
"""
given some data and a list of X positions, return the normal
distribution curve as a Y point at each of those Xs.
"""
sigma=np.sqrt(np.var(data))
center=np.average(data)
curve=mlab.normpdf(Xs,center,sigma)
curve*=len(data)*HIST_RESOLUTION
return curve
if __name__=="__main__":
Y=np.load("sweepdata.npy")
baseline=swhlab.common.lowpass(Y,POINTS_PER_MS*250)
plt.figure(figsize=(15,5))
plt.plot(Y)
plt.plot(baseline,color='r',alpha=.5,lw=5)
plt.savefig("baseline.png")
plt.figure(figsize=(15,5))
plt.plot(Y-baseline)
plt.axhline(0,color='r',alpha=.5,lw=5)
plt.savefig("baseline2.png")
plt.show()
if __name__=="__main__" and False:
# apply baseline
Y=Y-baseline
# predict what our histogram will look like
padding=50
histCenter=int(np.average(Y))
histRange=(histCenter-padding,histCenter+padding)
histBins=int(abs(histRange[0]-histRange[1])/HIST_RESOLUTION)
# FIRST CALCULATE THE 10-PERCENTILE CURVE
data=quietParts(Y,10) # assume 10% is a good percentile to use
hist,bins=np.histogram(data,bins=histBins,range=histRange,density=False)
hist=hist.astype(np.float) # histogram of data values
curve=ndist(data,bins[:-1]) # normal distribution curve
hist[hist == 0] = np.nan<|fim▁hole|> baselineCurve=curve/np.max(curve) # max is good for smooth curve
# THEN CALCULATE THE WHOLE-SWEEP HISTOGRAM
hist,bins=np.histogram(Y,bins=histBins,range=histRange,density=False)
hist=hist.astype(np.float) # histogram of data values
hist[hist == 0] = np.nan
histValidIs=np.where(~np.isnan(hist))
histX,histY=bins[:-1][histValidIs],hist[histValidIs] # remove nans
histY/=np.percentile(histY,98) # percentile is needed for noisy data
# DETERMINE THE DIFFERENCE
diffX=bins[:-1][histValidIs]
diffY=histY-baselineCurve[histValidIs]
diffY[diffY<0]=np.nan
# NOW PLOT THE DIFFERENCE
plt.figure(figsize=(10,10))
plt.subplot(211)
plt.grid()
plt.plot(histX,histY,'b.',ms=10,alpha=.5,label="data points")
plt.plot(bins[:-1],baselineCurve,'r-',lw=3,alpha=.5,label="10% distribution")
plt.legend(loc='upper left',shadow=True)
plt.ylabel("normalized distribution")
plt.axis([histCenter-20,histCenter+20,0,1.5])
plt.subplot(212)
plt.grid()
plt.plot(diffX,diffY,'.',ms=10,alpha=.5,color='b')
plt.axvline(histCenter,color='r',lw=3,alpha=.5,ls='--')
plt.legend(loc='upper left',shadow=True)
plt.ylabel("difference")
plt.xlabel("histogram data points (pA)")
plt.margins(0,.1)
plt.axis([histCenter-20,histCenter+20,0,None])
plt.tight_layout()
plt.savefig("2016-12-16-tryout-yesSub.png")
plt.show()
print("DONE")<|fim▁end|> | histValidIs=np.where(~np.isnan(hist))
histX,histY=bins[:-1][histValidIs],hist[histValidIs] # remove nans |
<|file_name|>actix_state.rs<|end_file_name|><|fim▁begin|>use super::repo_actor;<|fim▁hole|>pub struct State {
pub db: Addr<repo_actor::DbExecutor>,
}<|fim▁end|> | use ::actix::prelude::*;
|
<|file_name|>passphrases.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from flask import Blueprint<|fim▁hole|>
@route(bp, '/')
def get():
p = Passphrase.get_random()
return {"results": p}<|fim▁end|> | from jotonce.api import route
from jotonce.passphrases.models import Passphrase
bp = Blueprint('passphrase', __name__, url_prefix='/passphrase') |
<|file_name|>test_object.go<|end_file_name|><|fim▁begin|>package model
type TypeA struct {
Int int
String string
}
type TypeB struct {
Int int
String string
}
type TypeC struct {
Field1 int
Field2 int
}
type TypeD struct {
Field int
}
type TypeE struct {
Field *int<|fim▁hole|><|fim▁end|> | } |
<|file_name|>weight_drop.py<|end_file_name|><|fim▁begin|># Taken from https://github.com/salesforce/awd-lstm-lm/blob/master/weight_drop.py
import torch
from torch.nn import Parameter
from functools import wraps
class WeightDrop(torch.nn.Module):
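"""
DropConnect wrapper: applies dropout to the named weight matrices of the
wrapped module (e.g. an LSTM's hidden-to-hidden weights) on each forward pass.
"""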
def __init__(self, module, weights, dropout=0, variational=False):
super(WeightDrop, self).__init__()
self.module = module
self.weights = weights
self.dropout = dropout
self.variational = variational
self._setup()<|fim▁hole|> return
def _setup(self):
# Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
if issubclass(type(self.module), torch.nn.RNNBase):
self.module.flatten_parameters = self._dummy
for name_w in self.weights:
print('Applying weight drop of {} to {}'.format(self.dropout, name_w))
w = getattr(self.module, name_w)
del self.module._parameters[name_w]
self.module.register_parameter(name_w + '_raw', Parameter(w.data))
def _setweights(self):
for name_w in self.weights:
raw_w = getattr(self.module, name_w + '_raw')
w = None
if self.variational:
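# Variational variant: one dropout mask per output row, broadcast across
# the row, so whole rows of the weight matrix are dropped together.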
mask = torch.autograd.Variable(torch.ones(raw_w.size(0), 1))
if raw_w.is_cuda: mask = mask.cuda()
mask = torch.nn.functional.dropout(mask, p=self.dropout, training=True)
w = mask.expand_as(raw_w) * raw_w
else:
w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training)
setattr(self.module, name_w, w)
def forward(self, *args):
self._setweights()
return self.module.forward(*args)<|fim▁end|> |
def _dummy(*args, **kwargs):
# We need to replace flatten_parameters with a nothing function |
<|file_name|>issue-62046.rs<|end_file_name|><|fim▁begin|>// build-fail
// ignore-emscripten no asm! support
<|fim▁hole|>fn main() {
unsafe {
llvm_asm!("nop" : "+r"("r15"));
//~^ malformed inline assembly
}
}<|fim▁end|> | #![feature(llvm_asm)]
#![allow(deprecated)] // llvm_asm!
|
<|file_name|>loopbce.go<|end_file_name|><|fim▁begin|>package ssa
type indVar struct {
ind *Value // induction variable
inc *Value // increment, a constant
nxt *Value // ind+inc variable
min *Value // minimum value. inclusive,
max *Value // maximum value. exclusive.
entry *Block // entry block in the loop.
// Invariants: for all blocks dominated by entry:
// min <= ind < max
// min <= nxt <= max
}
// findIndVar finds induction variables in a function.
//
// Look for variables and blocks that satisfy the following
//
// loop:<|fim▁hole|>// if ind < max
// then goto enter_loop
// else goto exit_loop
//
// enter_loop:
// do something
// nxt = inc + ind
// goto loop
//
// exit_loop:
//
//
// TODO: handle 32 bit operations
func findIndVar(f *Func) []indVar {
var iv []indVar
sdom := f.sdom()
nextb:
for _, b := range f.Blocks {
if b.Kind != BlockIf || len(b.Preds) != 2 {
continue
}
var ind, max *Value // induction, and maximum
entry := -1 // which successor of b enters the loop
// Check that the control is either ind < max or max > ind.
// TODO: Handle Leq64, Geq64.
switch b.Control.Op {
case OpLess64:
entry = 0
ind, max = b.Control.Args[0], b.Control.Args[1]
case OpGreater64:
entry = 0
ind, max = b.Control.Args[1], b.Control.Args[0]
default:
continue nextb
}
// Check that the induction variable is a phi that depends on itself.
if ind.Op != OpPhi {
continue
}
// Extract min and nxt knowing that nxt is an addition (e.g. Add64).
var min, nxt *Value // minimum, and next value
if n := ind.Args[0]; n.Op == OpAdd64 && (n.Args[0] == ind || n.Args[1] == ind) {
min, nxt = ind.Args[1], n
} else if n := ind.Args[1]; n.Op == OpAdd64 && (n.Args[0] == ind || n.Args[1] == ind) {
min, nxt = ind.Args[0], n
} else {
// Not a recognized induction variable.
continue
}
var inc *Value
if nxt.Args[0] == ind { // nxt = ind + inc
inc = nxt.Args[1]
} else if nxt.Args[1] == ind { // nxt = inc + ind
inc = nxt.Args[0]
} else {
panic("unreachable") // one of the cases must be true from the above.
}
// Expect the increment to be a positive constant.
// TODO: handle negative increment.
if inc.Op != OpConst64 || inc.AuxInt <= 0 {
continue
}
// Up to now we extracted the induction variable (ind),
// the increment delta (inc), the temporary sum (nxt),
// the minimum value (min) and the maximum value (max).
//
// We also know that ind has the form (Phi min nxt) where
// nxt is (Add inc nxt) which means: 1) inc dominates nxt
// and 2) there is a loop starting at inc and containing nxt.
//
// We need to prove that the induction variable is incremented
// only when it's smaller than the maximum value.
// Two conditions must happen listed below to accept ind
// as an induction variable.
// First condition: loop entry has a single predecessor, which
// is the header block. This implies that b.Succs[entry] is
// reached iff ind < max.
if len(b.Succs[entry].b.Preds) != 1 {
// b.Succs[1-entry] must exit the loop.
continue
}
// Second condition: b.Succs[entry] dominates nxt so that
// nxt is computed when inc < max, meaning nxt <= max.
if !sdom.isAncestorEq(b.Succs[entry].b, nxt.Block) {
// inc+ind can only be reached through the branch that enters the loop.
continue
}
// If max is c + SliceLen with c <= 0 then we drop c.
// Makes sure c + SliceLen doesn't overflow when SliceLen == 0.
// TODO: save c as an offset from max.
if w, c := dropAdd64(max); (w.Op == OpStringLen || w.Op == OpSliceLen) && 0 >= c && -c >= 0 {
max = w
}
// We can only guarantee that the loops runs within limits of induction variable
// if the increment is 1 or when the limits are constants.
if inc.AuxInt != 1 {
ok := false
if min.Op == OpConst64 && max.Op == OpConst64 {
if max.AuxInt > min.AuxInt && max.AuxInt%inc.AuxInt == min.AuxInt%inc.AuxInt { // handle overflow
ok = true
}
}
if !ok {
continue
}
}
if f.pass.debug > 1 {
if min.Op == OpConst64 {
b.Func.Config.Warnl(b.Line, "Induction variable with minimum %d and increment %d", min.AuxInt, inc.AuxInt)
} else {
b.Func.Config.Warnl(b.Line, "Induction variable with non-const minimum and increment %d", inc.AuxInt)
}
}
iv = append(iv, indVar{
ind: ind,
inc: inc,
nxt: nxt,
min: min,
max: max,
entry: b.Succs[entry].b,
})
b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max)
}
return iv
}
// loopbce performs loop based bounds check elimination.
func loopbce(f *Func) {
ivList := findIndVar(f)
m := make(map[*Value]indVar)
for _, iv := range ivList {
m[iv.ind] = iv
}
removeBoundsChecks(f, m)
}
// removeBoundsChecks removes IsInBounds and IsSliceInBounds checks based on the induction variables.
func removeBoundsChecks(f *Func, m map[*Value]indVar) {
sdom := f.sdom()
for _, b := range f.Blocks {
if b.Kind != BlockIf {
continue
}
v := b.Control
// Simplify:
// (IsInBounds ind max) where 0 <= const == min <= ind < max.
// (IsSliceInBounds ind max) where 0 <= const == min <= ind < max.
// Found in:
// for i := range a {
// use a[i]
// use a[i:]
// use a[:i]
// }
if v.Op == OpIsInBounds || v.Op == OpIsSliceInBounds {
ind, add := dropAdd64(v.Args[0])
if ind.Op != OpPhi {
goto skip1
}
if v.Op == OpIsInBounds && add != 0 {
goto skip1
}
if v.Op == OpIsSliceInBounds && (0 > add || add > 1) {
goto skip1
}
if iv, has := m[ind]; has && sdom.isAncestorEq(iv.entry, b) && isNonNegative(iv.min) {
if v.Args[1] == iv.max {
if f.pass.debug > 0 {
f.Config.Warnl(b.Line, "Found redundant %s", v.Op)
}
goto simplify
}
}
}
skip1:
// Simplify:
// (IsSliceInBounds ind (SliceCap a)) where 0 <= min <= ind < max == (SliceLen a)
// Found in:
// for i := range a {
// use a[:i]
// use a[:i+1]
// }
if v.Op == OpIsSliceInBounds {
ind, add := dropAdd64(v.Args[0])
if ind.Op != OpPhi {
goto skip2
}
if 0 > add || add > 1 {
goto skip2
}
if iv, has := m[ind]; has && sdom.isAncestorEq(iv.entry, b) && isNonNegative(iv.min) {
if v.Args[1].Op == OpSliceCap && iv.max.Op == OpSliceLen && v.Args[1].Args[0] == iv.max.Args[0] {
if f.pass.debug > 0 {
f.Config.Warnl(b.Line, "Found redundant %s (len promoted to cap)", v.Op)
}
goto simplify
}
}
}
skip2:
// Simplify
// (IsInBounds (Add64 ind) (Const64 [c])) where 0 <= min <= ind < max <= (Const64 [c])
// (IsSliceInBounds ind (Const64 [c])) where 0 <= min <= ind < max <= (Const64 [c])
if v.Op == OpIsInBounds || v.Op == OpIsSliceInBounds {
ind, add := dropAdd64(v.Args[0])
if ind.Op != OpPhi {
goto skip3
}
// ind + add >= 0 <-> min + add >= 0 <-> min >= -add
if iv, has := m[ind]; has && sdom.isAncestorEq(iv.entry, b) && isGreaterOrEqualThan(iv.min, -add) {
if !v.Args[1].isGenericIntConst() || !iv.max.isGenericIntConst() {
goto skip3
}
limit := v.Args[1].AuxInt
if v.Op == OpIsSliceInBounds {
// If limit++ overflows signed integer then 0 <= max && max <= limit will be false.
limit++
}
if max := iv.max.AuxInt + add; 0 <= max && max <= limit { // handle overflow
if f.pass.debug > 0 {
f.Config.Warnl(b.Line, "Found redundant (%s ind %d), ind < %d", v.Op, v.Args[1].AuxInt, iv.max.AuxInt+add)
}
goto simplify
}
}
}
skip3:
continue
simplify:
f.Logf("removing bounds check %v at %v in %s\n", b.Control, b, f.Name)
b.Kind = BlockFirst
b.SetControl(nil)
}
}
func dropAdd64(v *Value) (*Value, int64) {
if v.Op == OpAdd64 && v.Args[0].Op == OpConst64 {
return v.Args[1], v.Args[0].AuxInt
}
if v.Op == OpAdd64 && v.Args[1].Op == OpConst64 {
return v.Args[0], v.Args[1].AuxInt
}
return v, 0
}
func isGreaterOrEqualThan(v *Value, c int64) bool {
if c == 0 {
return isNonNegative(v)
}
if v.isGenericIntConst() && v.AuxInt >= c {
return true
}
return false
}<|fim▁end|> | // ind = (Phi min nxt), |
<|file_name|>test_audioop.py<|end_file_name|><|fim▁begin|>import audioop
import sys
import unittest
def pack(width, data):
return b''.join(v.to_bytes(width, sys.byteorder, signed=True) for v in data)
def unpack(width, data):
return [int.from_bytes(data[i: i + width], sys.byteorder, signed=True)
for i in range(0, len(data), width)]
packs = {w: (lambda *data, width=w: pack(width, data)) for w in (1, 2, 3, 4)}
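# packs[w](*values) packs integers into w-byte signed samples (native byte order);
# maxvalues/minvalues below give the signed range for each sample width w.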
maxvalues = {w: (1 << (8 * w - 1)) - 1 for w in (1, 2, 3, 4)}
minvalues = {w: -1 << (8 * w - 1) for w in (1, 2, 3, 4)}
datas = {
1: b'\x00\x12\x45\xbb\x7f\x80\xff',
2: packs[2](0, 0x1234, 0x4567, -0x4567, 0x7fff, -0x8000, -1),
3: packs[3](0, 0x123456, 0x456789, -0x456789, 0x7fffff, -0x800000, -1),
4: packs[4](0, 0x12345678, 0x456789ab, -0x456789ab,
0x7fffffff, -0x80000000, -1),
}
INVALID_DATA = [
(b'abc', 0),
(b'abc', 2),
(b'ab', 3),
(b'abc', 4),
]
class TestAudioop(unittest.TestCase):
def test_max(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.max(b'', w), 0)
self.assertEqual(audioop.max(bytearray(), w), 0)
self.assertEqual(audioop.max(memoryview(b''), w), 0)
p = packs[w]
self.assertEqual(audioop.max(p(5), w), 5)
self.assertEqual(audioop.max(p(5, -8, -1), w), 8)
self.assertEqual(audioop.max(p(maxvalues[w]), w), maxvalues[w])
self.assertEqual(audioop.max(p(minvalues[w]), w), -minvalues[w])
self.assertEqual(audioop.max(datas[w], w), -minvalues[w])
def test_minmax(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.minmax(b'', w),
(0x7fffffff, -0x80000000))
self.assertEqual(audioop.minmax(bytearray(), w),
(0x7fffffff, -0x80000000))
self.assertEqual(audioop.minmax(memoryview(b''), w),
(0x7fffffff, -0x80000000))
p = packs[w]
self.assertEqual(audioop.minmax(p(5), w), (5, 5))
self.assertEqual(audioop.minmax(p(5, -8, -1), w), (-8, 5))
self.assertEqual(audioop.minmax(p(maxvalues[w]), w),
(maxvalues[w], maxvalues[w]))
self.assertEqual(audioop.minmax(p(minvalues[w]), w),
(minvalues[w], minvalues[w]))
self.assertEqual(audioop.minmax(datas[w], w),
(minvalues[w], maxvalues[w]))
def test_maxpp(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.maxpp(b'', w), 0)
self.assertEqual(audioop.maxpp(bytearray(), w), 0)
self.assertEqual(audioop.maxpp(memoryview(b''), w), 0)
self.assertEqual(audioop.maxpp(packs[w](*range(100)), w), 0)
self.assertEqual(audioop.maxpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
self.assertEqual(audioop.maxpp(datas[w], w),
maxvalues[w] - minvalues[w])
def test_avg(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.avg(b'', w), 0)
self.assertEqual(audioop.avg(bytearray(), w), 0)
self.assertEqual(audioop.avg(memoryview(b''), w), 0)
p = packs[w]
self.assertEqual(audioop.avg(p(5), w), 5)
self.assertEqual(audioop.avg(p(5, 8), w), 6)
self.assertEqual(audioop.avg(p(5, -8), w), -2)
self.assertEqual(audioop.avg(p(maxvalues[w], maxvalues[w]), w),
maxvalues[w])
self.assertEqual(audioop.avg(p(minvalues[w], minvalues[w]), w),
minvalues[w])
self.assertEqual(audioop.avg(packs[4](0x50000000, 0x70000000), 4),
0x60000000)
self.assertEqual(audioop.avg(packs[4](-0x50000000, -0x70000000), 4),
-0x60000000)
def test_avgpp(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.avgpp(b'', w), 0)
self.assertEqual(audioop.avgpp(bytearray(), w), 0)
self.assertEqual(audioop.avgpp(memoryview(b''), w), 0)
self.assertEqual(audioop.avgpp(packs[w](*range(100)), w), 0)
self.assertEqual(audioop.avgpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
self.assertEqual(audioop.avgpp(datas[1], 1), 196)
self.assertEqual(audioop.avgpp(datas[2], 2), 50534)
self.assertEqual(audioop.avgpp(datas[3], 3), 12937096)
self.assertEqual(audioop.avgpp(datas[4], 4), 3311897002)
def test_rms(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.rms(b'', w), 0)
self.assertEqual(audioop.rms(bytearray(), w), 0)
self.assertEqual(audioop.rms(memoryview(b''), w), 0)
p = packs[w]
self.assertEqual(audioop.rms(p(*range(100)), w), 57)
self.assertAlmostEqual(audioop.rms(p(maxvalues[w]) * 5, w),
maxvalues[w], delta=1)
self.assertAlmostEqual(audioop.rms(p(minvalues[w]) * 5, w),
-minvalues[w], delta=1)
self.assertEqual(audioop.rms(datas[1], 1), 77)
self.assertEqual(audioop.rms(datas[2], 2), 20001)
self.assertEqual(audioop.rms(datas[3], 3), 5120523)
self.assertEqual(audioop.rms(datas[4], 4), 1310854152)
def test_cross(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.cross(b'', w), -1)
self.assertEqual(audioop.cross(bytearray(), w), -1)
self.assertEqual(audioop.cross(memoryview(b''), w), -1)
p = packs[w]
self.assertEqual(audioop.cross(p(0, 1, 2), w), 0)
self.assertEqual(audioop.cross(p(1, 2, -3, -4), w), 1)
self.assertEqual(audioop.cross(p(-1, -2, 3, 4), w), 1)
self.assertEqual(audioop.cross(p(0, minvalues[w]), w), 1)<|fim▁hole|> def test_add(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.add(b'', b'', w), b'')
self.assertEqual(audioop.add(bytearray(), bytearray(), w), b'')
self.assertEqual(audioop.add(memoryview(b''), memoryview(b''), w), b'')
self.assertEqual(audioop.add(datas[w], b'\0' * len(datas[w]), w),
datas[w])
self.assertEqual(audioop.add(datas[1], datas[1], 1),
b'\x00\x24\x7f\x80\x7f\x80\xfe')
self.assertEqual(audioop.add(datas[2], datas[2], 2),
packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
self.assertEqual(audioop.add(datas[3], datas[3], 3),
packs[3](0, 0x2468ac, 0x7fffff, -0x800000,
0x7fffff, -0x800000, -2))
self.assertEqual(audioop.add(datas[4], datas[4], 4),
packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
0x7fffffff, -0x80000000, -2))
def test_bias(self):
for w in 1, 2, 3, 4:
for bias in 0, 1, -1, 127, -128, 0x7fffffff, -0x80000000:
self.assertEqual(audioop.bias(b'', w, bias), b'')
self.assertEqual(audioop.bias(bytearray(), w, bias), b'')
self.assertEqual(audioop.bias(memoryview(b''), w, bias), b'')
self.assertEqual(audioop.bias(datas[1], 1, 1),
b'\x01\x13\x46\xbc\x80\x81\x00')
self.assertEqual(audioop.bias(datas[1], 1, -1),
b'\xff\x11\x44\xba\x7e\x7f\xfe')
self.assertEqual(audioop.bias(datas[1], 1, 0x7fffffff),
b'\xff\x11\x44\xba\x7e\x7f\xfe')
self.assertEqual(audioop.bias(datas[1], 1, -0x80000000),
datas[1])
self.assertEqual(audioop.bias(datas[2], 2, 1),
packs[2](1, 0x1235, 0x4568, -0x4566, -0x8000, -0x7fff, 0))
self.assertEqual(audioop.bias(datas[2], 2, -1),
packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
self.assertEqual(audioop.bias(datas[2], 2, 0x7fffffff),
packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
self.assertEqual(audioop.bias(datas[2], 2, -0x80000000),
datas[2])
self.assertEqual(audioop.bias(datas[3], 3, 1),
packs[3](1, 0x123457, 0x45678a, -0x456788,
-0x800000, -0x7fffff, 0))
self.assertEqual(audioop.bias(datas[3], 3, -1),
packs[3](-1, 0x123455, 0x456788, -0x45678a,
0x7ffffe, 0x7fffff, -2))
self.assertEqual(audioop.bias(datas[3], 3, 0x7fffffff),
packs[3](-1, 0x123455, 0x456788, -0x45678a,
0x7ffffe, 0x7fffff, -2))
self.assertEqual(audioop.bias(datas[3], 3, -0x80000000),
datas[3])
self.assertEqual(audioop.bias(datas[4], 4, 1),
packs[4](1, 0x12345679, 0x456789ac, -0x456789aa,
-0x80000000, -0x7fffffff, 0))
self.assertEqual(audioop.bias(datas[4], 4, -1),
packs[4](-1, 0x12345677, 0x456789aa, -0x456789ac,
0x7ffffffe, 0x7fffffff, -2))
self.assertEqual(audioop.bias(datas[4], 4, 0x7fffffff),
packs[4](0x7fffffff, -0x6dcba989, -0x3a987656, 0x3a987654,
-2, -1, 0x7ffffffe))
self.assertEqual(audioop.bias(datas[4], 4, -0x80000000),
packs[4](-0x80000000, -0x6dcba988, -0x3a987655, 0x3a987655,
-1, 0, 0x7fffffff))
def test_lin2lin(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.lin2lin(datas[w], w, w), datas[w])
self.assertEqual(audioop.lin2lin(bytearray(datas[w]), w, w),
datas[w])
self.assertEqual(audioop.lin2lin(memoryview(datas[w]), w, w),
datas[w])
self.assertEqual(audioop.lin2lin(datas[1], 1, 2),
packs[2](0, 0x1200, 0x4500, -0x4500, 0x7f00, -0x8000, -0x100))
self.assertEqual(audioop.lin2lin(datas[1], 1, 3),
packs[3](0, 0x120000, 0x450000, -0x450000,
0x7f0000, -0x800000, -0x10000))
self.assertEqual(audioop.lin2lin(datas[1], 1, 4),
packs[4](0, 0x12000000, 0x45000000, -0x45000000,
0x7f000000, -0x80000000, -0x1000000))
self.assertEqual(audioop.lin2lin(datas[2], 2, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[2], 2, 3),
packs[3](0, 0x123400, 0x456700, -0x456700,
0x7fff00, -0x800000, -0x100))
self.assertEqual(audioop.lin2lin(datas[2], 2, 4),
packs[4](0, 0x12340000, 0x45670000, -0x45670000,
0x7fff0000, -0x80000000, -0x10000))
self.assertEqual(audioop.lin2lin(datas[3], 3, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[3], 3, 2),
packs[2](0, 0x1234, 0x4567, -0x4568, 0x7fff, -0x8000, -1))
self.assertEqual(audioop.lin2lin(datas[3], 3, 4),
packs[4](0, 0x12345600, 0x45678900, -0x45678900,
0x7fffff00, -0x80000000, -0x100))
self.assertEqual(audioop.lin2lin(datas[4], 4, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[4], 4, 2),
packs[2](0, 0x1234, 0x4567, -0x4568, 0x7fff, -0x8000, -1))
self.assertEqual(audioop.lin2lin(datas[4], 4, 3),
packs[3](0, 0x123456, 0x456789, -0x45678a,
0x7fffff, -0x800000, -1))
def test_adpcm2lin(self):
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 1, None),
(b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
self.assertEqual(audioop.adpcm2lin(bytearray(b'\x07\x7f\x7f'), 1, None),
(b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
self.assertEqual(audioop.adpcm2lin(memoryview(b'\x07\x7f\x7f'), 1, None),
(b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 2, None),
(packs[2](0, 0xb, 0x29, -0x16, 0x72, -0xb3), (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 3, None),
(packs[3](0, 0xb00, 0x2900, -0x1600, 0x7200,
-0xb300), (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 4, None),
(packs[4](0, 0xb0000, 0x290000, -0x160000, 0x720000,
-0xb30000), (-179, 40)))
# Very cursory test
for w in 1, 2, 3, 4:
self.assertEqual(audioop.adpcm2lin(b'\0' * 5, w, None),
(b'\0' * w * 10, (0, 0)))
def test_lin2adpcm(self):
self.assertEqual(audioop.lin2adpcm(datas[1], 1, None),
(b'\x07\x7f\x7f', (-221, 39)))
self.assertEqual(audioop.lin2adpcm(bytearray(datas[1]), 1, None),
(b'\x07\x7f\x7f', (-221, 39)))
self.assertEqual(audioop.lin2adpcm(memoryview(datas[1]), 1, None),
(b'\x07\x7f\x7f', (-221, 39)))
for w in 2, 3, 4:
self.assertEqual(audioop.lin2adpcm(datas[w], w, None),
(b'\x07\x7f\x7f', (31, 39)))
# Very cursory test
for w in 1, 2, 3, 4:
self.assertEqual(audioop.lin2adpcm(b'\0' * w * 10, w, None),
(b'\0' * 5, (0, 0)))
def test_invalid_adpcm_state(self):
# state must be a tuple or None, not an integer
self.assertRaises(TypeError, audioop.adpcm2lin, b'\0', 1, 555)
self.assertRaises(TypeError, audioop.lin2adpcm, b'\0', 1, 555)
# Issues #24456, #24457: index out of range
self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (0, -1))
self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (0, 89))
self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (0, -1))
self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (0, 89))
# value out of range
self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (-0x8001, 0))
self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (0x8000, 0))
self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (-0x8001, 0))
self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (0x8000, 0))
def test_lin2alaw(self):
self.assertEqual(audioop.lin2alaw(datas[1], 1),
b'\xd5\x87\xa4\x24\xaa\x2a\x5a')
self.assertEqual(audioop.lin2alaw(bytearray(datas[1]), 1),
b'\xd5\x87\xa4\x24\xaa\x2a\x5a')
self.assertEqual(audioop.lin2alaw(memoryview(datas[1]), 1),
b'\xd5\x87\xa4\x24\xaa\x2a\x5a')
for w in 2, 3, 4:
self.assertEqual(audioop.lin2alaw(datas[w], w),
b'\xd5\x87\xa4\x24\xaa\x2a\x55')
def test_alaw2lin(self):
encoded = b'\x00\x03\x24\x2a\x51\x54\x55\x58\x6b\x71\x7f'\
b'\x80\x83\xa4\xaa\xd1\xd4\xd5\xd8\xeb\xf1\xff'
src = [-688, -720, -2240, -4032, -9, -3, -1, -27, -244, -82, -106,
688, 720, 2240, 4032, 9, 3, 1, 27, 244, 82, 106]
for w in 1, 2, 3, 4:
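# a-law decodes to 13-bit samples; x << (w * 8) >> 13 rescales each
# reference value from src to a w-byte sample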
decoded = packs[w](*(x << (w * 8) >> 13 for x in src))
self.assertEqual(audioop.alaw2lin(encoded, w), decoded)
self.assertEqual(audioop.alaw2lin(bytearray(encoded), w), decoded)
self.assertEqual(audioop.alaw2lin(memoryview(encoded), w), decoded)
encoded = bytes(range(256))
for w in 2, 3, 4:
decoded = audioop.alaw2lin(encoded, w)
self.assertEqual(audioop.lin2alaw(decoded, w), encoded)
def test_lin2ulaw(self):
self.assertEqual(audioop.lin2ulaw(datas[1], 1),
b'\xff\xad\x8e\x0e\x80\x00\x67')
self.assertEqual(audioop.lin2ulaw(bytearray(datas[1]), 1),
b'\xff\xad\x8e\x0e\x80\x00\x67')
self.assertEqual(audioop.lin2ulaw(memoryview(datas[1]), 1),
b'\xff\xad\x8e\x0e\x80\x00\x67')
for w in 2, 3, 4:
self.assertEqual(audioop.lin2ulaw(datas[w], w),
b'\xff\xad\x8e\x0e\x80\x00\x7e')
def test_ulaw2lin(self):
encoded = b'\x00\x0e\x28\x3f\x57\x6a\x76\x7c\x7e\x7f'\
b'\x80\x8e\xa8\xbf\xd7\xea\xf6\xfc\xfe\xff'
src = [-8031, -4447, -1471, -495, -163, -53, -18, -6, -2, 0,
8031, 4447, 1471, 495, 163, 53, 18, 6, 2, 0]
for w in 1, 2, 3, 4:
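# u-law decodes to 14-bit samples, hence the >> 14 when rescaling to w bytes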
decoded = packs[w](*(x << (w * 8) >> 14 for x in src))
self.assertEqual(audioop.ulaw2lin(encoded, w), decoded)
self.assertEqual(audioop.ulaw2lin(bytearray(encoded), w), decoded)
self.assertEqual(audioop.ulaw2lin(memoryview(encoded), w), decoded)
# Current u-law implementation has two codes for 0: 0x7f and 0xff.
encoded = bytes(range(127)) + bytes(range(128, 256))
for w in 2, 3, 4:
decoded = audioop.ulaw2lin(encoded, w)
self.assertEqual(audioop.lin2ulaw(decoded, w), encoded)
def test_mul(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.mul(b'', w, 2), b'')
self.assertEqual(audioop.mul(bytearray(), w, 2), b'')
self.assertEqual(audioop.mul(memoryview(b''), w, 2), b'')
self.assertEqual(audioop.mul(datas[w], w, 0),
b'\0' * len(datas[w]))
self.assertEqual(audioop.mul(datas[w], w, 1),
datas[w])
self.assertEqual(audioop.mul(datas[1], 1, 2),
b'\x00\x24\x7f\x80\x7f\x80\xfe')
self.assertEqual(audioop.mul(datas[2], 2, 2),
packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
self.assertEqual(audioop.mul(datas[3], 3, 2),
packs[3](0, 0x2468ac, 0x7fffff, -0x800000,
0x7fffff, -0x800000, -2))
self.assertEqual(audioop.mul(datas[4], 4, 2),
packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
0x7fffffff, -0x80000000, -2))
def test_ratecv(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 8000, None),
(b'', (-1, ((0, 0),))))
self.assertEqual(audioop.ratecv(bytearray(), w, 1, 8000, 8000, None),
(b'', (-1, ((0, 0),))))
self.assertEqual(audioop.ratecv(memoryview(b''), w, 1, 8000, 8000, None),
(b'', (-1, ((0, 0),))))
self.assertEqual(audioop.ratecv(b'', w, 5, 8000, 8000, None),
(b'', (-1, ((0, 0),) * 5)))
self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 16000, None),
(b'', (-2, ((0, 0),))))
self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None)[0],
datas[w])
self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None, 1, 0)[0],
datas[w])
state = None
d1, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
d2, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
self.assertEqual(d1 + d2, b'\000\000\001\001\002\001\000\000\001\001\002')
for w in 1, 2, 3, 4:
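# feeding the data frame-by-frame while threading the state through
# must match the one-shot conversion exactly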
d0, state0 = audioop.ratecv(datas[w], w, 1, 8000, 16000, None)
d, state = b'', None
for i in range(0, len(datas[w]), w):
d1, state = audioop.ratecv(datas[w][i:i + w], w, 1,
8000, 16000, state)
d += d1
self.assertEqual(d, d0)
self.assertEqual(state, state0)
expected = {
1: packs[1](0, 0x0d, 0x37, -0x26, 0x55, -0x4b, -0x14),
2: packs[2](0, 0x0da7, 0x3777, -0x2630, 0x5673, -0x4a64, -0x129a),
3: packs[3](0, 0x0da740, 0x377776, -0x262fca,
0x56740c, -0x4a62fd, -0x1298c0),
4: packs[4](0, 0x0da740da, 0x37777776, -0x262fc962,
0x56740da6, -0x4a62fc96, -0x1298bf26),
}
for w in 1, 2, 3, 4:
self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None, 3, 1)[0],
expected[w])
self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None, 30, 10)[0],
expected[w])
def test_reverse(self):
for w in 1, 2, 3, 4:
self.assertEqual(audioop.reverse(b'', w), b'')
self.assertEqual(audioop.reverse(bytearray(), w), b'')
self.assertEqual(audioop.reverse(memoryview(b''), w), b'')
self.assertEqual(audioop.reverse(packs[w](0, 1, 2), w),
packs[w](2, 1, 0))
def test_tomono(self):
for w in 1, 2, 3, 4:
data1 = datas[w]
data2 = bytearray(2 * len(data1))
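# interleave data1 with silence into a stereo buffer: byte k of each
# w-byte frame fills the left-channel slots of data2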
for k in range(w):
data2[k::2*w] = data1[k::w]
self.assertEqual(audioop.tomono(data2, w, 1, 0), data1)
self.assertEqual(audioop.tomono(data2, w, 0, 1), b'\0' * len(data1))
for k in range(w):
data2[k+w::2*w] = data1[k::w]
self.assertEqual(audioop.tomono(data2, w, 0.5, 0.5), data1)
self.assertEqual(audioop.tomono(bytearray(data2), w, 0.5, 0.5),
data1)
self.assertEqual(audioop.tomono(memoryview(data2), w, 0.5, 0.5),
data1)
def test_tostereo(self):
for w in 1, 2, 3, 4:
data1 = datas[w]
data2 = bytearray(2 * len(data1))
for k in range(w):
data2[k::2*w] = data1[k::w]
self.assertEqual(audioop.tostereo(data1, w, 1, 0), data2)
self.assertEqual(audioop.tostereo(data1, w, 0, 0), b'\0' * len(data2))
for k in range(w):
data2[k+w::2*w] = data1[k::w]
self.assertEqual(audioop.tostereo(data1, w, 1, 1), data2)
self.assertEqual(audioop.tostereo(bytearray(data1), w, 1, 1), data2)
self.assertEqual(audioop.tostereo(memoryview(data1), w, 1, 1),
data2)
def test_findfactor(self):
self.assertEqual(audioop.findfactor(datas[2], datas[2]), 1.0)
self.assertEqual(audioop.findfactor(bytearray(datas[2]),
bytearray(datas[2])), 1.0)
self.assertEqual(audioop.findfactor(memoryview(datas[2]),
memoryview(datas[2])), 1.0)
self.assertEqual(audioop.findfactor(b'\0' * len(datas[2]), datas[2]),
0.0)
def test_findfit(self):
self.assertEqual(audioop.findfit(datas[2], datas[2]), (0, 1.0))
self.assertEqual(audioop.findfit(bytearray(datas[2]),
bytearray(datas[2])), (0, 1.0))
self.assertEqual(audioop.findfit(memoryview(datas[2]),
memoryview(datas[2])), (0, 1.0))
self.assertEqual(audioop.findfit(datas[2], packs[2](1, 2, 0)),
(1, 8038.8))
self.assertEqual(audioop.findfit(datas[2][:-2] * 5 + datas[2], datas[2]),
(30, 1.0))
def test_findmax(self):
self.assertEqual(audioop.findmax(datas[2], 1), 5)
self.assertEqual(audioop.findmax(bytearray(datas[2]), 1), 5)
self.assertEqual(audioop.findmax(memoryview(datas[2]), 1), 5)
def test_getsample(self):
for w in 1, 2, 3, 4:
data = packs[w](0, 1, -1, maxvalues[w], minvalues[w])
self.assertEqual(audioop.getsample(data, w, 0), 0)
self.assertEqual(audioop.getsample(bytearray(data), w, 0), 0)
self.assertEqual(audioop.getsample(memoryview(data), w, 0), 0)
self.assertEqual(audioop.getsample(data, w, 1), 1)
self.assertEqual(audioop.getsample(data, w, 2), -1)
self.assertEqual(audioop.getsample(data, w, 3), maxvalues[w])
self.assertEqual(audioop.getsample(data, w, 4), minvalues[w])
def test_byteswap(self):
swapped_datas = {
1: datas[1],
2: packs[2](0, 0x3412, 0x6745, -0x6646, -0x81, 0x80, -1),
3: packs[3](0, 0x563412, -0x7698bb, 0x7798ba, -0x81, 0x80, -1),
4: packs[4](0, 0x78563412, -0x547698bb, 0x557698ba,
-0x81, 0x80, -1),
}
for w in 1, 2, 3, 4:
self.assertEqual(audioop.byteswap(b'', w), b'')
self.assertEqual(audioop.byteswap(datas[w], w), swapped_datas[w])
self.assertEqual(audioop.byteswap(swapped_datas[w], w), datas[w])
self.assertEqual(audioop.byteswap(bytearray(datas[w]), w),
swapped_datas[w])
self.assertEqual(audioop.byteswap(memoryview(datas[w]), w),
swapped_datas[w])
def test_negativelen(self):
# from issue 3306, previously it segfaulted
self.assertRaises(audioop.error,
audioop.findmax, bytes(range(256)), -2392392)
def test_issue7673(self):
state = None
for data, size in INVALID_DATA:
size2 = size
self.assertRaises(audioop.error, audioop.getsample, data, size, 0)
self.assertRaises(audioop.error, audioop.max, data, size)
self.assertRaises(audioop.error, audioop.minmax, data, size)
self.assertRaises(audioop.error, audioop.avg, data, size)
self.assertRaises(audioop.error, audioop.rms, data, size)
self.assertRaises(audioop.error, audioop.avgpp, data, size)
self.assertRaises(audioop.error, audioop.maxpp, data, size)
self.assertRaises(audioop.error, audioop.cross, data, size)
self.assertRaises(audioop.error, audioop.mul, data, size, 1.0)
self.assertRaises(audioop.error, audioop.tomono, data, size, 0.5, 0.5)
self.assertRaises(audioop.error, audioop.tostereo, data, size, 0.5, 0.5)
self.assertRaises(audioop.error, audioop.add, data, data, size)
self.assertRaises(audioop.error, audioop.bias, data, size, 0)
self.assertRaises(audioop.error, audioop.reverse, data, size)
self.assertRaises(audioop.error, audioop.lin2lin, data, size, size2)
self.assertRaises(audioop.error, audioop.ratecv, data, size, 1, 1, 1, state)
self.assertRaises(audioop.error, audioop.lin2ulaw, data, size)
self.assertRaises(audioop.error, audioop.lin2alaw, data, size)
self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state)
def test_string(self):
data = 'abcd'
size = 2
self.assertRaises(TypeError, audioop.getsample, data, size, 0)
self.assertRaises(TypeError, audioop.max, data, size)
self.assertRaises(TypeError, audioop.minmax, data, size)
self.assertRaises(TypeError, audioop.avg, data, size)
self.assertRaises(TypeError, audioop.rms, data, size)
self.assertRaises(TypeError, audioop.avgpp, data, size)
self.assertRaises(TypeError, audioop.maxpp, data, size)
self.assertRaises(TypeError, audioop.cross, data, size)
self.assertRaises(TypeError, audioop.mul, data, size, 1.0)
self.assertRaises(TypeError, audioop.tomono, data, size, 0.5, 0.5)
self.assertRaises(TypeError, audioop.tostereo, data, size, 0.5, 0.5)
self.assertRaises(TypeError, audioop.add, data, data, size)
self.assertRaises(TypeError, audioop.bias, data, size, 0)
self.assertRaises(TypeError, audioop.reverse, data, size)
self.assertRaises(TypeError, audioop.lin2lin, data, size, size)
self.assertRaises(TypeError, audioop.ratecv, data, size, 1, 1, 1, None)
self.assertRaises(TypeError, audioop.lin2ulaw, data, size)
self.assertRaises(TypeError, audioop.lin2alaw, data, size)
self.assertRaises(TypeError, audioop.lin2adpcm, data, size, None)
def test_wrongsize(self):
data = b'abcdefgh'
state = None
for size in (-1, 0, 5, 1024):
self.assertRaises(audioop.error, audioop.ulaw2lin, data, size)
self.assertRaises(audioop.error, audioop.alaw2lin, data, size)
self.assertRaises(audioop.error, audioop.adpcm2lin, data, size, state)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | self.assertEqual(audioop.cross(p(minvalues[w], maxvalues[w]), w), 1)
|
<|file_name|>responsive-scripts.js<|end_file_name|><|fim▁begin|>/*! Responsive JS Library v1.2.2 */
/*! matchMedia() polyfill - Test a CSS media type/query in JS. Authors & copyright (c) 2012: Scott Jehl, Paul Irish, Nicholas Zakas. Dual MIT/BSD license */
/*! NOTE: If you're already including a window.matchMedia polyfill via Modernizr or otherwise, you don't need this part */
window.matchMedia = window.matchMedia || (function(doc, undefined){
var bool,
docElem = doc.documentElement,
refNode = docElem.firstElementChild || docElem.firstChild,
// fakeBody required for <FF4 when executed in <head>
fakeBody = doc.createElement('body'),
div = doc.createElement('div');
div.id = 'mq-test-1';
div.style.cssText = "position:absolute;top:-100em";
fakeBody.style.background = "none";
fakeBody.appendChild(div);
return function(q){
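// inject a <style> scoped to the tested query; if the query matches,
// the rule applies and the probe div measures 42px wide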
div.innerHTML = '­<style media="'+q+'"> #mq-test-1 { width: 42px; }</style>';
docElem.insertBefore(fakeBody, refNode);
bool = div.offsetWidth == 42;
docElem.removeChild(fakeBody);
return { matches: bool, media: q };
};
})(document);
/*! Respond.js v1.1.0: min/max-width media query polyfill. (c) Scott Jehl. MIT/GPLv2 Lic. j.mp/respondjs */
(function( win ){
//exposed namespace
win.respond = {};
//define update even in native-mq-supporting browsers, to avoid errors
respond.update = function(){};
//expose media query support flag for external use
respond.mediaQueriesSupported = win.matchMedia && win.matchMedia( "only all" ).matches;
//if media queries are supported, exit here
if( respond.mediaQueriesSupported ){ return; }
//define vars
var doc = win.document,
docElem = doc.documentElement,
mediastyles = [],
rules = [],
appendedEls = [],
parsedSheets = {},
resizeThrottle = 30,
head = doc.getElementsByTagName( "head" )[0] || docElem,
base = doc.getElementsByTagName( "base" )[0],
links = head.getElementsByTagName( "link" ),
requestQueue = [],
//loop stylesheets, send text content to translate
ripCSS = function(){
var sheets = links,
sl = sheets.length,
i = 0,
//vars for loop:
sheet, href, media, isCSS;
for( ; i < sl; i++ ){
sheet = sheets[ i ],
href = sheet.href,
media = sheet.media,
isCSS = sheet.rel && sheet.rel.toLowerCase() === "stylesheet";
//only links plz and prevent re-parsing
if( !!href && isCSS && !parsedSheets[ href ] ){
// selectivizr exposes css through the rawCssText expando
if (sheet.styleSheet && sheet.styleSheet.rawCssText) {
translate( sheet.styleSheet.rawCssText, href, media );
parsedSheets[ href ] = true;
} else {
if( (!/^([a-zA-Z:]*\/\/)/.test( href ) && !base)
|| href.replace( RegExp.$1, "" ).split( "/" )[0] === win.location.host ){
requestQueue.push( {
href: href,
media: media
} );
}
}
}
}
makeRequests();
},
//recurse through request queue, get css text
makeRequests = function(){
if( requestQueue.length ){
var thisRequest = requestQueue.shift();
ajax( thisRequest.href, function( styles ){
translate( styles, thisRequest.href, thisRequest.media );
parsedSheets[ thisRequest.href ] = true;
makeRequests();
} );
}
},
//find media blocks in css text, convert to style blocks
translate = function( styles, href, media ){
var qs = styles.match( /@media[^\{]+\{([^\{\}]*\{[^\}\{]*\})+/gi ),
ql = qs && qs.length || 0,
//try to get CSS path
href = href.substring( 0, href.lastIndexOf( "/" )),
repUrls = function( css ){
return css.replace( /(url\()['"]?([^\/\)'"][^:\)'"]+)['"]?(\))/g, "$1" + href + "$2$3" );
},
useMedia = !ql && media,
//vars used in loop
i = 0,
j, fullq, thisq, eachq, eql;
//if path exists, tack on trailing slash
if( href.length ){ href += "/"; }
//if no internal queries exist, but media attr does, use that
//note: this currently lacks support for situations where a media attr is specified on a link AND
//its associated stylesheet has internal CSS media queries.
//In those cases, the media attribute will currently be ignored.
if( useMedia ){
ql = 1;
}
for( ; i < ql; i++ ){
j = 0;
//media attr
if( useMedia ){
fullq = media;
rules.push( repUrls( styles ) );
}
//parse for styles
else{
fullq = qs[ i ].match( /@media *([^\{]+)\{([\S\s]+?)$/ ) && RegExp.$1;
rules.push( RegExp.$2 && repUrls( RegExp.$2 ) );
}
eachq = fullq.split( "," );
eql = eachq.length;
for( ; j < eql; j++ ){
thisq = eachq[ j ];
mediastyles.push( {
media : thisq.split( "(" )[ 0 ].match( /(only\s+)?([a-zA-Z]+)\s?/ ) && RegExp.$2 || "all",
rules : rules.length - 1,
hasquery: thisq.indexOf("(") > -1,
minw : thisq.match( /\(min\-width:[\s]*([\s]*[0-9\.]+)(px|em)[\s]*\)/ ) && parseFloat( RegExp.$1 ) + ( RegExp.$2 || "" ),
maxw : thisq.match( /\(max\-width:[\s]*([\s]*[0-9\.]+)(px|em)[\s]*\)/ ) && parseFloat( RegExp.$1 ) + ( RegExp.$2 || "" )
} );
}
}
applyMedia();
},
lastCall,
resizeDefer,
// returns the value of 1em in pixels
getEmValue = function() {
var ret,
div = doc.createElement('div'),
body = doc.body,
fakeUsed = false;
div.style.cssText = "position:absolute;font-size:1em;width:1em";
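// measure a 1em probe div in pixels so that em-based media query bounds
// can be compared against the pixel viewport width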
if( !body ){
body = fakeUsed = doc.createElement( "body" );
body.style.background = "none";
}
body.appendChild( div );
docElem.insertBefore( body, docElem.firstChild );
ret = div.offsetWidth;
if( fakeUsed ){
docElem.removeChild( body );
}
else {
body.removeChild( div );
}
//also update eminpx before returning
ret = eminpx = parseFloat(ret);
return ret;
},
//cached container for 1em value, populated the first time it's needed
eminpx,
//enable/disable styles
applyMedia = function( fromResize ){
var name = "clientWidth",
docElemProp = docElem[ name ],
currWidth = doc.compatMode === "CSS1Compat" && docElemProp || doc.body[ name ] || docElemProp,
styleBlocks = {},
lastLink = links[ links.length-1 ],
now = (new Date()).getTime();
//throttle resize calls
if( fromResize && lastCall && now - lastCall < resizeThrottle ){
clearTimeout( resizeDefer );
resizeDefer = setTimeout( applyMedia, resizeThrottle );
return;
}
else {
lastCall = now;
}
for( var i in mediastyles ){
var thisstyle = mediastyles[ i ],
min = thisstyle.minw,
max = thisstyle.maxw,
minnull = min === null,
maxnull = max === null,
em = "em";
if( !!min ){
min = parseFloat( min ) * ( min.indexOf( em ) > -1 ? ( eminpx || getEmValue() ) : 1 );
}
if( !!max ){
max = parseFloat( max ) * ( max.indexOf( em ) > -1 ? ( eminpx || getEmValue() ) : 1 );
}
// apply when there is no media query constraint at all (no "()" part), or when a min/max width is present and the current width satisfies every bound that is set
if( !thisstyle.hasquery || ( !minnull || !maxnull ) && ( minnull || currWidth >= min ) && ( maxnull || currWidth <= max ) ){
if( !styleBlocks[ thisstyle.media ] ){
styleBlocks[ thisstyle.media ] = [];
}
styleBlocks[ thisstyle.media ].push( rules[ thisstyle.rules ] );
}
}
//remove any existing respond style element(s)
for( var i in appendedEls ){
if( appendedEls[ i ] && appendedEls[ i ].parentNode === head ){
head.removeChild( appendedEls[ i ] );
}
}
//inject active styles, grouped by media type
for( var i in styleBlocks ){
var ss = doc.createElement( "style" ),
css = styleBlocks[ i ].join( "\n" );
ss.type = "text/css";
ss.media = i;
//originally, ss was appended to a documentFragment and sheets were appended in bulk.
//this caused crashes in IE in a number of circumstances, such as when the HTML element had a bg image set, so appending beforehand seems best. Thanks to @dvelyk for the initial research on this one!
head.insertBefore( ss, lastLink.nextSibling );
if ( ss.styleSheet ){
ss.styleSheet.cssText = css;
}
else {
ss.appendChild( doc.createTextNode( css ) );
}
//push to appendedEls to track for later removal
appendedEls.push( ss );
}
},
//tweaked Ajax functions from Quirksmode
ajax = function( url, callback ) {
var req = xmlHttp();
if (!req){
return;
}
req.open( "GET", url, true );
req.onreadystatechange = function () {
if ( req.readyState != 4 || req.status != 200 && req.status != 304 ){
return;
}
callback( req.responseText );
}
if ( req.readyState == 4 ){
return;
}
req.send( null );
},
//define ajax obj
xmlHttp = (function() {
var xmlhttpmethod = false;
try {
xmlhttpmethod = new XMLHttpRequest();
}
catch( e ){<|fim▁hole|> }
return function(){
return xmlhttpmethod;
};
})();
//translate CSS
ripCSS();
//expose update for re-running respond later on
respond.update = ripCSS;
//adjust on resize
function callMedia(){
applyMedia( true );
}
if( win.addEventListener ){
win.addEventListener( "resize", callMedia, false );
}
else if( win.attachEvent ){
win.attachEvent( "onresize", callMedia );
}
})(this);
/**
* jQuery Scroll Top Plugin 1.0.0
*/
jQuery(document).ready(function ($) {
$('a[href="#scroll-top"]').click(function () {
$('html, body').animate({
scrollTop: 0
}, 'slow');
return false;
});
});
/*! http://mths.be/placeholder v2.0.7 by @mathias */
;(function(window, document, $) {
var isInputSupported = 'placeholder' in document.createElement('input'),
isTextareaSupported = 'placeholder' in document.createElement('textarea'),
prototype = $.fn,
valHooks = $.valHooks,
hooks,
placeholder;
if (isInputSupported && isTextareaSupported) {
placeholder = prototype.placeholder = function() {
return this;
};
placeholder.input = placeholder.textarea = true;
} else {
placeholder = prototype.placeholder = function() {
var $this = this;
$this
.filter((isInputSupported ? 'textarea' : ':input') + '[placeholder]')
.not('.placeholder')
.bind({
'focus.placeholder': clearPlaceholder,
'blur.placeholder': setPlaceholder
})
.data('placeholder-enabled', true)
.trigger('blur.placeholder');
return $this;
};
placeholder.input = isInputSupported;
placeholder.textarea = isTextareaSupported;
hooks = {
'get': function(element) {
var $element = $(element);
return $element.data('placeholder-enabled') && $element.hasClass('placeholder') ? '' : element.value;
},
'set': function(element, value) {
var $element = $(element);
if (!$element.data('placeholder-enabled')) {
return element.value = value;
}
if (value == '') {
element.value = value;
// Issue #56: Setting the placeholder causes problems if the element continues to have focus.
if (element != document.activeElement) {
// We can't use `triggerHandler` here because of dummy text/password inputs :(
setPlaceholder.call(element);
}
} else if ($element.hasClass('placeholder')) {
clearPlaceholder.call(element, true, value) || (element.value = value);
} else {
element.value = value;
}
// `set` can not return `undefined`; see http://jsapi.info/jquery/1.7.1/val#L2363
return $element;
}
};
isInputSupported || (valHooks.input = hooks);
isTextareaSupported || (valHooks.textarea = hooks);
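// install the val() hooks only in browsers lacking native placeholder support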
$(function() {
// Look for forms
$(document).delegate('form', 'submit.placeholder', function() {
// Clear the placeholder values so they don't get submitted
var $inputs = $('.placeholder', this).each(clearPlaceholder);
setTimeout(function() {
$inputs.each(setPlaceholder);
}, 10);
});
});
// Clear placeholder values upon page reload
$(window).bind('beforeunload.placeholder', function() {
$('.placeholder').each(function() {
this.value = '';
});
});
}
function args(elem) {
// Return an object of element attributes
var newAttrs = {},
rinlinejQuery = /^jQuery\d+$/;
$.each(elem.attributes, function(i, attr) {
if (attr.specified && !rinlinejQuery.test(attr.name)) {
newAttrs[attr.name] = attr.value;
}
});
return newAttrs;
}
function clearPlaceholder(event, value) {
var input = this,
$input = $(input);
if (input.value == $input.attr('placeholder') && $input.hasClass('placeholder')) {
if ($input.data('placeholder-password')) {
$input = $input.hide().next().show().attr('id', $input.removeAttr('id').data('placeholder-id'));
// If `clearPlaceholder` was called from `$.valHooks.input.set`
if (event === true) {
return $input[0].value = value;
}
$input.focus();
} else {
input.value = '';
$input.removeClass('placeholder');
input == document.activeElement && input.select();
}
}
}
function setPlaceholder() {
var $replacement,
input = this,
$input = $(input),
$origInput = $input,
id = this.id;
if (input.value == '') {
if (input.type == 'password') {
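// older IE forbids changing an input's type after creation, so the
// placeholder is shown in a cloned text input that is swapped in and
// out on focus/blur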
if (!$input.data('placeholder-textinput')) {
try {
$replacement = $input.clone().attr({ 'type': 'text' });
} catch(e) {
$replacement = $('<input>').attr($.extend(args(this), { 'type': 'text' }));
}
$replacement
.removeAttr('name')
.data({
'placeholder-password': true,
'placeholder-id': id
})
.bind('focus.placeholder', clearPlaceholder);
$input
.data({
'placeholder-textinput': $replacement,
'placeholder-id': id
})
.before($replacement);
}
$input = $input.removeAttr('id').hide().prev().attr('id', id).show();
// Note: `$input[0] != input` now!
}
$input.addClass('placeholder');
$input[0].value = $input.attr('placeholder');
} else {
$input.removeClass('placeholder');
}
}
}(this, document, jQuery));
/*global jQuery */
/*jshint multistr:true browser:true */
/*!
* FitVids 1.0
*
* Copyright 2011, Chris Coyier - http://css-tricks.com + Dave Rupert - http://daverupert.com
* Credit to Thierry Koblentz - http://www.alistapart.com/articles/creating-intrinsic-ratios-for-video/
* Released under the WTFPL license - http://sam.zoy.org/wtfpl/
*
* Date: Thu Sept 01 18:00:00 2011 -0500
*/
(function( $ ){
"use strict";
$.fn.fitVids = function( options ) {
var settings = {
customSelector: null
};
var div = document.createElement('div'),
ref = document.getElementsByTagName('base')[0] || document.getElementsByTagName('script')[0];
div.className = 'fit-vids-style';
div.innerHTML = '­<style> \
.fluid-width-video-wrapper { \
width: 100%; \
position: relative; \
padding: 0; \
} \
\
.fluid-width-video-wrapper iframe, \
.fluid-width-video-wrapper object, \
.fluid-width-video-wrapper embed { \
position: absolute; \
top: 0; \
left: 0; \
width: 100%; \
height: 100%; \
} \
</style>';
ref.parentNode.insertBefore(div,ref);
if ( options ) {
$.extend( settings, options );
}
return this.each(function(){
var selectors = [
"iframe[src*='player.vimeo.com']",
"iframe[src*='www.youtube.com']",
"iframe[src*='www.youtube-nocookie.com']",
"iframe[src*='fast.wistia.com']",
"embed"
];
if (settings.customSelector) {
selectors.push(settings.customSelector);
}
var $allVideos = $(this).find(selectors.join(','));
$allVideos.each(function(){
var $this = $(this);
if (this.tagName.toLowerCase() === 'embed' && $this.parent('object').length || $this.parent('.fluid-width-video-wrapper').length) { return; }
var height = ( this.tagName.toLowerCase() === 'object' || ($this.attr('height') && !isNaN(parseInt($this.attr('height'), 10))) ) ? parseInt($this.attr('height'), 10) : $this.height(),
width = !isNaN(parseInt($this.attr('width'), 10)) ? parseInt($this.attr('width'), 10) : $this.width(),
aspectRatio = height / width;
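// CSS percentage padding resolves against the element's width, so
// padding-top: (height / width * 100)% reserves the intrinsic aspect ratio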
if(!$this.attr('id')){
var videoID = 'fitvid' + Math.floor(Math.random()*999999);
$this.attr('id', videoID);
}
$this.wrap('<div class="fluid-width-video-wrapper"></div>').parent('.fluid-width-video-wrapper').css('padding-top', (aspectRatio * 100)+"%");
$this.removeAttr('height').removeAttr('width');
});
});
};
})( jQuery );
/*!
* Mobile Menu
*/
(function($) {
var current = $('.main-nav li.current-menu-item a').html();
if( typeof current == 'undefined' || current === null ) {
current = $('.main-nav li.current_page_item a').html();
}
if( $('span').hasClass('custom-mobile-menu-title') ) {
current = $('span.custom-mobile-menu-title').html();
}
else if( typeof current == 'undefined' || current === null ) {
if( $('body').hasClass('home') ) {
if( $('#logo span').hasClass('site-name') ) {
current = $('#logo .site-name a').html();
}
else {
current = $('#logo img').attr('alt');
}
}
else {
if( $('body').hasClass('woocommerce') ) {
current = $('h1.page-title').html();
}
else if( $('body').hasClass('archive') ) {
current = $('h6.title-archive').html();
}
else if( $('body').hasClass('search-results') ) {
current = $('h6.title-search-results').html();
}
else if( $('body').hasClass('page-template-blog-excerpt-php') ) {
current = $('.current_page_item').text();
}
else if( $('body').hasClass('page-template-blog-php') ) {
current = $('.current_page_item').text();
}
else {
current = $('h1.post-title').html();
}
}
}
$('.main-nav').append('<a id="responsive_menu_button"></a>');
$('.main-nav').prepend('<div id="responsive_current_menu_item">' + (current || 'Menu') + '</div>');
$('a#responsive_menu_button, #responsive_current_menu_item').click(function(){
$('.js .main-nav .menu').slideToggle( function() {
if( $(this).is(':visible') ) {
$('a#responsive_menu_button').addClass('responsive-toggle-open');
}
else {
$('a#responsive_menu_button').removeClass('responsive-toggle-open');
$('.js .main-nav .menu').removeAttr('style');
}
});
});
})(jQuery);
// Close the mobile menu when clicked outside of it.
(function($) {
$('html').click(function() {
// Check if the menu is open, close in that case.
if( $('a#responsive_menu_button').hasClass('responsive-toggle-open') ){
$('.js .main-nav .menu').slideToggle( function() {
$('a#responsive_menu_button').removeClass('responsive-toggle-open');
$('.js .main-nav .menu').removeAttr('style');
});
}
})
})(jQuery);
// Stop propagation on click on menu.
jQuery('.main-nav').click(function(event){
var pathname = window.location.pathname;
if( pathname != '/wp-admin/customize.php' ){
event.stopPropagation();
}
});
// Placeholder
jQuery(function(){
jQuery('input[placeholder], textarea[placeholder]').placeholder();
});
// FitVids
jQuery(document).ready(function(){
// Target your #container, #wrapper etc.
jQuery("#wrapper").fitVids();
});
// Have a custom video player? We now have a customSelector option where you can add your own specific video vendor selector (mileage may vary depending on vendor and fluidity of player):
// jQuery("#thing-with-videos").fitVids({ customSelector: "iframe[src^='http://example.com'], iframe[src^='http://example.org']"});
// Selectors are comma separated, just like CSS
// Note: This will be the quickest way to add your own custom vendor as well as test your player's compatibility with FitVids.<|fim▁end|> | xmlhttpmethod = new ActiveXObject( "Microsoft.XMLHTTP" ); |
<|file_name|>app.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <andreas.motl@elmyra.de>
from kotori.version import __VERSION__
from pyramid.config import Configurator
def main(global_config, **settings):
"""This function returns a Pyramid WSGI application."""
settings['SOFTWARE_VERSION'] = __VERSION__
config = Configurator(settings=settings)
# Addons
config.include('pyramid_jinja2')
# http://docs.pylonsproject.org/projects/pyramid-jinja2/en/latest/#adding-or-overriding-a-renderer
config.add_jinja2_renderer('.html')
config.include('cornice')
# Views and routes
config.add_static_view('static/app', 'static/app', cache_max_age=0)
config.add_static_view('static/lib', 'static/lib', cache_max_age=60 * 24)
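# NOTE: cache_max_age is expressed in seconds, so 60 * 24 caches the lib
# assets for 24 minutes while the app assets above are served uncached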
config.add_route('index', '/')
config.scan()<|fim▁hole|> return config.make_wsgi_app()<|fim▁end|> | |
<|file_name|>af.js<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2003-2019, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
<|fim▁hole|>*/
CKEDITOR.plugins.setLang( 'newpage', 'af', {
toolbar: 'Nuwe bladsy'
} );<|fim▁end|> | |
<|file_name|>contain_substring_matcher.go<|end_file_name|><|fim▁begin|>package matchers
import (
"fmt"
"github.com/chai2010/gopkg/database/leveldb/internal/gomega/format"
"strings"
)
type ContainSubstringMatcher struct {
Substr string
Args []interface{}
}
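// When Args is non-empty, Substr is treated as a fmt.Sprintf format string
// and is formatted lazily each time the target substring is needed.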
func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
}
return strings.Contains(actualString, matcher.stringToMatch()), nil
}<|fim▁hole|> stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...)
}
return stringToMatch
}
func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to contain substring", matcher.stringToMatch())
}
func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to contain substring", matcher.stringToMatch())
}<|fim▁end|> |
func (matcher *ContainSubstringMatcher) stringToMatch() string {
stringToMatch := matcher.Substr
if len(matcher.Args) > 0 { |
<|file_name|>base.py<|end_file_name|><|fim▁begin|>'''Base module to handle the collection and the output of statistical data.'''
import logging
import time
import multiprocessing as mp
import queue
from collections import Counter
log = logging.getLogger(__name__)
current_milli_time = lambda: int(round(time.time() * 1000))
def is_number(val):
'''Function to check if the value is a number.'''
try:
float(val)
return True
except ValueError:
return False
class Logstats(object):
'''This class bridges the data in input (collected in the internal `stats`
counter) to a generic output (`log`, by default).
'''
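# Minimal usage sketch (illustrative; the reported rate depends on timing):
#
# stats = Logstats()
# stats.update(items=10)
# stats() # emits e.g. "items: 10, items.speed: <rate>" at INFO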
def __init__(self, msg=None, emit_func=None, logger=log, level='INFO',
timeout=1, queue=None):
'''Initialize the instance.
If `emit_func` is defined, `logger` and `level` are ignored.
Keyword arguments:
msg -- a string used to format the stats (by default a comma-separated
list of key/value pairs is emitted)
emit_func -- a function to emit the formatted output
(default: logging.log)
logger -- the logger to use to log the formatted output (default:
the module-level `log` instance)
level -- the log level (default: INFO)
timeout -- interval in seconds between emissions (default: 1)
queue -- optional queue; when set, collected stats are pushed to it
instead of being emitted directly
'''
self.stats = Counter()
self.msg = msg
self.logger = logger
self.level = level
self.old_stats = {}
self.emit_func = emit_func
self.last = current_milli_time()
self.timeout = timeout
self.queue = queue
self.main_queue = None
if not logger.isEnabledFor(logging.getLevelName(level)):
logger.warning('Logger is not enabled to log at level {}.'.format(level))
def __getitem__(self, key):
return self.stats[key]
def __setitem__(self, key, val):
self.stats[key] = val
def update(self, *args, **kwargs):
self.stats.update(*args, **kwargs)
def _get_speed(self, new, old, delta):
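# `delta` is in milliseconds; dividing by 1e3 converts the count delta
# into an events-per-second rate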
return int(round(float((new - old)) / (delta / 1e3)))
def _consume_queue(self):
if self.main_queue:
while True:
try:
self.stats.update(self.main_queue.get_nowait())
except queue.Empty:
return
def get_stats(self, delta):
self._consume_queue()
stats = self.stats
if hasattr(self.stats, '__call__'):
stats = self.stats(delta)
else:
stats = stats.copy()<|fim▁hole|> speed = dict(('{}.speed'.format(k),
self._get_speed(stats[k],
self.old_stats.get(k, 0),
delta))
for k in stats if is_number(stats[k]))
self.old_stats = stats
stats.update(speed)
return stats
def get_child(self):
if not self.main_queue:
self.main_queue = mp.Queue()
return Logstats(queue=self.main_queue)
def format_msg(self, stats):
if self.msg:
msg = self.msg.format(**stats)
else:
msg = ', '.join('{}: {}'.format(k, stats[k])
for k in sorted(stats))
return msg
def emit(self, msg):
if self.emit_func:
self.emit_func(msg)
else:
self.logger.log(getattr(logging, self.level), msg)
def __call__(self):
if self.queue:
self.queue.put(self.stats)
self.stats = Counter()
else:
delta = current_milli_time() - self.last
stats = self.get_stats(delta)
if stats:
self.emit(self.format_msg(stats))
self.last = current_milli_time()<|fim▁end|> | |
<|file_name|>totaltime.go<|end_file_name|><|fim▁begin|>// totaltime shows the total play time of the video content rooted at current
// directory.
package main
import (
"errors"
"fmt"
"math"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
var (
ignoreSuffixes = []string{
".srt",
}
videoSuffixes = []string{
".avi",
".m4v",
".mkv",
".mp4",
".webm",
".wmv",
}
)
func determineDuration(path string) (time.Duration, error) {
cmd := exec.Command("ffprobe", "-show_format", path)
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "$ ffprobe -show_format %s\n", path)
fmt.Fprintln(os.Stderr, string(out))
return 0, err<|fim▁hole|> start := time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC)
lines := strings.Split(string(out), "\n")
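// ffprobe prints the "Duration: HH:MM:SS.cc" banner line on stderr,
// which CombinedOutput captures together with stdout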
for _, line := range lines {
if strings.Contains(line, "Duration") {
fields := strings.Split(strings.TrimSpace(line), ",")
durString := strings.TrimPrefix(fields[0], "Duration: ")
t, err := time.Parse("15:04:05.00", durString)
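// NOTE: parsing with a clock layout only handles durations below 24 hours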
if err != nil {
return 0, err
}
return t.Sub(start), nil
}
}
return 0, fmt.Errorf("%s: could not parse duration", path)
}
func totalTime() error {
var total time.Duration
err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() && !info.Mode().IsRegular() {
return fmt.Errorf("%s: neither directory nor normal file", path)
}
// ignore directories
if info.IsDir() {
return nil
}
// ignore some suffixes
for _, suffix := range ignoreSuffixes {
if strings.HasSuffix(path, suffix) {
return nil
}
}
// process video suffixes
for _, suffix := range videoSuffixes {
if strings.HasSuffix(path, suffix) {
dur, err := determineDuration(path)
if err != nil {
return err
}
if dur > math.MaxInt64-total {
return errors.New("more than 290 years of material")
}
total += dur
return nil
}
}
return fmt.Errorf("%s: unknown suffix", path)
})
if err != nil {
return err
}
fmt.Println(total)
return nil
}
func fatal(err error) {
fmt.Fprintf(os.Stderr, "%s: error: %s\n", os.Args[0], err)
os.Exit(1)
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: %s\n", os.Args[0])
fmt.Fprintf(os.Stderr, "Show the total play time of the video content rooted at current directory.\n")
os.Exit(2)
}
func main() {
if len(os.Args) != 1 {
usage()
}
if err := totalTime(); err != nil {
fatal(err)
}
}<|fim▁end|> | } |
<|file_name|>angular.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for Angular JS 1.3+
// Project: http://angularjs.org
// Definitions by: Diego Vilar <http://github.com/diegovilar>
// Definitions: https://github.com/borisyankov/DefinitelyTyped
/// <reference path="../jquery/jquery.d.ts" />
declare var angular: angular.IAngularStatic;
// Support for painless dependency injection
interface Function {
$inject?: string[];
}
// Collapse angular into ng
import ng = angular;
// Support AMD require
declare module 'angular' {
export = angular;
}
///////////////////////////////////////////////////////////////////////////////
// ng module (angular.js)
///////////////////////////////////////////////////////////////////////////////
declare module angular {
// not directly implemented, but ensures that constructed class implements $get
interface IServiceProviderClass {
new (...args: any[]): IServiceProvider;
}
interface IServiceProviderFactory {
(...args: any[]): IServiceProvider;
}
// All service providers extend this interface
interface IServiceProvider {
$get: any;
}
interface IAngularBootstrapConfig {
strictDi?: boolean;
}
///////////////////////////////////////////////////////////////////////////
// AngularStatic
// see http://docs.angularjs.org/api
///////////////////////////////////////////////////////////////////////////
interface IAngularStatic {
bind(context: any, fn: Function, ...args: any[]): Function;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: string, modules?: string, config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: string, modules?: Function, config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: string, modules?: string[], config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: JQuery, modules?: string, config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: JQuery, modules?: Function, config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: JQuery, modules?: string[], config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: Element, modules?: string, config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: Element, modules?: Function, config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: Element, modules?: string[], config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: Document, modules?: string, config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: Document, modules?: Function, config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Use this function to manually start up angular application.
*
* @param element DOM element which is the root of angular application.
* @param modules An array of modules to load into the application.
* Each item in the array should be the name of a predefined module or a (DI annotated)
* function that will be invoked by the injector as a run block.
* @param config an object for defining configuration options for the application. The following keys are supported:
* - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code.
*/
bootstrap(element: Document, modules?: string[], config?: IAngularBootstrapConfig): auto.IInjectorService;
/**
* Creates a deep copy of source, which should be an object or an array.
*
* - If no destination is supplied, a copy of the object or array is created.
* - If a destination is provided, all of its elements (for array) or properties (for objects) are deleted and then all elements/properties from the source are copied to it.
* - If source is not an object or array (inc. null and undefined), source is returned.
* - If source is identical to 'destination' an exception will be thrown.
*
* @param source The source that will be used to make a copy. Can be any type, including primitives, null, and undefined.
* @param destination Destination into which the source is copied. If provided, must be of the same type as source.
*/
copy<T>(source: T, destination?: T): T;
/**
* Wraps a raw DOM element or HTML string as a jQuery element.
*
* If jQuery is available, angular.element is an alias for the jQuery function. If jQuery is not available, angular.element delegates to Angular's built-in subset of jQuery, called "jQuery lite" or "jqLite."
*/
element: IAugmentedJQueryStatic;
equals(value1: any, value2: any): boolean;
extend(destination: any, ...sources: any[]): any;
/**
* Invokes the iterator function once for each item in obj collection, which can be either an object or an array. The iterator function is invoked with iterator(value, key), where value is the value of an object property or an array element and key is the object property key or array element index. Specifying a context for the function is optional.
*
* It is worth noting that .forEach does not iterate over inherited properties because it filters using the hasOwnProperty method.
*
* @param obj Object to iterate over.
* @param iterator Iterator function.
* @param context Object to become context (this) for the iterator function.
*/
forEach<T>(obj: T[], iterator: (value: T, key: number) => any, context?: any): any;
/**
* Invokes the iterator function once for each item in obj collection, which can be either an object or an array. The iterator function is invoked with iterator(value, key), where value is the value of an object property or an array element and key is the object property key or array element index. Specifying a context for the function is optional.
*
* It is worth noting that .forEach does not iterate over inherited properties because it filters using the hasOwnProperty method.
*
* @param obj Object to iterate over.
* @param iterator Iterator function.
* @param context Object to become context (this) for the iterator function.
*/
forEach<T>(obj: { [index: string]: T; }, iterator: (value: T, key: string) => any, context?: any): any;
/**
* Invokes the iterator function once for each item in obj collection, which can be either an object or an array. The iterator function is invoked with iterator(value, key), where value is the value of an object property or an array element and key is the object property key or array element index. Specifying a context for the function is optional.
*
* It is worth noting that .forEach does not iterate over inherited properties because it filters using the hasOwnProperty method.
*
* @param obj Object to iterate over.
* @param iterator Iterator function.
* @param context Object to become context (this) for the iterator function.
*/
forEach(obj: any, iterator: (value: any, key: any) => any, context?: any): any;
fromJson(json: string): any;
identity(arg?: any): any;
injector(modules?: any[]): auto.IInjectorService;
isArray(value: any): boolean;
isDate(value: any): boolean;
isDefined(value: any): boolean;
isElement(value: any): boolean;
isFunction(value: any): boolean;
isNumber(value: any): boolean;
isObject(value: any): boolean;
isString(value: any): boolean;
isUndefined(value: any): boolean;
lowercase(str: string): string;
/**
* The angular.module is a global place for creating, registering and retrieving Angular modules. All modules (angular core or 3rd party) that should be available to an application must be registered using this mechanism.
*
* When passed two or more arguments, a new module is created. If passed only one argument, an existing module (the name passed as the first argument to module) is retrieved.
*
* @param name The name of the module to create or retrieve.
* @param requires The names of modules this module depends on. If specified then new module is being created. If unspecified then the module is being retrieved for further configuration.
* @param configFn Optional configuration function for the module.
*/
module(
name: string,
requires?: string[],
configFn?: Function): IModule;
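// Usage note: angular.module('myApp', []) registers a new module, while
// angular.module('myApp') with a single argument retrieves the existing one.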
noop(...args: any[]): void;
reloadWithDebugInfo(): void;
toJson(obj: any, pretty?: boolean): string;
uppercase(str: string): string;
version: {
full: string;
major: number;
minor: number;
dot: number;
codeName: string;
};
}
///////////////////////////////////////////////////////////////////////////
// Module
// see http://docs.angularjs.org/api/angular.Module
///////////////////////////////////////////////////////////////////////////
interface IModule {
animation(name: string, animationFactory: Function): IModule;
animation(name: string, inlineAnnotatedFunction: any[]): IModule;
animation(object: Object): IModule;
/**
* Use this method to register work which needs to be performed on module loading.
*
* @param configFn Execute this function on module load. Useful for service configuration.
*/
config(configFn: Function): IModule;
/**
* Use this method to register work which needs to be performed on module loading.
*
* @param inlineAnnotatedFunction Execute this function on module load. Useful for service configuration.
*/
config(inlineAnnotatedFunction: any[]): IModule;
/**
* Register a constant service, such as a string, a number, an array, an object or a function, with the $injector. Unlike value it can be injected into a module configuration function (see config) and it cannot be overridden by an Angular decorator.
*
* @param name The name of the constant.
* @param value The constant value.
*/
constant(name: string, value: any): IModule;
constant(object: Object): IModule;
/**
* The $controller service is used by Angular to create new controllers.
*
* This provider allows controller registration via the register method.
*
* @param name Controller name, or an object map of controllers where the keys are the names and the values are the constructors.
* @param controllerConstructor Controller constructor fn (optionally decorated with DI annotations in the array notation).
*/
controller(name: string, controllerConstructor: Function): IModule;
/**
* The $controller service is used by Angular to create new controllers.
*
* This provider allows controller registration via the register method.
*
* @param name Controller name, or an object map of controllers where the keys are the names and the values are the constructors.
* @param inlineAnnotatedConstructor Controller constructor fn, decorated with DI annotations in the array notation.
*/
controller(name: string, inlineAnnotatedConstructor: any[]): IModule;
controller(object: Object): IModule;
/**
* Register a new directive with the compiler.
*
* @param name Name of the directive in camel-case (e.g. ngBind, which will match as ng-bind)
* @param directiveFactory An injectable directive factory function.
*/
directive(name: string, directiveFactory: IDirectiveFactory): IModule;
/**
* Register a new directive with the compiler.
*
* @param name Name of the directive in camel-case (e.g. ngBind, which will match as ng-bind)
* @param inlineAnnotatedFunction An injectable directive factory function, with DI annotations in the array notation.
*/
directive(name: string, inlineAnnotatedFunction: any[]): IModule;
directive(object: Object): IModule;
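// A sketch of registering a directive, assuming a hypothetical module named
// 'myApp' (the camel-case name 'myTooltip' matches 'my-tooltip' in markup):
//
//   angular.module('myApp').directive('myTooltip', function () {
//     return {
//       restrict: 'A',
//       link: function (scope, element, attrs) {
//         element.attr('title', attrs['myTooltip']);
//       }
//     };
//   });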
/**
* Register a service factory, which will be called to return the service instance. This is short for registering a service where its provider consists of only a $get property, which is the given service factory function. You should use $provide.factory(getFn) if you do not need to configure your service in a provider.
*
* @param name The name of the instance.
* @param $getFn The $getFn for the instance creation. Internally this is a short hand for $provide.provider(name, {$get: $getFn}).
*/
factory(name: string, $getFn: Function): IModule;
/**
* Register a service factory, which will be called to return the service instance. This is short for registering a service where its provider consists of only a $get property, which is the given service factory function. You should use $provide.factory(getFn) if you do not need to configure your service in a provider.
*
* @param name The name of the instance.
* @param inlineAnnotatedFunction The $getFn for the instance creation. Internally this is a short hand for $provide.provider(name, {$get: $getFn}).
*/
factory(name: string, inlineAnnotatedFunction: any[]): IModule;
factory(object: Object): IModule;
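// A sketch of a factory registration, assuming the same hypothetical 'myApp'
// module; the object returned by the factory becomes the singleton instance:
//
//   angular.module('myApp').factory('greeter', function () {
//     return { greet: function (name) { return 'Hello, ' + name; } };
//   });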
filter(name: string, filterFactoryFunction: Function): IModule;
filter(name: string, inlineAnnotatedFunction: any[]): IModule;
filter(object: Object): IModule;
provider(name: string, serviceProviderFactory: IServiceProviderFactory): IModule;
provider(name: string, serviceProviderConstructor: IServiceProviderClass): IModule;
provider(name: string, inlineAnnotatedConstructor: any[]): IModule;
provider(name: string, providerObject: IServiceProvider): IModule;
provider(object: Object): IModule;
/**
* Run blocks are the closest thing in Angular to the main method. A run block is the code which needs to run to kickstart the application. It is executed after all of the services have been configured and the injector has been created. Run blocks typically contain code which is hard to unit-test, and for this reason should be declared in isolated modules, so that they can be ignored in the unit-tests.
*/
run(initializationFunction: Function): IModule;
/**
* Run blocks are the closest thing in Angular to the main method. A run block is the code which needs to run to kickstart the application. It is executed after all of the services have been configured and the injector has been created. Run blocks typically contain code which is hard to unit-test, and for this reason should be declared in isolated modules, so that they can be ignored in the unit-tests.
*/
run(inlineAnnotatedFunction: any[]): IModule;
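// A sketch contrasting config and run blocks (hypothetical 'myApp' module):
// only providers and constants are injectable at config time, while run
// blocks receive fully instantiated services:
//
//   angular.module('myApp')
//     .config(['$locationProvider', function ($locationProvider) {
//       $locationProvider.html5Mode(true);
//     }])
//     .run(['$rootScope', function ($rootScope) {
//       $rootScope.appStarted = Date.now();
//     }]);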
service(name: string, serviceConstructor: Function): IModule;
service(name: string, inlineAnnotatedConstructor: any[]): IModule;
service(object: Object): IModule;
/**
* Register a value service with the $injector, such as a string, a number, an array, an object or a function. This is short for registering a service where its provider's $get property is a factory function that takes no arguments and returns the value service.
* Value services are similar to constant services, except that they cannot be injected into a module configuration function (see config) but they can be overridden by an Angular decorator.
*
* @param name The name of the instance.
* @param value The value.
*/
value(name: string, value: any): IModule;
value(object: Object): IModule;
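// A sketch showing the constant/value distinction (hypothetical names):
// constants are injectable into config blocks and cannot be decorated,
// whereas values are injectable into services and can be overridden by a
// decorator:
//
//   angular.module('myApp')
//     .constant('API_ROOT', '/api/v1')  // available at config time
//     .value('retryCount', 3);          // available at run time only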
// Properties
name: string;
requires: string[];
}
///////////////////////////////////////////////////////////////////////////
// Attributes
// see http://docs.angularjs.org/api/ng.$compile.directive.Attributes
///////////////////////////////////////////////////////////////////////////
interface IAttributes {
/**
* This is necessary to be able to access the scoped attributes. It's not very elegant
* because you have to use attrs['foo'] instead of attrs.foo, but I don't know of a better way.
* This should really be limited to return string, but that creates this problem: http://stackoverflow.com/q/17201854/165656
*/
[name: string]: any;
/**
* Adds the CSS class value specified by the classVal parameter to the
* element. If animations are enabled then an animation will be triggered
* for the class addition.
*/
$addClass(classVal: string): void;
/**
* Removes the CSS class value specified by the classVal parameter from the
* element. If animations are enabled then an animation will be triggered for
* the class removal.
*/
$removeClass(classVal: string): void;
/**
* Set DOM element attribute value.
*/
$set(key: string, value: any): void;
/**
* Observes an interpolated attribute.
* The observer function will be invoked once during the next $digest
* following compilation. The observer is then invoked whenever the
* interpolated value changes.
*/
$observe(name: string, fn: (value?: any) => any): Function;
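// A sketch of observing an interpolated attribute inside a directive link
// function (assumes attrs is the IAttributes instance passed to link):
//
//   var unobserve = attrs.$observe('myAttr', function (value) {
//     console.log('interpolated value is now', value);
//   });
//   // later: unobserve(); // stop watching the attribute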
/**
* A map of DOM element attribute names to the normalized name. This is needed
* to do reverse lookup from normalized name back to actual name.
*/
$attr: Object;
}
/**
* form.FormController - type in module ng
* see https://docs.angularjs.org/api/ng/type/form.FormController
*/
interface IFormController {
/**
* Indexer which should return ng.INgModelController for most properties but cannot because of "All named properties must be assignable to string indexer type" constraint - see https://github.com/Microsoft/TypeScript/issues/272
*/
[name: string]: any;
$pristine: boolean;
$dirty: boolean;
$valid: boolean;
$invalid: boolean;
$submitted: boolean;
$error: any;
$addControl(control: INgModelController): void;
$removeControl(control: INgModelController): void;
$setValidity(validationErrorKey: string, isValid: boolean, control: INgModelController): void;
$setDirty(): void;
$setPristine(): void;
$commitViewValue(): void;
$rollbackViewValue(): void;
$setSubmitted(): void;
$setUntouched(): void;
}
///////////////////////////////////////////////////////////////////////////
// NgModelController
// see http://docs.angularjs.org/api/ng.directive:ngModel.NgModelController
///////////////////////////////////////////////////////////////////////////
interface INgModelController {
$render(): void;
$setValidity(validationErrorKey: string, isValid: boolean): void;
// The documentation states that viewValue and modelValue are strings, but other
// types do work and are commonly used.
$setViewValue(value: any, trigger?: string): void;
$setPristine(): void;
$validate(): void;
$setTouched(): void;
$setUntouched(): void;
$rollbackViewValue(): void;
$commitViewValue(): void;
$isEmpty(value: any): boolean;
$viewValue: any;
$modelValue: any;
$parsers: IModelParser[];
$formatters: IModelFormatter[];
$viewChangeListeners: IModelViewChangeListener[];
$error: any;
$name: string;
$touched: boolean;
$untouched: boolean;
$validators: IModelValidators;
$asyncValidators: IAsyncModelValidators;
$pending: any;
$pristine: boolean;
$dirty: boolean;
$valid: boolean;
$invalid: boolean;
}
interface IModelValidators {
[index: string]: (...args: any[]) => boolean;
}
interface IAsyncModelValidators {
[index: string]: (...args: any[]) => IPromise<boolean>;
}
interface IModelParser {
(value: any): any;
}
interface IModelFormatter {
(value: any): any;
}
interface IModelViewChangeListener {
(): void;
}
/**
* $rootScope - $rootScopeProvider - service in module ng
* see https://docs.angularjs.org/api/ng/type/$rootScope.Scope and https://docs.angularjs.org/api/ng/service/$rootScope
*/
interface IRootScopeService {
[index: string]: any;
$apply(): any;
$apply(exp: string): any;
$apply(exp: (scope: IScope) => any): any;
$applyAsync(): any;
$applyAsync(exp: string): any;
$applyAsync(exp: (scope: IScope) => any): any;
$broadcast(name: string, ...args: any[]): IAngularEvent;
$destroy(): void;
$digest(): void;
$emit(name: string, ...args: any[]): IAngularEvent;
$eval(): any;
$eval(expression: string, locals?: Object): any;
$eval(expression: (scope: IScope) => any, locals?: Object): any;
$evalAsync(): void;
$evalAsync(expression: string): void;
$evalAsync(expression: (scope: IScope) => any): void;
// isolate defaults to false, per the implementation's argument-checking strategy
$new(isolate?: boolean, parent?: IScope): IScope;
/**
* Listens on events of a given type. See $emit for discussion of event life cycle.
*
* The event listener function format is: function(event, args...).
*
* @param name Event name to listen on.
* @param listener Function to call when the event is emitted.
*/
$on(name: string, listener: (event: IAngularEvent, ...args: any[]) => any): Function;
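// A sketch of scope event wiring (assumes $scope is an IScope instance and
// the event name is hypothetical):
//
//   var unbind = $scope.$on('itemAdded', function (event, item) {
//     console.log('got', item, 'from scope', event.targetScope.$id);
//   });
//   $scope.$emit('itemAdded', { id: 1 }); // propagates up the scope chain
//   // later: unbind(); // deregister the listener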
$watch(watchExpression: string, listener?: string, objectEquality?: boolean): Function;
$watch(watchExpression: string, listener?: (newValue: any, oldValue: any, scope: IScope) => any, objectEquality?: boolean): Function;
$watch(watchExpression: (scope: IScope) => any, listener?: string, objectEquality?: boolean): Function;
$watch(watchExpression: (scope: IScope) => any, listener?: (newValue: any, oldValue: any, scope: IScope) => any, objectEquality?: boolean): Function;
$watchCollection(watchExpression: string, listener: (newValue: any, oldValue: any, scope: IScope) => any): Function;
$watchCollection(watchExpression: (scope: IScope) => any, listener: (newValue: any, oldValue: any, scope: IScope) => any): Function;
$watchGroup(watchExpressions: any[], listener: (newValue: any, oldValue: any, scope: IScope) => any): Function;
$watchGroup(watchExpressions: { (scope: IScope): any }[], listener: (newValue: any, oldValue: any, scope: IScope) => any): Function;
$parent: IScope;
$root: IRootScopeService;
$id: number;
// Hidden members
$$isolateBindings: any;
$$phase: any;
}
interface IScope extends IRootScopeService { }
/**
* $scope for ngRepeat directive.
* see https://docs.angularjs.org/api/ng/directive/ngRepeat
*/
interface IRepeatScope extends IScope {
/**
* iterator offset of the repeated element (0..length-1).
*/
$index: number;
/**
* true if the repeated element is first in the iterator.
*/
$first: boolean;
/**
* true if the repeated element is between the first and last in the iterator.
*/
$middle: boolean;
/**
* true if the repeated element is last in the iterator.
*/
$last: boolean;
/**
* true if the iterator position $index is even (otherwise false).
*/
$even: boolean;
/**
* true if the iterator position $index is odd (otherwise false).
*/
$odd: boolean;
}
interface IAngularEvent {
/**
* the scope on which the event was $emit-ed or $broadcast-ed.
*/
targetScope: IScope;
/**
* the scope that is currently handling the event. Once the event propagates through the scope hierarchy, this property is set to null.
*/
currentScope: IScope;
/**
* name of the event.
*/
name: string;
/**
* calling stopPropagation function will cancel further event propagation (available only for events that were $emit-ed).
*/
stopPropagation?: Function;
/**
* calling preventDefault sets defaultPrevented flag to true.
*/
preventDefault: Function;
/**
* true if preventDefault was called.
*/
defaultPrevented: boolean;
}
///////////////////////////////////////////////////////////////////////////
// WindowService
// see http://docs.angularjs.org/api/ng.$window
///////////////////////////////////////////////////////////////////////////
interface IWindowService extends Window {
[key: string]: any;
}
///////////////////////////////////////////////////////////////////////////
// BrowserService
// TODO undocumented, so we need to get it from the source code
///////////////////////////////////////////////////////////////////////////
interface IBrowserService {
[key: string]: any;
}
///////////////////////////////////////////////////////////////////////////
// TimeoutService
// see http://docs.angularjs.org/api/ng.$timeout
///////////////////////////////////////////////////////////////////////////
interface ITimeoutService {
(func: Function, delay?: number, invokeApply?: boolean): IPromise<any>;
cancel(promise: IPromise<any>): boolean;
}
///////////////////////////////////////////////////////////////////////////
// IntervalService
// see http://docs.angularjs.org/api/ng.$interval
///////////////////////////////////////////////////////////////////////////
interface IIntervalService {
(func: Function, delay: number, count?: number, invokeApply?: boolean): IPromise<any>;
cancel(promise: IPromise<any>): boolean;
}
///////////////////////////////////////////////////////////////////////////
// AngularProvider
// see http://docs.angularjs.org/api/ng/provider/$animateProvider
///////////////////////////////////////////////////////////////////////////
interface IAnimateProvider {
/**
* Registers a new injectable animation factory function.
*
* @param name The name of the animation.
* @param factory The factory function that will be executed to return the animation object.
*/
register(name: string, factory: () => IAnimateCallbackObject): void;
/**
* Gets and/or sets the CSS class expression that is checked when performing an animation.
*
* @param expression The className expression which will be checked against all animations.
* @returns The current CSS className expression value. If null then there is no expression value.
*/
classNameFilter(expression?: RegExp): RegExp;
}
/**
* The animation object which contains callback functions for each event that is expected to be animated.
*/
interface IAnimateCallbackObject {
eventFn(element: Node, doneFn: () => void): Function;
}
///////////////////////////////////////////////////////////////////////////
// FilterService
// see http://docs.angularjs.org/api/ng.$filter
// see http://docs.angularjs.org/api/ng.$filterProvider
///////////////////////////////////////////////////////////////////////////
interface IFilterService {
(name: string): Function;
}
interface IFilterProvider extends IServiceProvider {
register(name: string, filterFactory: Function): IServiceProvider;
}
///////////////////////////////////////////////////////////////////////////
// LocaleService
// see http://docs.angularjs.org/api/ng.$locale
///////////////////////////////////////////////////////////////////////////
interface ILocaleService {
id: string;
// These are not documented
// Check angular's i18n files for examples
NUMBER_FORMATS: ILocaleNumberFormatDescriptor;
DATETIME_FORMATS: ILocaleDateTimeFormatDescriptor;
pluralCat: (num: any) => string;
}
interface ILocaleNumberFormatDescriptor {
DECIMAL_SEP: string;
GROUP_SEP: string;
PATTERNS: ILocaleNumberPatternDescriptor[];
CURRENCY_SYM: string;
}
interface ILocaleNumberPatternDescriptor {
minInt: number;
minFrac: number;
maxFrac: number;
posPre: string;
posSuf: string;
negPre: string;
negSuf: string;
gSize: number;
lgSize: number;
}
interface ILocaleDateTimeFormatDescriptor {
MONTH: string[];
SHORTMONTH: string[];
DAY: string[];
SHORTDAY: string[];
AMPMS: string[];
medium: string;
short: string;
fullDate: string;
longDate: string;
mediumDate: string;
shortDate: string;
mediumTime: string;
shortTime: string;
}
///////////////////////////////////////////////////////////////////////////
// LogService
// see http://docs.angularjs.org/api/ng.$log
// see http://docs.angularjs.org/api/ng.$logProvider
///////////////////////////////////////////////////////////////////////////
interface ILogService {
debug: ILogCall;
error: ILogCall;
info: ILogCall;
log: ILogCall;
warn: ILogCall;
}
interface ILogProvider {
debugEnabled(): boolean;
debugEnabled(enabled: boolean): ILogProvider;
}
// We define this as a separate interface so we can reopen it later for
// the ngMock module.
interface ILogCall {
(...args: any[]): void;
}
///////////////////////////////////////////////////////////////////////////
// ParseService
// see http://docs.angularjs.org/api/ng.$parse
// see http://docs.angularjs.org/api/ng.$parseProvider
///////////////////////////////////////////////////////////////////////////
interface IParseService {
(expression: string): ICompiledExpression;
}
interface IParseProvider {
logPromiseWarnings(): boolean;
logPromiseWarnings(value: boolean): IParseProvider;
unwrapPromises(): boolean;
unwrapPromises(value: boolean): IParseProvider;
}
interface ICompiledExpression {
(context: any, locals?: any): any;
// If a value is not provided, undefined is going to be used, since the implementation
// does not check the parameter. Let's force a value for consistency. If the consumer
// wants to undefine it, the undefined value must be passed explicitly.
assign(context: any, value: any): any;
}
/**
* $location - $locationProvider - service in module ng
* see https://docs.angularjs.org/api/ng/service/$location
*/
interface ILocationService {
absUrl(): string;
hash(): string;
hash(newHash: string): ILocationService;
host(): string;
/**
* Return path of current url
*/
path(): string;
/**
* Change path when called with parameter and return $location.
* Note: Path should always begin with forward slash (/), this method will add the forward slash if it is missing.
*
* @param path New path
*/
path(path: string): ILocationService;
port(): number;
protocol(): string;
replace(): ILocationService;
/**
* Return search part (as object) of current url
*/
search(): any;
/**
* Change search part when called with parameter and return $location.
*
* @param search When called with a single argument the method acts as a setter, setting the search component of $location to the specified value.
*
* If the argument is a hash object containing an array of values, these values will be encoded as duplicate search parameters in the url.
*/
search(search: any): ILocationService;
/**
* Change search part when called with parameter and return $location.
*
* @param search New search params
* @param paramValue If search is a string or a Number, then paramValue will override only a single search property. If paramValue is null, the property specified via the first argument will be deleted. If paramValue is an array, it will override the property of the search component of $location specified via the first argument. If paramValue is true, the property specified via the first argument will be added with no value nor trailing equal sign.
*/
search(search: string, paramValue: string|number|string[]|boolean): ILocationService;
state(): any;
state(state: any): ILocationService;
url(): string;
url(url: string): ILocationService;
}
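// A sketch of reading and writing the URL via $location (assumes an injected
// ILocationService); setters return the service, so calls can be chained:
//
//   $location.path('/orders').search({ page: 2, open: true });
//   var params = $location.search(); // e.g. { page: 2, open: true }
//   $location.search('page', null);  // removes the 'page' parameter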
interface ILocationProvider extends IServiceProvider {
hashPrefix(): string;
hashPrefix(prefix: string): ILocationProvider;
html5Mode(): boolean;
// The documentation states that the parameter is a string, but the
// implementation tests it as a boolean, which makes more sense
// since this is a toggle
html5Mode(active: boolean): ILocationProvider;
html5Mode(mode: { enabled?: boolean; requireBase?: boolean; rewriteLinks?: boolean; }): ILocationProvider;
}
///////////////////////////////////////////////////////////////////////////
// DocumentService
// see http://docs.angularjs.org/api/ng.$document
///////////////////////////////////////////////////////////////////////////
interface IDocumentService extends IAugmentedJQuery {}
///////////////////////////////////////////////////////////////////////////
// ExceptionHandlerService
// see http://docs.angularjs.org/api/ng.$exceptionHandler
///////////////////////////////////////////////////////////////////////////
interface IExceptionHandlerService {
(exception: Error, cause?: string): void;
}
///////////////////////////////////////////////////////////////////////////
// RootElementService
// see http://docs.angularjs.org/api/ng.$rootElement
///////////////////////////////////////////////////////////////////////////
interface IRootElementService extends JQuery {}
interface IQResolveReject<T> {
(): void;
(value: T): void;
}
/**
* $q - service in module ng
* A promise/deferred implementation inspired by Kris Kowal's Q.
* See http://docs.angularjs.org/api/ng/service/$q
*/
interface IQService {
new (resolver: (resolve: IQResolveReject<any>) => any): IPromise<any>;
new (resolver: (resolve: IQResolveReject<any>, reject: IQResolveReject<any>) => any): IPromise<any>;
new <T>(resolver: (resolve: IQResolveReject<T>, reject: IQResolveReject<any>) => any): IPromise<T>;
/**
* Combines multiple promises into a single promise that is resolved when all of the input promises are resolved.
*
* Returns a single promise that will be resolved with an array of values, each value corresponding to the promise at the same index in the promises array. If any of the promises is resolved with a rejection, this resulting promise will be rejected with the same rejection value.
*
* @param promises An array of promises.
*/
all(promises: IPromise<any>[]): IPromise<any[]>;
/**
* Combines multiple promises into a single promise that is resolved when all of the input promises are resolved.
*
* Returns a single promise that will be resolved with a hash of values, each value corresponding to the promise at the same key in the promises hash. If any of the promises is resolved with a rejection, this resulting promise will be rejected with the same rejection value.
*
* @param promises A hash of promises.
*/
all(promises: { [id: string]: IPromise<any>; }): IPromise<{ [id: string]: any; }>;
/**
* Creates a Deferred object which represents a task which will finish in the future.
*/
defer<T>(): IDeferred<T>;
/**
* Creates a promise that is resolved as rejected with the specified reason. This api should be used to forward rejection in a chain of promises. If you are dealing with the last promise in a promise chain, you don't need to worry about it.
*
* When comparing deferreds/promises to the familiar behavior of try/catch/throw, think of reject as the throw keyword in JavaScript. This also means that if you "catch" an error via a promise error callback and you want to forward the error to the promise derived from the current promise, you have to "rethrow" the error by returning a rejection constructed via reject.
*
* @param reason Constant, message, exception or an object representing the rejection reason.
*/
reject(reason?: any): IPromise<any>;
/**
* Wraps an object that might be a value or a (3rd party) then-able promise into a $q promise. This is useful when you are dealing with an object that might or might not be a promise, or if the promise comes from a source that can't be trusted.
*
* @param value Value or a promise
*/
when<T>(value: IPromise<T>|T): IPromise<T>;
/**
* Wraps an object that might be a value or a (3rd party) then-able promise into a $q promise. This is useful when you are dealing with an object that might or might not be a promise, or if the promise comes from a source that can't be trusted.
*
* @param value Value or a promise
*/
when(): IPromise<void>;
}
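// A sketch of typical $q usage (assumes $q is the injected IQService):
//
//   var deferred = $q.defer<string>();
//   deferred.promise.then(function (value) { console.log(value); });
//   deferred.resolve('done');
//
//   // combine several promises; rejects as soon as any input rejects
//   $q.all([$q.when(1), $q.when(2)]).then(function (results) {
//     console.log(results); // [1, 2]
//   });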
interface IPromise<T> {
/**
* Regardless of when the promise was or will be resolved or rejected, then calls one of the success or error callbacks asynchronously as soon as the result is available. The callbacks are called with a single argument: the result or rejection reason. Additionally, the notify callback may be called zero or more times to provide a progress indication, before the promise is resolved or rejected.
*
* This method returns a new promise which is resolved or rejected via the return value of the successCallback, errorCallback. It also notifies via the return value of the notifyCallback method. The promise can not be resolved or rejected from the notifyCallback method.
*/
then<TResult>(successCallback: (promiseValue: T) => IHttpPromise<TResult>|IPromise<TResult>|TResult, errorCallback?: (reason: any) => any, notifyCallback?: (state: any) => any): IPromise<TResult>;
/**
* Shorthand for promise.then(null, errorCallback)
*/
catch<TResult>(onRejected: (reason: any) => IHttpPromise<TResult>|IPromise<TResult>|TResult): IPromise<TResult>;
/**
* Allows you to observe either the fulfillment or rejection of a promise, but to do so without modifying the final value. This is useful to release resources or do some clean-up that needs to be done whether the promise was rejected or resolved. See the full specification for more information.
*
* Because finally is a reserved word in JavaScript and reserved keywords are not supported as property names by ES3, you'll need to invoke the method like promise['finally'](callback) to make your code IE8 and Android 2.x compatible.
*/
finally<TResult>(finallyCallback: () => any): IPromise<TResult>;
}
interface IDeferred<T> {
resolve(value?: T): void;
reject(reason?: any): void;
notify(state?: any): void;
promise: IPromise<T>;
}
///////////////////////////////////////////////////////////////////////////
// AnchorScrollService
// see http://docs.angularjs.org/api/ng.$anchorScroll
///////////////////////////////////////////////////////////////////////////
interface IAnchorScrollService {
(): void;
yOffset: any;
}
interface IAnchorScrollProvider extends IServiceProvider {
disableAutoScrolling(): void;
}
///////////////////////////////////////////////////////////////////////////
// CacheFactoryService
// see http://docs.angularjs.org/api/ng.$cacheFactory
///////////////////////////////////////////////////////////////////////////
interface ICacheFactoryService {
// Let's not force the optionsMap to have the capacity member. Even though
// it's the ONLY option considered by the implementation today, a consumer
// might find it useful to associate some other options with the cache object.
//(cacheId: string, optionsMap?: { capacity: number; }): CacheObject;
(cacheId: string, optionsMap?: { capacity: number; }): ICacheObject;
// Methods below are not documented
info(): any;
get(cacheId: string): ICacheObject;
}
interface ICacheObject {
info(): {
id: string;
size: number;
// Not guaranteed to be present, since it's a non-mandatory option
//capacity: number;
};
put<T>(key: string, value?: T): T;
get(key: string): any;
remove(key: string): void;
removeAll(): void;
destroy(): void;
}
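// A sketch of creating and using a cache (assumes $cacheFactory is the
// injected ICacheFactoryService; the cache id is hypothetical):
//
//   var cache = $cacheFactory('sessionCache', { capacity: 100 });
//   cache.put('user', { name: 'ada' });
//   var user = cache.get('user');
//   cache.info(); // e.g. { id: 'sessionCache', size: 1 }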
///////////////////////////////////////////////////////////////////////////
// CompileService
// see http://docs.angularjs.org/api/ng.$compile
// see http://docs.angularjs.org/api/ng.$compileProvider
///////////////////////////////////////////////////////////////////////////
interface ICompileService {
(element: string, transclude?: ITranscludeFunction, maxPriority?: number): ITemplateLinkingFunction;
(element: Element, transclude?: ITranscludeFunction, maxPriority?: number): ITemplateLinkingFunction;
(element: JQuery, transclude?: ITranscludeFunction, maxPriority?: number): ITemplateLinkingFunction;
}
interface ICompileProvider extends IServiceProvider {
directive(name: string, directiveFactory: Function): ICompileProvider;
// Undocumented, but it is there...
directive(directivesMap: any): ICompileProvider;
aHrefSanitizationWhitelist(): RegExp;
aHrefSanitizationWhitelist(regexp: RegExp): ICompileProvider;
imgSrcSanitizationWhitelist(): RegExp;
imgSrcSanitizationWhitelist(regexp: RegExp): ICompileProvider;
debugInfoEnabled(enabled?: boolean): any;
}
interface ICloneAttachFunction {
// Let's hint but not force cloneAttachFn's signature
(clonedElement?: JQuery, scope?: IScope): any;
}
// This corresponds to the "publicLinkFn" returned by $compile.
interface ITemplateLinkingFunction {
(scope: IScope, cloneAttachFn?: ICloneAttachFunction): IAugmentedJQuery;
}
// This corresponds to $transclude (and also the transclude function passed to link).
interface ITranscludeFunction {
// If the scope is provided, then the cloneAttachFn must be as well.
(scope: IScope, cloneAttachFn: ICloneAttachFunction): IAugmentedJQuery;
// If one argument is provided, then it's assumed to be the cloneAttachFn.
(cloneAttachFn?: ICloneAttachFunction): IAugmentedJQuery;
}
///////////////////////////////////////////////////////////////////////////
// ControllerService
// see http://docs.angularjs.org/api/ng.$controller
// see http://docs.angularjs.org/api/ng.$controllerProvider
///////////////////////////////////////////////////////////////////////////
interface IControllerService {
// Although the documentation doesn't state this, locals are optional
(controllerConstructor: Function, locals?: any): any;
(controllerName: string, locals?: any): any;
}
interface IControllerProvider extends IServiceProvider {
register(name: string, controllerConstructor: Function): void;
register(name: string, dependencyAnnotatedConstructor: any[]): void;
allowGlobals(): void;
}
/**
* HttpService
* see http://docs.angularjs.org/api/ng/service/$http
*/
interface IHttpService {
/**
* Object describing the request to be made and how it should be processed.
*/
<T>(config: IRequestConfig): IHttpPromise<T>;
/**
* Shortcut method to perform GET request.
*
* @param url Relative or absolute URL specifying the destination of the request
* @param config Optional configuration object
*/
get<T>(url: string, config?: IRequestShortcutConfig): IHttpPromise<T>;
/**
* Shortcut method to perform DELETE request.
*
* @param url Relative or absolute URL specifying the destination of the request
* @param config Optional configuration object
*/
delete<T>(url: string, config?: IRequestShortcutConfig): IHttpPromise<T>;
/**
* Shortcut method to perform HEAD request.
*
* @param url Relative or absolute URL specifying the destination of the request
* @param config Optional configuration object
*/
head<T>(url: string, config?: IRequestShortcutConfig): IHttpPromise<T>;
/**
* Shortcut method to perform JSONP request.
*
* @param url Relative or absolute URL specifying the destination of the request
* @param config Optional configuration object
*/
jsonp<T>(url: string, config?: IRequestShortcutConfig): IHttpPromise<T>;
/**
* Shortcut method to perform POST request.
*
* @param url Relative or absolute URL specifying the destination of the request
* @param data Request content
* @param config Optional configuration object
*/
post<T>(url: string, data: any, config?: IRequestShortcutConfig): IHttpPromise<T>;
/**
* Shortcut method to perform PUT request.
*
* @param url Relative or absolute URL specifying the destination of the request
* @param data Request content
* @param config Optional configuration object
*/
put<T>(url: string, data: any, config?: IRequestShortcutConfig): IHttpPromise<T>;
/**
* Shortcut method to perform PATCH request.
*
* @param url Relative or absolute URL specifying the destination of the request
* @param data Request content
* @param config Optional configuration object
*/
patch<T>(url: string, data: any, config?: IRequestShortcutConfig): IHttpPromise<T>;
/**
* Runtime equivalent of the $httpProvider.defaults property. Allows configuration of default headers, withCredentials as well as request and response transformations.
*/
defaults: IRequestConfig;
/**
* Array of config objects for currently pending requests. This is primarily meant to be used for debugging purposes.
*/
pendingRequests: any[];
}
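// A sketch of the shortcut methods (assumes $http is the injected
// IHttpService and a hypothetical /api/items endpoint):
//
//   $http.get('/api/items', { params: { page: 1 } })
//     .then(function (response) { return response.data; },
//           function (response) { console.error(response.status); });
//
//   $http.post('/api/items', { name: 'new item' });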
/**
* Object describing the request to be made and how it should be processed.
* see http://docs.angularjs.org/api/ng/service/$http#usage
*/
interface IRequestShortcutConfig {
/**
* {Object.<string|Object>}
* Map of strings or objects which will be turned to ?key1=value1&key2=value2 after the url. If the value is not a string, it will be JSONified.
*/
params?: any;
/**
* Map of strings or functions which return strings representing HTTP headers to send to the server. If the return value of a function is null, the header will not be sent.
*/
headers?: any;
/**
* Name of HTTP header to populate with the XSRF token.
*/
xsrfHeaderName?: string;
/**
* Name of cookie containing the XSRF token.
*/
xsrfCookieName?: string;
/**
* {boolean|Cache}
* If true, a default $http cache will be used to cache the GET request; otherwise, if a cache instance built with $cacheFactory is provided, that cache will be used for caching.
*/
cache?: any;
/**
* Whether to set the withCredentials flag on the XHR object. See [requests with credentials](https://developer.mozilla.org/en/http_access_control#section_5) for more information.
*/
withCredentials?: boolean;
/**
* {string|Object}
* Data to be sent as the request message data.
*/
data?: any;
/**
* {function(data, headersGetter)|Array.<function(data, headersGetter)>}
* Transform function or an array of such functions. The transform function takes the http request body and headers and returns its transformed (typically serialized) version.
*/
transformRequest?: any;
/**
* {function(data, headersGetter)|Array.<function(data, headersGetter)>}
* Transform function or an array of such functions. The transform function takes the http response body and headers and returns its transformed (typically deserialized) version.
*/
transformResponse?: any;
/**
* {number|Promise}
* Timeout in milliseconds, or promise that should abort the request when resolved.
*/
timeout?: any;
/**
* See XMLHttpRequest.responseType.
*/
responseType?: string;
}
/**
* Object describing the request to be made and how it should be processed.
* see http://docs.angularjs.org/api/ng/service/$http#usage
*/
interface IRequestConfig extends IRequestShortcutConfig {
/**
* HTTP method (e.g. 'GET', 'POST', etc)
*/
method: string;
/**
* Absolute or relative URL of the resource that is being requested.
*/
url: string;
}
interface IHttpHeadersGetter {
(): { [name: string]: string; };
(headerName: string): string;
}
interface IHttpPromiseCallback<T> {
(data: T, status: number, headers: IHttpHeadersGetter, config: IRequestConfig): void;
}
interface IHttpPromiseCallbackArg<T> {
data?: T;
status?: number;
headers?: (headerName: string) => string;
config?: IRequestConfig;
statusText?: string;
}
interface IHttpPromise<T> extends IPromise<IHttpPromiseCallbackArg<T>> {
success(callback: IHttpPromiseCallback<T>): IHttpPromise<T>;
error(callback: IHttpPromiseCallback<any>): IHttpPromise<T>;
then<TResult>(successCallback: (response: IHttpPromiseCallbackArg<T>) => IPromise<TResult>|TResult, errorCallback?: (response: IHttpPromiseCallbackArg<any>) => any): IPromise<TResult>;
}
/**
* Object that controls the defaults for $http provider
* https://docs.angularjs.org/api/ng/service/$http#defaults
*/
interface IHttpProviderDefaults {
xsrfCookieName?: string;
xsrfHeaderName?: string;
withCredentials?: boolean;
headers?: {
common?: any;
post?: any;
put?: any;
patch?: any;
}
}
interface IHttpProvider extends IServiceProvider {
defaults: IHttpProviderDefaults;
interceptors: any[];
useApplyAsync(): boolean;
useApplyAsync(value: boolean): IHttpProvider;
}
///////////////////////////////////////////////////////////////////////////
// HttpBackendService
// see http://docs.angularjs.org/api/ng.$httpBackend
// You should never need to use this service directly.
///////////////////////////////////////////////////////////////////////////
interface IHttpBackendService {
// XXX Perhaps define callback signature in the future
(method: string, url: string, post?: any, callback?: Function, headers?: any, timeout?: number, withCredentials?: boolean): void;
}
///////////////////////////////////////////////////////////////////////////
// InterpolateService
// see http://docs.angularjs.org/api/ng.$interpolate
// see http://docs.angularjs.org/api/ng.$interpolateProvider
///////////////////////////////////////////////////////////////////////////
interface IInterpolateService {
(text: string, mustHaveExpression?: boolean, trustedContext?: string, allOrNothing?: boolean): IInterpolationFunction;
endSymbol(): string;
startSymbol(): string;
}
interface IInterpolationFunction {
(context: any): string;
}
interface IInterpolateProvider extends IServiceProvider {
startSymbol(): string;
startSymbol(value: string): IInterpolateProvider;
endSymbol(): string;
endSymbol(value: string): IInterpolateProvider;
}
///////////////////////////////////////////////////////////////////////////
// TemplateCacheService
// see http://docs.angularjs.org/api/ng.$templateCache
///////////////////////////////////////////////////////////////////////////
interface ITemplateCacheService extends ICacheObject {}
///////////////////////////////////////////////////////////////////////////
// SCEService
// see http://docs.angularjs.org/api/ng.$sce
///////////////////////////////////////////////////////////////////////////
interface ISCEService {
getTrusted(type: string, mayBeTrusted: any): any;
getTrustedCss(value: any): any;
getTrustedHtml(value: any): any;
getTrustedJs(value: any): any;
getTrustedResourceUrl(value: any): any;
getTrustedUrl(value: any): any;
parse(type: string, expression: string): (context: any, locals: any) => any;
parseAsCss(expression: string): (context: any, locals: any) => any;
parseAsHtml(expression: string): (context: any, locals: any) => any;
parseAsJs(expression: string): (context: any, locals: any) => any;
parseAsResourceUrl(expression: string): (context: any, locals: any) => any;
parseAsUrl(expression: string): (context: any, locals: any) => any;
trustAs(type: string, value: any): any;
trustAsHtml(value: any): any;
trustAsJs(value: any): any;
trustAsResourceUrl(value: any): any;
trustAsUrl(value: any): any;
isEnabled(): boolean;
}
///////////////////////////////////////////////////////////////////////////
// SCEProvider
// see http://docs.angularjs.org/api/ng.$sceProvider
///////////////////////////////////////////////////////////////////////////
interface ISCEProvider extends IServiceProvider {
enabled(value: boolean): void;
}
///////////////////////////////////////////////////////////////////////////
// SCEDelegateService
// see http://docs.angularjs.org/api/ng.$sceDelegate
///////////////////////////////////////////////////////////////////////////
interface ISCEDelegateService {
getTrusted(type: string, mayBeTrusted: any): any;
trustAs(type: string, value: any): any;
valueOf(value: any): any;
}
///////////////////////////////////////////////////////////////////////////
// SCEDelegateProvider
// see http://docs.angularjs.org/api/ng.$sceDelegateProvider
///////////////////////////////////////////////////////////////////////////
interface ISCEDelegateProvider extends IServiceProvider {
resourceUrlBlacklist(blacklist: any[]): void;
resourceUrlWhitelist(whitelist: any[]): void;
}
/**
* $templateRequest service
* see http://docs.angularjs.org/api/ng/service/$templateRequest
*/
interface ITemplateRequestService {
/**
* Downloads a template using $http and, upon success, stores the
* contents inside of $templateCache.
*
* If the HTTP request fails or the response data of the HTTP request is
* empty then a $compile error will be thrown (unless<|fim▁hole|> * @param ignoreRequestError Whether or not to ignore the exception
* when the request fails or the template is
* empty.
*
* @return A promise whose value is the template content.
*/
(tpl: string, ignoreRequestError?: boolean): IPromise<string>;
/**
* Total number of pending template requests being downloaded.
* @type {number}
*/
totalPendingRequests: number;
}
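// A sketch of fetching a template (assumes $templateRequest is the injected
// ITemplateRequestService; the template URL is hypothetical):
//
//   $templateRequest('partials/widget.html').then(function (html) {
//     element.html(html); // template is now also stored in $templateCache
//   });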
///////////////////////////////////////////////////////////////////////////
// Directive
// see http://docs.angularjs.org/api/ng.$compileProvider#directive
// and http://docs.angularjs.org/guide/directive
///////////////////////////////////////////////////////////////////////////
interface IDirectiveFactory {
(...args: any[]): IDirective;
}
interface IDirectiveLinkFn {
(
scope: IScope,
instanceElement: IAugmentedJQuery,
instanceAttributes: IAttributes,
controller: any,
transclude: ITranscludeFunction
): void;
}
interface IDirectivePrePost {
pre?: IDirectiveLinkFn;
post?: IDirectiveLinkFn;
}
interface IDirectiveCompileFn {
(
templateElement: IAugmentedJQuery,
templateAttributes: IAttributes,
transclude: ITranscludeFunction
): IDirectivePrePost;
}
interface IDirective {
compile?: IDirectiveCompileFn;
controller?: any;
controllerAs?: string;
bindToController?: boolean;
link?: IDirectiveLinkFn | IDirectivePrePost;
name?: string;
priority?: number;
replace?: boolean;
require?: any;
restrict?: string;
scope?: any;
template?: any;
templateUrl?: any;
terminal?: boolean;
transclude?: any;
}
/**
* angular.element
* when calling angular.element, angular returns a jQuery object,
* augmented with additional methods like e.g. scope.
* see: http://docs.angularjs.org/api/angular.element
*/
interface IAugmentedJQueryStatic extends JQueryStatic {
(selector: string, context?: any): IAugmentedJQuery;
(element: Element): IAugmentedJQuery;
(object: {}): IAugmentedJQuery;
(elementArray: Element[]): IAugmentedJQuery;
(object: JQuery): IAugmentedJQuery;
(func: Function): IAugmentedJQuery;
(array: any[]): IAugmentedJQuery;
(): IAugmentedJQuery;
}
interface IAugmentedJQuery extends JQuery {
// TODO: events, how to define?
//$destroy
find(selector: string): IAugmentedJQuery;
find(element: any): IAugmentedJQuery;
find(obj: JQuery): IAugmentedJQuery;
controller(): any;
controller(name: string): any;
injector(): any;
scope(): IScope;
isolateScope(): IScope;
inheritedData(key: string, value: any): JQuery;
inheritedData(obj: { [key: string]: any; }): JQuery;
inheritedData(key?: string): any;
}
///////////////////////////////////////////////////////////////////////
// AnimateService
// see http://docs.angularjs.org/api/ng.$animate
///////////////////////////////////////////////////////////////////////
interface IAnimateService {
addClass(element: JQuery, className: string, done?: Function): IPromise<any>;
enter(element: JQuery, parent: JQuery, after: JQuery, done?: Function): void;
leave(element: JQuery, done?: Function): void;
move(element: JQuery, parent: JQuery, after: JQuery, done?: Function): void;
removeClass(element: JQuery, className: string, done?: Function): void;
}
///////////////////////////////////////////////////////////////////////////
// AUTO module (angular.js)
///////////////////////////////////////////////////////////////////////////
export module auto {
///////////////////////////////////////////////////////////////////////
// InjectorService
// see http://docs.angularjs.org/api/AUTO.$injector
///////////////////////////////////////////////////////////////////////
interface IInjectorService {
annotate(fn: Function): string[];
annotate(inlineAnnotatedFunction: any[]): string[];
get(name: string): any;
has(name: string): boolean;
instantiate(typeConstructor: Function, locals?: any): any;
invoke(inlineAnnotatedFunction: any[]): any;
invoke(func: Function, context?: any, locals?: any): any;
}
///////////////////////////////////////////////////////////////////////
// ProvideService
// see http://docs.angularjs.org/api/AUTO.$provide
///////////////////////////////////////////////////////////////////////
interface IProvideService {
// Documentation says it returns the registered instance, but the actual
// implementation does not return anything.
// constant(name: string, value: any): any;
/**
* Register a constant service, such as a string, a number, an array, an object or a function, with the $injector. Unlike value it can be injected into a module configuration function (see config) and it cannot be overridden by an Angular decorator.
*
* @param name The name of the constant.
* @param value The constant value.
*/
constant(name: string, value: any): void;
/**
* Register a service decorator with the $injector. A service decorator intercepts the creation of a service, allowing it to override or modify the behaviour of the service. The object returned by the decorator may be the original service, or a new service object which replaces or wraps and delegates to the original service.
*
* @param name The name of the service to decorate.
* @param decorator This function will be invoked when the service needs to be instantiated and should return the decorated service instance. The function is called using the injector.invoke method and is therefore fully injectable. Local injection arguments:
*
* $delegate - The original service instance, which can be monkey patched, configured, decorated or delegated to.
*/
decorator(name: string, decorator: Function): void;
/**
* Register a service decorator with the $injector. A service decorator intercepts the creation of a service, allowing it to override or modify the behaviour of the service. The object returned by the decorator may be the original service, or a new service object which replaces or wraps and delegates to the original service.
*
* @param name The name of the service to decorate.
* @param inlineAnnotatedFunction This function will be invoked when the service needs to be instantiated and should return the decorated service instance. The function is called using the injector.invoke method and is therefore fully injectable. Local injection arguments:
*
* $delegate - The original service instance, which can be monkey patched, configured, decorated or delegated to.
*/
decorator(name: string, inlineAnnotatedFunction: any[]): void;
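// A sketch of decorating a core service at config time (here $log, with the
// decorator registered from a hypothetical module's config block):
//
//   angular.module('myApp').config(['$provide', function ($provide) {
//     $provide.decorator('$log', ['$delegate', function ($delegate) {
//       var warn = $delegate.warn;
//       $delegate.warn = function () {
//         warn.apply($delegate, arguments); // keep the original behaviour
//       };
//       return $delegate;
//     }]);
//   }]);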
factory(name: string, serviceFactoryFunction: Function): IServiceProvider;
factory(name: string, inlineAnnotatedFunction: any[]): IServiceProvider;
provider(name: string, provider: IServiceProvider): IServiceProvider;
provider(name: string, serviceProviderConstructor: Function): IServiceProvider;
service(name: string, constructor: Function): IServiceProvider;
value(name: string, value: any): IServiceProvider;
}
}
}<|fim▁end|> | * {ignoreRequestError} is set to true).
*
* @param tpl The template URL. |
<|file_name|>test_inifile.py<|end_file_name|><|fim▁begin|>from textwrap import dedent
import inspect
from collections import OrderedDict
from clusterjob import JobScript
import pytest
import logging
try:
from ConfigParser import Error as ConfigParserError
except ImportError:
from configparser import Error as ConfigParserError
# built-in fixtures: tmpdir
# pytest-capturelog fixtures: caplog
def get_methods(obj):
"""Get list of methods of object or class"""
return sorted([k for (k, v) in inspect.getmembers(obj, inspect.isroutine)])
# isroutine works in Python 2 and Python 3, while ismethod does not work in
# Python 3 if obj is a class (since the methods are not *bound*)
def get_attributes(obj, hidden=False):
"""Get list of attributes of object"""
methods = get_methods(obj)
attribs = sorted([k for k in obj.__dict__ if k not in methods])
if hidden:
return attribs
else:
return [attr for attr in attribs if not attr.startswith('_')]
def default_class_attr_val(attr):
"""Return the default value for the given class attribute"""
defaults = JobScript._attributes.copy()
defaults.update(JobScript._protected_attributes)
try:
return defaults[attr]
except KeyError:
if attr == 'resources':
return OrderedDict()
else:
return None
def check_attributes(obj, expected):
for key in expected:
assert getattr(obj, key) == expected[key]
def check_resources(obj, expected):
for key in expected:
assert obj.resources[key] == expected[key]
def example_inidata():
inidata = dedent(r'''
[Attributes]
remote = login.cluster.edu
backend = pbs
shell = /bin/sh
cache_folder = cache
prologue =
ssh {remote} 'mkdir -p {rootdir}/{workdir}'
rsync -av {workdir}/ {remote}:{rootdir}/{workdir}
epilogue = rsync -av {remote}:{rootdir}/{workdir}/ {workdir}
rootdir = ~/jobs/
workdir = run001
max_sleep_interval = 60
# the following is a new attribute
text = Hello World
[Resources]
queue = exec
nodes = 1
threads = 12
mem = 10000
''')
expected_attribs = {
'remote': 'login.cluster.edu',
'backend': 'pbs',
'shell': '/bin/sh',
'prologue' : "ssh {remote} 'mkdir -p {rootdir}/{workdir}'\n"
"rsync -av {workdir}/ {remote}:{rootdir}/{workdir}",
'epilogue': "rsync -av {remote}:{rootdir}/{workdir}/ {workdir}",
'rootdir': '~/jobs',
'workdir': 'run001',
'max_sleep_interval': 60,
'text': "Hello World"
}
expected_resources = {
'queue': 'exec',
'nodes': 1,
'threads': 12,
'mem': 10000,
}
return inidata, expected_attribs, expected_resources
def test_read_inifile(tmpdir):
p = tmpdir.join("default.ini")
ini_filename = str(p)
attribs = {}
resources = {}
def attr_setter(k,v):
attribs[k] = v
def rsrc_setter(k,v):
resources[k] = v
inidata = ''
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "must contain at least one of the sections" in str(exc_info.value)
inidata = dedent(r'''
max_sleep_interval = 60
''')
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "File contains no section headers" in str(exc_info.value)
inidata = dedent(r'''
[Attributes]
max_sleep_interval = 60
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
inidata = dedent(r'''
[Resources]
threads = 2
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
assert resources['threads'] == 2
inidata = dedent(r'''
[Attributes]
shell = /bin/bash
[Resources]
nodes = 1
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
assert attribs['shell'] == '/bin/bash'
assert resources['threads'] == 2
assert resources['nodes'] == 1
# both section headers and keys are case sensitive
inidata = dedent(r'''
[Attributes]
Max_Sleep_Interval = 120
Shell = /bin/bash
[Resources]
Nodes = 1
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
assert attribs['Max_Sleep_Interval'] == '120' # no conversion to int!
inidata = dedent(r'''
[Attributes]
shell = /bin/bash
[Resources]
nodes = 1
[Schedulers]
cluster = login.cluster.com
''')
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "Invalid section 'Schedulers'" in str(exc_info.value)
inidata = dedent(r'''
[Attributes]
resources = {1:2}
''')
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "not allowed" in str(exc_info.value)
# quotes are not stripped out!
inidata = dedent(r'''
[Attributes]
text = "This is a text"
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['text'] == '"This is a text"'
def test_read_defaults(caplog, tmpdir):
JobScript.read_defaults() # reset
caplog.setLevel(logging.DEBUG, logger='clusterjob')
jobscript = JobScript(body="echo 'Hello'", jobname="test")
assert get_attributes(jobscript) == ['aux_scripts', 'body', 'resources']
assert get_attributes(jobscript.__class__) == ['backend', 'backends',
'cache_folder', 'cache_prefix', 'epilogue', 'filename',
'max_sleep_interval', 'prologue', 'remote', 'resources', 'rootdir',
'scp', 'shell', 'ssh', 'workdir']
for attr in get_attributes(jobscript.__class__):
if attr not in ['resources', 'backends']:
assert getattr(jobscript, attr) == default_class_attr_val(attr)
inidata, expected_attribs, expected_resources = example_inidata()
p = tmpdir.join("default.ini")
p.write(inidata)
ini_filename = str(p)
# Setting class defaults before instantiation sets both the attributes and
# the resources
JobScript.read_defaults(ini_filename)
jobscript = JobScript(body="echo '{text}'", jobname="test")
assert get_attributes(jobscript) == ['aux_scripts', 'body', 'resources']
check_attributes(jobscript, expected_attribs)
check_resources(jobscript, expected_resources)
assert str(jobscript) == dedent(r'''
#!/bin/sh
#PBS -l nodes=1:ppn=12
#PBS -q exec
#PBS -l mem=10000m
#PBS -N test
echo 'Hello World'
''').strip()
# calling read_defaults without filename argument resets the class, and
# thus also changes the attributes of an existing instance
JobScript.read_defaults()
check_resources(jobscript, expected_resources)
for attr in get_attributes(jobscript.__class__):
if attr not in ['resources', 'backends']:
assert getattr(jobscript, attr) == default_class_attr_val(attr)
with pytest.raises(KeyError) as exc_info:
str(jobscript)
assert "no matching attribute or resource entry" in str(exc_info.value)
jobscript.text = 'Hello World' # instance attribute
assert str(jobscript) == dedent(r'''
#!/bin/bash
#SBATCH --partition=exec
#SBATCH --nodes=1
#SBATCH --cpus-per-task=12
#SBATCH --mem=10000
#SBATCH --job-name=test
echo 'Hello World'
''').strip()
# Setting class defaults after instantiation sets the attributes, but not
# the resources
jobscript = JobScript(body="echo '{text}'", jobname="test")
JobScript.read_defaults(ini_filename)
assert str(jobscript) == dedent(r'''
#!/bin/sh
#PBS -N test
echo 'Hello World'
''').strip()
def test_read_settings(caplog, tmpdir):
JobScript.read_defaults() # reset
caplog.setLevel(logging.DEBUG, logger='clusterjob')
jobscript = JobScript(body="echo '{text}'", jobname="test")
assert get_attributes(jobscript) == ['aux_scripts', 'body', 'resources']
jobscript2 = JobScript(body="echo 'Hello'", jobname="test2")
inidata, expected_attribs, expected_resources = example_inidata()
p = tmpdir.join("job.ini")
p.write(inidata)
ini_filename = str(p)
with pytest.raises(AttributeError) as excinfo:
jobscript.read_settings(ini_filename)
assert "'cache_folder' can only be set as a class attribute" \
in str(excinfo.value)
inidata = inidata.replace("cache_folder = cache\n", "")
p.write(inidata)
jobscript.read_settings(ini_filename)
assert get_attributes(jobscript) == ['aux_scripts', 'backend', 'body',
'epilogue', 'max_sleep_interval', 'prologue', 'remote',
'resources', 'rootdir', 'shell', 'text', 'workdir']
# class attributes remain unaffected
for attr in get_attributes(JobScript):
if attr not in ['resources', 'backends']:
assert getattr(JobScript, attr) == default_class_attr_val(attr)
assert str(jobscript) == dedent(r'''
#!/bin/sh
#PBS -l nodes=1:ppn=12
#PBS -N test
#PBS -q exec
#PBS -l mem=10000m
echo 'Hello World'
''').strip()
# the second jobscript is unaffected
assert str(jobscript2) == dedent(r'''
#!/bin/bash
#SBATCH --job-name=test2
echo 'Hello'
''').strip()
<|fim▁hole|> JobScript.read_defaults() # reset
caplog.setLevel(logging.DEBUG, logger='clusterjob')
jobscript = JobScript(body="echo '{text}'", jobname="test")
inidata = dedent(r'''
[Attributes]
_remote = login.cluster.edu
''')
p = tmpdir.join("job.ini")
p.write(inidata)
ini_filename = str(p)
with pytest.raises(ConfigParserError) as exc_info:
jobscript.read_settings(ini_filename)
assert "Key '_remote' is invalid" in str(exc_info.value)
inidata = dedent(r'''
[Attributes]
key with spaces = bla
''')
p = tmpdir.join("job.ini")
p.write(inidata)
ini_filename = str(p)
with pytest.raises(ConfigParserError) as exc_info:
jobscript.read_settings(ini_filename)
assert "Key 'key with spaces' is invalid" in str(exc_info.value)<|fim▁end|> | def test_read_invalid_attribute(caplog, tmpdir): |
<|file_name|>hp_roman8.py<|end_file_name|><|fim▁begin|>""" Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py.
Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen)
Original source: LaserJet IIP Printer User's Manual HP part no
33471-90901, Hewlett-Packard, June 1989.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00a2: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00a3: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00a4: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00a5: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00a6: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a7: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00a8: 0x00b4, # ACUTE ACCENT
0x00a9: 0x02cb, # MODIFIER LETTER GRAVE ACCENT (Mandarin Chinese fourth tone)
0x00aa: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x00ab: 0x00a8, # DIAERESIS
0x00ac: 0x02dc, # SMALL TILDE
0x00ad: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ae: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00af: 0x20a4, # LIRA SIGN
0x00b0: 0x00af, # MACRON
0x00b1: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00b2: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00b3: 0x00b0, # DEGREE SIGN
0x00b4: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00b5: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x00b6: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00b7: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00b8: 0x00a1, # INVERTED EXCLAMATION MARK
0x00b9: 0x00bf, # INVERTED QUESTION MARK
0x00ba: 0x00a4, # CURRENCY SIGN
0x00bb: 0x00a3, # POUND SIGN
0x00bc: 0x00a5, # YEN SIGN
0x00bd: 0x00a7, # SECTION SIGN
0x00be: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00bf: 0x00a2, # CENT SIGN
0x00c0: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00c1: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00c2: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00c3: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00c4: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00c5: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x00c6: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00c7: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00c8: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x00c9: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x00ca: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x00cb: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x00cc: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x00cd: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ce: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x00cf: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00d0: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00d1: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00d2: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x00d3: 0x00c6, # LATIN CAPITAL LETTER AE
0x00d4: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x00d5: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00d6: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x00d7: 0x00e6, # LATIN SMALL LETTER AE
0x00d8: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00d9: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x00da: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS<|fim▁hole|> 0x00db: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dc: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x00dd: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x00de: 0x00df, # LATIN SMALL LETTER SHARP S (German)
0x00df: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e0: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00e1: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00e2: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00e3: 0x00d0, # LATIN CAPITAL LETTER ETH (Icelandic)
0x00e4: 0x00f0, # LATIN SMALL LETTER ETH (Icelandic)
0x00e5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00e6: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00e7: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e8: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e9: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00ea: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00eb: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00ec: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00ed: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ee: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
0x00ef: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00f0: 0x00de, # LATIN CAPITAL LETTER THORN (Icelandic)
0x00f1: 0x00fe, # LATIN SMALL LETTER THORN (Icelandic)
0x00f2: 0x00b7, # MIDDLE DOT
0x00f3: 0x00b5, # MICRO SIGN
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f6: 0x2014, # EM DASH
0x00f7: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00f8: 0x00bd, # VULGAR FRACTION ONE HALF
0x00f9: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00fa: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00fb: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00fc: 0x25a0, # BLACK SQUARE
0x00fd: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00fe: 0x00b1, # PLUS-MINUS SIGN
0x00ff: None,
})
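# A minimal decoding sketch (not part of the generated table): charmap_decode
# looks each byte up in decoding_map, so byte 0xD8 yields U+00C4 per the
# entries above.
#
#   decoded, consumed = codecs.charmap_decode('\xd8', 'strict', decoding_map)
#   assert decoded == u'\xc4' and consumed == 1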
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)<|fim▁end|> | |
<|file_name|>rendermonitor.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# Generic rendering function that can handle all simulations, but makes poor renders. Nice for sketching.
import os, time, subprocess, re
def Say(text, verbosity=0, end='\n', suppressTime=False):
if verbosity<=VERBOSITY:
if suppressTime:
timeStr = ''
else:
timeStr = time.strftime('%H:%M:%S ')
if verbosity == 0:
print(timeStr + text, end=end)
else:
print('DEBUG: ' + text, end=end)
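# Usage sketch: Say('rendering', verbosity=1) prints only when VERBOSITY >= 1
# and is prefixed with 'DEBUG: '; verbosity=0 messages get an HH:MM:SS
# timestamp unless suppressTime=True.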
###############################################################################
# Global settings #
###################
dirFilter = '.*' # regular expression
#dirFilter = 'ecoli_'
#dirFilter = 'odetol' # regular expression
VERBOSITY = 0 # Note that only rendermonitor.py is printed to console, render.py shows up in logfile.txt
iterModDiv = 5
###############################################################################
resultsPath = os.getcwd()[:os.getcwd().index("/blender")] + "/results"
while True:
t0 = time.time()
    dirList = [dirs for dirs in os.listdir(resultsPath)
               if os.path.isdir(os.path.join(resultsPath, dirs))
               and os.path.isdir(os.path.join(resultsPath, dirs, 'output'))
               and re.search(dirFilter, dirs)]
dirList.sort()
for d in dirList:
Say(d)
#######################################################################
# Pre-render settings #
#######################
renderpySettingsDict = {'VERBOSITY':VERBOSITY,
'resolution_percentage':50,
'offset':'array([120,120,20])',
'model.L':'array([60e-6,60e-6,60e-6])',
'saveBlend':True,
'drawStick':False,
'renderDir':'render'
}
#renderpySettingsDict['suppressRender'] = True
if re.match('^aom', d):
renderpySettingsDict['model.L'] = 'array([20e-6,20e-6,20e-6])'
renderpySettingsDict['offset'] = 'array([10,10,10])'
renderpySettingsDict['configMaterial'] = 'ConfigAOM'
renderpySettingsDict['gridStepSize'] = 5
elif re.match('^as', d):
renderpySettingsDict['model.L'] = 'array([80e-6,80e-6,80e-6])'
renderpySettingsDict['offset'] = 'array([40,40,40])'
renderpySettingsDict['configMaterial'] = 'ConfigAS'
elif re.match('^ecoli', d):
renderpySettingsDict['model.L'] = 'array([80e-6,80e-6,80e-6])'
renderpySettingsDict['offset'] = 'array([40,40,0])'
renderpySettingsDict['configMaterial'] = 'ConfigEcoli' # Change colours of cells for consistency with paper/experiments
renderpySettingsDict['colourByGeneration'] = True
#######################################################################
dAbs = resultsPath + "/" + d + "/output"
Say(dAbs, 2)
fileList = [files for files in os.listdir(dAbs) if os.path.splitext(files)[-1]=='.mat']
fileList.sort(reverse=True)
for f in fileList:
            if not int(re.match(r'g(\d{4})r(\d{4})\.mat', f).group(2)) % iterModDiv == 0:
# relaxation iteration (YYYY in filename gXXXXrYYYY.mat) % iterModulusDivider == 0
continue<|fim▁hole|> fAbs = dAbs + "/" + f
# Check if file is already plotted
fName = os.path.splitext(fAbs.split("/")[-1])[0]
renderPath = (fAbs[:fAbs.index("/output/"+fName)] + "/" + renderpySettingsDict['renderDir']) if ("/output/"+f in fAbs) else ("/".join(fAbs.split("/")[:-1]))
if os.path.isfile(renderPath + "/" + fName + ".png"):
Say(" " + f + ' --> already rendered', 2)
else:
Say(" " + f, end='\r')
callStr = ["blender", "--background", "--python", "render.py", "--", fAbs] # Call string is with filename
[callStr.extend([key,str(val)]) for key,val in renderpySettingsDict.items()] # Append settingsDict
Say("\nCall string = " + " ".join(callStr), verbosity=2)
[stdout, _] = subprocess.Popen(callStr, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
stdout = stdout.decode()
if 'Error' in stdout or 'WARNING' in stdout:
with open('logfile.txt', 'w') as file:
file.write(time.strftime('%Y/%m/%d, %H:%M:%S') + " (" + fAbs + ")\n\n" + stdout)
if 'error' in stdout.lower() and 'warning' in stdout.lower():
suffix = " --> WARNING and ERROR"
elif 'error' in stdout.lower():
suffix = " --> ERROR"
else:
suffix = " --> "
for line in stdout.split('\n'):
if 'warning' in line.lower():
suffix += line + ' '
Say(" " + f + suffix)
else:
Say('', suppressTime=True) # Make newline
time.sleep(max(0, 10-(time.time()-t0))) # There must be at least some time between each loop<|fim▁end|> | |
<|file_name|>DropdownTimePicker.js<|end_file_name|><|fim▁begin|>dojo.provide("dojo.widget.DropdownTimePicker");
dojo.require("dojo.widget.*");
dojo.require("dojo.widget.DropdownContainer");
dojo.require("dojo.widget.TimePicker");
dojo.require("dojo.event.*");
dojo.require("dojo.html.*");
dojo.require("dojo.date.format");
dojo.require("dojo.date.serialize");
dojo.require("dojo.i18n.common");
dojo.requireLocalization("dojo.widget", "DropdownTimePicker");
dojo.widget.defineWidget(
"dojo.widget.DropdownTimePicker",
dojo.widget.DropdownContainer,
{
/*
summary:
A form input for entering times with a pop-up dojo.widget.TimePicker to aid in selection
description:
This is TimePicker in a DropdownContainer, it supports all features of TimeePicker.
It is possible to customize the user-visible formatting with either the formatLength or
displayFormat attributes. The value sent to the server is typically a locale-independent
value in a hidden field as defined by the name attribute. RFC3339 representation is used
by default, but other options are available with saveFormat.
usage:
var dtp = dojo.widget.createWidget("DropdownTimePicker", {},
dojo.byId("DropdownTimePickerNode"));
<input dojoType="DropdownTimePicker">
*/
// iconURL: URL
// path of icon for button to display time picker widget
iconURL: dojo.uri.moduleUri("dojo.widget", "templates/images/timeIcon.gif"),
// formatLength: String
// Type of formatting used for visual display, appropriate to locale (choice of long, short, medium or full)
// See dojo.date.format for details.
formatLength: "short",
// displayFormat: String
// A pattern used for the visual display of the formatted date.
// Setting this overrides the default locale-specific settings as well as the formatLength
// attribute. See dojo.date.format for a reference which defines the formatting patterns.
displayFormat: "",
timeFormat: "", // deprecated, will be removed for 0.5
//FIXME: need saveFormat attribute support
// saveFormat: String
// Formatting scheme used when submitting the form element. This formatting is used in a hidden
// field (name) intended for server use, and is therefore typically locale-independent.
// By default, uses rfc3339 style date formatting (rfc)
// Use a pattern string like displayFormat or one of the following:
// rfc|iso|posix|unix
saveFormat: "",
// value: String|Date
// form value property in rfc3339 format. ex: 12:00
value: "",
// name: String
// name of the form element, used to create a hidden field by this name for form element submission.
name: "",
postMixInProperties: function() {
// summary: see dojo.widget.DomWidget
dojo.widget.DropdownTimePicker.superclass.postMixInProperties.apply(this, arguments);
var messages = dojo.i18n.getLocalization("dojo.widget", "DropdownTimePicker", this.lang);
this.iconAlt = messages.selectTime;
//FIXME: should this be if/else/else?
if(typeof(this.value)=='string'&&this.value.toLowerCase()=='today'){
this.value = new Date();
}
if(this.value && isNaN(this.value)){
var orig = this.value;
this.value = dojo.date.fromRfc3339(this.value);
if(!this.value){
var d = dojo.date.format(new Date(), { selector: "dateOnly", datePattern: "yyyy-MM-dd" });
var c = orig.split(":");
for(var i = 0; i < c.length; ++i){
if(c[i].length == 1) c[i] = "0" + c[i];
}
orig = c.join(":");
this.value = dojo.date.fromRfc3339(d + "T" + orig);
dojo.deprecated("dojo.widget.DropdownTimePicker", "time attributes must be passed in Rfc3339 format", "0.5");
}
}
if(this.value && !isNaN(this.value)){
this.value = new Date(this.value);
}
},
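	// Sketch of the normalization above (hypothetical markup value "9:5"):
	// each colon-separated piece is zero-padded to two digits ("09:05"),
	// today's date is prepended, and dojo.date.fromRfc3339 parses the
	// combined "yyyy-MM-ddT09:05" string into a Date.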
fillInTemplate: function(){
// summary: see dojo.widget.DomWidget
dojo.widget.DropdownTimePicker.superclass.fillInTemplate.apply(this, arguments);
var value = "";
if(this.value instanceof Date) {
value = this.value;
} else if(this.value) {
var orig = this.value;
var d = dojo.date.format(new Date(), { selector: "dateOnly", datePattern: "yyyy-MM-dd" });
var c = orig.split(":");
for(var i = 0; i < c.length; ++i){
if(c[i].length == 1) c[i] = "0" + c[i];
}
orig = c.join(":");
value = dojo.date.fromRfc3339(d + "T" + orig);
}
var tpArgs = { widgetContainerId: this.widgetId, lang: this.lang, value: value };
this.timePicker = dojo.widget.createWidget("TimePicker", tpArgs, this.containerNode, "child");
dojo.event.connect(this.timePicker, "onValueChanged", this, "_updateText");
if(this.value){
this._updateText();
}
this.containerNode.style.zIndex = this.zIndex;
this.containerNode.explodeClassName = "timeContainer";
this.valueNode.name = this.name;
},
getValue: function(){
// summary: return current time in time-only portion of RFC 3339 format
return this.valueNode.value; /*String*/
},
getTime: function(){
// summary: return current time as a Date object
return this.timePicker.storedTime; /*Date*/
},
<|fim▁hole|> //summary: set the current time from RFC 3339 formatted string or a date object, synonymous with setTime
this.setTime(rfcDate);
},
setTime: function(/*Date|String*/dateObj){
// summary: set the current time and update the UI
var value = "";
if(dateObj instanceof Date) {
value = dateObj;
		} else if(dateObj) {
			var orig = dateObj;
var d = dojo.date.format(new Date(), { selector: "dateOnly", datePattern: "yyyy-MM-dd" });
var c = orig.split(":");
for(var i = 0; i < c.length; ++i){
if(c[i].length == 1) c[i] = "0" + c[i];
}
orig = c.join(":");
value = dojo.date.fromRfc3339(d + "T" + orig);
}
this.timePicker.setTime(value);
this._syncValueNode();
},
_updateText: function(){
// summary: updates the <input> field according to the current value (ie, displays the formatted date)
if(this.timePicker.selectedTime.anyTime){
this.inputNode.value = "";
}else if(this.timeFormat){
dojo.deprecated("dojo.widget.DropdownTimePicker",
"Must use displayFormat attribute instead of timeFormat. See dojo.date.format for specification.", "0.5");
this.inputNode.value = dojo.date.strftime(this.timePicker.time, this.timeFormat, this.lang);
}else{
this.inputNode.value = dojo.date.format(this.timePicker.time,
{formatLength:this.formatLength, timePattern:this.displayFormat, selector:'timeOnly', locale:this.lang});
}
this._syncValueNode();
this.onValueChanged(this.getTime());
this.hideContainer();
},
onValueChanged: function(/*Date*/dateObj){
// summary: triggered when this.value is changed
},
onInputChange: function(){
// summary: callback when user manually types a time into the <input> field
		if(this.timeFormat){
			dojo.deprecated("dojo.widget.DropdownTimePicker",
				"Cannot parse user input. Must use displayFormat attribute instead of timeFormat. See dojo.date.format for specification.", "0.5");
}else{
var input = dojo.string.trim(this.inputNode.value);
if(input){
var inputTime = dojo.date.parse(input,
{formatLength:this.formatLength, timePattern:this.displayFormat, selector:'timeOnly', locale:this.lang});
if(inputTime){
this.setTime(inputTime);
}
} else {
this.valueNode.value = input;
}
}
// If the time entered didn't parse, reset to the old time. KISS, for now.
//TODO: usability? should we provide more feedback somehow? an error notice?
// seems redundant to do this if the parse failed, but at least until we have validation,
		// this will fix up the display of entries like 25:61
if(input){ this._updateText(); }
},
_syncValueNode: function(){
var time = this.timePicker.time;
var value;
switch(this.saveFormat.toLowerCase()){
case "rfc": case "iso": case "":
value = dojo.date.toRfc3339(time, 'timeOnly');
break;
case "posix": case "unix":
value = Number(time);
break;
default:
value = dojo.date.format(time, {datePattern:this.saveFormat, selector:'timeOnly', locale:this.lang});
}
this.valueNode.value = value;
},
destroy: function(/*Boolean*/finalize){
this.timePicker.destroy(finalize);
dojo.widget.DropdownTimePicker.superclass.destroy.apply(this, arguments);
}
}
);<|fim▁end|> | setValue: function(/*Date|String*/rfcDate){ |
<|file_name|>ref.py<|end_file_name|><|fim▁begin|># encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from collections import Mapping
import os
from pyLibrary import dot
from pyLibrary.dot import set_default, wrap, unwrap
from pyLibrary.parsers import URL
DEBUG = False
_convert = None
_Log = None
_Except = None
def _late_import():
global _convert
global _Log
global _Except
from pyLibrary import convert as _convert
from pyLibrary.debugs.logs import Log as _Log
from pyLibrary.debugs.exceptions import Except as _Except
_ = _convert
_ = _Log
_ = _Except
def get(url):
"""<|fim▁hole|>
if url.find("://") == -1:
        _Log.error("{{url}} must have a protocol (e.g. http://) declared", url=url)
base = URL("")
if url.startswith("file://") and url[7] != "/":
if os.sep=="\\":
base = URL("file:///" + os.getcwd().replace(os.sep, "/").rstrip("/") + "/.")
else:
base = URL("file://" + os.getcwd().rstrip("/") + "/.")
elif url[url.find("://") + 3] != "/":
_Log.error("{{url}} must be absolute", url=url)
phase1 = _replace_ref(wrap({"$ref": url}), base) # BLANK URL ONLY WORKS IF url IS ABSOLUTE
try:
phase2 = _replace_locals(phase1, [phase1])
return wrap(phase2)
except Exception, e:
        _Log.error("problem replacing locals in\n{{phase1}}", phase1=phase1, cause=e)
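# Example (illustrative file names, not from this module): if /tmp/a.json
# contains {"settings": {"$ref": "b.json#inner.key"}}, then
# get("file:///tmp/a.json") loads b.json relative to a.json, extracts the
# "inner.key" fragment via dot.get_attr, and merges it into "settings"
# before _replace_locals resolves any remaining local ("#...") references.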
def expand(doc, doc_url):
"""
ASSUMING YOU ALREADY PULED THE doc FROM doc_url, YOU CAN STILL USE THE
EXPANDING FEATURE
"""
if doc_url.find("://") == -1:
        _Log.error("{{url}} must have a protocol (e.g. http://) declared", url=doc_url)
phase1 = _replace_ref(doc, URL(doc_url)) # BLANK URL ONLY WORKS IF url IS ABSOLUTE
phase2 = _replace_locals(phase1, [phase1])
return wrap(phase2)
def _replace_ref(node, url):
if url.path.endswith("/"):
url.path = url.path[:-1]
if isinstance(node, Mapping):
ref = None
output = {}
for k, v in node.items():
if k == "$ref":
ref = URL(v)
else:
output[k] = _replace_ref(v, url)
if not ref:
return output
node = output
if not ref.scheme and not ref.path:
# DO NOT TOUCH LOCAL REF YET
output["$ref"] = ref
return output
if not ref.scheme:
# SCHEME RELATIVE IMPLIES SAME PROTOCOL AS LAST TIME, WHICH
# REQUIRES THE CURRENT DOCUMENT'S SCHEME
ref.scheme = url.scheme
# FIND THE SCHEME AND LOAD IT
if ref.scheme in scheme_loaders:
new_value = scheme_loaders[ref.scheme](ref, url)
else:
raise _Log.error("unknown protocol {{scheme}}", scheme=ref.scheme)
if ref.fragment:
new_value = dot.get_attr(new_value, ref.fragment)
if DEBUG:
_Log.note("Replace {{ref}} with {{new_value}}", ref=ref, new_value=new_value)
if not output:
output = new_value
else:
output = unwrap(set_default(output, new_value))
if DEBUG:
_Log.note("Return {{output}}", output=output)
return output
elif isinstance(node, list):
output = [_replace_ref(n, url) for n in node]
# if all(p[0] is p[1] for p in zip(output, node)):
# return node
return output
return node
def _replace_locals(node, doc_path):
if isinstance(node, Mapping):
# RECURS, DEEP COPY
ref = None
output = {}
for k, v in node.items():
if k == "$ref":
ref = v
else:
output[k] = _replace_locals(v, [v] + doc_path)
if not ref:
return output
# REFER TO SELF
frag = ref.fragment
if frag[0] == ".":
# RELATIVE
for i, p in enumerate(frag):
if p != ".":
if i>len(doc_path):
_Log.error("{{frag|quote}} reaches up past the root document", frag=frag)
new_value = dot.get_attr(doc_path[i-1], frag[i::])
break
else:
new_value = doc_path[len(frag) - 1]
else:
# ABSOLUTE
new_value = dot.get_attr(doc_path[-1], frag)
new_value = _replace_locals(new_value, [new_value] + doc_path)
if not output:
return new_value # OPTIMIZATION FOR CASE WHEN node IS {}
else:
return unwrap(set_default(output, new_value))
elif isinstance(node, list):
candidate = [_replace_locals(n, [n] + doc_path) for n in node]
# if all(p[0] is p[1] for p in zip(candidate, node)):
# return node
return candidate
return node
###############################################################################
## SCHEME LOADERS ARE BELOW THIS LINE
###############################################################################
def get_file(ref, url):
from pyLibrary.env.files import File
if ref.path.startswith("~"):
home_path = os.path.expanduser("~")
if os.sep == "\\":
home_path = "/" + home_path.replace(os.sep, "/")
if home_path.endswith("/"):
home_path = home_path[:-1]
ref.path = home_path + ref.path[1::]
elif not ref.path.startswith("/"):
# CONVERT RELATIVE TO ABSOLUTE
ref.path = "/".join(url.path.rstrip("/").split("/")[:-1]) + "/" + ref.path
path = ref.path if os.sep != "\\" else ref.path[1::].replace("/", "\\")
try:
if DEBUG:
_Log.note("reading file {{path}}", path=path)
content = File(path).read()
except Exception, e:
content = None
_Log.error("Could not read file {{filename}}", filename=path, cause=e)
try:
new_value = _convert.json2value(content, params=ref.query, flexible=True, leaves=True)
except Exception, e:
if not _Except:
_late_import()
e = _Except.wrap(e)
try:
new_value = _convert.ini2value(content)
except Exception, f:
raise _Log.error("Can not read {{file}}", file=path, cause=e)
new_value = _replace_ref(new_value, ref)
return new_value
def get_http(ref, url):
from pyLibrary.env import http
params = url.query
new_value = _convert.json2value(http.get(ref), params=params, flexible=True, leaves=True)
return new_value
def get_env(ref, url):
# GET ENVIRONMENT VARIABLES
ref = ref.host
try:
new_value = _convert.json2value(os.environ[ref])
except Exception, e:
new_value = os.environ[ref]
return new_value
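# Sketch: {"$ref": "env://HOME"} resolves to os.environ["HOME"]; the value
# is JSON-decoded when possible, otherwise returned as the raw string.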
def get_param(ref, url):
# GET PARAMETERS FROM url
param = url.query
new_value = param[ref.host]
return new_value
scheme_loaders = {
"http": get_http,
"file": get_file,
"env": get_env,
"param": get_param
}<|fim▁end|> | USE json.net CONVENTIONS TO LINK TO INLINE OTHER JSON
"""
if not _Log:
_late_import() |
<|file_name|>utils.go<|end_file_name|><|fim▁begin|>package rpc
import (
"errors"
log "github.com/Sirupsen/logrus"
"strconv"
"strings"
)
var (
ERRINVALIDLENGTH = errors.New("Invalid length of message field")
ERRINVALIDTOKEN = errors.New("Invalid authentication token")
)
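// Wire layout shared by encode and decode below (a sketch derived from the
// code; each length is an ASCII decimal stored in a fixed-width field that
// is NUL-padded on the right):
//
//	[2B token len][token]
//	[3B name len][3B uuid len][sender name][sender uuid]
//	[3B name len][3B uuid len][3B handler len][dest name][uuid][handler]
//	[3B name len][3B uuid len][3B handler len][recv name][uuid][handler]
//	[payload]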
func (s *Sender) Sign() ([]byte, error) {
var body []byte
if (len(s.Name) > 255) || (len(s.UUID) > 255) {
return body, ERRINVALIDLENGTH
}
cn := [3]byte{}
cu := [3]byte{}
copy(cn[:], strconv.Itoa(len(s.Name)))
copy(cu[:], strconv.Itoa(len(s.UUID)))
body = append(body[:], cn[:]...)
body = append(body[:], cu[:]...)
body = append(body[:], []byte(s.Name)[:]...)
body = append(body[:], []byte(s.UUID)[:]...)
return body, nil
}
func (p *Receiver) Sign() ([]byte, error) {
var body []byte
if (len(p.Name) > 255) || (len(p.UUID) > 255) || (len(p.Handler) > 255) {
return body, ERRINVALIDLENGTH
}
cn := [3]byte{}
cu := [3]byte{}
ch := [3]byte{}
copy(cn[:], strconv.Itoa(len(p.Name)))
copy(cu[:], strconv.Itoa(len(p.UUID)))
copy(ch[:], strconv.Itoa(len(p.Handler)))
body = append(body[:], cn[:]...)
body = append(body[:], cu[:]...)
body = append(body[:], ch[:]...)<|fim▁hole|> body = append(body[:], []byte(p.UUID)[:]...)
body = append(body[:], []byte(p.Handler)[:]...)
return body, nil
}
func (d *Destination) Sign() ([]byte, error) {
var body []byte
if (len(d.Name) > 255) || (len(d.UUID) > 255) || (len(d.Handler) > 255) {
return body, ERRINVALIDLENGTH
}
cn := [3]byte{}
cu := [3]byte{}
ch := [3]byte{}
copy(cn[:], strconv.Itoa(len(d.Name)))
copy(cu[:], strconv.Itoa(len(d.UUID)))
copy(ch[:], strconv.Itoa(len(d.Handler)))
body = append(body[:], cn[:]...)
body = append(body[:], cu[:]...)
body = append(body[:], ch[:]...)
body = append(body[:], []byte(d.Name)[:]...)
body = append(body[:], []byte(d.UUID)[:]...)
body = append(body[:], []byte(d.Handler)[:]...)
return body, nil
}
func (r *RPC) encode(s Sender, d Destination, p Receiver, data []byte) ([]byte, error) {
var body []byte
if len(r.token) > 96 {
return body, ERRINVALIDLENGTH
}
ct := [2]byte{}
copy(ct[:], strconv.Itoa(len(r.token)))
body = append(body[:], ct[:]...)
body = append(body[:], []byte(r.token)[:]...)
sender, err := s.Sign()
if err != nil {
return body, err
}
body = append(body, sender[:]...)
	destination, err := d.Sign()
if err != nil {
return body, err
}
	body = append(body, destination[:]...)
receiver, err := p.Sign()
if err != nil {
return body, err
}
body = append(body, receiver[:]...)
body = append(body, data[:]...)
return body, nil
}
func (r *RPC) decode(data []byte) (Sender, Destination, Receiver, []byte, error) {
s := Sender{}
d := Destination{}
p := Receiver{}
if len(data) == 0 {
return s, d, p, []byte{}, errors.New("Body is empty")
}
tc, err := r.parseInt(data[0:2], 2)
if err != nil {
log.Error("Parse message error:", err)
return s, d, p, []byte{}, err
}
var token = string(data[2:tc])
if token != r.token {
return s, d, p, []byte{}, ERRINVALIDTOKEN
}
data = data[tc:]
snl, _ := r.parseInt(data[0:3], 6)
sul, _ := r.parseInt(data[3:6], snl)
s.Name = string(data[6:snl])
s.UUID = string(data[snl:sul])
data = data[sul:]
dnl, _ := r.parseInt(data[0:3], 9)
dul, _ := r.parseInt(data[3:6], dnl)
dhl, _ := r.parseInt(data[6:9], dul)
if dnl > 9 {
d.Name = string(data[9:dnl])
}
if dul > dnl {
d.UUID = string(data[dnl:dul])
}
if dhl > dul {
d.Handler = string(data[dul:dhl])
}
data = data[dhl:]
pnl, _ := r.parseInt(data[0:3], 9)
pul, _ := r.parseInt(data[3:6], pnl)
phl, _ := r.parseInt(data[6:9], pul)
if pnl > 9 {
p.Name = string(data[9:pnl])
}
if pul > pnl {
p.UUID = string(data[pnl:pul])
}
if phl > pul {
p.Handler = string(data[pul:phl])
}
data = data[phl:]
return s, d, p, data, nil
}
func (r *RPC) parseInt(data []byte, start int) (int, error) {
num := string(data)
if strings.Index(num, "\x00") >= 0 {
		num = num[:strings.Index(num, "\x00")]
}
i, err := strconv.Atoi(num)
if err != nil {
return 0, err
}
i += start
return i, nil
}<|fim▁end|> |
body = append(body[:], []byte(p.Name)[:]...) |
<|file_name|>connector.py<|end_file_name|><|fim▁begin|># Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from desktop.lib.python_util import force_dict_to_strings
from config import Config
class Connector(object):
def __init__(self, id, name, version, link_config, job_config, config_resources={}, **kwargs):
self.id = id
self.name = name
self.version = version
self.job_config = job_config
self.link_config = link_config
self.config_resources = config_resources
setattr(self, 'class', kwargs['class'])
@staticmethod
def from_dict(connector_dict):
    connector_dict.setdefault('link-config', [])
connector_dict['link_config'] = [ Config.from_dict(link_config_dict) for link_config_dict in connector_dict['link-config'] ]
    connector_dict.setdefault('job-config', {})
connector_dict['job_config'] = {}
if 'FROM' in connector_dict['job-config']:
connector_dict['job_config']['FROM'] = [ Config.from_dict(from_config_dict) for from_config_dict in connector_dict['job-config']['FROM'] ]
if 'TO' in connector_dict['job-config']:
connector_dict['job_config']['TO'] = [ Config.from_dict(to_config_dict) for to_config_dict in connector_dict['job-config']['TO'] ]
connector_dict['config_resources'] = connector_dict['all-config-resources']<|fim▁hole|>
return Connector(**force_dict_to_strings(connector_dict))
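  # Sketch of the dict shape from_dict() expects (keys mirror to_dict()
  # below; the example values are illustrative, not from this file):
  #   {'id': 1, 'name': '...', 'version': '...', 'class': '...',
  #    'link-config': [...], 'job-config': {'FROM': [...], 'TO': [...]},
  #    'all-config-resources': {...}}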
def to_dict(self):
d = {
'id': self.id,
'name': self.name,
'version': self.version,
'class': getattr(self, 'class'),
'link-config': [ link_config.to_dict() for link_config in self.link_config ],
'job-config': {},
'all-config-resources': self.config_resources
}
if 'FROM' in self.job_config:
d['job-config']['FROM'] = [ job_config.to_dict() for job_config in self.job_config['FROM'] ]
if 'TO' in self.job_config:
d['job-config']['TO'] = [ job_config.to_dict() for job_config in self.job_config['TO'] ]
return d<|fim▁end|> | |
<|file_name|>errorRibbonPlotMulti.py<|end_file_name|><|fim▁begin|>import sys
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
sys.path.append('../../')
from util import readCSVToMatrix
file_path = sys.argv[1]
file = open(file_path)
def plotErrorRibbon(x, mu, std, metric, pp):
fig, ax = plt.subplots(1)
ax.fill_between(x, mu+std, mu-std, facecolor='blue', alpha=0.5, label='$\pm \sigma$')
    ax.plot(x, mu, alpha=0.7, label='$\mu$', linewidth=2.0)
# ax.set_title(metric + ' convergence', fontsize=22)
ax.legend(loc='upper right')
ax.set_xlabel('number of scenario samples', fontsize=20)
ax.set_ylabel('standard deviation $\mu$ $\pm \sigma$', fontsize=20)
ax.tick_params(axis='both', which='major', labelsize=16)
ax.tick_params(axis='both', which='minor', labelsize=14)
plt.axis("tight")
plt.savefig('convergence_plot_' + metric + '.svg', format='svg', dpi=1000, bbox_inches='tight')
pp.savefig(fig, bbox_inches='tight')
# plt.show()
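# Assumed CSV layout, inferred from the column indices used below:
#   col 0 = number of scenario samples; cols 1-4 = means of coverage, time,
#   distance and effort; cols 5-8 = the corresponding standard deviations.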
if __name__ == '__main__':
print file
data = readCSVToMatrix(file, delimiter=',')
print data
pp = PdfPages("convergence" + '_plots.pdf')
plotErrorRibbon(data[:,0], data[:,1], data[:,5], 'coverage', pp)
plotErrorRibbon(data[:,0], data[:,2], data[:,6], 'time', pp)<|fim▁hole|> plotErrorRibbon(data[:,0], data[:,4], data[:,8], 'effort', pp)
# plotData(pp, filename, parameter)
pp.close()<|fim▁end|> | plotErrorRibbon(data[:,0], data[:,3], data[:,7], 'distance', pp) |
<|file_name|>actions.py<|end_file_name|><|fim▁begin|>import datetime
import hashlib
import itertools
import logging
import os
import time
from collections import defaultdict
from dataclasses import asdict, dataclass, field
from operator import itemgetter
from typing import (
IO,
AbstractSet,
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import django.db.utils
import orjson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, transaction
from django.db.models import Count, Exists, F, OuterRef, Q, Sum
from django.db.models.query import QuerySet
from django.utils.html import escape
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from django.utils.translation import override as override_language
from psycopg2.extras import execute_values
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, do_increment_logging_stat
from analytics.models import RealmCount, StreamCount
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.decorator import statsd_increment
from zerver.lib import retention as retention
from zerver.lib.addressee import Addressee
from zerver.lib.alert_words import (
add_user_alert_words,
get_alert_word_automaton,
remove_user_alert_words,
)
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.cache import (
bot_dict_fields,
cache_delete,
cache_delete_many,
cache_set,
cache_set_many,
cache_with_key,
delete_user_profile_caches,
display_recipient_cache_key,
flush_user_profile,
get_stream_cache_key,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
user_profile_delivery_email_cache_key,
)
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.email_validation import (
email_reserved_for_system_bots_error,
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.emoji import check_emoji_request, emoji_name_to_emoji_code, get_emoji_file_name
from zerver.lib.exceptions import (
InvitationError,
JsonableError,
MarkdownRenderingException,
StreamDoesNotExistError,
StreamWithIDDoesNotExistError,
ZephyrMessageAlreadySentException,
)
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.i18n import get_language_name
from zerver.lib.markdown import MessageRenderingResult, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.mention import MentionBackend, MentionData, silent_mention_syntax_for_user
from zerver.lib.message import (
MessageDict,
SendMessageRequest,
access_message,
bulk_access_messages,
get_last_message_id,
normalize_body,
render_markdown,
truncate_topic,
update_first_visible_message_id,
wildcard_mention_allowed,
)
from zerver.lib.notification_data import UserMessageNotificationsData, get_user_group_mentions_data
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import queue_json_publish
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import (
FromAddress,
clear_scheduled_emails,
clear_scheduled_invitation_emails,
send_email,
send_email_to_admins,
)
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.storage import static_path
from zerver.lib.stream_subscription import (
SubInfo,
bulk_get_private_peers,
bulk_get_subscriber_peer_info,
get_active_subscriptions_for_stream_id,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_subscribed_stream_ids_for_user,
get_subscriptions_for_send_message,
get_used_colors_for_user_ids,
get_user_ids_for_streams,
num_subscribers_for_stream_id,
subscriber_ids_with_stream_history_access,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.streams import (
access_stream_by_id,
access_stream_for_send_message,
can_access_stream_user_ids,
check_stream_access_based_on_stream_post_policy,
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
get_stream_permission_policy_name,
get_web_public_streams_queryset,
render_stream_description,
send_stream_creation_event,
subscribed_to_stream,
)
from zerver.lib.string_validation import check_stream_name, check_stream_topic
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.timezone import canonicalize_timezone
from zerver.lib.topic import (
LEGACY_PREV_TOPIC,
ORIG_TOPIC,
RESOLVED_TOPIC_PREFIX,
TOPIC_LINKS,
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
messages_for_topic,
save_message_for_edit_use_case,
update_edit_history,
update_messages_for_topic_edit,
)
from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute
from zerver.lib.types import ProfileDataElementValue, ProfileFieldData, UnspecifiedValue
from zerver.lib.upload import (
claim_attachment,
delete_avatar_image,
delete_export_tarball,
delete_message_image,
upload_emoji_image,
)
from zerver.lib.user_groups import access_user_group_by_id, create_user_group
from zerver.lib.user_mutes import add_user_mute, get_muting_users, get_user_mutes
from zerver.lib.user_status import update_user_status
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.utils import generate_api_key, log_statsd_event
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions, is_widget_message
from zerver.models import (
Attachment,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
DefaultStreamGroup,
Draft,
EmailChangeStatus,
Message,
MultiuseInvite,
MutedUser,
PreregistrationUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
RealmPlayground,
RealmUserDefault,
Recipient,
ScheduledEmail,
ScheduledMessage,
ScheduledMessageNotificationEmail,
Service,
Stream,
SubMessage,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
UserTopic,
active_non_guest_user_ids,
active_user_ids,
custom_profile_fields_for_realm,
filter_to_valid_prereg_users,
get_active_streams,
get_bot_dicts_in_realm,
get_bot_services,
get_client,
get_default_stream_groups,
get_fake_email_domain,
get_huddle_recipient,
get_huddle_user_ids,
get_old_unclaimed_attachments,
get_realm,
get_realm_playgrounds,
get_stream,
get_stream_by_id_in_realm,
get_system_bot,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_profile_by_id,
is_cross_realm_bot_email,
linkifiers_for_realm,
query_for_ids,
realm_filters_for_realm,
validate_attachment_request,
)
from zerver.tornado.django_api import send_event
if settings.BILLING_ENABLED:
from corporate.lib.stripe import (
downgrade_now_without_creating_additional_invoices,
update_license_ledger_if_needed,
)
@dataclass
class SubscriptionInfo:
subscriptions: List[Dict[str, Any]]
unsubscribed: List[Dict[str, Any]]
never_subscribed: List[Dict[str, Any]]
# These are hard to type-check because of the API_FIELDS loops.
RawStreamDict = Dict[str, Any]
RawSubscriptionDict = Dict[str, Any]
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
ONBOARDING_RECENT_TIMEDELTA = datetime.timedelta(weeks=1)
STREAM_ASSIGNMENT_COLORS = [
"#76ce90",
"#fae589",
"#a6c7e5",
"#e79ab5",
"#bfd56f",
"#f4ae55",
"#b0a5fd",
"#addfe5",
"#f5ce6e",
"#c2726a",
"#94c849",
"#bd86e5",
"#ee7e4a",
"#a6dcbf",
"#95a5fd",
"#53a063",
"#9987e1",
"#e4523d",
"#c2c2c2",
"#4f8de4",
"#c6a8ad",
"#e7cc4d",
"#c8bebf",
"#a47462",
]
def subscriber_info(user_id: int) -> Dict[str, Any]:
return {"id": user_id, "flags": ["read"]}
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
is_private_bot = (
user_profile.default_sending_stream
and user_profile.default_sending_stream.invite_only
or user_profile.default_events_register_stream
and user_profile.default_events_register_stream.invite_only
)
if is_private_bot:
return {user_profile.bot_owner_id}
else:
users = {user.id for user in user_profile.realm.get_human_admin_users()}
users.add(user_profile.bot_owner_id)
return users
def realm_user_count(realm: Realm) -> int:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
human_counts = {
str(UserProfile.ROLE_REALM_ADMINISTRATOR): 0,
str(UserProfile.ROLE_REALM_OWNER): 0,
str(UserProfile.ROLE_MODERATOR): 0,
str(UserProfile.ROLE_MEMBER): 0,
str(UserProfile.ROLE_GUEST): 0,
}
for value_dict in list(
UserProfile.objects.filter(realm=realm, is_bot=False, is_active=True)
.values("role")
.annotate(Count("role"))
):
human_counts[str(value_dict["role"])] = value_dict["role__count"]
bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
return {
RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
}
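# Sketch of the shape returned above: {RealmAuditLog.ROLE_COUNT_HUMANS:
# {str(role): count, ...}, RealmAuditLog.ROLE_COUNT_BOTS: <int>}, where the
# human counts are keyed by the stringified UserProfile.ROLE_* constants.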
def get_signups_stream(realm: Realm) -> Stream:
# This one-liner helps us work around a lint rule.
return get_stream("signups", realm)
def send_message_to_signup_notification_stream(
sender: UserProfile, realm: Realm, message: str, topic_name: str = _("signups")
) -> None:
signup_notifications_stream = realm.get_signup_notifications_stream()
if signup_notifications_stream is None:
return
with override_language(realm.default_language):
internal_send_stream_message(sender, signup_notifications_stream, topic_name, message)
def notify_new_user(user_profile: UserProfile) -> None:
user_count = realm_user_count(user_profile.realm)
sender_email = settings.NOTIFICATION_BOT
sender = get_system_bot(sender_email, user_profile.realm_id)
is_first_user = user_count == 1
if not is_first_user:
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=silent_mention_syntax_for_user(user_profile), user_count=user_count
)
if settings.BILLING_ENABLED:
from corporate.lib.registration import generate_licenses_low_warning_message_if_required
licenses_low_warning_message = generate_licenses_low_warning_message_if_required(
user_profile.realm
)
if licenses_low_warning_message is not None:
message += "\n"
message += licenses_low_warning_message
send_message_to_signup_notification_stream(sender, user_profile.realm, message)
# We also send a notification to the Zulip administrative realm
admin_realm = get_realm(settings.SYSTEM_BOT_REALM)
admin_realm_sender = get_system_bot(sender_email, admin_realm.id)
try:
# Check whether the stream exists
signups_stream = get_signups_stream(admin_realm)
# We intentionally use the same strings as above to avoid translation burden.
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"{user_profile.full_name} <`{user_profile.email}`>", user_count=user_count
)
internal_send_stream_message(
admin_realm_sender, signups_stream, user_profile.realm.display_subdomain, message
)
except Stream.DoesNotExist:
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
def notify_invites_changed(realm: Realm) -> None:
event = dict(type="invites_changed")
admin_ids = [user.id for user in realm.get_admin_users_and_bots()]
send_event(realm, event, admin_ids)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
"""Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
streams, so you have something to look at in your home view once
you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES
are marked unread.
"""
one_week_ago = timezone_now() - ONBOARDING_RECENT_TIMEDELTA
recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only]
recent_messages = Message.objects.filter(
recipient_id__in=recipient_ids, date_sent__gt=one_week_ago
).order_by("-id")
message_ids_to_use = list(
reversed(recent_messages.values_list("id", flat=True)[0:ONBOARDING_TOTAL_MESSAGES])
)
if len(message_ids_to_use) == 0:
return
# Handle the race condition where a message arrives between
# bulk_add_subscriptions above and the Message query just above
already_ids = set(
UserMessage.objects.filter(
message_id__in=message_ids_to_use, user_profile=user_profile
).values_list("message_id", flat=True)
)
# Mark the newest ONBOARDING_UNREAD_MESSAGES as unread.
marked_unread = 0
ums_to_create = []
for message_id in reversed(message_ids_to_use):
if message_id in already_ids:
continue
um = UserMessage(user_profile=user_profile, message_id=message_id)
if marked_unread < ONBOARDING_UNREAD_MESSAGES:
marked_unread += 1
else:
um.flags = UserMessage.flags.read
ums_to_create.append(um)
UserMessage.objects.bulk_create(reversed(ums_to_create))
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
def process_new_human_user(
user_profile: UserProfile,
prereg_user: Optional[PreregistrationUser] = None,
default_stream_groups: Sequence[DefaultStreamGroup] = [],
realm_creation: bool = False,
) -> None:
realm = user_profile.realm
mit_beta_user = realm.is_zephyr_mirror_realm
if prereg_user is not None:
streams: List[Stream] = list(prereg_user.streams.all())
acting_user: Optional[UserProfile] = prereg_user.referred_by
else:
streams = []
acting_user = None
# If the user's invitation didn't explicitly list some streams, we
# add the default streams
if len(streams) == 0:
streams = get_default_subs(user_profile)
for default_stream_group in default_stream_groups:
default_stream_group_streams = default_stream_group.streams.all()
for stream in default_stream_group_streams:
if stream not in streams:
streams.append(stream)
bulk_add_subscriptions(
realm,
streams,
[user_profile],
from_user_creation=True,
acting_user=acting_user,
)
add_new_user_history(user_profile, streams)
# mit_beta_users don't have a referred_by field
if (
not mit_beta_user
and prereg_user is not None
and prereg_user.referred_by is not None
and prereg_user.referred_by.is_active
):
# This is a cross-realm private message.
with override_language(prereg_user.referred_by.default_language):
internal_send_private_message(
get_system_bot(settings.NOTIFICATION_BOT, prereg_user.referred_by.realm_id),
prereg_user.referred_by,
_("{user} accepted your invitation to join Zulip!").format(
user=f"{user_profile.full_name} <`{user_profile.email}`>"
),
)
revoke_preregistration_users(user_profile, prereg_user, realm_creation)
if not realm_creation and prereg_user is not None and prereg_user.referred_by is not None:
notify_invites_changed(user_profile.realm)
notify_new_user(user_profile)
# Clear any scheduled invitation emails to prevent them
# from being sent after the user is created.
clear_scheduled_invitation_emails(user_profile.delivery_email)
if realm.send_welcome_emails:
enqueue_welcome_emails(user_profile, realm_creation)
# We have an import loop here; it's intentional, because we want
# to keep all the onboarding code in zerver/lib/onboarding.py.
from zerver.lib.onboarding import send_initial_pms
send_initial_pms(user_profile)
def revoke_preregistration_users(
created_user_profile: UserProfile,
used_preregistration_user: Optional[PreregistrationUser],
realm_creation: bool,
) -> None:
if used_preregistration_user is None:
assert not realm_creation, "realm_creation should only happen with a PreregistrationUser"
if used_preregistration_user is not None:
used_preregistration_user.status = confirmation_settings.STATUS_ACTIVE
used_preregistration_user.save(update_fields=["status"])
# In the special case of realm creation, there can be no additional PreregistrationUser
# for us to want to modify - because other realm_creation PreregistrationUsers should be
# left usable for creating different realms.
if realm_creation:
return
# Mark any other PreregistrationUsers in the realm that are STATUS_ACTIVE as
# inactive so we can keep track of the PreregistrationUser we
# actually used for analytics.
if used_preregistration_user is not None:
PreregistrationUser.objects.filter(
email__iexact=created_user_profile.delivery_email, realm=created_user_profile.realm
).exclude(id=used_preregistration_user.id).update(
status=confirmation_settings.STATUS_REVOKED
)
else:
PreregistrationUser.objects.filter(
email__iexact=created_user_profile.delivery_email, realm=created_user_profile.realm
).update(status=confirmation_settings.STATUS_REVOKED)
def notify_created_user(user_profile: UserProfile) -> None:
user_row = user_profile_to_user_row(user_profile)
person = format_user_row(
user_profile.realm,
user_profile,
user_row,
# Since we don't know what the client
# supports at this point in the code, we
# just assume client_gravatar and
# user_avatar_url_field_optional = False :(
client_gravatar=False,
user_avatar_url_field_optional=False,
# We assume there's no custom profile
# field data for a new user; initial
# values are expected to be added in a
# later event.
custom_profile_field_data={},
)
event: Dict[str, Any] = dict(type="realm_user", op="add", person=person)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
def stream_name(stream: Optional[Stream]) -> Optional[str]:
if not stream:
return None
return stream.name
default_sending_stream_name = stream_name(user_profile.default_sending_stream)
default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
bot = dict(
email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name,
bot_type=user_profile.bot_type,
is_active=user_profile.is_active,
api_key=get_api_key(user_profile),
default_sending_stream=default_sending_stream_name,
default_events_register_stream=default_events_register_stream_name,
default_all_public_streams=user_profile.default_all_public_streams,
avatar_url=avatar_url(user_profile),
services=get_service_dicts_for_bot(user_profile.id),
)
# Set the owner key only when the bot has an owner.
# The default bots don't have an owner. So don't
# set the owner key while reactivating them.
if user_profile.bot_owner is not None:
bot["owner_id"] = user_profile.bot_owner.id
return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
event = created_bot_event(user_profile)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def create_users(
realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int] = None
) -> None:
user_set = set()
for full_name, email in name_list:
user_set.add((email, full_name, True))
bulk_create_users(realm, user_set, bot_type)
def do_create_user(
email: str,
password: Optional[str],
realm: Realm,
full_name: str,
bot_type: Optional[int] = None,
role: Optional[int] = None,
bot_owner: Optional[UserProfile] = None,
tos_version: Optional[str] = None,
timezone: str = "",
avatar_source: str = UserProfile.AVATAR_FROM_GRAVATAR,
default_sending_stream: Optional[Stream] = None,
default_events_register_stream: Optional[Stream] = None,
default_all_public_streams: Optional[bool] = None,
prereg_user: Optional[PreregistrationUser] = None,
default_stream_groups: Sequence[DefaultStreamGroup] = [],
source_profile: Optional[UserProfile] = None,
realm_creation: bool = False,
*,
acting_user: Optional[UserProfile],
enable_marketing_emails: bool = True,
) -> UserProfile:
with transaction.atomic():
user_profile = create_user(
email=email,
password=password,
realm=realm,
full_name=full_name,
role=role,
bot_type=bot_type,
bot_owner=bot_owner,
tos_version=tos_version,
timezone=timezone,
avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams,
source_profile=source_profile,
enable_marketing_emails=enable_marketing_emails,
)
event_time = user_profile.date_joined
if not acting_user:
acting_user = user_profile
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_CREATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
if realm_creation:
# If this user just created a realm, make sure they are
# properly tagged as the creator of the realm.
realm_creation_audit_log = (
RealmAuditLog.objects.filter(event_type=RealmAuditLog.REALM_CREATED, realm=realm)
.order_by("id")
.last()
)
assert realm_creation_audit_log is not None
realm_creation_audit_log.acting_user = user_profile
realm_creation_audit_log.save(update_fields=["acting_user"])
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
# Note that for bots, the caller will send an additional event
# with bot-specific info like services.
notify_created_user(user_profile)
if bot_type is None:
process_new_human_user(
user_profile,
prereg_user=prereg_user,
default_stream_groups=default_stream_groups,
realm_creation=realm_creation,
)
return user_profile
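# Typical invocation (a sketch; most optional arguments omitted):
#   do_create_user(email="user@example.com", password=None, realm=realm,
#                  full_name="Full Name", acting_user=None)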
def do_activate_mirror_dummy_user(
user_profile: UserProfile, *, acting_user: Optional[UserProfile]
) -> None:
"""Called to have a user "take over" a "mirror dummy" user
(i.e. is_mirror_dummy=True) account when they sign up with the
same email address.
Essentially, the result should be as though we had created the
UserProfile just now with do_create_user, except that the mirror
dummy user may appear as the recipient or sender of messages from
before their account was fully created.
TODO: This function likely has bugs resulting from this being a
parallel code path to do_create_user; e.g. it likely does not
handle preferences or default streams properly.
"""
with transaction.atomic():
change_user_is_active(user_profile, True)
user_profile.is_mirror_dummy = False
user_profile.set_unusable_password()
user_profile.date_joined = timezone_now()
user_profile.tos_version = settings.TERMS_OF_SERVICE_VERSION
user_profile.save(
update_fields=["date_joined", "password", "is_mirror_dummy", "tos_version"]
)
event_time = user_profile.date_joined
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_ACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, *, acting_user: Optional[UserProfile]) -> None:
"""Reactivate a user that had previously been deactivated"""
with transaction.atomic():
change_user_is_active(user_profile, True)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_REACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
if user_profile.is_bot:
notify_created_bot(user_profile)
subscribed_recipient_ids = Subscription.objects.filter(
user_profile_id=user_profile.id, active=True, recipient__type=Recipient.STREAM
).values_list("recipient__type_id", flat=True)
subscribed_streams = Stream.objects.filter(id__in=subscribed_recipient_ids, deactivated=False)
subscriber_peer_info = bulk_get_subscriber_peer_info(
realm=user_profile.realm,
streams=subscribed_streams,
)
altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
for stream in subscribed_streams:
altered_user_dict[stream.id] = {user_profile.id}
stream_dict = {stream.id: stream for stream in subscribed_streams}
send_peer_subscriber_events(
op="peer_add",
realm=user_profile.realm,
altered_user_dict=altered_user_dict,
stream_dict=stream_dict,
private_peer_dict=subscriber_peer_info.private_peer_dict,
)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
@transaction.atomic(savepoint=False)
def do_set_realm_property(
realm: Realm, name: str, value: Any, *, acting_user: Optional[UserProfile]
) -> None:
"""Takes in a realm object, the name of an attribute to update, the
value to update and and the user who initiated the update.
"""
property_type = Realm.property_types[name]
assert isinstance(
value, property_type
), f"Cannot update {name}: {value} is not an instance of {property_type}"
old_value = getattr(realm, name)
setattr(realm, name, value)
realm.save(update_fields=[name])
event = dict(
type="realm",
op="update",
property=name,
value=value,
)
transaction.on_commit(lambda: send_event(realm, event, active_user_ids(realm.id)))
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
"property": name,
}
).decode(),
)
if name == "email_address_visibility":
if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
            # We use real email addresses on UserProfile.email only if
            # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so a
            # change between the other values does not require updating
            # that field, and we can save work and return here.
return
user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False)
for user_profile in user_profiles:
user_profile.email = get_display_email_address(user_profile)
UserProfile.objects.bulk_update(user_profiles, ["email"])
        for user_profile in user_profiles:
            # Bind the loop variable eagerly; a plain closure would see only
            # the last user_profile by the time the transaction commits.
            transaction.on_commit(
                lambda user_profile=user_profile: flush_user_profile(
                    sender=UserProfile, instance=user_profile
                )
            )
# TODO: Design a bulk event for this or force-reload all clients
send_user_email_update_event(user_profile)
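# A minimal usage sketch for do_set_realm_property (the realm and acting-user
# names here are assumed for illustration; "invite_required" is one of the
# boolean entries in Realm.property_types):
#
#     do_set_realm_property(realm, "invite_required", True, acting_user=admin)
#
# This saves the field, records a RealmAuditLog entry with the old and new
# values, and schedules a realm/update event for all active users on commit.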
def do_set_realm_authentication_methods(
realm: Realm, authentication_methods: Dict[str, bool], *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.authentication_methods_dict()
with transaction.atomic():
for key, value in list(authentication_methods.items()):
index = getattr(realm.authentication_methods, key).number
realm.authentication_methods.set_bit(index, int(value))
realm.save(update_fields=["authentication_methods"])
updated_value = realm.authentication_methods_dict()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=timezone_now(),
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: updated_value,
"property": "authentication_methods",
}
).decode(),
)
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(authentication_methods=updated_value),
)
send_event(realm, event, active_user_ids(realm.id))
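# For illustration, the event sent above carries the full updated mapping,
# e.g. (method names assumed):
#
#     {"type": "realm", "op": "update_dict", "property": "default",
#      "data": {"authentication_methods": {"Email": True, "GitHub": False}}}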
def do_set_realm_message_editing(
realm: Realm,
allow_message_editing: bool,
message_content_edit_limit_seconds: int,
edit_topic_policy: int,
*,
acting_user: Optional[UserProfile],
) -> None:
old_values = dict(
allow_message_editing=realm.allow_message_editing,
message_content_edit_limit_seconds=realm.message_content_edit_limit_seconds,
edit_topic_policy=realm.edit_topic_policy,
)
realm.allow_message_editing = allow_message_editing
realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
realm.edit_topic_policy = edit_topic_policy
event_time = timezone_now()
updated_properties = dict(
allow_message_editing=allow_message_editing,
message_content_edit_limit_seconds=message_content_edit_limit_seconds,
edit_topic_policy=edit_topic_policy,
)
with transaction.atomic():
for updated_property, updated_value in updated_properties.items():
if updated_value == old_values[updated_property]:
continue
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_values[updated_property],
RealmAuditLog.NEW_VALUE: updated_value,
"property": updated_property,
}
).decode(),
)
realm.save(update_fields=list(updated_properties.keys()))
event = dict(
type="realm",
op="update_dict",
property="default",
data=updated_properties,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(
realm: Realm, stream: Optional[Stream], stream_id: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.notifications_stream_id
realm.notifications_stream = stream
with transaction.atomic():
realm.save(update_fields=["notifications_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: stream_id,
"property": "notifications_stream",
}
).decode(),
)
event = dict(
type="realm",
op="update",
property="notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(
realm: Realm, stream: Optional[Stream], stream_id: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.signup_notifications_stream_id
realm.signup_notifications_stream = stream
with transaction.atomic():
realm.save(update_fields=["signup_notifications_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: stream_id,
"property": "signup_notifications_stream",
}
).decode(),
)
event = dict(
type="realm",
op="update",
property="signup_notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_user_default_setting(
realm_user_default: RealmUserDefault,
name: str,
value: Any,
*,
acting_user: Optional[UserProfile],
) -> None:
old_value = getattr(realm_user_default, name)
realm = realm_user_default.realm
event_time = timezone_now()
with transaction.atomic(savepoint=False):
setattr(realm_user_default, name, value)
realm_user_default.save(update_fields=[name])
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_DEFAULT_USER_SETTINGS_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
"property": name,
}
).decode(),
)
event = dict(
type="realm_user_settings_defaults",
op="update",
property=name,
value=value,
)
send_event(realm, event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
"""
Deactivate this realm. Do NOT deactivate the users -- we need to be able to
tell the difference between users that were intentionally deactivated,
e.g. by a realm admin, and users who can't currently use Zulip because their
realm has been deactivated.
"""
if realm.deactivated:
return
realm.deactivated = True
realm.save(update_fields=["deactivated"])
if settings.BILLING_ENABLED:
downgrade_now_without_creating_additional_invoices(realm)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_DEACTIVATED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}
).decode(),
)
ScheduledEmail.objects.filter(realm=realm).delete()
for user in active_humans_in_realm(realm):
# Don't deactivate the users, but do delete their sessions so they get
# bumped to the login screen, where they'll get a realm deactivation
# notice when they try to log in.
delete_user_sessions(user)
# This event will only ever be received by clients with an active
# longpoll connection, because by this point clients will be
# unable to authenticate again to their event queue (triggering an
# immediate reload into the page explaining the realm was
# deactivated). So the purpose of sending this is to flush all
# active longpoll connections for the realm.
event = dict(type="realm", op="deactivated", realm_id=realm.id)
send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
realm.deactivated = False
with transaction.atomic():
realm.save(update_fields=["deactivated"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_REACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}
).decode(),
)
def do_change_realm_subdomain(
realm: Realm, new_subdomain: str, *, acting_user: Optional[UserProfile]
) -> None:
"""Changing a realm's subdomain is a highly disruptive operation,
because all existing clients will need to be updated to point to
the new URL. Further, requests to fetch data from existing event
queues will fail with an authentication error when this change
happens (because the old subdomain is no longer associated with
the realm), making it hard for us to provide a graceful update
experience for clients.
"""
old_subdomain = realm.subdomain
old_uri = realm.uri
# If the realm had been a demo organization scheduled for
# deleting, clear that state.
realm.demo_organization_scheduled_deletion_date = None
realm.string_id = new_subdomain
with transaction.atomic():
realm.save(update_fields=["string_id", "demo_organization_scheduled_deletion_date"])
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_SUBDOMAIN_CHANGED,
event_time=timezone_now(),
acting_user=acting_user,
extra_data={"old_subdomain": old_subdomain, "new_subdomain": new_subdomain},
)
    # If a realm is being renamed multiple times, we should find all the placeholder
    # realms and reset their deactivated_redirect field to point to the new realm URI.
placeholder_realms = Realm.objects.filter(deactivated_redirect=old_uri, deactivated=True)
for placeholder_realm in placeholder_realms:
do_add_deactivated_redirect(placeholder_realm, realm.uri)
    # The block below isn't executed in a transaction with the earlier code,
    # because the functions called below are complex and may send events,
    # which we don't want to do inside atomic blocks.
    # When we change a realm's subdomain, the realm with the old subdomain is
    # effectively deactivated. We create a deactivated placeholder realm with
    # the old subdomain and set its deactivated_redirect to the new URI, so
    # that we can tell users the realm has moved to a new subdomain.
placeholder_realm = do_create_realm(old_subdomain, realm.name)
do_deactivate_realm(placeholder_realm, acting_user=None)
do_add_deactivated_redirect(placeholder_realm, realm.uri)
def do_add_deactivated_redirect(realm: Realm, redirect_url: str) -> None:
realm.deactivated_redirect = redirect_url
realm.save(update_fields=["deactivated_redirect"])
def do_scrub_realm(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
if settings.BILLING_ENABLED:
downgrade_now_without_creating_additional_invoices(realm)
users = UserProfile.objects.filter(realm=realm)
for user in users:
do_delete_messages_by_sender(user)
do_delete_avatar_image(user, acting_user=acting_user)
user.full_name = f"Scrubbed {generate_key()[:15]}"
scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}"
user.email = scrubbed_email
user.delivery_email = scrubbed_email
user.save(update_fields=["full_name", "email", "delivery_email"])
do_remove_realm_custom_profile_fields(realm)
Attachment.objects.filter(realm=realm).delete()
RealmAuditLog.objects.create(
realm=realm,
event_time=timezone_now(),
acting_user=acting_user,
event_type=RealmAuditLog.REALM_SCRUBBED,
)
def do_delete_user(user_profile: UserProfile) -> None:
if user_profile.realm.is_zephyr_mirror_realm:
raise AssertionError("Deleting zephyr mirror users is not supported")
do_deactivate_user(user_profile, acting_user=None)
subscribed_huddle_recipient_ids = set(
Subscription.objects.filter(
user_profile=user_profile, recipient__type=Recipient.HUDDLE
).values_list("recipient_id", flat=True)
)
user_id = user_profile.id
realm = user_profile.realm
date_joined = user_profile.date_joined
personal_recipient = user_profile.recipient
with transaction.atomic():
user_profile.delete()
# Recipient objects don't get deleted through CASCADE, so we need to handle
# the user's personal recipient manually. This will also delete all Messages pointing
# to this recipient (all private messages sent to the user).
assert personal_recipient is not None
personal_recipient.delete()
replacement_user = create_user(
force_id=user_id,
email=f"deleteduser{user_id}@{get_fake_email_domain(realm)}",
password=None,
realm=realm,
full_name=f"Deleted User {user_id}",
active=False,
is_mirror_dummy=True,
force_date_joined=date_joined,
)
subs_to_recreate = [
Subscription(
user_profile=replacement_user,
recipient=recipient,
is_user_active=replacement_user.is_active,
)
for recipient in Recipient.objects.filter(id__in=subscribed_huddle_recipient_ids)
]
Subscription.objects.bulk_create(subs_to_recreate)
RealmAuditLog.objects.create(
realm=replacement_user.realm,
modified_user=replacement_user,
acting_user=None,
event_type=RealmAuditLog.USER_DELETED,
event_time=timezone_now(),
)
def change_user_is_active(user_profile: UserProfile, value: bool) -> None:
"""
Helper function for changing the .is_active field. Not meant as a standalone function
in production code as properly activating/deactivating users requires more steps.
This changes the is_active value and saves it, while ensuring
Subscription.is_user_active values are updated in the same db transaction.
"""
with transaction.atomic(savepoint=False):
user_profile.is_active = value
user_profile.save(update_fields=["is_active"])
Subscription.objects.filter(user_profile=user_profile).update(is_user_active=value)
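# Sketch of the invariant this helper maintains (values illustrative): after
# change_user_is_active(user_profile, False), every Subscription row for that
# user has is_user_active=False in the same transaction, so subscriber queries
# that filter on is_user_active never observe a half-updated state.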
def get_active_bots_owned_by_user(user_profile: UserProfile) -> QuerySet:
return UserProfile.objects.filter(is_bot=True, is_active=True, bot_owner=user_profile)
def do_deactivate_user(
user_profile: UserProfile, _cascade: bool = True, *, acting_user: Optional[UserProfile]
) -> None:
if not user_profile.is_active:
return
if _cascade:
# We need to deactivate bots before the target user, to ensure
# that a failure partway through this function cannot result
# in only the user being deactivated.
bot_profiles = get_active_bots_owned_by_user(user_profile)
for profile in bot_profiles:
do_deactivate_user(profile, _cascade=False, acting_user=acting_user)
with transaction.atomic():
if user_profile.realm.is_zephyr_mirror_realm: # nocoverage
# For zephyr mirror users, we need to make them a mirror dummy
# again; otherwise, other users won't get the correct behavior
# when trying to send messages to this person inside Zulip.
#
# Ideally, we need to also ensure their zephyr mirroring bot
# isn't running, but that's a separate issue.
user_profile.is_mirror_dummy = True
user_profile.save(update_fields=["is_mirror_dummy"])
change_user_is_active(user_profile, False)
delete_user_sessions(user_profile)
clear_scheduled_emails(user_profile.id)
revoke_invites_generated_by_user(user_profile)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_DEACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
increment=-1,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
event = dict(
type="realm_user",
op="remove",
person=dict(user_id=user_profile.id, full_name=user_profile.full_name),
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
event = dict(
type="realm_bot",
op="remove",
bot=dict(user_id=user_profile.id, full_name=user_profile.full_name),
)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
@transaction.atomic(savepoint=False)
def do_deactivate_stream(
stream: Stream, log: bool = True, *, acting_user: Optional[UserProfile]
) -> None:
# We want to mark all messages in the to-be-deactivated stream as
# read for all users; otherwise they will pollute queries like
# "Get the user's first unread message". Since this can be an
# expensive operation, we do it via the deferred_work queue
# processor.
deferred_work_event = {
"type": "mark_stream_messages_as_read_for_everyone",
"stream_recipient_id": stream.recipient_id,
}
transaction.on_commit(lambda: queue_json_publish("deferred_work", deferred_work_event))
# Get the affected user ids *before* we deactivate everybody.
affected_user_ids = can_access_stream_user_ids(stream)
get_active_subscriptions_for_stream_id(stream.id, include_deactivated_users=True).update(
active=False
)
was_invite_only = stream.invite_only
stream.deactivated = True
stream.invite_only = True
# Preserve as much as possible the original stream name while giving it a
# special prefix that both indicates that the stream is deactivated and
# frees up the original name for reuse.
old_name = stream.name
    # Prepend a substring of the hashed stream ID to the new stream name,
    # so the deactivated name cannot collide with an existing stream name.
    stream_id_hash = hashlib.sha512(str(stream.id).encode()).hexdigest()[0:7]
    new_name = (stream_id_hash + "!DEACTIVATED:" + old_name)[: Stream.MAX_NAME_LENGTH]
    stream.name = new_name
stream.save(update_fields=["name", "deactivated", "invite_only"])
# If this is a default stream, remove it, properly sending a
# notification to browser clients.
if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
do_remove_default_stream(stream)
default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id)
for group in default_stream_groups_for_stream:
do_remove_streams_from_default_stream_group(stream.realm, group, [stream])
# Remove the old stream information from remote cache.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
cache_delete(old_cache_key)
stream_dict = stream.to_dict()
stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
event = dict(type="stream", op="delete", streams=[stream_dict])
transaction.on_commit(lambda: send_event(stream.realm, event, affected_user_ids))
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=acting_user,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_DEACTIVATED,
event_time=event_time,
)
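# For example (hash digits illustrative), deactivating a stream named "general"
# renames it to something like "ab12cd3!DEACTIVATED:general": the first 7 hex
# digits of sha512(str(stream.id)), then the marker and the old name, truncated
# to Stream.MAX_NAME_LENGTH. This frees "general" for reuse by a new stream.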
def send_user_email_update_event(user_profile: UserProfile) -> None:
payload = dict(user_id=user_profile.id, new_email=user_profile.email)
event = dict(type="realm_user", op="update", person=payload)
transaction.on_commit(
lambda: send_event(
user_profile.realm,
event,
active_user_ids(user_profile.realm_id),
)
)
@transaction.atomic(savepoint=False)
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
delete_user_profile_caches([user_profile])
user_profile.delivery_email = new_email
if user_profile.email_address_is_realm_public():
user_profile.email = new_email
user_profile.save(update_fields=["email", "delivery_email"])
else:
user_profile.save(update_fields=["delivery_email"])
# We notify just the target user (and eventually org admins, only
# when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS)
# about their new delivery email, since that field is private.
payload = dict(user_id=user_profile.id, delivery_email=new_email)
event = dict(type="realm_user", op="update", person=payload)
transaction.on_commit(lambda: send_event(user_profile.realm, event, [user_profile.id]))
if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
# If the user is using Gravatar to manage their email address,
# their Gravatar just changed, and we need to notify other
# clients.
notify_avatar_url_change(user_profile)
if user_profile.email_address_is_realm_public():
# Additionally, if we're also changing the publicly visible
# email, we send a new_email event as well.
send_user_email_update_event(user_profile)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_EMAIL_CHANGED,
event_time=event_time,
)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
old_email = user_profile.delivery_email
obj = EmailChangeStatus.objects.create(
new_email=new_email,
old_email=old_email,
user_profile=user_profile,
realm=user_profile.realm,
)
activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE)
from zerver.context_processors import common_context
context = common_context(user_profile)
context.update(
old_email=old_email,
new_email=new_email,
activate_url=activation_url,
)
language = user_profile.default_language
send_email(
"zerver/emails/confirm_new_email",
to_emails=[new_email],
from_name=FromAddress.security_email_from_name(language=language),
from_address=FromAddress.tokenized_no_reply_address(),
language=language,
context=context,
realm=user_profile.realm,
)
def compute_irc_user_fullname(email: str) -> str:
return email.split("@")[0] + " (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
return email.split("@")[0] + " (XMPP)"
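# For example (address assumed): compute_irc_user_fullname("alice@example.com")
# returns "alice (IRC)", and compute_jabber_user_fullname("alice@example.com")
# returns "alice (XMPP)".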
@cache_with_key(
lambda realm, email, f: user_profile_delivery_email_cache_key(email, realm),
timeout=3600 * 24 * 7,
)
def create_mirror_user_if_needed(
realm: Realm, email: str, email_to_fullname: Callable[[str], str]
) -> UserProfile:
try:
return get_user_by_delivery_email(email, realm)
except UserProfile.DoesNotExist:
try:
# Forge a user for this person
return create_user(
email=email,
password=None,
realm=realm,
full_name=email_to_fullname(email),
active=False,
is_mirror_dummy=True,
)
except IntegrityError:
return get_user_by_delivery_email(email, realm)
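# Note on the @cache_with_key decorator above: results are memoized for one
# week (timeout=3600 * 24 * 7), keyed on the delivery email within the realm,
# so a burst of mirrored messages from the same address hits the cache rather
# than issuing a database lookup per message.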
def render_incoming_message(
message: Message,
content: str,
user_ids: Set[int],
realm: Realm,
mention_data: Optional[MentionData] = None,
email_gateway: bool = False,
) -> MessageRenderingResult:
realm_alert_words_automaton = get_alert_word_automaton(realm)
try:
rendering_result = render_markdown(
message=message,
content=content,
realm=realm,
realm_alert_words_automaton=realm_alert_words_automaton,
mention_data=mention_data,
email_gateway=email_gateway,
)
except MarkdownRenderingException:
raise JsonableError(_("Unable to render message"))
return rendering_result
class RecipientInfoResult(TypedDict):
active_user_ids: Set[int]
online_push_user_ids: Set[int]
pm_mention_email_disabled_user_ids: Set[int]
pm_mention_push_disabled_user_ids: Set[int]
stream_email_user_ids: Set[int]
stream_push_user_ids: Set[int]
wildcard_mention_user_ids: Set[int]
muted_sender_user_ids: Set[int]
um_eligible_user_ids: Set[int]
long_term_idle_user_ids: Set[int]
default_bot_user_ids: Set[int]
service_bot_tuples: List[Tuple[int, int]]
all_bot_user_ids: Set[int]
def get_recipient_info(
*,
realm_id: int,
recipient: Recipient,
sender_id: int,
stream_topic: Optional[StreamTopicTarget],
possibly_mentioned_user_ids: AbstractSet[int] = set(),
possible_wildcard_mention: bool = True,
) -> RecipientInfoResult:
stream_push_user_ids: Set[int] = set()
stream_email_user_ids: Set[int] = set()
wildcard_mention_user_ids: Set[int] = set()
muted_sender_user_ids: Set[int] = get_muting_users(sender_id)
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
message_to_user_ids = list({recipient.type_id, sender_id})
assert len(message_to_user_ids) in [1, 2]
elif recipient.type == Recipient.STREAM:
# Anybody calling us w/r/t a stream message needs to supply
# stream_topic. We may eventually want to have different versions
# of this function for different message types.
assert stream_topic is not None
user_ids_muting_topic = stream_topic.user_ids_muting_topic()
subscription_rows = (
get_subscriptions_for_send_message(
realm_id=realm_id,
stream_id=stream_topic.stream_id,
possible_wildcard_mention=possible_wildcard_mention,
possibly_mentioned_user_ids=possibly_mentioned_user_ids,
)
.annotate(
user_profile_email_notifications=F(
"user_profile__enable_stream_email_notifications"
),
user_profile_push_notifications=F("user_profile__enable_stream_push_notifications"),
user_profile_wildcard_mentions_notify=F("user_profile__wildcard_mentions_notify"),
)
.values(
"user_profile_id",
"push_notifications",
"email_notifications",
"wildcard_mentions_notify",
"user_profile_email_notifications",
"user_profile_push_notifications",
"user_profile_wildcard_mentions_notify",
"is_muted",
)
.order_by("user_profile_id")
)
message_to_user_ids = [row["user_profile_id"] for row in subscription_rows]
def should_send(setting: str, row: Dict[str, Any]) -> bool:
# This implements the structure that the UserProfile stream notification settings
# are defaults, which can be overridden by the stream-level settings (if those
# values are not null).
if row["is_muted"]:
return False
if row["user_profile_id"] in user_ids_muting_topic:
return False
if row[setting] is not None:
return row[setting]
return row["user_profile_" + setting]
stream_push_user_ids = {
row["user_profile_id"]
for row in subscription_rows
# Note: muting a stream overrides stream_push_notify
if should_send("push_notifications", row)
}
stream_email_user_ids = {
row["user_profile_id"]
for row in subscription_rows
# Note: muting a stream overrides stream_email_notify
if should_send("email_notifications", row)
}
if possible_wildcard_mention:
# If there's a possible wildcard mention, we need to
# determine the set of users who have enabled the
# "wildcard_mentions_notify" setting (that is, the set of
# users for whom wildcard mentions should be treated like
# personal mentions for notifications). This setting
# applies to both email and push notifications.
wildcard_mention_user_ids = {
row["user_profile_id"]
for row in subscription_rows
if should_send("wildcard_mentions_notify", row)
}
elif recipient.type == Recipient.HUDDLE:
message_to_user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError("Bad recipient type")
message_to_user_id_set = set(message_to_user_ids)
user_ids = set(message_to_user_id_set)
# Important note: Because we haven't rendered Markdown yet, we
# don't yet know which of these possibly-mentioned users was
# actually mentioned in the message (in other words, the
# mention syntax might have been in a code block or otherwise
    # escaped). `get_ids_for` filters these extra user rows out of the
    # data structures that are not related to bots.
user_ids |= possibly_mentioned_user_ids
if user_ids:
query = UserProfile.objects.filter(is_active=True).values(
"id",
"enable_online_push_notifications",
"enable_offline_email_notifications",
"enable_offline_push_notifications",
"is_bot",
"bot_type",
"long_term_idle",
)
        # query_for_ids is highly optimized for large queries, and we
        # need this codepath to be fast (it's part of sending messages).
query = query_for_ids(
query=query,
user_ids=sorted(user_ids),
field="id",
)
rows = list(query)
else:
# TODO: We should always have at least one user_id as a recipient
# of any message we send. Right now the exception to this
# rule is `notify_new_user`, which, at least in a possibly
# contrived test scenario, can attempt to send messages
# to an inactive bot. When we plug that hole, we can avoid
# this `else` clause and just `assert(user_ids)`.
#
# UPDATE: It's February 2020 (and a couple years after the above
# comment was written). We have simplified notify_new_user
# so that it should be a little easier to reason about.
# There is currently some cleanup to how we handle cross
# realm bots that is still under development. Once that
# effort is complete, we should be able to address this
# to-do.
rows = []
def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
"""Only includes users on the explicit message to line"""
return {row["id"] for row in rows if f(row)} & message_to_user_id_set
def is_service_bot(row: Dict[str, Any]) -> bool:
return row["is_bot"] and (row["bot_type"] in UserProfile.SERVICE_BOT_TYPES)
active_user_ids = get_ids_for(lambda r: True)
online_push_user_ids = get_ids_for(
lambda r: r["enable_online_push_notifications"],
)
    # We deal with only the users who have disabled this setting, since that
    # will usually be a much smaller set than those who have enabled it (which
    # is the default).
pm_mention_email_disabled_user_ids = get_ids_for(
lambda r: not r["enable_offline_email_notifications"]
)
pm_mention_push_disabled_user_ids = get_ids_for(
lambda r: not r["enable_offline_push_notifications"]
)
# Service bots don't get UserMessage rows.
um_eligible_user_ids = get_ids_for(
lambda r: not is_service_bot(r),
)
long_term_idle_user_ids = get_ids_for(
lambda r: r["long_term_idle"],
)
# These three bot data structures need to filter from the full set
# of users who either are receiving the message or might have been
# mentioned in it, and so can't use get_ids_for.
#
# Further in the do_send_messages code path, once
# `mentioned_user_ids` has been computed via Markdown, we'll filter
# these data structures for just those users who are either a
# direct recipient or were mentioned; for now, we're just making
# sure we have the data we need for that without extra database
# queries.
default_bot_user_ids = {
row["id"] for row in rows if row["is_bot"] and row["bot_type"] == UserProfile.DEFAULT_BOT
}
service_bot_tuples = [(row["id"], row["bot_type"]) for row in rows if is_service_bot(row)]
# We also need the user IDs of all bots, to avoid trying to send push/email
# notifications to them. This set will be directly sent to the event queue code
# where we determine notifiability of the message for users.
all_bot_user_ids = {row["id"] for row in rows if row["is_bot"]}
info: RecipientInfoResult = dict(
active_user_ids=active_user_ids,
online_push_user_ids=online_push_user_ids,
pm_mention_email_disabled_user_ids=pm_mention_email_disabled_user_ids,
pm_mention_push_disabled_user_ids=pm_mention_push_disabled_user_ids,
stream_push_user_ids=stream_push_user_ids,
stream_email_user_ids=stream_email_user_ids,
wildcard_mention_user_ids=wildcard_mention_user_ids,
muted_sender_user_ids=muted_sender_user_ids,
um_eligible_user_ids=um_eligible_user_ids,
long_term_idle_user_ids=long_term_idle_user_ids,
default_bot_user_ids=default_bot_user_ids,
service_bot_tuples=service_bot_tuples,
all_bot_user_ids=all_bot_user_ids,
)
return info
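# Sketch of the notification-settings precedence implemented by should_send
# above (row values assumed for illustration): a subscription row like
#     {"is_muted": False, "push_notifications": None,
#      "user_profile_push_notifications": True}
# falls back to the user-level default and lands in stream_push_user_ids, while
# an explicit stream-level push_notifications=False overrides that default, and
# is_muted=True or a muted topic excludes the user regardless of either value.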
def get_service_bot_events(
sender: UserProfile,
service_bot_tuples: List[Tuple[int, int]],
mentioned_user_ids: Set[int],
active_user_ids: Set[int],
recipient_type: int,
) -> Dict[str, List[Dict[str, Any]]]:
event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
# Avoid infinite loops by preventing messages sent by bots from generating
# Service events.
if sender.is_bot:
return event_dict
def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
queue_name = "outgoing_webhooks"
elif bot_type == UserProfile.EMBEDDED_BOT:
queue_name = "embedded_bots"
else:
logging.error(
"Unexpected bot_type for Service bot id=%s: %s",
user_profile_id,
bot_type,
)
return
is_stream = recipient_type == Recipient.STREAM
# Important note: service_bot_tuples may contain service bots
# who were not actually mentioned in the message (e.g. if
# mention syntax for that bot appeared in a code block).
# Thus, it is important to filter any users who aren't part of
# either mentioned_user_ids (the actual mentioned users) or
# active_user_ids (the actual recipients).
#
# So even though this is implied by the logic below, we filter
# these not-actually-mentioned users here, to help keep this
# function future-proof.
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
return
# Mention triggers, for stream messages
if is_stream and user_profile_id in mentioned_user_ids:
trigger = "mention"
# PM triggers for personal and huddle messages
elif (not is_stream) and (user_profile_id in active_user_ids):
trigger = "private_message"
else:
return
event_dict[queue_name].append(
{
"trigger": trigger,
"user_profile_id": user_profile_id,
}
)
for user_profile_id, bot_type in service_bot_tuples:
maybe_add_event(
user_profile_id=user_profile_id,
bot_type=bot_type,
)
return event_dict
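# Illustrative result (user id assumed): for a stream message that mentions an
# outgoing webhook bot with id 7, get_service_bot_events returns
#     {"outgoing_webhooks": [{"trigger": "mention", "user_profile_id": 7}]}
# which do_send_messages later publishes to the corresponding queue.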
def do_schedule_messages(send_message_requests: Sequence[SendMessageRequest]) -> List[int]:
scheduled_messages: List[ScheduledMessage] = []
for send_request in send_message_requests:
scheduled_message = ScheduledMessage()
scheduled_message.sender = send_request.message.sender
scheduled_message.recipient = send_request.message.recipient
topic_name = send_request.message.topic_name()
scheduled_message.set_topic_name(topic_name=topic_name)
scheduled_message.content = send_request.message.content
scheduled_message.sending_client = send_request.message.sending_client
scheduled_message.stream = send_request.stream
scheduled_message.realm = send_request.realm
assert send_request.deliver_at is not None
scheduled_message.scheduled_timestamp = send_request.deliver_at
if send_request.delivery_type == "send_later":
scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
elif send_request.delivery_type == "remind":
scheduled_message.delivery_type = ScheduledMessage.REMIND
scheduled_messages.append(scheduled_message)
ScheduledMessage.objects.bulk_create(scheduled_messages)
return [scheduled_message.id for scheduled_message in scheduled_messages]
def build_message_send_dict(
message: Message,
stream: Optional[Stream] = None,
local_id: Optional[str] = None,
sender_queue_id: Optional[str] = None,
realm: Optional[Realm] = None,
widget_content_dict: Optional[Dict[str, Any]] = None,
email_gateway: bool = False,
mention_backend: Optional[MentionBackend] = None,
limit_unread_user_ids: Optional[Set[int]] = None,
) -> SendMessageRequest:
"""Returns a dictionary that can be passed into do_send_messages. In
production, this is always called by check_message, but some
testing code paths call it directly.
"""
if realm is None:
realm = message.sender.realm
if mention_backend is None:
mention_backend = MentionBackend(realm.id)
mention_data = MentionData(
mention_backend=mention_backend,
content=message.content,
)
if message.is_stream_message():
stream_id = message.recipient.type_id
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=message.topic_name(),
)
else:
stream_topic = None
info = get_recipient_info(
realm_id=realm.id,
recipient=message.recipient,
sender_id=message.sender_id,
stream_topic=stream_topic,
possibly_mentioned_user_ids=mention_data.get_user_ids(),
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
    # Render our message_dicts.
rendering_result = render_incoming_message(
message,
message.content,
info["active_user_ids"],
realm,
mention_data=mention_data,
email_gateway=email_gateway,
)
message.rendered_content = rendering_result.rendered_content
message.rendered_content_version = markdown_version
links_for_embed = rendering_result.links_for_preview
mentioned_user_groups_map = get_user_group_mentions_data(
mentioned_user_ids=rendering_result.mentions_user_ids,
mentioned_user_group_ids=list(rendering_result.mentions_user_group_ids),
mention_data=mention_data,
)
# For single user as well as user group mentions, we set the `mentioned`
# flag on `UserMessage`
for group_id in rendering_result.mentions_user_group_ids:
members = mention_data.get_group_members(group_id)
rendering_result.mentions_user_ids.update(members)
# Only send data to Tornado about wildcard mentions if message
# rendering determined the message had an actual wildcard
# mention in it (and not e.g. wildcard mention syntax inside a
# code block).
if rendering_result.mentions_wildcard:
wildcard_mention_user_ids = info["wildcard_mention_user_ids"]
else:
wildcard_mention_user_ids = set()
"""
Once we have the actual list of mentioned ids from message
rendering, we can patch in "default bots" (aka normal bots)
who were directly mentioned in this message as eligible to
get UserMessage rows.
"""
mentioned_user_ids = rendering_result.mentions_user_ids
default_bot_user_ids = info["default_bot_user_ids"]
mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
info["um_eligible_user_ids"] |= mentioned_bot_user_ids
message_send_dict = SendMessageRequest(
stream=stream,
local_id=local_id,
sender_queue_id=sender_queue_id,
realm=realm,
mention_data=mention_data,
mentioned_user_groups_map=mentioned_user_groups_map,
message=message,
rendering_result=rendering_result,
active_user_ids=info["active_user_ids"],
online_push_user_ids=info["online_push_user_ids"],
pm_mention_email_disabled_user_ids=info["pm_mention_email_disabled_user_ids"],
pm_mention_push_disabled_user_ids=info["pm_mention_push_disabled_user_ids"],
stream_push_user_ids=info["stream_push_user_ids"],
stream_email_user_ids=info["stream_email_user_ids"],
muted_sender_user_ids=info["muted_sender_user_ids"],
um_eligible_user_ids=info["um_eligible_user_ids"],
long_term_idle_user_ids=info["long_term_idle_user_ids"],
default_bot_user_ids=info["default_bot_user_ids"],
service_bot_tuples=info["service_bot_tuples"],
all_bot_user_ids=info["all_bot_user_ids"],
wildcard_mention_user_ids=wildcard_mention_user_ids,
links_for_embed=links_for_embed,
widget_content=widget_content_dict,
limit_unread_user_ids=limit_unread_user_ids,
)
return message_send_dict
def do_send_messages(
send_message_requests_maybe_none: Sequence[Optional[SendMessageRequest]],
email_gateway: bool = False,
mark_as_read: Sequence[int] = [],
) -> List[int]:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
# Filter out messages which didn't pass internal_prep_message properly
send_message_requests = [
send_request
for send_request in send_message_requests_maybe_none
if send_request is not None
]
# Save the message receipts in the database
user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict)
with transaction.atomic():
Message.objects.bulk_create(send_request.message for send_request in send_message_requests)
# Claim attachments in message
for send_request in send_message_requests:
if do_claim_attachments(
send_request.message, send_request.rendering_result.potential_attachment_path_ids
):
send_request.message.has_attachment = True
send_request.message.save(update_fields=["has_attachment"])
ums: List[UserMessageLite] = []
for send_request in send_message_requests:
# Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
# they will be processed later.
mentioned_user_ids = send_request.rendering_result.mentions_user_ids
# Extend the set with users who have muted the sender.
mark_as_read_user_ids = send_request.muted_sender_user_ids
mark_as_read_user_ids.update(mark_as_read)
user_messages = create_user_messages(
message=send_request.message,
rendering_result=send_request.rendering_result,
um_eligible_user_ids=send_request.um_eligible_user_ids,
long_term_idle_user_ids=send_request.long_term_idle_user_ids,
stream_push_user_ids=send_request.stream_push_user_ids,
stream_email_user_ids=send_request.stream_email_user_ids,
mentioned_user_ids=mentioned_user_ids,
mark_as_read_user_ids=mark_as_read_user_ids,
limit_unread_user_ids=send_request.limit_unread_user_ids,
)
for um in user_messages:
user_message_flags[send_request.message.id][um.user_profile_id] = um.flags_list()
ums.extend(user_messages)
send_request.message.service_queue_events = get_service_bot_events(
sender=send_request.message.sender,
service_bot_tuples=send_request.service_bot_tuples,
mentioned_user_ids=mentioned_user_ids,
active_user_ids=send_request.active_user_ids,
recipient_type=send_request.message.recipient.type,
)
bulk_insert_ums(ums)
for send_request in send_message_requests:
do_widget_post_save_actions(send_request)
# This next loop is responsible for notifying other parts of the
# Zulip system about the messages we just committed to the database:
# * Notifying clients via send_event
# * Triggering outgoing webhooks via the service event queue.
# * Updating the `first_message_id` field for streams without any message history.
# * Implementing the Welcome Bot reply hack
# * Adding links to the embed_links queue for open graph processing.
for send_request in send_message_requests:
realm_id: Optional[int] = None
if send_request.message.is_stream_message():
if send_request.stream is None:
stream_id = send_request.message.recipient.type_id
send_request.stream = Stream.objects.select_related().get(id=stream_id)
# assert needed because stubs for django are missing
assert send_request.stream is not None
realm_id = send_request.stream.realm_id
# Deliver events to the real-time push system, as well as
# enqueuing any additional processing triggered by the message.
wide_message_dict = MessageDict.wide_dict(send_request.message, realm_id)
user_flags = user_message_flags.get(send_request.message.id, {})
"""
TODO: We may want to limit user_ids to only those users who have
UserMessage rows, if only for minor performance reasons.
For now we queue events for all subscribers/sendees of the
message, since downstream code may still do notifications
that don't require UserMessage rows.
Our automated tests have gotten better on this codepath,
but we may have coverage gaps, so we should be careful
about changing the next line.
"""
user_ids = send_request.active_user_ids | set(user_flags.keys())
sender_id = send_request.message.sender_id
# We make sure the sender is listed first in the `users` list;
# this results in the sender receiving the message first if
# there are thousands of recipients, decreasing perceived latency.
if sender_id in user_ids:
user_list = [sender_id] + list(user_ids - {sender_id})
else:
user_list = list(user_ids)
class UserData(TypedDict):
id: int
flags: List[str]
mentioned_user_group_id: Optional[int]
users: List[UserData] = []
for user_id in user_list:
flags = user_flags.get(user_id, [])
user_data: UserData = dict(id=user_id, flags=flags, mentioned_user_group_id=None)
if user_id in send_request.mentioned_user_groups_map:
user_data["mentioned_user_group_id"] = send_request.mentioned_user_groups_map[
user_id
]
users.append(user_data)
sender = send_request.message.sender
message_type = wide_message_dict["type"]
active_users_data = [
ActivePresenceIdleUserData(
alerted="has_alert_word" in user_flags.get(user_id, []),
notifications_data=UserMessageNotificationsData.from_user_id_sets(
user_id=user_id,
flags=user_flags.get(user_id, []),
private_message=(message_type == "private"),
online_push_user_ids=send_request.online_push_user_ids,
pm_mention_push_disabled_user_ids=send_request.pm_mention_push_disabled_user_ids,
pm_mention_email_disabled_user_ids=send_request.pm_mention_email_disabled_user_ids,
stream_push_user_ids=send_request.stream_push_user_ids,
stream_email_user_ids=send_request.stream_email_user_ids,
wildcard_mention_user_ids=send_request.wildcard_mention_user_ids,
muted_sender_user_ids=send_request.muted_sender_user_ids,
all_bot_user_ids=send_request.all_bot_user_ids,
),
)
for user_id in send_request.active_user_ids
]
presence_idle_user_ids = get_active_presence_idle_user_ids(
realm=sender.realm,
sender_id=sender.id,
active_users_data=active_users_data,
)
event = dict(
type="message",
message=send_request.message.id,
message_dict=wide_message_dict,
presence_idle_user_ids=presence_idle_user_ids,
online_push_user_ids=list(send_request.online_push_user_ids),
pm_mention_push_disabled_user_ids=list(send_request.pm_mention_push_disabled_user_ids),
pm_mention_email_disabled_user_ids=list(
send_request.pm_mention_email_disabled_user_ids
),
stream_push_user_ids=list(send_request.stream_push_user_ids),
stream_email_user_ids=list(send_request.stream_email_user_ids),
wildcard_mention_user_ids=list(send_request.wildcard_mention_user_ids),
muted_sender_user_ids=list(send_request.muted_sender_user_ids),
all_bot_user_ids=list(send_request.all_bot_user_ids),
)
if send_request.message.is_stream_message():
# Note: This is where authorization for single-stream
# get_updates happens! We only attach stream data to the
# notify new_message request if it's a public stream,
# ensuring that in the tornado server, non-public stream
# messages are only associated to their subscribed users.
# assert needed because stubs for django are missing
assert send_request.stream is not None
if send_request.stream.is_public():
event["realm_id"] = send_request.stream.realm_id
event["stream_name"] = send_request.stream.name
if send_request.stream.invite_only:
event["invite_only"] = True
if send_request.stream.first_message_id is None:
send_request.stream.first_message_id = send_request.message.id
send_request.stream.save(update_fields=["first_message_id"])
if send_request.local_id is not None:
event["local_id"] = send_request.local_id
if send_request.sender_queue_id is not None:
event["sender_queue_id"] = send_request.sender_queue_id
send_event(send_request.realm, event, users)
if send_request.links_for_embed:
event_data = {
"message_id": send_request.message.id,
"message_content": send_request.message.content,
"message_realm_id": send_request.realm.id,
"urls": list(send_request.links_for_embed),
}
queue_json_publish("embed_links", event_data)
if send_request.message.recipient.type == Recipient.PERSONAL:
welcome_bot_id = get_system_bot(
settings.WELCOME_BOT, send_request.message.sender.realm_id
).id
if (
welcome_bot_id in send_request.active_user_ids
and welcome_bot_id != send_request.message.sender_id
):
from zerver.lib.onboarding import send_welcome_bot_response
send_welcome_bot_response(send_request)
for queue_name, events in send_request.message.service_queue_events.items():
for event in events:
queue_json_publish(
queue_name,
{
"message": wide_message_dict,
"trigger": event["trigger"],
"user_profile_id": event["user_profile_id"],
},
)
return [send_request.message.id for send_request in send_message_requests]
class UserMessageLite:
"""
The Django ORM is too slow for bulk operations. This class
is optimized for the simple use case of inserting a bunch of
rows into zerver_usermessage.
"""
def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
self.user_profile_id = user_profile_id
self.message_id = message_id
self.flags = flags
def flags_list(self) -> List[str]:
return UserMessage.flags_list_for_flags(self.flags)
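# A minimal sketch of how these objects are built (ids assumed): flags is a
# plain bitmask mirroring UserMessage.flags, e.g.
#     um = UserMessageLite(
#         user_profile_id=42,
#         message_id=1234,
#         flags=int(UserMessage.flags.read | UserMessage.flags.mentioned),
#     )
#     um.flags_list()  # e.g. ["read", "mentioned"]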
def create_user_messages(
message: Message,
rendering_result: MessageRenderingResult,
um_eligible_user_ids: AbstractSet[int],
long_term_idle_user_ids: AbstractSet[int],
stream_push_user_ids: AbstractSet[int],
stream_email_user_ids: AbstractSet[int],
mentioned_user_ids: AbstractSet[int],
mark_as_read_user_ids: Set[int],
limit_unread_user_ids: Optional[Set[int]],
) -> List[UserMessageLite]:
# These properties on the Message are set via
# render_markdown by code in the Markdown inline patterns
ids_with_alert_words = rendering_result.user_ids_with_alert_words
sender_id = message.sender.id
is_stream_message = message.is_stream_message()
base_flags = 0
if rendering_result.mentions_wildcard:
base_flags |= UserMessage.flags.wildcard_mentioned
if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
base_flags |= UserMessage.flags.is_private
# For long_term_idle (aka soft-deactivated) users, we are allowed
# to optimize by lazily not creating UserMessage rows that would
# have the default 0 flag set (since the soft-reactivation logic
# knows how to create those when the user comes back). We need to
# create the UserMessage rows for these long_term_idle users
# non-lazily in a few cases:
#
# * There are nonzero flags (e.g. the user was mentioned), since
# that case is rare and this saves a lot of complexity in
# soft-reactivation.
#
# * If the user is going to be notified (e.g. they get push/email
# notifications for every message on a stream), since in that
# case the notifications code will call `access_message` on the
# message to re-verify permissions, and for private streams,
# will get an error if the UserMessage row doesn't exist yet.
#
# See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
# for details on this system.
user_messages = []
for user_profile_id in um_eligible_user_ids:
flags = base_flags
if (
(user_profile_id == sender_id and message.sent_by_human())
or user_profile_id in mark_as_read_user_ids
or (limit_unread_user_ids is not None and user_profile_id not in limit_unread_user_ids)
):
flags |= UserMessage.flags.read
if user_profile_id in mentioned_user_ids:
flags |= UserMessage.flags.mentioned
if user_profile_id in ids_with_alert_words:
flags |= UserMessage.flags.has_alert_word
if (
user_profile_id in long_term_idle_user_ids
and user_profile_id not in stream_push_user_ids
and user_profile_id not in stream_email_user_ids
and is_stream_message
and int(flags) == 0
):
continue
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message.id,
flags=flags,
)
user_messages.append(um)
return user_messages
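# Worked example of the long_term_idle optimization above (values assumed): a
# soft-deactivated user who is neither mentioned nor notified for a stream
# message ends up with flags == 0, so the loop skips creating their UserMessage
# row; the same message sent as a private message sets the is_private flag, so
# the row is created non-lazily.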
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
"""
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
    users shows a speedup of 0.436 -> 0.027 seconds, so we're
    talking about a roughly 16x speedup.
"""
if not ums:
return
vals = [(um.user_profile_id, um.message_id, um.flags) for um in ums]
query = SQL(
"""
INSERT into
zerver_usermessage (user_profile_id, message_id, flags)
VALUES %s
"""
)
with connection.cursor() as cursor:
execute_values(cursor.cursor, query, vals)
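# Hedged note on the query above: psycopg2's execute_values expands the single
# VALUES %s placeholder into one multi-row literal, so three rows become
#     VALUES (1, 10, 0), (2, 10, 1), (3, 10, 2)
# and the whole batch is written in a single round trip to the database.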
def verify_submessage_sender(
*,
message_id: int,
message_sender_id: int,
submessage_sender_id: int,
) -> None:
"""Even though our submessage architecture is geared toward
    collaboration among all message readers, we still enforce that
    the first person to attach a submessage to the message
must be the original sender of the message.
"""
if message_sender_id == submessage_sender_id:
return
if SubMessage.objects.filter(
message_id=message_id,
sender_id=message_sender_id,
).exists():
return
raise JsonableError(_("You cannot attach a submessage to this message."))
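# In practice (an illustrative reading of the rule above): the message's author
# must attach the first submessage, such as the widget definition for a poll,
# and once one exists, any reader may attach follow-up submessages such as
# votes.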
def do_add_submessage(
realm: Realm,
sender_id: int,
message_id: int,
msg_type: str,
content: str,
) -> None:
"""Should be called while holding a SELECT FOR UPDATE lock
(e.g. via access_message(..., lock_message=True)) on the
Message row, to prevent race conditions.
"""
submessage = SubMessage(
sender_id=sender_id,
message_id=message_id,
msg_type=msg_type,
content=content,
)
submessage.save()
event = dict(
type="submessage",
msg_type=msg_type,
message_id=message_id,
submessage_id=submessage.id,
sender_id=sender_id,
content=content,
)
ums = UserMessage.objects.filter(message_id=message_id)
target_user_ids = [um.user_profile_id for um in ums]
transaction.on_commit(lambda: send_event(realm, event, target_user_ids))
def notify_reaction_update(
user_profile: UserProfile, message: Message, reaction: Reaction, op: str
) -> None:
user_dict = {
"user_id": user_profile.id,
"email": user_profile.email,
"full_name": user_profile.full_name,
}
event: Dict[str, Any] = {
"type": "reaction",
"op": op,
"user_id": user_profile.id,
# TODO: We plan to remove this redundant user_dict object once
        # clients are updated to support accessing user_id. See
# https://github.com/zulip/zulip/pull/14711 for details.
"user": user_dict,
"message_id": message.id,
"emoji_name": reaction.emoji_name,
"emoji_code": reaction.emoji_code,
"reaction_type": reaction.reaction_type,
}
# Update the cached message since new reaction is added.
update_to_dict_cache([message])
# Recipients for message update events, including reactions, are
# everyone who got the original message, plus subscribers of
# streams with the access to stream's full history.
#
# This means reactions won't live-update in preview narrows for a
# stream the user isn't yet subscribed to; this is the right
# performance tradeoff to avoid sending every reaction to public
# stream messages to all users.
#
# To ensure that reactions do live-update for any user who has
# actually participated in reacting to a message, we add a
# "historical" UserMessage row for any user who reacts to message,
# subscribing them to future notifications, even if they are not
# subscribed to the stream.
user_ids = set(
UserMessage.objects.filter(message=message.id).values_list("user_profile_id", flat=True)
)
if message.recipient.type == Recipient.STREAM:
stream_id = message.recipient.type_id
stream = Stream.objects.get(id=stream_id)
user_ids |= subscriber_ids_with_stream_history_access(stream)
transaction.on_commit(lambda: send_event(user_profile.realm, event, list(user_ids)))
def do_add_reaction(
user_profile: UserProfile,
message: Message,
emoji_name: str,
emoji_code: str,
reaction_type: str,
) -> None:
"""Should be called while holding a SELECT FOR UPDATE lock
(e.g. via access_message(..., lock_message=True)) on the
Message row, to prevent race conditions.
"""
reaction = Reaction(
user_profile=user_profile,
message=message,
emoji_name=emoji_name,
emoji_code=emoji_code,
reaction_type=reaction_type,
)
reaction.save()
notify_reaction_update(user_profile, message, reaction, "add")
def check_add_reaction(
user_profile: UserProfile,
message_id: int,
emoji_name: str,
emoji_code: Optional[str],
reaction_type: Optional[str],
) -> None:
message, user_message = access_message(user_profile, message_id, lock_message=True)
if emoji_code is None:
# The emoji_code argument is only required for rare corner
# cases discussed in the long block comment below. For simple
# API clients, we allow specifying just the name, and just
# look up the code using the current name->code mapping.
emoji_code = emoji_name_to_emoji_code(message.sender.realm, emoji_name)[0]
if reaction_type is None:
reaction_type = emoji_name_to_emoji_code(message.sender.realm, emoji_name)[1]
if Reaction.objects.filter(
user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type,
).exists():
raise JsonableError(_("Reaction already exists."))
query = Reaction.objects.filter(
message=message, emoji_code=emoji_code, reaction_type=reaction_type
)
if query.exists():
# If another user has already reacted to this message with
# same emoji code, we treat the new reaction as a vote for the
# existing reaction. So the emoji name used by that earlier
# reaction takes precedence over whatever was passed in this
# request. This is necessary to avoid a message having 2
# "different" emoji reactions with the same emoji code (and
# thus same image) on the same message, which looks ugly.
#
# In this "voting for an existing reaction" case, we shouldn't
# check whether the emoji code and emoji name match, since
# it's possible that the (emoji_type, emoji_name, emoji_code)
# triple for this existing reaction may not pass validation
# now (e.g. because it is for a realm emoji that has been
# since deactivated). We still want to allow users to add a
# vote any old reaction they see in the UI even if that is a
# deactivated custom emoji, so we just use the emoji name from
# the existing reaction with no further validation.
reaction = query.first()
assert reaction is not None
emoji_name = reaction.emoji_name
else:
# Otherwise, use the name provided in this request, but verify
# it is valid in the user's realm (e.g. not a deactivated
# realm emoji).
check_emoji_request(user_profile.realm, emoji_name, emoji_code, reaction_type)
if user_message is None:
# Users can see and react to messages sent to streams they
# were not a subscriber to; in order to receive events for
# those, we give the user a `historical` UserMessage objects
# for the message. This is the same trick we use for starring
# messages.
UserMessage.objects.create(
user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read,
)
do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)
def do_remove_reaction(
user_profile: UserProfile, message: Message, emoji_code: str, reaction_type: str
) -> None:
"""Should be called while holding a SELECT FOR UPDATE lock
(e.g. via access_message(..., lock_message=True)) on the
Message row, to prevent race conditions.
"""
reaction = Reaction.objects.filter(
user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type,
).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(
realm: Realm, sender: UserProfile, recipient_user_profiles: List[UserProfile], operator: str
) -> None:
sender_dict = {"user_id": sender.id, "email": sender.email}
# Include a list of recipients in the event body to help identify where the typing is happening
recipient_dicts = [
{"user_id": profile.id, "email": profile.email} for profile in recipient_user_profiles
]
event = dict(
type="typing",
message_type="private",
op=operator,
sender=sender_dict,
recipients=recipient_dicts,
)
# Only deliver the notification to active user recipients
user_ids_to_notify = [user.id for user in recipient_user_profiles if user.is_active]
send_event(realm, event, user_ids_to_notify)
# check_send_typing_notification:
# Validates the recipients of the typing notification and sends it
def check_send_typing_notification(sender: UserProfile, user_ids: List[int], operator: str) -> None:
realm = sender.realm
if sender.id not in user_ids:
user_ids.append(sender.id)
# If any of the user_ids being sent in are invalid, we will
# just reject the whole request, since a partial list of user_ids
# can create confusion related to huddles. Plus it's a good
# sign that a client is confused (or possibly even malicious) if
# we get bad user_ids.
user_profiles = []
for user_id in user_ids:
try:
            # We include cross-realm bots as possible recipients,
# so that clients can know which huddle conversation
# is relevant here.
user_profile = get_user_by_id_in_realm_including_cross_realm(user_id, sender.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid user ID {}").format(user_id))
user_profiles.append(user_profile)
do_send_typing_notification(
realm=realm,
sender=sender,
recipient_user_profiles=user_profiles,
operator=operator,
)
def do_send_stream_typing_notification(
sender: UserProfile, operator: str, stream: Stream, topic: str
) -> None:
sender_dict = {"user_id": sender.id, "email": sender.email}
event = dict(
type="typing",
message_type="stream",
op=operator,
sender=sender_dict,
stream_id=stream.id,
topic=topic,
)
user_ids_to_notify = get_user_ids_for_streams({stream.id})[stream.id]
send_event(sender.realm, event, user_ids_to_notify)
def ensure_stream(
realm: Realm,
stream_name: str,
invite_only: bool = False,
stream_description: str = "",
*,
acting_user: Optional[UserProfile],
) -> Stream:
return create_stream_if_needed(
realm,
stream_name,
invite_only=invite_only,
stream_description=stream_description,
acting_user=acting_user,
)[0]
def get_recipient_from_user_profiles(
recipient_profiles: Sequence[UserProfile],
forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile,
) -> Recipient:
# Avoid mutating the passed in list of recipient_profiles.
recipient_profiles_map = {user_profile.id: user_profile for user_profile in recipient_profiles}
if forwarded_mirror_message:
# In our mirroring integrations with some third-party
# protocols, bots subscribed to the third-party protocol
# forward to Zulip messages that they received in the
# third-party service. The permissions model for that
# forwarding is that users can only submit to Zulip private
# messages they personally received, and here we do the check
# for whether forwarder_user_profile is among the private
# message recipients of the message.
assert forwarder_user_profile is not None
if forwarder_user_profile.id not in recipient_profiles_map:
raise ValidationError(_("User not authorized for this query"))
# If the private message is just between the sender and
    # another person, force it to be a personal message internally
if len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map:
del recipient_profiles_map[sender.id]
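    # For example (illustrative): a private message Alice sends to
    # [Alice, Bob] is stored with a PERSONAL recipient for Bob, exactly
    # as if it had been addressed to Bob alone.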
assert recipient_profiles_map
if len(recipient_profiles_map) == 1:
[user_profile] = recipient_profiles_map.values()
return Recipient(
id=user_profile.recipient_id,
type=Recipient.PERSONAL,
type_id=user_profile.id,
)
# Otherwise, we need a huddle. Make sure the sender is included in huddle messages
recipient_profiles_map[sender.id] = sender
user_ids = set(recipient_profiles_map)
return get_huddle_recipient(user_ids)
def validate_recipient_user_profiles(
user_profiles: Sequence[UserProfile], sender: UserProfile, allow_deactivated: bool = False
) -> Sequence[UserProfile]:
recipient_profiles_map: Dict[int, UserProfile] = {}
# We exempt cross-realm bots from the check that all the recipients
# are in the same realm.
realms = set()
if not is_cross_realm_bot_email(sender.email):
realms.add(sender.realm_id)
for user_profile in user_profiles:
if (
not user_profile.is_active
and not user_profile.is_mirror_dummy
and not allow_deactivated
) or user_profile.realm.deactivated:
raise ValidationError(
_("'{email}' is no longer using Zulip.").format(email=user_profile.email)
)
recipient_profiles_map[user_profile.id] = user_profile
if not is_cross_realm_bot_email(user_profile.email):
realms.add(user_profile.realm_id)
if len(realms) > 1:
raise ValidationError(_("You can't send private messages outside of your organization."))
return list(recipient_profiles_map.values())
def recipient_for_user_profiles(
user_profiles: Sequence[UserProfile],
forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile,
allow_deactivated: bool = False,
) -> Recipient:
recipient_profiles = validate_recipient_user_profiles(
user_profiles, sender, allow_deactivated=allow_deactivated
)
return get_recipient_from_user_profiles(
recipient_profiles, forwarded_mirror_message, forwarder_user_profile, sender
)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
if message.recipient.type == Recipient.HUDDLE:
# For huddle messages, we use a 10-second window because the
# timestamps aren't guaranteed to actually match between two
# copies of the same message.
time_window = datetime.timedelta(seconds=10)
else:
time_window = datetime.timedelta(seconds=0)
query = Message.objects.filter(
sender=message.sender,
recipient=message.recipient,
content=message.content,
sending_client=message.sending_client,
date_sent__gte=message.date_sent - time_window,
date_sent__lte=message.date_sent + time_window,
)
messages = filter_by_exact_message_topic(
query=query,
message=message,
)
if messages.exists():
return messages[0].id
return None
def extract_stream_indicator(s: str) -> Union[str, int]:
    # Users can pass the stream as either an id or a name,
# and if they choose to pass a name, they may JSON encode
# it for legacy reasons.
try:
data = orjson.loads(s)
except orjson.JSONDecodeError:
# If there was no JSON encoding, then we just
# have a raw stream name.
return s
# We should stop supporting this odd use case
# once we improve our documentation.
if isinstance(data, list):
if len(data) != 1: # nocoverage
raise JsonableError(_("Expected exactly one stream"))
data = data[0]
if isinstance(data, str):
# We had a JSON-encoded stream name.
return data
if isinstance(data, int):
# We had a stream id.
return data
raise JsonableError(_("Invalid data type for stream"))
def extract_private_recipients(s: str) -> Union[List[str], List[int]]:
# We try to accept multiple incoming formats for recipients.
# See test_extract_recipients() for examples of what we allow.
try:
data = orjson.loads(s)
except orjson.JSONDecodeError:
data = s
if isinstance(data, str):
data = data.split(",")
if not isinstance(data, list):
raise JsonableError(_("Invalid data type for recipients"))
if not data:
# We don't complain about empty message recipients here
return data
if isinstance(data[0], str):
return get_validated_emails(data)
if not isinstance(data[0], int):
raise JsonableError(_("Invalid data type for recipients"))
return get_validated_user_ids(data)
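# A sketch of inputs extract_private_recipients accepts (hypothetical
# values; see test_extract_recipients() for the authoritative list):
#
#   '[3, 7, 7]'                      -> [3, 7]  (user IDs, deduplicated; order not guaranteed)
#   '"alice@zulip.com"'              -> ["alice@zulip.com"]
#   'alice@zulip.com, bob@zulip.com' -> both emails, stripped (order not guaranteed)
#   '[]'                             -> []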
def get_validated_user_ids(user_ids: Collection[int]) -> List[int]:
for user_id in user_ids:
if not isinstance(user_id, int):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(set(user_ids))
def get_validated_emails(emails: Collection[str]) -> List[str]:
for email in emails:
if not isinstance(email, str):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(filter(bool, {email.strip() for email in emails}))
def check_send_stream_message(
sender: UserProfile,
client: Client,
stream_name: str,
topic: str,
body: str,
realm: Optional[Realm] = None,
) -> int:
addressee = Addressee.for_stream_name(stream_name, topic)
message = check_message(sender, client, addressee, body, realm)
return do_send_messages([message])[0]
def check_send_stream_message_by_id(
sender: UserProfile,
client: Client,
stream_id: int,
topic: str,
body: str,
realm: Optional[Realm] = None,
) -> int:
addressee = Addressee.for_stream_id(stream_id, topic)
message = check_message(sender, client, addressee, body, realm)
return do_send_messages([message])[0]
def check_send_private_message(
sender: UserProfile, client: Client, receiving_user: UserProfile, body: str
) -> int:
addressee = Addressee.for_user_profile(receiving_user)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
# check_send_message:
# Returns the id of the sent message. Has a similar argspec to
# check_message, but takes the legacy (message_type_name, message_to,
# topic_name) triple instead of an Addressee.
def check_send_message(
sender: UserProfile,
client: Client,
message_type_name: str,
message_to: Union[Sequence[int], Sequence[str]],
topic_name: Optional[str],
message_content: str,
realm: Optional[Realm] = None,
forged: bool = False,
forged_timestamp: Optional[float] = None,
forwarder_user_profile: Optional[UserProfile] = None,
local_id: Optional[str] = None,
sender_queue_id: Optional[str] = None,
widget_content: Optional[str] = None,
*,
skip_stream_access_check: bool = False,
) -> int:
addressee = Addressee.legacy_build(sender, message_type_name, message_to, topic_name)
try:
message = check_message(
sender,
client,
addressee,
message_content,
realm,
forged,
forged_timestamp,
forwarder_user_profile,
local_id,
sender_queue_id,
widget_content,
skip_stream_access_check=skip_stream_access_check,
)
except ZephyrMessageAlreadySentException as e:
return e.message_id
return do_send_messages([message])[0]
def check_schedule_message(
sender: UserProfile,
client: Client,
message_type_name: str,
message_to: Union[Sequence[str], Sequence[int]],
topic_name: Optional[str],
message_content: str,
delivery_type: str,
deliver_at: datetime.datetime,
realm: Optional[Realm] = None,
forwarder_user_profile: Optional[UserProfile] = None,
) -> int:
addressee = Addressee.legacy_build(sender, message_type_name, message_to, topic_name)
send_request = check_message(
sender,
client,
addressee,
message_content,
realm=realm,
forwarder_user_profile=forwarder_user_profile,
)
send_request.deliver_at = deliver_at
send_request.delivery_type = delivery_type
recipient = send_request.message.recipient
if delivery_type == "remind" and (
recipient.type != Recipient.STREAM and recipient.type_id != sender.id
):
raise JsonableError(_("Reminders can only be set for streams."))
return do_schedule_messages([send_request])[0]
def validate_message_edit_payload(
message: Message,
stream_id: Optional[int],
topic_name: Optional[str],
propagate_mode: Optional[str],
content: Optional[str],
) -> None:
"""
    Checks that the data sent is well-formed. Does not handle editability, permissions, etc.
"""
if topic_name is None and content is None and stream_id is None:
raise JsonableError(_("Nothing to change"))
if not message.is_stream_message():
if stream_id is not None:
raise JsonableError(_("Private messages cannot be moved to streams."))
if topic_name is not None:
raise JsonableError(_("Private messages cannot have topics."))
if propagate_mode != "change_one" and topic_name is None and stream_id is None:
raise JsonableError(_("Invalid propagate_mode without topic edit"))
if topic_name is not None:
check_stream_topic(topic_name)
if stream_id is not None and content is not None:
raise JsonableError(_("Cannot change message content while changing stream"))
# Right now, we prevent users from editing widgets.
if content is not None and is_widget_message(message):
raise JsonableError(_("Widgets cannot be edited."))
def can_edit_content_or_topic(
message: Message,
user_profile: UserProfile,
is_no_topic_msg: bool,
content: Optional[str] = None,
topic_name: Optional[str] = None,
) -> bool:
# You have permission to edit the message (both content and topic) if you sent it.
if message.sender_id == user_profile.id:
return True
# You cannot edit the content of message sent by someone else.
if content is not None:
return False
assert topic_name is not None
# The following cases are the various reasons a user might be
# allowed to edit topics.
# We allow anyone to edit (no topic) messages to help tend them.
if is_no_topic_msg:
return True
# The can_edit_topic_of_any_message helper returns whether the user can edit the topic
# or not based on edit_topic_policy setting and the user's role.
if user_profile.can_edit_topic_of_any_message():
return True
return False
def check_update_message(
user_profile: UserProfile,
message_id: int,
stream_id: Optional[int] = None,
topic_name: Optional[str] = None,
propagate_mode: str = "change_one",
send_notification_to_old_thread: bool = True,
send_notification_to_new_thread: bool = True,
content: Optional[str] = None,
) -> int:
"""This will update a message given the message id and user profile.
It checks whether the user profile has the permission to edit the message
and raises a JsonableError if otherwise.
    It returns the number of messages changed.
"""
message, ignored_user_message = access_message(user_profile, message_id)
if not user_profile.realm.allow_message_editing:
raise JsonableError(_("Your organization has turned off message editing"))
    # The zerver/views/message_edit.py call point already strips this
    # via REQ_topic; so we can delete this line if we arrange a
    # contract where future callers in the embedded bots system use
    # REQ_topic as well (or otherwise are guaranteed to strip input).
if topic_name is not None:
topic_name = topic_name.strip()
if topic_name == message.topic_name():
topic_name = None
validate_message_edit_payload(message, stream_id, topic_name, propagate_mode, content)
is_no_topic_msg = message.topic_name() == "(no topic)"
if content is not None or topic_name is not None:
if not can_edit_content_or_topic(
message, user_profile, is_no_topic_msg, content, topic_name
):
raise JsonableError(_("You don't have permission to edit this message"))
# If there is a change to the content, check that it hasn't been too long
# Allow an extra 20 seconds since we potentially allow editing 15 seconds
# past the limit, and in case there are network issues, etc. The 15 comes
# from (min_seconds_to_edit + seconds_left_buffer) in message_edit.js; if
# you change this value also change those two parameters in message_edit.js.
edit_limit_buffer = 20
if content is not None and user_profile.realm.message_content_edit_limit_seconds > 0:
deadline_seconds = user_profile.realm.message_content_edit_limit_seconds + edit_limit_buffer
if (timezone_now() - message.date_sent) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message has passed"))
    # If there is a change to the topic, check that the user is allowed to
    # edit it and that it has not been too long. If the person editing is
    # not the user who sent the message and is not an admin or moderator,
    # and the time limit for editing topics has passed, raise an error.
if (
topic_name is not None
and message.sender != user_profile
and not user_profile.is_realm_admin
and not user_profile.is_moderator
and not is_no_topic_msg
):
deadline_seconds = Realm.DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS + edit_limit_buffer
if (timezone_now() - message.date_sent) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message's topic has passed"))
rendering_result = None
links_for_embed: Set[str] = set()
prior_mention_user_ids: Set[int] = set()
mention_data: Optional[MentionData] = None
if content is not None:
if content.rstrip() == "":
content = "(deleted)"
content = normalize_body(content)
mention_backend = MentionBackend(user_profile.realm_id)
mention_data = MentionData(
mention_backend=mention_backend,
content=content,
)
user_info = get_user_info_for_message_updates(message.id)
prior_mention_user_ids = user_info["mention_user_ids"]
            # We render the message using the current user's realm; since
            # cross-realm bots never edit messages, this should always
            # be correct.
# Note: If rendering fails, the called code will raise a JsonableError.
rendering_result = render_incoming_message(
message,
content,
user_info["message_user_ids"],
user_profile.realm,
mention_data=mention_data,
)
links_for_embed |= rendering_result.links_for_preview
if message.is_stream_message() and rendering_result.mentions_wildcard:
stream = access_stream_by_id(user_profile, message.recipient.type_id)[0]
if not wildcard_mention_allowed(message.sender, stream):
raise JsonableError(
_("You do not have permission to use wildcard mentions in this stream.")
)
new_stream = None
number_changed = 0
if stream_id is not None:
assert message.is_stream_message()
if not user_profile.can_move_messages_between_streams():
raise JsonableError(_("You don't have permission to move this message"))
try:
access_stream_by_id(user_profile, message.recipient.type_id)
except JsonableError:
raise JsonableError(
_(
"You don't have permission to move this message due to missing access to its stream"
)
)
new_stream = access_stream_by_id(user_profile, stream_id, require_active=True)[0]
check_stream_access_based_on_stream_post_policy(user_profile, new_stream)
number_changed = do_update_message(
user_profile,
message,
new_stream,
topic_name,
propagate_mode,
send_notification_to_old_thread,
send_notification_to_new_thread,
content,
rendering_result,
prior_mention_user_ids,
mention_data,
)
if links_for_embed:
event_data = {
"message_id": message.id,
"message_content": message.content,
# The choice of `user_profile.realm_id` rather than
# `sender.realm_id` must match the decision made in the
# `render_incoming_message` call earlier in this function.
"message_realm_id": user_profile.realm_id,
"urls": list(links_for_embed),
}
queue_json_publish("embed_links", event_data)
return number_changed
def check_default_stream_group_name(group_name: str) -> None:
if group_name.strip() == "":
raise JsonableError(_("Invalid default stream group name '{}'").format(group_name))
if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
raise JsonableError(
_("Default stream group name too long (limit: {} characters)").format(
DefaultStreamGroup.MAX_NAME_LENGTH,
)
)
for i in group_name:
if ord(i) == 0:
raise JsonableError(
_("Default stream group name '{}' contains NULL (0x00) characters.").format(
group_name,
)
)
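# Illustrative rejections (assuming DefaultStreamGroup.MAX_NAME_LENGTH is 60):
#   check_default_stream_group_name("   ")     -> JsonableError (empty name)
#   check_default_stream_group_name("x" * 100) -> JsonableError (too long)
#   check_default_stream_group_name("a\0b")    -> JsonableError (NULL byte)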
def send_rate_limited_pm_notification_to_bot_owner(
sender: UserProfile, realm: Realm, content: str
) -> None:
"""
Sends a PM error notification to a bot's owner if one hasn't already
been sent in the last 5 minutes.
"""
if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
return
if not sender.is_bot or sender.bot_owner is None:
return
# Don't send these notifications for cross-realm bot messages
# (e.g. from EMAIL_GATEWAY_BOT) since the owner for
# EMAIL_GATEWAY_BOT is probably the server administrator, not
# the owner of the bot who could potentially fix the problem.
if sender.realm != realm:
return
# We warn the user once every 5 minutes to avoid a flood of
# PMs on a misconfigured integration, re-using the
# UserProfile.last_reminder field, which is not used for bots.
last_reminder = sender.last_reminder
waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
if last_reminder and timezone_now() - last_reminder <= waitperiod:
return
internal_send_private_message(
get_system_bot(settings.NOTIFICATION_BOT, sender.bot_owner.realm_id),
sender.bot_owner,
content,
)
sender.last_reminder = timezone_now()
sender.save(update_fields=["last_reminder"])
def send_pm_if_empty_stream(
stream: Optional[Stream],
realm: Realm,
sender: UserProfile,
stream_name: Optional[str] = None,
stream_id: Optional[int] = None,
) -> None:
"""If a bot sends a message to a stream that doesn't exist or has no
subscribers, sends a notification to the bot owner (if not a
cross-realm bot) so that the owner can correct the issue."""
if not sender.is_bot or sender.bot_owner is None:
return
arg_dict = {
"bot_identity": f"`{sender.delivery_email}`",
"stream_id": stream_id,
"stream_name": f"#**{stream_name}**",
"new_stream_link": "#streams/new",
}
if sender.bot_owner is not None:
with override_language(sender.bot_owner.default_language):
if stream is None:
if stream_id is not None:
content = _(
"Your bot {bot_identity} tried to send a message to stream ID "
"{stream_id}, but there is no stream with that ID."
).format(**arg_dict)
else:
assert stream_name is not None
content = _(
"Your bot {bot_identity} tried to send a message to stream "
"{stream_name}, but that stream does not exist. "
"Click [here]({new_stream_link}) to create it."
).format(**arg_dict)
else:
if num_subscribers_for_stream_id(stream.id) > 0:
return
content = _(
"Your bot {bot_identity} tried to send a message to "
"stream {stream_name}. The stream exists but "
"does not have any subscribers."
).format(**arg_dict)
send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_stream_name_with_pm_notification(
stream_name: str, realm: Realm, sender: UserProfile
) -> Stream:
stream_name = stream_name.strip()
check_stream_name(stream_name)
try:
stream = get_stream(stream_name, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
raise StreamDoesNotExistError(escape(stream_name))
return stream
def validate_stream_id_with_pm_notification(
stream_id: int, realm: Realm, sender: UserProfile
) -> Stream:
try:
stream = get_stream_by_id_in_realm(stream_id, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
raise StreamWithIDDoesNotExistError(stream_id)
return stream
def check_private_message_policy(
realm: Realm, sender: UserProfile, user_profiles: Sequence[UserProfile]
) -> None:
if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot):
# We allow PMs only between users and bots, to avoid
# breaking the tutorial as well as automated
# notifications from system bots to users.
return
raise JsonableError(_("Private messages are disabled in this organization."))
# check_message:
# Returns a SendMessageRequest, ready for do_send_messages, on success;
# raises a JsonableError on error.
def check_message(
sender: UserProfile,
client: Client,
addressee: Addressee,
message_content_raw: str,
realm: Optional[Realm] = None,
forged: bool = False,
forged_timestamp: Optional[float] = None,
forwarder_user_profile: Optional[UserProfile] = None,
local_id: Optional[str] = None,
sender_queue_id: Optional[str] = None,
widget_content: Optional[str] = None,
email_gateway: bool = False,
*,
skip_stream_access_check: bool = False,
mention_backend: Optional[MentionBackend] = None,
limit_unread_user_ids: Optional[Set[int]] = None,
) -> SendMessageRequest:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
stream = None
message_content = normalize_body(message_content_raw)
if realm is None:
realm = sender.realm
if addressee.is_stream():
topic_name = addressee.topic()
topic_name = truncate_topic(topic_name)
stream_name = addressee.stream_name()
stream_id = addressee.stream_id()
if stream_name is not None:
stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
elif stream_id is not None:
stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
else:
stream = addressee.stream()
assert stream is not None
# To save a database round trip, we construct the Recipient
# object for the Stream rather than fetching it from the
# database using the stream.recipient foreign key.
#
# This is simpler than ensuring that code paths that fetch a
# Stream that will be used for sending a message have a
# `select_related("recipient"), which would also needlessly
# expand Stream objects in memory (all the fields of Recipient
# are already known given the Stream object).
recipient = Recipient(
id=stream.recipient_id,
type_id=stream.id,
type=Recipient.STREAM,
)
if not skip_stream_access_check:
access_stream_for_send_message(
sender=sender, stream=stream, forwarder_user_profile=forwarder_user_profile
)
else:
        # Defensive assertion - the only currently supported use case
        # for this option is for outgoing webhook bots, and since this
        # is security-sensitive code, it's beneficial to ensure nothing
        # else can sneak past the access check.
assert sender.bot_type == sender.OUTGOING_WEBHOOK_BOT
if realm.mandatory_topics and topic_name == "(no topic)":
raise JsonableError(_("Topics are required in this organization"))
elif addressee.is_private():
user_profiles = addressee.user_profiles()
mirror_message = client and client.name in [
"zephyr_mirror",
"irc_mirror",
"jabber_mirror",
"JabberMirror",
]
check_private_message_policy(realm, sender, user_profiles)
# API super-users who set the `forged` flag are allowed to
# forge messages sent by any user, so we disable the
# `forwarded_mirror_message` security check in that case.
forwarded_mirror_message = mirror_message and not forged
try:
recipient = recipient_for_user_profiles(
user_profiles, forwarded_mirror_message, forwarder_user_profile, sender
)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
else:
# This is defensive code--Addressee already validates
# the message type.
raise AssertionError("Invalid message type")
message = Message()
message.sender = sender
message.content = message_content
message.recipient = recipient
if addressee.is_stream():
message.set_topic_name(topic_name)
if forged and forged_timestamp is not None:
# Forged messages come with a timestamp
message.date_sent = timestamp_to_datetime(forged_timestamp)
else:
message.date_sent = timezone_now()
message.sending_client = client
# We render messages later in the process.
assert message.rendered_content is None
if client.name == "zephyr_mirror":
id = already_sent_mirrored_message_id(message)
if id is not None:
raise ZephyrMessageAlreadySentException(id)
widget_content_dict = None
if widget_content is not None:
try:
widget_content_dict = orjson.loads(widget_content)
except orjson.JSONDecodeError:
raise JsonableError(_("Widgets: API programmer sent invalid JSON content"))
try:
check_widget_content(widget_content_dict)
except ValidationError as error:
raise JsonableError(
_("Widgets: {error_msg}").format(
error_msg=error.message,
)
)
message_send_dict = build_message_send_dict(
message=message,
stream=stream,
local_id=local_id,
sender_queue_id=sender_queue_id,
realm=realm,
widget_content_dict=widget_content_dict,
email_gateway=email_gateway,
mention_backend=mention_backend,
limit_unread_user_ids=limit_unread_user_ids,
)
if stream is not None and message_send_dict.rendering_result.mentions_wildcard:
if not wildcard_mention_allowed(sender, stream):
raise JsonableError(
_("You do not have permission to use wildcard mentions in this stream.")
)
return message_send_dict
def _internal_prep_message(
realm: Realm,
sender: UserProfile,
addressee: Addressee,
content: str,
email_gateway: bool = False,
mention_backend: Optional[MentionBackend] = None,
limit_unread_user_ids: Optional[Set[int]] = None,
) -> Optional[SendMessageRequest]:
"""
    Creates a message object and checks it, but doesn't send it or save it to the database.
The internal function that calls this can therefore batch send a bunch of created
messages together as one database query.
Call do_send_messages with a list of the return values of this method.
"""
    # Truncate the content if it exceeds the maximum message length,
    # appending a note to say so.
if len(content) > settings.MAX_MESSAGE_LENGTH:
content = content[0:3900] + "\n\n[message was too long and has been truncated]"
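    # For illustration: assuming settings.MAX_MESSAGE_LENGTH is 10000, a
    # 12,000-character body becomes its first 3900 characters plus the
    # truncation notice above.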
# If we have a stream name, and the stream doesn't exist, we
# create it here (though this code path should probably be removed
# eventually, moving that responsibility to the caller). If
# addressee.stream_name() is None (i.e. we're sending to a stream
# by ID), we skip this, as the stream object must already exist.
if addressee.is_stream():
stream_name = addressee.stream_name()
if stream_name is not None:
ensure_stream(realm, stream_name, acting_user=sender)
try:
return check_message(
sender,
get_client("Internal"),
addressee,
content,
realm=realm,
email_gateway=email_gateway,
mention_backend=mention_backend,
limit_unread_user_ids=limit_unread_user_ids,
)
except JsonableError as e:
logging.exception(
"Error queueing internal message by %s: %s",
sender.delivery_email,
e.msg,
stack_info=True,
)
return None
def internal_prep_stream_message(
sender: UserProfile,
stream: Stream,
topic: str,
content: str,
email_gateway: bool = False,
limit_unread_user_ids: Optional[Set[int]] = None,
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
"""
realm = stream.realm
addressee = Addressee.for_stream(stream, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
email_gateway=email_gateway,
limit_unread_user_ids=limit_unread_user_ids,
)
def internal_prep_stream_message_by_name(
realm: Realm,
sender: UserProfile,
stream_name: str,
topic: str,
content: str,
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream_name(stream_name, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_private_message(
realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str,
mention_backend: Optional[MentionBackend] = None,
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_user_profile(recipient_user)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
mention_backend=mention_backend,
)
def internal_send_private_message(
sender: UserProfile, recipient_user: UserProfile, content: str
) -> Optional[int]:
realm = recipient_user.realm
message = internal_prep_private_message(realm, sender, recipient_user, content)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_stream_message(
sender: UserProfile,
stream: Stream,
topic: str,
content: str,
email_gateway: bool = False,
limit_unread_user_ids: Optional[Set[int]] = None,
) -> Optional[int]:
message = internal_prep_stream_message(
sender, stream, topic, content, email_gateway, limit_unread_user_ids=limit_unread_user_ids
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_stream_message_by_name(
realm: Realm,
sender: UserProfile,
stream_name: str,
topic: str,
content: str,
) -> Optional[int]:
message = internal_prep_stream_message_by_name(
realm,
sender,
stream_name,
topic,
content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_huddle_message(
realm: Realm, sender: UserProfile, emails: List[str], content: str
) -> Optional[int]:
addressee = Addressee.for_private(emails, realm)
message = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def pick_colors(
used_colors: Set[str], color_map: Dict[int, str], recipient_ids: List[int]
) -> Dict[int, str]:
used_colors = set(used_colors)
recipient_ids = sorted(recipient_ids)
result = {}
other_recipient_ids = []
for recipient_id in recipient_ids:
if recipient_id in color_map:
color = color_map[recipient_id]
result[recipient_id] = color
used_colors.add(color)
else:
other_recipient_ids.append(recipient_id)
available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]
for i, recipient_id in enumerate(other_recipient_ids):
if i < len(available_colors):
color = available_colors[i]
else:
# We have to start re-using old colors, and we use recipient_id
# to choose the color.
color = STREAM_ASSIGNMENT_COLORS[recipient_id % len(STREAM_ASSIGNMENT_COLORS)]
result[recipient_id] = color
return result
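# A minimal sketch of pick_colors (hypothetical ids/colors, assuming the
# STREAM_ASSIGNMENT_COLORS palette begins "#76ce90", "#fae589", ...):
#
#   pick_colors(
#       used_colors={"#76ce90"},
#       color_map={10: "#a6c7e5"},
#       recipient_ids=[10, 11, 12],
#   )
#   # -> {10: "#a6c7e5", 11: "#fae589", 12: <next unused palette color>}
#
# Recipient 10 keeps its pre-mapped color; the rest take the first unused
# palette colors, falling back to recipient_id % len(palette) once the
# palette is exhausted.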
def validate_user_access_to_subscribers(
user_profile: Optional[UserProfile], stream: Stream
) -> None:
"""Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is MIT and the stream is not invite only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
"""
validate_user_access_to_subscribers_helper(
user_profile,
{
"realm_id": stream.realm_id,
"is_web_public": stream.is_web_public,
"invite_only": stream.invite_only,
},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda user_profile: subscribed_to_stream(user_profile, stream.id),
)
def validate_user_access_to_subscribers_helper(
user_profile: Optional[UserProfile],
stream_dict: Mapping[str, Any],
check_user_subscribed: Callable[[UserProfile], bool],
) -> None:
"""Helper for validate_user_access_to_subscribers that doesn't require
a full stream object. This function is a bit hard to read,
because it is carefully optimized for performance in the two code
paths we call it from:
* In `bulk_get_subscriber_user_ids`, we already know whether the
user was subscribed via `sub_dict`, and so we want to avoid a
database query at all (especially since it calls this in a loop);
* In `validate_user_access_to_subscribers`, we want to only check
if the user is subscribed when we absolutely have to, since it
costs a database query.
The `check_user_subscribed` argument is a function that reports
whether the user is subscribed to the stream.
Note also that we raise a ValidationError in cases where the
caller is doing the wrong thing (maybe these should be
AssertionErrors), and JsonableError for 400 type errors.
"""
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
# Even guest users can access subscribers to web-public streams,
# since they can freely become subscribers to these streams.
if stream_dict["is_web_public"]:
return
# With the exception of web-public streams, a guest must
# be subscribed to a stream (even a public one) in order
# to see subscribers.
if user_profile.is_guest:
if check_user_subscribed(user_profile):
return
# We could explicitly handle the case where guests aren't
# subscribed here in an `else` statement or we can fall
# through to the subsequent logic. Tim prefers the latter.
# Adding an `else` would ensure better code coverage.
if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
raise JsonableError(_("Subscriber data is not available for this stream"))
# Organization administrators can view subscribers for all streams.
if user_profile.is_realm_admin:
return
if stream_dict["invite_only"] and not check_user_subscribed(user_profile):
raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(
stream_dicts: Collection[Mapping[str, Any]],
user_profile: UserProfile,
subscribed_stream_ids: Set[int],
) -> Dict[int, List[int]]:
"""sub_dict maps stream_id => whether the user is subscribed to that stream."""
target_stream_dicts = []
for stream_dict in stream_dicts:
stream_id = stream_dict["id"]
is_subscribed = stream_id in subscribed_stream_ids
try:
validate_user_access_to_subscribers_helper(
user_profile,
stream_dict,
lambda user_profile: is_subscribed,
)
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
recip_to_stream_id = {stream["recipient_id"]: stream["id"] for stream in target_stream_dicts}
recipient_ids = sorted(stream["recipient_id"] for stream in target_stream_dicts)
result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
if not recipient_ids:
return result
"""
The raw SQL below leads to more than a 2x speedup when tested with
20k+ total subscribers. (For large realms with lots of default
streams, this function deals with LOTS of data, so it is important
to optimize.)
"""
query = SQL(
"""
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
WHERE
zerver_subscription.recipient_id in %(recipient_ids)s AND
zerver_subscription.active AND
zerver_subscription.is_user_active
ORDER BY
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
"""
)
cursor = connection.cursor()
cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
rows = cursor.fetchall()
cursor.close()
"""
Using groupby/itemgetter here is important for performance, at scale.
It makes it so that all interpreter overhead is just O(N) in nature.
"""
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
# TODO: Make a generic stub for QuerySet
"""Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
"""
validate_user_access_to_subscribers(requesting_user, stream)
return get_active_subscriptions_for_stream_id(stream.id, include_deactivated_users=False)
def get_subscriber_ids(stream: Stream, requesting_user: Optional[UserProfile] = None) -> List[int]:
    subscriptions_query = get_subscribers_query(stream, requesting_user)
    return list(subscriptions_query.values_list("user_profile_id", flat=True))
@dataclass
class StreamInfo:
email_address: str
stream_weekly_traffic: Optional[int]
subscribers: List[int]
def send_subscription_add_events(
realm: Realm,
sub_info_list: List[SubInfo],
subscriber_dict: Dict[int, Set[int]],
) -> None:
info_by_user: Dict[int, List[SubInfo]] = defaultdict(list)
for sub_info in sub_info_list:
info_by_user[sub_info.user.id].append(sub_info)
stream_ids = {sub_info.stream.id for sub_info in sub_info_list}
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
# We generally only have a few streams, so we compute stream
# data in its own loop.
stream_info_dict: Dict[int, StreamInfo] = {}
for sub_info in sub_info_list:
stream = sub_info.stream
if stream.id not in stream_info_dict:
email_address = encode_email_address(stream, show_sender=True)
stream_weekly_traffic = get_average_weekly_stream_traffic(
stream.id, stream.date_created, recent_traffic
)
if stream.is_in_zephyr_realm and not stream.invite_only:
subscribers = []
else:
subscribers = list(subscriber_dict[stream.id])
stream_info_dict[stream.id] = StreamInfo(
email_address=email_address,
stream_weekly_traffic=stream_weekly_traffic,
subscribers=subscribers,
)
for user_id, sub_infos in info_by_user.items():
sub_dicts = []
for sub_info in sub_infos:
stream = sub_info.stream
stream_info = stream_info_dict[stream.id]
subscription = sub_info.sub
sub_dict = stream.to_dict()
for field_name in Subscription.API_FIELDS:
sub_dict[field_name] = getattr(subscription, field_name)
sub_dict["in_home_view"] = not subscription.is_muted
sub_dict["email_address"] = stream_info.email_address
sub_dict["stream_weekly_traffic"] = stream_info.stream_weekly_traffic
sub_dict["subscribers"] = stream_info.subscribers
sub_dicts.append(sub_dict)
# Send a notification to the user who subscribed.
event = dict(type="subscription", op="add", subscriptions=sub_dicts)
send_event(realm, event, [user_id])
SubT = Tuple[List[SubInfo], List[SubInfo]]
def bulk_add_subscriptions(
realm: Realm,
streams: Collection[Stream],
users: Iterable[UserProfile],
color_map: Mapping[str, str] = {},
from_user_creation: bool = False,
*,
acting_user: Optional[UserProfile],
) -> SubT:
users = list(users)
user_ids = [user.id for user in users]
    # Sanity check our callers
for stream in streams:
assert stream.realm_id == realm.id
for user in users:
assert user.realm_id == realm.id
recipient_ids = [stream.recipient_id for stream in streams]
recipient_id_to_stream = {stream.recipient_id: stream for stream in streams}
recipient_color_map = {}
for stream in streams:
color: Optional[str] = color_map.get(stream.name, None)
if color is not None:
recipient_color_map[stream.recipient_id] = color
used_colors_for_user_ids: Dict[int, Set[str]] = get_used_colors_for_user_ids(user_ids)
existing_subs = Subscription.objects.filter(
user_profile_id__in=user_ids,
recipient__type=Recipient.STREAM,
recipient_id__in=recipient_ids,
)
subs_by_user: Dict[int, List[Subscription]] = defaultdict(list)
for sub in existing_subs:
subs_by_user[sub.user_profile_id].append(sub)
already_subscribed: List[SubInfo] = []
subs_to_activate: List[SubInfo] = []
subs_to_add: List[SubInfo] = []
for user_profile in users:
my_subs = subs_by_user[user_profile.id]
# Make a fresh set of all new recipient ids, and then we will
# remove any for which our user already has a subscription
# (and we'll re-activate any subscriptions as needed).
new_recipient_ids: Set[int] = {stream.recipient_id for stream in streams}
for sub in my_subs:
if sub.recipient_id in new_recipient_ids:
new_recipient_ids.remove(sub.recipient_id)
stream = recipient_id_to_stream[sub.recipient_id]
sub_info = SubInfo(user_profile, sub, stream)
if sub.active:
already_subscribed.append(sub_info)
else:
subs_to_activate.append(sub_info)
used_colors = used_colors_for_user_ids.get(user_profile.id, set())
user_color_map = pick_colors(used_colors, recipient_color_map, list(new_recipient_ids))
for recipient_id in new_recipient_ids:
stream = recipient_id_to_stream[recipient_id]
color = user_color_map[recipient_id]
sub = Subscription(
user_profile=user_profile,
is_user_active=user_profile.is_active,
active=True,
color=color,
recipient_id=recipient_id,
)
sub_info = SubInfo(user_profile, sub, stream)
subs_to_add.append(sub_info)
bulk_add_subs_to_db_with_logging(
realm=realm,
acting_user=acting_user,
subs_to_add=subs_to_add,
subs_to_activate=subs_to_activate,
)
altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
for sub_info in subs_to_add + subs_to_activate:
altered_user_dict[sub_info.stream.id].add(sub_info.user.id)
stream_dict = {stream.id: stream for stream in streams}
new_streams = [stream_dict[stream_id] for stream_id in altered_user_dict]
subscriber_peer_info = bulk_get_subscriber_peer_info(
realm=realm,
streams=new_streams,
)
# We now send several types of events to notify browsers. The
# first batches of notifications are sent only to the user(s)
# being subscribed; we can skip these notifications when this is
# being called from the new user creation flow.
if not from_user_creation:
send_stream_creation_events_for_private_streams(
realm=realm,
stream_dict=stream_dict,
altered_user_dict=altered_user_dict,
)
send_subscription_add_events(
realm=realm,
sub_info_list=subs_to_add + subs_to_activate,
subscriber_dict=subscriber_peer_info.subscribed_ids,
)
send_peer_subscriber_events(
op="peer_add",
realm=realm,
altered_user_dict=altered_user_dict,
stream_dict=stream_dict,
private_peer_dict=subscriber_peer_info.private_peer_dict,
)
return (
subs_to_add + subs_to_activate,
already_subscribed,
)
# This function contains all the database changes as part of
# subscribing users to streams; we use a transaction to ensure that
# the RealmAuditLog entries are created atomically with the
# Subscription object creation (and updates).
@transaction.atomic(savepoint=False)
def bulk_add_subs_to_db_with_logging(
realm: Realm,
acting_user: Optional[UserProfile],
subs_to_add: List[SubInfo],
subs_to_activate: List[SubInfo],
) -> None:
Subscription.objects.bulk_create(info.sub for info in subs_to_add)
sub_ids = [info.sub.id for info in subs_to_activate]
Subscription.objects.filter(id__in=sub_ids).update(active=True)
# Log subscription activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs: List[RealmAuditLog] = []
for sub_info in subs_to_add:
all_subscription_logs.append(
RealmAuditLog(
realm=realm,
acting_user=acting_user,
modified_user=sub_info.user,
modified_stream=sub_info.stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
event_time=event_time,
)
)
for sub_info in subs_to_activate:
all_subscription_logs.append(
RealmAuditLog(
realm=realm,
acting_user=acting_user,
modified_user=sub_info.user,
modified_stream=sub_info.stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
event_time=event_time,
)
)
# Now since we have all log objects generated we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
def send_stream_creation_events_for_private_streams(
realm: Realm,
stream_dict: Dict[int, Stream],
altered_user_dict: Dict[int, Set[int]],
) -> None:
for stream_id, stream_users_ids in altered_user_dict.items():
stream = stream_dict[stream_id]
if not stream.is_public():
            # Users newly added to invite-only streams need a `create`
            # notification: the stream must exist in their client state
            # before they get the "subscribe" notification, and the event
            # also lets them manage the new stream.
            # Realm admins already have all created private streams.
realm_admin_ids = {user.id for user in realm.get_admin_users_and_bots()}
notify_user_ids = list(stream_users_ids - realm_admin_ids)
if notify_user_ids:
send_stream_creation_event(stream, notify_user_ids)
def send_peer_subscriber_events(
op: str,
realm: Realm,
stream_dict: Dict[int, Stream],
altered_user_dict: Dict[int, Set[int]],
private_peer_dict: Dict[int, Set[int]],
) -> None:
# Send peer_add/peer_remove events to other users who are tracking the
# subscribers lists of streams in their browser; everyone for
# public streams and only existing subscribers for private streams.
assert op in ["peer_add", "peer_remove"]
private_stream_ids = [
stream_id for stream_id in altered_user_dict if stream_dict[stream_id].invite_only
]
for stream_id in private_stream_ids:
altered_user_ids = altered_user_dict[stream_id]
peer_user_ids = private_peer_dict[stream_id] - altered_user_ids
if peer_user_ids and altered_user_ids:
event = dict(
type="subscription",
op=op,
stream_ids=[stream_id],
user_ids=sorted(list(altered_user_ids)),
)
send_event(realm, event, peer_user_ids)
public_stream_ids = [
stream_id
for stream_id in altered_user_dict
if not stream_dict[stream_id].invite_only and not stream_dict[stream_id].is_in_zephyr_realm
]
if public_stream_ids:
user_streams: Dict[int, Set[int]] = defaultdict(set)
public_peer_ids = set(active_non_guest_user_ids(realm.id))
for stream_id in public_stream_ids:
altered_user_ids = altered_user_dict[stream_id]
peer_user_ids = public_peer_ids - altered_user_ids
if peer_user_ids and altered_user_ids:
if len(altered_user_ids) == 1:
# If we only have one user, we will try to
# find other streams they have (un)subscribed to
# (where it's just them). This optimization
# typically works when a single user is subscribed
# to multiple default public streams during
# new-user registration.
#
# This optimization depends on all public streams
# having the same peers for any single user, which
# isn't the case for private streams.
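                    # For example (illustrative), a new user auto-subscribed
                    # to five default public streams yields one peer_add
                    # event with stream_ids holding all five ids and
                    # user_ids=[new_user_id], instead of five events.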
altered_user_id = list(altered_user_ids)[0]
user_streams[altered_user_id].add(stream_id)
else:
event = dict(
type="subscription",
op=op,
stream_ids=[stream_id],
user_ids=sorted(list(altered_user_ids)),
)
send_event(realm, event, peer_user_ids)
for user_id, stream_ids in user_streams.items():
peer_user_ids = public_peer_ids - {user_id}
event = dict(
type="subscription",
op=op,
stream_ids=sorted(list(stream_ids)),
user_ids=[user_id],
)
send_event(realm, event, peer_user_ids)
def send_peer_remove_events(
realm: Realm,
streams: List[Stream],
altered_user_dict: Dict[int, Set[int]],
) -> None:
private_streams = [stream for stream in streams if stream.invite_only]
private_peer_dict = bulk_get_private_peers(
realm=realm,
private_streams=private_streams,
)
stream_dict = {stream.id: stream for stream in streams}
send_peer_subscriber_events(
op="peer_remove",
realm=realm,
stream_dict=stream_dict,
altered_user_dict=altered_user_dict,
private_peer_dict=private_peer_dict,
)
def get_available_notification_sounds() -> List[str]:
notification_sounds_path = static_path("audio/notification_sounds")
available_notification_sounds = []
for file_name in os.listdir(notification_sounds_path):
root, ext = os.path.splitext(file_name)
if "." in root: # nocoverage
# Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming)
# to avoid spurious duplicates.
continue
if ext == ".ogg":
available_notification_sounds.append(root)
return sorted(available_notification_sounds)
def notify_subscriptions_removed(
realm: Realm, user_profile: UserProfile, streams: Iterable[Stream]
) -> None:
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove", subscriptions=payload)
send_event(realm, event, [user_profile.id])
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(
realm: Realm,
users: Iterable[UserProfile],
streams: Iterable[Stream],
*,
acting_user: Optional[UserProfile],
) -> SubAndRemovedT:
users = list(users)
streams = list(streams)
# Sanity check our callers
for stream in streams:
assert stream.realm_id == realm.id
for user in users:
assert user.realm_id == realm.id
stream_dict = {stream.id: stream for stream in streams}
existing_subs_by_user = get_bulk_stream_subscriber_info(users, streams)
def get_non_subscribed_subs() -> List[Tuple[UserProfile, Stream]]:
stream_ids = {stream.id for stream in streams}
not_subscribed: List[Tuple[UserProfile, Stream]] = []
for user_profile in users:
user_sub_stream_info = existing_subs_by_user[user_profile.id]
subscribed_stream_ids = {sub_info.stream.id for sub_info in user_sub_stream_info}
not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
for stream_id in not_subscribed_stream_ids:
stream = stream_dict[stream_id]
not_subscribed.append((user_profile, stream))
return not_subscribed
not_subscribed = get_non_subscribed_subs()
subs_to_deactivate: List[SubInfo] = []
sub_ids_to_deactivate: List[int] = []
# This loop just flattens out our data into big lists for
# bulk operations.
for sub_infos in existing_subs_by_user.values():
for sub_info in sub_infos:
subs_to_deactivate.append(sub_info)
sub_ids_to_deactivate.append(sub_info.sub.id)
# We do all the database changes in a transaction to ensure
# RealmAuditLog entries are atomically created when making changes.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(realm))
Subscription.objects.filter(
id__in=sub_ids_to_deactivate,
).update(active=False)
occupied_streams_after = list(get_occupied_streams(realm))
# Log subscription activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
all_subscription_logs = [
RealmAuditLog(
realm=sub_info.user.realm,
acting_user=acting_user,
modified_user=sub_info.user,
modified_stream=sub_info.stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
event_time=event_time,
)
for sub_info in subs_to_deactivate
]
# Now since we have all log objects generated we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
streams_by_user: Dict[int, List[Stream]] = defaultdict(list)
for sub_info in subs_to_deactivate:
stream = sub_info.stream
streams_by_user[sub_info.user.id].append(stream)
altered_user_dict[stream.id].add(sub_info.user.id)
for user_profile in users:
if len(streams_by_user[user_profile.id]) == 0:
continue
notify_subscriptions_removed(realm, user_profile, streams_by_user[user_profile.id])
event = {
"type": "mark_stream_messages_as_read",
"user_profile_id": user_profile.id,
"stream_recipient_ids": [stream.recipient_id for stream in streams],
}
queue_json_publish("deferred_work", event)
send_peer_remove_events(
realm=realm,
streams=streams,
altered_user_dict=altered_user_dict,
)
new_vacant_streams = set(occupied_streams_before) - set(occupied_streams_after)
new_vacant_private_streams = [stream for stream in new_vacant_streams if stream.invite_only]
if new_vacant_private_streams:
# Deactivate any newly-vacant private streams
for stream in new_vacant_private_streams:
do_deactivate_stream(stream, acting_user=acting_user)
return (
[(sub_info.user, sub_info.stream) for sub_info in subs_to_deactivate],
not_subscribed,
)
def do_change_subscription_property(
user_profile: UserProfile,
sub: Subscription,
stream: Stream,
property_name: str,
value: Any,
*,
acting_user: Optional[UserProfile],
) -> None:
database_property_name = property_name
event_property_name = property_name
database_value = value
event_value = value
    # The is_muted name is used in the database, but in_home_view is
    # still used in the events API, since we haven't migrated clients
    # to the new name yet.
if property_name == "in_home_view":
database_property_name = "is_muted"
database_value = not value
if property_name == "is_muted":
event_property_name = "in_home_view"
event_value = not value
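    # For example: a client setting in_home_view=False is stored as
    # is_muted=True, and a change to is_muted=True is broadcast to
    # clients as in_home_view=False.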
old_value = getattr(sub, database_property_name)
setattr(sub, database_property_name, database_value)
sub.save(update_fields=[database_property_name])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.SUBSCRIPTION_PROPERTY_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
modified_stream=stream,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: database_value,
"property": database_property_name,
}
).decode(),
)
event = dict(
type="subscription",
op="update",
property=event_property_name,
value=event_value,
stream_id=stream.id,
)
send_event(user_profile.realm, event, [user_profile.id])
def do_change_password(user_profile: UserProfile, password: str, commit: bool = True) -> None:
user_profile.set_password(password)
if commit:
user_profile.save(update_fields=["password"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
event_time=event_time,
)
def do_change_full_name(
user_profile: UserProfile, full_name: str, acting_user: Optional[UserProfile]
) -> None:
old_name = user_profile.full_name
user_profile.full_name = full_name
user_profile.save(update_fields=["full_name"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
event_time=event_time,
extra_data=old_name,
)
payload = dict(user_id=user_profile.id, full_name=user_profile.full_name)
send_event(
user_profile.realm,
dict(type="realm_user", op="update", person=payload),
active_user_ids(user_profile.realm_id),
)
if user_profile.is_bot:
send_event(
user_profile.realm,
dict(type="realm_bot", op="update", bot=payload),
bot_owner_user_ids(user_profile),
)
def check_change_full_name(
user_profile: UserProfile, full_name_raw: str, acting_user: Optional[UserProfile]
) -> str:
"""Verifies that the user's proposed full name is valid. The caller
    is responsible for checking permissions. Returns the new
full name, which may differ from what was passed in (because this
function strips whitespace)."""
new_full_name = check_full_name(full_name_raw)
do_change_full_name(user_profile, new_full_name, acting_user)
return new_full_name
def check_change_bot_full_name(
user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile
) -> None:
new_full_name = check_full_name(full_name_raw)
if new_full_name == user_profile.full_name:
# Our web app will try to patch full_name even if the user didn't
# modify the name in the form. We just silently ignore those
# situations.
return
check_bot_name_available(
realm_id=user_profile.realm_id,
full_name=new_full_name,
)
do_change_full_name(user_profile, new_full_name, acting_user)
@transaction.atomic(durable=True)
def do_change_bot_owner(
user_profile: UserProfile, bot_owner: UserProfile, acting_user: UserProfile
) -> None:
previous_owner = user_profile.bot_owner
user_profile.bot_owner = bot_owner
user_profile.save() # Can't use update_fields because of how the foreign key works.
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
event_time=event_time,
)
update_users = bot_owner_user_ids(user_profile)
    # For admins, an update event is sent instead of delete/add
    # events, since an admin's bot_data already contains all the
    # bots and none of them should be removed (or added again).
    # Delete the bot from the previous owner's bot data.
if previous_owner and not previous_owner.is_realm_admin:
delete_event = dict(
type="realm_bot",
op="delete",
bot=dict(
user_id=user_profile.id,
),
)
transaction.on_commit(
lambda: send_event(
user_profile.realm,
delete_event,
{previous_owner.id},
)
)
# Do not send update event for previous bot owner.
update_users = update_users - {previous_owner.id}
# Notify the new owner that the bot has been added.
if not bot_owner.is_realm_admin:
add_event = created_bot_event(user_profile)
transaction.on_commit(lambda: send_event(user_profile.realm, add_event, {bot_owner.id}))
# Do not send update event for bot_owner.
update_users = update_users - {bot_owner.id}
bot_event = dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
owner_id=user_profile.bot_owner.id,
),
)
transaction.on_commit(
lambda: send_event(
user_profile.realm,
bot_event,
update_users,
)
)
    # Since `bot_owner_id` is included in the user profile dict, we need
    # to update clients' user data with the new bot owner id.
event = dict(
type="realm_user",
op="update",
person=dict(
user_id=user_profile.id,
bot_owner_id=user_profile.bot_owner.id,
),
)
transaction.on_commit(
lambda: send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
)
@transaction.atomic(durable=True)
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
user_profile.tos_version = tos_version
user_profile.save(update_fields=["tos_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_TERMS_OF_SERVICE_VERSION_CHANGED,
event_time=event_time,
)
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
old_api_key = user_profile.api_key
new_api_key = generate_api_key()
user_profile.api_key = new_api_key
user_profile.save(update_fields=["api_key"])
# We need to explicitly delete the old API key from our caches,
# because the on-save handler for flushing the UserProfile object
# in zerver/lib/cache.py only has access to the new API key.
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_API_KEY_CHANGED,
event_time=event_time,
)
if user_profile.is_bot:
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
api_key=new_api_key,
),
),
bot_owner_user_ids(user_profile),
)
event = {"type": "clear_push_device_tokens", "user_profile_id": user_profile.id}
queue_json_publish("deferred_work", event)
return new_api_key
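# Illustrative sketch (hypothetical admin action): rotating a bot's API key.
# `bot` and `admin` are assumed to be UserProfile objects fetched elsewhere.
def _example_rotate_bot_key(bot: UserProfile, admin: UserProfile) -> str:
    # Returns the freshly generated key; the old key's cache entry has
    # already been flushed inside do_regenerate_api_key.
    return do_regenerate_api_key(bot, admin)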
def notify_avatar_url_change(user_profile: UserProfile) -> None:
if user_profile.is_bot:
bot_event = dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
avatar_url=avatar_url(user_profile),
),
)
transaction.on_commit(
lambda: send_event(
user_profile.realm,
bot_event,
bot_owner_user_ids(user_profile),
)
)
payload = dict(
avatar_source=user_profile.avatar_source,
avatar_url=avatar_url(user_profile),
avatar_url_medium=avatar_url(user_profile, medium=True),
avatar_version=user_profile.avatar_version,
# Even clients using client_gravatar don't need the email,
# since we're sending the URL anyway.
user_id=user_profile.id,
)
event = dict(type="realm_user", op="update", person=payload)
transaction.on_commit(
lambda: send_event(
user_profile.realm,
event,
active_user_ids(user_profile.realm_id),
)
)
@transaction.atomic(savepoint=False)
def do_change_avatar_fields(
user_profile: UserProfile,
avatar_source: str,
skip_notify: bool = False,
*,
acting_user: Optional[UserProfile],
) -> None:
user_profile.avatar_source = avatar_source
user_profile.avatar_version += 1
user_profile.save(update_fields=["avatar_source", "avatar_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
extra_data={"avatar_source": avatar_source},
event_time=event_time,
acting_user=acting_user,
)
if not skip_notify:
notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile, *, acting_user: Optional[UserProfile]) -> None:
do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user)
delete_avatar_image(user)
@transaction.atomic(durable=True)
def do_change_icon_source(
realm: Realm, icon_source: str, *, acting_user: Optional[UserProfile]
) -> None:
realm.icon_source = icon_source
realm.icon_version += 1
realm.save(update_fields=["icon_source", "icon_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_ICON_SOURCE_CHANGED,
extra_data={"icon_source": icon_source, "icon_version": realm.icon_version},
event_time=event_time,
acting_user=acting_user,
)
event = dict(
type="realm",
op="update_dict",
property="icon",
data=dict(icon_source=realm.icon_source, icon_url=realm_icon_url(realm)),
)
transaction.on_commit(
lambda: send_event(
realm,
event,
active_user_ids(realm.id),
)
)
@transaction.atomic(durable=True)
def do_change_logo_source(
realm: Realm, logo_source: str, night: bool, *, acting_user: Optional[UserProfile]
) -> None:
if not night:
realm.logo_source = logo_source
realm.logo_version += 1
realm.save(update_fields=["logo_source", "logo_version"])
else:
realm.night_logo_source = logo_source
realm.night_logo_version += 1
realm.save(update_fields=["night_logo_source", "night_logo_version"])
RealmAuditLog.objects.create(
event_type=RealmAuditLog.REALM_LOGO_CHANGED,
realm=realm,
event_time=timezone_now(),
acting_user=acting_user,
)
event = dict(
type="realm",
op="update_dict",
property="night_logo" if night else "logo",
data=get_realm_logo_data(realm, night),
)
transaction.on_commit(lambda: send_event(realm, event, active_user_ids(realm.id)))
@transaction.atomic(durable=True)
def do_change_realm_org_type(
realm: Realm,
org_type: int,
acting_user: Optional[UserProfile],
) -> None:
old_value = realm.org_type
realm.org_type = org_type
realm.save(update_fields=["org_type"])
RealmAuditLog.objects.create(
event_type=RealmAuditLog.REALM_ORG_TYPE_CHANGED,
realm=realm,
event_time=timezone_now(),
acting_user=acting_user,
extra_data={"old_value": old_value, "new_value": org_type},
)
@transaction.atomic(savepoint=False)
def do_change_realm_plan_type(
realm: Realm, plan_type: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.plan_type
realm.plan_type = plan_type
realm.save(update_fields=["plan_type"])
RealmAuditLog.objects.create(
event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
realm=realm,
event_time=timezone_now(),
acting_user=acting_user,
extra_data={"old_value": old_value, "new_value": plan_type},
)
if plan_type == Realm.PLAN_TYPE_PLUS:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.PLAN_TYPE_STANDARD:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.PLAN_TYPE_SELF_HOSTED:
realm.max_invites = None # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter.
realm.message_visibility_limit = None
realm.upload_quota_gb = None
elif plan_type == Realm.PLAN_TYPE_STANDARD_FREE:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.PLAN_TYPE_LIMITED:
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
else:
raise AssertionError("Invalid plan type")
update_first_visible_message_id(realm)
realm.save(update_fields=["_max_invites", "message_visibility_limit", "upload_quota_gb"])
event = {
"type": "realm",
"op": "update",
"property": "plan_type",
"value": plan_type,
"extra_data": {"upload_quota": realm.upload_quota_bytes()},
}
transaction.on_commit(lambda: send_event(realm, event, active_user_ids(realm.id)))
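# Illustrative sketch (hypothetical billing hook): upgrading a realm after a
# successful payment; invite limits and quotas are derived from the plan type
# by the function above.
def _example_upgrade_realm(realm: Realm, admin: Optional[UserProfile]) -> None:
    do_change_realm_plan_type(realm, Realm.PLAN_TYPE_STANDARD, acting_user=admin)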
@transaction.atomic(durable=True)
def do_change_default_sending_stream(
user_profile: UserProfile, stream: Optional[Stream], *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.default_sending_stream_id
user_profile.default_sending_stream = stream
user_profile.save(update_fields=["default_sending_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_DEFAULT_SENDING_STREAM_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: None if stream is None else stream.id,
}
).decode(),
)
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
event = dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
default_sending_stream=stream_name,
),
)
transaction.on_commit(
lambda: send_event(
user_profile.realm,
event,
bot_owner_user_ids(user_profile),
)
)
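# Illustrative sketch (hypothetical bot-settings handler): clearing a bot's
# default sending stream by passing None for the stream.
def _example_clear_default_sending_stream(bot: UserProfile, admin: UserProfile) -> None:
    do_change_default_sending_stream(bot, None, acting_user=admin)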
@transaction.atomic(durable=True)
def do_change_default_events_register_stream(
user_profile: UserProfile, stream: Optional[Stream], *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.default_events_register_stream_id
user_profile.default_events_register_stream = stream
user_profile.save(update_fields=["default_events_register_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_DEFAULT_REGISTER_STREAM_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: None if stream is None else stream.id,
}
).decode(),
)
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
event = dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
default_events_register_stream=stream_name,
),
)
transaction.on_commit(
lambda: send_event(
user_profile.realm,
event,
bot_owner_user_ids(user_profile),
)
)
@transaction.atomic(durable=True)
def do_change_default_all_public_streams(
user_profile: UserProfile, value: bool, *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.default_all_public_streams
user_profile.default_all_public_streams = value
user_profile.save(update_fields=["default_all_public_streams"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
}
).decode(),
)
if user_profile.is_bot:
event = dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
default_all_public_streams=user_profile.default_all_public_streams,
),
)
transaction.on_commit(
lambda: send_event(
user_profile.realm,
event,
bot_owner_user_ids(user_profile),
)
)
@transaction.atomic(durable=True)
def do_change_user_role(
user_profile: UserProfile, value: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.role
user_profile.role = value
user_profile.save(update_fields=["role"])
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_ROLE_CHANGED,
event_time=timezone_now(),
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
event = dict(
type="realm_user", op="update", person=dict(user_id=user_profile.id, role=user_profile.role)
)
transaction.on_commit(
lambda: send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
)
def do_make_user_billing_admin(user_profile: UserProfile) -> None:
user_profile.is_billing_admin = True
user_profile.save(update_fields=["is_billing_admin"])
event = dict(
type="realm_user", op="update", person=dict(user_id=user_profile.id, is_billing_admin=True)
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_can_forge_sender(user_profile: UserProfile, value: bool) -> None:
user_profile.can_forge_sender = value
user_profile.save(update_fields=["can_forge_sender"])
def do_change_can_create_users(user_profile: UserProfile, value: bool) -> None:
user_profile.can_create_users = value
user_profile.save(update_fields=["can_create_users"])
def send_change_stream_permission_notification(
stream: Stream,
*,
old_policy_name: str,
new_policy_name: str,
acting_user: UserProfile,
) -> None:
sender = get_system_bot(settings.NOTIFICATION_BOT, acting_user.realm_id)
user_mention = silent_mention_syntax_for_user(acting_user)
with override_language(stream.realm.default_language):
notification_string = _(
"{user} changed the [access permissions](/help/stream-permissions) "
"for this stream from **{old_policy}** to **{new_policy}**."
)
notification_string = notification_string.format(
user=user_mention,
old_policy=old_policy_name,
new_policy=new_policy_name,
)
internal_send_stream_message(
sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, notification_string
)
def do_change_stream_permission(
stream: Stream,
*,
invite_only: Optional[bool] = None,
history_public_to_subscribers: Optional[bool] = None,
is_web_public: Optional[bool] = None,
acting_user: UserProfile,
) -> None:
old_invite_only_value = stream.invite_only
old_history_public_to_subscribers_value = stream.history_public_to_subscribers
old_is_web_public_value = stream.is_web_public
# A note on these assertions: It's possible we'd be better off
# making all callers of this function pass the full set of
# parameters, rather than having default values. Doing so would
# allow us to remove the messy logic below, where we sometimes
# ignore the passed parameters.
#
    # But absent such a refactoring, it's important to assert that
    # we're not requesting an unsupported configuration.
if is_web_public:
assert history_public_to_subscribers is not False
assert invite_only is not True
stream.is_web_public = True
stream.invite_only = False
stream.history_public_to_subscribers = True
else:
assert invite_only is not None
        # is_web_public is falsy
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
stream.realm,
invite_only,
history_public_to_subscribers,
)
stream.invite_only = invite_only
stream.history_public_to_subscribers = history_public_to_subscribers
stream.is_web_public = False
with transaction.atomic():
stream.save(update_fields=["invite_only", "history_public_to_subscribers", "is_web_public"])
event_time = timezone_now()
if old_invite_only_value != stream.invite_only:
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=acting_user,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_PROPERTY_CHANGED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_invite_only_value,
RealmAuditLog.NEW_VALUE: stream.invite_only,
"property": "invite_only",
}
).decode(),
)
if old_history_public_to_subscribers_value != stream.history_public_to_subscribers:
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=acting_user,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_PROPERTY_CHANGED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_history_public_to_subscribers_value,
RealmAuditLog.NEW_VALUE: stream.history_public_to_subscribers,
"property": "history_public_to_subscribers",
}
).decode(),
)
if old_is_web_public_value != stream.is_web_public:
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=acting_user,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_PROPERTY_CHANGED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_is_web_public_value,
RealmAuditLog.NEW_VALUE: stream.is_web_public,
"property": "is_web_public",
}
).decode(),
)
event = dict(
op="update",
type="stream",
property="invite_only",
value=stream.invite_only,
history_public_to_subscribers=stream.history_public_to_subscribers,
is_web_public=stream.is_web_public,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
old_policy_name = get_stream_permission_policy_name(
invite_only=old_invite_only_value,
history_public_to_subscribers=old_history_public_to_subscribers_value,
is_web_public=old_is_web_public_value,
)
new_policy_name = get_stream_permission_policy_name(
invite_only=stream.invite_only,
history_public_to_subscribers=stream.history_public_to_subscribers,
is_web_public=stream.is_web_public,
)
send_change_stream_permission_notification(
stream,
old_policy_name=old_policy_name,
new_policy_name=new_policy_name,
acting_user=acting_user,
)
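# Illustrative sketch (hypothetical admin endpoint): making a stream
# web-public. Per the assertions above, this implies public history and a
# non-private stream, so those parameters can be omitted.
def _example_make_stream_web_public(stream: Stream, admin: UserProfile) -> None:
    do_change_stream_permission(stream, is_web_public=True, acting_user=admin)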
def send_change_stream_post_policy_notification(
stream: Stream, *, old_post_policy: int, new_post_policy: int, acting_user: UserProfile
) -> None:
sender = get_system_bot(settings.NOTIFICATION_BOT, acting_user.realm_id)
user_mention = silent_mention_syntax_for_user(acting_user)
with override_language(stream.realm.default_language):
notification_string = _(
"{user} changed the [posting permissions](/help/stream-sending-policy) "
"for this stream:\n\n"
"* **Old permissions**: {old_policy}.\n"
"* **New permissions**: {new_policy}.\n"
)
notification_string = notification_string.format(
user=user_mention,
old_policy=Stream.POST_POLICIES[old_post_policy],
new_policy=Stream.POST_POLICIES[new_post_policy],
)
internal_send_stream_message(
sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, notification_string
)
def do_change_stream_post_policy(
stream: Stream, stream_post_policy: int, *, acting_user: UserProfile
) -> None:
old_post_policy = stream.stream_post_policy
with transaction.atomic():
stream.stream_post_policy = stream_post_policy
stream.save(update_fields=["stream_post_policy"])
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=acting_user,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_PROPERTY_CHANGED,
event_time=timezone_now(),
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_post_policy,
RealmAuditLog.NEW_VALUE: stream_post_policy,
"property": "stream_post_policy",
}
).decode(),
)
event = dict(
op="update",
type="stream",
property="stream_post_policy",
value=stream_post_policy,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
# Backwards-compatibility code: We removed the
# is_announcement_only property in early 2020, but we send a
# duplicate event for legacy mobile clients that might want the
# data.
event = dict(
op="update",
type="stream",
property="is_announcement_only",
value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
send_change_stream_post_policy_notification(
stream,
old_post_policy=old_post_policy,
new_post_policy=stream_post_policy,
acting_user=acting_user,
)
def do_rename_stream(stream: Stream, new_name: str, user_profile: UserProfile) -> Dict[str, str]:
old_name = stream.name
stream.name = new_name
stream.save(update_fields=["name"])
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=user_profile,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_NAME_CHANGED,
event_time=timezone_now(),
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_name,
RealmAuditLog.NEW_VALUE: new_name,
}
).decode(),
)
recipient_id = stream.recipient_id
messages = Message.objects.filter(recipient_id=recipient_id).only("id")
# Update the display recipient and stream, which are easy single
# items to set.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
if old_cache_key != new_cache_key:
cache_delete(old_cache_key)
cache_set(new_cache_key, stream)
cache_set(display_recipient_cache_key(recipient_id), stream.name)
# Delete cache entries for everything else, which is cheaper and
# clearer than trying to set them. display_recipient is the out of
# date field in all cases.
cache_delete_many(to_dict_cache_key_id(message.id) for message in messages)
new_email = encode_email_address(stream, show_sender=True)
# We will tell our users to essentially
# update stream.name = new_name where name = old_name
# and update stream.email = new_email where name = old_name.
# We could optimize this by trying to send one message, but the
# client code really wants one property update at a time, and
# updating stream names is a pretty infrequent operation.
# More importantly, we want to key these updates by id, not name,
# since id is the immutable primary key, and obviously name is not.
data_updates = [
["email_address", new_email],
["name", new_name],
]
for property, value in data_updates:
event = dict(
op="update",
type="stream",
property=property,
value=value,
stream_id=stream.id,
name=old_name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
sender = get_system_bot(settings.NOTIFICATION_BOT, stream.realm_id)
with override_language(stream.realm.default_language):
internal_send_stream_message(
sender,
stream,
Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
_("{user_name} renamed stream {old_stream_name} to {new_stream_name}.").format(
user_name=silent_mention_syntax_for_user(user_profile),
old_stream_name=f"**{old_name}**",
new_stream_name=f"**{new_name}**",
),
)
# Even though the token doesn't change, the web client needs to update the
# email forwarding address to display the correctly-escaped new name.
return {"email_address": new_email}
def send_change_stream_description_notification(
stream: Stream, *, old_description: str, new_description: str, acting_user: UserProfile
) -> None:
sender = get_system_bot(settings.NOTIFICATION_BOT, acting_user.realm_id)
user_mention = silent_mention_syntax_for_user(acting_user)
with override_language(stream.realm.default_language):
notification_string = _(
"{user} changed the description for this stream.\n\n"
"* **Old description:**\n"
"``` quote\n"
"{old_description}\n"
"```\n"
"* **New description:**\n"
"``` quote\n"
"{new_description}\n"
"```"
)
notification_string = notification_string.format(
user=user_mention,
old_description=old_description,
new_description=new_description,
)
internal_send_stream_message(
sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, notification_string
)
def do_change_stream_description(
stream: Stream, new_description: str, *, acting_user: UserProfile
) -> None:
old_description = stream.description
with transaction.atomic():
stream.description = new_description
stream.rendered_description = render_stream_description(new_description)
stream.save(update_fields=["description", "rendered_description"])
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=acting_user,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_PROPERTY_CHANGED,
event_time=timezone_now(),
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_description,
RealmAuditLog.NEW_VALUE: new_description,
"property": "description",
}
).decode(),
)
event = dict(
type="stream",
op="update",
property="description",
name=stream.name,
stream_id=stream.id,
value=new_description,
rendered_description=stream.rendered_description,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
send_change_stream_description_notification(
stream,
old_description=old_description,
new_description=new_description,
acting_user=acting_user,
)
def send_change_stream_message_retention_days_notification(
user_profile: UserProfile, stream: Stream, old_value: Optional[int], new_value: Optional[int]
) -> None:
sender = get_system_bot(settings.NOTIFICATION_BOT, user_profile.realm_id)
user_mention = silent_mention_syntax_for_user(user_profile)
# If switching from or to the organization's default retention policy,
# we want to take the realm's default into account.
if old_value is None:
old_value = stream.realm.message_retention_days
if new_value is None:
new_value = stream.realm.message_retention_days
with override_language(stream.realm.default_language):
if old_value == Stream.MESSAGE_RETENTION_SPECIAL_VALUES_MAP["unlimited"]:
old_retention_period = _("Forever")
new_retention_period = f"{new_value} days"
summary_line = f"Messages in this stream will now be automatically deleted {new_value} days after they are sent."
elif new_value == Stream.MESSAGE_RETENTION_SPECIAL_VALUES_MAP["unlimited"]:
old_retention_period = f"{old_value} days"
new_retention_period = _("Forever")
summary_line = _("Messages in this stream will now be retained forever.")
else:
old_retention_period = f"{old_value} days"
new_retention_period = f"{new_value} days"
summary_line = f"Messages in this stream will now be automatically deleted {new_value} days after they are sent."
notification_string = _(
"{user} has changed the [message retention period](/help/message-retention-policy) for this stream:\n"
"* **Old retention period**: {old_retention_period}\n"
"* **New retention period**: {new_retention_period}\n\n"
"{summary_line}"
)
notification_string = notification_string.format(
user=user_mention,
old_retention_period=old_retention_period,
new_retention_period=new_retention_period,
summary_line=summary_line,
)
internal_send_stream_message(
sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, notification_string
)
def do_change_stream_message_retention_days(
stream: Stream, acting_user: UserProfile, message_retention_days: Optional[int] = None
) -> None:
old_message_retention_days_value = stream.message_retention_days
with transaction.atomic():
stream.message_retention_days = message_retention_days
stream.save(update_fields=["message_retention_days"])
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=acting_user,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_MESSAGE_RETENTION_DAYS_CHANGED,
event_time=timezone_now(),
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_message_retention_days_value,
RealmAuditLog.NEW_VALUE: message_retention_days,
}
).decode(),
)
event = dict(
op="update",
type="stream",
property="message_retention_days",
value=message_retention_days,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
send_change_stream_message_retention_days_notification(
user_profile=acting_user,
stream=stream,
old_value=old_message_retention_days_value,
new_value=message_retention_days,
)
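# Illustrative sketch (hypothetical settings handler): reverting a stream to
# the realm's default retention policy by passing None.
def _example_use_realm_retention_policy(stream: Stream, admin: UserProfile) -> None:
    do_change_stream_message_retention_days(stream, admin, message_retention_days=None)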
def set_realm_permissions_based_on_org_type(realm: Realm) -> None:
"""This function implements overrides for the default configuration
for new organizations when the administrator selected specific
organization types.
This substantially simplifies our /help/ advice for folks setting
up new organizations of these types.
"""
# Custom configuration for educational organizations. The present
# defaults are designed for a single class, not a department or
# larger institution, since those are more common.
if (
realm.org_type == Realm.ORG_TYPES["education_nonprofit"]["id"]
or realm.org_type == Realm.ORG_TYPES["education"]["id"]
):
# Limit email address visibility and user creation to administrators.
realm.email_address_visibility = Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS
realm.invite_to_realm_policy = Realm.POLICY_ADMINS_ONLY
# Restrict public stream creation to staff, but allow private
# streams (useful for study groups, etc.).
realm.create_public_stream_policy = Realm.POLICY_ADMINS_ONLY
# Don't allow members (students) to manage user groups or
# stream subscriptions.
realm.user_group_edit_policy = Realm.POLICY_MODERATORS_ONLY
realm.invite_to_stream_policy = Realm.POLICY_MODERATORS_ONLY
# Allow moderators (TAs?) to move topics between streams.
realm.move_messages_between_streams_policy = Realm.POLICY_MODERATORS_ONLY
def do_create_realm(
string_id: str,
name: str,
*,
emails_restricted_to_domains: Optional[bool] = None,
email_address_visibility: Optional[int] = None,
description: Optional[str] = None,
invite_required: Optional[bool] = None,
plan_type: Optional[int] = None,
org_type: Optional[int] = None,
date_created: Optional[datetime.datetime] = None,
is_demo_organization: Optional[bool] = False,
enable_spectator_access: Optional[bool] = False,
) -> Realm:
if string_id == settings.SOCIAL_AUTH_SUBDOMAIN:
raise AssertionError("Creating a realm on SOCIAL_AUTH_SUBDOMAIN is not allowed!")
if Realm.objects.filter(string_id=string_id).exists():
raise AssertionError(f"Realm {string_id} already exists!")
if not server_initialized():
logging.info("Server not yet initialized. Creating the internal realm first.")
create_internal_realm()
kwargs: Dict[str, Any] = {}
if emails_restricted_to_domains is not None:
kwargs["emails_restricted_to_domains"] = emails_restricted_to_domains
if email_address_visibility is not None:
kwargs["email_address_visibility"] = email_address_visibility
if description is not None:
kwargs["description"] = description
if invite_required is not None:
kwargs["invite_required"] = invite_required
if plan_type is not None:
kwargs["plan_type"] = plan_type
if org_type is not None:
kwargs["org_type"] = org_type
if enable_spectator_access is not None:
kwargs["enable_spectator_access"] = enable_spectator_access
if date_created is not None:
# The date_created parameter is intended only for use by test
# suites that want to backdate the date of a realm's creation.
assert not settings.PRODUCTION
kwargs["date_created"] = date_created
with transaction.atomic():
realm = Realm(string_id=string_id, name=name, **kwargs)
if is_demo_organization:
realm.demo_organization_scheduled_deletion_date = (
realm.date_created + datetime.timedelta(days=settings.DEMO_ORG_DEADLINE_DAYS)
)
set_realm_permissions_based_on_org_type(realm)
realm.save()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_CREATED, event_time=realm.date_created
)
RealmUserDefault.objects.create(realm=realm)
# Create stream once Realm object has been saved
notifications_stream = ensure_stream(
realm,
Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
stream_description="Everyone is added to this stream by default. Welcome! :octopus:",
acting_user=None,
)
realm.notifications_stream = notifications_stream
# With the current initial streams situation, the only public
# stream is the notifications_stream.
DefaultStream.objects.create(stream=notifications_stream, realm=realm)
signup_notifications_stream = ensure_stream(
realm,
Realm.INITIAL_PRIVATE_STREAM_NAME,
invite_only=True,
stream_description="A private stream for core team members.",
acting_user=None,
)
realm.signup_notifications_stream = signup_notifications_stream
realm.save(update_fields=["notifications_stream", "signup_notifications_stream"])
if plan_type is None and settings.BILLING_ENABLED:
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=None)
admin_realm = get_realm(settings.SYSTEM_BOT_REALM)
sender = get_system_bot(settings.NOTIFICATION_BOT, admin_realm.id)
# Send a notification to the admin realm
signup_message = _("Signups enabled")
try:
signups_stream = get_signups_stream(admin_realm)
topic = realm.display_subdomain
internal_send_stream_message(
sender,
signups_stream,
topic,
signup_message,
)
except Stream.DoesNotExist: # nocoverage
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
return realm
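# Illustrative sketch (hypothetical provisioning script): creating an
# invite-only business realm; unspecified settings fall back to model
# defaults. The subdomain and name here are made up.
def _example_provision_realm() -> Realm:
    return do_create_realm(
        string_id="acme",
        name="Acme Corporation",
        invite_required=True,
        org_type=Realm.ORG_TYPES["business"]["id"],
    )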
def update_scheduled_email_notifications_time(
user_profile: UserProfile, old_batching_period: int, new_batching_period: int
) -> None:
existing_scheduled_emails = ScheduledMessageNotificationEmail.objects.filter(
user_profile=user_profile
)
scheduled_timestamp_change = datetime.timedelta(
seconds=new_batching_period
) - datetime.timedelta(seconds=old_batching_period)
existing_scheduled_emails.update(
scheduled_timestamp=F("scheduled_timestamp") + scheduled_timestamp_change
)
@transaction.atomic(durable=True)
def do_change_user_setting(
user_profile: UserProfile,
setting_name: str,
setting_value: Union[bool, str, int],
*,
acting_user: Optional[UserProfile],
) -> None:
old_value = getattr(user_profile, setting_name)
event_time = timezone_now()
if setting_name == "timezone":
assert isinstance(setting_value, str)
setting_value = canonicalize_timezone(setting_value)
else:
property_type = UserProfile.property_types[setting_name]
assert isinstance(setting_value, property_type)
setattr(user_profile, setting_name, setting_value)
# TODO: Move these database actions into a transaction.atomic block.
user_profile.save(update_fields=[setting_name])
if setting_name in UserProfile.notification_setting_types:
# Prior to all personal settings being managed by property_types,
# these were only created for notification settings.
#
# TODO: Start creating these for all settings, and do a
# backfilled=True migration.
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_SETTING_CHANGED,
event_time=event_time,
acting_user=acting_user,
modified_user=user_profile,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: setting_value,
"property": setting_name,
}
).decode(),
)
# Disabling digest emails should clear a user's email queue
if setting_name == "enable_digest_emails" and not setting_value:
clear_scheduled_emails(user_profile.id, ScheduledEmail.DIGEST)
if setting_name == "email_notifications_batching_period_seconds":
assert isinstance(old_value, int)
assert isinstance(setting_value, int)
update_scheduled_email_notifications_time(user_profile, old_value, setting_value)
event = {
"type": "user_settings",
"op": "update",
"property": setting_name,
"value": setting_value,
}
if setting_name == "default_language":
assert isinstance(setting_value, str)
event["language_name"] = get_language_name(setting_value)
transaction.on_commit(lambda: send_event(user_profile.realm, event, [user_profile.id]))
if setting_name in UserProfile.notification_settings_legacy:
# This legacy event format is for backwards-compatibility with
# clients that don't support the new user_settings event type.
# We only send this for settings added before Feature level 89.
legacy_event = {
"type": "update_global_notifications",
"user": user_profile.email,
"notification_name": setting_name,
"setting": setting_value,
}
transaction.on_commit(
lambda: send_event(user_profile.realm, legacy_event, [user_profile.id])
)
if setting_name in UserProfile.display_settings_legacy or setting_name == "timezone":
# This legacy event format is for backwards-compatibility with
# clients that don't support the new user_settings event type.
# We only send this for settings added before Feature level 89.
legacy_event = {
"type": "update_display_settings",
"user": user_profile.email,
"setting_name": setting_name,
"setting": setting_value,
}
if setting_name == "default_language":
assert isinstance(setting_value, str)
legacy_event["language_name"] = get_language_name(setting_value)
transaction.on_commit(
lambda: send_event(user_profile.realm, legacy_event, [user_profile.id])
)
# Updates to the time zone display setting are sent to all users
if setting_name == "timezone":
payload = dict(
email=user_profile.email,
user_id=user_profile.id,
timezone=canonicalize_timezone(user_profile.timezone),
)
timezone_event = dict(type="realm_user", op="update", person=payload)
transaction.on_commit(
lambda: send_event(
user_profile.realm,
timezone_event,
active_user_ids(user_profile.realm_id),
)
)
if setting_name == "enable_drafts_synchronization" and setting_value is False:
        # Delete all of the drafts from the backend but don't send delete
        # events for them, since all that's happened is that we stopped
        # syncing changes, not deleted every previously synced draft; to
        # do that, use the DELETE endpoint.
Draft.objects.filter(user_profile=user_profile).delete()
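# Illustrative sketch (hypothetical settings view): changing a single user
# setting; the legacy events above are emitted automatically for older
# clients.
def _example_set_language(user_profile: UserProfile, acting_user: UserProfile) -> None:
    do_change_user_setting(user_profile, "default_language", "en", acting_user=acting_user)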
def lookup_default_stream_groups(
default_stream_group_names: List[str], realm: Realm
) -> List[DefaultStreamGroup]:
default_stream_groups = []
for group_name in default_stream_group_names:
try:
default_stream_group = DefaultStreamGroup.objects.get(name=group_name, realm=realm)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_("Invalid default stream group {}").format(group_name))
default_stream_groups.append(default_stream_group)
return default_stream_groups
def notify_default_streams(realm: Realm) -> None:
event = dict(
type="default_streams",
default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id)),
)
transaction.on_commit(lambda: send_event(realm, event, active_non_guest_user_ids(realm.id)))
def notify_default_stream_groups(realm: Realm) -> None:
event = dict(
type="default_stream_groups",
default_stream_groups=default_stream_groups_to_dicts_sorted(
get_default_stream_groups(realm)
),
)
transaction.on_commit(lambda: send_event(realm, event, active_non_guest_user_ids(realm.id)))
def do_add_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
notify_default_streams(stream.realm)
@transaction.atomic(savepoint=False)
def do_remove_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete()
notify_default_streams(stream.realm)
def do_create_default_stream_group(
realm: Realm, group_name: str, description: str, streams: List[Stream]
) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(
_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group_name)
)
check_default_stream_group_name(group_name)
(group, created) = DefaultStreamGroup.objects.get_or_create(
name=group_name, realm=realm, description=description
)
if not created:
raise JsonableError(
_(
"Default stream group '{group_name}' already exists",
).format(group_name=group_name)
)
group.streams.set(streams)
notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(
realm: Realm, group: DefaultStreamGroup, streams: List[Stream]
) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(
_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group.name)
)
if stream in group.streams.all():
raise JsonableError(
_(
"Stream '{stream_name}' is already present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name)
)
group.streams.add(stream)
group.save()
notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(
realm: Realm, group: DefaultStreamGroup, streams: List[Stream]
) -> None:
for stream in streams:
if stream not in group.streams.all():
raise JsonableError(
_(
"Stream '{stream_name}' is not present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name)
)
group.streams.remove(stream)
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_name(
realm: Realm, group: DefaultStreamGroup, new_group_name: str
) -> None:
if group.name == new_group_name:
raise JsonableError(
_("This default stream group is already named '{}'").format(new_group_name)
)
if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name))
group.name = new_group_name
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_description(
realm: Realm, group: DefaultStreamGroup, new_description: str
) -> None:
group.description = new_description
group.save()
notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
group.delete()
notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
return [
default.stream
for default in DefaultStream.objects.select_related().filter(realm_id=realm_id)
]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
# Right now default streams are realm-wide. This wrapper gives us flexibility
# to some day further customize how we set up default streams for new users.
return get_default_streams_for_realm(user_profile.realm_id)
# Returns default streams in a JSON-serializable format.
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
return sorted((stream.to_dict() for stream in streams), key=lambda elt: elt["name"])
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
return sorted((group.to_dict() for group in groups), key=lambda elt: elt["name"])
def do_update_user_activity_interval(
user_profile: UserProfile, log_time: datetime.datetime
) -> None:
effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
# This code isn't perfect, because with various races we might end
# up creating two overlapping intervals, but that shouldn't happen
    # often, and can be corrected for in post-processing.
try:
last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
# Two intervals overlap iff each interval ends after the other
# begins. In this case, we just extend the old interval to
# include the new interval.
if log_time <= last.end and effective_end >= last.start:
last.end = max(last.end, effective_end)
last.start = min(last.start, log_time)
last.save(update_fields=["start", "end"])
return
except IndexError:
pass
# Otherwise, the intervals don't overlap, so we should make a new one
UserActivityInterval.objects.create(
user_profile=user_profile, start=log_time, end=effective_end
)
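# Worked example of the merge above (assuming, for illustration, that
# MIN_INTERVAL_LENGTH is 15 minutes): a heartbeat at 12:00 creates the
# interval [12:00, 12:15]; a heartbeat at 12:10 overlaps it, so the row is
# extended to [12:00, 12:25] instead of creating a second interval.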
@statsd_increment("user_activity")
def do_update_user_activity(
user_profile_id: int, client_id: int, query: str, count: int, log_time: datetime.datetime
) -> None:
(activity, created) = UserActivity.objects.get_or_create(
user_profile_id=user_profile_id,
client_id=client_id,
query=query,
defaults={"last_visit": log_time, "count": count},
)
if not created:
activity.count += count
activity.last_visit = log_time
activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
# Most presence data is sent to clients in the main presence
    # endpoint in response to the user's own presence; this results in
    # data that is 1-2 minutes stale for who is online. The flaw with
    # this plan is when a user comes back online and then immediately
    # sends a message, recipients may still see that user as offline!
    # We solve that by sending an immediate presence update to clients.
#
# See https://zulip.readthedocs.io/en/latest/subsystems/presence.html for
# internals documentation on presence.
user_ids = active_user_ids(user_profile.realm_id)
if len(user_ids) > settings.USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS:
        # These immediate presence updates generate quadratic work for Tornado
# (linear number of users in each event and the frequency of
# users coming online grows linearly with userbase too). In
# organizations with thousands of users, this can overload
# Tornado, especially if much of the realm comes online at the
# same time.
#
# The utility of these live-presence updates goes down as
# organizations get bigger (since one is much less likely to
# be paying attention to the sidebar); so beyond a limit, we
# stop sending them at all.
return
presence_dict = presence.to_dict()
event = dict(
type="presence",
email=user_profile.email,
user_id=user_profile.id,
server_timestamp=time.time(),
presence={presence_dict["client"]: presence_dict},
)
send_event(user_profile.realm, event, user_ids)
def consolidate_client(client: Client) -> Client:
    # The web app reports a client as 'website', while the desktop app
    # reports a client as 'ZulipDesktop' because it sets a custom user
    # agent. We want both to count as web users, so we alias
    # ZulipDesktop to website.
if client.name in ["ZulipDesktop"]:
return get_client("website")
else:
return client
@statsd_increment("user_presence")
def do_update_user_presence(
user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int
) -> None:
client = consolidate_client(client)
defaults = dict(
timestamp=log_time,
status=status,
realm_id=user_profile.realm_id,
)
(presence, created) = UserPresence.objects.get_or_create(
user_profile=user_profile,
client=client,
defaults=defaults,
)
stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
was_idle = presence.status == UserPresence.IDLE
became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
# If an object was created, it has already been saved.
#
# We suppress changes from ACTIVE to IDLE before stale_status is reached;
# this protects us from the user having two clients open: one active, the
# other idle. Without this check, we would constantly toggle their status
# between the two states.
if not created and stale_status or was_idle or status == presence.status:
# The following block attempts to only update the "status"
# field in the event that it actually changed. This is
# important to avoid flushing the UserPresence cache when the
# data it would return to a client hasn't actually changed
# (see the UserPresence post_save hook for details).
presence.timestamp = log_time
update_fields = ["timestamp"]
if presence.status != status:
presence.status = status
update_fields.append("status")
presence.save(update_fields=update_fields)
if not user_profile.realm.presence_disabled and (created or became_online):
send_presence_changed(user_profile, presence)
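# Illustrative sketch (hypothetical heartbeat consumer): recording that a
# user is active right now from the website client.
def _example_record_active_presence(user_profile: UserProfile) -> None:
    do_update_user_presence(
        user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE
    )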
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
event = {"user_profile_id": user_profile.id, "time": datetime_to_timestamp(log_time)}
queue_json_publish("user_activity_interval", event)
def update_user_presence(
user_profile: UserProfile,
client: Client,
log_time: datetime.datetime,
status: int,
new_user_input: bool,
) -> None:
event = {
"user_profile_id": user_profile.id,
"status": status,
"time": datetime_to_timestamp(log_time),
"client": client.name,
}
queue_json_publish("user_presence", event)
if new_user_input:
update_user_activity_interval(user_profile, log_time)
def do_update_user_status(
user_profile: UserProfile,
away: Optional[bool],
status_text: Optional[str],
client_id: int,
emoji_name: Optional[str],
emoji_code: Optional[str],
reaction_type: Optional[str],
) -> None:
if away is None:
status = None
elif away:
status = UserStatus.AWAY
else:
status = UserStatus.NORMAL
realm = user_profile.realm
update_user_status(
user_profile_id=user_profile.id,
status=status,
status_text=status_text,
client_id=client_id,
emoji_name=emoji_name,
emoji_code=emoji_code,
reaction_type=reaction_type,
)
event = dict(
type="user_status",
user_id=user_profile.id,
)
if away is not None:
event["away"] = away
if status_text is not None:
event["status_text"] = status_text
if emoji_name is not None:
event["emoji_name"] = emoji_name
event["emoji_code"] = emoji_code
event["reaction_type"] = reaction_type
send_event(realm, event, active_user_ids(realm.id))
@dataclass
class ReadMessagesEvent:
messages: List[int]
all: bool
type: str = field(default="update_message_flags", init=False)
op: str = field(default="add", init=False)
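    # Deprecated alias for "op", retained for backwards compatibility.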
operation: str = field(default="add", init=False)
flag: str = field(default="read", init=False)
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
log_statsd_event("bankruptcy")
# First, we clear mobile push notifications. This is safer in the
# event that the below logic times out and we're killed.
all_push_message_ids = (
UserMessage.objects.filter(
user_profile=user_profile,
)
.extra(
where=[UserMessage.where_active_push_notification()],
)
.values_list("message_id", flat=True)[0:10000]
)
do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids)
msgs = UserMessage.objects.filter(user_profile=user_profile).extra(
where=[UserMessage.where_unread()],
)
count = msgs.update(
flags=F("flags").bitor(UserMessage.flags.read),
)
event = asdict(
ReadMessagesEvent(
messages=[], # we don't send messages, since the client reloads anyway
all=True,
)
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
def do_mark_stream_messages_as_read(
user_profile: UserProfile, stream_recipient_id: int, topic_name: Optional[str] = None
) -> int:
log_statsd_event("mark_stream_as_read")
msgs = UserMessage.objects.filter(
user_profile=user_profile,
)
msgs = msgs.filter(message__recipient_id=stream_recipient_id)
if topic_name:
msgs = filter_by_topic_name_via_message(
query=msgs,
topic_name=topic_name,
)
msgs = msgs.extra(
where=[UserMessage.where_unread()],
)
message_ids = list(msgs.values_list("message_id", flat=True))
count = msgs.update(
flags=F("flags").bitor(UserMessage.flags.read),
)
event = asdict(
ReadMessagesEvent(
messages=message_ids,
all=False,
)
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
def do_mark_muted_user_messages_as_read(
user_profile: UserProfile,
muted_user: UserProfile,
) -> int:
messages = UserMessage.objects.filter(
user_profile=user_profile, message__sender=muted_user
).extra(where=[UserMessage.where_unread()])
message_ids = list(messages.values_list("message_id", flat=True))
count = messages.update(
flags=F("flags").bitor(UserMessage.flags.read),
)
event = asdict(
ReadMessagesEvent(
messages=message_ids,
all=False,
)
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
def do_update_mobile_push_notification(
message: Message,
prior_mention_user_ids: Set[int],
mentions_user_ids: Set[int],
stream_push_user_ids: Set[int],
) -> None:
# Called during the message edit code path to remove mobile push
# notifications for users who are no longer mentioned following
# the edit. See #15428 for details.
#
# A perfect implementation would also support updating the message
# in a sent notification if a message was edited to mention a
# group rather than a user (or vice versa), though it is likely
# not worth the effort to do such a change.
if not message.is_stream_message():
return
remove_notify_users = prior_mention_user_ids - mentions_user_ids - stream_push_user_ids
do_clear_mobile_push_notifications_for_ids(list(remove_notify_users), [message.id])
def do_clear_mobile_push_notifications_for_ids(
user_profile_ids: List[int], message_ids: List[int]
) -> None:
if len(message_ids) == 0:
return
# This function supports clearing notifications for several users
# only for the message-edit use case where we'll have a single message_id.
assert len(user_profile_ids) == 1 or len(message_ids) == 1
messages_by_user = defaultdict(list)
notifications_to_update = list(
UserMessage.objects.filter(
message_id__in=message_ids,
user_profile_id__in=user_profile_ids,
)
.extra(
where=[UserMessage.where_active_push_notification()],
)
.values_list("user_profile_id", "message_id")
)
for (user_id, message_id) in notifications_to_update:
messages_by_user[user_id].append(message_id)
for (user_profile_id, event_message_ids) in messages_by_user.items():
queue_json_publish(
"missedmessage_mobile_notifications",
{
"type": "remove",
"user_profile_id": user_profile_id,
"message_ids": event_message_ids,
},
)
def do_update_message_flags(
user_profile: UserProfile, client: Client, operation: str, flag: str, messages: List[int]
) -> int:
valid_flags = [item for item in UserMessage.flags if item not in UserMessage.NON_API_FLAGS]
if flag not in valid_flags:
raise JsonableError(_("Invalid flag: '{}'").format(flag))
if flag in UserMessage.NON_EDITABLE_FLAGS:
raise JsonableError(_("Flag not editable: '{}'").format(flag))
if operation not in ("add", "remove"):
raise JsonableError(_("Invalid message flag operation: '{}'").format(operation))
flagattr = getattr(UserMessage.flags, flag)
msgs = UserMessage.objects.filter(user_profile=user_profile, message_id__in=messages)
# This next block allows you to star any message, even those you
# didn't receive (e.g. because you're looking at a public stream
# you're not subscribed to, etc.). The problem is that starring
# is a flag boolean on UserMessage, and UserMessage rows are
# normally created only when you receive a message to support
# searching your personal history. So we need to create one. We
# add UserMessage.flags.historical, so that features that need
# "messages you actually received" can exclude these UserMessages.
if msgs.count() == 0:
        if len(messages) != 1:
raise JsonableError(_("Invalid message(s)"))
if flag != "starred":
raise JsonableError(_("Invalid message(s)"))
# Validate that the user could have read the relevant message
message = access_message(user_profile, messages[0])[0]
# OK, this is a message that you legitimately have access
# to via narrowing to the stream it is on, even though you
# didn't actually receive it. So we create a historical,
# read UserMessage message row for you to star.
UserMessage.objects.create(
user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read,
)
if operation == "add":
count = msgs.update(flags=F("flags").bitor(flagattr))
elif operation == "remove":
count = msgs.update(flags=F("flags").bitand(~flagattr))
event = {
"type": "update_message_flags",
"op": operation,
"operation": operation,
"flag": flag,
"messages": messages,
"all": False,
}
send_event(user_profile.realm, event, [user_profile.id])
if flag == "read" and operation == "add":
event_time = timezone_now()
do_clear_mobile_push_notifications_for_ids([user_profile.id], messages)
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
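# Illustrative sketch (hypothetical API handler): starring a single message,
# which may create a historical UserMessage row per the logic above.
def _example_star_message(user_profile: UserProfile, client: Client, message_id: int) -> int:
    return do_update_message_flags(user_profile, client, "add", "starred", [message_id])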
class MessageUpdateUserInfoResult(TypedDict):
message_user_ids: Set[int]
mention_user_ids: Set[int]
def maybe_send_resolve_topic_notifications(
*,
user_profile: UserProfile,
stream: Stream,
old_topic: str,
new_topic: str,
changed_messages: List[Message],
) -> None:
# Note that topics will have already been stripped in check_update_message.
#
# This logic is designed to treat removing a weird "✔ ✔✔ "
# prefix as unresolving the topic.
if old_topic.lstrip(RESOLVED_TOPIC_PREFIX) != new_topic.lstrip(RESOLVED_TOPIC_PREFIX):
return
topic_resolved: bool = new_topic.startswith(RESOLVED_TOPIC_PREFIX) and not old_topic.startswith(
RESOLVED_TOPIC_PREFIX
)
topic_unresolved: bool = old_topic.startswith(
RESOLVED_TOPIC_PREFIX
) and not new_topic.startswith(RESOLVED_TOPIC_PREFIX)
if not topic_resolved and not topic_unresolved:
# If there's some other weird topic that does not toggle the
# state of "topic starts with RESOLVED_TOPIC_PREFIX", we do
# nothing. Any other logic could result in cases where we send
# these notifications in a non-alternating fashion.
#
# Note that it is still possible for an individual topic to
# have multiple "This topic was marked as resolved"
# notifications in a row: one can send new messages to the
# pre-resolve topic and then resolve the topic created that
# way to get multiple in the resolved topic. And then an
        # administrator can delete the messages in between. We consider this
# to be a fundamental risk of irresponsible message deletion,
# not a bug with the "resolve topics" feature.
return
# Compute the users who either sent or reacted to messages that
    # were moved via the "resolve topic" action. Only those users
    # should have the resulting notification message left unread.
affected_participant_ids = (set(message.sender_id for message in changed_messages)) | set(
Reaction.objects.filter(message__in=changed_messages).values_list(
"user_profile_id", flat=True
)
)
sender = get_system_bot(settings.NOTIFICATION_BOT, user_profile.realm_id)
user_mention = silent_mention_syntax_for_user(user_profile)
with override_language(stream.realm.default_language):
if topic_resolved:
notification_string = _("{user} has marked this topic as resolved.")
elif topic_unresolved:
notification_string = _("{user} has marked this topic as unresolved.")
internal_send_stream_message(
sender,
stream,
new_topic,
notification_string.format(
user=user_mention,
),
limit_unread_user_ids=affected_participant_ids,
)
def send_message_moved_breadcrumbs(
user_profile: UserProfile,
old_stream: Stream,
old_topic: str,
old_thread_notification_string: Optional[str],
new_stream: Stream,
new_topic: Optional[str],
new_thread_notification_string: Optional[str],
changed_messages_count: int,
) -> None:
# Since moving content between streams is highly disruptive,
# it's worth adding a couple of tombstone messages showing what
# happened.
sender = get_system_bot(settings.NOTIFICATION_BOT, old_stream.realm_id)
if new_topic is None:
new_topic = old_topic
user_mention = silent_mention_syntax_for_user(user_profile)
old_topic_link = f"#**{old_stream.name}>{old_topic}**"
new_topic_link = f"#**{new_stream.name}>{new_topic}**"
if new_thread_notification_string is not None:
with override_language(new_stream.realm.default_language):
internal_send_stream_message(
sender,
new_stream,
new_topic,
new_thread_notification_string.format(
old_location=old_topic_link,
user=user_mention,
changed_messages_count=changed_messages_count,
),
)
if old_thread_notification_string is not None:
with override_language(old_stream.realm.default_language):
# Send a notification to the old stream that the topic was moved.
internal_send_stream_message(
sender,
old_stream,
old_topic,
old_thread_notification_string.format(
user=user_mention,
new_location=new_topic_link,
changed_messages_count=changed_messages_count,
),
)
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
query = UserMessage.objects.filter(
message=message_id,
flags=~UserMessage.flags.historical,
).values("user_profile_id", "flags")
rows = list(query)
message_user_ids = {row["user_profile_id"] for row in rows}
mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
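# A row counts as mentioned if either the personal or wildcard
# mention bit is set in its flags bitfield.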
mention_user_ids = {row["user_profile_id"] for row in rows if int(row["flags"]) & mask}
return dict(
message_user_ids=message_user_ids,
mention_user_ids=mention_user_ids,
)
def update_user_message_flags(
rendering_result: MessageRenderingResult, ums: Iterable[UserMessage]
) -> None:
wildcard = rendering_result.mentions_wildcard
mentioned_ids = rendering_result.mentions_user_ids
ids_with_alert_words = rendering_result.user_ids_with_alert_words
changed_ums: Set[UserMessage] = set()
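# Helper: set or clear a single flag bit, recording only the rows
# whose flags actually changed so we save just those below.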
def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
if should_set:
if not (um.flags & flag):
um.flags |= flag
changed_ums.add(um)
else:
if um.flags & flag:
um.flags &= ~flag
changed_ums.add(um)
for um in ums:
has_alert_word = um.user_profile_id in ids_with_alert_words
update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
mentioned = um.user_profile_id in mentioned_ids
update_flag(um, mentioned, UserMessage.flags.mentioned)
update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
for um in changed_ums:
um.save(update_fields=["flags"])
def update_to_dict_cache(
changed_messages: List[Message], realm_id: Optional[int] = None
) -> List[int]:
"""Updates the message as stored in the to_dict cache (for serving
messages)."""
items_for_remote_cache = {}
message_ids = []
changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id)
for msg_id, msg in changed_messages_to_dict.items():
message_ids.append(msg_id)
key = to_dict_cache_key_id(msg_id)
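# Values are wrapped in a one-element tuple, which appears to
# match the storage convention used by cache_set/cache_get.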
items_for_remote_cache[key] = (msg,)
cache_set_many(items_for_remote_cache)
return message_ids
def do_update_embedded_data(
user_profile: UserProfile,
message: Message,
content: Optional[str],
rendering_result: MessageRenderingResult,
) -> None:
timestamp = timezone_now()
event: Dict[str, Any] = {
"type": "update_message",
"user_id": None,
"edit_timestamp": datetime_to_timestamp(timestamp),
"message_id": message.id,
"rendering_only": True,
}
changed_messages = [message]
rendered_content: Optional[str] = None
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(rendering_result, ums)
rendered_content = rendering_result.rendered_content
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendered_content
message.save(update_fields=["content", "rendered_content"])
event["message_ids"] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
"id": um.user_profile_id,
"flags": um.flags_list(),
}
send_event(user_profile.realm, event, list(map(user_info, ums)))
class DeleteMessagesEvent(TypedDict, total=False):
type: str
message_ids: List[int]
message_type: str
topic: str
stream_id: int
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic(savepoint=False)
def do_update_message(
user_profile: UserProfile,
target_message: Message,
new_stream: Optional[Stream],
topic_name: Optional[str],
propagate_mode: str,
send_notification_to_old_thread: bool,
send_notification_to_new_thread: bool,
content: Optional[str],
rendering_result: Optional[MessageRenderingResult],
prior_mention_user_ids: Set[int],
mention_data: Optional[MentionData] = None,
) -> int:
"""
The main function for message editing. A message edit event can
modify:
* the message's content (in which case the caller will have
set both content and rendered_content),
* the topic, in which case the caller will have set topic_name
* or both
With topic edits, propagate_mode determines whether other messages
also have their topics edited.
"""
timestamp = timezone_now()
target_message.last_edit_time = timestamp
event: Dict[str, Any] = {
"type": "update_message",
"user_id": user_profile.id,
"edit_timestamp": datetime_to_timestamp(timestamp),
"message_id": target_message.id,
"rendering_only": False,
}
edit_history_event: Dict[str, Any] = {
"user_id": user_profile.id,
"timestamp": event["edit_timestamp"],
}
changed_messages = [target_message]
realm = user_profile.realm
stream_being_edited = None
if target_message.is_stream_message():
stream_id = target_message.recipient.type_id
stream_being_edited = get_stream_by_id_in_realm(stream_id, realm)
event["stream_name"] = stream_being_edited.name
event["stream_id"] = stream_being_edited.id
ums = UserMessage.objects.filter(message=target_message.id)
if content is not None:
assert rendering_result is not None
# mention_data is required if there's a content edit.
assert mention_data is not None
# add data from group mentions to mentions_user_ids.
for group_id in rendering_result.mentions_user_group_ids:
members = mention_data.get_group_members(group_id)
rendering_result.mentions_user_ids.update(members)
update_user_message_flags(rendering_result, ums)
# One could imagine checking realm.allow_edit_history here and
# modifying the events based on that setting, but doing so
# doesn't really make sense. We need to send the edit event
# to clients regardless, and a client already had access to
# the original/pre-edit content of the message anyway. That
# setting must be enforced on the client side, and making a
# change here simply complicates the logic for clients parsing
# edit history events.
event["orig_content"] = target_message.content
event["orig_rendered_content"] = target_message.rendered_content
edit_history_event["prev_content"] = target_message.content
edit_history_event["prev_rendered_content"] = target_message.rendered_content
edit_history_event[
"prev_rendered_content_version"
] = target_message.rendered_content_version
target_message.content = content
target_message.rendered_content = rendering_result.rendered_content
target_message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendering_result.rendered_content
event["prev_rendered_content_version"] = target_message.rendered_content_version
event["is_me_message"] = Message.is_status_message(
content, rendering_result.rendered_content
)
# target_message.has_image and target_message.has_link will have been
# already updated by Markdown rendering in the caller.
target_message.has_attachment = check_attachment_reference_change(
target_message, rendering_result
)
if target_message.is_stream_message():
if topic_name is not None:
new_topic_name = topic_name
else:
new_topic_name = target_message.topic_name()
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=new_topic_name,
)
else:
stream_topic = None
info = get_recipient_info(
realm_id=realm.id,
recipient=target_message.recipient,
sender_id=target_message.sender_id,
stream_topic=stream_topic,
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
event["online_push_user_ids"] = list(info["online_push_user_ids"])
event["pm_mention_push_disabled_user_ids"] = list(info["pm_mention_push_disabled_user_ids"])
event["pm_mention_email_disabled_user_ids"] = list(
info["pm_mention_email_disabled_user_ids"]
)
event["stream_push_user_ids"] = list(info["stream_push_user_ids"])
event["stream_email_user_ids"] = list(info["stream_email_user_ids"])
event["muted_sender_user_ids"] = list(info["muted_sender_user_ids"])
event["prior_mention_user_ids"] = list(prior_mention_user_ids)
event["presence_idle_user_ids"] = filter_presence_idle_user_ids(info["active_user_ids"])
event["all_bot_user_ids"] = list(info["all_bot_user_ids"])
if rendering_result.mentions_wildcard:
event["wildcard_mention_user_ids"] = list(info["wildcard_mention_user_ids"])
else:
event["wildcard_mention_user_ids"] = []
do_update_mobile_push_notification(
target_message,
prior_mention_user_ids,
rendering_result.mentions_user_ids,
info["stream_push_user_ids"],
)
if topic_name is not None or new_stream is not None:
orig_topic_name = target_message.topic_name()
event["propagate_mode"] = propagate_mode
if new_stream is not None:
assert content is None
assert target_message.is_stream_message()
assert stream_being_edited is not None
edit_history_event["prev_stream"] = stream_being_edited.id
event[ORIG_TOPIC] = orig_topic_name
target_message.recipient_id = new_stream.recipient_id
event["new_stream_id"] = new_stream.id
event["propagate_mode"] = propagate_mode
# When messages are moved from one stream to another, some
# users may lose access to those messages, including guest
# users and users not subscribed to the new stream (if it is a
# private stream). For those users, their experience is as
# though the messages were deleted, and we should send a
# delete_message event to them instead.
subs_to_old_stream = get_active_subscriptions_for_stream_id(
stream_id, include_deactivated_users=True
).select_related("user_profile")
subs_to_new_stream = list(
get_active_subscriptions_for_stream_id(
new_stream.id, include_deactivated_users=True
).select_related("user_profile")
)
old_stream_sub_ids = [user.user_profile_id for user in subs_to_old_stream]
new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream]
# Get users who aren't subscribed to the new_stream.
subs_losing_usermessages = [
sub for sub in subs_to_old_stream if sub.user_profile_id not in new_stream_sub_ids
]
# Users who can no longer access the message without some action
# from administrators.
subs_losing_access = [
sub
for sub in subs_losing_usermessages
if sub.user_profile.is_guest or not new_stream.is_public()
]
ums = ums.exclude(
user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages]
)
subs_gaining_usermessages = []
if not new_stream.is_history_public_to_subscribers():
# For private streams whose history is not public to
# subscribers, we find the users who were not present in the
# messages' old stream and create new UserMessage rows for them
# so that they can access these messages.
subs_gaining_usermessages += [
user_id for user_id in new_stream_sub_ids if user_id not in old_stream_sub_ids
]
if topic_name is not None:
topic_name = truncate_topic(topic_name)
target_message.set_topic_name(topic_name)
# These fields have legacy field names.
event[ORIG_TOPIC] = orig_topic_name
event[TOPIC_NAME] = topic_name
event[TOPIC_LINKS] = topic_links(target_message.sender.realm_id, topic_name)
edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
update_edit_history(target_message, timestamp, edit_history_event)
delete_event_notify_user_ids: List[int] = []
if propagate_mode in ["change_later", "change_all"]:
assert topic_name is not None or new_stream is not None
assert stream_being_edited is not None
# Other messages should only get topic/stream fields in their edit history.
topic_only_edit_history_event = {
k: v
for (k, v) in edit_history_event.items()
if k
not in [
"prev_content",
"prev_rendered_content",
"prev_rendered_content_version",
]
}
messages_list = update_messages_for_topic_edit(
acting_user=user_profile,
edited_message=target_message,
propagate_mode=propagate_mode,
orig_topic_name=orig_topic_name,
topic_name=topic_name,
new_stream=new_stream,
old_stream=stream_being_edited,
edit_history_event=topic_only_edit_history_event,
last_edit_time=timestamp,
)
changed_messages += messages_list
if new_stream is not None:
assert stream_being_edited is not None
changed_message_ids = [msg.id for msg in changed_messages]
if subs_gaining_usermessages:
ums_to_create = []
for message_id in changed_message_ids:
for user_profile_id in subs_gaining_usermessages:
# The fact that the user didn't have a UserMessage
# originally means we can infer that the user was not
# mentioned in the original message (even if mention
# syntax was present, it would not take effect for a
# user who was not subscribed). If we were editing the
# message's content, we would rerender the message and
# then use the new stream's data to determine whether
# this is a mention of a subscriber; but as we are not
# doing so, we choose to preserve the "was this mention
# syntax an actual mention" decision made during the
# original rendering, for implementation simplicity. As
# a result, the only flag to consider applying here is
# read.
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message_id,
flags=UserMessage.flags.read,
)
ums_to_create.append(um)
bulk_insert_ums(ums_to_create)
# Delete UserMessage objects for users who will no
# longer have access to these messages. Note: This could be
# very expensive, since it's N guest users x M messages.
UserMessage.objects.filter(
user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages],
message_id__in=changed_message_ids,
).delete()
delete_event: DeleteMessagesEvent = {
"type": "delete_message",
"message_ids": changed_message_ids,
"message_type": "stream",
"stream_id": stream_being_edited.id,
"topic": orig_topic_name,
}
delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access]
send_event(user_profile.realm, delete_event, delete_event_notify_user_ids)
# This does message.save(update_fields=[...])
save_message_for_edit_use_case(message=target_message)
realm_id: Optional[int] = None
if stream_being_edited is not None:
realm_id = stream_being_edited.realm_id
event["message_ids"] = update_to_dict_cache(changed_messages, realm_id)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
"id": um.user_profile_id,
"flags": um.flags_list(),
}
# The following blocks arrange for users who are subscribed to a
# stream and can see history from before they subscribed to get
# live-updates when old messages are edited (e.g. if the user does
# a topic edit themself).
#
# We still don't send an update event to users who are not
# subscribed to this stream and don't have a UserMessage row. This
# means if a non-subscriber is viewing the narrow, they won't get
# real-time updates. This is a balance between sending
# message-edit notifications for every public stream to every user
# in the organization (too expensive, and also not what we do for
# newly sent messages anyway) and having magical live-updates
# where possible.
users_to_be_notified = list(map(user_info, ums))
if stream_being_edited is not None:
if stream_being_edited.is_history_public_to_subscribers:
subscriptions = get_active_subscriptions_for_stream_id(
stream_id, include_deactivated_users=False
)
# We exclude long-term idle users, since they by
# definition have no active clients.
subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
# Remove duplicates by excluding the IDs of users already
# in the users_to_be_notified list. This handles the case
# where a user both has a UserMessage row and is a current
# subscriber.
subscriptions = subscriptions.exclude(
user_profile_id__in=[um.user_profile_id for um in ums]
)
if new_stream is not None:
assert delete_event_notify_user_ids is not None
subscriptions = subscriptions.exclude(
user_profile_id__in=delete_event_notify_user_ids
)
# All users that are subscribed to the stream must be
# notified when a message is edited.
subscriber_ids = set(subscriptions.values_list("user_profile_id", flat=True))
if new_stream is not None:
# TODO: Guest users don't see the new moved topic
# unless breadcrumb message for new stream is
# enabled. Excluding these users from receiving this
# event helps us avoid an error traceback for our
# clients. We should figure out a way to inform the
# guest users of this new topic if sending a 'message'
# event for these messages is not an option.
#
# Don't send this event to guest subs who are not
# subscribers of the old stream but are subscribed to
# the new stream; clients will be confused.
old_stream_unsubbed_guests = [
sub
for sub in subs_to_new_stream
if sub.user_profile.is_guest and sub.user_profile_id not in subscriber_ids
]
subscriptions = subscriptions.exclude(
user_profile_id__in=[sub.user_profile_id for sub in old_stream_unsubbed_guests]
)
subscriber_ids = set(subscriptions.values_list("user_profile_id", flat=True))
users_to_be_notified += list(map(subscriber_info, sorted(list(subscriber_ids))))
send_event(user_profile.realm, event, users_to_be_notified)
if len(changed_messages) > 0 and new_stream is not None and stream_being_edited is not None:
# Notify users that the topic was moved.
changed_messages_count = len(changed_messages)
if propagate_mode == "change_all":
moved_all_visible_messages = True
else:
# With other propagate modes, if the user in fact moved
# all messages in the stream, we want to explain it was a
# full-topic move.
#
# For security model reasons, we don't want to allow a
# user to take any action that would leak information
# about older messages they cannot access (e.g. the only
# remaining messages are in a stream without shared
# history). The bulk_access_messages call below addresses
# that concern.
#
# bulk_access_messages is inefficient for this task, since
# we just want to do the exists() version of this
# query. But it's nice to reuse code, and this bulk
# operation is likely cheaper than a `GET /messages`
# unless the topic has thousands of messages of history.
unmoved_messages = messages_for_topic(
stream_being_edited.recipient_id,
orig_topic_name,
)
visible_unmoved_messages = bulk_access_messages(
user_profile, unmoved_messages, stream=stream_being_edited
)
moved_all_visible_messages = len(visible_unmoved_messages) == 0
old_thread_notification_string = None
if send_notification_to_old_thread:
if moved_all_visible_messages:
old_thread_notification_string = gettext_lazy(
"This topic was moved to {new_location} by {user}."
)
elif changed_messages_count == 1:
old_thread_notification_string = gettext_lazy(
"A message was moved from this topic to {new_location} by {user}."
)
else:
old_thread_notification_string = gettext_lazy(
"{changed_messages_count} messages were moved from this topic to {new_location} by {user}."
)
new_thread_notification_string = None
if send_notification_to_new_thread:
if moved_all_visible_messages:
new_thread_notification_string = gettext_lazy(
"This topic was moved here from {old_location} by {user}."
)
elif changed_messages_count == 1:
new_thread_notification_string = gettext_lazy(
"A message was moved here from {old_location} by {user}."
)
else:
new_thread_notification_string = gettext_lazy(
"{changed_messages_count} messages were moved here from {old_location} by {user}."
)
send_message_moved_breadcrumbs(
user_profile,
stream_being_edited,
orig_topic_name,
old_thread_notification_string,
new_stream,
topic_name,
new_thread_notification_string,
changed_messages_count,
)
if (
topic_name is not None
and new_stream is None
and content is None
and len(changed_messages) > 0
):
assert stream_being_edited is not None
maybe_send_resolve_topic_notifications(
user_profile=user_profile,
stream=stream_being_edited,
old_topic=orig_topic_name,
new_topic=topic_name,
changed_messages=changed_messages,
)
return len(changed_messages)
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
# The messages in a delete_message event all belong to the same
# topic, or are a single private message, as any other behavior
# is not possible with the current callers of this method.
messages = list(messages)
message_ids = [message.id for message in messages]
if not message_ids:
return
event: DeleteMessagesEvent = {
"type": "delete_message",
"message_ids": message_ids,
}
sample_message = messages[0]
message_type = "stream"
users_to_notify = []
if not sample_message.is_stream_message():
assert len(messages) == 1
message_type = "private"
ums = UserMessage.objects.filter(message_id__in=message_ids)
users_to_notify = [um.user_profile_id for um in ums]
archiving_chunk_size = retention.MESSAGE_BATCH_SIZE
if message_type == "stream":
stream_id = sample_message.recipient.type_id
event["stream_id"] = stream_id
event["topic"] = sample_message.topic_name()
subscriptions = get_active_subscriptions_for_stream_id(
stream_id, include_deactivated_users=False
)
# We exclude long-term idle users, since they by definition have no active clients.
subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
users_to_notify = list(subscriptions.values_list("user_profile_id", flat=True))
archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
event["message_type"] = message_type
transaction.on_commit(lambda: send_event(realm, event, users_to_notify))
def do_delete_messages_by_sender(user: UserProfile) -> None:
message_ids = list(
Message.objects.filter(sender=user).values_list("id", flat=True).order_by("id")
)
if message_ids:
move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
stat = COUNT_STATS["messages_in_stream:is_bot:day"]
traffic_from = timezone_now() - datetime.timedelta(days=28)
query = StreamCount.objects.filter(property=stat.property, end_time__gt=traffic_from)
query = query.filter(stream_id__in=stream_ids)
traffic_list = query.values("stream_id").annotate(value=Sum("value"))
traffic_dict = {}
for traffic in traffic_list:
traffic_dict[traffic["stream_id"]] = traffic["value"]
return traffic_dict
def round_to_2_significant_digits(number: int) -> int:
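# round(n, 2 - len(str(n))) keeps the two leading digits,
# e.g. 1234 -> 1200 and 87 -> 87.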
return int(round(number, 2 - len(str(number))))
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(
stream_id: int, stream_date_created: datetime.datetime, recent_traffic: Dict[int, int]
) -> Optional[int]:
try:
stream_traffic = recent_traffic[stream_id]
except KeyError:
stream_traffic = 0
stream_age = (timezone_now() - stream_date_created).days
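# With >= 4 weeks of data, average the full 28-day window; with at
# least a week, extrapolate pro rata; younger streams are too new
# to estimate.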
if stream_age >= 28:
average_weekly_traffic = int(stream_traffic // 4)
elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
average_weekly_traffic = int(stream_traffic * 7 // stream_age)
else:
return None
if average_weekly_traffic == 0 and stream_traffic > 0:
average_weekly_traffic = 1
return round_to_2_significant_digits(average_weekly_traffic)
def get_web_public_subs(realm: Realm) -> SubscriptionInfo:
color_idx = 0
def get_next_color() -> str:
nonlocal color_idx
color = STREAM_ASSIGNMENT_COLORS[color_idx]
color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS)
return color
subscribed = []
for stream in get_web_public_streams_queryset(realm):
stream_dict = stream.to_dict()
# Add versions of the Subscription fields based on a simulated
# new user subscription set.
stream_dict["is_muted"] = False
stream_dict["color"] = get_next_color()
stream_dict["desktop_notifications"] = True
stream_dict["audible_notifications"] = True
stream_dict["push_notifications"] = True
stream_dict["email_notifications"] = True
stream_dict["pin_to_top"] = False
stream_weekly_traffic = get_average_weekly_stream_traffic(
stream.id, stream.date_created, {}
)
stream_dict["stream_weekly_traffic"] = stream_weekly_traffic
stream_dict["email_address"] = ""
subscribed.append(stream_dict)
return SubscriptionInfo(
subscriptions=subscribed,
unsubscribed=[],
never_subscribed=[],
)
def build_stream_dict_for_sub(
user: UserProfile,
sub_dict: RawSubscriptionDict,
raw_stream_dict: RawStreamDict,
recent_traffic: Dict[int, int],
) -> Dict[str, object]:
# We first construct a dictionary based on the standard Stream
# and Subscription models' API_FIELDS.
result = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
result["stream_id"] = raw_stream_dict["id"]
continue
elif field_name == "date_created":
result["date_created"] = datetime_to_timestamp(raw_stream_dict[field_name])
continue
result[field_name] = raw_stream_dict[field_name]
# Copy Subscription.API_FIELDS.
for field_name in Subscription.API_FIELDS:
result[field_name] = sub_dict[field_name]
# Backwards-compatibility for clients that haven't been
# updated for the in_home_view => is_muted API migration.
result["in_home_view"] = not result["is_muted"]
# Backwards-compatibility for clients that haven't been
# updated for the is_announcement_only -> stream_post_policy
# migration.
result["is_announcement_only"] = (
raw_stream_dict["stream_post_policy"] == Stream.STREAM_POST_POLICY_ADMINS
)
# Add a few computed fields not directly from the data models.
result["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
raw_stream_dict["id"], raw_stream_dict["date_created"], recent_traffic
)
result["email_address"] = encode_email_address_helper(
raw_stream_dict["name"], raw_stream_dict["email_token"], show_sender=True
)
# Our caller may add a subscribers field.
return result
def build_stream_dict_for_never_sub(
raw_stream_dict: RawStreamDict,
recent_traffic: Dict[int, int],
) -> Dict[str, object]:
result = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
result["stream_id"] = raw_stream_dict["id"]
continue
elif field_name == "date_created":
result["date_created"] = datetime_to_timestamp(raw_stream_dict[field_name])
continue
result[field_name] = raw_stream_dict[field_name]
result["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
raw_stream_dict["id"], raw_stream_dict["date_created"], recent_traffic
)
# Backwards-compatibility addition of removed field.
result["is_announcement_only"] = (
raw_stream_dict["stream_post_policy"] == Stream.STREAM_POST_POLICY_ADMINS
)
# Our caller may add a subscribers field.
return result
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has a significant
# performance impact on page load for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(
user_profile: UserProfile,
include_subscribers: bool = True,
) -> SubscriptionInfo:
realm = user_profile.realm
all_streams: QuerySet[RawStreamDict] = get_active_streams(realm).values(
*Stream.API_FIELDS,
# The realm_id and recipient_id are generally not needed in the API.
"realm_id",
"recipient_id",
# email_token isn't public to some users with access to
# the stream, so doesn't belong in API_FIELDS.
"email_token",
)
recip_id_to_stream_id: Dict[int, int] = {
stream["recipient_id"]: stream["id"] for stream in all_streams
}
all_streams_map: Dict[int, RawStreamDict] = {stream["id"]: stream for stream in all_streams}
sub_dicts_query: Iterable[RawSubscriptionDict] = (
get_stream_subscriptions_for_user(user_profile)
.values(
*Subscription.API_FIELDS,
"recipient_id",
"active",
)
.order_by("recipient_id")
)
# We only care about subscriptions for active streams.
sub_dicts: List[RawSubscriptionDict] = [
sub_dict
for sub_dict in sub_dicts_query
if recip_id_to_stream_id.get(sub_dict["recipient_id"])
]
def get_stream_id(sub_dict: RawSubscriptionDict) -> int:
return recip_id_to_stream_id[sub_dict["recipient_id"]]
traffic_stream_ids = {get_stream_id(sub_dict) for sub_dict in sub_dicts}
recent_traffic = get_streams_traffic(stream_ids=traffic_stream_ids)
# Okay, now we finally get to populating our main results, which
# will be these three lists.
subscribed = []
unsubscribed = []
never_subscribed = []
sub_unsub_stream_ids = set()
for sub_dict in sub_dicts:
stream_id = get_stream_id(sub_dict)
sub_unsub_stream_ids.add(stream_id)
raw_stream_dict = all_streams_map[stream_id]
stream_dict = build_stream_dict_for_sub(
user=user_profile,
sub_dict=sub_dict,
raw_stream_dict=raw_stream_dict,
recent_traffic=recent_traffic,
)
# is_active is represented in this structure by which list we include it in.
is_active = sub_dict["active"]
if is_active:
subscribed.append(stream_dict)
else:
unsubscribed.append(stream_dict)
if user_profile.can_access_public_streams():
never_subscribed_stream_ids = set(all_streams_map) - sub_unsub_stream_ids
else:
web_public_stream_ids = {stream["id"] for stream in all_streams if stream["is_web_public"]}
never_subscribed_stream_ids = web_public_stream_ids - sub_unsub_stream_ids
never_subscribed_streams = [
all_streams_map[stream_id] for stream_id in never_subscribed_stream_ids
]
for raw_stream_dict in never_subscribed_streams:
is_public = not raw_stream_dict["invite_only"]
if is_public or user_profile.is_realm_admin:
stream_dict = build_stream_dict_for_never_sub(
raw_stream_dict=raw_stream_dict, recent_traffic=recent_traffic
)
never_subscribed.append(stream_dict)
if include_subscribers:
# The highly optimized bulk_get_subscriber_user_ids wants to know which
# streams we are subscribed to, for validation purposes, and it uses that
# info to know if it's allowed to find OTHER subscribers.
subscribed_stream_ids = {
get_stream_id(sub_dict) for sub_dict in sub_dicts if sub_dict["active"]
}
subscriber_map = bulk_get_subscriber_user_ids(
all_streams,
user_profile,
subscribed_stream_ids,
)
for lst in [subscribed, unsubscribed, never_subscribed]:
for stream_dict in lst:
assert isinstance(stream_dict["stream_id"], int)
stream_id = stream_dict["stream_id"]
stream_dict["subscribers"] = subscriber_map[stream_id]
return SubscriptionInfo(
subscriptions=sorted(subscribed, key=lambda x: x["name"]),
unsubscribed=sorted(unsubscribed, key=lambda x: x["name"]),
never_subscribed=sorted(never_subscribed, key=lambda x: x["name"]),
)
def gather_subscriptions(
user_profile: UserProfile,
include_subscribers: bool = False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
helper_result = gather_subscriptions_helper(
user_profile,
include_subscribers=include_subscribers,
)
subscribed = helper_result.subscriptions
unsubscribed = helper_result.unsubscribed
return (subscribed, unsubscribed)
class ActivePresenceIdleUserData(TypedDict):
alerted: bool
notifications_data: UserMessageNotificationsData
def get_active_presence_idle_user_ids(
realm: Realm,
sender_id: int,
active_users_data: List[ActivePresenceIdleUserData],
) -> List[int]:
"""
Given the per-user notification data for a message's active
users, we build up the subset of those users who fit these criteria:
* They are likely to need notifications.
* They are no longer "present" according to the
UserPresence table.
"""
if realm.presence_disabled:
return []
user_ids = set()
for user_data in active_users_data:
user_notifications_data: UserMessageNotificationsData = user_data["notifications_data"]
alerted = user_data["alerted"]
# We only need to know the presence idle state for a user if
# this message would be notifiable for them if they were indeed
# idle. Only including those users in the calculation below is a
# very important optimization for open communities with many
# inactive users.
if user_notifications_data.is_notifiable(sender_id, idle=True) or alerted:
user_ids.add(user_notifications_data.user_id)
return filter_presence_idle_user_ids(user_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
# Given a set of user IDs (the recipients of a message), accesses
# the UserPresence table to determine which of these users are
# currently idle and should potentially get email notifications
# (and push notifications with
# user_profile.enable_online_push_notifications=False).
#
# We exclude any presence data from ZulipMobile for the purpose of
# triggering these notifications; the mobile app can more
# effectively do its own client-side filtering of notification
# sounds/etc. for the case that the user is actively doing a PM
# conversation in the app.
if not user_ids:
return []
# Matches presence.js constant
OFFLINE_THRESHOLD_SECS = 140
recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)
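# .distinct("user_profile_id") is PostgreSQL's DISTINCT ON,
# collapsing multiple UserPresence rows (e.g. one per client)
# down to one row per user.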
rows = (
UserPresence.objects.filter(
user_profile_id__in=user_ids,
status=UserPresence.ACTIVE,
timestamp__gte=recent,
)
.exclude(client__name="ZulipMobile")
.distinct("user_profile_id")
.values("user_profile_id")
)
active_user_ids = {row["user_profile_id"] for row in rows}
idle_user_ids = user_ids - active_user_ids
return sorted(idle_user_ids)
def do_send_confirmation_email(
invitee: PreregistrationUser,
referrer: UserProfile,
email_language: str,
invite_expires_in_days: Union[Optional[int], UnspecifiedValue] = UnspecifiedValue(),
) -> str:
"""
Send the confirmation/welcome e-mail to an invited user.
"""
activation_url = create_confirmation_link(
invitee, Confirmation.INVITATION, validity_in_days=invite_expires_in_days
)
context = {
"referrer_full_name": referrer.full_name,
"referrer_email": referrer.delivery_email,
"activate_url": activation_url,
"referrer_realm_name": referrer.realm.name,
}
send_email(
"zerver/emails/invitation",
to_emails=[invitee.email],
from_address=FromAddress.tokenized_no_reply_address(),
language=email_language,
context=context,
realm=referrer.realm,
)
return activation_url
def email_not_system_bot(email: str) -> None:
if is_cross_realm_bot_email(email):
msg = email_reserved_for_system_bots_error(email)
code = msg
raise ValidationError(
msg,
code=code,
params=dict(deactivated=False),
)
def estimate_recent_invites(realms: Collection[Realm], *, days: int) -> int:
"""An upper bound on the number of invites sent in the last `days` days"""
recent_invites = RealmCount.objects.filter(
realm__in=realms,
property="invites_sent::day",
end_time__gte=timezone_now() - datetime.timedelta(days=days),
).aggregate(Sum("value"))["value__sum"]
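# aggregate() yields None rather than 0 when no rows match.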
if recent_invites is None:
return 0
return recent_invites
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
"""Discourage using invitation emails as a vector for carrying spam."""
msg = _(
"To protect users, Zulip limits the number of invitations you can send in one day. Because you have reached the limit, no invitations were sent."
)
if not settings.OPEN_REALM_CREATION:
return
recent_invites = estimate_recent_invites([realm], days=1)
if num_invitees + recent_invites > realm.max_invites:
raise InvitationError(
msg,
[],
sent_invitations=False,
daily_limit_reached=True,
)
default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
if realm.date_created <= timezone_now() - newrealm_age:
# If this isn't a "newly-created" realm, we're done. The
# remaining code applies an aggregate limit across all
# "new" realms, to address sudden bursts of spam realms.
return
if realm.max_invites > default_max:
# If a user is on a realm where we've bumped up
# max_invites, then we exempt them from invite limits.
return
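# Apply an aggregate limit across all recently created realms that
# still have the default invite cap, to blunt bursts of spam realms.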
new_realms = Realm.objects.filter(
date_created__gte=timezone_now() - newrealm_age,
_max_invites__lte=default_max,
).all()
for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
recent_invites = estimate_recent_invites(new_realms, days=days)
if num_invitees + recent_invites > count:
raise InvitationError(
msg,
[],
sent_invitations=False,
daily_limit_reached=True,
)
def do_invite_users(
user_profile: UserProfile,
invitee_emails: Collection[str],
streams: Collection[Stream],
*,
invite_expires_in_days: Optional[int],
invite_as: int = PreregistrationUser.INVITE_AS["MEMBER"],
) -> None:
num_invites = len(invitee_emails)
check_invite_limit(user_profile.realm, num_invites)
if settings.BILLING_ENABLED:
from corporate.lib.registration import check_spare_licenses_available_for_inviting_new_users
check_spare_licenses_available_for_inviting_new_users(user_profile.realm, num_invites)
realm = user_profile.realm
if not realm.invite_required:
# Inhibit joining an open realm to send spam invitations.
min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
if user_profile.date_joined > timezone_now() - min_age and not user_profile.is_realm_admin:
raise InvitationError(
_(
"Your account is too new to send invites for this organization. "
"Ask an organization admin, or a more experienced user."
),
[],
sent_invitations=False,
)
good_emails: Set[str] = set()
errors: List[Tuple[str, str, bool]] = []
validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm)
for email in invitee_emails:
if email == "":
continue
email_error = validate_email_is_valid(
email,
validate_email_allowed_in_realm,
)
if email_error:
errors.append((email, email_error, False))
else:
good_emails.add(email)
"""
good_emails are emails that look ok so far,
but we still need to make sure they're not
gonna conflict with existing users
"""
error_dict = get_existing_user_errors(user_profile.realm, good_emails)
skipped: List[Tuple[str, str, bool]] = []
for email in error_dict:
msg, deactivated = error_dict[email]
skipped.append((email, msg, deactivated))
good_emails.remove(email)
validated_emails = list(good_emails)
if errors:
raise InvitationError(
_("Some emails did not validate, so we didn't send any invitations."),
errors + skipped,
sent_invitations=False,
)
if skipped and len(skipped) == len(invitee_emails):
# All e-mails were skipped, so we didn't actually invite anyone.
raise InvitationError(
_("We weren't able to invite anyone."), skipped, sent_invitations=False
)
# We do this here rather than in the invite queue processor, since this
# is used for rate limiting invitations rather than for keeping track of
# when exactly invitations were sent.
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["invites_sent::day"],
None,
timezone_now(),
increment=len(validated_emails),
)
# Now that we are past all the possible errors, we actually create
# the PreregistrationUser objects and trigger the email invitations.
for email in validated_emails:
# The logged in user is the referrer.
prereg_user = PreregistrationUser(
email=email, referred_by=user_profile, invited_as=invite_as, realm=user_profile.realm
)
prereg_user.save()
stream_ids = [stream.id for stream in streams]
prereg_user.streams.set(stream_ids)
event = {
"prereg_id": prereg_user.id,
"referrer_id": user_profile.id,
"email_language": user_profile.realm.default_language,
"invite_expires_in_days": invite_expires_in_days,
}
queue_json_publish("invites", event)
if skipped:
raise InvitationError(
_(
"Some of those addresses are already using Zulip, "
"so we didn't send them an invitation. We did send "
"invitations to everyone else!"
),
skipped,
sent_invitations=True,
)
notify_invites_changed(user_profile.realm)
def get_invitation_expiry_date(confirmation_obj: Confirmation) -> Optional[int]:
expiry_date = confirmation_obj.expiry_date
if expiry_date is None:
return expiry_date
return datetime_to_timestamp(expiry_date)
def do_get_invites_controlled_by_user(user_profile: UserProfile) -> List[Dict[str, Any]]:
"""
Returns a list of dicts representing invitations that can be controlled by user_profile.
This isn't necessarily the same as all the invitations generated by the user, as administrators
can also control invitations that they did not themselves create.
"""
if user_profile.is_realm_admin:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)
)
else:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by=user_profile)
)
invites = []
for invitee in prereg_users:
invites.append(
dict(
email=invitee.email,
invited_by_user_id=invitee.referred_by.id,
invited=datetime_to_timestamp(invitee.invited_at),
expiry_date=get_invitation_expiry_date(invitee.confirmation.get()),
id=invitee.id,
invited_as=invitee.invited_as,
is_multiuse=False,
)
)
if not user_profile.is_realm_admin:
# We do not return multiuse invites to non-admin users.
return invites
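# Multiuse invite links are tracked via their Confirmation objects;
# a NULL expiry_date means the link never expires.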
multiuse_confirmation_objs = Confirmation.objects.filter(
realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE
).filter(Q(expiry_date__gte=timezone_now()) | Q(expiry_date=None))
for confirmation_obj in multiuse_confirmation_objs:
invite = confirmation_obj.content_object
assert invite is not None
invites.append(
dict(
invited_by_user_id=invite.referred_by.id,
invited=datetime_to_timestamp(confirmation_obj.date_sent),
expiry_date=get_invitation_expiry_date(confirmation_obj),
id=invite.id,
link_url=confirmation_url(
confirmation_obj.confirmation_key,
user_profile.realm,
Confirmation.MULTIUSE_INVITE,
),
invited_as=invite.invited_as,
is_multiuse=True,
)
)
return invites
def get_valid_invite_confirmations_generated_by_user(
user_profile: UserProfile,
) -> List[Confirmation]:
prereg_user_ids = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by=user_profile)
).values_list("id", flat=True)
confirmations = list(
Confirmation.objects.filter(type=Confirmation.INVITATION, object_id__in=prereg_user_ids)
)
multiuse_invite_ids = MultiuseInvite.objects.filter(referred_by=user_profile).values_list(
"id", flat=True
)
confirmations += list(
Confirmation.objects.filter(
type=Confirmation.MULTIUSE_INVITE,
object_id__in=multiuse_invite_ids,
).filter(Q(expiry_date__gte=timezone_now()) | Q(expiry_date=None))
)
return confirmations
def revoke_invites_generated_by_user(user_profile: UserProfile) -> None:
confirmations_to_revoke = get_valid_invite_confirmations_generated_by_user(user_profile)
now = timezone_now()
for confirmation in confirmations_to_revoke:
confirmation.expiry_date = now
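# bulk_update persists all revocations in a single query rather
# than saving each row individually.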
Confirmation.objects.bulk_update(confirmations_to_revoke, ["expiry_date"])
if len(confirmations_to_revoke):
notify_invites_changed(realm=user_profile.realm)
def do_create_multiuse_invite_link(
referred_by: UserProfile,
invited_as: int,
invite_expires_in_days: Optional[int],
streams: Sequence[Stream] = [],
) -> str:
realm = referred_by.realm
invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
if streams:
invite.streams.set(streams)
invite.invited_as = invited_as
invite.save()
notify_invites_changed(referred_by.realm)
return create_confirmation_link(
invite, Confirmation.MULTIUSE_INVITE, validity_in_days=invite_expires_in_days
)
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
email = prereg_user.email
realm = prereg_user.realm
assert realm is not None
# Delete both the confirmation objects and the prereg_user object.
# TODO: Probably we actually want to set the confirmation objects
# to a "revoked" status so that we can give the invited user a better
# error message.
content_type = ContentType.objects.get_for_model(PreregistrationUser)
Confirmation.objects.filter(content_type=content_type, object_id=prereg_user.id).delete()
prereg_user.delete()
clear_scheduled_invitation_emails(email)
notify_invites_changed(realm)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
realm = multiuse_invite.referred_by.realm
content_type = ContentType.objects.get_for_model(MultiuseInvite)
Confirmation.objects.filter(content_type=content_type, object_id=multiuse_invite.id).delete()
multiuse_invite.delete()
notify_invites_changed(realm)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
# Both of these are structurally guaranteed by the caller's code path.
assert prereg_user.referred_by is not None
assert prereg_user.realm is not None
check_invite_limit(prereg_user.referred_by.realm, 1)
prereg_user.invited_at = timezone_now()
prereg_user.save()
expiry_date = prereg_user.confirmation.get().expiry_date
if expiry_date is None:
invite_expires_in_days = None
else:
# The resent invitation is reset to expire as long after being
# resent as the original invitation lasted.
invite_expires_in_days = (expiry_date - prereg_user.invited_at).days
prereg_user.confirmation.clear()
do_increment_logging_stat(
prereg_user.realm, COUNT_STATS["invites_sent::day"], None, prereg_user.invited_at
)
clear_scheduled_invitation_emails(prereg_user.email)
# We don't store the custom email body, so just set it to None
event = {
"prereg_id": prereg_user.id,
"referrer_id": prereg_user.referred_by.id,
"email_language": prereg_user.referred_by.realm.default_language,
"invite_expires_in_days": invite_expires_in_days,
}
queue_json_publish("invites", event)
return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
event = dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji())
send_event(realm, event, active_user_ids(realm.id))
def check_add_realm_emoji(
realm: Realm, name: str, author: UserProfile, image_file: IO[bytes]
) -> RealmEmoji:
try:
realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
realm_emoji.full_clean()
realm_emoji.save()
except django.db.utils.IntegrityError:
# Match the string in upload_emoji.
raise JsonableError(_("A custom emoji with this name already exists."))
emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
# The only user-controlled portion of 'emoji_file_name' is its extension,
# which cannot contain '..', '/', or '\', making it difficult to exploit.
emoji_file_name = mark_sanitized(emoji_file_name)
emoji_uploaded_successfully = False
is_animated = False
try:
is_animated = upload_emoji_image(image_file, emoji_file_name, author)
emoji_uploaded_successfully = True
finally:
if not emoji_uploaded_successfully:
realm_emoji.delete()
realm_emoji.file_name = emoji_file_name
realm_emoji.is_animated = is_animated
realm_emoji.save(update_fields=["file_name", "is_animated"])
notify_realm_emoji(realm_emoji.realm)
return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
emoji.deactivated = True
emoji.save(update_fields=["deactivated"])
notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Sequence[str]) -> None:
event = dict(type="alert_words", alert_words=words)
send_event(user_profile.realm, event, [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = add_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = remove_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_mute_topic(
user_profile: UserProfile,
stream: Stream,
topic: str,
date_muted: Optional[datetime.datetime] = None,
) -> None:
if date_muted is None:
date_muted = timezone_now()
add_topic_mute(user_profile, stream.id, stream.recipient_id, topic, date_muted)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
try:
remove_topic_mute(user_profile, stream.id, topic)
except UserTopic.DoesNotExist:
raise JsonableError(_("Topic is not muted"))
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_mute_user(
user_profile: UserProfile,
muted_user: UserProfile,
date_muted: Optional[datetime.datetime] = None,
) -> None:
if date_muted is None:
date_muted = timezone_now()
add_user_mute(user_profile, muted_user, date_muted)
do_mark_muted_user_messages_as_read(user_profile, muted_user)
event = dict(type="muted_users", muted_users=get_user_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_MUTED,
event_time=date_muted,
extra_data=orjson.dumps({"muted_user_id": muted_user.id}).decode(),
)
def do_unmute_user(mute_object: MutedUser) -> None:
user_profile = mute_object.user_profile
muted_user = mute_object.muted_user
mute_object.delete()
event = dict(type="muted_users", muted_users=get_user_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_UNMUTED,
event_time=timezone_now(),
extra_data=orjson.dumps({"unmuted_user_id": muted_user.id}).decode(),
)
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
event = dict(type="hotspots", hotspots=get_next_hotspots(user))
send_event(user.realm, event, [user.id])
def notify_linkifiers(realm: Realm) -> None:
realm_linkifiers = linkifiers_for_realm(realm.id)
event: Dict[str, object] = dict(type="realm_linkifiers", realm_linkifiers=realm_linkifiers)
send_event(realm, event, active_user_ids(realm.id))
# Below is code for backwards compatibility. The now deprecated
# "realm_filters" event-type is used by older clients, and uses
# tuples.
realm_filters = realm_filters_for_realm(realm.id)
event = dict(type="realm_filters", realm_filters=realm_filters)
send_event(realm, event, active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_linkifier(realm: Realm, pattern: str, url_format_string: str) -> int:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
linkifier = RealmFilter(realm=realm, pattern=pattern, url_format_string=url_format_string)
linkifier.full_clean()
linkifier.save()
notify_linkifiers(realm)
return linkifier.id
def do_remove_linkifier(
realm: Realm, pattern: Optional[str] = None, id: Optional[int] = None
) -> None:
if pattern is not None:
RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
else:
RealmFilter.objects.get(realm=realm, id=id).delete()
notify_linkifiers(realm)
def do_update_linkifier(realm: Realm, id: int, pattern: str, url_format_string: str) -> None:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
linkifier = RealmFilter.objects.get(realm=realm, id=id)
linkifier.pattern = pattern
linkifier.url_format_string = url_format_string
linkifier.full_clean()
linkifier.save(update_fields=["pattern", "url_format_string"])
notify_linkifiers(realm)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
realm_domain = RealmDomain.objects.create(
realm=realm, domain=domain, allow_subdomains=allow_subdomains
)
event = dict(
type="realm_domains",
op="add",
realm_domain=dict(
domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains
),
)
send_event(realm, event, active_user_ids(realm.id))
return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
realm_domain.allow_subdomains = allow_subdomains
realm_domain.save(update_fields=["allow_subdomains"])
event = dict(
type="realm_domains",
op="change",
realm_domain=dict(
domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains
),
)
send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(
realm_domain: RealmDomain, *, acting_user: Optional[UserProfile]
) -> None:
realm = realm_domain.realm
domain = realm_domain.domain
realm_domain.delete()
if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
# If this was the last realm domain, we mark the realm as no
# longer restricted to domain, because the feature doesn't do
# anything if there are no domains, and this is probably less
# confusing than the alternative.
do_set_realm_property(realm, "emails_restricted_to_domains", False, acting_user=acting_user)
event = dict(type="realm_domains", op="remove", domain=domain)
send_event(realm, event, active_user_ids(realm.id))
def notify_realm_playgrounds(realm: Realm) -> None:
event = dict(type="realm_playgrounds", realm_playgrounds=get_realm_playgrounds(realm))
send_event(realm, event, active_user_ids(realm.id))
def do_add_realm_playground(realm: Realm, **kwargs: Any) -> int:
realm_playground = RealmPlayground(realm=realm, **kwargs)
# We expect full_clean to always pass, since thorough input validation
# is performed in the view (using check_url, check_pygments_language,
# etc.) before calling this function.
realm_playground.full_clean()
realm_playground.save()
notify_realm_playgrounds(realm)
return realm_playground.id
def do_remove_realm_playground(realm: Realm, realm_playground: RealmPlayground) -> None:
realm_playground.delete()
notify_realm_playgrounds(realm)
def get_occupied_streams(realm: Realm) -> QuerySet:
# TODO: Make a generic stub for QuerySet
"""Get streams with subscribers"""
exists_expression = Exists(
Subscription.objects.filter(
active=True,
is_user_active=True,
user_profile__realm=realm,
recipient_id=OuterRef("recipient_id"),
),
)
occupied_streams = (
Stream.objects.filter(realm=realm, deactivated=False)
.annotate(occupied=exists_expression)
.filter(occupied=True)
)
return occupied_streams
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]: # nocoverage
query = get_web_public_streams_queryset(realm)
streams = Stream.get_client_data(query)
return streams
def do_get_streams(
user_profile: UserProfile,
include_public: bool = True,
include_web_public: bool = False,
include_subscribed: bool = True,
include_all_active: bool = False,
include_default: bool = False,
include_owner_subscribed: bool = False,
) -> List[Dict[str, Any]]:
# This function is only used by API clients now.
if include_all_active and not user_profile.is_realm_admin:
raise JsonableError(_("User not authorized for this query"))
include_public = include_public and user_profile.can_access_public_streams()
# Start out with all active streams in the realm.
query = Stream.objects.filter(realm=user_profile.realm, deactivated=False)
if include_all_active:
streams = Stream.get_client_data(query)
else:
# We construct a query as the or (|) of the various sources
# this user requested streams from.
query_filter: Optional[Q] = None
def add_filter_option(option: Q) -> None:
nonlocal query_filter
if query_filter is None:
query_filter = option
else:
query_filter |= option
if include_subscribed:
subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
recipient_check = Q(id__in=set(subscribed_stream_ids))
add_filter_option(recipient_check)
if include_public:
invite_only_check = Q(invite_only=False)
add_filter_option(invite_only_check)
if include_web_public:
# This should match get_web_public_streams_queryset
web_public_check = Q(
is_web_public=True,
invite_only=False,
history_public_to_subscribers=True,
deactivated=False,
)
add_filter_option(web_public_check)
if include_owner_subscribed and user_profile.is_bot:
bot_owner = user_profile.bot_owner
assert bot_owner is not None
owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
owner_subscribed_check = Q(id__in=set(owner_stream_ids))
add_filter_option(owner_subscribed_check)
if query_filter is not None:
query = query.filter(query_filter)
streams = Stream.get_client_data(query)
else:
# Don't bother going to the database with no valid sources
streams = []
streams.sort(key=lambda elt: elt["name"])
if include_default:
is_default = {}
default_streams = get_default_streams_for_realm(user_profile.realm_id)
for default_stream in default_streams:
is_default[default_stream.id] = True
for stream in streams:
stream["is_default"] = is_default.get(stream["stream_id"], False)
return streams
def notify_attachment_update(
user_profile: UserProfile, op: str, attachment_dict: Dict[str, Any]
) -> None:
event = {
"type": "attachment",
"op": op,
"attachment": attachment_dict,
"upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
}
send_event(user_profile.realm, event, [user_profile.id])
def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool:
claimed = False
for path_id in potential_path_ids:
user_profile = message.sender
is_message_realm_public = False
is_message_web_public = False
if message.is_stream_message():
stream = Stream.objects.get(id=message.recipient.type_id)
is_message_realm_public = stream.is_public()
is_message_web_public = stream.is_web_public
if not validate_attachment_request(user_profile, path_id):
# Technically, there are 2 cases here:
# * The user put something in their message that has the form
#   of an upload, but doesn't correspond to a file that actually
#   exists. validate_attachment_request will return None.
# * The user is trying to send a link to a file they don't have permission to
# access themselves. validate_attachment_request will return False.
#
# Either case is unusual and suggests a UI bug that got
# the user in this situation, so we log in these cases.
logging.warning(
"User %s tried to share upload %s in message %s, but lacks permission",
user_profile.id,
path_id,
message.id,
)
continue
claimed = True
attachment = claim_attachment(
user_profile, path_id, message, is_message_realm_public, is_message_web_public
)
notify_attachment_update(user_profile, "update", attachment.to_dict())
return claimed
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)
for attachment in old_unclaimed_attachments:
delete_message_image(attachment.path_id)
attachment.delete()
def check_attachment_reference_change(
message: Message, rendering_result: MessageRenderingResult
) -> bool:
# For an unsaved message edit (message.* has been updated, but not
# saved to the database), adjusts Attachment data to correspond to
# the new content.
prev_attachments = {a.path_id for a in message.attachment_set.all()}
new_attachments = set(rendering_result.potential_attachment_path_ids)
if new_attachments == prev_attachments:
return bool(prev_attachments)
to_remove = list(prev_attachments - new_attachments)
if len(to_remove) > 0:
attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update()
message.attachment_set.remove(*attachments_to_update)
to_add = list(new_attachments - prev_attachments)
if len(to_add) > 0:
do_claim_attachments(message, to_add)
return message.attachment_set.exists()
def notify_realm_custom_profile_fields(realm: Realm) -> None:
fields = custom_profile_fields_for_realm(realm.id)
event = dict(type="custom_profile_fields", fields=[f.as_dict() for f in fields])
send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_default_custom_profile_field(
realm: Realm, field_subtype: str
) -> CustomProfileField:
field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
custom_profile_field = CustomProfileField(
realm=realm,
name=field_data["name"],
field_type=CustomProfileField.EXTERNAL_ACCOUNT,
hint=field_data["hint"],
field_data=orjson.dumps(dict(subtype=field_subtype)).decode(),
)
custom_profile_field.save()
custom_profile_field.order = custom_profile_field.id
custom_profile_field.save(update_fields=["order"])
notify_realm_custom_profile_fields(realm)
return custom_profile_field
def try_add_realm_custom_profile_field(
realm: Realm,
name: str,
field_type: int,
hint: str = "",
field_data: Optional[ProfileFieldData] = None,
) -> CustomProfileField:
custom_profile_field = CustomProfileField(realm=realm, name=name, field_type=field_type)
custom_profile_field.hint = hint
if (
custom_profile_field.field_type == CustomProfileField.SELECT
or custom_profile_field.field_type == CustomProfileField.EXTERNAL_ACCOUNT
):
custom_profile_field.field_data = orjson.dumps(field_data or {}).decode()
custom_profile_field.save()
custom_profile_field.order = custom_profile_field.id
custom_profile_field.save(update_fields=["order"])
notify_realm_custom_profile_fields(realm)
return custom_profile_field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
"""
Deleting a field will also delete the user profile data
associated with it in CustomProfileFieldValue model.
"""
field.delete()
notify_realm_custom_profile_fields(realm)
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(
realm: Realm,
field: CustomProfileField,
name: str,
hint: str = "",
field_data: Optional[ProfileFieldData] = None,
) -> None:
field.name = name
field.hint = hint
if (
field.field_type == CustomProfileField.SELECT
or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT
):
field.field_data = orjson.dumps(field_data or {}).decode()
field.save()
notify_realm_custom_profile_fields(realm)
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
order_mapping = {field_id: position for position, field_id in enumerate(order)}
custom_profile_fields = CustomProfileField.objects.filter(realm=realm)
for custom_profile_field in custom_profile_fields:
if custom_profile_field.id not in order_mapping:
raise JsonableError(_("Invalid order mapping."))
for custom_profile_field in custom_profile_fields:
custom_profile_field.order = order_mapping[custom_profile_field.id]
custom_profile_field.save(update_fields=["order"])
notify_realm_custom_profile_fields(realm)
def notify_user_update_custom_profile_data(
user_profile: UserProfile, field: Dict[str, Union[int, str, List[int], None]]
) -> None:
data = dict(id=field["id"], value=field["value"])
if field["rendered_value"]:
data["rendered_value"] = field["rendered_value"]
payload = dict(user_id=user_profile.id, custom_profile_field=data)
event = dict(type="realm_user", op="update", person=payload)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data_if_changed(
user_profile: UserProfile,
data: List[Dict[str, Union[int, ProfileDataElementValue]]],
) -> None:
with transaction.atomic():
for custom_profile_field in data:
field_value, created = CustomProfileFieldValue.objects.get_or_create(
user_profile=user_profile, field_id=custom_profile_field["id"]
)
# field_value.value is a TextField() so we need to have field["value"]
# in string form to correctly make comparisons and assignments.
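# For example, a list value such as [1, 2] is stored and compared as the
# string "[1,2]" produced by orjson.dumps below.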
if isinstance(custom_profile_field["value"], str):
custom_profile_field_value_string = custom_profile_field["value"]
else:
custom_profile_field_value_string = orjson.dumps(
custom_profile_field["value"]
).decode()
if not created and field_value.value == custom_profile_field_value_string:
# If the field value isn't actually being changed to a different one,
# we have nothing to do here for this field.
continue
field_value.value = custom_profile_field_value_string
if field_value.field.is_renderable():
field_value.rendered_value = render_stream_description(
custom_profile_field_value_string
)
field_value.save(update_fields=["value", "rendered_value"])
else:
field_value.save(update_fields=["value"])
notify_user_update_custom_profile_data(
user_profile,
{
"id": field_value.field_id,
"value": field_value.value,
"rendered_value": field_value.rendered_value,
"type": field_value.field.field_type,
},
)
def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None:
try:
custom_profile_field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
field_value = CustomProfileFieldValue.objects.get(
field=custom_profile_field, user_profile=user_profile
)
field_value.delete()
notify_user_update_custom_profile_data(
user_profile,
{
"id": field_id,
"value": None,
"rendered_value": None,
"type": custom_profile_field.field_type,
},
)
except CustomProfileField.DoesNotExist:
raise JsonableError(_("Field id {id} not found.").format(id=field_id))
except CustomProfileFieldValue.DoesNotExist:
pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
event = dict(
type="user_group",
op="add",
group=dict(
name=user_group.name,
members=[member.id for member in members],
description=user_group.description,
id=user_group.id,
is_system_group=user_group.is_system_group,
),
)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(
realm: Realm, name: str, initial_members: List[UserProfile], description: str
) -> None:
try:
user_group = create_user_group(name, initial_members, realm, description=description)
do_send_create_user_group_event(user_group, initial_members)
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, str]) -> None:
event = dict(type="user_group", op="update", group_id=user_group.id, data=data)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
try:
user_group.name = name
user_group.save(update_fields=["name"])
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
user_group.description = description
user_group.save(update_fields=["description"])
do_send_user_group_update_event(user_group, dict(description=description))
def do_update_outgoing_webhook_service(
bot_profile: UserProfile, service_interface: int, service_payload_url: str
) -> None:
# TODO: First service is chosen because currently one bot can only have one service.
# Update this once multiple services are supported.
service = get_bot_services(bot_profile.id)[0]
service.base_url = service_payload_url
service.interface = service_interface
service.save()
send_event(
bot_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=bot_profile.id,
services=[
dict(
base_url=service.base_url, interface=service.interface, token=service.token
)
],
),
),
bot_owner_user_ids(bot_profile),
)
def do_update_bot_config_data(bot_profile: UserProfile, config_data: Dict[str, str]) -> None:
for key, value in config_data.items():
set_bot_config(bot_profile, key, value)
updated_config_data = get_bot_config(bot_profile)
send_event(
bot_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=bot_profile.id,
services=[dict(config_data=updated_config_data)],
),
),
bot_owner_user_ids(bot_profile),
)
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
user_profile = get_user_profile_by_id(user_profile_id)
services = get_bot_services(user_profile_id)
service_dicts: List[Dict[str, Any]] = []
if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [
{
"base_url": service.base_url,
"interface": service.interface,
"token": service.token,
}
for service in services
]
elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
try:
service_dicts = [
{
"config_data": get_bot_config(user_profile),
"service_name": services[0].name,
}
]
# A ConfigError just means that there are no config entries for user_profile.
except ConfigError:
pass
return service_dicts
def get_service_dicts_for_bots(
bot_dicts: List[Dict[str, Any]], realm: Realm
) -> Dict[int, List[Dict[str, Any]]]:
bot_profile_ids = [bot_dict["id"] for bot_dict in bot_dicts]
bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list)
for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
bot_services_by_uid[service.user_profile_id].append(service)
embedded_bot_ids = [
bot_dict["id"] for bot_dict in bot_dicts if bot_dict["bot_type"] == UserProfile.EMBEDDED_BOT
]
embedded_bot_configs = get_bot_configs(embedded_bot_ids)
service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {}
for bot_dict in bot_dicts:
bot_profile_id = bot_dict["id"]
bot_type = bot_dict["bot_type"]
services = bot_services_by_uid[bot_profile_id]
service_dicts: List[Dict[str, Any]] = []
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [
{
"base_url": service.base_url,
"interface": service.interface,
"token": service.token,
}
for service in services
]
elif bot_type == UserProfile.EMBEDDED_BOT:
if bot_profile_id in embedded_bot_configs:
bot_config = embedded_bot_configs[bot_profile_id]
service_dicts = [
{
"config_data": bot_config,
"service_name": services[0].name,
}
]
service_dicts_by_uid[bot_profile_id] = service_dicts
return service_dicts_by_uid
def get_owned_bot_dicts(
user_profile: UserProfile, include_all_realm_bots_if_admin: bool = True
) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(
realm=user_profile.realm, is_bot=True, bot_owner=user_profile
).values(*bot_dict_fields)
services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
return [
{
"email": botdict["email"],
"user_id": botdict["id"],
"full_name": botdict["full_name"],
"bot_type": botdict["bot_type"],
"is_active": botdict["is_active"],
"api_key": botdict["api_key"],
"default_sending_stream": botdict["default_sending_stream__name"],
"default_events_register_stream": botdict["default_events_register_stream__name"],
"default_all_public_streams": botdict["default_all_public_streams"],
"owner_id": botdict["bot_owner_id"],
"avatar_url": avatar_url_from_dict(botdict),
"services": services_by_ids[botdict["id"]],
}
for botdict in result
]
def do_send_user_group_members_update_event(
event_name: str, user_group: UserGroup, user_ids: List[int]
) -> None:
event = dict(type="user_group", op=event_name, group_id=user_group.id, user_ids=user_ids)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None:
memberships = [
UserGroupMembership(user_group_id=user_group.id, user_profile=user_profile)
for user_profile in user_profiles
]
UserGroupMembership.objects.bulk_create(memberships)
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event("add_members", user_group, user_ids)
def remove_members_from_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None:
UserGroupMembership.objects.filter(
user_group_id=user_group.id, user_profile__in=user_profiles
).delete()
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event("remove_members", user_group, user_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int, realm_id: int) -> None:
event = dict(type="user_group", op="remove", group_id=user_group_id)
send_event(realm, event, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
user_group = access_user_group_by_id(user_group_id, user_profile)
user_group.delete()
do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def do_send_realm_reactivation_email(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
RealmAuditLog.objects.create(
realm=realm,
acting_user=acting_user,
event_type=RealmAuditLog.REALM_REACTIVATION_EMAIL_SENT,
event_time=timezone_now(),
)
context = {"confirmation_url": url, "realm_uri": realm.uri, "realm_name": realm.name}
language = realm.default_language
send_email_to_admins(
"zerver/emails/realm_reactivation",
realm,
from_address=FromAddress.tokenized_no_reply_address(),
from_name=FromAddress.security_email_from_name(language=language),
language=language,
context=context,
)
def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None:
user.zoom_token = token
user.save(update_fields=["zoom_token"])
send_event(
user.realm,
dict(type="has_zoom_token", value=token is not None),
[user.id],
)
def notify_realm_export(user_profile: UserProfile) -> None:
# In the future, we may want to send this event to all realm admins.
event = dict(type="realm_export", exports=get_realm_exports_serialized(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
# Give mypy a hint so it knows `orjson.loads`
# isn't being passed an `Optional[str]`.
export_extra_data = export.extra_data
assert export_extra_data is not None
export_data = orjson.loads(export_extra_data)
export_path = export_data.get("export_path")
if export_path:
# Allow removal even if the export failed.
delete_export_tarball(export_path)
export_data.update(deleted_timestamp=timezone_now().timestamp())
export.extra_data = orjson.dumps(export_data).decode()
export.save(update_fields=["extra_data"])
notify_realm_export(user_profile)
def get_topic_messages(user_profile: UserProfile, stream: Stream, topic_name: str) -> List[Message]:
query = UserMessage.objects.filter(
user_profile=user_profile,
message__recipient=stream.recipient,
).order_by("id")
return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]<|fim▁end|> | assert message.rendered_content is None |
<|file_name|>experimental_lambdify.py<|end_file_name|><|fim▁begin|>""" rewrite of lambdify - This stuff is not stable at all.
It is for internal use in the new plotting module.
It may (will! see the Q'n'A in the source) be rewritten.
It's completely self-contained. In particular, it does not use lambdarepr.
It does not aim to replace the current lambdify. Most importantly it will never
ever support anything other than sympy expressions (no Matrices, dictionaries
and so on).
"""
from __future__ import print_function, division
import re
from sympy import Symbol, NumberSymbol, I, zoo, oo
from sympy.core.compatibility import exec_
from sympy.utilities.iterables import numbered_symbols
# We parse the expression string into a tree that identifies functions. Then
# we translate the names of the functions and we translate also some strings
# that are not names of functions (all this according to translation
# dictionaries).
# If the translation goes to another module (like numpy) the
# module is imported and 'func' is translated to 'module.func'.
# If a function cannot be translated, the inner nodes of that part of the
# tree are not translated. So if we have Integral(sqrt(x)), sqrt is not
# translated to np.sqrt and the Integral does not crash.
# A namespace for all this is generated by crawling the (func, args) tree of
# the expression. The creation of this namespace involves many ugly
# workarounds.
# The namespace consists of all the names needed for the sympy expression and
# all the name of modules used for translation. Those modules are imported only
# as a name (import numpy as np) in order to keep the namespace small and
# manageable.
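# As a rough sketch of the end result (using the translation dictionaries
# defined on the Lambdifier class below): in numpy mode,
#     experimental_lambdify([x], sin(x), use_np=True)
# builds and evals roughly
#     lambda x0 : ( np.sin(x0) )
# where the argument was renamed to x0 via numbered_symbols and 'sin' was
# looked up in numpy_functions_same.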
# Please, if there is a bug, do not try to fix it here! Rewrite this by using
# the method proposed in the last Q'n'A below. That way the new function will
# work just as well, be just as simple, but it won't need any new workarounds.
# If you insist on fixing it here, look at the workarounds in the function
# sympy_expression_namespace and in lambdify.
# Q: Why are you not using python abstract syntax tree?
# A: Because it is more complicated and not much more powerful in this case.
# Q: What if I have Symbol('sin') or g=Function('f')?
# A: You will break the algorithm. We should use srepr to defend against this?
# The problem with Symbol('sin') is that it will be printed as 'sin'. The
# parser will distinguish it from the function 'sin' because functions are
# detected thanks to the opening parenthesis, but the lambda expression won't
# understand the difference if we have also the sin function.
# The solution (complicated) is to use srepr and maybe ast.
# The problem with the g=Function('f') is that it will be printed as 'f' but in
# the global namespace we have only 'g'. But as the same printer is used in the
# constructor of the namespace there will be no problem.
# Q: What if some of the printers are not printing as expected?
# A: The algorithm won't work. You must use srepr for those cases. But even
# srepr may not print well. All problems with printers should be considered
# bugs.
# Q: What about _imp_ functions?
# A: Those are taken care for by evalf. A special case treatment will work
# faster but it's not worth the code complexity.
# Q: Will ast fix all possible problems?
# A: No. You will always have to use some printer. Even srepr may not work in
# some cases. But if the printer does not work, that should be considered a
# bug.
# Q: Is there some way to fix all possible problems?
# A: Probably by constructing our strings ourself by traversing the (func,
# args) tree and creating the namespace at the same time. That actually sounds
# good.
from sympy.external import import_module
import warnings
#TODO debugging output
class vectorized_lambdify(object):
""" Return a sufficiently smart, vectorized and lambdified function.
Returns only reals.
This function uses experimental_lambdify to create a lambdified
expression ready to be used with numpy. Many of the functions in sympy
are not implemented in numpy so in some cases we resort to python cmath or
even to evalf.
The following translations are tried:
only numpy complex
- on errors raised by sympy trying to work with ndarray:
only python cmath and then vectorize complex128
When using python cmath there is no need for evalf or float/complex
because python cmath calls those.
This function never tries to mix numpy directly with evalf because numpy
does not understand sympy Float. If this is needed one can use the
float_wrap_evalf/complex_wrap_evalf options of experimental_lambdify or
better one can be explicit about the dtypes that numpy works with.
Check numpy bug http://projects.scipy.org/numpy/ticket/1013 to know what
types of errors to expect.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_np=True)
self.vector_func = self.lambda_func
self.failure = False
def __call__(self, *args):
np = import_module('numpy')
np_old_err = np.seterr(invalid='raise')
try:
temp_args = (np.array(a, dtype=np.complex) for a in args)
results = self.vector_func(*temp_args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
except Exception as e:
#DEBUG: print 'Error', type(e), e
if ((isinstance(e, TypeError)
and 'unhashable type: \'numpy.ndarray\'' in str(e))
or
(isinstance(e, ValueError)
and ('Invalid limits given:' in str(e)
or 'negative dimensions are not allowed' in str(e) # XXX
or 'sequence too large; must be smaller than 32' in str(e)))): # XXX
# Almost all functions were translated to numpy, but some were
# left as sympy functions. They received an ndarray as an
# argument and failed.
# sin(ndarray(...)) raises "unhashable type"
# Integral(x, (x, 0, ndarray(...))) raises "Invalid limits"
# other ugly exceptions that are not well understood (marked with XXX)
# TODO: Cleanup the ugly special cases marked with xxx above.
# Solution: use cmath and vectorize the final lambda.
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_python_cmath=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
else:
# Complete failure. One last try with no translations, only
# wrapping in complex((...).evalf()) and returning the real
# part.
if self.failure:
raise e
else:
self.failure = True
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_evalf=True,
complex_wrap_evalf=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
warnings.warn('The evaluation of the expression is'
' problematic. We are trying a fallback method'
' that may still work. Please report this as a bug.')
finally:
np.seterr(**np_old_err)
return results
class lambdify(object):
"""Returns the lambdified function.
This function uses experimental_lambdify to create a lambdified
expression. It uses cmath to lambdify the expression. If the function
is not implemented in python cmath, python cmath calls evalf on those
functions.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_evalf=True,
use_python_cmath=True)
self.failure = False
def __call__(self, args):
args = complex(args)
try:
#The result can be sympy.Float. Hence wrap it with complex type.
result = complex(self.lambda_func(args))
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
except Exception as e:
# The exceptions raised by sympy, cmath are not consistent and
# hence it is not possible to specify all the exceptions that
# are to be caught. Presently there are no cases for which the code
# reaches this block other than ZeroDivisionError and complex
# comparison. Also the exception is caught only once. If the
# exception repeats itself,
# then it is not caught and the corresponding error is raised.
# XXX: Remove catching all exceptions once the plotting module
# is heavily tested.
if isinstance(e, ZeroDivisionError):
return None
elif isinstance(e, TypeError) and ('no ordering relation is'
' defined for complex numbers'
in str(e) or 'unorderable '
'types' in str(e) or "not "
"supported between instances of"
in str(e)):
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
use_python_math=True)
result = self.lambda_func(args.real)
return result
else:
if self.failure:
raise e
#Failure
#Try wrapping it with complex(..).evalf()
self.failure = True
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
complex_wrap_evalf=True)
result = self.lambda_func(args)
warnings.warn('The evaluation of the expression is'
' problematic. We are trying a fallback method'
' that may still work. Please report this as a bug.')
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
def experimental_lambdify(*args, **kwargs):
l = Lambdifier(*args, **kwargs)
return l
class Lambdifier(object):
def __init__(self, args, expr, print_lambda=False, use_evalf=False,
float_wrap_evalf=False, complex_wrap_evalf=False,
use_np=False, use_python_math=False, use_python_cmath=False,
use_interval=False):
self.print_lambda = print_lambda
self.use_evalf = use_evalf
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
self.use_np = use_np
self.use_python_math = use_python_math
self.use_python_cmath = use_python_cmath
self.use_interval = use_interval
# Constructing the argument string
# - check
if not all([isinstance(a, Symbol) for a in args]):
raise ValueError('The arguments must be Symbols.')
# - use numbered symbols
syms = numbered_symbols(exclude=expr.free_symbols)
newargs = [next(syms) for i in args]
expr = expr.xreplace(dict(zip(args, newargs)))
argstr = ', '.join([str(a) for a in newargs])
del syms, newargs, args
# Constructing the translation dictionaries and making the translation
self.dict_str = self.get_dict_str()
self.dict_fun = self.get_dict_fun()
exprstr = str(expr)
# the & and | operators don't work on tuples, see discussion #12108
exprstr = exprstr.replace(" & "," and ").replace(" | "," or ")
newexpr = self.tree2str_translate(self.str2tree(exprstr))
# Constructing the namespaces
namespace = {}
namespace.update(self.sympy_atoms_namespace(expr))
namespace.update(self.sympy_expression_namespace(expr))
# XXX Workaround
# Ugly workaround because Pow(a,Half) prints as sqrt(a)
# and sympy_expression_namespace can not catch it.
from sympy import sqrt
namespace.update({'sqrt': sqrt})
namespace.update({'Eq': lambda x, y: x == y})
# End workaround.
if use_python_math:
namespace.update({'math': __import__('math')})
if use_python_cmath:
namespace.update({'cmath': __import__('cmath')})
if use_np:
try:
namespace.update({'np': __import__('numpy')})
except ImportError:
raise ImportError(
'experimental_lambdify failed to import numpy.')
if use_interval:
namespace.update({'imath': __import__(
'sympy.plotting.intervalmath', fromlist=['intervalmath'])})
namespace.update({'math': __import__('math')})
# Construct the lambda
if self.print_lambda:
print(newexpr)
eval_str = 'lambda %s : ( %s )' % (argstr, newexpr)
self.eval_str = eval_str
exec_("from __future__ import division; MYNEWLAMBDA = %s" % eval_str, namespace)
self.lambda_func = namespace['MYNEWLAMBDA']
def __call__(self, *args, **kwargs):
return self.lambda_func(*args, **kwargs)
##############################################################################
# Dicts for translating from sympy to other modules
##############################################################################
###
# builtins
###
# Functions with different names in builtins
builtin_functions_different = {
'Min': 'min',
'Max': 'max',
'Abs': 'abs',
}
# Strings that should be translated
builtin_not_functions = {
'I': '1j',
# 'oo': '1e400',
}
###
# numpy
###
# Functions that are the same in numpy
numpy_functions_same = [
'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'exp', 'log',
'sqrt', 'floor', 'conjugate',
]
# Functions with different names in numpy
numpy_functions_different = {
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"im": "imag",
"ln": "log",
"Max": "amax",
"Min": "amin",
"re": "real",
"Abs": "abs",
}
# Strings that should be translated
numpy_not_functions = {
'pi': 'np.pi',
'oo': 'np.inf',
'E': 'np.e',
}
###
# python math
###
# Functions that are the same in math
math_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'erf', 'sqrt', 'floor', 'factorial', 'gamma',
]
# Functions with different names in math
math_functions_different = {
'ceiling': 'ceil',
'ln': 'log',
'loggamma': 'lgamma'
}
# Strings that should be translated
math_not_functions = {
'pi': 'math.pi',
'E': 'math.e',
}
###
# python cmath
###
# Functions that are the same in cmath
cmath_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'sqrt',
]
# Functions with different names in cmath
cmath_functions_different = {
'ln': 'log',
'arg': 'phase',
}
# Strings that should be translated
cmath_not_functions = {
'pi': 'cmath.pi',
'E': 'cmath.e',
}
###
# intervalmath
###
interval_not_functions = {
'pi': 'math.pi',
'E': 'math.e'
}
interval_functions_same = [
'sin', 'cos', 'exp', 'tan', 'atan', 'log',
'sqrt', 'cosh', 'sinh', 'tanh', 'floor',
'acos', 'asin', 'acosh', 'asinh', 'atanh',
'Abs', 'And', 'Or'
]
interval_functions_different = {
'Min': 'imin',
'Max': 'imax',
'ceiling': 'ceil',
}
###
# mpmath, etc
###
#TODO
###
# Create the final ordered tuples of dictionaries
###
# For strings
def get_dict_str(self):
dict_str = dict(self.builtin_not_functions)
if self.use_np:
dict_str.update(self.numpy_not_functions)
if self.use_python_math:
dict_str.update(self.math_not_functions)
if self.use_python_cmath:
dict_str.update(self.cmath_not_functions)
if self.use_interval:
dict_str.update(self.interval_not_functions)
return dict_str
# For functions
def get_dict_fun(self):
dict_fun = dict(self.builtin_functions_different)
if self.use_np:
for s in self.numpy_functions_same:
dict_fun[s] = 'np.' + s
for k, v in self.numpy_functions_different.items():
dict_fun[k] = 'np.' + v
if self.use_python_math:
for s in self.math_functions_same:
dict_fun[s] = 'math.' + s
for k, v in self.math_functions_different.items():
dict_fun[k] = 'math.' + v
if self.use_python_cmath:
for s in self.cmath_functions_same:
dict_fun[s] = 'cmath.' + s
for k, v in self.cmath_functions_different.items():
dict_fun[k] = 'cmath.' + v
if self.use_interval:
for s in self.interval_functions_same:
dict_fun[s] = 'imath.' + s
for k, v in self.interval_functions_different.items():
dict_fun[k] = 'imath.' + v<|fim▁hole|>
##############################################################################
# The translator functions, tree parsers, etc.
##############################################################################
def str2tree(self, exprstr):
"""Converts an expression string to a tree.
Functions are represented by ('func_name(', tree_of_arguments).
Other expressions are (head_string, mid_tree, tail_str).
Expressions that do not contain functions are directly returned.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> str2tree(str(Integral(x, (x, 1, y))))
('', ('Integral(', 'x, (x, 1, y)'), ')')
>>> str2tree(str(x+y))
'x + y'
>>> str2tree(str(x+y*sin(z)+1))
('x + y*', ('sin(', 'z'), ') + 1')
>>> str2tree('sin(y*(y + 1.1) + (sin(y)))')
('', ('sin(', ('y*(y + 1.1) + (', ('sin(', 'y'), '))')), ')')
"""
#matches the first 'function_name('
first_par = re.search(r'(\w+\()', exprstr)
if first_par is None:
return exprstr
else:
start = first_par.start()
end = first_par.end()
head = exprstr[:start]
func = exprstr[start:end]
tail = exprstr[end:]
count = 0
for i, c in enumerate(tail):
if c == '(':
count += 1
elif c == ')':
count -= 1
if count == -1:
break
func_tail = self.str2tree(tail[:i])
tail = self.str2tree(tail[i:])
return (head, (func, func_tail), tail)
@classmethod
def tree2str(cls, tree):
"""Converts a tree to string without translations.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> tree2str = Lambdifier([x], x).tree2str
>>> tree2str(str2tree(str(x+y*sin(z)+1)))
'x + y*sin(z) + 1'
"""
if isinstance(tree, str):
return tree
else:
return ''.join(map(cls.tree2str, tree))
def tree2str_translate(self, tree):
"""Converts a tree to string with translations.
Function names are translated by translate_func.
Other strings are translated by translate_str.
"""
if isinstance(tree, str):
return self.translate_str(tree)
elif isinstance(tree, tuple) and len(tree) == 2:
return self.translate_func(tree[0][:-1], tree[1])
else:
return ''.join([self.tree2str_translate(t) for t in tree])
def translate_str(self, estr):
"""Translate substrings of estr using in order the dictionaries in
dict_tuple_str."""
for pattern, repl in self.dict_str.items():
estr = re.sub(pattern, repl, estr)
return estr
def translate_func(self, func_name, argtree):
"""Translate function names and the tree of arguments.
If the function name is not in the dictionaries of dict_tuple_fun then the
function is surrounded by a float((...).evalf()).
The use of float is necessary as np.<function>(sympy.Float(..)) raises an
error."""
if func_name in self.dict_fun:
new_name = self.dict_fun[func_name]
argstr = self.tree2str_translate(argtree)
return new_name + '(' + argstr
else:
template = '(%s(%s)).evalf(' if self.use_evalf else '%s(%s'
if self.float_wrap_evalf:
template = 'float(%s)' % template
elif self.complex_wrap_evalf:
template = 'complex(%s)' % template
# Wrapping should only happen on the outermost expression, which
# is the only thing we know will be a number.
float_wrap_evalf = self.float_wrap_evalf
complex_wrap_evalf = self.complex_wrap_evalf
self.float_wrap_evalf = False
self.complex_wrap_evalf = False
ret = template % (func_name, self.tree2str_translate(argtree))
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
return ret
##############################################################################
# The namespace constructors
##############################################################################
@classmethod
def sympy_expression_namespace(cls, expr):
"""Traverses the (func, args) tree of an expression and creates a sympy
namespace. All other modules are imported only as a module name. That way
the namespace is not polluted and remains quite small. It probably causes much
more variable lookups and so it takes more time, but there are no tests on
that for the moment."""
if expr is None:
return {}
else:
funcname = str(expr.func)
# XXX Workaround
# Here we add an ugly workaround because str(func(x))
# is not always the same as str(func). Eg
# >>> str(Integral(x))
# "Integral(x)"
# >>> str(Integral)
# "<class 'sympy.integrals.integrals.Integral'>"
# >>> str(sqrt(x))
# "sqrt(x)"
# >>> str(sqrt)
# "<function sqrt at 0x3d92de8>"
# >>> str(sin(x))
# "sin(x)"
# >>> str(sin)
# "sin"
# Either one of those can be used but not all at the same time.
# The code considers the sin example as the right one.
regexlist = [
r'<class \'sympy[\w.]*?.([\w]*)\'>$',
# the example Integral
r'<function ([\w]*) at 0x[\w]*>$', # the example sqrt
]
for r in regexlist:
m = re.match(r, funcname)
if m is not None:
funcname = m.groups()[0]
# End of the workaround
# XXX debug: print funcname
args_dict = {}
for a in expr.args:
if (isinstance(a, Symbol) or
isinstance(a, NumberSymbol) or
a in [I, zoo, oo]):
continue
else:
args_dict.update(cls.sympy_expression_namespace(a))
args_dict.update({funcname: expr.func})
return args_dict
@staticmethod
def sympy_atoms_namespace(expr):
"""For no real reason this function is separated from
sympy_expression_namespace. It can be moved to it."""
atoms = expr.atoms(Symbol, NumberSymbol, I, zoo, oo)
d = {}
for a in atoms:
# XXX debug: print 'atom:' + str(a)
d[str(a)] = a
return d<|fim▁end|> | return dict_fun |
<|file_name|>storage_transfer_client.go<|end_file_name|><|fim▁begin|>// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
package storagetransfer
import (
"context"
"fmt"
"math"
"net/url"
"time"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
gtransport "google.golang.org/api/transport/grpc"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
storagetransferpb "google.golang.org/genproto/googleapis/storagetransfer/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"google.golang.org/protobuf/proto"
)
var newClientHook clientHook
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
GetGoogleServiceAccount []gax.CallOption
CreateTransferJob []gax.CallOption
UpdateTransferJob []gax.CallOption
GetTransferJob []gax.CallOption
ListTransferJobs []gax.CallOption
PauseTransferOperation []gax.CallOption
ResumeTransferOperation []gax.CallOption
RunTransferJob []gax.CallOption
}
func defaultGRPCClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("storagetransfer.googleapis.com:443"),
internaloption.WithDefaultMTLSEndpoint("storagetransfer.mtls.googleapis.com:443"),
internaloption.WithDefaultAudience("https://storagetransfer.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
}
func defaultCallOptions() *CallOptions {
return &CallOptions{
GetGoogleServiceAccount: []gax.CallOption{},
CreateTransferJob: []gax.CallOption{},
UpdateTransferJob: []gax.CallOption{},
GetTransferJob: []gax.CallOption{},
ListTransferJobs: []gax.CallOption{},
PauseTransferOperation: []gax.CallOption{},
ResumeTransferOperation: []gax.CallOption{},
RunTransferJob: []gax.CallOption{},
}
}
// internalClient is an interface that defines the methods available from Storage Transfer API.
type internalClient interface {
Close() error
setGoogleClientInfo(...string)
Connection() *grpc.ClientConn
GetGoogleServiceAccount(context.Context, *storagetransferpb.GetGoogleServiceAccountRequest, ...gax.CallOption) (*storagetransferpb.GoogleServiceAccount, error)
CreateTransferJob(context.Context, *storagetransferpb.CreateTransferJobRequest, ...gax.CallOption) (*storagetransferpb.TransferJob, error)
UpdateTransferJob(context.Context, *storagetransferpb.UpdateTransferJobRequest, ...gax.CallOption) (*storagetransferpb.TransferJob, error)
GetTransferJob(context.Context, *storagetransferpb.GetTransferJobRequest, ...gax.CallOption) (*storagetransferpb.TransferJob, error)
ListTransferJobs(context.Context, *storagetransferpb.ListTransferJobsRequest, ...gax.CallOption) *TransferJobIterator
PauseTransferOperation(context.Context, *storagetransferpb.PauseTransferOperationRequest, ...gax.CallOption) error
ResumeTransferOperation(context.Context, *storagetransferpb.ResumeTransferOperationRequest, ...gax.CallOption) error
RunTransferJob(context.Context, *storagetransferpb.RunTransferJobRequest, ...gax.CallOption) (*RunTransferJobOperation, error)
RunTransferJobOperation(name string) *RunTransferJobOperation
}
// Client is a client for interacting with Storage Transfer API.
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
//
// Storage Transfer Service and its protos.
// Transfers data between Google Cloud Storage buckets or from a data
// source external to Google to a Cloud Storage bucket.
type Client struct {
// The internal transport-dependent client.
internalClient internalClient
// The call options for this service.
CallOptions *CallOptions
// LROClient is used internally to handle long-running operations.
// It is exposed so that its CallOptions can be modified if required.
// Users should not Close this client.
LROClient *lroauto.OperationsClient
}
// Wrapper methods routed to the internal client.
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.internalClient.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
c.internalClient.setGoogleClientInfo(keyval...)
}
// Connection returns a connection to the API service.
//
// Deprecated.
func (c *Client) Connection() *grpc.ClientConn {
return c.internalClient.Connection()
}
// GetGoogleServiceAccount returns the Google service account that is used by Storage Transfer
// Service to access buckets in the project where transfers
// run or in other projects. Each Google service account is associated
// with one Google Cloud Platform Console project. Users
// should add this service account to the Google Cloud Storage bucket
// ACLs to grant access to Storage Transfer Service. This service
// account is created and owned by Storage Transfer Service and can
// only be used by Storage Transfer Service.
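//
// A minimal call sketch (the project id is a hypothetical placeholder):
//
//	sa, err := c.GetGoogleServiceAccount(ctx, &storagetransferpb.GetGoogleServiceAccountRequest{
//		ProjectId: "my-project", // hypothetical
//	})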
func (c *Client) GetGoogleServiceAccount(ctx context.Context, req *storagetransferpb.GetGoogleServiceAccountRequest, opts ...gax.CallOption) (*storagetransferpb.GoogleServiceAccount, error) {
return c.internalClient.GetGoogleServiceAccount(ctx, req, opts...)
}
// CreateTransferJob creates a transfer job that runs periodically.
func (c *Client) CreateTransferJob(ctx context.Context, req *storagetransferpb.CreateTransferJobRequest, opts ...gax.CallOption) (*storagetransferpb.TransferJob, error) {
return c.internalClient.CreateTransferJob(ctx, req, opts...)
}
// UpdateTransferJob updates a transfer job. Updating a job’s transfer spec does not affect
// transfer operations that are running already.
//
// Note: The job’s status
// field can be modified using this RPC (for example, to set a job’s status to
// DELETED,
// DISABLED, or
// ENABLED).
func (c *Client) UpdateTransferJob(ctx context.Context, req *storagetransferpb.UpdateTransferJobRequest, opts ...gax.CallOption) (*storagetransferpb.TransferJob, error) {
return c.internalClient.UpdateTransferJob(ctx, req, opts...)
}
// GetTransferJob gets a transfer job.
func (c *Client) GetTransferJob(ctx context.Context, req *storagetransferpb.GetTransferJobRequest, opts ...gax.CallOption) (*storagetransferpb.TransferJob, error) {
return c.internalClient.GetTransferJob(ctx, req, opts...)
}
// ListTransferJobs lists transfer jobs.
func (c *Client) ListTransferJobs(ctx context.Context, req *storagetransferpb.ListTransferJobsRequest, opts ...gax.CallOption) *TransferJobIterator {
return c.internalClient.ListTransferJobs(ctx, req, opts...)
}
// PauseTransferOperation pauses a transfer operation.
func (c *Client) PauseTransferOperation(ctx context.Context, req *storagetransferpb.PauseTransferOperationRequest, opts ...gax.CallOption) error {
return c.internalClient.PauseTransferOperation(ctx, req, opts...)
}
// ResumeTransferOperation resumes a transfer operation that is paused.
func (c *Client) ResumeTransferOperation(ctx context.Context, req *storagetransferpb.ResumeTransferOperationRequest, opts ...gax.CallOption) error {
return c.internalClient.ResumeTransferOperation(ctx, req, opts...)
}
// RunTransferJob attempts to start a new TransferOperation for the current TransferJob. A
// TransferJob has a maximum of one active TransferOperation. If this method
// is called while a TransferOperation is active, an error will be returned.
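//
// A rough usage sketch (job and project ids are hypothetical placeholders;
// the request field names are assumed from the request proto):
//
//	op, err := c.RunTransferJob(ctx, &storagetransferpb.RunTransferJobRequest{
//		JobName:   "transferJobs/example-job", // hypothetical
//		ProjectId: "my-project",               // hypothetical
//	})
//	if err == nil {
//		err = op.Wait(ctx) // block until the transfer operation finishes
//	}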
func (c *Client) RunTransferJob(ctx context.Context, req *storagetransferpb.RunTransferJobRequest, opts ...gax.CallOption) (*RunTransferJobOperation, error) {
return c.internalClient.RunTransferJob(ctx, req, opts...)
}
// RunTransferJobOperation returns a new RunTransferJobOperation from a given name.
// The name must be that of a previously created RunTransferJobOperation, possibly from a different process.
func (c *Client) RunTransferJobOperation(name string) *RunTransferJobOperation {
return c.internalClient.RunTransferJobOperation(name)
}
// gRPCClient is a client for interacting with Storage Transfer API over gRPC transport.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type gRPCClient struct {
// Connection pool of gRPC connections to the service.
connPool gtransport.ConnPool
// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
disableDeadlines bool
// Points back to the CallOptions field of the containing Client
CallOptions **CallOptions
// The gRPC API client.
client storagetransferpb.StorageTransferServiceClient
// LROClient is used internally to handle long-running operations.
// It is exposed so that its CallOptions can be modified if required.
// Users should not Close this client.
LROClient **lroauto.OperationsClient
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewClient creates a new storage transfer service client based on gRPC.
// The returned client must be Closed when it is done being used to clean up its underlying connections.
//
// Storage Transfer Service and its protos.
// Transfers data between Google Cloud Storage buckets or from a data
// source external to Google to a Cloud Storage bucket.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
clientOpts := defaultGRPCClientOptions()
if newClientHook != nil {
hookOpts, err := newClientHook(ctx, clientHookParams{})
if err != nil {
return nil, err
}
clientOpts = append(clientOpts, hookOpts...)
}
disableDeadlines, err := checkDisableDeadlines()
if err != nil {
return nil, err
}
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
if err != nil {
return nil, err
}
client := Client{CallOptions: defaultCallOptions()}
c := &gRPCClient{
connPool: connPool,
disableDeadlines: disableDeadlines,
client: storagetransferpb.NewStorageTransferServiceClient(connPool),
CallOptions: &client.CallOptions,
}
c.setGoogleClientInfo()
client.internalClient = c
client.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool))
if err != nil {
// This error "should not happen", since we are just reusing old connection pool
// and never actually need to dial.
// If this does happen, we could leak the connection pool. However, we cannot close conn:
// If the user invoked the constructor with option.WithGRPCConn,
// we would close a connection that's still in use.
// TODO: investigate error conditions.
return nil, err
}
c.LROClient = &client.LROClient
return &client, nil
}
// Connection returns a connection to the API service.
//
// Deprecated.
func (c *gRPCClient) Connection() *grpc.ClientConn {
return c.connPool.Conn()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *gRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *gRPCClient) Close() error {
return c.connPool.Close()
}
func (c *gRPCClient) GetGoogleServiceAccount(ctx context.Context, req *storagetransferpb.GetGoogleServiceAccountRequest, opts ...gax.CallOption) (*storagetransferpb.GoogleServiceAccount, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project_id", url.QueryEscape(req.GetProjectId())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).GetGoogleServiceAccount[0:len((*c.CallOptions).GetGoogleServiceAccount):len((*c.CallOptions).GetGoogleServiceAccount)], opts...)
var resp *storagetransferpb.GoogleServiceAccount
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetGoogleServiceAccount(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
func (c *gRPCClient) CreateTransferJob(ctx context.Context, req *storagetransferpb.CreateTransferJobRequest, opts ...gax.CallOption) (*storagetransferpb.TransferJob, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).CreateTransferJob[0:len((*c.CallOptions).CreateTransferJob):len((*c.CallOptions).CreateTransferJob)], opts...)
var resp *storagetransferpb.TransferJob
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CreateTransferJob(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
func (c *gRPCClient) UpdateTransferJob(ctx context.Context, req *storagetransferpb.UpdateTransferJobRequest, opts ...gax.CallOption) (*storagetransferpb.TransferJob, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "job_name", url.QueryEscape(req.GetJobName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).UpdateTransferJob[0:len((*c.CallOptions).UpdateTransferJob):len((*c.CallOptions).UpdateTransferJob)], opts...)
var resp *storagetransferpb.TransferJob
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.UpdateTransferJob(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
func (c *gRPCClient) GetTransferJob(ctx context.Context, req *storagetransferpb.GetTransferJobRequest, opts ...gax.CallOption) (*storagetransferpb.TransferJob, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "job_name", url.QueryEscape(req.GetJobName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).GetTransferJob[0:len((*c.CallOptions).GetTransferJob):len((*c.CallOptions).GetTransferJob)], opts...)
var resp *storagetransferpb.TransferJob
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetTransferJob(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
func (c *gRPCClient) ListTransferJobs(ctx context.Context, req *storagetransferpb.ListTransferJobsRequest, opts ...gax.CallOption) *TransferJobIterator {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).ListTransferJobs[0:len((*c.CallOptions).ListTransferJobs):len((*c.CallOptions).ListTransferJobs)], opts...)
it := &TransferJobIterator{}
req = proto.Clone(req).(*storagetransferpb.ListTransferJobsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*storagetransferpb.TransferJob, string, error) {
resp := &storagetransferpb.ListTransferJobsResponse{}
if pageToken != "" {
req.PageToken = pageToken
}
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else if pageSize != 0 {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferJobs(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
it.Response = resp
return resp.GetTransferJobs(), resp.GetNextPageToken(), nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.GetPageSize())
it.pageInfo.Token = req.GetPageToken()
return it
}
func (c *gRPCClient) PauseTransferOperation(ctx context.Context, req *storagetransferpb.PauseTransferOperationRequest, opts ...gax.CallOption) error {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).PauseTransferOperation[0:len((*c.CallOptions).PauseTransferOperation):len((*c.CallOptions).PauseTransferOperation)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.client.PauseTransferOperation(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
func (c *gRPCClient) ResumeTransferOperation(ctx context.Context, req *storagetransferpb.ResumeTransferOperationRequest, opts ...gax.CallOption) error {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).ResumeTransferOperation[0:len((*c.CallOptions).ResumeTransferOperation):len((*c.CallOptions).ResumeTransferOperation)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.client.ResumeTransferOperation(ctx, req, settings.GRPC...)
return err
}, opts...)<|fim▁hole|>}
func (c *gRPCClient) RunTransferJob(ctx context.Context, req *storagetransferpb.RunTransferJobRequest, opts ...gax.CallOption) (*RunTransferJobOperation, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "job_name", url.QueryEscape(req.GetJobName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).RunTransferJob[0:len((*c.CallOptions).RunTransferJob):len((*c.CallOptions).RunTransferJob)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.RunTransferJob(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &RunTransferJobOperation{
lro: longrunning.InternalNewOperation(*c.LROClient, resp),
}, nil
}
// RunTransferJobOperation manages a long-running operation from RunTransferJob.
type RunTransferJobOperation struct {
lro *longrunning.Operation
}
// RunTransferJobOperation returns a new RunTransferJobOperation from a given name.
// The name must be that of a previously created RunTransferJobOperation, possibly from a different process.
func (c *gRPCClient) RunTransferJobOperation(name string) *RunTransferJobOperation {
return &RunTransferJobOperation{
lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
}
}
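// A minimal resume sketch (assumed caller code, not part of the generated
// client): an operation name saved earlier via op.Name() can be rehydrated
// in a different process and waited on. Client construction is elided.
//
//	op := c.RunTransferJobOperation(savedName)
//	if err := op.Wait(ctx); err != nil {
//		// the RPC failed or the operation completed with an error
//	}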
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *RunTransferJobOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
return op.lro.WaitWithInterval(ctx, nil, time.Minute, opts...)
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *RunTransferJobOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
return op.lro.Poll(ctx, nil, opts...)
}
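// A hand-rolled polling sketch (an assumption about typical use, not
// generated code): call Poll until Done reports completion, inspecting
// Metadata between attempts.
//
//	for !op.Done() {
//		if err := op.Poll(ctx); err != nil {
//			return err // RPC failure, or the operation finished with an error
//		}
//		if meta, _ := op.Metadata(); meta != nil {
//			log.Printf("transfer progress: %v", meta)
//		}
//		time.Sleep(10 * time.Second)
//	}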
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *RunTransferJobOperation) Metadata() (*storagetransferpb.TransferOperation, error) {
var meta storagetransferpb.TransferOperation
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}
// Done reports whether the long-running operation has completed.
func (op *RunTransferJobOperation) Done() bool {
return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *RunTransferJobOperation) Name() string {
return op.lro.Name()
}
// TransferJobIterator manages a stream of *storagetransferpb.TransferJob.
type TransferJobIterator struct {
items []*storagetransferpb.TransferJob
pageInfo *iterator.PageInfo
nextFunc func() error
// Response is the raw response for the current page.
// It must be cast to the RPC response type.
// Calling Next() or InternalFetch() updates this value.
Response interface{}
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*storagetransferpb.TransferJob, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferJobIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferJobIterator) Next() (*storagetransferpb.TransferJob, error) {
var item *storagetransferpb.TransferJob
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
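// A draining sketch (assumed caller code): iterate until the iterator.Done
// sentinel; it.Response may be cast to *storagetransferpb.ListTransferJobsResponse
// to inspect the raw page.
//
//	it := c.ListTransferJobs(ctx, req)
//	for {
//		job, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		_ = job // each item is a *storagetransferpb.TransferJob
//	}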
func (it *TransferJobIterator) bufLen() int {
return len(it.items)
}
func (it *TransferJobIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}<|fim▁end|> | return err |
<|file_name|>uninstall.ts<|end_file_name|><|fim▁begin|>import { ConfigManager } from './configuration/configManager';<|fim▁hole|>
void ConfigManager.removeSettings();<|fim▁end|> | |
<|file_name|>parse_number_test.cpp<|end_file_name|><|fim▁begin|>/* Copyright 2014 MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits>
#include "mongo/base/parse_number.h"
#include "mongo/base/status.h"
#include "mongo/platform/cstdint.h"
#include "mongo/platform/float_utils.h"
#include "mongo/util/mongoutils/str.h" // for str::stream()!
#include "mongo/unittest/unittest.h"
#define ASSERT_PARSES(TYPE, INPUT_STRING, EXPECTED_VALUE) do { \
TYPE v; \
ASSERT_OK(parseNumberFromString(INPUT_STRING, &v)); \
ASSERT_EQUALS(static_cast<TYPE>(EXPECTED_VALUE), v); \
} while (false)
#define ASSERT_PARSES_WITH_BASE(TYPE, INPUT_STRING, BASE, EXPECTED_VALUE) do { \
TYPE v; \
ASSERT_OK(parseNumberFromStringWithBase(INPUT_STRING, BASE, &v)); \
ASSERT_EQUALS(static_cast<TYPE>(EXPECTED_VALUE), v); \
} while (false)
namespace mongo {
namespace {
template <typename _NumberType>
class CommonNumberParsingTests {
public:
typedef _NumberType NumberType;
typedef std::numeric_limits<NumberType> Limits;
static void TestRejectingBadBases() {
NumberType ignored;
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase("0", -1, &ignored));
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase("10", 1, &ignored));
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase("-10", 37, &ignored));
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase(" ", -1, &ignored));
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase("f", 37, &ignored));
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase("^%", -1, &ignored));
}
static void TestParsingNonNegatives() {
ASSERT_PARSES(NumberType, "10", 10);
ASSERT_PARSES(NumberType, "0", 0);
ASSERT_PARSES(NumberType, "1", 1);
ASSERT_PARSES(NumberType, "0xff", 0xff);
ASSERT_PARSES(NumberType, "077", 077);
}
static void TestParsingNegatives() {
if (Limits::is_signed) {
ASSERT_PARSES(NumberType, "-10", -10);
ASSERT_PARSES(NumberType, "-0xff", -0xff);
ASSERT_PARSES(NumberType, "-077", -077);
}
else {
NumberType ignored;
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("-10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("-0xff", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("-077", &ignored));
}
}
static void TestParsingGarbage() {
NumberType ignored;
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString(" ", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString(" 10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("15b", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("--10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("+-10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("++10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("--10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("0x+10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("0x-10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("0+10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("0-10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("1+10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("1-10", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("48*3", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("0x", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("+", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("-", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("+0x", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("-0x", &ignored));
}
static void TestParsingWithExplicitBase() {
NumberType x;
ASSERT_PARSES_WITH_BASE(NumberType, "15b", 16, 0x15b);
ASSERT_PARSES_WITH_BASE(NumberType, "77", 8, 077);
ASSERT_PARSES_WITH_BASE(NumberType, "z", 36, 35);
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("1b", 10, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("80", 8, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("0X", 16, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("0x", 16, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("0x", 8, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("0X", 8, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("0x", 10, &x));<|fim▁hole|> ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("+0X", 8, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("+0x", 10, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("+0X", 10, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("-0X", 16, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("-0x", 16, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("-0x", 8, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("-0X", 8, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("-0x", 10, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("-0X", 10, &x));
}
static void TestParsingLimits() {
using namespace mongoutils;
NumberType ignored;
ASSERT_PARSES(NumberType, std::string(str::stream() << Limits::max()), Limits::max());
ASSERT_PARSES(NumberType, std::string(str::stream() << Limits::min()), Limits::min());
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString(std::string(str::stream() << Limits::max() << '0'), &ignored));
if (Limits::is_signed)
ASSERT_EQUALS(
ErrorCodes::FailedToParse,
parseNumberFromString(std::string(str::stream() << Limits::min() << '0'), &ignored));
}
};
#define GENERAL_NUMBER_TESTS(SHORT_NAME, TYPE) \
class ParseNumberTests##SHORT_NAME : public unittest::Test { \
public: \
typedef CommonNumberParsingTests<TYPE> TestFns; \
}; \
TEST_F(ParseNumberTests##SHORT_NAME, RejectBadBases) { \
TestFns::TestRejectingBadBases(); \
} \
TEST_F(ParseNumberTests##SHORT_NAME, ParseNonNegatives) { \
TestFns::TestParsingNonNegatives(); \
} \
TEST_F(ParseNumberTests##SHORT_NAME, ParseNegatives) { \
TestFns::TestParsingNegatives(); \
} \
TEST_F(ParseNumberTests##SHORT_NAME, ParseGarbage) { \
TestFns::TestParsingGarbage(); \
} \
TEST_F(ParseNumberTests##SHORT_NAME, ParseWithExplicitBase) { \
TestFns::TestParsingWithExplicitBase(); \
} \
TEST_F(ParseNumberTests##SHORT_NAME, TestParsingLimits) { \
TestFns::TestParsingLimits(); \
}
GENERAL_NUMBER_TESTS(Short, short)
GENERAL_NUMBER_TESTS(Int, int)
GENERAL_NUMBER_TESTS(Long, long)
GENERAL_NUMBER_TESTS(LongLong, long long)
GENERAL_NUMBER_TESTS(UnsignedShort, unsigned short)
GENERAL_NUMBER_TESTS(UnsignedInt, unsigned int)
GENERAL_NUMBER_TESTS(UnsignedLong, unsigned long)
GENERAL_NUMBER_TESTS(UnsignedLongLong, unsigned long long)
GENERAL_NUMBER_TESTS(Int16, int16_t);
GENERAL_NUMBER_TESTS(Int32, int32_t);
GENERAL_NUMBER_TESTS(Int64, int64_t);
GENERAL_NUMBER_TESTS(UInt16, uint16_t);
GENERAL_NUMBER_TESTS(UInt32, uint32_t);
GENERAL_NUMBER_TESTS(UInt64, uint64_t);
TEST(ParseNumber, NotNullTerminated) {
ASSERT_PARSES(int, StringData("1234", 3), 123);
}
TEST(ParseNumber, Int8) {
int8_t ignored;
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("-129", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("-130", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("-900", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("128", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("130", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("900", &ignored));
for (int32_t i = -128; i <= 127; ++i)
ASSERT_PARSES(int8_t, std::string(mongoutils::str::stream() << i), i);
}
TEST(ParseNumber, UInt8) {
uint8_t ignored;
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("-129", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("-130", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("-900", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("+256", &ignored));
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString("+900", &ignored));
for (uint32_t i = 0; i <= 255; ++i)
ASSERT_PARSES(uint8_t, std::string(mongoutils::str::stream() << i), i);
}
TEST(Double, TestRejectingBadBases) {
double ignored;
// Only supported base for parseNumberFromStringWithBase<double> is 0.
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase("0", -1, &ignored));
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase("0", 1, &ignored));
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase("0", 8, &ignored));
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase("0", 10, &ignored));
ASSERT_EQUALS(ErrorCodes::BadValue, parseNumberFromStringWithBase("0", 16, &ignored));
}
TEST(Double, TestParsingGarbage) {
double d;
CommonNumberParsingTests<double>::TestParsingGarbage();
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString<double>("1.0.1", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString<double>("1.0-1", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString<double>(" 1.0", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString<double>("1.0P4", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString<double>("1e6 ", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString<double>(" 1e6", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString<double>("1e6 ", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString<double>(" 1e6", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString<double>("0xabcab.defPa", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse,
parseNumberFromString<double>(StringData("1.0\0garbage",
StringData::LiteralTag()),
&d));
}
TEST(Double, TestParsingOverflow) {
double d;
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("1e309", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("-1e309", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("1e-400", &d));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromString("-1e-400", &d));
}
TEST(Double, TestParsingNan) {
double d = 0;
ASSERT_OK(parseNumberFromString("NaN", &d));
ASSERT_TRUE(isNaN(d));
}
TEST(Double, TestParsingInfinity) {
double d = 0;
ASSERT_OK(parseNumberFromString("infinity", &d));
ASSERT_TRUE(isInf(d));
d = 0;
ASSERT_OK(parseNumberFromString("-Infinity", &d));
ASSERT_TRUE(isInf(d));
}
TEST(Double, TestParsingNormal) {
ASSERT_PARSES(double, "10", 10);
ASSERT_PARSES(double, "0", 0);
ASSERT_PARSES(double, "1", 1);
ASSERT_PARSES(double, "-10", -10);
ASSERT_PARSES(double, "1e8", 1e8);
ASSERT_PARSES(double, "1e-8", 1e-8);
ASSERT_PARSES(double, "12e-8", 12e-8);
ASSERT_PARSES(double, "-485.381e-8", -485.381e-8);
#if !(defined(_WIN32) || defined(__sunos__))
// Parse hexadecimal representations of a double. Hex literals not supported by MSVC, and
// not parseable by the Windows SDK libc or the Solaris libc in the mode we build.
ASSERT_PARSES(double, "0xff", 0xff);
ASSERT_PARSES(double, "-0xff", -0xff);
ASSERT_PARSES(double, "0xabcab.defdefP-10", 0xabcab.defdefP-10);
#endif
}
} // namespace
} // namespace mongo<|fim▁end|> | ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("0X", 10, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("+0X", 16, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("+0x", 16, &x));
ASSERT_EQUALS(ErrorCodes::FailedToParse, parseNumberFromStringWithBase("+0x", 8, &x)); |
<|file_name|>csr_firewall_svc_helper.py<|end_file_name|><|fim▁begin|># Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_cisco.plugins.cisco.cfg_agent.service_helpers import (
service_helper)
from neutron.common import rpc as n_rpc
from neutron import context as n_context
from neutron.i18n import _LE
from neutron.plugins.common import constants
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
import oslo_messaging
from neutron_fwaas.services.firewall.drivers.cisco import csr_acl_driver
LOG = logging.getLogger(__name__)
CSR_FW_EVENT_Q_NAME = 'csr_fw_event_q'
CSR_FW_EVENT_CREATE = 'FW_EVENT_CREATE'
CSR_FW_EVENT_UPDATE = 'FW_EVENT_UPDATE'
CSR_FW_EVENT_DELETE = 'FW_EVENT_DELETE'
class CsrFirewallPluginApi(object):
"""CsrFirewallServiceHelper (Agent) side of the ACL RPC API."""
@log_helpers.log_method_call
def __init__(self, topic, host):
self.host = host
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
@log_helpers.log_method_call
def get_firewalls_for_device(self, context, **kwargs):
"""Get Firewalls with rules for a device from Plugin."""
cctxt = self.client.prepare()
return cctxt.call(context, 'get_firewalls_for_device', host=self.host)
@log_helpers.log_method_call
def get_firewalls_for_tenant(self, context, **kwargs):
"""Get Firewalls with rules for a tenant from the Plugin."""
cctxt = self.client.prepare()
return cctxt.call(context, 'get_firewalls_for_tenant', host=self.host)
<|fim▁hole|> cctxt = self.client.prepare()
return cctxt.call(context,
'get_tenants_with_firewalls', host=self.host)
@log_helpers.log_method_call
def set_firewall_status(self, context, fw_id, status, status_data=None):
"""Make a RPC to set the status of a firewall."""
cctxt = self.client.prepare()
return cctxt.call(context, 'set_firewall_status', host=self.host,
firewall_id=fw_id, status=status,
status_data=status_data)
def firewall_deleted(self, context, firewall_id):
"""Make a RPC to indicate that the firewall resources are deleted."""
cctxt = self.client.prepare()
return cctxt.call(context, 'firewall_deleted', host=self.host,
firewall_id=firewall_id)
class CsrFirewallServiceHelper(object):
@log_helpers.log_method_call
def __init__(self, host, conf, cfg_agent):
super(CsrFirewallServiceHelper, self).__init__()
self.conf = conf
self.cfg_agent = cfg_agent
self.fullsync = True
self.event_q = service_helper.QueueMixin()
self.fw_plugin_rpc = CsrFirewallPluginApi(
'CISCO_FW_PLUGIN', conf.host)
self.topic = 'CISCO_FW'
self._setup_rpc()
self.acl_driver = csr_acl_driver.CsrAclDriver()
def _setup_rpc(self):
self.conn = n_rpc.create_connection(new=True)
self.endpoints = [self]
self.conn.create_consumer(self.topic,
self.endpoints, fanout=True)
self.conn.consume_in_threads()
### Notifications from Plugin ####
def create_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to create a firewall."""
LOG.debug("create_firewall: firewall %s", firewall)
event_data = {'event': CSR_FW_EVENT_CREATE,
'context': context,
'firewall': firewall,
'host': host}
self.event_q.enqueue(CSR_FW_EVENT_Q_NAME, event_data)
def update_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to update a firewall."""
LOG.debug("update_firewall: firewall %s", firewall)
event_data = {'event': CSR_FW_EVENT_UPDATE,
'context': context,
'firewall': firewall,
'host': host}
self.event_q.enqueue(CSR_FW_EVENT_Q_NAME, event_data)
def delete_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to delete a firewall."""
LOG.debug("delete_firewall: firewall %s", firewall)
event_data = {'event': CSR_FW_EVENT_DELETE,
'context': context,
'firewall': firewall,
'host': host}
self.event_q.enqueue(CSR_FW_EVENT_Q_NAME, event_data)
def _invoke_firewall_driver(self, context, firewall, func_name):
LOG.debug("_invoke_firewall_driver: %s", func_name)
try:
if func_name == 'delete_firewall':
return_code = self.acl_driver.__getattribute__(func_name)(
None, None, firewall)
if not return_code:
LOG.debug("firewall %s", firewall['id'])
self.fw_plugin_rpc.set_firewall_status(
context, firewall['id'], constants.ERROR)
else:
self.fw_plugin_rpc.firewall_deleted(
context, firewall['id'])
else:
return_code, status = self.acl_driver.__getattribute__(
func_name)(None, None, firewall)
if not return_code:
LOG.debug("firewall %s", firewall['id'])
self.fw_plugin_rpc.set_firewall_status(
context, firewall['id'], constants.ERROR)
else:
LOG.debug("status %s", status)
self.fw_plugin_rpc.set_firewall_status(
context, firewall['id'], constants.ACTIVE, status)
except Exception:
LOG.debug("_invoke_firewall_driver: PRC failure")
self.fullsync = True
def _process_firewall_pending_op(self, context, firewall_list):
for firewall in firewall_list:
firewall_status = firewall['status']
if firewall_status == 'PENDING_CREATE':
self._invoke_firewall_driver(
context, firewall, 'create_firewall')
elif firewall_status == 'PENDING_UPDATE':
self._invoke_firewall_driver(
context, firewall, 'update_firewall')
elif firewall_status == 'PENDING_DELETE':
self._invoke_firewall_driver(
context, firewall, 'delete_firewall')
def _process_fullsync(self):
LOG.debug("_process_fullsync")
try:
context = n_context.get_admin_context()
tenants = self.fw_plugin_rpc.get_tenants_with_firewalls(
context)
LOG.debug("tenants with firewall: %s", tenants)
for tenant_id in tenants:
ctx = n_context.Context('', tenant_id)
firewall_list = self.fw_plugin_rpc.get_firewalls_for_tenant(
ctx)
self._process_firewall_pending_op(ctx, firewall_list)
except Exception:
LOG.debug("_process_fullsync: RPC failure")
self.fullsync = True
def _process_devices(self, device_ids):
LOG.debug("_process_devices: device_ids %s", device_ids)
try:
for device_id in device_ids:
ctx = n_context.Context('', device_id)
firewall_list = self.fw_plugin_rpc.get_firewalls_for_device(
ctx)
self._process_firewall_pending_op(ctx, firewall_list)
except Exception:
LOG.debug("_process_devices: RPC failure")
self.fullsync = True
def _process_event_q(self):
while True:
try:
event_data = self.event_q.dequeue(CSR_FW_EVENT_Q_NAME)
if not event_data:
return
except ValueError:
LOG.debug("_process_event_q: no queue yet")
return
LOG.debug("_process_event_q: event_data %s", event_data)
event = event_data['event']
context = event_data['context']
firewall = event_data['firewall']
if event == CSR_FW_EVENT_CREATE:
self._invoke_firewall_driver(
context, firewall, 'create_firewall')
elif event == CSR_FW_EVENT_UPDATE:
self._invoke_firewall_driver(
context, firewall, 'update_firewall')
elif event == CSR_FW_EVENT_DELETE:
self._invoke_firewall_driver(
context, firewall, 'delete_firewall')
else:
LOG.error(_LE("invalid event %s"), event)
def process_service(self, device_ids=None, removed_devices_info=None):
try:
if self.fullsync:
self.fullsync = False
self._process_fullsync()
else:
if device_ids:
self._process_devices(device_ids)
if removed_devices_info:
LOG.debug("process_service: removed_devices_info %s",
removed_devices_info)
# do nothing for now
else:
self._process_event_q()
except Exception:
LOG.exception(_LE('process_service exception ERROR'))<|fim▁end|> | @log_helpers.log_method_call
def get_tenants_with_firewalls(self, context, **kwargs):
"""Get Tenants that have Firewalls configured from plugin.""" |
<|file_name|>RuleAnythingUntil.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2009-2013 by The Regents of the University of California
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* you may obtain a copy of the License from
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.uci.ics.asterix.lexergenerator.rules;
public class RuleAnythingUntil implements Rule {
private char expected;
@Override
public RuleAnythingUntil clone() {
return new RuleAnythingUntil(expected);
}
public RuleAnythingUntil(char expected) {
this.expected = expected;
}
@Override
public String toString() {
return " .* " + String.valueOf(expected);
}
@Override
public int hashCode() {
return 10 * (int) expected;
}
@Override
public boolean equals(Object o) {
if (o == null)
return false;<|fim▁hole|> if (o instanceof RuleAnythingUntil) {
if (((RuleAnythingUntil) o).expected == this.expected) {
return true;
}
}
return false;
}
@Override
public String javaAction() {
return "currentChar = readNextChar();";
}
@Override
public String javaMatch(String action) {
return "boolean escaped = false;\n" + "while (currentChar != '" + expected + "' || escaped) {\n"
+ "if(!escaped && currentChar == '\\\\\\\\') {\n" + "escaped = true;\n" + "containsEscapes = true;\n"
+ "} else {\n" + "escaped = false;\n" + "}\n" + "currentChar = readNextChar();\n" + "}\n"
+ "if (currentChar == '" + expected + "') {" + action + "}\n";
}
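// Illustration (an assumption about the emitted text, shown with source-level
// string escaping collapsed): for expected == '"' and an action of
// "return token;", javaMatch yields scanner code along these lines:
//
//   boolean escaped = false;
//   while (currentChar != '"' || escaped) {
//       if (!escaped && currentChar == '\\') {
//           escaped = true;
//           containsEscapes = true;
//       } else {
//           escaped = false;
//       }
//       currentChar = readNextChar();
//   }
//   if (currentChar == '"') { return token; }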
}<|fim▁end|> | |
<|file_name|>form.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#<|fim▁hole|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import web
from web import form as webform
import httpconfig
class Form(object):
"""Form class"""
def __init__(self, names=[]):
self._form = self.createForm(names)
self.httpConfig = httpconfig.HttpConfig(web.ctx.env["DOCUMENT_ROOT"])
@property
def form(self):
return self._form
def createForm(self, names=[]):
# Text area for sending path data
pathDataArea = webform.Textarea("", rows=30, cols=90, value="", id="pathData", hidden=True)
form = webform.Form(pathDataArea)
return form<|fim▁end|> | # http://www.apache.org/licenses/LICENSE-2.0 |
<|file_name|>update_ingest.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from datetime import timedelta, timezone, datetime
from flask import current_app as app
from werkzeug.exceptions import HTTPException
import superdesk
from superdesk.activity import ACTIVITY_EVENT, notify_and_add_activity
from superdesk.celery_app import celery
from superdesk.celery_task_utils import get_lock_id, get_host_id
from superdesk.errors import ProviderError
from superdesk.io import registered_feeding_services, registered_feed_parsers
from superdesk.io.iptc import subject_codes
from superdesk.lock import lock, unlock
from superdesk.media.media_operations import download_file_from_url, process_file
from superdesk.media.renditions import generate_renditions
from superdesk.metadata.item import GUID_NEWSML, GUID_FIELD, FAMILY_ID, ITEM_TYPE, CONTENT_TYPE, CONTENT_STATE, \
ITEM_STATE
from superdesk.metadata.utils import generate_guid
from superdesk.notification import push_notification
from superdesk.stats import stats
from superdesk.upload import url_for_media
from superdesk.utc import utcnow, get_expiry_date
from superdesk.workflow import set_default_state
UPDATE_SCHEDULE_DEFAULT = {'minutes': 5}
LAST_UPDATED = 'last_updated'
LAST_ITEM_UPDATE = 'last_item_update'
IDLE_TIME_DEFAULT = {'hours': 0, 'minutes': 0}
logger = logging.getLogger(__name__)
def is_service_and_parser_registered(provider):
"""
Tests if the Feed Service and Feed Parser associated with the provider are registered with the application.
:param provider:
:type provider: dict :py:class:`superdesk.io.ingest_provider_model.IngestProviderResource`
:return: True if both Feed Service and Feed Parser are registered. False otherwise.
:rtype: bool
"""
return provider.get('feeding_service') in registered_feeding_services and \
(provider.get('feed_parser') is None or provider.get('feed_parser') in registered_feed_parsers)
def is_scheduled(provider):
"""Test if given provider should be scheduled for update.
:param provider: ingest provider
"""
now = utcnow()
last_updated = provider.get(LAST_UPDATED, now - timedelta(days=100)) # if never updated run now
update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
return last_updated + timedelta(**update_schedule) < now
def is_closed(provider):
"""Test if provider is closed.
:param provider: ingest provider
"""
return provider.get('is_closed', False)
def filter_expired_items(provider, items):
"""
Filters out items from the list of articles to be ingested
if they are expired or their item['type'] is not in provider['content_types'].
:param provider: Ingest Provider Details.
:type provider: dict :py:class: `superdesk.io.ingest_provider_model.IngestProviderResource`
:param items: list of items received from the provider
:type items: list
:return: list of items which can be saved into ingest collection
:rtype: list
"""
def is_not_expired(item):
if item.get('expiry') or item.get('versioncreated'):
expiry = item.get('expiry', item['versioncreated'] + delta)
if expiry.tzinfo:
return expiry > utcnow()
return False
try:
delta = timedelta(minutes=provider.get('content_expiry', app.config['INGEST_EXPIRY_MINUTES']))
filtered_items = [item for item in items if is_not_expired(item) and
item[ITEM_TYPE] in provider['content_types']]
if len(items) != len(filtered_items):
logger.debug('Received {0} articles from provider {1}, but only {2} are eligible to be saved in ingest'
.format(len(items), provider['name'], len(filtered_items)))
return filtered_items
except Exception as ex:
raise ProviderError.providerFilterExpiredContentError(ex, provider)
def get_provider_rule_set(provider):
if provider.get('rule_set'):
return superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
def get_provider_routing_scheme(provider):
"""Returns the ingests provider's routing scheme configuration.
If provider has a routing scheme defined (i.e. scheme ID is not None), the
scheme is fetched from the database. If not, nothing is returned.
For all scheme rules that have a reference to a content filter defined,
that filter's configuration is fetched from the database as well and
embedded into the corresponding scheme rule.
:param dict provider: ingest provider configuration
:return: fetched provider's routing scheme configuration (if any)
:rtype: dict or None
"""
if not provider.get('routing_scheme'):
return None
schemes_service = superdesk.get_resource_service('routing_schemes')
filters_service = superdesk.get_resource_service('content_filters')
scheme = schemes_service.find_one(_id=provider['routing_scheme'], req=None)
# for those routing rules that have a content filter defined,
# get that filter from DB and embed it into the rule...
rules_filters = (
(rule, str(rule['filter']))
for rule in scheme['rules'] if rule.get('filter'))
for rule, filter_id in rules_filters:
content_filter = filters_service.find_one(_id=filter_id, req=None)
rule['filter'] = content_filter
return scheme
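# Shape sketch (illustrative field values, not taken from this module): a rule
# that arrives as
#     {'name': 'breaking', 'filter': '5a1b...', 'actions': [...]}
# leaves this function with the referenced content filter document embedded in
# place of the id:
#     {'name': 'breaking', 'filter': {'_id': '5a1b...', ...}, 'actions': [...]}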
def get_task_ttl(provider):
update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
return update_schedule.get('minutes', 0) * 60 + update_schedule.get('hours', 0) * 3600
def get_is_idle(provider):
last_item = provider.get(LAST_ITEM_UPDATE)
idle_time = provider.get('idle_time', IDLE_TIME_DEFAULT)
if isinstance(idle_time['hours'], datetime):
idle_hours = 0
else:
idle_hours = idle_time['hours']
if isinstance(idle_time['minutes'], datetime):
idle_minutes = 0
else:
idle_minutes = idle_time['minutes']
# there is an update time and the idle time is none zero
if last_item and (idle_hours != 0 or idle_minutes != 0):
if utcnow() > last_item + timedelta(hours=idle_hours, minutes=idle_minutes):
return True
return False
def get_task_id(provider):
return 'update-ingest-{0}-{1}'.format(provider.get('name'), provider.get(superdesk.config.ID_FIELD))
class UpdateIngest(superdesk.Command):
"""Update ingest providers."""
option_list = {superdesk.Option('--provider', '-p', dest='provider_name')}
def run(self, provider_name=None):
lookup = {} if not provider_name else {'name': provider_name}
for provider in superdesk.get_resource_service('ingest_providers').get(req=None, lookup=lookup):
if not is_closed(provider) and is_service_and_parser_registered(provider) and is_scheduled(provider):
kwargs = {
'provider': provider,
'rule_set': get_provider_rule_set(provider),
'routing_scheme': get_provider_routing_scheme(provider)
}
update_provider.apply_async(expires=get_task_ttl(provider), kwargs=kwargs)
@celery.task(soft_time_limit=1800, bind=True)
def update_provider(self, provider, rule_set=None, routing_scheme=None):
"""
Fetches items from ingest provider as per the configuration, ingests them into Superdesk and
updates the provider.
:param self:
:type self:
:param provider: Ingest Provider Details
:type provider: dict :py:class:`superdesk.io.ingest_provider_model.IngestProviderResource`
:param rule_set: Translation Rule Set if one is associated with Ingest Provider.
:type rule_set: dict :py:class:`apps.rules.rule_sets.RuleSetsResource`
:param routing_scheme: Routing Scheme if one is associated with Ingest Provider.
:type routing_scheme: dict :py:class:`apps.rules.routing_rules.RoutingRuleSchemeResource`
"""
lock_name = get_lock_id('ingest', provider['name'], provider[superdesk.config.ID_FIELD])
host_name = get_host_id(self)
if not lock(lock_name, host_name, expire=1800):
return
try:
feeding_service = registered_feeding_services[provider['feeding_service']]
feeding_service = feeding_service.__class__()
update = {LAST_UPDATED: utcnow()}
for items in feeding_service.update(provider):
ingest_items(items, provider, feeding_service, rule_set, routing_scheme)
stats.incr('ingest.ingested_items', len(items))
if items:
update[LAST_ITEM_UPDATE] = utcnow()
# Some Feeding Services update the collection and by this time the _etag might have been changed.
# So it's necessary to fetch it once again. Otherwise, OriginalChangedError is raised.
ingest_provider_service = superdesk.get_resource_service('ingest_providers')
provider = ingest_provider_service.find_one(req=None, _id=provider[superdesk.config.ID_FIELD])
ingest_provider_service.system_update(provider[superdesk.config.ID_FIELD], update, provider)
if LAST_ITEM_UPDATE not in update and get_is_idle(provider):
admins = superdesk.get_resource_service('users').get_users_by_user_type('administrator')
notify_and_add_activity(
ACTIVITY_EVENT,
'Provider {{name}} has gone strangely quiet. Last activity was on {{last}}',
resource='ingest_providers', user_list=admins, name=provider.get('name'),
last=provider[LAST_ITEM_UPDATE].replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%c"))
logger.info('Provider {0} updated'.format(provider[superdesk.config.ID_FIELD]))
if LAST_ITEM_UPDATE in update: # Only push a notification if there has been an update
push_notification('ingest:update', provider_id=str(provider[superdesk.config.ID_FIELD]))
finally:
unlock(lock_name, host_name)
def process_anpa_category(item, provider):
try:
anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
if anpa_categories:
for item_category in item['anpa_category']:
for anpa_category in anpa_categories['items']:
if anpa_category['is_active'] is True \
and item_category['qcode'].lower() == anpa_category['qcode'].lower():
item_category['name'] = anpa_category['name']
# make the case of the qcode match what we hold in our dictionary
item_category['qcode'] = anpa_category['qcode']
break
except Exception as ex:
raise ProviderError.anpaError(ex, provider)
def derive_category(item, provider):
"""
Assuming that the item has at least one itpc subject use the vocabulary map to derive an anpa category
:param item:
:return: An item with a category if possible
"""
try:
categories = []
subject_map = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='iptc_category_map')
if subject_map:
for entry in (map_entry for map_entry in subject_map['items'] if map_entry['is_active']):
for subject in item.get('subject', []):
if subject['qcode'] == entry['subject']:
if not any(c['qcode'] == entry['category'] for c in categories):
categories.append({'qcode': entry['category']})
if len(categories):
item['anpa_category'] = categories
process_anpa_category(item, provider)
except Exception as ex:
logger.exception(ex)
def process_iptc_codes(item, provider):
"""
Ensures that the higher level IPTC codes are present by inserting them if missing, for example
if given 15039001 (Formula One) make sure that 15039000 (motor racing) and 15000000 (sport) are there as well
:param item: A story item
:return: A story item with possible expanded subjects
"""
try:
def iptc_already_exists(code):
for entry in item['subject']:
if 'qcode' in entry and code == entry['qcode']:
return True
return False
for subject in item['subject']:
if 'qcode' in subject and len(subject['qcode']) == 8:
top_qcode = subject['qcode'][:2] + '000000'
if not iptc_already_exists(top_qcode):
item['subject'].append({'qcode': top_qcode, 'name': subject_codes[top_qcode]})
mid_qcode = subject['qcode'][:5] + '000'
if not iptc_already_exists(mid_qcode):
item['subject'].append({'qcode': mid_qcode, 'name': subject_codes[mid_qcode]})
except Exception as ex:
raise ProviderError.iptcError(ex, provider)
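# Worked example (codes and names taken from the docstring above): given
#     item['subject'] = [{'qcode': '15039001', 'name': 'Formula One'}]
# this function appends the missing broader terms, leaving
#     [{'qcode': '15039001', 'name': 'Formula One'},
#      {'qcode': '15000000', 'name': 'sport'},
#      {'qcode': '15039000', 'name': 'motor racing'}]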
def derive_subject(item):
"""
Assuming that the item has an anpa category try to derive a subject using the anpa category vocabulary
:param item:
:return:
"""
try:
category_map = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
if category_map:
for cat in item['anpa_category']:
map_entry = next(
(code for code in category_map['items'] if code['qcode'] == cat['qcode'] and code['is_active']),
None)
if map_entry and 'subject' in map_entry:
item['subject'] = [
{'qcode': map_entry.get('subject'), 'name': subject_codes[map_entry.get('subject')]}]
except Exception as ex:
logger.exception(ex)
def apply_rule_set(item, provider, rule_set=None):
"""
Applies rules set on the item to be ingested into the system. If there's no rule set then the item will
be returned without any change.
:param item: Item to be ingested
:param provider: provider object from whom the item was received
:return: item
"""
try:
if rule_set is None and provider.get('rule_set') is not None:
rule_set = superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
if rule_set and 'body_html' in item:
body = item['body_html']
for rule in rule_set['rules']:
body = body.replace(rule['old'], rule['new'])
item['body_html'] = body
return item
except Exception as ex:
raise ProviderError.ruleError(ex, provider)
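# Minimal sketch (hypothetical rule set): with
#     rule_set = {'rules': [{'old': 'colour', 'new': 'color'}]}
# an item whose body_html is '<p>The colour red</p>' is returned with
# body_html '<p>The color red</p>'; items without body_html pass through
# unchanged.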
def ingest_cancel(item):
"""
Given an item that has a pubstatus of canceled finds all versions of this item and mark them as canceled as well.
Uses the URI to identify those items in ingest that are related to this cancellation.
:param item:
:return:
"""
ingest_service = superdesk.get_resource_service('ingest')
lookup = {'uri': item.get('uri')}
family_members = ingest_service.get_from_mongo(req=None, lookup=lookup)
for relative in family_members:
update = {'pubstatus': 'canceled', ITEM_STATE: CONTENT_STATE.KILLED}
ingest_service.patch(relative['_id'], update)
def ingest_items(items, provider, feeding_service, rule_set=None, routing_scheme=None):
all_items = filter_expired_items(provider, items)
items_dict = {doc[GUID_FIELD]: doc for doc in all_items}
items_in_package = []
failed_items = set()
for item in [doc for doc in all_items if doc.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE]:
items_in_package = [ref['residRef'] for group in item.get('groups', [])
for ref in group.get('refs', []) if 'residRef' in ref]
for item in [doc for doc in all_items if doc.get(ITEM_TYPE) != CONTENT_TYPE.COMPOSITE]:
ingested = ingest_item(item, provider, feeding_service, rule_set,
routing_scheme=routing_scheme if not item[GUID_FIELD] in items_in_package else None)
if not ingested:
failed_items.add(item[GUID_FIELD])
for item in [doc for doc in all_items if doc.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE]:
for ref in [ref for group in item.get('groups', [])
for ref in group.get('refs', []) if 'residRef' in ref]:
if ref['residRef'] in failed_items:
failed_items.add(item[GUID_FIELD])
continue
ref.setdefault('location', 'ingest')
itemRendition = items_dict.get(ref['residRef'], {}).get('renditions')
if itemRendition:
ref.setdefault('renditions', itemRendition)
ref[GUID_FIELD] = ref['residRef']
if items_dict.get(ref['residRef']):
ref['residRef'] = items_dict.get(ref['residRef'], {}).get(superdesk.config.ID_FIELD)
if item[GUID_FIELD] in failed_items:
continue<|fim▁hole|> if not ingested:
failed_items.add(item[GUID_FIELD])
app.data._search_backend('ingest').bulk_insert('ingest', [item for item in all_items
if item[GUID_FIELD] not in failed_items])
if failed_items:
logger.error('Failed to ingest the following items: %s', failed_items)
return failed_items
def ingest_item(item, provider, feeding_service, rule_set=None, routing_scheme=None):
try:
item.setdefault(superdesk.config.ID_FIELD, generate_guid(type=GUID_NEWSML))
item[FAMILY_ID] = item[superdesk.config.ID_FIELD]
item['ingest_provider'] = str(provider[superdesk.config.ID_FIELD])
item.setdefault('source', provider.get('source', ''))
set_default_state(item, CONTENT_STATE.INGESTED)
item['expiry'] = get_expiry_date(provider.get('content_expiry', app.config['INGEST_EXPIRY_MINUTES']),
item.get('versioncreated'))
if 'anpa_category' in item:
process_anpa_category(item, provider)
if 'subject' in item:
process_iptc_codes(item, provider)
if 'anpa_category' not in item:
derive_category(item, provider)
elif 'anpa_category' in item:
derive_subject(item)
apply_rule_set(item, provider, rule_set)
ingest_service = superdesk.get_resource_service('ingest')
if item.get('ingest_provider_sequence') is None:
ingest_service.set_ingest_provider_sequence(item, provider)
old_item = ingest_service.find_one(guid=item[GUID_FIELD], req=None)
if item.get('pubstatus', '') == 'canceled':
item[ITEM_STATE] = CONTENT_STATE.KILLED
ingest_cancel(item)
rend = item.get('renditions', {})
if rend:
baseImageRend = rend.get('baseImage') or next(iter(rend.values()))
if baseImageRend:
href = feeding_service.prepare_href(baseImageRend['href'])
update_renditions(item, href, old_item)
new_version = True
if old_item:
# In case we already have the item, preserve the _id
item[superdesk.config.ID_FIELD] = old_item[superdesk.config.ID_FIELD]
ingest_service.put_in_mongo(item[superdesk.config.ID_FIELD], item)
# if the feed is versioned and this is not a new version
if 'version' in item and 'version' in old_item and item.get('version') == old_item.get('version'):
new_version = False
else:
try:
ingest_service.post_in_mongo([item])
except HTTPException as e:
logger.error("Exception while persisting item in ingest collection", e)
if routing_scheme and new_version:
routed = ingest_service.find_one(_id=item[superdesk.config.ID_FIELD], req=None)
superdesk.get_resource_service('routing_schemes').apply_routing_scheme(routed, provider, routing_scheme)
except Exception as ex:
logger.exception(ex)
try:
superdesk.app.sentry.captureException()
except Exception:
pass
return False
return True
def update_renditions(item, href, old_item):
"""
If the old_item has renditions uploaded into media then the old rendition details are
assigned to the item; this avoids repeatedly downloading the same image and leaving the media entries orphaned.
If there is no old_item the original is downloaded and renditions are
generated.
:param item: parsed item from source
:param href: reference to original
:param old_item: the item that we have already ingested, if it exists
:return: item with renditions
"""
inserted = []
try:
# If there is an existing set of renditions we keep those
if old_item:
media = old_item.get('renditions', {}).get('original', {}).get('media', {})
if media:
item['renditions'] = old_item['renditions']
item['mimetype'] = old_item.get('mimetype')
item['filemeta'] = old_item.get('filemeta')
return
content, filename, content_type = download_file_from_url(href)
file_type, ext = content_type.split('/')
metadata = process_file(content, file_type)
file_guid = app.media.put(content, filename, content_type, metadata)
inserted.append(file_guid)
rendition_spec = app.config.get('RENDITIONS', {}).get('picture', {})
renditions = generate_renditions(content, file_guid, inserted, file_type,
content_type, rendition_spec, url_for_media)
item['renditions'] = renditions
item['mimetype'] = content_type
item['filemeta'] = metadata
except Exception:
for file_id in inserted:
app.media.delete(file_id)
raise
superdesk.command('ingest:update', UpdateIngest())<|fim▁end|> |
ingested = ingest_item(item, provider, feeding_service, rule_set, routing_scheme) |
<|file_name|>simd-intrinsic-generic-scatter.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-emscripten
// ignore-tidy-linelength
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
#![feature(repr_simd, platform_intrinsics)]
#![allow(non_camel_case_types)]<|fim▁hole|>#[derive(Copy, Clone, PartialEq, Debug)]
pub struct Vec2<T>(pub T, pub T);
#[repr(simd)]
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct Vec4<T>(pub T, pub T, pub T, pub T);
extern "platform-intrinsic" {
fn simd_scatter<T, P, M>(value: T, pointers: P, mask: M);
}
// CHECK-LABEL: @scatter_f32x2
#[no_mangle]
pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>,
values: Vec2<f32>) {
// CHECK: call void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float> {{.*}}, <2 x float*> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
simd_scatter(values, pointers, mask)
}
// CHECK-LABEL: @scatter_pf32x2
#[no_mangle]
pub unsafe fn scatter_pf32x2(pointers: Vec2<*mut *const f32>, mask: Vec2<i32>,
values: Vec2<*const f32>) {
// CHECK: call void @llvm.masked.scatter.v2p0f32.v2p0p0f32(<2 x float*> {{.*}}, <2 x float**> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
simd_scatter(values, pointers, mask)
}<|fim▁end|> |
#[repr(simd)] |
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>import { Message, MessagePayload } from '../main'
import { ObservableWithInitialValue, Observable } from '../observable'
export interface RegisterMessages<S> {
/**
* Registers an Observable and call the handler function every time the observable has a new value.
* The handler is called with the current state and the new value of the observable.
* Returning undefined or the current state in the handler is a no-op.
*/
<T>(observable: Observable<T>, handler: (value: T) => S|void): void
/**
* Registers a Message and call the handler function every time the message is sent.
* The handler is called with the current state.
* Returning undefined or the current state in the handler is a no-op.
*/
(message: Message.NoPayload, handler: () => S|void): void
/**
* Registers a Message and call the handler function every time the message is sent.
* The handler is called with the current state and the payload of the message.
* Returning undefined or the current state in the handler is a no-op.
*/
<P>(message: Message.OnePayload<P>, handler: (payload: P) => S|void): void
/**
* Registers a Message and call the handler function every time the message is sent.
* The handler is called with the current state and the payload of the message.
* Returning undefined or the current state in the handler is a no-op.
*/
<P1, P2>(message: Message.TwoPayloads<P1, P2>, handler: (payload1: P1, payload2: P2) => S|void): void
}
export interface Messages {
/**
* Listens for a particular message. This is used to create a new Observable.
*/
listen<P>(message: Message.OnePayload<P>): Observable<P>
/**
* Listens for a particular message. This is used to create a new Observable.
*/
listen(message: Message.NoPayload): Observable<undefined>
/**
* Sends a message to self.
*
* Example:
* msg.send(AjaxSuccess([1, 2]))<|fim▁hole|>
export interface Store<S> {
/**
* The observable of this store's state.
* This observable always have a value.
*/
state: ObservableWithInitialValue<S>
/**
* Sends a message to this store.
*/
send<P>(payload: MessagePayload<P>): void
/**
* Destroys this transient store
*/
destroy(): void
}
interface StoreOptions {
name?: string
log?: boolean
}
interface RegisterHandlersParams<S> {
on: RegisterMessages<S>
msg: Messages
state: ObservableWithInitialValue<S>
}
/**
* Creates a new store.
*/
export function Store<S>(
initialState: S,
registerHandlers: (params: RegisterHandlersParams<S>) => void,
options?: StoreOptions
): Store<S><|fim▁end|> | */
send<P>(payload: MessagePayload<P>): void
} |
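// A usage sketch (assumed client code; `setCount` and `reset` stand for
// message constructors from the main module and are not declared in this file):
//
//   const counter = Store(0, ({ on, msg }) => {
//     on(setCount, n => n)   // the returned value becomes the new state
//     on(reset, () => 0)     // returning undefined would be a no-op
//     msg.listen(setCount)   // derive a new Observable from incoming messages
//   }, { name: 'counter', log: true })
//
//   counter.send(setCount(42))   // send() takes a MessagePayload
//   counter.state                // ObservableWithInitialValue<number>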
<|file_name|>grafana_dashboard.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Thierry Sallé (@tsalle)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: grafana_dashboard
author:
- "Thierry Sallé (@tsalle)"
version_added: "2.5"
short_description: Manage grafana dashboards
description:
- Create, update, delete, export grafana dashboards via API
options:
grafana_url:
required: true
description:
- Grafana url
grafana_user:
required: false
default: admin
description:
- Grafana API user
grafana_password:
required: false
default: admin
description:
- Grafana API password
grafana_api_key:
required: false
description:
- Grafana API key
- If set, I(grafana_user) and I(grafana_password) will be ignored
org_id:
required: false
description:
- Grafana Organisation ID where the dashboard will be imported / exported
- Not used when I(grafana_api_key) is set, because the grafana_api_key only belongs to one organisation.
default: 1
state:
required: true
default: present
description:
- State of the dashboard.
choices: ['present', 'absent', 'export']
slug:
description:
- slug of the dashboard. It's the friendly url name of the dashboard.
- When state is present, this parameter can override the slug in the meta section of the json file.
- If you want to import a json dashboard exported directly from the interface (not from the api),
- you have to specify the slug parameter because there is no meta section in the exported json.
path:
description:
- path to the json file containing the grafana dashboard to import or export.
overwrite:
default: false
description:
- overwrite the existing dashboard when state is present.
message:
description:
- Set a commit message for the version history.
- Only used when state is present
validate_certs:
default: true
type: bool
description:
- If C(no), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
'''
EXAMPLES = '''
---
- name: import grafana dashboard foo
grafana_dashboard:
grafana_url: http://grafana.company.com
grafana_api_key: XXXXXXXXXXXX
state: present
message: "updated by ansible"
overwrite: true
path: /path/to/dashboards/foo.json
- name: export dashboard
grafana_dashboard:
grafana_url: http://grafana.company.com
grafana_api_key: XXXXXXXXXXXX
state: export
slug: foo
path: /path/to/dashboards/foo.json
'''
RETURN = '''
---
slug:
description: slug of the created / deleted / exported dashboard.
returned: success
type: string
sample: foo
'''
import base64
import json
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
__metaclass__ = type
class GrafanaAPIException(Exception):
pass
class GrafanaMalformedJson(Exception):
pass
class GrafanaExportException(Exception):
pass
def grafana_switch_organisation(module, grafana_url, org_id, headers):
r, info = fetch_url(module, '%s/api/user/using/%s' % (grafana_url, org_id), headers=headers, method='POST')
if info['status'] != 200:
raise GrafanaAPIException('Unable to switch to organization %s : %s' % (org_id, info))
def grafana_dashboard_exists(module, grafana_url, slug, headers):
dashboard_exists = False
dashboard = {}
r, info = fetch_url(module, '%s/api/dashboards/db/%s' % (grafana_url, slug), headers=headers, method='GET')
if info['status'] == 200:
dashboard_exists = True
try:
dashboard = json.loads(r.read())
except Exception as e:
raise GrafanaMalformedJson(e)
elif info['status'] == 404:
dashboard_exists = False
else:
raise GrafanaAPIException('Unable to get dashboard %s : %s' % (slug, info))
return dashboard_exists, dashboard
def grafana_create_dashboard(module, data):
# define data payload for grafana API
try:
with open(data['path'], 'r') as json_file:
payload = json.load(json_file)
except Exception as e:
raise GrafanaMalformedJson("Can't load json file %s" % str(e))
# define http header
headers = {'content-type': 'application/json; charset=utf8'}
if 'grafana_api_key' in data and data['grafana_api_key']:
headers['Authorization'] = "Bearer %s" % data['grafana_api_key']
else:
auth = base64.encodestring('%s:%s' % (data['grafana_user'], data['grafana_password'])).replace('\n', '')
headers['Authorization'] = 'Basic %s' % auth
grafana_switch_organisation(module, data['grafana_url'], data['org_id'], headers)
if data.get('slug'):
slug = data['slug']
elif 'meta' in payload and 'slug' in payload['meta']:
slug = payload['meta']['slug']
else:
raise GrafanaMalformedJson('No slug found in json')
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], slug, headers=headers)
result = {}
if dashboard_exists is True:
if dashboard == payload:
# unchanged
            result['slug'] = slug
            result['msg'] = "Dashboard %s unchanged." % slug
result['changed'] = False
else:
# update
if 'overwrite' in data and data['overwrite']:
payload['overwrite'] = True
if 'message' in data and data['message']:
payload['message'] = data['message']
r, info = fetch_url(module, '%s/api/dashboards/db' % data['grafana_url'], data=json.dumps(payload), headers=headers, method='POST')
if info['status'] == 200:
result['slug'] = slug
result['msg'] = "Dashboard %s updated" % slug
result['changed'] = True
else:
body = json.loads(info['body'])
raise GrafanaAPIException('Unable to update the dashboard %s : %s' % (slug, body['message']))
else:
# create
if 'dashboard' not in payload:
payload = {'dashboard': payload}
r, info = fetch_url(module, '%s/api/dashboards/db' % data['grafana_url'], data=json.dumps(payload), headers=headers, method='POST')
if info['status'] == 200:
result['msg'] = "Dashboard %s created" % slug
result['changed'] = True
result['slug'] = slug
else:
raise GrafanaAPIException('Unable to create the new dashboard %s : %s - %s.' % (slug, info['status'], info))
return result
def grafana_delete_dashboard(module, data):
# define http headers
headers = {'content-type': 'application/json'}
if 'grafana_api_key' in data and data['grafana_api_key']:
headers['Authorization'] = "Bearer %s" % data['grafana_api_key']
else:
auth = base64.encodestring('%s:%s' % (data['grafana_user'], data['grafana_password'])).replace('\n', '')
headers['Authorization'] = 'Basic %s' % auth
grafana_switch_organisation(module, data['grafana_url'], data['org_id'], headers)
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], data['slug'], headers=headers)
result = {}
if dashboard_exists is True:
# delete
r, info = fetch_url(module, '%s/api/dashboards/db/%s' % (data['grafana_url'], data['slug']), headers=headers, method='DELETE')
if info['status'] == 200:
result['msg'] = "Dashboard %s deleted" % data['slug']
result['changed'] = True
result['slug'] = data['slug']
else:
            raise GrafanaAPIException('Unable to delete the dashboard %s : %s' % (data['slug'], info))
else:
        # dashboard does not exist: do nothing
        result = {'msg': "Dashboard %s does not exist" % data['slug'],
'changed': False,
'slug': data['slug']}
return result
def grafana_export_dashboard(module, data):
# define http headers
headers = {'content-type': 'application/json'}
if 'grafana_api_key' in data and data['grafana_api_key']:
headers['Authorization'] = "Bearer %s" % data['grafana_api_key']
else:
auth = base64.encodestring('%s:%s' % (data['grafana_user'], data['grafana_password'])).replace('\n', '')
headers['Authorization'] = 'Basic %s' % auth
grafana_switch_organisation(module, data['grafana_url'], data['org_id'], headers)
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], data['slug'], headers=headers)
if dashboard_exists is True:
try:
with open(data['path'], 'w') as f:
f.write(json.dumps(dashboard))
except Exception as e:
raise GrafanaExportException("Can't write json file : %s" % str(e))
result = {'msg': "Dashboard %s exported to %s" % (data['slug'], data['path']),
'slug': data['slug'],
'changed': True}
else:
        result = {'msg': "Dashboard %s does not exist" % data['slug'],
'slug': data['slug'],
'changed': False}
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent', 'export'],
default='present'),
grafana_url=dict(required=True),
grafana_user=dict(default='admin'),
grafana_password=dict(default='admin', no_log=True),
grafana_api_key=dict(type='str', no_log=True),
org_id=dict(default=1, type='int'),
slug=dict(type='str'),
path=dict(type='str'),
overwrite=dict(type='bool', default=False),
message=dict(type='str'),<|fim▁hole|> mutually_exclusive=[['grafana_user', 'grafana_api_key']],
)
try:
if module.params['state'] == 'present':
result = grafana_create_dashboard(module, module.params)
elif module.params['state'] == 'absent':
result = grafana_delete_dashboard(module, module.params)
else:
result = grafana_export_dashboard(module, module.params)
except GrafanaAPIException as e:
module.fail_json(
failed=True,
msg="error : %s" % e
)
return
except GrafanaMalformedJson as e:
module.fail_json(
failed=True,
msg="error : json file does not contain a meta section with a slug parameter, or you did'nt specify the slug parameter"
)
return
except GrafanaExportException as e:
module.fail_json(
failed=True,
msg="error : json file cannot be written : %s" % str(e)
)
return
module.exit_json(
failed=False,
**result
)
return
if __name__ == '__main__':
main()<|fim▁end|> | validate_certs=dict(type='bool', default=True)
),
supports_check_mode=False,
required_together=[['grafana_user', 'grafana_password', 'org_id']], |
<|file_name|>15.2.3.3-4-230.js<|end_file_name|><|fim▁begin|>/// Copyright (c) 2012 Ecma International. All rights reserved.
/**
* @path ch15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-230.js
* @description Object.getOwnPropertyDescriptor - ensure that 'writable' property of returned object is data property with correct 'configurable' attribute
*/
function testcase() {
var obj = { "property": "ownDataProperty" };<|fim▁hole|>
var desc = Object.getOwnPropertyDescriptor(obj, "property");
var propDefined = ("writable" in desc);
try {
delete desc.writable;
var propDeleted = "writable" in desc;
return propDefined && !propDeleted;
} catch (e) {
return false;
}
}
runTestCase(testcase);<|fim▁end|> | |
<|file_name|>opcodes.py<|end_file_name|><|fim▁begin|>#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""OpCodes module
This module implements the data structures which define the cluster
operations - the so-called opcodes.
Every operation which modifies the cluster state is expressed via
opcodes.
"""
# these are practically structures, so disable the message about too
# few public methods:
# pylint: disable-msg=R0903
import logging
import re
import operator
from ganeti import constants
from ganeti import errors
from ganeti import ht
# Common opcode attributes
#: output fields for a query operation
_POutputFields = ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"Selected output fields")
#: the shutdown timeout
_PShutdownTimeout = \
("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TPositiveInt,
"How long to wait for instance to shut down")
#: the force parameter
_PForce = ("force", False, ht.TBool, "Whether to force the operation")
#: a required instance name (for single-instance LUs)
_PInstanceName = ("instance_name", ht.NoDefault, ht.TNonEmptyString,
"Instance name")
#: Whether to ignore offline nodes
_PIgnoreOfflineNodes = ("ignore_offline_nodes", False, ht.TBool,
"Whether to ignore offline nodes")
#: a required node name (for single-node LUs)
_PNodeName = ("node_name", ht.NoDefault, ht.TNonEmptyString, "Node name")
#: a required node group name (for single-group LUs)
_PGroupName = ("group_name", ht.NoDefault, ht.TNonEmptyString, "Group name")
#: Migration type (live/non-live)
_PMigrationMode = ("mode", None,
ht.TOr(ht.TNone, ht.TElemOf(constants.HT_MIGRATION_MODES)),
"Migration mode")
#: Obsolete 'live' migration mode (boolean)
_PMigrationLive = ("live", None, ht.TMaybeBool,
"Legacy setting for live migration, do not use")
#: Tag type
_PTagKind = ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES), None)
#: List of tag strings
_PTags = ("tags", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), None)
_PForceVariant = ("force_variant", False, ht.TBool,
"Whether to force an unknown OS variant")
_PWaitForSync = ("wait_for_sync", True, ht.TBool,
"Whether to wait for the disk to synchronize")
_PIgnoreConsistency = ("ignore_consistency", False, ht.TBool,
"Whether to ignore disk consistency")
_PStorageName = ("name", ht.NoDefault, ht.TMaybeString, "Storage name")
_PUseLocking = ("use_locking", False, ht.TBool,
"Whether to use synchronization")
_PNameCheck = ("name_check", True, ht.TBool, "Whether to check name")
_PNodeGroupAllocPolicy = \
("alloc_policy", None,
ht.TOr(ht.TNone, ht.TElemOf(constants.VALID_ALLOC_POLICIES)),
"Instance allocation policy")
_PGroupNodeParams = ("ndparams", None, ht.TMaybeDict,
"Default node parameters for group")
_PQueryWhat = ("what", ht.NoDefault, ht.TElemOf(constants.QR_VIA_OP),
"Resource(s) to query for")
_PIpCheckDoc = "Whether to ensure instance's IP address is inactive"
#: Do not remember instance state changes
_PNoRemember = ("no_remember", False, ht.TBool,
"Do not remember the state change")
#: Target node for instance migration/failover
_PMigrationTargetNode = ("target_node", None, ht.TMaybeString,
"Target node for shared-storage instances")
#: OP_ID conversion regular expression
_OPID_RE = re.compile("([a-z])([A-Z])")
#: Utility function for L{OpClusterSetParams}
_TestClusterOsList = ht.TOr(ht.TNone,
ht.TListOf(ht.TAnd(ht.TList, ht.TIsLength(2),
ht.TMap(ht.WithDesc("GetFirstItem")(operator.itemgetter(0)),
ht.TElemOf(constants.DDMS_VALUES)))))
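# For example (illustrative), [["add", "my-os"]] is a valid value; the OS
# name "my-os" is hypothetical.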
# TODO: Generate check from constants.INIC_PARAMS_TYPES
#: Utility function for testing NIC definitions
_TestNicDef = ht.TDictOf(ht.TElemOf(constants.INIC_PARAMS),
ht.TOr(ht.TNone, ht.TNonEmptyString))
_SUMMARY_PREFIX = {
"CLUSTER_": "C_",
"GROUP_": "G_",
"NODE_": "N_",
"INSTANCE_": "I_",
}
def _NameToId(name):
"""Convert an opcode class name to an OP_ID.
@type name: string
@param name: the class name, as OpXxxYyy
@rtype: string
@return: the name in the OP_XXXX_YYYY format
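  For example (illustrative), "OpInstanceSetParams" becomes
  "OP_INSTANCE_SET_PARAMS".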
"""
if not name.startswith("Op"):
return None
# Note: (?<=[a-z])(?=[A-Z]) would be ideal, since it wouldn't
# consume any input, and hence we would just have all the elements
# in the list, one by one; but it seems that split doesn't work on
# non-consuming input, hence we have to process the input string a
# bit
name = _OPID_RE.sub(r"\1,\2", name)
elems = name.split(",")
return "_".join(n.upper() for n in elems)
def RequireFileStorage():
"""Checks that file storage is enabled.
While it doesn't really fit into this module, L{utils} was deemed too large
of a dependency to be imported for just one or two functions.
@raise errors.OpPrereqError: when file storage is disabled
"""
if not constants.ENABLE_FILE_STORAGE:
raise errors.OpPrereqError("File storage disabled at configure time",
errors.ECODE_INVAL)
def RequireSharedFileStorage():
"""Checks that shared file storage is enabled.
While it doesn't really fit into this module, L{utils} was deemed too large
of a dependency to be imported for just one or two functions.
@raise errors.OpPrereqError: when shared file storage is disabled
"""
if not constants.ENABLE_SHARED_FILE_STORAGE:
raise errors.OpPrereqError("Shared file storage disabled at"
" configure time", errors.ECODE_INVAL)
@ht.WithDesc("CheckFileStorage")
def _CheckFileStorage(value):
"""Ensures file storage is enabled if used.
"""
if value == constants.DT_FILE:
RequireFileStorage()
elif value == constants.DT_SHARED_FILE:
RequireSharedFileStorage()
return True
_CheckDiskTemplate = ht.TAnd(ht.TElemOf(constants.DISK_TEMPLATES),
_CheckFileStorage)
def _CheckStorageType(storage_type):
"""Ensure a given storage type is valid.
"""
if storage_type not in constants.VALID_STORAGE_TYPES:
raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
errors.ECODE_INVAL)
if storage_type == constants.ST_FILE:
RequireFileStorage()
return True
#: Storage type parameter
_PStorageType = ("storage_type", ht.NoDefault, _CheckStorageType,
"Storage type")
class _AutoOpParamSlots(type):
"""Meta class for opcode definitions.
"""
def __new__(mcs, name, bases, attrs):
"""Called when a class should be created.
@param mcs: The meta class
@param name: Name of created class
@param bases: Base classes
@type attrs: dict
@param attrs: Class attributes
"""
assert "__slots__" not in attrs, \
"Class '%s' defines __slots__ when it should use OP_PARAMS" % name
assert "OP_ID" not in attrs, "Class '%s' defining OP_ID" % name
attrs["OP_ID"] = _NameToId(name)
# Always set OP_PARAMS to avoid duplicates in BaseOpCode.GetAllParams
params = attrs.setdefault("OP_PARAMS", [])
# Use parameter names as slots
slots = [pname for (pname, _, _, _) in params]
assert "OP_DSC_FIELD" not in attrs or attrs["OP_DSC_FIELD"] in slots, \
"Class '%s' uses unknown field in OP_DSC_FIELD" % name
attrs["__slots__"] = slots
return type.__new__(mcs, name, bases, attrs)
class BaseOpCode(object):
"""A simple serializable object.
This object serves as a parent class for OpCode without any custom
field handling.
"""
# pylint: disable-msg=E1101
# as OP_ID is dynamically defined
__metaclass__ = _AutoOpParamSlots
def __init__(self, **kwargs):
"""Constructor for BaseOpCode.
The constructor takes only keyword arguments and will set
attributes on this object based on the passed arguments. As such,
it means that you should not pass arguments which are not in the
__slots__ attribute for this class.
"""
slots = self._all_slots()
for key in kwargs:
if key not in slots:
raise TypeError("Object %s doesn't support the parameter '%s'" %
(self.__class__.__name__, key))
setattr(self, key, kwargs[key])
def __getstate__(self):
"""Generic serializer.
This method just returns the contents of the instance as a
dictionary.
@rtype: C{dict}
@return: the instance attributes and their values
"""
state = {}
for name in self._all_slots():
if hasattr(self, name):
state[name] = getattr(self, name)
return state
def __setstate__(self, state):
"""Generic unserializer.
This method just restores from the serialized state the attributes
of the current instance.
@param state: the serialized opcode data
@type state: C{dict}
"""
if not isinstance(state, dict):
raise ValueError("Invalid data to __setstate__: expected dict, got %s" %
type(state))
for name in self._all_slots():
if name not in state and hasattr(self, name):
delattr(self, name)
for name in state:
setattr(self, name, state[name])
@classmethod
def _all_slots(cls):
"""Compute the list of all declared slots for a class.
"""
slots = []
for parent in cls.__mro__:
slots.extend(getattr(parent, "__slots__", []))
return slots
@classmethod
def GetAllParams(cls):
"""Compute list of all parameters for an opcode.
"""
slots = []
for parent in cls.__mro__:
slots.extend(getattr(parent, "OP_PARAMS", []))
return slots
def Validate(self, set_defaults):
"""Validate opcode parameters, optionally setting default values.
@type set_defaults: bool
@param set_defaults: Whether to set default values
@raise errors.OpPrereqError: When a parameter value doesn't match
requirements
"""
for (attr_name, default, test, _) in self.GetAllParams():
assert test == ht.NoType or callable(test)
if not hasattr(self, attr_name):
if default == ht.NoDefault:
raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
(self.OP_ID, attr_name),
errors.ECODE_INVAL)
elif set_defaults:
if callable(default):
dval = default()
else:
dval = default
setattr(self, attr_name, dval)
if test == ht.NoType:
# no tests here
continue
if set_defaults or hasattr(self, attr_name):
attr_val = getattr(self, attr_name)
if not test(attr_val):
logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
self.OP_ID, attr_name, type(attr_val), attr_val)<|fim▁hole|> raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
(self.OP_ID, attr_name),
errors.ECODE_INVAL)
class OpCode(BaseOpCode):
"""Abstract OpCode.
  This is the root of the actual OpCode hierarchy. All classes derived
from this class should override OP_ID.
@cvar OP_ID: The ID of this opcode. This should be unique amongst all
children of this class.
@cvar OP_DSC_FIELD: The name of a field whose value will be included in the
string returned by Summary(); see the docstring of that
                      method for details.
@cvar OP_PARAMS: List of opcode attributes, the default values they should
get if not already defined, and types they must match.
@cvar WITH_LU: Boolean that specifies whether this should be included in
mcpu's dispatch table
@ivar dry_run: Whether the LU should be run in dry-run mode, i.e. just
the check steps
@ivar priority: Opcode priority for queue
"""
# pylint: disable-msg=E1101
# as OP_ID is dynamically defined
WITH_LU = True
OP_PARAMS = [
("dry_run", None, ht.TMaybeBool, "Run checks only, don't execute"),
("debug_level", None, ht.TOr(ht.TNone, ht.TPositiveInt), "Debug level"),
("priority", constants.OP_PRIO_DEFAULT,
ht.TElemOf(constants.OP_PRIO_SUBMIT_VALID), "Opcode priority"),
]
def __getstate__(self):
"""Specialized getstate for opcodes.
This method adds to the state dictionary the OP_ID of the class,
so that on unload we can identify the correct class for
instantiating the opcode.
@rtype: C{dict}
@return: the state as a dictionary
"""
data = BaseOpCode.__getstate__(self)
data["OP_ID"] = self.OP_ID
return data
@classmethod
def LoadOpCode(cls, data):
"""Generic load opcode method.
The method identifies the correct opcode class from the dict-form
    by looking for an OP_ID key; if this is not found, or its value is
not available in this module as a child of this class, we fail.
@type data: C{dict}
@param data: the serialized opcode
"""
if not isinstance(data, dict):
raise ValueError("Invalid data to LoadOpCode (%s)" % type(data))
if "OP_ID" not in data:
raise ValueError("Invalid data to LoadOpcode, missing OP_ID")
op_id = data["OP_ID"]
op_class = None
if op_id in OP_MAPPING:
op_class = OP_MAPPING[op_id]
else:
raise ValueError("Invalid data to LoadOpCode: OP_ID %s unsupported" %
op_id)
op = op_class()
new_data = data.copy()
del new_data["OP_ID"]
op.__setstate__(new_data)
return op
def Summary(self):
"""Generates a summary description of this opcode.
The summary is the value of the OP_ID attribute (without the "OP_"
prefix), plus the value of the OP_DSC_FIELD attribute, if one was
    defined; this field should make it easy to identify the operation
    (for an instance creation job, e.g., it would be the instance
name).
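    For example (illustrative), an L{OpInstanceStartup} opcode with
    instance_name "web1" is summarized as "INSTANCE_STARTUP(web1)".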
"""
assert self.OP_ID is not None and len(self.OP_ID) > 3
# all OP_ID start with OP_, we remove that
txt = self.OP_ID[3:]
field_name = getattr(self, "OP_DSC_FIELD", None)
if field_name:
field_value = getattr(self, field_name, None)
if isinstance(field_value, (list, tuple)):
field_value = ",".join(str(i) for i in field_value)
txt = "%s(%s)" % (txt, field_value)
return txt
def TinySummary(self):
"""Generates a compact summary description of the opcode.
"""
assert self.OP_ID.startswith("OP_")
text = self.OP_ID[3:]
for (prefix, supplement) in _SUMMARY_PREFIX.items():
if text.startswith(prefix):
return supplement + text[len(prefix):]
return text
# cluster opcodes
class OpClusterPostInit(OpCode):
"""Post cluster initialization.
This opcode does not touch the cluster at all. Its purpose is to run hooks
after the cluster has been initialized.
"""
class OpClusterDestroy(OpCode):
"""Destroy the cluster.
This opcode has no other parameters. All the state is irreversibly
lost after the execution of this opcode.
"""
class OpClusterQuery(OpCode):
"""Query cluster information."""
class OpClusterVerifyConfig(OpCode):
"""Verify the cluster config.
"""
OP_PARAMS = [
("verbose", False, ht.TBool, None),
("error_codes", False, ht.TBool, None),
("debug_simulate_errors", False, ht.TBool, None),
]
class OpClusterVerifyGroup(OpCode):
"""Run verify on a node group from the cluster.
@type skip_checks: C{list}
@ivar skip_checks: steps to be skipped from the verify process; this
needs to be a subset of
L{constants.VERIFY_OPTIONAL_CHECKS}; currently
only L{constants.VERIFY_NPLUSONE_MEM} can be passed
"""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
("group_name", ht.NoDefault, ht.TNonEmptyString, None),
("skip_checks", ht.EmptyList,
ht.TListOf(ht.TElemOf(constants.VERIFY_OPTIONAL_CHECKS)), None),
("verbose", False, ht.TBool, None),
("error_codes", False, ht.TBool, None),
("debug_simulate_errors", False, ht.TBool, None),
]
class OpClusterVerifyDisks(OpCode):
"""Verify the cluster disks.
Parameters: none
Result: a tuple of four elements:
- list of node names with bad data returned (unreachable, etc.)
- dict of node names with broken volume groups (values: error msg)
- list of instances with degraded disks (that should be activated)
- dict of instances with missing logical volumes (values: (node, vol)
pairs with details about the missing volumes)
In normal operation, all lists should be empty. A non-empty instance
list (3rd element of the result) is still ok (errors were fixed) but
non-empty node list means some node is down, and probably there are
unfixable drbd errors.
Note that only instances that are drbd-based are taken into
consideration. This might need to be revisited in the future.
"""
class OpClusterRepairDiskSizes(OpCode):
"""Verify the disk sizes of the instances and fixes configuration
mimatches.
Parameters: optional instances list, in case we want to restrict the
checks to only a subset of the instances.
Result: a list of tuples, (instance, disk, new-size) for changed
configurations.
In normal operation, the list should be empty.
@type instances: list
@ivar instances: the list of instances to check, or empty for all instances
"""
OP_PARAMS = [
("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
]
class OpClusterConfigQuery(OpCode):
"""Query cluster configuration values."""
OP_PARAMS = [
_POutputFields
]
class OpClusterRename(OpCode):
"""Rename the cluster.
@type name: C{str}
@ivar name: The new name of the cluster. The name and/or the master IP
address will be changed to match the new name and its IP
address.
"""
OP_DSC_FIELD = "name"
OP_PARAMS = [
("name", ht.NoDefault, ht.TNonEmptyString, None),
]
class OpClusterSetParams(OpCode):
"""Change the parameters of the cluster.
@type vg_name: C{str} or C{None}
@ivar vg_name: The new volume group name or None to disable LVM usage.
"""
OP_PARAMS = [
("vg_name", None, ht.TMaybeString, "Volume group name"),
("enabled_hypervisors", None,
ht.TOr(ht.TAnd(ht.TListOf(ht.TElemOf(constants.HYPER_TYPES)), ht.TTrue),
ht.TNone),
"List of enabled hypervisors"),
("hvparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone),
"Cluster-wide hypervisor parameter defaults, hypervisor-dependent"),
("beparams", None, ht.TOr(ht.TDict, ht.TNone),
"Cluster-wide backend parameter defaults"),
("os_hvp", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone),
"Cluster-wide per-OS hypervisor parameter defaults"),
("osparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone),
"Cluster-wide OS parameter defaults"),
("candidate_pool_size", None, ht.TOr(ht.TStrictPositiveInt, ht.TNone),
"Master candidate pool size"),
("uid_pool", None, ht.NoType,
"Set UID pool, must be list of lists describing UID ranges (two items,"
" start and end inclusive)"),
("add_uids", None, ht.NoType,
"Extend UID pool, must be list of lists describing UID ranges (two"
" items, start and end inclusive) to be added"),
("remove_uids", None, ht.NoType,
"Shrink UID pool, must be list of lists describing UID ranges (two"
" items, start and end inclusive) to be removed"),
("maintain_node_health", None, ht.TMaybeBool,
"Whether to automatically maintain node health"),
("prealloc_wipe_disks", None, ht.TMaybeBool,
"Whether to wipe disks before allocating them to instances"),
("nicparams", None, ht.TMaybeDict, "Cluster-wide NIC parameter defaults"),
("ndparams", None, ht.TMaybeDict, "Cluster-wide node parameter defaults"),
("drbd_helper", None, ht.TOr(ht.TString, ht.TNone), "DRBD helper program"),
("default_iallocator", None, ht.TOr(ht.TString, ht.TNone),
"Default iallocator for cluster"),
("master_netdev", None, ht.TOr(ht.TString, ht.TNone),
"Master network device"),
("reserved_lvs", None, ht.TOr(ht.TListOf(ht.TNonEmptyString), ht.TNone),
"List of reserved LVs"),
("hidden_os", None, _TestClusterOsList,
"Modify list of hidden operating systems. Each modification must have"
" two items, the operation and the OS name. The operation can be"
" ``%s`` or ``%s``." % (constants.DDM_ADD, constants.DDM_REMOVE)),
("blacklisted_os", None, _TestClusterOsList,
"Modify list of blacklisted operating systems. Each modification must have"
" two items, the operation and the OS name. The operation can be"
" ``%s`` or ``%s``." % (constants.DDM_ADD, constants.DDM_REMOVE)),
]
class OpClusterRedistConf(OpCode):
"""Force a full push of the cluster configuration.
"""
class OpQuery(OpCode):
"""Query for resources/items.
@ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP}
@ivar fields: List of fields to retrieve
@ivar filter: Query filter
"""
OP_PARAMS = [
_PQueryWhat,
("fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"Requested fields"),
("filter", None, ht.TOr(ht.TNone, ht.TListOf),
"Query filter"),
]
class OpQueryFields(OpCode):
"""Query for available resource/item fields.
@ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP}
@ivar fields: List of fields to retrieve
"""
OP_PARAMS = [
_PQueryWhat,
("fields", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
"Requested fields; if not given, all are returned"),
]
class OpOobCommand(OpCode):
"""Interact with OOB."""
OP_PARAMS = [
("node_names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"List of nodes to run the OOB command against"),
("command", None, ht.TElemOf(constants.OOB_COMMANDS),
"OOB command to be run"),
("timeout", constants.OOB_TIMEOUT, ht.TInt,
"Timeout before the OOB helper will be terminated"),
("ignore_status", False, ht.TBool,
"Ignores the node offline status for power off"),
("power_delay", constants.OOB_POWER_DELAY, ht.TPositiveFloat,
"Time in seconds to wait between powering on nodes"),
]
# node opcodes
class OpNodeRemove(OpCode):
"""Remove a node.
@type node_name: C{str}
@ivar node_name: The name of the node to remove. If the node still has
instances on it, the operation will fail.
"""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
]
class OpNodeAdd(OpCode):
"""Add a node to the cluster.
@type node_name: C{str}
@ivar node_name: The name of the node to add. This can be a short name,
but it will be expanded to the FQDN.
@type primary_ip: IP address
@ivar primary_ip: The primary IP of the node. This will be ignored when the
opcode is submitted, but will be filled during the node
add (so it will be visible in the job query).
@type secondary_ip: IP address
@ivar secondary_ip: The secondary IP of the node. This needs to be passed
if the cluster has been initialized in 'dual-network'
mode, otherwise it must not be given.
@type readd: C{bool}
@ivar readd: Whether to re-add an existing node to the cluster. If
this is not passed, then the operation will abort if the node
name is already in the cluster; use this parameter to 'repair'
a node that had its configuration broken, or was reinstalled
without removal from the cluster.
@type group: C{str}
@ivar group: The node group to which this node will belong.
@type vm_capable: C{bool}
@ivar vm_capable: The vm_capable node attribute
@type master_capable: C{bool}
@ivar master_capable: The master_capable node attribute
"""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
("primary_ip", None, ht.NoType, "Primary IP address"),
("secondary_ip", None, ht.TMaybeString, "Secondary IP address"),
("readd", False, ht.TBool, "Whether node is re-added to cluster"),
("group", None, ht.TMaybeString, "Initial node group"),
("master_capable", None, ht.TMaybeBool,
"Whether node can become master or master candidate"),
("vm_capable", None, ht.TMaybeBool,
"Whether node can host instances"),
("ndparams", None, ht.TMaybeDict, "Node parameters"),
]
class OpNodeQuery(OpCode):
"""Compute the list of nodes."""
OP_PARAMS = [
_POutputFields,
_PUseLocking,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all nodes, node names otherwise"),
]
class OpNodeQueryvols(OpCode):
"""Get list of volumes on node."""
OP_PARAMS = [
_POutputFields,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all nodes, node names otherwise"),
]
class OpNodeQueryStorage(OpCode):
"""Get information on storage for node(s)."""
OP_PARAMS = [
_POutputFields,
_PStorageType,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "List of nodes"),
("name", None, ht.TMaybeString, "Storage name"),
]
class OpNodeModifyStorage(OpCode):
"""Modifies the properies of a storage unit"""
OP_PARAMS = [
_PNodeName,
_PStorageType,
_PStorageName,
("changes", ht.NoDefault, ht.TDict, "Requested changes"),
]
class OpRepairNodeStorage(OpCode):
"""Repairs the volume group on a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PStorageType,
_PStorageName,
_PIgnoreConsistency,
]
class OpNodeSetParams(OpCode):
"""Change the parameters of a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PForce,
("master_candidate", None, ht.TMaybeBool,
"Whether the node should become a master candidate"),
("offline", None, ht.TMaybeBool,
"Whether the node should be marked as offline"),
("drained", None, ht.TMaybeBool,
"Whether the node should be marked as drained"),
("auto_promote", False, ht.TBool,
"Whether node(s) should be promoted to master candidate if necessary"),
("master_capable", None, ht.TMaybeBool,
"Denote whether node can become master or master candidate"),
("vm_capable", None, ht.TMaybeBool,
"Denote whether node can host instances"),
("secondary_ip", None, ht.TMaybeString,
"Change node's secondary IP address"),
("ndparams", None, ht.TMaybeDict, "Set node parameters"),
("powered", None, ht.TMaybeBool,
"Whether the node should be marked as powered"),
]
class OpNodePowercycle(OpCode):
"""Tries to powercycle a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PForce,
]
class OpNodeMigrate(OpCode):
"""Migrate all instances from a node."""
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
_PMigrationMode,
_PMigrationLive,
_PMigrationTargetNode,
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding the target node for shared-storage instances"),
]
class OpNodeEvacStrategy(OpCode):
"""Compute the evacuation strategy for a list of nodes."""
OP_DSC_FIELD = "nodes"
OP_PARAMS = [
("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), None),
("remote_node", None, ht.TMaybeString, None),
("iallocator", None, ht.TMaybeString, None),
]
# instance opcodes
class OpInstanceCreate(OpCode):
"""Create an instance.
@ivar instance_name: Instance name
@ivar mode: Instance creation mode (one of L{constants.INSTANCE_CREATE_MODES})
@ivar source_handshake: Signed handshake from source (remote import only)
@ivar source_x509_ca: Source X509 CA in PEM format (remote import only)
@ivar source_instance_name: Previous name of instance (remote import only)
@ivar source_shutdown_timeout: Shutdown timeout used for source instance
(remote import only)
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForceVariant,
_PWaitForSync,
_PNameCheck,
("beparams", ht.EmptyDict, ht.TDict, "Backend parameters for instance"),
("disks", ht.NoDefault,
# TODO: Generate check from constants.IDISK_PARAMS_TYPES
ht.TListOf(ht.TDictOf(ht.TElemOf(constants.IDISK_PARAMS),
ht.TOr(ht.TNonEmptyString, ht.TInt))),
"Disk descriptions, for example ``[{\"%s\": 100}, {\"%s\": 5}]``;"
" each disk definition must contain a ``%s`` value and"
" can contain an optional ``%s`` value denoting the disk access mode"
" (%s)" %
(constants.IDISK_SIZE, constants.IDISK_SIZE, constants.IDISK_SIZE,
constants.IDISK_MODE,
" or ".join("``%s``" % i for i in sorted(constants.DISK_ACCESS_SET)))),
("disk_template", ht.NoDefault, _CheckDiskTemplate, "Disk template"),
("file_driver", None, ht.TOr(ht.TNone, ht.TElemOf(constants.FILE_DRIVER)),
"Driver for file-backed disks"),
("file_storage_dir", None, ht.TMaybeString,
"Directory for storing file-backed disks"),
("hvparams", ht.EmptyDict, ht.TDict,
"Hypervisor parameters for instance, hypervisor-dependent"),
("hypervisor", None, ht.TMaybeString, "Hypervisor"),
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding which node(s) to use"),
("identify_defaults", False, ht.TBool,
"Reset instance parameters to default if equal"),
("ip_check", True, ht.TBool, _PIpCheckDoc),
("mode", ht.NoDefault, ht.TElemOf(constants.INSTANCE_CREATE_MODES),
"Instance creation mode"),
("nics", ht.NoDefault, ht.TListOf(_TestNicDef),
"List of NIC (network interface) definitions, for example"
" ``[{}, {}, {\"%s\": \"198.51.100.4\"}]``; each NIC definition can"
" contain the optional values %s" %
(constants.INIC_IP,
", ".join("``%s``" % i for i in sorted(constants.INIC_PARAMS)))),
("no_install", None, ht.TMaybeBool,
"Do not install the OS (will disable automatic start)"),
("osparams", ht.EmptyDict, ht.TDict, "OS parameters for instance"),
("os_type", None, ht.TMaybeString, "Operating system"),
("pnode", None, ht.TMaybeString, "Primary node"),
("snode", None, ht.TMaybeString, "Secondary node"),
("source_handshake", None, ht.TOr(ht.TList, ht.TNone),
"Signed handshake from source (remote import only)"),
("source_instance_name", None, ht.TMaybeString,
"Source instance name (remote import only)"),
("source_shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT,
ht.TPositiveInt,
"How long source instance was given to shut down (remote import only)"),
("source_x509_ca", None, ht.TMaybeString,
"Source X509 CA in PEM format (remote import only)"),
("src_node", None, ht.TMaybeString, "Source node for import"),
("src_path", None, ht.TMaybeString, "Source directory for import"),
("start", True, ht.TBool, "Whether to start instance after creation"),
("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Instance tags"),
]
class OpInstanceReinstall(OpCode):
"""Reinstall an instance's OS."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForceVariant,
("os_type", None, ht.TMaybeString, "Instance operating system"),
("osparams", None, ht.TMaybeDict, "Temporary OS parameters"),
]
class OpInstanceRemove(OpCode):
"""Remove an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
("ignore_failures", False, ht.TBool,
"Whether to ignore failures during removal"),
]
class OpInstanceRename(OpCode):
"""Rename an instance."""
OP_PARAMS = [
_PInstanceName,
_PNameCheck,
("new_name", ht.NoDefault, ht.TNonEmptyString, "New instance name"),
("ip_check", False, ht.TBool, _PIpCheckDoc),
]
class OpInstanceStartup(OpCode):
"""Startup an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForce,
_PIgnoreOfflineNodes,
("hvparams", ht.EmptyDict, ht.TDict,
"Temporary hypervisor parameters, hypervisor-dependent"),
("beparams", ht.EmptyDict, ht.TDict, "Temporary backend parameters"),
_PNoRemember,
]
class OpInstanceShutdown(OpCode):
"""Shutdown an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PIgnoreOfflineNodes,
("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TPositiveInt,
"How long to wait for instance to shut down"),
_PNoRemember,
]
class OpInstanceReboot(OpCode):
"""Reboot an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
("ignore_secondaries", False, ht.TBool,
"Whether to start the instance even if secondary disks are failing"),
("reboot_type", ht.NoDefault, ht.TElemOf(constants.REBOOT_TYPES),
"How to reboot instance"),
]
class OpInstanceReplaceDisks(OpCode):
"""Replace the disks of an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("mode", ht.NoDefault, ht.TElemOf(constants.REPLACE_MODES),
"Replacement mode"),
("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
"Disk indexes"),
("remote_node", None, ht.TMaybeString, "New secondary node"),
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding new secondary node"),
("early_release", False, ht.TBool,
"Whether to release locks as soon as possible"),
]
class OpInstanceFailover(OpCode):
"""Failover an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
_PIgnoreConsistency,
_PMigrationTargetNode,
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding the target node for shared-storage instances"),
]
class OpInstanceMigrate(OpCode):
"""Migrate an instance.
  This migrates an instance (without shutting it down) to its
  secondary node.
@ivar instance_name: the name of the instance
@ivar mode: the migration mode (live, non-live or None for auto)
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PMigrationMode,
_PMigrationLive,
_PMigrationTargetNode,
("cleanup", False, ht.TBool,
"Whether a previously failed migration should be cleaned up"),
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding the target node for shared-storage instances"),
("allow_failover", False, ht.TBool,
"Whether we can fallback to failover if migration is not possible"),
]
class OpInstanceMove(OpCode):
"""Move an instance.
  This moves an instance (shutting it down and copying its data) to an
  arbitrary node.
@ivar instance_name: the name of the instance
@ivar target_node: the destination node
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
("target_node", ht.NoDefault, ht.TNonEmptyString, "Target node"),
_PIgnoreConsistency,
]
class OpInstanceConsole(OpCode):
"""Connect to an instance's console."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName
]
class OpInstanceActivateDisks(OpCode):
"""Activate an instance's disks."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("ignore_size", False, ht.TBool, "Whether to ignore recorded size"),
]
class OpInstanceDeactivateDisks(OpCode):
"""Deactivate an instance's disks."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForce,
]
class OpInstanceRecreateDisks(OpCode):
"""Deactivate an instance's disks."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
"List of disk indexes"),
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"New instance nodes, if relocation is desired"),
]
class OpInstanceQuery(OpCode):
"""Compute the list of instances."""
OP_PARAMS = [
_POutputFields,
_PUseLocking,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all instances, instance names otherwise"),
]
class OpInstanceQueryData(OpCode):
"""Compute the run-time status of instances."""
OP_PARAMS = [
_PUseLocking,
("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Instance names"),
("static", False, ht.TBool,
"Whether to only return configuration data without querying"
" nodes"),
]
class OpInstanceSetParams(OpCode):
"""Change the parameters of an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PForce,
_PForceVariant,
# TODO: Use _TestNicDef
("nics", ht.EmptyList, ht.TList,
"List of NIC changes. Each item is of the form ``(op, settings)``."
" ``op`` can be ``%s`` to add a new NIC with the specified settings,"
" ``%s`` to remove the last NIC or a number to modify the settings"
" of the NIC with that index." %
(constants.DDM_ADD, constants.DDM_REMOVE)),
("disks", ht.EmptyList, ht.TList, "List of disk changes. See ``nics``."),
("beparams", ht.EmptyDict, ht.TDict, "Per-instance backend parameters"),
("hvparams", ht.EmptyDict, ht.TDict,
"Per-instance hypervisor parameters, hypervisor-dependent"),
("disk_template", None, ht.TOr(ht.TNone, _CheckDiskTemplate),
"Disk template for instance"),
("remote_node", None, ht.TMaybeString,
"Secondary node (used when changing disk template)"),
("os_name", None, ht.TMaybeString,
"Change instance's OS name. Does not reinstall the instance."),
("osparams", None, ht.TMaybeDict, "Per-instance OS parameters"),
("wait_for_sync", True, ht.TBool,
"Whether to wait for the disk to synchronize, when changing template"),
]
class OpInstanceGrowDisk(OpCode):
"""Grow a disk of an instance."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PWaitForSync,
("disk", ht.NoDefault, ht.TInt, "Disk index"),
("amount", ht.NoDefault, ht.TInt,
"Amount of disk space to add (megabytes)"),
]
# Node group opcodes
class OpGroupAdd(OpCode):
"""Add a node group to the cluster."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
]
class OpGroupAssignNodes(OpCode):
"""Assign nodes to a node group."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PForce,
("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"List of nodes to assign"),
]
class OpGroupQuery(OpCode):
"""Compute the list of node groups."""
OP_PARAMS = [
_POutputFields,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all groups, group names otherwise"),
]
class OpGroupSetParams(OpCode):
"""Change the parameters of a node group."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
]
class OpGroupRemove(OpCode):
"""Remove a node group from the cluster."""
OP_DSC_FIELD = "group_name"
OP_PARAMS = [
_PGroupName,
]
class OpGroupRename(OpCode):
"""Rename a node group in the cluster."""
OP_PARAMS = [
_PGroupName,
("new_name", ht.NoDefault, ht.TNonEmptyString, "New group name"),
]
# OS opcodes
class OpOsDiagnose(OpCode):
"""Compute the list of guest operating systems."""
OP_PARAMS = [
_POutputFields,
("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Which operating systems to diagnose"),
]
# Exports opcodes
class OpBackupQuery(OpCode):
"""Compute the list of exported images."""
OP_PARAMS = [
_PUseLocking,
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"Empty list to query all nodes, node names otherwise"),
]
class OpBackupPrepare(OpCode):
"""Prepares an instance export.
@ivar instance_name: Instance name
@ivar mode: Export mode (one of L{constants.EXPORT_MODES})
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("mode", ht.NoDefault, ht.TElemOf(constants.EXPORT_MODES),
"Export mode"),
]
class OpBackupExport(OpCode):
"""Export an instance.
For local exports, the export destination is the node name. For remote
exports, the export destination is a list of tuples, each consisting of
hostname/IP address, port, HMAC and HMAC salt. The HMAC is calculated using
the cluster domain secret over the value "${index}:${hostname}:${port}". The
destination X509 CA must be a signed certificate.
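  For example (illustrative), the first destination tuple
  ("192.0.2.10", 5000, hmac, salt) would have its HMAC computed over the
  string "0:192.0.2.10:5000".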
@ivar mode: Export mode (one of L{constants.EXPORT_MODES})
@ivar target_node: Export destination
@ivar x509_key_name: X509 key to use (remote export only)
@ivar destination_x509_ca: Destination X509 CA in PEM format (remote export
only)
"""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
_PShutdownTimeout,
# TODO: Rename target_node as it changes meaning for different export modes
# (e.g. "destination")
("target_node", ht.NoDefault, ht.TOr(ht.TNonEmptyString, ht.TList),
"Destination information, depends on export mode"),
("shutdown", True, ht.TBool, "Whether to shutdown instance before export"),
("remove_instance", False, ht.TBool,
"Whether to remove instance after export"),
("ignore_remove_failures", False, ht.TBool,
"Whether to ignore failures while removing instances"),
("mode", constants.EXPORT_MODE_LOCAL, ht.TElemOf(constants.EXPORT_MODES),
"Export mode"),
("x509_key_name", None, ht.TOr(ht.TList, ht.TNone),
"Name of X509 key (remote export only)"),
("destination_x509_ca", None, ht.TMaybeString,
"Destination X509 CA (remote export only)"),
]
class OpBackupRemove(OpCode):
"""Remove an instance's export."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
]
# Tags opcodes
class OpTagsGet(OpCode):
"""Returns the tags of the given object."""
OP_DSC_FIELD = "name"
OP_PARAMS = [
_PTagKind,
# Name is only meaningful for nodes and instances
("name", ht.NoDefault, ht.TMaybeString, None),
]
class OpTagsSearch(OpCode):
"""Searches the tags in the cluster for a given pattern."""
OP_DSC_FIELD = "pattern"
OP_PARAMS = [
("pattern", ht.NoDefault, ht.TNonEmptyString, None),
]
class OpTagsSet(OpCode):
"""Add a list of tags on a given object."""
OP_PARAMS = [
_PTagKind,
_PTags,
# Name is only meaningful for nodes and instances
("name", ht.NoDefault, ht.TMaybeString, None),
]
class OpTagsDel(OpCode):
"""Remove a list of tags from a given object."""
OP_PARAMS = [
_PTagKind,
_PTags,
# Name is only meaningful for nodes and instances
("name", ht.NoDefault, ht.TMaybeString, None),
]
# Test opcodes
class OpTestDelay(OpCode):
"""Sleeps for a configured amount of time.
This is used just for debugging and testing.
Parameters:
- duration: the time to sleep
- on_master: if true, sleep on the master
- on_nodes: list of nodes in which to sleep
If the on_master parameter is true, it will execute a sleep on the
master (before any node sleep).
If the on_nodes list is not empty, it will sleep on those nodes
(after the sleep on the master, if that is enabled).
As an additional feature, the case of duration < 0 will be reported
as an execution error, so this opcode can be used as a failure
generator. The case of duration == 0 will not be treated specially.
"""
OP_DSC_FIELD = "duration"
OP_PARAMS = [
("duration", ht.NoDefault, ht.TFloat, None),
("on_master", True, ht.TBool, None),
("on_nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
("repeat", 0, ht.TPositiveInt, None),
]
class OpTestAllocator(OpCode):
"""Allocator framework testing.
This opcode has two modes:
- gather and return allocator input for a given mode (allocate new
or replace secondary) and a given instance definition (direction
'in')
- run a selected allocator for a given operation (as above) and
return the allocator output (direction 'out')
"""
OP_DSC_FIELD = "allocator"
OP_PARAMS = [
("direction", ht.NoDefault,
ht.TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS), None),
("mode", ht.NoDefault, ht.TElemOf(constants.VALID_IALLOCATOR_MODES), None),
("name", ht.NoDefault, ht.TNonEmptyString, None),
("nics", ht.NoDefault, ht.TOr(ht.TNone, ht.TListOf(
ht.TDictOf(ht.TElemOf([constants.INIC_MAC, constants.INIC_IP, "bridge"]),
ht.TOr(ht.TNone, ht.TNonEmptyString)))), None),
("disks", ht.NoDefault, ht.TOr(ht.TNone, ht.TList), None),
("hypervisor", None, ht.TMaybeString, None),
("allocator", None, ht.TMaybeString, None),
("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
("memory", None, ht.TOr(ht.TNone, ht.TPositiveInt), None),
("vcpus", None, ht.TOr(ht.TNone, ht.TPositiveInt), None),
("os", None, ht.TMaybeString, None),
("disk_template", None, ht.TMaybeString, None),
("evac_nodes", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
None),
("instances", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
None),
("evac_mode", None,
ht.TOr(ht.TNone, ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)), None),
("target_groups", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString)),
None),
]
class OpTestJqueue(OpCode):
"""Utility opcode to test some aspects of the job queue.
"""
OP_PARAMS = [
("notify_waitlock", False, ht.TBool, None),
("notify_exec", False, ht.TBool, None),
("log_messages", ht.EmptyList, ht.TListOf(ht.TString), None),
("fail", False, ht.TBool, None),
]
class OpTestDummy(OpCode):
"""Utility opcode used by unittests.
"""
OP_PARAMS = [
("result", ht.NoDefault, ht.NoType, None),
("messages", ht.NoDefault, ht.NoType, None),
("fail", ht.NoDefault, ht.NoType, None),
("submit_jobs", None, ht.NoType, None),
]
WITH_LU = False
def _GetOpList():
"""Returns list of all defined opcodes.
Does not eliminate duplicates by C{OP_ID}.
"""
return [v for v in globals().values()
if (isinstance(v, type) and issubclass(v, OpCode) and
hasattr(v, "OP_ID") and v is not OpCode)]
OP_MAPPING = dict((v.OP_ID, v) for v in _GetOpList())<|fim▁end|> | |
<|file_name|>256-header_op-add2.py<|end_file_name|><|fim▁begin|>from base import *
DIR = "headerop_add2"
HEADERS = [("X-What-Rocks", "Cherokee does"),
("X-What-Sucks", "Failed QA tests"),
("X-What-Is-It", "A successful test")]
CONF = """
vserver!1!rule!2560!match = directory
vserver!1!rule!2560!match!directory = /%(DIR)s
vserver!1!rule!2560!handler = dirlist
"""
class Test (TestBase):<|fim▁hole|> def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "Header Ops: Add multiple headers"
self.request = "GET /%s/ HTTP/1.0\r\n" %(DIR)
self.expected_error = 200
self.conf = CONF%(globals())
n = 2
for h,v in HEADERS:
self.conf += "vserver!1!rule!2560!header_op!%d!type = add\n" %(n)
self.conf += "vserver!1!rule!2560!header_op!%d!header = %s\n" %(n, h)
self.conf += "vserver!1!rule!2560!header_op!%d!value = %s\n" %(n, v)
n += 1
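        # The loop above emits configuration entries like (illustrative,
        # using the first HEADERS pair):
        #   vserver!1!rule!2560!header_op!2!type = add
        #   vserver!1!rule!2560!header_op!2!header = X-What-Rocks
        #   vserver!1!rule!2560!header_op!2!value = Cherokee does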
def CustomTest (self):
header = self.reply[:self.reply.find("\r\n\r\n")+2]
for h,v in HEADERS:
if not "%s: %s\r\n" %(h,v) in header:
return -1
return 0
def Prepare (self, www):
self.Mkdir (www, DIR)<|fim▁end|> | |
<|file_name|>system.py<|end_file_name|><|fim▁begin|># Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from trove.common.i18n import _
from trove.common import utils
ALTER_DB_CFG = "ALTER DATABASE %s SET %s = %s"
ALTER_DB_RESET_CFG = "ALTER DATABASE %s CLEAR %s"
ALTER_USER_PASSWORD = "ALTER USER %s IDENTIFIED BY '%s'"
ADD_DB_TO_NODE = ("/opt/vertica/bin/adminTools -t db_add_node -a"
" %s -d %s -p '%s'")
REMOVE_DB_FROM_NODE = ("/opt/vertica/bin/adminTools -t db_remove_node -s"
" %s -d %s -i -p '%s'")
CREATE_DB = ("/opt/vertica/bin/adminTools -t create_db -s"
" %s -d %s -c %s -D %s -p '%s'")
CREATE_USER = "CREATE USER %s IDENTIFIED BY '%s'"
ENABLE_FOR_USER = "ALTER USER %s DEFAULT ROLE %s"
GRANT_TO_USER = "GRANT %s to %s"
INSTALL_VERTICA = ("/opt/vertica/sbin/install_vertica -s %s"
" -d %s -X -N -S default -r"
" /vertica.deb -L CE -Y --no-system-checks"
" --ignore-aws-instance-type")
MARK_DESIGN_KSAFE = "SELECT MARK_DESIGN_KSAFE(%s)"
NODE_STATUS = "SELECT node_state FROM nodes where node_state <> '%s'"
STOP_DB = "/opt/vertica/bin/adminTools -t stop_db -F -d %s -p '%s'"
START_DB = "/opt/vertica/bin/adminTools -t start_db -d %s -p '%s'"
STATUS_ACTIVE_DB = "/opt/vertica/bin/adminTools -t show_active_db"<|fim▁hole|> "-d %s -p '%s'")
SEND_CONF_TO_SERVER = ("rsync -v -e 'ssh -o "
"UserKnownHostsFile=/dev/null -o "
"StrictHostKeyChecking=no' --perms --owner --group "
"%s %s:%s")
SSH_KEY_GEN = "ssh-keygen -f %s/.ssh/id_rsa -t rsa -N ''"
UPDATE_VERTICA = ("/opt/vertica/sbin/update_vertica %s %s "
" -d %s -X -N -S default -r"
" /vertica.deb -L CE -Y --no-system-checks"
" --ignore-aws-instance-type")
UPDATE_REMOVE = ("/opt/vertica/sbin/update_vertica --remove-hosts %s "
" -d %s -X -N -S default -r"
" /vertica.deb -L CE -Y --no-system-checks"
" --ignore-aws-instance-type")
UPDATE_ADD = ("/opt/vertica/sbin/update_vertica --add-hosts %s "
" -d %s -X -N -S default -r"
" /vertica.deb -L CE -Y --no-system-checks"
" --ignore-aws-instance-type")
USER_EXISTS = ("/opt/vertica/bin/vsql -w '%s' -c "
"\"select 1 from users where user_name = '%s'\" "
"| grep row | awk '{print $1}' | cut -c2-")
VERTICA_ADMIN = "dbadmin"
VERTICA_ADMIN_GRP = "verticadba"
VERTICA_AGENT_SERVICE_COMMAND = "service vertica_agent %s"
VERTICA_CONF = "/etc/vertica.cnf"
INSTALL_TIMEOUT = 1000
CREATE_LIBRARY = "CREATE LIBRARY %s AS '%s'"
CREATE_SOURCE = "CREATE SOURCE %s AS LANGUAGE '%s' NAME '%s' LIBRARY %s"
UDL_LIBS = [
{
'func_name': "curl",
'lib_name': "curllib",
'language': "C++",
'factory': "CurlSourceFactory",
'path': "/opt/vertica/sdk/examples/build/cURLLib.so"
},
]
def shell_execute(command, command_executor="root"):
    # This method encapsulates utils.execute for two purposes:
# 1. Helps in safe testing.
# 2. Helps in executing commands as other user, using their environment.
    # Note: This method uses su because using sudo -i -u <user> <command>
    # does not work with the Vertica installer
# and it has problems while executing remote commands.
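    # Illustrative usage (command taken from the constants above):
    #   shell_execute("/opt/vertica/bin/adminTools -t show_active_db",
    #                 VERTICA_ADMIN)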
return utils.execute("sudo", "su", "-", command_executor, "-c", "%s"
% command)
class VSqlError(object):
def __init__(self, stderr):
"""Parse the stderr part of the VSql output.
stderr looks like: "ERROR 3117: Division by zero"
:param stderr: string from executing statement via vsql
"""
parse = re.match("^(ERROR|WARNING) (\d+): (.+)$", stderr)
if not parse:
raise ValueError(_("VSql stderr %(msg)s not recognized.")
% {'msg': stderr})
self.type = parse.group(1)
self.code = int(parse.group(2))
self.msg = parse.group(3)
def is_warning(self):
return bool(self.type == "WARNING")
def __str__(self):
return "Vertica %s (%s): %s" % (self.type, self.code, self.msg)
def exec_vsql_command(dbadmin_password, command):
"""Executes a VSQL command with the given dbadmin password."""
out, err = shell_execute("/opt/vertica/bin/vsql -w \'%s\' -c \"%s\""
% (dbadmin_password, command),
VERTICA_ADMIN)
if err:
err = VSqlError(err)
return out, err<|fim▁end|> | STATUS_DB_DOWN = "/opt/vertica/bin/adminTools -t db_status -s DOWN"
SET_RESTART_POLICY = ("/opt/vertica/bin/adminTools -t set_restart_policy " |
<|file_name|>parse-data.js<|end_file_name|><|fim▁begin|>'use strict';
var _ = require( 'lodash' );
/* RESPONSE DATA STRUCTURE
==========================================================================
data = [ ..., { // Day Object
stop_id: '2681',
stop_name: 'Highland Ave @ Benton Rd',
mode: [ ..., { // Mode Object
mode_name: 'Bus',
route_type: '3',
route: [ ..., { // Route Object
route_id: '88',
route_name: '88',
direction: [ ..., { // Direction Object
direction_id: '1',
direction_name: 'Inbound',
trip: [ ..., { // Trip Object<|fim▁hole|> trip_name: '6:40 am from Clarendon Hill Busway to Lechmere'
}]
}]
}]
}]
}]
*/
/**
* Convert from a Day Object to a flat array of data objects representing trips
* @param {Number} dayIndex A numeric index for a weekday, from 0 to 6
* @param {Object} route A route object from the API response
* @return {Array} A flat array of trip data objects
*/
function convertRouteToTripsArray( dayIndex, route ) {
/* jshint camelcase: false */
var routeId = route.route_id;
var routeName = route.route_name;
return _.map( route.direction, function( direction ) {
return _.map( direction.trip, function( trip ) {
return {
day: +dayIndex,
name: trip.trip_name,
route: routeName,
routeId: routeId,
time: +trip.sch_arr_dt,
// Trip ID is NOT UNIQUE: avoid collection issues by avoiding 'id'
tripId: +trip.trip_id
};
});
});
}
/**
* Convert the raw API responses into a flat array of trips
* @param {Array} data The API response object
* @return {Array} A flat array of object literals describing trips
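 * Example (illustrative, drawn from the sample response above):
 *   parseData( data )[ 0 ] might be { day: 0, name: '6:40 am from ...',
 *     route: '88', routeId: '88', time: 1434278820, tripId: 26616679 }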
*/
function parseData( data ) {
return _.chain( data )
.reduce(function( memo, day, dayIndex ) {
var trips = _.chain( day.mode )
.pluck( 'route' )
.flatten()
// Create the mapper method for the relevant day
.map( _.partial( convertRouteToTripsArray, dayIndex ) )
.flattenDeep() // Deep flatten
.value();
// Concatenate with the prior results
return memo.concat( trips );
}, [])
.sortBy(function( trip ) {
return trip.time;
})
.value();
}
module.exports = parseData;<|fim▁end|> | sch_arr_dt: '1434278820',
sch_dep_dt: '1434278820',
trip_id: '26616679', |
<|file_name|>recurring-list.ts<|end_file_name|><|fim▁begin|>import { Component } from '@angular/core';
import { NavController } from 'ionic-angular';
@Component({
templateUrl: 'recurring-list.html'
})<|fim▁hole|> submitted = false;
constructor(
public nav: NavController) {}
add() {
}
}<|fim▁end|> |
export class RecurringListPage {
login: {username?: string, password?: string} = {}; |
<|file_name|>htmlparagraphelement.rs<|end_file_name|><|fim▁begin|><|fim▁hole|> * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLParagraphElementBinding;
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use dom_struct::dom_struct;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLParagraphElement {
htmlelement: HTMLElement
}
impl HTMLParagraphElement {
fn new_inherited(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> HTMLParagraphElement {
HTMLParagraphElement {
htmlelement:
HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLParagraphElement> {
Node::reflect_node(box HTMLParagraphElement::new_inherited(local_name, prefix, document),
document,
HTMLParagraphElementBinding::Wrap)
}
}<|fim▁end|> | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this |
<|file_name|>azure_storageaccountclient.go<|end_file_name|><|fim▁begin|>// +build !providerless
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storageaccountclient
import (
"context"
"fmt"
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog/v2"
azclients "k8s.io/legacy-cloud-providers/azure/clients"
"k8s.io/legacy-cloud-providers/azure/clients/armclient"
"k8s.io/legacy-cloud-providers/azure/metrics"
"k8s.io/legacy-cloud-providers/azure/retry"
)
var _ Interface = &Client{}
// Client implements StorageAccount client Interface.
type Client struct {
armClient armclient.Interface
subscriptionID string
// Rate limiting configures.
rateLimiterReader flowcontrol.RateLimiter
rateLimiterWriter flowcontrol.RateLimiter
// ARM throttling configures.
RetryAfterReader time.Time
RetryAfterWriter time.Time
}
// New creates a new StorageAccount client with ratelimiting.
func New(config *azclients.ClientConfig) *Client {
baseURI := config.ResourceManagerEndpoint
authorizer := config.Authorizer
armClient := armclient.New(authorizer, baseURI, config.UserAgent, APIVersion, config.Location, config.Backoff)
rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
klog.V(2).Infof("Azure StorageAccountClient (read ops) using rate limit config: QPS=%g, bucket=%d",
config.RateLimitConfig.CloudProviderRateLimitQPS,
config.RateLimitConfig.CloudProviderRateLimitBucket)
klog.V(2).Infof("Azure StorageAccountClient (write ops) using rate limit config: QPS=%g, bucket=%d",
config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
client := &Client{
armClient: armClient,
rateLimiterReader: rateLimiterReader,
rateLimiterWriter: rateLimiterWriter,
subscriptionID: config.SubscriptionID,
}
return client
}
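// Illustrative usage sketch (the resource group and account names here are
// hypothetical):
//
//	cli := New(&azclients.ClientConfig{ /* endpoint, authorizer, rate limits */ })
//	account, rerr := cli.GetProperties(ctx, "my-rg", "mystorageacct")
//	if rerr != nil {
//		// rerr reports client-side rate limiting, ARM throttling, or the API error
//	}
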
// GetProperties gets properties of the StorageAccount.
func (c *Client) GetProperties(ctx context.Context, resourceGroupName string, accountName string) (storage.Account, *retry.Error) {
mc := metrics.NewMetricContext("storage_account", "get", resourceGroupName, c.subscriptionID, "")
// Report errors if the client is rate limited.
if !c.rateLimiterReader.TryAccept() {
mc.RateLimitedCount()
return storage.Account{}, retry.GetRateLimitError(false, "StorageAccountGet")
}
// Report errors if the client is throttled.
if c.RetryAfterReader.After(time.Now()) {
mc.ThrottledCount()
rerr := retry.GetThrottlingError("StorageAccountGet", "client throttled", c.RetryAfterReader)
return storage.Account{}, rerr
}
result, rerr := c.getStorageAccount(ctx, resourceGroupName, accountName)
mc.Observe(rerr.Error())
if rerr != nil {
if rerr.IsThrottled() {
// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
c.RetryAfterReader = rerr.RetryAfter
}
return result, rerr
}
return result, nil
}
// getStorageAccount gets properties of the StorageAccount.
func (c *Client) getStorageAccount(ctx context.Context, resourceGroupName string, accountName string) (storage.Account, *retry.Error) {
resourceID := armclient.GetResourceID(
c.subscriptionID,
resourceGroupName,
"Microsoft.Storage/storageAccounts",
accountName,
)
result := storage.Account{}
response, rerr := c.armClient.GetResource(ctx, resourceID, "")
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "storageaccount.get.request", resourceID, rerr.Error())
return result, rerr
}
err := autorest.Respond(
response,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result))
if err != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "storageaccount.get.respond", resourceID, err)
return result, retry.GetError(response, err)
}
result.Response = autorest.Response{Response: response}
return result, nil
}
// ListKeys gets a list of storage account keys.
func (c *Client) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (storage.AccountListKeysResult, *retry.Error) {
mc := metrics.NewMetricContext("storage_account", "list_keys", resourceGroupName, c.subscriptionID, "")
// Report errors if the client is rate limited.
if !c.rateLimiterReader.TryAccept() {
mc.RateLimitedCount()
return storage.AccountListKeysResult{}, retry.GetRateLimitError(false, "StorageAccountListKeys")
}
// Report errors if the client is throttled.
if c.RetryAfterReader.After(time.Now()) {
mc.ThrottledCount()
rerr := retry.GetThrottlingError("StorageAccountListKeys", "client throttled", c.RetryAfterReader)
return storage.AccountListKeysResult{}, rerr
}
result, rerr := c.listStorageAccountKeys(ctx, resourceGroupName, accountName)
mc.Observe(rerr.Error())
if rerr != nil {
if rerr.IsThrottled() {
// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
c.RetryAfterReader = rerr.RetryAfter
}
return result, rerr
}
return result, nil<|fim▁hole|>func (c *Client) listStorageAccountKeys(ctx context.Context, resourceGroupName string, accountName string) (storage.AccountListKeysResult, *retry.Error) {
resourceID := armclient.GetResourceID(
c.subscriptionID,
resourceGroupName,
"Microsoft.Storage/storageAccounts",
accountName,
)
result := storage.AccountListKeysResult{}
response, rerr := c.armClient.PostResource(ctx, resourceID, "listKeys", struct{}{})
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "storageaccount.listkeys.request", resourceID, rerr.Error())
return result, rerr
}
err := autorest.Respond(
response,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result))
if err != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "storageaccount.listkeys.respond", resourceID, err)
return result, retry.GetError(response, err)
}
result.Response = autorest.Response{Response: response}
return result, nil
}
// Create creates a StorageAccount.
func (c *Client) Create(ctx context.Context, resourceGroupName string, accountName string, parameters storage.AccountCreateParameters) *retry.Error {
mc := metrics.NewMetricContext("storage_account", "create", resourceGroupName, c.subscriptionID, "")
// Report errors if the client is rate limited.
if !c.rateLimiterWriter.TryAccept() {
mc.RateLimitedCount()
return retry.GetRateLimitError(true, "StorageAccountCreate")
}
// Report errors if the client is throttled.
if c.RetryAfterWriter.After(time.Now()) {
mc.ThrottledCount()
rerr := retry.GetThrottlingError("StorageAccountCreate", "client throttled", c.RetryAfterWriter)
return rerr
}
rerr := c.createStorageAccount(ctx, resourceGroupName, accountName, parameters)
mc.Observe(rerr.Error())
if rerr != nil {
if rerr.IsThrottled() {
// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
c.RetryAfterWriter = rerr.RetryAfter
}
return rerr
}
return nil
}
// createStorageAccount creates or updates a StorageAccount.
func (c *Client) createStorageAccount(ctx context.Context, resourceGroupName string, accountName string, parameters storage.AccountCreateParameters) *retry.Error {
resourceID := armclient.GetResourceID(
c.subscriptionID,
resourceGroupName,
"Microsoft.Storage/storageAccounts",
accountName,
)
response, rerr := c.armClient.PutResource(ctx, resourceID, parameters)
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "storageAccount.put.request", resourceID, rerr.Error())
return rerr
}
if response != nil && response.StatusCode != http.StatusNoContent {
_, rerr = c.createResponder(response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "storageAccount.put.respond", resourceID, rerr.Error())
return rerr
}
}
return nil
}
func (c *Client) createResponder(resp *http.Response) (*storage.Account, *retry.Error) {
result := &storage.Account{}
err := autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result))
result.Response = autorest.Response{Response: resp}
return result, retry.GetError(resp, err)
}
// Delete deletes a StorageAccount by name.
func (c *Client) Delete(ctx context.Context, resourceGroupName string, accountName string) *retry.Error {
mc := metrics.NewMetricContext("storage_account", "delete", resourceGroupName, c.subscriptionID, "")
// Report errors if the client is rate limited.
if !c.rateLimiterWriter.TryAccept() {
mc.RateLimitedCount()
return retry.GetRateLimitError(true, "StorageAccountDelete")
}
// Report errors if the client is throttled.
if c.RetryAfterWriter.After(time.Now()) {
mc.ThrottledCount()
rerr := retry.GetThrottlingError("StorageAccountDelete", "client throttled", c.RetryAfterWriter)
return rerr
}
rerr := c.deleteStorageAccount(ctx, resourceGroupName, accountName)
mc.Observe(rerr.Error())
if rerr != nil {
if rerr.IsThrottled() {
// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
c.RetryAfterWriter = rerr.RetryAfter
}
return rerr
}
return nil
}
// deleteStorageAccount deletes a StorageAccount by name.
func (c *Client) deleteStorageAccount(ctx context.Context, resourceGroupName string, accountName string) *retry.Error {
resourceID := armclient.GetResourceID(
c.subscriptionID,
resourceGroupName,
"Microsoft.Storage/storageAccounts",
accountName,
)
return c.armClient.DeleteResource(ctx, resourceID, "")
}
// ListByResourceGroup gets a list of storage accounts in the given resource group.
func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]storage.Account, *retry.Error) {
mc := metrics.NewMetricContext("storage_account", "list_by_resource_group", resourceGroupName, c.subscriptionID, "")
// Report errors if the client is rate limited.
if !c.rateLimiterReader.TryAccept() {
mc.RateLimitedCount()
return nil, retry.GetRateLimitError(false, "StorageAccountListByResourceGroup")
}
// Report errors if the client is throttled.
if c.RetryAfterReader.After(time.Now()) {
mc.ThrottledCount()
rerr := retry.GetThrottlingError("StorageAccountListByResourceGroup", "client throttled", c.RetryAfterReader)
return nil, rerr
}
result, rerr := c.ListStorageAccountByResourceGroup(ctx, resourceGroupName)
mc.Observe(rerr.Error())
if rerr != nil {
if rerr.IsThrottled() {
// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
c.RetryAfterReader = rerr.RetryAfter
}
return result, rerr
}
return result, nil
}
// ListStorageAccountByResourceGroup gets a list of storage accounts in the given resource group.
func (c *Client) ListStorageAccountByResourceGroup(ctx context.Context, resourceGroupName string) ([]storage.Account, *retry.Error) {
resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Storage/storageAccounts",
autorest.Encode("path", c.subscriptionID),
autorest.Encode("path", resourceGroupName))
result := make([]storage.Account, 0)
page := &AccountListResultPage{}
page.fn = c.listNextResults
resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
defer c.armClient.CloseResponse(ctx, resp)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "storageAccount.list.request", resourceID, rerr.Error())
return result, rerr
}
var err error
page.alr, err = c.listResponder(resp)
if err != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "storageAccount.list.respond", resourceID, err)
return result, retry.GetError(resp, err)
}
for page.NotDone() {
result = append(result, *page.Response().Value...)
if err = page.NextWithContext(ctx); err != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "storageAccount.list.next", resourceID, err)
return result, retry.GetError(page.Response().Response.Response, err)
}
}
return result, nil
}
func (c *Client) listResponder(resp *http.Response) (result storage.AccountListResult, err error) {
err = autorest.Respond(
resp,
autorest.ByIgnoring(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result))
result.Response = autorest.Response{Response: resp}
return
}
// StorageAccountResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (c *Client) StorageAccountResultPreparer(ctx context.Context, lr storage.AccountListResult) (*http.Request, error) {
if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 {
return nil, nil
}
decorators := []autorest.PrepareDecorator{
autorest.WithBaseURL(to.String(lr.NextLink)),
}
return c.armClient.PrepareGetRequest(ctx, decorators...)
}
// listNextResults retrieves the next set of results, if any.
func (c *Client) listNextResults(ctx context.Context, lastResults storage.AccountListResult) (result storage.AccountListResult, err error) {
req, err := c.StorageAccountResultPreparer(ctx, lastResults)
if err != nil {
return result, autorest.NewErrorWithError(err, "storageaccount", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, rerr := c.armClient.Send(ctx, req)
defer c.armClient.CloseResponse(ctx, resp)
if rerr != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(rerr.Error(), "storageaccount", "listNextResults", resp, "Failure sending next results request")
}
result, err = c.listResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "storageaccount", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// AccountListResultPage contains a page of Account values.
type AccountListResultPage struct {
fn func(context.Context, storage.AccountListResult) (storage.AccountListResult, error)
alr storage.AccountListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *AccountListResultPage) NextWithContext(ctx context.Context) (err error) {
next, err := page.fn(ctx, page.alr)
if err != nil {
return err
}
page.alr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *AccountListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page AccountListResultPage) NotDone() bool {
return !page.alr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page AccountListResultPage) Response() storage.AccountListResult {
return page.alr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page AccountListResultPage) Values() []storage.Account {
if page.alr.IsEmpty() {
return nil
}
return *page.alr.Value
}<|fim▁end|> | }
// listStorageAccountKeys gets a list of storage account keys.
<|file_name|>TAGSORT.C<|end_file_name|><|fim▁begin|>/*
#Identification: TAGSORT.FOR
#Revision: B003
#Task: Sort for PALL
#First issue: 901019
#Author: POLA Ivan Polak
#Changes: %datum: %autor:
#===============================================================*/
#include <stdio.h>
int TagSort(unsigned long *ival,int *itag,int ip,int ik)
{
/*#parameters:
ival - i - array with the values of the sort keys
itag - o - array of pointers into ival
ip,ik -i - initial and final index of the sorted part of ival
return: 1 = O.K.
2 = overflow of the internal stack (probably cannot happen)
4 = flop (certainly cannot happen)
#Function:
After the work finishes, the array ITAG holds addresses into the array IVAL
such that the sequence IVAL(ITAG(i)) is DESCENDING (!).
Sorting by the QUICKSORT method with a stack.
TAGSORT emerged from EXTSOR, which arose by modifying MON3SOR_L:QSORT1.FOR
resp. MON3SOR_L:UMQSOR
tr.:
<|fim▁hole|> ival - i - field with values of sorting keys
itag - o - field of pointers to ival
ip,ik -i - initial and final index of the sorted part of ival
return: 1 = O.K.
2 = overflow of the inner stack (probably cannot happen)
4 = flop (certainly cannot happen)
#Function:
After the operation finishes, the field ITAG contains addresses into the
field IVAL such that the sequence IVAL(ITAG(i)) is DESCENDING (!).
Sorting by the QUICKSORT method with a stack.
TAGSORT emerged from EXTSOR, which was developed from
MON3SOR_L:QSORT1.FOR resp. MON3SOR_L:UMQSOR
c ***************************************************************
*/
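/* Illustrative usage sketch (the key values are made up); note the Fortran
   heritage: ip/ik are inclusive and typically start at 1.
   unsigned long keys[6] = {0, 7, 42, 13, 99, 5};
   int tags[6];
   if (TagSort(keys, tags, 1, 5) == 1)
       ;  // keys[tags[1]] >= keys[tags[2]] >= ... >= keys[tags[5]]
*/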
#define MQSTA 200
int qsta[MQSTA]; // stack of pending subarray bounds
int jx,is, l,r, i,j, ispr,iw; // auxiliary variables
// **********************************************************************
// main:
for(i=ip; i <= ik; i++) itag[i]= i; // initialize itag with the identity mapping
is=1; // push the whole range onto the stack
qsta[is]=ip;
qsta[is+1]=ik;
//--------------------
L10: l=qsta[is]; // pop from the stack
r=qsta[is+1];
is=is-2;
//-------------------
L20: i=l; // sorting section <i,j>
j=r;
ispr=((l+r)/2)+1;
if(ispr > r) ispr=r;
if(ispr < l) ispr=l; // central/middle index
jx=itag[ispr];
//-------------------
// 30 if(a[i]-x[1]) 200,110,300 // 200=.true. 300=.false
//30 if(ival[itag[i]] .lt. ival[jx] ) then
L30: if( ival[itag[i]] > ival[jx] ) //descending!
goto L200; // 200=.true. 300=.false
else
goto L300;
L200: i++;
goto L30;
// 300 if(x[1]-a[j]) 600,410,700 // 600=.true. 700=.false
//300 if( ival[jx] .lt. ival[itag[j]] ) then
L300: if( ival[jx] > ival[itag[j]] ) //descend.
goto L600; // 600=.true. 700=.false
else
goto L700;
L600: j--;
goto L300;
L700: if(i <= j) // swap
{ if(i == j) goto L33;
// call lib$movc3(len,a(i),waa)
// call lib$movc3(len,a(j),a(i))
// call lib$movc3(len,waa,a(j))
//-------- super secure - pol 930114 !!!!!!!!!!!!!!1
if(i<ip || j>ik) return 4;
//-----------------------------------
iw=itag[i];
itag[i]=itag[j];
itag[j]=iw;
L33: i++;
j--;
}
if(i <= j) goto L30;
if(i < r) // push the right part onto the stack
{is=is+2;
//p: if(is >= MQSTA) perror("TagSort-plny zasobnik"); //stop ''
if(is >= MQSTA) return 2;
qsta[is]=i;
qsta[is+1]=r;
}
r=j; // sorting of the rest of left part
if(l < r) goto L20;
if(is > 0) goto L10;
return 1;
}<|fim▁end|> |
#parameters:
|
<|file_name|>test_guest_event_page.py<|end_file_name|><|fim▁begin|>import unittest
from datetime import datetime
from flask import url_for
from app import current_app as app
from app.helpers.data import save_to_db
from app.models.call_for_papers import CallForPaper
from tests.unittests.object_mother import ObjectMother
from tests.unittests.utils import OpenEventTestCase
class TestGuestEventPage(OpenEventTestCase):
def test_published_event_view(self):
with app.test_request_context():
event = ObjectMother.get_event()
event.state = 'Published'
save_to_db(event, "Event Saved")
rv = self.app.get(url_for('event_detail.display_event_detail_home', identifier=event.identifier),
follow_redirects=True)
self.assertTrue("Open Event" in rv.data, msg=rv.data)
def test_published_event_view_coc(self):
with app.test_request_context():
event = ObjectMother.get_event()
event.state = 'Published'
event.code_of_conduct = 'Test Code of Conduct'
save_to_db(event, "Event Saved")
rv = self.app.get(url_for('event_detail.display_event_coc', identifier=event.identifier),
follow_redirects=True)
self.assertTrue("Code of Conduct" in rv.data, msg=rv.data)
def test_unpublished_event_view_coc(self):
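# The event is published but has no code of conduct set, so the CoC page
# is expected to return 404.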
with app.test_request_context():
event = ObjectMother.get_event()
event.state = 'Published'
save_to_db(event, "Event Saved")
rv = self.app.get(url_for('event_detail.display_event_coc', identifier=event.identifier),
follow_redirects=True)
self.assertEqual(rv.status_code, 404)
def test_unpublished_event_view_attempt(self):
with app.test_request_context():
event = ObjectMother.get_event()
save_to_db(event, "Event Saved")
rv = self.app.get(url_for('event_detail.display_event_detail_home', identifier=event.identifier),
follow_redirects=True)
self.assertEqual(rv.status_code, 404)
def test_soft_deleted_event_view_attempt(self):
with app.test_request_context():
event = ObjectMother.get_event()
event.state = 'Published'
event.in_trash = True
save_to_db(event, "Event Saved")
rv = self.app.get(url_for('event_detail.display_event_detail_home', identifier=event.identifier),
follow_redirects=True)
self.assertEqual(rv.status_code, 404)
def test_published_event_sessions_view(self):
with app.test_request_context():
event = ObjectMother.get_event()
event.state = 'Published'
save_to_db(event, "Event Saved")
track = ObjectMother.get_track()
track.event_id = event.id
save_to_db(track, "Track Saved")
speaker = ObjectMother.get_speaker()
speaker.event_id = event.id
save_to_db(speaker, "Speaker Saved")
session = ObjectMother.get_session()
session.event_id = event.id
session.speakers = [speaker]
session.state = 'accepted'
save_to_db(session, "Session Saved")
rv = self.app.get(url_for('event_detail.display_event_sessions', identifier=event.identifier),
follow_redirects=True)
self.assertTrue("Sessions" in rv.data, msg=rv.data)
def test_published_event_schedule_view(self):
with app.test_request_context():
event = ObjectMother.get_event()
event.state = 'Published'
event.schedule_published_on = datetime.now()
save_to_db(event, "Event Saved")
track = ObjectMother.get_track()
track.event_id = event.id
save_to_db(track, "Track Saved")
speaker = ObjectMother.get_speaker()
speaker.event_id = event.id
save_to_db(speaker, "Speaker Saved")
microlocation = ObjectMother.get_microlocation()
save_to_db(microlocation, "Microlocation Saved")
session = ObjectMother.get_session()
session.event_id = event.id
session.state = 'accepted'
session.microlocation_id = microlocation.id
session.speakers = [speaker]
save_to_db(session, "Session Saved")
rv = self.app.get(url_for('event_detail.display_event_schedule', identifier=event.identifier),
follow_redirects=True)
self.assertTrue("Schedule" in rv.data, msg=rv.data)
def test_published_event_unpublished_schedule_view_attempt(self):
with app.test_request_context():
event = ObjectMother.get_event()
event.state = 'Published'
save_to_db(event, "Event Saved")
track = ObjectMother.get_track()
track.event_id = event.id
save_to_db(track, "Track Saved")
speaker = ObjectMother.get_speaker()
speaker.event_id = event.id
save_to_db(speaker, "Speaker Saved")
microlocation = ObjectMother.get_microlocation()
save_to_db(microlocation, "Microlocation Saved")
session = ObjectMother.get_session()
session.event_id = event.id
session.microlocation_id = microlocation.id
session.speakers = [speaker]
session.state = 'accepted'<|fim▁hole|>
def test_published_event_cfs_view(self):
with app.test_request_context():
event = ObjectMother.get_event()
event.state = 'Published'
save_to_db(event, "Event Saved")
custom_form = ObjectMother.get_custom_form()
custom_form.event_id = event.id
save_to_db(custom_form, "Custom form saved")
call_for_papers = CallForPaper(announcement="Announce",
start_date=datetime(2003, 8, 4, 12, 30, 45),
end_date=datetime(2004, 8, 4, 12, 30, 45),
event_id=event.id)
save_to_db(call_for_papers, "Call for papers saved")
rv = self.app.get(url_for('event_detail.display_event_cfs',
identifier=event.identifier), follow_redirects=True)
self.assertTrue("Closed" in rv.data, msg=rv.data)
def test_published_event_cfs_view_attempt(self):
with app.test_request_context():
event = ObjectMother.get_event()
event.state = 'Published'
save_to_db(event, "Event Saved")
rv = self.app.get(url_for('event_detail.display_event_cfs',
identifier=event.identifier), follow_redirects=True)
self.assertEqual(rv.status_code, 404)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | save_to_db(session, "Session Saved")
rv = self.app.get(url_for('event_detail.display_event_schedule', identifier=event.identifier),
follow_redirects=True)
self.assertEqual(rv.status_code, 404) |
<|file_name|>Conditional.tsx<|end_file_name|><|fim▁begin|>import React from "react"
type Props = {<|fim▁hole|>}
export const Conditional = ({ condition, children, wrap }: Props) =>
condition ? React.cloneElement(wrap(children)) : children<|fim▁end|> | condition: boolean
children: React.ReactElement
wrap: (children: React.ReactNode) => JSX.Element |
<|file_name|>mirrors.py<|end_file_name|><|fim▁begin|>##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188<|fim▁hole|># Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""Schema for mirrors.yaml configuration file.
.. literalinclude:: ../spack/schema/mirrors.py
:lines: 32-
"""
schema = {
'$schema': 'http://json-schema.org/schema#',
'title': 'Spack mirror configuration file schema',
'type': 'object',
'additionalProperties': False,
'patternProperties': {
r'mirrors': {
'type': 'object',
'default': {},
'additionalProperties': False,
'patternProperties': {
r'\w[\w-]*': {
'type': 'string'},
},
},
},
}<|fim▁end|> | #
# For details, see https://github.com/llnl/spack |
<|file_name|>predict_sample.py<|end_file_name|><|fim▁begin|># Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle, DataProviderConverter
from paddle.trainer.PyDataProvider2 import dense_vector
from paddle.trainer.config_parser import parse_config
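# Each entry in TEST_DATA below is a single 28x28 MNIST digit image flattened
# into a 784-element vector of pixel intensities in [0, 1], matching the
# dense_vector(784) converter set up in main().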
TEST_DATA = [[[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,<|fim▁hole|> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.215686, 0.533333, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.67451, 0.992157, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.070588, 0.886275, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.192157,
0.070588, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.670588, 0.992157,
0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.117647, 0.933333, 0.858824, 0.313725,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.090196, 0.858824, 0.992157, 0.831373, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0.141176, 0.992157, 0.992157, 0.611765, 0.054902, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0.258824, 0.992157, 0.992157, 0.529412, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0.368627, 0.992157, 0.992157, 0.419608, 0.003922, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0.094118, 0.835294, 0.992157, 0.992157, 0.517647, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0.603922, 0.992157, 0.992157, 0.992157, 0.603922,
0.545098, 0.043137, 0, 0, 0, 0, 0, 0, 0, 0.447059, 0.992157, 0.992157,
0.956863, 0.062745, 0, 0, 0, 0, 0, 0, 0, 0, 0.011765, 0.666667, 0.992157,
0.992157, 0.992157, 0.992157, 0.992157, 0.745098, 0.137255, 0, 0, 0, 0, 0,
0.152941, 0.866667, 0.992157, 0.992157, 0.521569, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.070588, 0.992157, 0.992157, 0.992157, 0.803922, 0.352941, 0.745098,
0.992157, 0.945098, 0.317647, 0, 0, 0, 0, 0.580392, 0.992157, 0.992157,
0.764706, 0.043137, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.992157, 0.992157,
0.776471, 0.043137, 0, 0.007843, 0.27451, 0.882353, 0.941176, 0.176471, 0,
0, 0.180392, 0.898039, 0.992157, 0.992157, 0.313725, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0.070588, 0.992157, 0.992157, 0.713725, 0, 0, 0, 0, 0.627451,
0.992157, 0.729412, 0.062745, 0, 0.509804, 0.992157, 0.992157, 0.776471,
0.035294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.494118, 0.992157, 0.992157,
0.968627, 0.168627, 0, 0, 0, 0.423529, 0.992157, 0.992157, 0.364706, 0,
0.717647, 0.992157, 0.992157, 0.317647, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.533333, 0.992157, 0.984314, 0.945098, 0.603922, 0, 0, 0, 0.003922,
0.466667, 0.992157, 0.988235, 0.976471, 0.992157, 0.992157, 0.788235,
0.007843, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.686275, 0.882353, 0.364706, 0,
0, 0, 0, 0, 0, 0.098039, 0.588235, 0.992157, 0.992157, 0.992157, 0.980392,
0.305882, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.101961, 0.67451, 0.321569,
0, 0, 0, 0, 0, 0, 0, 0.105882, 0.733333, 0.976471, 0.811765, 0.713725, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.65098, 0.992157, 0.321569, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0.25098, 0.007843, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0.94902, 0.219608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0.968627, 0.764706, 0.152941, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.498039, 0.25098, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0
]], [[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0.298039, 0.333333, 0.333333, 0.333333, 0.337255,
0.333333, 0.333333, 0.109804, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0.027451, 0.223529, 0.776471, 0.964706, 0.988235, 0.988235, 0.988235,
0.992157, 0.988235, 0.988235, 0.780392, 0.098039, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0.14902, 0.698039, 0.988235, 0.992157, 0.988235, 0.901961,
0.87451, 0.568627, 0.882353, 0.976471, 0.988235, 0.988235, 0.501961, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.188235, 0.647059, 0.988235, 0.988235,
0.745098, 0.439216, 0.098039, 0, 0, 0, 0.572549, 0.988235, 0.988235,
0.988235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.933333, 0.992157,
0.941176, 0.247059, 0, 0, 0, 0, 0, 0, 0.188235, 0.898039, 0.992157,
0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.039216, 0.639216, 0.933333,
0.988235, 0.913725, 0.278431, 0, 0, 0, 0, 0, 0, 0, 0.113725, 0.843137,
0.988235, 0.988235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.235294, 0.988235,
0.992157, 0.988235, 0.815686, 0.07451, 0, 0, 0, 0, 0, 0, 0, 0.333333,
0.988235, 0.988235, 0.552941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.211765,
0.878431, 0.988235, 0.992157, 0.701961, 0.329412, 0.109804, 0, 0, 0, 0, 0,
0, 0, 0.698039, 0.988235, 0.913725, 0.145098, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.188235, 0.890196, 0.988235, 0.988235, 0.745098, 0.047059, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0.882353, 0.988235, 0.568627, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2,
0.933333, 0.992157, 0.992157, 0.992157, 0.447059, 0.294118, 0, 0, 0, 0, 0,
0, 0, 0, 0.447059, 0.992157, 0.768627, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.623529, 0.988235, 0.988235, 0.988235, 0.988235, 0.992157, 0.47451, 0, 0,
0, 0, 0, 0, 0, 0.188235, 0.933333, 0.87451, 0.509804, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0.992157, 0.988235, 0.937255, 0.792157, 0.988235, 0.894118,
0.082353, 0, 0, 0, 0, 0, 0, 0.027451, 0.647059, 0.992157, 0.654902, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0.623529, 0.988235, 0.913725, 0.329412, 0.376471,
0.184314, 0, 0, 0, 0, 0, 0, 0.027451, 0.513725, 0.988235, 0.635294,
0.219608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.196078, 0.929412, 0.988235,
0.988235, 0.741176, 0.309804, 0, 0, 0, 0, 0, 0, 0.529412, 0.988235,
0.678431, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.223529, 0.992157,
0.992157, 1, 0.992157, 0.992157, 0.992157, 0.992157, 1, 0.992157, 0.992157,
0.882353, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023529,
0.478431, 0.654902, 0.658824, 0.952941, 0.988235, 0.988235, 0.988235,
0.992157, 0.988235, 0.729412, 0.278431, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0.196078, 0.647059, 0.764706, 0.764706, 0.768627,
0.580392, 0.047059, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0
]]]
def main():
conf = parse_config("./mnist_model/trainer_config.py", "")
print conf.data_config.load_data_args
network = swig_paddle.GradientMachine.createFromConfigProto(
conf.model_config)
assert isinstance(network, swig_paddle.GradientMachine) # For code hint.
network.loadParameters("./mnist_model/")
converter = DataProviderConverter([dense_vector(784)])
inArg = converter(TEST_DATA)
print network.forwardTest(inArg)
if __name__ == '__main__':
swig_paddle.initPaddle("--use_gpu=0")
main()<|fim▁end|> | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
<|file_name|>validators.js<|end_file_name|><|fim▁begin|>define([
'lib/validators/moment',
'lib/validators/unequalTo'<|fim▁hole|><|fim▁end|> | ], function () {
}); |
<|file_name|>messagearchivesimple.py<|end_file_name|><|fim▁begin|>#/u/Goldensights
import praw
import time
import datetime
'''USER CONFIG'''
USERNAME = ""
#This is the bot's username. The account must have some karma in order to send mail.
PASSWORD = ""
#This is the bot's Password.
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
MAXPOSTS = 1000
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 30
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
PRINTFILE = 'messages.txt'
#This is the file, in the same directory as the .py file, where the messages are stored
SUBJECTLINE = "Newsletterly"
ITEMTYPE = 't4'
#The type of item to gather. t4 is a PM
'''All done!'''
WAITS = str(WAIT)
try:
import bot #This is a file in my python library which contains my Bot's username and password. I can push code to Git without showing credentials
USERNAME = bot.uG
PASSWORD = bot.pG
USERAGENT = bot.aG
except ImportError:
pass
r = praw.Reddit(USERAGENT)
r.login(USERNAME, PASSWORD)
def work():<|fim▁hole|> unread = r.get_unread(limit=MAXPOSTS)
results = []
for message in unread:
if ITEMTYPE in message.fullname:
print(message.id, message.subject, end=" ")
if SUBJECTLINE.lower() in message.subject.lower():
print(message.body)
messagedate = datetime.datetime.utcfromtimestamp(message.created_utc)
messagedate = datetime.datetime.strftime(messagedate, "%B %d %Y %H:%M UTC")
results += [message.fullname + " : " + message.author.name, messagedate, message.body, "\n\n"]
else:
print()
message.mark_as_read()
logfile = open(PRINTFILE, "a")
for result in results:
print(result, file=logfile)
logfile.close()
while True:
try:
work()
except Exception as e:
print('An error has occured:', str(e))
print('Running again in ' + WAITS + ' seconds \n')
time.sleep(WAIT)<|fim▁end|> | |
<|file_name|>fix_provincie_autonome.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from ...territori.models import Territorio
class Command(BaseCommand):
help = 'Fix for provincie autonome'
def handle(self, *args, **options):
Territorio.objects.regioni().get(denominazione='TRENTINO-ALTO ADIGE/SUDTIROL').delete()<|fim▁hole|>
territorio.pk = None
territorio.tipo = Territorio.TIPO.R
territorio.cod_reg = territorio.cod_prov
territorio.cod_prov = None
territorio.denominazione = 'P.A. DI {}'.format(name)
territorio.slug = None
territorio.save()
Territorio.objects.provincie().filter(cod_prov=territorio.cod_reg).update(cod_reg=territorio.cod_reg)
Territorio.objects.comuni().filter(cod_prov=territorio.cod_reg).update(cod_reg=territorio.cod_reg)<|fim▁end|> |
for name in ['BOLZANO', 'TRENTO']:
territorio = Territorio.objects.provincie().get(denominazione__istartswith=name) |
<|file_name|>testpatch.py<|end_file_name|><|fim▁begin|># Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest
from unittest.test.testmock import support
from unittest.test.testmock.support import SomeClass, is_instance
from test.test_importlib.util import uncache
from unittest.mock import (
NonCallableMock, CallableMixin, sentinel,
MagicMock, Mock, NonCallableMagicMock, patch, _patch,
DEFAULT, call, _get_target
)
builtin_string = 'builtins'
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
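# _get_proxy builds an object that forwards attribute reads to `obj`; with
# get_only=False it forwards attribute writes and deletes as well, letting the
# proxy tests below patch attributes through it onto the wrapped object.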
# for use in the test
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
foo_name = '%s.Foo' % __name__
def function(a, b=Foo):
pass
class Container(object):
def __init__(self):
self.values = {}
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
self.values[name] = value
def __delitem__(self, name):
del self.values[name]
def __iter__(self):
return iter(self.values)
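# Container implements just enough of the mapping protocol (__getitem__,
# __setitem__, __delitem__, __iter__) for patch.dict to snapshot and restore
# it like a real dict; see test_patch_dict_with_container_object below.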
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,<|fim▁hole|>
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
self.assertIsInstance(mock1, MagicMock,
"patch with two arguments did not create a mock")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
self.assertNotEqual(outerMock2, mock2, "unexpected value for mock2")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'ord', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'ord'))
def test_patch_builtins_without_create(self):
@patch(__name__+'.ord')
def test_ord(mock_ord):
mock_ord.return_value = 101
return ord('c')
@patch(__name__+'.open')
def test_open(mock_open):
m = mock_open.return_value
m.read.return_value = 'abcd'
fobj = open('doesnotexists.txt')
data = fobj.read()
fobj.close()
return data
self.assertEqual(test_ord(), 101)
self.assertEqual(test_open(), 'abcd')
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
return
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(thing)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse():
proxy = _get_proxy(thing, get_only=False)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function.assert_not_called()
self.assertRaises(AssertionError, function.assert_called)
self.assertRaises(AssertionError, function.assert_called_once)
function(1)
self.assertRaises(AssertionError, function.assert_not_called)
function.assert_called_with(1)
function.assert_called()
function.assert_called_once()
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
patcher = patch(foo_name, new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(mock)
def test_new_callable_patch_object(self):
patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(mock)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
Foo = type('Foo', (str,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
class Foo(object):
def bar_one(self):
return dict(the_dict)
def bar_two(self):
return dict(the_dict)
def test_one(self):
return dict(the_dict)
def test_two(self):
return dict(the_dict)
the_dict = {'key': 'original'}
Foo = patch.dict(the_dict, key='changed')(Foo)
foo = Foo()
self.assertEqual(foo.bar_one(), {'key': 'changed'})
self.assertEqual(foo.bar_two(), {'key': 'changed'})
self.assertEqual(foo.test_one(), {'key': 'original'})
self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
with patch('unittest.test.testmock.support', autospec=True) as m:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
with uncache('squizz'):
squizz = Mock()
sys.modules['squizz'] = squizz
squizz.squozz = 6
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
def test_patch_propagates_exc_on_exit(self):
class holder:
exc_info = None, None, None
class custom_patch(_patch):
def __exit__(self, etype=None, val=None, tb=None):
_patch.__exit__(self, etype, val, tb)
holder.exc_info = etype, val, tb
stop = __exit__
def with_custom_patch(target):
getter, attribute = _get_target(target)
return custom_patch(
getter, attribute, DEFAULT, None, False, None,
None, None, {}
)
@with_custom_patch('squizz.squozz')
def test(mock):
raise RuntimeError
with uncache('squizz'):
squizz = Mock()
sys.modules['squizz'] = squizz
self.assertRaises(RuntimeError, test)
self.assertIs(holder.exc_info[0], RuntimeError)
self.assertIsNotNone(holder.exc_info[1],
'exception value not propagated')
self.assertIsNotNone(holder.exc_info[2],
'exception traceback not propagated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
unlink = os.unlink
chdir = os.chdir
path = os.path
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
def test_stopall_lifo(self):
stopped = []
class thing(object):
one = two = three = None
def get_patch(attribute):
class mypatch(_patch):
def stop(self):
stopped.append(attribute)
return super(mypatch, self).stop()
return mypatch(lambda: thing, attribute, None, None,
False, None, None, None, {})
[get_patch(val).start() for val in ("one", "two", "three")]
patch.stopall()
self.assertEqual(stopped, ["three", "two", "one"])
def test_special_attrs(self):
def foo(x=0):
"""TEST"""
return x
with patch.object(foo, '__defaults__', (1, )):
self.assertEqual(foo(), 1)
self.assertEqual(foo(), 0)
with patch.object(foo, '__doc__', "FUN"):
self.assertEqual(foo.__doc__, "FUN")
self.assertEqual(foo.__doc__, "TEST")
with patch.object(foo, '__module__', "testpatch2"):
self.assertEqual(foo.__module__, "testpatch2")
self.assertEqual(foo.__module__, 'unittest.test.testmock.testpatch')
with patch.object(foo, '__annotations__', dict([('s', 1, )])):
self.assertEqual(foo.__annotations__, dict([('s', 1, )]))
self.assertEqual(foo.__annotations__, dict())
def foo(*a, x=0):
return x
with patch.object(foo, '__kwdefaults__', dict([('x', 1, )])):
self.assertEqual(foo(), 1)
self.assertEqual(foo(), 0)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | "patch not restored")
|
<|file_name|>0006_auto_20190926_1218.py<|end_file_name|><|fim▁begin|># Generated by Django 2.2.5 on 2019-09-26 12:18
from django.db import migrations, models
import weblate.utils.backup
class Migration(migrations.Migration):
dependencies = [("wladmin", "0005_auto_20190926_1332")]
operations = [
migrations.AddField(
model_name="backupservice",
name="paperkey",
field=models.TextField(default=""),
preserve_default=False,
),
migrations.AddField(
model_name="backupservice",
name="passphrase",
field=models.CharField(
default=weblate.utils.backup.make_password, max_length=100
),
),
migrations.AlterField(
model_name="backuplog",
name="event",
field=models.CharField(
choices=[
("backup", "Backup performed"),
("prune", "Deleted the oldest backups"),
("init", "Repository initialization"),
],
max_length=100,<|fim▁hole|> name="repository",
field=models.CharField(
default="", max_length=500, verbose_name="Backup repository"
),
),
]<|fim▁end|> | ),
),
migrations.AlterField(
model_name="backupservice", |
<|file_name|>test_variables.rs<|end_file_name|><|fim▁begin|>use crate::ast::rules;
use super::utils::{interpret_rule, interpret_rule_env};
#[test]
fn test_variable_simple() {
let (_val, env) = interpret_rule("x = 3", rules::stat);
assert_eq!(env, r#"{"x": RefCell { value: Number(3.0) }}"#);
let (_val, env) = interpret_rule("x = 3, 2", rules::stat);
assert_eq!(env, r#"{"x": RefCell { value: Number(3.0) }}"#);
let (_val, env) = interpret_rule("x, y = 3, false", rules::stat);
assert_eq!(
env,
r#"{"x": RefCell { value: Number(3.0) }, "y": RefCell { value: Boolean(false) }}"#
);
let (_val, env) = interpret_rule("x, y = 3", rules::stat);
assert_eq!(
env,
r#"{"x": RefCell { value: Number(3.0) }, "y": RefCell { value: Nil }}"#
);
}
#[test]
fn test_variable_table() {
let (_val, env) = interpret_rule("x = {}", rules::stat);
assert_eq!(
env,
r#"{"x": RefCell { value: Table { id: 1, map: {}, metatable: {}, border: 0 } }}"#
);
let (_val, mut env) = interpret_rule("x = {y = 5, [5] = false}", rules::stat);
let (val, mut env) = interpret_rule_env("x.y", rules::var, &mut env);
assert_eq!(val, "Reference(RefCell { value: Number(5.0) })");
let (val, _env) = interpret_rule_env("x[5]", rules::var, &mut env);
assert_eq!(val, "Reference(RefCell { value: Boolean(false) })");
}
#[test]
fn test_variable_table_change() {<|fim▁hole|> let (val, mut env) = interpret_rule_env("x.y", rules::var, &mut env);
assert_eq!(val, "Reference(RefCell { value: Number(5.0) })");
let (_val, mut env) = interpret_rule_env("x.y = 7", rules::stat, &mut env);
let (val, _env) = interpret_rule_env("x.y", rules::var, &mut env);
assert_eq!(val, "Reference(RefCell { value: Number(7.0) })");
}
#[test]
#[should_panic(expected = "Runtime error: Attempt to index `Nil` value")]
fn test_variable_non_id() {
let (_val, _env) = interpret_rule("x.y", rules::var);
}<|fim▁end|> | let (_val, mut env) = interpret_rule("x = {y = 5}", rules::stat);
|
<|file_name|>harness.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import os
from rapidsms.router import Router
from rapidsms.backends.backend import Backend
from rapidsms.app import App
# a really dumb Logger stand-in
class MockLogger (list):
def __init__(self):
# enable logging during tests with an
# environment variable, since the runner
# doesn't seem to have args
self.to_console = os.environ.get("verbose", False)
def write (self, *args):
if self.to_console:
if len(args) == 3:
print args[2]
else:
print args[2] % args[3:]
self.append(args)
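# e.g. (assumed invocation; the exact runner script name is illustrative):
#
#   verbose=1 python run_tests.py
#
# echoes every logged line to the console while the suite runs.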
# a subclass of Router with all the moving parts replaced
class MockRouter (Router):
def __init__ (self):
Router.__init__(self)
self.logger = MockLogger()
def add_backend (self, backend):
self.backends.append(backend)
# def add_app (self, app):
# app.configure()
# self.apps.append(app)
def start (self):
self.running = True
self.start_all_backends()
self.start_all_apps()
def stop (self):
self.running = False
self.stop_all_backends()
class MockBackend (Backend):
def start (self):
self._running = True
self.outgoing = []
def run (self):
while self.running:
msg = self.next_message(0.25)
if msg is not None: self.outgoing.append(msg)
# a subclass of App with all the moving parts replaced
class MockApp (App):
def configure (self):
self.calls = []
def start (self):
self.calls.append(("start",))
def parse (self, message):
self.calls.append(("parse", message))
def handle (self, message):
self.calls.append(("handle", message))
def cleanup (self, message):
self.calls.append(("cleanup", message))
def outgoing (self, message):
self.calls.append(("outgoing", message))
def stop (self):
self.calls.append(("stop",))<|fim▁hole|> message.respond(message.peer + ": " + message.text)<|fim▁end|> |
class EchoApp (MockApp):
def handle (self, message):
MockApp.handle(self, message) |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>from .models import *
from django.contrib import admin
from django.db import models
from website.base.form import TinyMCEAdminMixin
from django.utils.translation import ugettext_lazy as _
from mediastore.admin import ModelAdmin
class SessionAdmin(TinyMCEAdminMixin, ModelAdmin):
list_display = ('title','day_of_week','cost','is_public','is_featured','sort_value',)
list_editable = ('is_public','is_featured','sort_value')
fieldsets = (
(_('Session'), {
'fields':(
'title',
'list_description',
'day_of_week',
'club',
)
}),
(_('Description'),{
'fields':(
'description',
)
}),
(_('Cost'),{
'fields':(
'cost',
)
}),
(_('Location'),{
'fields':(
'location',
)
}),
(_('Settings'), {
'fields':(
'sort_value',
'is_public',
'is_featured',
)
})<|fim▁hole|>
admin.site.register(Session,SessionAdmin)<|fim▁end|> | ) |
<|file_name|>udp.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! UDP (User Datagram Protocol) network connections.
//!
//! This module contains the ability to open a UDP stream to a socket address.
//! The destination and binding addresses can either be an IPv4 or IPv6
//! address. There is no corresponding notion of a server because UDP is a
//! datagram protocol.
use clone::Clone;
use old_io::net::ip::{SocketAddr, IpAddr, ToSocketAddr};
use old_io::IoResult;
use option::Option;
use sys::udp::UdpSocket as UdpSocketImp;
use sys_common;
/// A User Datagram Protocol socket.
///
/// This is an implementation of a bound UDP socket. This supports both IPv4 and
/// IPv6 addresses, and there is no corresponding notion of a server because UDP
/// is a datagram protocol.
///
/// # Example
///
/// ```rust,no_run
/// # #![allow(unused_must_use)]
/// #![feature(slicing_syntax)]
///
/// use std::old_io::net::udp::UdpSocket;
/// use std::old_io::net::ip::{Ipv4Addr, SocketAddr};
/// fn main() {
/// let addr = SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: 34254 };
/// let mut socket = match UdpSocket::bind(addr) {
/// Ok(s) => s,
/// Err(e) => panic!("couldn't bind socket: {}", e),
/// };
///
/// let mut buf = [0; 10];
/// match socket.recv_from(&mut buf) {
/// Ok((amt, src)) => {
/// // Send a reply to the socket we received data from
/// let buf = &mut buf[..amt];
/// buf.reverse();
/// socket.send_to(buf, src);
/// }
/// Err(e) => println!("couldn't receive a datagram: {}", e)
/// }
/// drop(socket); // close the socket
/// }
/// ```
pub struct UdpSocket {
inner: UdpSocketImp,
}
impl UdpSocket {
/// Creates a UDP socket from the given address.
///
/// Address type can be any implementor of `ToSocketAddr` trait. See its
/// documentation for concrete examples.
pub fn bind<A: ToSocketAddr>(addr: A) -> IoResult<UdpSocket> {
super::with_addresses(addr, |addr| {
UdpSocketImp::bind(addr).map(|s| UdpSocket { inner: s })
})
}
/// Receives data from the socket. On success, returns the number of bytes
/// read and the address from whence the data came.<|fim▁hole|> /// Sends data on the socket to the given address. Returns nothing on
/// success.
///
/// Address type can be any implementor of `ToSocketAddr` trait. See its
/// documentation for concrete examples.
pub fn send_to<A: ToSocketAddr>(&mut self, buf: &[u8], addr: A) -> IoResult<()> {
super::with_addresses(addr, |addr| self.inner.send_to(buf, addr))
}
/// Returns the socket address that this socket was created from.
pub fn socket_name(&mut self) -> IoResult<SocketAddr> {
self.inner.socket_name()
}
/// Joins a multicast IP address (becomes a member of it)
#[unstable(feature = "io")]
pub fn join_multicast(&mut self, multi: IpAddr) -> IoResult<()> {
self.inner.join_multicast(multi)
}
/// Leaves a multicast IP address (drops membership from it)
#[unstable(feature = "io")]
pub fn leave_multicast(&mut self, multi: IpAddr) -> IoResult<()> {
self.inner.leave_multicast(multi)
}
/// Set the multicast loop flag to the specified value
///
/// This lets multicast packets loop back to local sockets (if enabled)
#[unstable(feature = "io")]
pub fn set_multicast_loop(&mut self, on: bool) -> IoResult<()> {
self.inner.set_multicast_loop(on)
}
/// Sets the multicast TTL
#[unstable(feature = "io")]
pub fn set_multicast_ttl(&mut self, ttl: int) -> IoResult<()> {
self.inner.multicast_time_to_live(ttl)
}
/// Sets this socket's TTL
#[unstable(feature = "io")]
pub fn set_ttl(&mut self, ttl: int) -> IoResult<()> {
self.inner.time_to_live(ttl)
}
/// Sets the broadcast flag on or off
#[unstable(feature = "io")]
pub fn set_broadcast(&mut self, broadcast: bool) -> IoResult<()> {
self.inner.set_broadcast(broadcast)
}
/// Sets the read/write timeout for this socket.
///
/// For more information, see `TcpStream::set_timeout`
#[unstable(feature = "io",
reason = "the timeout argument may change in type and value")]
pub fn set_timeout(&mut self, timeout_ms: Option<u64>) {
self.inner.set_timeout(timeout_ms)
}
/// Sets the read timeout for this socket.
///
/// For more information, see `TcpStream::set_timeout`
#[unstable(feature = "io",
reason = "the timeout argument may change in type and value")]
pub fn set_read_timeout(&mut self, timeout_ms: Option<u64>) {
self.inner.set_read_timeout(timeout_ms)
}
/// Sets the write timeout for this socket.
///
/// For more information, see `TcpStream::set_timeout`
#[unstable(feature = "io",
reason = "the timeout argument may change in type and value")]
pub fn set_write_timeout(&mut self, timeout_ms: Option<u64>) {
self.inner.set_write_timeout(timeout_ms)
}
}
impl Clone for UdpSocket {
/// Creates a new handle to this UDP socket, allowing for simultaneous
/// reads and writes of the socket.
///
/// The underlying UDP socket will not be closed until all handles to the
/// socket have been deallocated. Two concurrent reads will not receive
/// the same data. Instead, the first read will receive the first packet
/// received, and the second read will receive the second packet.
fn clone(&self) -> UdpSocket {
UdpSocket {
inner: self.inner.clone(),
}
}
}
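// Sketch of the clone semantics documented above (assumed usage of this
// pre-1.0 `old_io` API, not part of the original module):
//
//     let sock = UdpSocket::bind(addr).unwrap();
//     let mut reader2 = sock.clone();
//     let mut reader1 = sock;
//     // Read from reader1 and reader2 on two threads: any given datagram
//     // is delivered to exactly one of them, and the underlying socket is
//     // closed only after both handles have been dropped.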
impl sys_common::AsInner<UdpSocketImp> for UdpSocket {
fn as_inner(&self) -> &UdpSocketImp {
&self.inner
}
}
#[cfg(test)]
mod test {
use prelude::v1::*;
use sync::mpsc::channel;
use old_io::net::ip::*;
use old_io::test::*;
use old_io::{IoError, TimedOut, PermissionDenied, ShortWrite};
use super::*;
use thread::Thread;
// FIXME #11530 this fails on android because tests are run as root
#[cfg_attr(any(windows, target_os = "android"), ignore)]
#[test]
fn bind_error() {
let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
match UdpSocket::bind(addr) {
Ok(..) => panic!(),
Err(e) => assert_eq!(e.kind, PermissionDenied),
}
}
#[test]
fn socket_smoke_test_ip4() {
let server_ip = next_test_ip4();
let client_ip = next_test_ip4();
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
let _t = Thread::spawn(move|| {
match UdpSocket::bind(client_ip) {
Ok(ref mut client) => {
rx1.recv().unwrap();
client.send_to(&[99], server_ip).unwrap()
}
Err(..) => panic!()
}
tx2.send(()).unwrap();
});
match UdpSocket::bind(server_ip) {
Ok(ref mut server) => {
tx1.send(()).unwrap();
let mut buf = [0];
match server.recv_from(&mut buf) {
Ok((nread, src)) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
assert_eq!(src, client_ip);
}
Err(..) => panic!()
}
}
Err(..) => panic!()
}
rx2.recv().unwrap();
}
#[test]
fn socket_smoke_test_ip6() {
let server_ip = next_test_ip6();
let client_ip = next_test_ip6();
let (tx, rx) = channel::<()>();
let _t = Thread::spawn(move|| {
match UdpSocket::bind(client_ip) {
Ok(ref mut client) => {
rx.recv().unwrap();
client.send_to(&[99], server_ip).unwrap()
}
Err(..) => panic!()
}
});
match UdpSocket::bind(server_ip) {
Ok(ref mut server) => {
tx.send(()).unwrap();
let mut buf = [0];
match server.recv_from(&mut buf) {
Ok((nread, src)) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
assert_eq!(src, client_ip);
}
Err(..) => panic!()
}
}
Err(..) => panic!()
}
}
pub fn socket_name(addr: SocketAddr) {
let server = UdpSocket::bind(addr);
assert!(server.is_ok());
let mut server = server.unwrap();
// Make sure socket_name gives
// us the socket we bound to.
let so_name = server.socket_name();
assert!(so_name.is_ok());
assert_eq!(addr, so_name.unwrap());
}
#[test]
fn socket_name_ip4() {
socket_name(next_test_ip4());
}
#[test]
fn socket_name_ip6() {
socket_name(next_test_ip6());
}
#[test]
fn udp_clone_smoke() {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let mut sock1 = UdpSocket::bind(addr1).unwrap();
let sock2 = UdpSocket::bind(addr2).unwrap();
let _t = Thread::spawn(move|| {
let mut sock2 = sock2;
let mut buf = [0, 0];
assert_eq!(sock2.recv_from(&mut buf), Ok((1, addr1)));
assert_eq!(buf[0], 1);
sock2.send_to(&[2], addr1).unwrap();
});
let sock3 = sock1.clone();
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
let _t = Thread::spawn(move|| {
let mut sock3 = sock3;
rx1.recv().unwrap();
sock3.send_to(&[1], addr2).unwrap();
tx2.send(()).unwrap();
});
tx1.send(()).unwrap();
let mut buf = [0, 0];
assert_eq!(sock1.recv_from(&mut buf), Ok((1, addr2)));
rx2.recv().unwrap();
}
#[test]
fn udp_clone_two_read() {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let mut sock1 = UdpSocket::bind(addr1).unwrap();
let sock2 = UdpSocket::bind(addr2).unwrap();
let (tx1, rx) = channel();
let tx2 = tx1.clone();
let _t = Thread::spawn(move|| {
let mut sock2 = sock2;
sock2.send_to(&[1], addr1).unwrap();
rx.recv().unwrap();
sock2.send_to(&[2], addr1).unwrap();
rx.recv().unwrap();
});
let sock3 = sock1.clone();
let (done, rx) = channel();
let _t = Thread::spawn(move|| {
let mut sock3 = sock3;
let mut buf = [0, 0];
sock3.recv_from(&mut buf).unwrap();
tx2.send(()).unwrap();
done.send(()).unwrap();
});
let mut buf = [0, 0];
sock1.recv_from(&mut buf).unwrap();
tx1.send(()).unwrap();
rx.recv().unwrap();
}
#[test]
fn udp_clone_two_write() {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let mut sock1 = UdpSocket::bind(addr1).unwrap();
let sock2 = UdpSocket::bind(addr2).unwrap();
let (tx, rx) = channel();
let (serv_tx, serv_rx) = channel();
let _t = Thread::spawn(move|| {
let mut sock2 = sock2;
let mut buf = [0, 1];
rx.recv().unwrap();
match sock2.recv_from(&mut buf) {
Ok(..) => {}
Err(e) => panic!("failed receive: {}", e),
}
serv_tx.send(()).unwrap();
});
let sock3 = sock1.clone();
let (done, rx) = channel();
let tx2 = tx.clone();
let _t = Thread::spawn(move|| {
let mut sock3 = sock3;
match sock3.send_to(&[1], addr2) {
Ok(..) => { let _ = tx2.send(()); }
Err(..) => {}
}
done.send(()).unwrap();
});
match sock1.send_to(&[2], addr2) {
Ok(..) => { let _ = tx.send(()); }
Err(..) => {}
}
drop(tx);
rx.recv().unwrap();
serv_rx.recv().unwrap();
}
#[cfg(not(windows))] // FIXME #17553
#[test]
fn recv_from_timeout() {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let mut a = UdpSocket::bind(addr1).unwrap();
let a2 = UdpSocket::bind(addr2).unwrap();
let (tx, rx) = channel();
let (tx2, rx2) = channel();
let _t = Thread::spawn(move|| {
let mut a = a2;
assert_eq!(a.recv_from(&mut [0]), Ok((1, addr1)));
assert_eq!(a.send_to(&[0], addr1), Ok(()));
rx.recv().unwrap();
assert_eq!(a.send_to(&[0], addr1), Ok(()));
tx2.send(()).unwrap();
});
// Make sure that reads time out, but writes can continue
a.set_read_timeout(Some(20));
assert_eq!(a.recv_from(&mut [0]).err().unwrap().kind, TimedOut);
assert_eq!(a.recv_from(&mut [0]).err().unwrap().kind, TimedOut);
assert_eq!(a.send_to(&[0], addr2), Ok(()));
// Cloned handles should be able to block
let mut a2 = a.clone();
assert_eq!(a2.recv_from(&mut [0]), Ok((1, addr2)));
// Clearing the timeout should allow for receiving
a.set_timeout(None);
tx.send(()).unwrap();
assert_eq!(a2.recv_from(&mut [0]), Ok((1, addr2)));
// Make sure the child didn't die
rx2.recv().unwrap();
}
#[test]
fn send_to_timeout() {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let mut a = UdpSocket::bind(addr1).unwrap();
let _b = UdpSocket::bind(addr2).unwrap();
a.set_write_timeout(Some(1000));
for _ in 0u..100 {
match a.send_to(&[0;4*1024], addr2) {
Ok(()) | Err(IoError { kind: ShortWrite(..), .. }) => {},
Err(IoError { kind: TimedOut, .. }) => break,
Err(e) => panic!("other error: {}", e),
}
}
}
}<|fim▁end|> | pub fn recv_from(&mut self, buf: &mut [u8]) -> IoResult<(uint, SocketAddr)> {
self.inner.recv_from(buf)
}
|
<|file_name|>nn.py<|end_file_name|><|fim▁begin|>"""Neural network operations."""
from __future__ import absolute_import as _abs
from . import _make
def conv2d(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
weight_layout="OIHW",
out_layout="",
out_dtype=""):
r"""2D convolution.
This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.
In the default case, where the data_layout is `NCHW`
and weight_layout is `OIHW`, conv2d takes in
a data Tensor with shape `(batch_size, in_channels, height, width)`,
and a weight Tensor with shape `(channels, in_channels, kernel_size[0], kernel_size[1])`
to produce an output Tensor with the following rule:
.. math::
\mbox{out}[b, c, y, x] = \sum_{dy, dx, k}
\mbox{data}[b, k, \mbox{strides}[0] * y + dy, \mbox{strides}[1] * x + dx] *
\mbox{weight}[c, k, dy, dx]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(`NCHW` for data and `OIHW` for weight), perform the computation,
then convert to the out_layout.
Parameters
----------
data : relay.Expr
The input data to the operator.
weight : relay.Expr
The weight expressions.
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
The spatial of the convolution kernel.
data_layout : str, optional
Layout of the input.
weight_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
<|fim▁hole|> -------
result : relay.Expr
The computed result.
"""
return _make.conv2d(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
weight_layout, out_layout, out_dtype)
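# Illustrative sketch only (assumed helper, not part of the relay API): the
# standard output-size arithmetic implied by the convolution rule above.
def _conv2d_out_dim(size, kernel, stride, pad, dilation):
    """Spatial output size for one dimension of a 2D convolution."""
    return (size + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1
# e.g. a 32x32 input with a 3x3 kernel, stride 1, padding 1 and dilation 1
# keeps its spatial size: _conv2d_out_dim(32, 3, 1, 1, 1) == 32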
def softmax(data, axis):
r"""Computes softmax.
.. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: relay.Expr
The input data to the operator.
axis: int
The axis to sum over when computing softmax
"""
return _make.softmax(data, axis)
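# For intuition, a plain-Python reference of the formula above (assumed
# helper; not how relay evaluates the op):
def _softmax_reference(x):
    import math
    m = max(x)                               # shift by max for stability
    exps = [math.exp(v - m) for v in x]
    return [e / sum(exps) for e in exps]
# _softmax_reference([1.0, 2.0, 3.0]) ~= [0.090, 0.245, 0.665]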
def log_softmax(data, axis):
r"""Computes log softmax.
.. math::
\text{log_softmax}(x)_i = \log \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: relay.Expr
The input data to the operator.
axis: int
The axis to sum over when computing softmax
"""
return _make.log_softmax(data, axis)
def max_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False):
r"""2D maximum pooling operator.
This operator takes data as input and does 2D max value calculation
with in pool_size sized window by striding defined by stride
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w) and pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \max_{m=0, \ldots, kh-1} \max_{n=0, \ldots, kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing out shape.
This operator accepts data layout specification.
Parameters
----------
data : relay.Expr
The input data to the operator.
strides : tuple of int, optional
The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
ceil_mode : bool, optional
To enable or disable ceil while pooling.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.max_pool2d(data, pool_size, strides, padding,
layout, ceil_mode)
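# Illustrative sketch of the out-shape rule including ceil_mode (assumed
# helper, not part of the relay API):
def _pool_out_dim(size, kernel, stride, pad, ceil_mode=False):
    import math
    span = (size + 2 * pad - kernel) / float(stride)
    return int(math.ceil(span) if ceil_mode else math.floor(span)) + 1
# e.g. _pool_out_dim(7, 2, 2, 0) == 3, while ceil_mode=True gives 4: the
# trailing, partially-filled window is kept instead of dropped.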
def avg_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False,
count_include_pad=False):
r"""2D average pooling operator.
This operator takes data as input and does 2D average value calculation
with in pool_size sized window by striding defined by stride
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w), pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \frac{1}{kh * kw} \sum_{m=0}^{kh-1} \sum_{n=0}^{kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing out shape.
count_include_pad indicates including or excluding padded input values in computation.
This operator accepts data layout specification.
Parameters
----------
data : relay.Expr
The input data to the operator.
strides : tuple of int, optional
The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
ceil_mode : bool, optional
To enable or disable ceil while pooling.
count_include_pad : bool, optional
To include padding to compute the average.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.avg_pool2d(data, pool_size, strides, padding,
layout, ceil_mode, count_include_pad)
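# count_include_pad in one line (illustrative sketch, assumed helper): for a
# window that overlaps the padding, the divisor is the full window size when
# True, but only the count of real input elements when False.
def _avg_pool_divisor(kh, kw, real_elems, count_include_pad):
    return kh * kw if count_include_pad else real_elems
# e.g. the corner window of a 2x2 pool with padding 1 divides by
# _avg_pool_divisor(2, 2, 1, False) == 1 rather than 4.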
def global_max_pool2d(data,
layout="NCHW"):
r"""2D global maximum pooling operator.
This operator takes data as input and does 2D max value calculation
across each window represented by WxH.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \max_{m=0, \ldots, h} \max_{n=0, \ldots, w}
\mbox{data}(b, c, m, n)
Parameters
----------
data : relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.global_max_pool2d(data, layout)
def global_avg_pool2d(data,
layout="NCHW"):
r"""2D global average pooling operator.
This operator takes data as input and does 2D average value calculation
across each window represented by WxH.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \frac{1}{h * w} \sum_{m=0}^{h-1} \sum_{n=0}^{w-1}
\mbox{data}(b, c, m, n)
Parameters
----------
data : relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.global_avg_pool2d(data, layout)
def upsampling(data,
scale=1,
layout="NCHW",
method="NEAREST_NEIGHBOR"):
"""Upsampling.
This operator takes data as input and does 2D scaling to the given scale factor.
In the default case, where the data_layout is `NCHW`
with data of shape (n, c, h, w)
out will have a shape (n, c, h*scale, w*scale)
method indicates the algorithm to be used while calculating the output value
and method can be one of ("BILINEAR", "NEAREST_NEIGHBOR")
Parameters
----------
data : relay.Expr
The input data to the operator.
scale : int, optional
The scale factor for upsampling.
layout : str, optional
Layout of the input.
method : str, optional
Scale method to be used [NEAREST_NEIGHBOR, BILINEAR].
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.upsampling(data, scale, layout, method)
def batch_flatten(data):
"""BatchFlatten.
This operator flattens all the dimensions except for the batch dimension,
which results in a 2D output.
For data with shape ``(d1, d2, ..., dk)``
batch_flatten(data) returns reshaped output of shape ``(d1, d2*...*dk)``.
Parameters
----------
data : relay.Expr
The input data to the operator.
Returns
-------
result: relay.Expr
The Flattened result.
"""
return _make.batch_flatten(data)<|fim▁end|> | out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns |
<|file_name|>nxtWin5Control1.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Copyright (c) 2014 l8orre
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
#import sys
from PyQt4 import QtGui, Qt, QtCore
from PyQt4.QtCore import QObject , pyqtSignal, pyqtSlot, SIGNAL
from PyQt4.QtCore import QObject
#import numpy as np
from os import listdir as ls
#from PyQt4.Qt import QPixmap
import os
import time
import nxtPwt
#import requests, json
class nxtWin5Control(QObject):
""" class nxtWin5Control(): here"""
def __init__(self, app): #, application
super(QObject, self, ).__init__()
import nxtPwt.ui_nxtWin5 as nxtWin5 # the QtCreator-generated Widget.py!!
ui = nxtWin5.Ui_MainWindow() # Ui_MainWindow() is the autogenerated class in m2def.py
self.ui_nxtWin5 = ui
self.app = app #
# self.userDataContainer = self.app.nxtMain.userDataContainer
self.server = ''
self.account =''
self.secretPhrase = ''
#self.app.algo.ui_nxtWin = ui # make the ui_AlgoWin known to the Algo!!! this is N( at the algo when init'ing
self.app.nxtWin5 = self # make this WinControl1 known
def init(self): #, ui_AlgoWin):
""" the AlgoWin """
# maybe this gives trouble w/ MainWin, self.app.algo = Algo1(self.app, ui)
### re-init the algo here!!!
ui = self.ui_nxtWin5
<|fim▁hole|>
############################
############################
############################
########## Window Maintenance
def show(self):
self.uiFrame = QtGui.QMainWindow()
self.ui_nxtWin5.setupUi(self.uiFrame)
self.init() #self.ui_AlgoWin)
self.uiFrame.show()<|fim▁end|> | |
<|file_name|>parser.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import common
import sys, os, traceback
import time
import random
import re
import urllib
import string
from string import lower
from entities.CList import CList
from entities.CItemInfo import CItemInfo
from entities.CListItem import CListItem
from entities.CRuleItem import CRuleItem
import customReplacements as cr
import customConversions as cc
from utils import decryptionUtils as crypt
from utils import datetimeUtils as dt
from utils import rowbalance as rb
from utils.fileUtils import findInSubdirectory, getFileContent, getFileExtension
from utils.scrapingUtils import findVideoFrameLink, findContentRefreshLink, findRTMP, findJS, findPHP, getHostName, findEmbedPHPLink
from common import getHTML
class ParsingResult(object):
class Code:
SUCCESS = 0
CFGFILE_NOT_FOUND = 1
CFGSYNTAX_INVALID = 2
WEBREQUEST_FAILED = 3
def __init__(self, code, itemsList):
self.code = code
self.list = itemsList
self.message = None
class Parser(object):
"""
returns a list of items
"""
def parse(self, lItem):
url = lItem['url']
cfg = lItem['cfg']
ext = getFileExtension(url)
successfullyScraped = True
tmpList = None
if lItem['catcher']:
catcher = lItem['catcher']
cfg = os.path.join(common.Paths.catchersDir, '__' + catcher + '.cfg')
tmpList = self.__loadLocal(cfg, lItem)
if tmpList and len(tmpList.rules) > 0:
successfullyScraped = self.__loadRemote(tmpList, lItem)
else:
if ext == 'cfg':
tmpList = self.__loadLocal(url, lItem)
if tmpList and tmpList.start != '' and len(tmpList.rules) > 0:
lItem['url'] = tmpList.start
successfullyScraped = self.__loadRemote(tmpList, lItem)
elif cfg:
tmpList = self.__loadLocal(cfg, lItem)
if tmpList and len(tmpList.rules) > 0:
successfullyScraped = self.__loadRemote(tmpList, lItem)
# autoselect
if tmpList and tmpList.skill.find('autoselect') != -1 and len(tmpList.items) == 1:
m = tmpList.items[0]
m_type = m['type']
if m_type == 'rss':
common.log('Autoselect - ' + m['title'])
lItem = m
tmpList = self.parse(lItem).list
if not tmpList:
return ParsingResult(ParsingResult.Code.CFGSYNTAX_INVALID, None)
if tmpList and successfullyScraped == False:
return ParsingResult(ParsingResult.Code.WEBREQUEST_FAILED, tmpList)
# Remove duplicates
if tmpList.skill.find('allowDuplicates') == -1:
urls = []
for i in range(len(tmpList.items)-1,-1,-1):
item = tmpList.items[i]
tmpUrl = item['url']
tmpCfg = item['cfg']
if not tmpCfg:
tmpCfg = ''
if not urls.__contains__(tmpUrl + '|' + tmpCfg):
urls.append(tmpUrl + '|' + tmpCfg)
else:
tmpList.items.remove(item)
return ParsingResult(ParsingResult.Code.SUCCESS, tmpList)
"""
loads cfg, creates list and sets up rules for scraping
"""
def __loadLocal(self, filename, lItem = None):
params = []
#get Parameters
if filename.find('@') != -1:
params = filename.split('@')
filename = params.pop(0)
# get cfg file
cfg = filename
if not os.path.exists(cfg):
cfg = os.path.join(common.Paths.modulesDir, filename)
if not os.path.exists(cfg):
tmpPath = os.path.dirname(os.path.join(common.Paths.modulesDir, lItem["definedIn"]))
cfg = os.path.join(tmpPath ,filename)
if not os.path.exists(cfg):
srchFilename = filename
if filename.find('/') > -1:
srchFilename = srchFilename.split('/')[1]
try:
cfg = findInSubdirectory(srchFilename, common.Paths.modulesDir)
except:
try:
cfg = findInSubdirectory(srchFilename, common.Paths.favouritesFolder)
except:
try:
cfg = findInSubdirectory(srchFilename, common.Paths.customModulesDir)
except:
common.log('File not found: ' + srchFilename)
return None
#load file and apply parameters
data = getFileContent(cfg)
data = cr.CustomReplacements().replace(os.path.dirname(cfg), data, lItem, params)
#log
msg = 'Local file ' + filename + ' opened'
if len(params) > 0:
msg += ' with Parameter(s): '
msg += ",".join(params)
common.log(msg)
outputList = self.__parseCfg(filename, data, lItem)
return outputList
"""
scrape items according to rules and add them to the list
"""
def __loadRemote(self, inputList, lItem):
try:
inputList.curr_url = lItem['url']
count = 0
i = 1
maxits = 2 # 1 optimistic + 1 demystified
ignoreCache = False
demystify = False
back = ''
startUrl = inputList.curr_url
#print inputList, lItem
while count == 0 and i <= maxits:
if i > 1:
ignoreCache = True
demystify = True
# Trivial: url is from known streamer
if back:
lItem['referer'] = back
items = self.__parseHtml(inputList.curr_url, '"' + inputList.curr_url + '"', inputList.rules, inputList.skill, inputList.cfg, lItem)
count = len(items)
# try to find items in html source code
if count == 0:
referer = ''
if lItem['referer']:
referer = lItem['referer']
data = common.getHTML(inputList.curr_url, None, referer, False, False, ignoreCache, demystify)
if data == '':
return False
msg = 'Remote URL ' + inputList.curr_url + ' opened'
if demystify:
msg += ' (demystified)'
common.log(msg)
if inputList.section != '':
section = inputList.section
data = self.__getSection(data, section)
if lItem['section']:
section = lItem['section']
data = self.__getSection(data, section)
items = self.__parseHtml(inputList.curr_url, data, inputList.rules, inputList.skill, inputList.cfg, lItem)
count = len(items)
common.log(' -> ' + str(count) + ' item(s) found')
# find rtmp stream
#common.log('Find rtmp stream')
if count == 0:
item = self.__findRTMP(data, startUrl, lItem)
if item:
items = []
items.append(item)
count = 1
# find embedding javascripts
#common.log('Find embedding javascripts')
if count == 0:
item = findJS(data)
if item:
firstJS = item[0]
streamId = firstJS[0]
jsUrl = firstJS[1]
if not jsUrl.startswith('http://'):
jsUrl = urllib.basejoin(startUrl,jsUrl)
streamerName = getHostName(jsUrl)
jsSource = getHTML(jsUrl, None, startUrl, True, False)
phpUrl = findPHP(jsSource, streamId)
if phpUrl:
data = getHTML(phpUrl, None, startUrl, True, True)
item = self.__findRTMP(data, phpUrl, lItem)
if item:
if streamerName:
item['title'] = item['title'].replace('RTMP', streamerName)
items = []
items.append(item)
count = 1
else:
red = phpUrl
common.log(' -> Redirect: ' + red)
if back == red:
break
back = inputList.curr_url
inputList.curr_url = red
common.log(str(len(inputList.items)) + ' items ' + inputList.cfg + ' -> ' + red)
startUrl = red
continue
# find redirects
#common.log('find redirects')
if count == 0:
red = self.__findRedirect(startUrl, inputList.curr_url)
if startUrl == red:
common.log(' -> No redirect found')
else:
common.log(' -> Redirect: ' + red)
if back == red:
break
back = inputList.curr_url
inputList.curr_url = red
common.log(str(len(inputList.items)) + ' items ' + inputList.cfg + ' -> ' + red)
startUrl = red
i = 0
i += 1
if count != 0:
inputList.items = inputList.items + items
except:
traceback.print_exc(file = sys.stdout)
return False
return True
def __findRTMP(self, data, pageUrl, lItem):
rtmp = findRTMP(pageUrl, data)
if rtmp:
item = CListItem()
item['title'] = 'RTMP* - ' + rtmp[1]
item['type'] = 'video'
item['url'] = rtmp[0] + ' playPath=' + rtmp[1] + ' swfUrl=' + rtmp[2] +' swfVfy=1 live=true pageUrl=' + pageUrl
item.merge(lItem)
return item
return None
def __getSection(self, data, section):
        p = re.compile(section, re.IGNORECASE | re.DOTALL | re.UNICODE)
m = p.search(data)
if m:
return m.group(0)
else:
common.log(' -> Section could not be found:' + section)
return data
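    # Illustrative call (the pattern is hypothetical, not from a shipped cfg):
    #   self.__getSection(html, "<div id='content'>.*?</div>")
    # returns the first match of the pattern, or logs a warning and returns
    # the input unchanged when nothing matches.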
    def __findRedirect(self, page, referer='', demystify=False):
        data = common.getHTML(page, None, referer = referer, xml = False, mobile=False, demystify = demystify)
        link = findVideoFrameLink(page, data) or findContentRefreshLink(data) or findEmbedPHPLink(data)
        if link:
            return link
        if not demystify:
            return self.__findRedirect(page, referer, True)
        return page
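    # Resolution order above: video frame links, then content-refresh links,
    # then embed.php-style links; if none match the plain page source, one
    # extra pass is made over the demystified (deobfuscated) source.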
def __parseCfg(self, cfgFile, data, lItem):
tmpList = CList()
data = data.replace('\r\n', '\n').split('\n')
items = []
tmp = None
hasOwnCfg = False
for m in data:
if m and m[0] != '#':
index = m.find('=')
if index != -1:
                    key = m[:index].lower().strip()
value = m[index+1:]
index = value.find('|')
if value[:index] == 'sports.devil.locale':
value = common.translate(int(value[index+1:]))
elif value[:index] == 'sports.devil.image':
value = os.path.join(common.Paths.imgDir, value[index+1:])
if key == 'start':
tmpList.start = value
elif key == 'section':
tmpList.section = value
elif key == 'sort':
tmpList.sort = value
elif key == 'skill':
tmpList.skill = value
elif key == 'catcher':
tmpList.catcher = value
elif key == 'item_infos':
rule_tmp = CRuleItem()
hasOwnCfg = False
rule_tmp.infos = value
elif key == 'item_order':
rule_tmp.order = value
elif key == 'item_skill':
rule_tmp.skill = value
elif key == 'item_curr':
rule_tmp.curr = value
elif key == 'item_precheck':
rule_tmp.precheck = value
elif key.startswith('item_info'):
tmpkey = key[len('item_info'):]
if tmpkey == '_name':
info_tmp = CItemInfo()
info_tmp.name = value
if value == 'cfg':
hasOwnCfg = True
elif tmpkey == '_from':<|fim▁hole|> info_tmp.rule = value
elif tmpkey == '_default':
info_tmp.default = value
elif tmpkey == '_convert':
info_tmp.convert.append(value)
elif tmpkey == '_build':
info_tmp.build = value
rule_tmp.info_list.append(info_tmp)
elif key == 'item_url_build':
rule_tmp.url_build = value
if tmpList.catcher != '':
refInf = CItemInfo()
refInf.name = 'referer'
refInf.build = value
rule_tmp.info_list.append(refInf)
if not hasOwnCfg:
refInf = CItemInfo()
refInf.name = 'catcher'
refInf.build = tmpList.catcher
rule_tmp.info_list.append(refInf)
tmpList.rules.append(rule_tmp)
# static menu items (without regex)
elif key == 'title':
tmp = CListItem()
tmp['title'] = value
if tmpList.skill.find('videoTitle') > -1:
tmp['videoTitle'] = value
elif key == 'url':
tmp['url'] = value
if lItem:
tmp.merge(lItem)
if tmpList.catcher != '':
tmp['referer'] = value
if not hasOwnCfg:
tmp['catcher'] = tmpList.catcher
tmp['definedIn'] = cfgFile
items.append(tmp)
tmp = None
elif tmp != None:
if key == 'cfg':
hasOwnCfg = True
tmp[key] = value
tmpList.items = items
tmpList.cfg = cfgFile
return tmpList
def __parseHtml(self, url, data, rules, skills, definedIn, lItem):
#common.log('_parseHtml called' + url)
items = []
for item_rule in rules:
#common.log('rule: ' + item_rule.infos)
if not hasattr(item_rule, 'precheck') or (item_rule.precheck in data):
                revid = re.compile(item_rule.infos, re.IGNORECASE | re.DOTALL | re.MULTILINE | re.UNICODE)
for reinfos in revid.findall(data):
tmp = CListItem()
if lItem['referer']:
tmp['referer'] = lItem['referer']
if item_rule.order.find('|') != -1:
infos_names = item_rule.order.split('|')
infos_values = list(reinfos)
i = 0
for name in infos_names:
tmp[name] = infos_values[i]
i = i+1
else:
tmp[item_rule.order] = reinfos
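                    # e.g. item_order 'title|url' assigns the two regex groups
                    # from item_infos to tmp['title'] and tmp['url'] in order;
                    # a single name receives the match directly.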
for info in item_rule.info_list:
info_value = tmp[info.name]
if info_value:
if info.build.find('%s') != -1:
tmpVal = info.build % info_value
tmp[info.name] = tmpVal
continue
if info.build.find('%s') != -1:
if info.src.__contains__('+'):
tmpArr = info.src.split('+')
src = ''
for t in tmpArr:
t = t.strip()
if t.find('\'') != -1:
src = src + t.strip('\'')
else:
src = src + tmp[t]
elif info.src.__contains__('||'):
variables = info.src.split('||')
src = firstNonEmpty(tmp, variables)
else:
src = tmp[info.src]
if src and info.convert != []:
tmp['referer'] = url
src = self.__parseCommands(tmp, src, info.convert)
if isinstance(src, dict):
for dKey in src:
tmp[dKey] = src[dKey]
src = src.values()[0]
info_value = info.build % (src)
else:
info_value = info.build
tmp[info.name] = info_value
if tmp['url']:
tmp['url'] = item_rule.url_build % (tmp['url'])
else:
tmp['url'] = url
tmp.merge(lItem)
if item_rule.skill.find('append') != -1:
tmp['url'] = url + tmp['url']
if item_rule.skill.find('space') != -1:
tmp['title'] = ' %s ' % tmp['title'].strip()
if skills.find('videoTitle') > -1:
tmp['videoTitle'] = tmp['title']
tmp['definedIn'] = definedIn
items.append(tmp)
return items
def __parseCommands(self, item, src, convCommands):
common.log('_parseCommands called')
# helping function
def parseCommand(txt):
command = {"command": txt, "params": ""}
if txt.find("(") > -1:
command["command"] = txt[0:txt.find("(")]
command["params"] = txt[len(command["command"]) + 1:-1]
return command
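        # e.g. parseCommand("replace('a','b')") returns
        #      {'command': 'replace', 'params': "'a','b'"}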
for convCommand in convCommands:
pComm = parseCommand(convCommand)
command = pComm["command"]
params = pComm["params"]
            if params.find('@REFERER@') != -1:
referer = item['referer']
if not referer:
referer = ''
params = params.replace('@REFERER@', referer)
if command == 'convDate':
src = cc.convDate(params, src)
elif command == 'convTimestamp':
src = cc.convTimestamp(params, src)
elif command == 'select':
src = cc.select(params, src)
if not src:
continue
elif command == 'unicode_escape':
src = src.decode('unicode-escape')
elif command == 'replaceFromDict':
dictName = str(params.strip('\''))
path = os.path.join(common.Paths.dictsDir, dictName + '.txt')
if not (os.path.exists(path)):
common.log('Dictionary file not found: ' + path)
continue
src = cc.replaceFromDict(path, src)
elif command == 'time':
src = time.time()
elif command == 'timediff':
src = dt.timediff(src,params.strip('\''))
elif command == 'offset':
src = cc.offset(params, src)
elif command == 'getSource':
src = cc.getSource(params, src)
elif command == 'quote':
try:
src = urllib.quote(params.strip("'").replace('%s', src),'')
except:
cleanParams = params.strip("'")
cleanParams = cleanParams.replace("%s",src)
src = urllib.quote(cleanParams.encode('utf-8'),'')
elif command == 'unquote':
src = urllib.unquote(params.strip("'").replace('%s', src))
elif command == 'parseText':
src = cc.parseText(item, params, src)
elif command == 'getInfo':
src = cc.getInfo(item, params, src)
elif command == 'getXML':
src = cc.getInfo(item, params, src, xml=True)
elif command == 'getMobile':
src = cc.getInfo(item, params, src, mobile=True)
elif command == 'decodeBase64':
src = cc.decodeBase64(src)
elif command == 'decodeRawUnicode':
src = cc.decodeRawUnicode(src)
elif command == 'resolve':
src = cc.resolve(src)
elif command == 'decodeXppod':
src = cc.decodeXppod(src)
elif command == 'decodeXppodHLS':
src = cc.decodeXppod_hls(src)
elif command == 'replace':
src = cc.replace(params, src)
elif command == 'replaceRegex':
src = cc.replaceRegex(params, src)
elif command == 'ifEmpty':
src = cc.ifEmpty(item, params, src)
elif command == 'isEqual':
src = cc.isEqual(item, params, src)
elif command == 'ifFileExists':
src = cc.ifFileExists(item, params, src)
elif command == 'ifExists':
src = cc.ifExists(item, params, src)
elif command == 'encryptJimey':
src = crypt.encryptJimey(params.strip("'").replace('%s', src))
elif command == 'gAesDec':
src = crypt.gAesDec(src,item.infos[params])
elif command == 'getCookies':
src = cc.getCookies(params, src)
elif command == 'destreamer':
src = crypt.destreamer(params.strip("'").replace('%s', src))
elif command == 'unixTimestamp':
src = dt.getUnixTimestamp()
elif command == 'rowbalance':
src = rb.get()
elif command == 'urlMerge':
src = cc.urlMerge(params, src)
elif command == 'translate':
try:
src = common.translate(int(src))
except:
pass
elif command == 'camelcase':
src = string.capwords(string.capwords(src, '-'))
elif command == 'lowercase':
                src = src.lower()
elif command == 'reverse':
src = src[::-1]
elif command == 'demystify':
                common.log('demystify')
                src = crypt.doDemystify(src)
                common.log('after demystify: ' + str(src))
elif command == 'random':
paramArr = params.split(',')
minimum = int(paramArr[0])
maximum = int(paramArr[1])
src = str(random.randrange(minimum,maximum))
elif command == 'debug':
common.log('Debug from cfg file: ' + src)
elif command == 'divide':
paramArr = params.split(',')
a = paramArr[0].strip().strip("'").replace('%s', src)
a = resolveVariable(a, item)
b = paramArr[1].strip().strip("'").replace('%s', src)
b = resolveVariable(b, item)
if not a or not b:
continue
a = int(a)
b = int(b)
try:
src = str(a/b)
except:
pass
return src
def resolveVariable(varStr, item):
if varStr.startswith('@') and varStr.endswith('@'):
return item.getInfo(varStr.strip('@'))
return varStr
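# e.g. resolveVariable('@title@', item) looks up item.getInfo('title'), while
# resolveVariable("42", item) returns "42" unchanged.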
def firstNonEmpty(tmp, variables):
for v in variables:
vClean = v.strip()
if vClean.find("'") != -1:
vClean = vClean.strip("'")
else:
vClean = tmp.getInfo(vClean)
if vClean != '':
return vClean
return ''<|fim▁end|> | info_tmp.src = value
elif tmpkey == '': |
<|file_name|>grin.rs<|end_file_name|><|fim▁begin|>// Copyright 2021 The Grin Developers
//<|fim▁hole|>// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Grin P2P / API server
pub mod dandelion_monitor;
pub mod seed;
pub mod server;
pub mod sync;<|fim▁end|> | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. |
<|file_name|>IndexFactory.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2009-2015 Tilmann Zaeschke. All rights reserved.
*
* This file is part of ZooDB.
*
* ZooDB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ZooDB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ZooDB. If not, see <http://www.gnu.org/licenses/>.
*
* See the README and COPYING files for further information.
*/
package org.zoodb.internal.server.index;
import org.zoodb.internal.server.DiskIO.PAGE_TYPE;
import org.zoodb.internal.server.StorageChannel;
public class IndexFactory {
/**<|fim▁hole|> * @return a new index
*/
public static LongLongIndex createIndex(PAGE_TYPE type, StorageChannel storage) {
return new PagedLongLong(type, storage);
}
/**
* @param type
* @param storage
* @param pageId page id of the root page
* @return an index reconstructed from disk
*/
public static LongLongIndex loadIndex(PAGE_TYPE type, StorageChannel storage, int pageId) {
return new PagedLongLong(type, storage, pageId);
}
/**
* @param type
* @param storage
* @return a new index
*/
public static LongLongIndex.LongLongUIndex createUniqueIndex(PAGE_TYPE type,
StorageChannel storage) {
return new PagedUniqueLongLong(type, storage);
}
/**
* @param type
* @param storage
* @param pageId page id of the root page
* @return an index reconstructed from disk
*/
public static LongLongIndex.LongLongUIndex loadUniqueIndex(PAGE_TYPE type,
StorageChannel storage, int pageId) {
return new PagedUniqueLongLong(type, storage, pageId);
}
/**
* EXPERIMENTAL! Index that has bit width of key and value as parameters.
* @param type
* @param storage
* @return a new index
*/
public static LongLongIndex.LongLongUIndex createUniqueIndex(PAGE_TYPE type,
StorageChannel storage, int keySize, int valSize) {
return new PagedUniqueLongLong(type, storage, keySize, valSize);
}
/**
* EXPERIMENTAL! Index that has bit width of key and value as parameters.
* @param type
* @param storage
* @param pageId page id of the root page
* @return an index reconstructed from disk
*/
public static LongLongIndex.LongLongUIndex loadUniqueIndex(PAGE_TYPE type,
StorageChannel storage, int pageId, int keySize, int valSize) {
return new PagedUniqueLongLong(type, storage, pageId, keySize, valSize);
}
}<|fim▁end|> | * @param type
* @param storage |
<|file_name|>control-flow.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![feature(generators, generator_trait)]
use std::ops::{GeneratorState, Generator};
fn finish<T>(mut amt: usize, mut t: T) -> T::Return
where T: Generator<Yield = ()>
{
loop {
match unsafe { t.resume() } {
GeneratorState::Yielded(()) => amt = amt.checked_sub(1).unwrap(),
GeneratorState::Complete(ret) => {
assert_eq!(amt, 0);
return ret
}
}
}<|fim▁hole|>
fn main() {
finish(1, || yield);
finish(8, || {
for _ in 0..8 {
yield;
}
});
finish(1, || {
if true {
yield;
} else {
}
});
finish(1, || {
if false {
} else {
yield;
}
});
finish(2, || {
if { yield; false } {
yield;
panic!()
}
yield
});
}<|fim▁end|> |
} |
<|file_name|>currency.js<|end_file_name|><|fim▁begin|>dojo.provide("tests.currency");
dojo.require("dojo.currency");
tests.register("tests.currency",
[
{
// Test formatting and parsing of currencies in various locales pre-built in dojo.cldr
// NOTE: we can't set djConfig.extraLocale before bootstrapping unit tests, so directly
// load resources here for specific locales:
name: "currency",
setUp: function(){
var partLocaleList = ["en-us", "en-ca", "de-de"];
for(var i = 0 ; i < partLocaleList.length; i ++){
dojo.requireLocalization("dojo.cldr","currency",partLocaleList[i]);
dojo.requireLocalization("dojo.cldr","number",partLocaleList[i]);
}
},
runTest: function(t){
t.is("\u20ac123.45", dojo.currency.format(123.45, {currency: "EUR", locale: "en-us"}));
t.is("$123.45", dojo.currency.format(123.45, {currency: "USD", locale: "en-us"}));
t.is("$1,234.56", dojo.currency.format(1234.56, {currency: "USD", locale: "en-us"}));
t.is("US$123.45", dojo.currency.format(123.45, {currency: "USD", locale: "en-ca"}));
t.is("$123.45", dojo.currency.format(123.45, {currency: "CAD", locale: "en-ca"}));
t.is("Can$123.45", dojo.currency.format(123.45, {currency: "CAD", locale: "en-us"}));
t.is("123,45 \u20ac", dojo.currency.format(123.45, {currency: "EUR", locale: "de-de"}));
t.is("1.234,56 \u20ac", dojo.currency.format(1234.56, {currency: "EUR", locale: "de-de"}));
// There is no special currency symbol for ADP, so expect the ISO code instead
t.is("ADP123", dojo.currency.format(123, {currency: "ADP", locale: "en-us"}));<|fim▁hole|> t.is(123.45, dojo.currency.parse("$123.45", {currency: "USD", locale: "en-us"}));
t.is(1234.56, dojo.currency.parse("$1,234.56", {currency: "USD", locale: "en-us"}));
t.is(123.45, dojo.currency.parse("123,45 \u20ac", {currency: "EUR", locale: "de-de"}));
t.is(1234.56, dojo.currency.parse("1.234,56 \u20ac", {currency: "EUR", locale: "de-de"}));
t.is(1234.56, dojo.currency.parse("1.234,56\u20ac", {currency: "EUR", locale: "de-de"}));
t.is(1234, dojo.currency.parse("$1,234", {currency: "USD", locale: "en-us"}));
t.is(1234, dojo.currency.parse("$1,234", {currency: "USD", fractional: false, locale: "en-us"}));
t.t(isNaN(dojo.currency.parse("$1,234", {currency: "USD", fractional: true, locale: "en-us"})));
}
}
]
);<|fim▁end|> | |
<|file_name|>a744.js<|end_file_name|><|fim▁begin|>//~ name a744
alert(a744);<|fim▁hole|><|fim▁end|> |
//~ component a745.js |
<|file_name|>org.w3c.dom.DOMConfiguration.d.ts<|end_file_name|><|fim▁begin|>declare namespace org {
namespace w3c {
namespace dom {
interface DOMConfiguration {
setParameter(arg0: java.lang.String | string, arg1: java.lang.Object | any): void
getParameter(arg0: java.lang.String | string): java.lang.Object
canSetParameter(arg0: java.lang.String | string, arg1: java.lang.Object | any): boolean
getParameterNames(): org.w3c.dom.DOMStringList
}
}
}<|fim▁hole|><|fim▁end|> | } |
<|file_name|>authenticate.js<|end_file_name|><|fim▁begin|>/**<|fim▁hole|>
 * This method starts the authentication workflow.
 * It should be called by applications that require authentication.
*
* Author: Yuriy Movchan Date: 11/06/2013
*/
var uuid = require('uuid');
var async = require('async');
var oxutil = require('../util/util.js');
var state = require('../shared/state.js');
var push = require('../push/push.js');
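// Hypothetical wiring (route path, app object and variable names are
// assumptions, not part of this module): an Express-style app could mount
// the handler below as
//   app.get('/authenticate/:deployment_id/:user_name', function(req, res) {
//       authenticate.rest_api(req, res, authenticationStore, applicationService, deviceService);
//   });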
exports.rest_api = function(req, res, authenticationStore, applicationService, deviceService) {
console.log("Authenticate: '" + req.params.deployment_id + "'", "user: '" + req.params.user_name + "'");
// Load device and application entries
async.series([ function(done) {
deviceService.getDeviceById(req.params.deployment_id, function(found_deployment_entry) {
deployment_entry = found_deployment_entry;
done();
});
}, function(done) {
if (deployment_entry) {
applicationService.getApplication(deployment_entry.oxPushApplication, function(found_application_entry) {
application_entry = found_application_entry;
done();
});
} else {
console.warn("Failed to find deployment entry: '%s'", req.params.deployment_id);
oxutil.sendFailedJsonResponse(res);
done();
}
} ], function() {
if (application_entry && deployment_entry) {
var application_configuration = JSON.parse(application_entry.oxPushApplicationConf);
// TODO: Validate application_ip and req.ip
var authentication_id = uuid.v1();
var authentication_entry = {
'authentication_id' : authentication_id,
'authentication_time' : Date.now(),
'expires_in' : 60,
'expires_at' : Date.now() + 60 * 1000,
'clean_up_at' : Date.now() + 180 * 1000,
'application_id' : application_entry.oxId,
'application_name' : application_configuration.name,
'application_description' : application_configuration.description,
'application_ip' : req.ip,
'user_name' : req.params.user_name,
'authentication_status' : state.PENDING,
};
authenticationStore.set(authentication_id, authentication_entry);
// Send message to device
var device_configuration = JSON.parse(deployment_entry.oxPushDeviceConf);
try {
push.sendAuthenticationMessageToDevice(device_configuration, authentication_id);
} catch (err) {
console.log("Failed to send notification message to device: '" + device_configuration.device_uuid);
}
console.log("Initialized authentication process: '" + authentication_id + "' for application: '"
+ authentication_entry.application_name + "'");
oxutil.sendJsonResponse(res, {
authentication_id : authentication_entry.authentication_id,
expires_in : authentication_entry.expires_in,
result : true,
});
} else {
console.warn("Failed to find application entry: '%s'", deployment_entry.oxPushApplication);
oxutil.sendFailedJsonResponse(res);
}
});
};<|fim▁end|> | |
<|file_name|>fixer_util.py<|end_file_name|><|fim▁begin|>"""
Utility functions from 2to3, 3to2 and python-modernize (and some home-grown
ones).
Licences:
2to3: PSF License v2
3to2: Apache Software License (from 3to2/setup.py)
python-modernize licence: BSD (from python-modernize/LICENSE)
"""
from lib2to3.fixer_util import (FromImport, Newline, is_import,
find_root, does_tree_import, Comma)
from lib2to3.pytree import Leaf, Node
from lib2to3.pygram import python_symbols as syms, python_grammar
from lib2to3.pygram import token
from lib2to3.fixer_util import (Node, Call, Name, syms, Comma, Number)
import re
## These functions are from 3to2 by Joe Amenta:
def Star(prefix=None):
return Leaf(token.STAR, u'*', prefix=prefix)
def DoubleStar(prefix=None):
return Leaf(token.DOUBLESTAR, u'**', prefix=prefix)
def Minus(prefix=None):
return Leaf(token.MINUS, u'-', prefix=prefix)
def commatize(leafs):
u"""
Accepts/turns: (Name, Name, ..., Name, Name)
Returns/into: (Name, Comma, Name, Comma, ..., Name, Comma, Name)
"""
new_leafs = []
for leaf in leafs:
new_leafs.append(leaf)
new_leafs.append(Comma())
del new_leafs[-1]
return new_leafs
def indentation(node):
u"""
Returns the indentation for this node
Iff a node is in a suite, then it has indentation.
"""
while node.parent is not None and node.parent.type != syms.suite:
node = node.parent
if node.parent is None:<|fim▁hole|> return u""
# The first three children of a suite are NEWLINE, INDENT, (some other node)
# INDENT.value contains the indentation for this suite
# anything after (some other node) has the indentation as its prefix.
if node.type == token.INDENT:
return node.value
elif node.prev_sibling is not None and node.prev_sibling.type == token.INDENT:
return node.prev_sibling.value
elif node.prev_sibling is None:
return u""
else:
return node.prefix
def indentation_step(node):
u"""
Dirty little trick to get the difference between each indentation level
Implemented by finding the shortest indentation string
(technically, the "least" of all of the indentation strings, but
tabs and spaces mixed won't get this far, so those are synonymous.)
"""
r = find_root(node)
# Collect all indentations into one set.
all_indents = set(i.value for i in r.pre_order() if i.type == token.INDENT)
if not all_indents:
# nothing is indented anywhere, so we get to pick what we want
return u" " # four spaces is a popular convention
else:
return min(all_indents)
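# e.g. if the tree contains INDENT values u"    " and u"        ", the
# shortest one, u"    ", is returned as the per-level indentation step.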
def suitify(parent):
u"""
Turn the stuff after the first colon in parent's children
into a suite, if it wasn't already
"""
for node in parent.children:
if node.type == syms.suite:
            # already in the preferred format, do nothing
return
# One-liners have no suite node, we have to fake one up
for i, node in enumerate(parent.children):
if node.type == token.COLON:
break
else:
raise ValueError(u"No class suite and no ':'!")
# Move everything into a suite node
suite = Node(syms.suite, [Newline(), Leaf(token.INDENT, indentation(node) + indentation_step(node))])
one_node = parent.children[i+1]
one_node.remove()
one_node.prefix = u''
suite.append_child(one_node)
parent.append_child(suite)
def NameImport(package, as_name=None, prefix=None):
u"""
Accepts a package (Name node), name to import it as (string), and
optional prefix and returns a node:
import <package> [as <as_name>]
"""
if prefix is None:
prefix = u""
children = [Name(u"import", prefix=prefix), package]
if as_name is not None:
children.extend([Name(u"as", prefix=u" "),
Name(as_name, prefix=u" ")])
return Node(syms.import_name, children)
_compound_stmts = (syms.if_stmt, syms.while_stmt, syms.for_stmt, syms.try_stmt, syms.with_stmt)
_import_stmts = (syms.import_name, syms.import_from)
def import_binding_scope(node):
u"""
Generator yields all nodes for which a node (an import_stmt) has scope
The purpose of this is for a call to _find() on each of them
"""
# import_name / import_from are small_stmts
assert node.type in _import_stmts
test = node.next_sibling
# A small_stmt can only be followed by a SEMI or a NEWLINE.
while test.type == token.SEMI:
nxt = test.next_sibling
# A SEMI can only be followed by a small_stmt or a NEWLINE
if nxt.type == token.NEWLINE:
break
else:
yield nxt
# A small_stmt can only be followed by either a SEMI or a NEWLINE
test = nxt.next_sibling
# Covered all subsequent small_stmts after the import_stmt
# Now to cover all subsequent stmts after the parent simple_stmt
parent = node.parent
assert parent.type == syms.simple_stmt
test = parent.next_sibling
while test is not None:
# Yes, this will yield NEWLINE and DEDENT. Deal with it.
yield test
test = test.next_sibling
context = parent.parent
# Recursively yield nodes following imports inside of a if/while/for/try/with statement
if context.type in _compound_stmts:
# import is in a one-liner
c = context
while c.next_sibling is not None:
yield c.next_sibling
c = c.next_sibling
context = context.parent
# Can't chain one-liners on one line, so that takes care of that.
p = context.parent
if p is None:
return
# in a multi-line suite
while p.type in _compound_stmts:
if context.type == syms.suite:
yield context
context = context.next_sibling
if context is None:
context = p.parent
p = context.parent
if p is None:
break
def ImportAsName(name, as_name, prefix=None):
new_name = Name(name)
new_as = Name(u"as", prefix=u" ")
new_as_name = Name(as_name, prefix=u" ")
new_node = Node(syms.import_as_name, [new_name, new_as, new_as_name])
if prefix is not None:
new_node.prefix = prefix
return new_node
def future_import(feature, node):
"""
This seems to work
"""
root = find_root(node)
if does_tree_import(u"__future__", feature, node):
return
# Look for a shebang or encoding line
shebang_encoding_idx = None
for idx, node in enumerate(root.children):
# Is it a shebang or encoding line?
if is_shebang_comment(node) or is_encoding_comment(node):
shebang_encoding_idx = idx
if node.type == syms.simple_stmt and \
len(node.children) > 0 and node.children[0].type == token.STRING:
# skip over docstring
continue
names = check_future_import(node)
if not names:
# not a future statement; need to insert before this
break
if feature in names:
# already imported
return
import_ = FromImport(u'__future__', [Leaf(token.NAME, feature, prefix=" ")])
if shebang_encoding_idx == 0 and idx == 0:
# If this __future__ import would go on the first line,
# detach the shebang / encoding prefix from the current first line.
# and attach it to our new __future__ import node.
import_.prefix = root.children[0].prefix
root.children[0].prefix = u''
# End the __future__ import line with a newline and add a blank line
# afterwards:
children = [import_ , Newline()]
root.insert_child(idx, Node(syms.simple_stmt, children))
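# Worked example: if the module starts with '#!/usr/bin/env python' and the
# import lands on the first line, the shebang prefix is detached from the old
# first statement and re-attached to the inserted node, so the output begins:
#     #!/usr/bin/env python
#     from __future__ import print_function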
def future_import2(feature, node):
"""
    An alternative implementation of future_import(); kept for reference and
    not validated as thoroughly, so it may not handle every module layout.
"""
root = find_root(node)
if does_tree_import(u"__future__", feature, node):
return
insert_pos = 0
for idx, node in enumerate(root.children):
if node.type == syms.simple_stmt and node.children and \
node.children[0].type == token.STRING:
insert_pos = idx + 1
break
for thing_after in root.children[insert_pos:]:
if thing_after.type == token.NEWLINE:
insert_pos += 1
continue
prefix = thing_after.prefix
thing_after.prefix = u""
break
else:
prefix = u""
import_ = FromImport(u"__future__", [Leaf(token.NAME, feature, prefix=u" ")])
children = [import_, Newline()]
root.insert_child(insert_pos, Node(syms.simple_stmt, children, prefix=prefix))
def parse_args(arglist, scheme):
u"""
Parse a list of arguments into a dict
"""
arglist = [i for i in arglist if i.type != token.COMMA]
ret_mapping = dict([(k, None) for k in scheme])
for i, arg in enumerate(arglist):
if arg.type == syms.argument and arg.children[1].type == token.EQUAL:
# argument < NAME '=' any >
slot = arg.children[0].value
ret_mapping[slot] = arg.children[2]
else:
slot = scheme[i]
ret_mapping[slot] = arg
return ret_mapping
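# Illustrative sketch (Leaf reprs abbreviated): for the argument list parsed
# from 'f(1, y=2)' with scheme ('x', 'y'), parse_args returns
# {'x': <Leaf 1>, 'y': <Leaf 2>}; positional slots are filled by position,
# keyword arguments by name.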
# def is_import_from(node):
# """Returns true if the node is a statement "from ... import ..."
# """
# return node.type == syms.import_from
def is_import_stmt(node):
return (node.type == syms.simple_stmt and node.children and
is_import(node.children[0]))
def touch_import_top(package, name_to_import, node):
"""Works like `does_tree_import` but adds an import statement at the
top if it was not imported (but below any __future__ imports).
Based on lib2to3.fixer_util.touch_import()
Calling this multiple times adds the imports in reverse order.
Also adds "standard_library.install_aliases()" after "from future import
standard_library". This should probably be factored into another function.
"""
root = find_root(node)
if does_tree_import(package, name_to_import, root):
return
# Ideally, we would look for whether futurize --all-imports has been run,
# as indicated by the presence of ``from builtins import (ascii, ...,
# zip)`` -- and, if it has, we wouldn't import the name again.
# Look for __future__ imports and insert below them
found = False
for name in ['absolute_import', 'division', 'print_function',
'unicode_literals']:
if does_tree_import('__future__', name, root):
found = True
break
if found:
# At least one __future__ import. We want to loop until we've seen them
# all.
start, end = None, None
for idx, node in enumerate(root.children):
if check_future_import(node):
start = idx
# Start looping
idx2 = start
while node:
node = node.next_sibling
idx2 += 1
if not check_future_import(node):
end = idx2
break
break
assert start is not None
assert end is not None
insert_pos = end
else:
# No __future__ imports.
# We look for a docstring and insert the new node below that. If no docstring
# exists, just insert the node at the top.
for idx, node in enumerate(root.children):
if node.type != syms.simple_stmt:
break
if not (node.children and node.children[0].type == token.STRING):
# This is the usual case.
break
insert_pos = idx
if package is None:
import_ = Node(syms.import_name, [
Leaf(token.NAME, u"import"),
Leaf(token.NAME, name_to_import, prefix=u" ")
])
else:
import_ = FromImport(package, [Leaf(token.NAME, name_to_import, prefix=u" ")])
if name_to_import == u'standard_library':
# Add:
# standard_library.install_aliases()
# after:
# from future import standard_library
install_hooks = Node(syms.simple_stmt,
[Node(syms.power,
[Leaf(token.NAME, u'standard_library'),
Node(syms.trailer, [Leaf(token.DOT, u'.'),
Leaf(token.NAME, u'install_aliases')]),
Node(syms.trailer, [Leaf(token.LPAR, u'('),
Leaf(token.RPAR, u')')])
])
]
)
children_hooks = [install_hooks, Newline()]
else:
children_hooks = []
children_import = [import_, Newline()]
root.insert_child(insert_pos, Node(syms.simple_stmt, children_import))
if len(children_hooks) > 0:
root.insert_child(insert_pos + 1, Node(syms.simple_stmt, children_hooks))
## The following functions are from python-modernize by Armin Ronacher:
# (a little edited).
def check_future_import(node):
"""If this is a future import, return set of symbols that are imported,
else return None."""
# node should be the import statement here
savenode = node
if not (node.type == syms.simple_stmt and node.children):
return set()
node = node.children[0]
# now node is the import_from node
if not (node.type == syms.import_from and
# node.type == token.NAME and # seems to break it
hasattr(node.children[1], 'value') and
node.children[1].value == u'__future__'):
return set()
node = node.children[3]
# now node is the import_as_name[s]
# print(python_grammar.number2symbol[node.type]) # breaks sometimes
if node.type == syms.import_as_names:
result = set()
for n in node.children:
if n.type == token.NAME:
result.add(n.value)
elif n.type == syms.import_as_name:
n = n.children[0]
assert n.type == token.NAME
result.add(n.value)
return result
elif node.type == syms.import_as_name:
node = node.children[0]
assert node.type == token.NAME
return set([node.value])
elif node.type == token.NAME:
return set([node.value])
else:
# TODO: handle brackets like this:
# from __future__ import (absolute_import, division)
assert False, "strange import: %s" % savenode
SHEBANG_REGEX = r'^#!.*python'
ENCODING_REGEX = r"^#.*coding[:=]\s*([-\w.]+)"
def is_shebang_comment(node):
"""
Comments are prefixes for Leaf nodes. Returns whether the given node has a
prefix that looks like a shebang line or an encoding line:
#!/usr/bin/env python
#!/usr/bin/python3
"""
return bool(re.match(SHEBANG_REGEX, node.prefix))
def is_encoding_comment(node):
"""
Comments are prefixes for Leaf nodes. Returns whether the given node has a
prefix that looks like an encoding line:
# coding: utf-8
# encoding: utf-8
# -*- coding: <encoding name> -*-
# vim: set fileencoding=<encoding name> :
"""
return bool(re.match(ENCODING_REGEX, node.prefix))
def wrap_in_fn_call(fn_name, args, prefix=None):
"""
Example:
>>> wrap_in_fn_call("oldstr", (arg,))
oldstr(arg)
>>> wrap_in_fn_call("olddiv", (arg1, arg2))
olddiv(arg1, arg2)
"""
assert len(args) > 0
if len(args) == 1:
newargs = args
elif len(args) == 2:
expr1, expr2 = args
newargs = [expr1, Comma(), expr2]
else:
        raise NotImplementedError('write me')
return Call(Name(fn_name), newargs, prefix=prefix)<|fim▁end|> | |
<|file_name|>widgets.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# progressbar - Text progress bar library for Python.
# Copyright (c) 2005 Nilton Volpato
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''Default ProgressBar widgets'''
from __future__ import division, absolute_import, with_statement
import datetime
import math
import abc
class AbstractWidget(object):
__metaclass__ = abc.ABCMeta
def format_updatable(updatable, pbar):
if hasattr(updatable, 'update'):
return updatable.update(pbar)
else:
return updatable
class Widget(AbstractWidget):
'''The base class for all widgets
    The ProgressBar will call the widget's update method when the widget should
be updated. The widget's size may change between calls, but the widget may
display incorrectly if the size changes drastically and repeatedly.
The boolean TIME_SENSITIVE informs the ProgressBar that it should be
updated more often because it is time sensitive.
'''
TIME_SENSITIVE = False
@abc.abstractmethod
def update(self, pbar):
'''Updates the widget.
pbar - a reference to the calling ProgressBar
'''
class WidgetHFill(Widget):
'''The base class for all variable width widgets.
This widget is much like the \\hfill command in TeX, it will expand to
fill the line. You can use more than one in the same line, and they will
all have the same width, and together will fill the line.
'''
@abc.abstractmethod
def update(self, pbar, width):
'''Updates the widget providing the total width the widget must fill.
pbar - a reference to the calling ProgressBar
width - The total width the widget must fill
'''
class Timer(Widget):
'Widget which displays the elapsed seconds.'
TIME_SENSITIVE = True
def __init__(self, format='Elapsed Time: %s'):
self.format = format
@staticmethod
def format_time(seconds):
'Formats time as the string "HH:MM:SS".'
return str(datetime.timedelta(seconds=int(seconds)))
def update(self, pbar):
'Updates the widget to show the elapsed time.'
return self.format % self.format_time(pbar.seconds_elapsed)
class ETA(Timer):
'Widget which attempts to estimate the time of arrival.'<|fim▁hole|>
TIME_SENSITIVE = True
def _eta(self, pbar):
elapsed = pbar.seconds_elapsed
return elapsed * pbar.maxval / pbar.currval - elapsed
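    # e.g. 25 of 100 items done after 30s: 30 * 100 / 25 - 30 = 90s remain.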
def update(self, pbar):
'Updates the widget to show the ETA or total time when finished.'
if pbar.currval == 0:
return 'ETA: --:--:--'
elif pbar.finished:
return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
else:
return 'ETA: %s' % self.format_time(self._eta(pbar))
class AdaptiveETA(ETA):
'''Widget which attempts to estimate the time of arrival.
Uses a sampled average of the speed based on the 10 last updates.
Very convenient for resuming the progress halfway.
'''
TIME_SENSITIVE = True
def __init__(self, num_samples=10, **kwargs):
ETA.__init__(self, **kwargs)
self.num_samples = num_samples
self.samples = []
self.sample_vals = []
self.last_sample_val = None
def _eta(self, pbar):
samples = self.samples
sample_vals = self.sample_vals
if pbar.currval != self.last_sample_val:
# Update the last sample counter, we only update if currval has
# changed
self.last_sample_val = pbar.currval
# Add a sample but limit the size to `num_samples`
samples.append(pbar.seconds_elapsed)
sample_vals.append(pbar.currval)
if len(samples) > self.num_samples:
samples.pop(0)
sample_vals.pop(0)
if len(samples) <= 1:
# No samples so just return the normal ETA calculation
return ETA._eta(self, pbar)
todo = pbar.maxval - pbar.currval
items = sample_vals[-1] - sample_vals[0]
duration = float(samples[-1] - samples[0])
per_item = duration / items
return todo * per_item
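    # e.g. if the sampled window shows 50 items completed in 25s, per_item
    # is 0.5s, so 200 remaining items give an ETA of 100s.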
class FileTransferSpeed(Widget):
'Widget for showing the transfer speed (useful for file transfers).'
format = '%6.2f %s%s/s'
prefixes = ' kMGTPEZY'
def __init__(self, unit='B'):
self.unit = unit
def _speed(self, pbar):
speed = pbar.currval / pbar.seconds_elapsed
power = int(math.log(speed, 1000))
scaled = speed / 1000. ** power
return scaled, power
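    # e.g. at 1.5e6 B/s: power = int(math.log(1.5e6, 1000)) = 2, scaled = 1.5,
    # which update() renders as '  1.50 MB/s'.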
def update(self, pbar):
'Updates the widget with the current SI prefixed speed.'
if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: # =~ 0
scaled = power = 0
else:
scaled, power = self._speed(pbar)
return self.format % (scaled, self.prefixes[power], self.unit)
class AdaptiveTransferSpeed(FileTransferSpeed):
'Widget for showing the transfer speed, based on the last X samples'
def __init__(self, num_samples=10):
FileTransferSpeed.__init__(self)
self.num_samples = num_samples
self.samples = []
self.sample_vals = []
self.last_sample_val = None
def _speed(self, pbar):
samples = self.samples
sample_vals = self.sample_vals
if pbar.currval != self.last_sample_val:
# Update the last sample counter, we only update if currval has
# changed
self.last_sample_val = pbar.currval
# Add a sample but limit the size to `num_samples`
samples.append(pbar.seconds_elapsed)
sample_vals.append(pbar.currval)
if len(samples) > self.num_samples:
samples.pop(0)
sample_vals.pop(0)
if len(samples) <= 1:
# No samples so just return the parent's calculation
return FileTransferSpeed._speed(self, pbar)
items = sample_vals[-1] - sample_vals[0]
duration = float(samples[-1] - samples[0])
speed = items / duration
power = int(math.log(speed, 1000))
scaled = speed / 1000. ** power
return scaled, power
class AnimatedMarker(Widget):
'''An animated marker for the progress bar which defaults to appear as if
it were rotating.
'''
def __init__(self, markers='|/-\\'):
self.markers = markers
self.curmark = -1
def update(self, pbar):
'''Updates the widget to show the next marker or the first marker when
finished'''
if pbar.finished:
return self.markers[0]
self.curmark = (self.curmark + 1) % len(self.markers)
return self.markers[self.curmark]
# Alias for backwards compatibility
RotatingMarker = AnimatedMarker
class Counter(Widget):
'Displays the current count'
def __init__(self, format='%d'):
self.format = format
def update(self, pbar):
return self.format % pbar.currval
class Percentage(Widget):
'Displays the current percentage as a number with a percent sign.'
def update(self, pbar):
return '%3d%%' % pbar.percentage()
class FormatLabel(Timer):
'Displays a formatted label'
mapping = {
'elapsed': ('seconds_elapsed', Timer.format_time),
'finished': ('finished', None),
'last_update': ('last_update_time', None),
'max': ('maxval', None),
'seconds': ('seconds_elapsed', None),
'start': ('start_time', None),
'value': ('currval', None)
}
def __init__(self, format):
self.format = format
def update(self, pbar):
context = {}
for name, (key, transform) in self.mapping.items():
try:
value = getattr(pbar, key)
if transform is None:
context[name] = value
else:
context[name] = transform(value)
except: # pragma: no cover
pass
return self.format % context
class SimpleProgress(Widget):
'Returns progress as a count of the total (e.g.: "5 of 47")'
def __init__(self, sep=' of '):
self.sep = sep
def update(self, pbar):
return '%d%s%d' % (pbar.currval, self.sep, pbar.maxval)
class Bar(WidgetHFill):
'A progress bar which stretches to fill the line.'
def __init__(self, marker='#', left='|', right='|', fill=' ',
fill_left=True):
'''Creates a customizable progress bar.
marker - string or updatable object to use as a marker
left - string or updatable object to use as a left border
right - string or updatable object to use as a right border
fill - character to use for the empty part of the progress bar
fill_left - whether to fill from the left or the right
'''
self.marker = marker
self.left = left
self.right = right
self.fill = fill
self.fill_left = fill_left
def update(self, pbar, width):
'Updates the progress bar and its subcomponents'
left, marked, right = (format_updatable(i, pbar) for i in
(self.left, self.marker, self.right))
width -= len(left) + len(right)
# Marked must *always* have length of 1
if pbar.maxval:
marked *= int(pbar.currval / pbar.maxval * width)
else: # pragma: no cover
marked = ''
if self.fill_left:
return '%s%s%s' % (left, marked.ljust(width, self.fill), right)
else:
return '%s%s%s' % (left, marked.rjust(width, self.fill), right)
class ReverseBar(Bar):
    'A progress bar which fills from the right-hand side.'
def __init__(self, marker='#', left='|', right='|', fill=' ',
fill_left=False):
'''Creates a customizable progress bar.
marker - string or updatable object to use as a marker
left - string or updatable object to use as a left border
right - string or updatable object to use as a right border
fill - character to use for the empty part of the progress bar
fill_left - whether to fill from the left or the right
'''
self.marker = marker
self.left = left
self.right = right
self.fill = fill
self.fill_left = fill_left
class BouncingBar(Bar):
def update(self, pbar, width):
'Updates the progress bar and its subcomponents'
left, marker, right = (format_updatable(i, pbar) for i in
(self.left, self.marker, self.right))
width -= len(left) + len(right)
if pbar.finished:
return '%s%s%s' % (left, width * marker, right)
position = int(pbar.currval % (width * 2 - 1))
if position > width:
position = width * 2 - position
lpad = self.fill * (position - 1)
rpad = self.fill * (width - len(marker) - len(lpad))
# Swap if we want to bounce the other way
if not self.fill_left:
rpad, lpad = lpad, rpad
return '%s%s%s%s%s' % (left, lpad, marker, rpad, right)<|fim▁end|> | |
<|file_name|>test_codecs.py<|end_file_name|><|fim▁begin|>from test import support
import unittest
import codecs
import locale
import sys, _testcapi, io
class Queue(object):
"""
queue: write bytes at one end, read bytes from the other end
"""
def __init__(self, buffer):
self._buffer = buffer
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = self._buffer[:0] # make empty
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
class MixInCheckStateHandling:
def check_state_handling_decode(self, encoding, u, s):
for i in range(len(s)+1):
d = codecs.getincrementaldecoder(encoding)()
part1 = d.decode(s[:i])
state = d.getstate()
self.assertIsInstance(state[1], int)
# Check that the condition stated in the documentation for
# IncrementalDecoder.getstate() holds
if not state[1]:
# reset decoder to the default state without anything buffered
d.setstate((state[0][:0], 0))
# Feeding the previous input may not produce any output
self.assertTrue(not d.decode(state[0]))
# The decoder must return to the same state
self.assertEqual(state, d.getstate())
# Create a new decoder and set it to the state
# we extracted from the old one
d = codecs.getincrementaldecoder(encoding)()
d.setstate(state)
part2 = d.decode(s[i:], True)
self.assertEqual(u, part1+part2)
def check_state_handling_encode(self, encoding, u, s):
for i in range(len(u)+1):
d = codecs.getincrementalencoder(encoding)()
part1 = d.encode(u[:i])
state = d.getstate()
d = codecs.getincrementalencoder(encoding)()
d.setstate(state)
part2 = d.encode(u[i:], True)
self.assertEqual(s, part1+part2)
class ReadTest(unittest.TestCase, MixInCheckStateHandling):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read everything available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue(b"")
r = codecs.getreader(self.encoding)(q)
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
q.write(bytes([c]))
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), "")
self.assertEqual(r.bytebuffer, b"")
# do the check again, this time using a incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# Check whether the reset method works properly
d.reset()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
"".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
)
def test_readline(self):
def getreader(input):
stream = io.BytesIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = "foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = "foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate("\n \r\n \r \u2028".split()):
            vw.append((i*200)*"\u3042" + lineend)
            vwo.append((i*200)*"\u3042")
self.assertEqual(readalllines("".join(vw), True), "".join(vw))
self.assertEqual(readalllines("".join(vw), False),"".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in range(80):
for lineend in "\n \r\n \r \u2028".split():
s = 10*(size*"a" + lineend + "xxx\n")
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=True),
size*"a" + lineend,
)
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=False),
size*"a",
)
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
' #-------------------- TODAY\'S ARTICLES\r\n',
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
' #-------------------- ACTIVE ARTICLES redirect\r\n',
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
' #-------------------- LOGIN PAGE redirect\r\n',
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
' #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
' #-------------------- RECENT ARTICLES\r\n',
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = io.BytesIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue(b"")
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=False), "foo")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=False), "")
self.assertEqual(reader.readline(keepends=False), "bar")
writer.write("baz")
self.assertEqual(reader.readline(keepends=False), "baz")
self.assertEqual(reader.readline(keepends=False), "")
# Lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=True), "foo\r")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=True), "\n")
self.assertEqual(reader.readline(keepends=True), "bar\r")
writer.write("baz")
self.assertEqual(reader.readline(keepends=True), "baz")
self.assertEqual(reader.readline(keepends=True), "")
writer.write("foo\r\n")
self.assertEqual(reader.readline(keepends=True), "foo\r\n")
def test_bug1098990_a(self):
s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = "next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), "")
def test_bug1098990_b(self):
s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = "stillokay:bbbbxx\r\n"
s4 = "broken!!!!badbad\r\n"
s5 = "againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), "")
class UTF32Test(ReadTest):
encoding = "utf-32"
spamle = (b'\xff\xfe\x00\x00'
b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
spambe = (b'\x00\x00\xfe\xff'
b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = io.BytesIO()
f = writer(s)
f.write("spam")
f.write("spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = io.BytesIO(d)
f = reader(s)
self.assertEqual(f.read(), "spamspam")
def test_badbom(self):
s = io.BytesIO(4*b"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = io.BytesIO(8*b"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff",
[
"", # first byte of BOM read
"", # second byte of BOM read
"", # third byte of BOM read
"", # fourth byte of BOM read => byteorder known
"",
"",
"",
"\x00",
"\x00",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
]
)
def test_handlers(self):
self.assertEqual(('\ufffd', 1),
codecs.utf_32_decode(b'\x01', 'replace', True))
self.assertEqual(('', 1),
codecs.utf_32_decode(b'\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
b"\xff", "strict", True)
def test_decoder_state(self):
self.check_state_handling_decode(self.encoding,
"spamspam", self.spamle)
self.check_state_handling_decode(self.encoding,
"spamspam", self.spambe)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded_le = b'\xff\xfe\x00\x00' + b'\x00\x00\x01\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_decode(encoded_le)[0])
encoded_be = b'\x00\x00\xfe\xff' + b'\x00\x01\x00\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest):
encoding = "utf-32-le"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff",
[
"",
"",
"",
"\x00",
"\x00",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
]
)
def test_simple(self):
self.assertEqual("\U00010203".encode(self.encoding), b"\x03\x02\x01\x00")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
b"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = b'\x00\x00\x01\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest):
encoding = "utf-32-be"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff",
[
"",
"",
"",
"\x00",
"\x00",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
]
)
def test_simple(self):
self.assertEqual("\U00010203".encode(self.encoding), b"\x00\x01\x02\x03")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
b"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = b'\x00\x01\x00\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest):
encoding = "utf-16"
spamle = b'\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
spambe = b'\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = io.BytesIO()
f = writer(s)
f.write("spam")
f.write("spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = io.BytesIO(d)
f = reader(s)
self.assertEqual(f.read(), "spamspam")
def test_badbom(self):
s = io.BytesIO(b"\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = io.BytesIO(b"\xff\xff\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff",
[
"", # first byte of BOM read
"", # second byte of BOM read => byteorder known
"",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
]
)
def test_handlers(self):
self.assertEqual(('\ufffd', 1),
codecs.utf_16_decode(b'\x01', 'replace', True))
self.assertEqual(('', 1),
codecs.utf_16_decode(b'\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode,
b"\xff", "strict", True)
def test_decoder_state(self):
self.check_state_handling_decode(self.encoding,
"spamspam", self.spamle)
self.check_state_handling_decode(self.encoding,
"spamspam", self.spambe)
def test_bug691291(self):
# Files are always opened in binary mode, even if no binary mode was
# specified. This means that no automatic conversion of '\n' is done
# on reading and writing.
s1 = 'Hello\r\nworld\r\n'
s = s1.encode(self.encoding)
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, 'wb') as fp:
fp.write(s)
with codecs.open(support.TESTFN, 'U', encoding=self.encoding) as reader:
self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest):
encoding = "utf-16-le"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff",
[
"",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
]
)
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
b"\xff", "strict", True)
def test_nonbmp(self):
self.assertEqual("\U00010203".encode(self.encoding),
b'\x00\xd8\x03\xde')
self.assertEqual(b'\x00\xd8\x03\xde'.decode(self.encoding),
"\U00010203")
class UTF16BETest(ReadTest):
encoding = "utf-16-be"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff",
[
"",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
]
)
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
b"\xff", "strict", True)
def test_nonbmp(self):
self.assertEqual("\U00010203".encode(self.encoding),
b'\xd8\x00\xde\x03')
self.assertEqual(b'\xd8\x00\xde\x03'.decode(self.encoding),
"\U00010203")
class UTF8Test(ReadTest):
encoding = "utf-8"
def test_partial(self):
self.check_partial(
"\x00\xff\u07ff\u0800\uffff",
[
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u07ff",
"\x00\xff\u07ff",
"\x00\xff\u07ff",
"\x00\xff\u07ff\u0800",
"\x00\xff\u07ff\u0800",
"\x00\xff\u07ff\u0800",
"\x00\xff\u07ff\u0800\uffff",
]
)
def test_decoder_state(self):
u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
self.check_state_handling_decode(self.encoding,
u, u.encode(self.encoding))
def test_lone_surrogates(self):
self.assertRaises(UnicodeEncodeError, "\ud800".encode, "utf-8")
self.assertRaises(UnicodeDecodeError, b"\xed\xa0\x80".decode, "utf-8")
self.assertEqual("[\uDC80]".encode("utf-8", "backslashreplace"),
b'[\\udc80]')
self.assertEqual("[\uDC80]".encode("utf-8", "xmlcharrefreplace"),
b'[�]')
self.assertEqual("[\uDC80]".encode("utf-8", "surrogateescape"),
b'[\x80]')
self.assertEqual("[\uDC80]".encode("utf-8", "ignore"),
b'[]')
self.assertEqual("[\uDC80]".encode("utf-8", "replace"),
b'[?]')
def test_surrogatepass_handler(self):
self.assertEqual("abc\ud800def".encode("utf-8", "surrogatepass"),
b"abc\xed\xa0\x80def")
self.assertEqual(b"abc\xed\xa0\x80def".decode("utf-8", "surrogatepass"),
"abc\ud800def")
self.assertTrue(codecs.lookup_error("surrogatepass"))
class UTF7Test(ReadTest):
encoding = "utf-7"
def test_partial(self):
self.check_partial(
"a+-b",
[
"a",
"a",
"a+",
"a+-",
"a+-b",
]
)
class UTF16ExTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_ex_decode, b"\xff", "strict", 0, True)
def test_bad_args(self):
self.assertRaises(TypeError, codecs.utf_16_ex_decode)
class ReadBufferTest(unittest.TestCase):
def test_array(self):
import array
self.assertEqual(
codecs.readbuffer_encode(array.array("b", b"spam")),
(b"spam", 4)
)
def test_empty(self):
self.assertEqual(codecs.readbuffer_encode(""), (b"", 0))
def test_bad_args(self):
self.assertRaises(TypeError, codecs.readbuffer_encode)
self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
class UTF8SigTest(ReadTest):
encoding = "utf-8-sig"
def test_partial(self):
self.check_partial(
"\ufeff\x00\xff\u07ff\u0800\uffff",
[
"",
"",
"", # First BOM has been read and skipped
"",
"",
"\ufeff", # Second BOM has been read and emitted
"\ufeff\x00", # "\x00" read and emitted
"\ufeff\x00", # First byte of encoded "\xff" read
"\ufeff\x00\xff", # Second byte of encoded "\xff" read
"\ufeff\x00\xff", # First byte of encoded "\u07ff" read
"\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
"\ufeff\x00\xff\u07ff",
"\ufeff\x00\xff\u07ff",
"\ufeff\x00\xff\u07ff\u0800",
"\ufeff\x00\xff\u07ff\u0800",
"\ufeff\x00\xff\u07ff\u0800",
"\ufeff\x00\xff\u07ff\u0800\uffff",
]
)
def test_bug1601501(self):
# SF bug #1601501: check that the codec works with a buffer
self.assertEqual(str(b"\xef\xbb\xbf", "utf-8-sig"), "")
def test_bom(self):
d = codecs.getincrementaldecoder("utf-8-sig")()
s = "spam"
self.assertEqual(d.decode(s.encode("utf-8-sig")), s)
def test_stream_bom(self):
unistring = "ABC\u00A1\u2200XYZ"
bytestring = codecs.BOM_UTF8 + b"ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + list(range(1, 11)) + \
[64, 128, 256, 512, 1024]:
istream = reader(io.BytesIO(bytestring))
ostream = io.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
def test_stream_bare(self):
unistring = "ABC\u00A1\u2200XYZ"
bytestring = b"ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + list(range(1, 11)) + \
[64, 128, 256, 512, 1024]:
istream = reader(io.BytesIO(bytestring))
ostream = io.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.escape_decode(""), ("", 0))
class RecodingTest(unittest.TestCase):
def test_recoding(self):
f = io.BytesIO()
f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
f2.write("a")
f2.close()
# Python used to crash on this at exit because of a refcount
# bug in _codecsmodule.c
# From RFC 3492
punycode_testcases = [
# A Arabic (Egyptian):
("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
b"egbpdaj6bu4bxfgehfvwxn"),
# B Chinese (simplified):
("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
b"ihqwcrb4cv8a8dqg056pqjye"),
# C Chinese (traditional):
("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
b"ihqwctvzc91f659drss3x8bo0yb"),
# D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
"\u0065\u0073\u006B\u0079",
b"Proprostnemluvesky-uyb24dma41a"),
# E Hebrew:
("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
"\u05D1\u05E8\u05D9\u05EA",
b"4dbcagdahymbxekheh6e0a7fei0b"),
# F Hindi (Devanagari):
("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
"\u0939\u0948\u0902",
b"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
#(G) Japanese (kanji and hiragana):
("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
b"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
# (H) Korean (Hangul syllables):
("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
b"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
b"psd879ccm6fea98c"),
# (I) Russian (Cyrillic):
("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
"\u0438",
b"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
# (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
"\u0061\u00F1\u006F\u006C",
b"PorqunopuedensimplementehablarenEspaol-fmd56a"),
# (K) Vietnamese:
# T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
# <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
"\u0056\u0069\u1EC7\u0074",
b"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
#(L) 3<nen>B<gumi><kinpachi><sensei>
("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
b"3B-ww4c5e180e575a65lsy2b"),
# (M) <amuro><namie>-with-SUPER-MONKEYS
("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
"\u004F\u004E\u004B\u0045\u0059\u0053",
b"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
# (N) Hello-Another-Way-<sorezore><no><basho>
("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
b"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
# (O) <hitotsu><yane><no><shita>2
("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
b"2-u9tlzr9756bt3uc0v"),
# (P) Maji<de>Koi<suru>5<byou><mae>
("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
"\u308B\u0035\u79D2\u524D",
b"MajiKoi5-783gue6qz075azm5e"),
# (Q) <pafii>de<runba>
("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
b"de-jg4avhby1noc0d"),
# (R) <sono><supiido><de>
("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
b"d9juau41awczczp"),
# (S) -> $1.00 <-
("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
"\u003C\u002D",
b"-> $1.00 <--")
]
for i in punycode_testcases:
if len(i)!=2:
print(repr(i))
class PunycodeTest(unittest.TestCase):
def test_encode(self):
for uni, puny in punycode_testcases:
# Need to convert both strings to lower case, since
# some of the extended encodings use upper case, but our
# code produces only lower case. Converting just puny to
# lower is also insufficient, since some of the input characters
# are upper case.
self.assertEqual(
str(uni.encode("punycode"), "ascii").lower(),
str(puny, "ascii").lower()
)
def test_decode(self):
for uni, puny in punycode_testcases:
self.assertEqual(uni, puny.decode("punycode"))
puny = puny.decode("ascii").encode("ascii")
self.assertEqual(uni, puny.decode("punycode"))
class UnicodeInternalTest(unittest.TestCase):
def test_bug1251300(self):
# Decoding with unicode_internal used to not correctly handle "code
# points" above 0x10ffff on UCS-4 builds.
if sys.maxunicode > 0xffff:
ok = [
(b"\x00\x10\xff\xff", "\U0010ffff"),
(b"\x00\x00\x01\x01", "\U00000101"),
(b"", ""),
]
not_ok = [
b"\x7f\xff\xff\xff",
b"\x80\x00\x00\x00",
b"\x81\x00\x00\x00",
b"\x00",
b"\x00\x00\x00\x00\x00",
]
for internal, uni in ok:
if sys.byteorder == "little":
internal = bytes(reversed(internal))
self.assertEqual(uni, internal.decode("unicode_internal"))
for internal in not_ok:
if sys.byteorder == "little":
internal = bytes(reversed(internal))
self.assertRaises(UnicodeDecodeError, internal.decode,
"unicode_internal")
def test_decode_error_attributes(self):
if sys.maxunicode > 0xffff:
try:
b"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
except UnicodeDecodeError as ex:
self.assertEqual("unicode_internal", ex.encoding)
self.assertEqual(b"\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
self.assertEqual(4, ex.start)
self.assertEqual(8, ex.end)
else:
self.fail()
def test_decode_callback(self):
if sys.maxunicode > 0xffff:
codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
decoder = codecs.getdecoder("unicode_internal")
ab = "ab".encode("unicode_internal").decode()
ignored = decoder(bytes("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
"ascii"),
"UnicodeInternalTest")
self.assertEqual(("ab", 12), ignored)
def test_encode_length(self):
# Issue 3739
encoder = codecs.getencoder("unicode_internal")
self.assertEqual(encoder("a")[1], 1)
self.assertEqual(encoder("\xe9\u0142")[1], 2)
self.assertEqual(codecs.escape_encode(br'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
nameprep_tests = [
# 3.1 Map to nothing.
(b'foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
b'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
b'\xb8\x8f\xef\xbb\xbf',
b'foobarbaz'),
# 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
(b'CAFE',
b'cafe'),
# 3.3 Case folding 8bit U+00DF (german sharp s).
# The original test case is bogus; it says \xc3\xdf
(b'\xc3\x9f',
b'ss'),
# 3.4 Case folding U+0130 (turkish capital I with dot).
(b'\xc4\xb0',
b'i\xcc\x87'),
# 3.5 Case folding multibyte U+0143 U+037A.
(b'\xc5\x83\xcd\xba',
b'\xc5\x84 \xce\xb9'),
# 3.6 Case folding U+2121 U+33C6 U+1D7BB.
# XXX: skip this as it fails in UCS-2 mode
#('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
# 'telc\xe2\x88\x95kg\xcf\x83'),
(None, None),
# 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
(b'j\xcc\x8c\xc2\xa0\xc2\xaa',
b'\xc7\xb0 a'),
# 3.8 Case folding U+1FB7 and normalization.
(b'\xe1\xbe\xb7',
b'\xe1\xbe\xb6\xce\xb9'),
# 3.9 Self-reverting case folding U+01F0 and normalization.
# The original test case is bogus, it says `\xc7\xf0'
(b'\xc7\xb0',
b'\xc7\xb0'),
# 3.10 Self-reverting case folding U+0390 and normalization.
(b'\xce\x90',
b'\xce\x90'),
# 3.11 Self-reverting case folding U+03B0 and normalization.
(b'\xce\xb0',
b'\xce\xb0'),
# 3.12 Self-reverting case folding U+1E96 and normalization.
(b'\xe1\xba\x96',
b'\xe1\xba\x96'),
# 3.13 Self-reverting case folding U+1F56 and normalization.
(b'\xe1\xbd\x96',
b'\xe1\xbd\x96'),
# 3.14 ASCII space character U+0020.
(b' ',
b' '),
# 3.15 Non-ASCII 8bit space character U+00A0.
(b'\xc2\xa0',
b' '),
# 3.16 Non-ASCII multibyte space character U+1680.
(b'\xe1\x9a\x80',
None),
# 3.17 Non-ASCII multibyte space character U+2000.
(b'\xe2\x80\x80',
b' '),
# 3.18 Zero Width Space U+200b.
(b'\xe2\x80\x8b',
b''),
# 3.19 Non-ASCII multibyte space character U+3000.
(b'\xe3\x80\x80',
b' '),
# 3.20 ASCII control characters U+0010 U+007F.
(b'\x10\x7f',
b'\x10\x7f'),
# 3.21 Non-ASCII 8bit control character U+0085.
(b'\xc2\x85',
None),
# 3.22 Non-ASCII multibyte control character U+180E.
(b'\xe1\xa0\x8e',
None),
# 3.23 Zero Width No-Break Space U+FEFF.
(b'\xef\xbb\xbf',
b''),
# 3.24 Non-ASCII control character U+1D175.
(b'\xf0\x9d\x85\xb5',
None),
# 3.25 Plane 0 private use character U+F123.
(b'\xef\x84\xa3',
None),
# 3.26 Plane 15 private use character U+F1234.
(b'\xf3\xb1\x88\xb4',
None),
# 3.27 Plane 16 private use character U+10F234.
(b'\xf4\x8f\x88\xb4',
None),
# 3.28 Non-character code point U+8FFFE.
(b'\xf2\x8f\xbf\xbe',
None),
# 3.29 Non-character code point U+10FFFF.
(b'\xf4\x8f\xbf\xbf',
None),
# 3.30 Surrogate code U+DF42.
(b'\xed\xbd\x82',
None),
# 3.31 Non-plain text character U+FFFD.
(b'\xef\xbf\xbd',
None),
# 3.32 Ideographic description character U+2FF5.
(b'\xe2\xbf\xb5',
None),
# 3.33 Display property character U+0341.
(b'\xcd\x81',
b'\xcc\x81'),
# 3.34 Left-to-right mark U+200E.
(b'\xe2\x80\x8e',
None),
# 3.35 Deprecated U+202A.
(b'\xe2\x80\xaa',
None),
# 3.36 Language tagging character U+E0001.
(b'\xf3\xa0\x80\x81',
None),
# 3.37 Language tagging character U+E0042.
(b'\xf3\xa0\x81\x82',
None),
# 3.38 Bidi: RandALCat character U+05BE and LCat characters.
(b'foo\xd6\xbebar',
None),
# 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
(b'foo\xef\xb5\x90bar',
None),
# 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
(b'foo\xef\xb9\xb6bar',
b'foo \xd9\x8ebar'),
# 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
(b'\xd8\xa71',
None),
# 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
(b'\xd8\xa71\xd8\xa8',
b'\xd8\xa71\xd8\xa8'),
# 3.43 Unassigned code point U+E0002.
# Skip this test as we allow unassigned
#(b'\xf3\xa0\x80\x82',
# None),
(None, None),
# 3.44 Larger test (shrinking).
# Original test case reads \xc3\xdf
(b'X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
b'\xaa\xce\xb0\xe2\x80\x80',
b'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
# 3.45 Larger test (expanding).
# Original test case reads \xc3\x9f
(b'X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
b'\x80',
b'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
b'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
b'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
]
class NameprepTest(unittest.TestCase):
def test_nameprep(self):
from encodings.idna import nameprep
for pos, (orig, prepped) in enumerate(nameprep_tests):
if orig is None:
# Skipped
continue
# The Unicode strings are given in UTF-8
orig = str(orig, "utf-8", "surrogatepass")
if prepped is None:
# Input contains prohibited characters
self.assertRaises(UnicodeError, nameprep, orig)
else:
prepped = str(prepped, "utf-8", "surrogatepass")
try:
self.assertEqual(nameprep(orig), prepped)
except Exception as e:
raise support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
class IDNACodecTest(unittest.TestCase):
def test_builtin_decode(self):
self.assertEqual(str(b"python.org", "idna"), "python.org")
self.assertEqual(str(b"python.org.", "idna"), "python.org.")
self.assertEqual(str(b"xn--pythn-mua.org", "idna"), "pyth\xf6n.org")
self.assertEqual(str(b"xn--pythn-mua.org.", "idna"), "pyth\xf6n.org.")
def test_builtin_encode(self):
self.assertEqual("python.org".encode("idna"), b"python.org")
self.assertEqual("python.org.".encode("idna"), b"python.org.")
self.assertEqual("pyth\xf6n.org".encode("idna"), b"xn--pythn-mua.org")
self.assertEqual("pyth\xf6n.org.".encode("idna"), b"xn--pythn-mua.org.")
def test_stream(self):
r = codecs.getreader("idna")(io.BytesIO(b"abc"))
r.read(3)
self.assertEqual(r.read(), "")
def test_incremental_decode(self):
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"python.org"), "idna")),
"python.org"
)
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"python.org."), "idna")),
"python.org."
)
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
"pyth\xf6n.org."
)
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
"pyth\xf6n.org."
)
decoder = codecs.getincrementaldecoder("idna")()
self.assertEqual(decoder.decode(b"xn--xam", ), "")
self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
self.assertEqual(decoder.decode(b"rg"), "")
self.assertEqual(decoder.decode(b"", True), "org")
decoder.reset()
self.assertEqual(decoder.decode(b"xn--xam", ), "")
self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
self.assertEqual(decoder.decode(b"rg."), "org.")
self.assertEqual(decoder.decode(b"", True), "")
def test_incremental_encode(self):
self.assertEqual(
b"".join(codecs.iterencode("python.org", "idna")),
b"python.org"
)
self.assertEqual(
b"".join(codecs.iterencode("python.org.", "idna")),
b"python.org."
)
self.assertEqual(
b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
b"xn--pythn-mua.org."
)
self.assertEqual(
b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
b"xn--pythn-mua.org."
)
encoder = codecs.getincrementalencoder("idna")()
self.assertEqual(encoder.encode("\xe4x"), b"")
self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
self.assertEqual(encoder.encode("", True), b"org")
encoder.reset()
self.assertEqual(encoder.encode("\xe4x"), b"")
self.assertEqual(encoder.encode("ample.org."), b"xn--xample-9ta.org.")
self.assertEqual(encoder.encode("", True), b"")
class CodecsModuleTest(unittest.TestCase):
def test_decode(self):
self.assertEqual(codecs.decode(b'\xe4\xf6\xfc', 'latin-1'),
'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.decode)
self.assertEqual(codecs.decode(b'abc'), 'abc')
self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')
def test_encode(self):
self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
b'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.encode)
self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
self.assertEqual(codecs.encode('abc'), b'abc')
self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')
def test_register(self):
self.assertRaises(TypeError, codecs.register)
self.assertRaises(TypeError, codecs.register, 42)
def test_lookup(self):
self.assertRaises(TypeError, codecs.lookup)
self.assertRaises(LookupError, codecs.lookup, "__spam__")
self.assertRaises(LookupError, codecs.lookup, " ")
def test_getencoder(self):
self.assertRaises(TypeError, codecs.getencoder)
self.assertRaises(LookupError, codecs.getencoder, "__spam__")
def test_getdecoder(self):
self.assertRaises(TypeError, codecs.getdecoder)
self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
def test_getreader(self):
self.assertRaises(TypeError, codecs.getreader)
self.assertRaises(LookupError, codecs.getreader, "__spam__")
def test_getwriter(self):
self.assertRaises(TypeError, codecs.getwriter)
self.assertRaises(LookupError, codecs.getwriter, "__spam__")
def test_lookup_issue1813(self):
# Issue #1813: under Turkish locales, lookup of some codecs failed
# because 'I' is lowercased as "ı" (dotless i)
oldlocale = locale.setlocale(locale.LC_CTYPE)
self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
try:
locale.setlocale(locale.LC_CTYPE, 'tr_TR')
except locale.Error:
# Unsupported locale on this system
self.skipTest('test needs Turkish locale')
c = codecs.lookup('ASCII')
self.assertEqual(c.name, 'ascii')
class StreamReaderTest(unittest.TestCase):
def setUp(self):
self.reader = codecs.getreader('utf-8')
self.stream = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
def test_readlines(self):
f = self.reader(self.stream)
self.assertEqual(f.readlines(), ['\ud55c\n', '\uae00'])
class EncodedFileTest(unittest.TestCase):
def test_basic(self):
f = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
ef = codecs.EncodedFile(f, 'utf-16-le', 'utf-8')
self.assertEqual(ef.read(), b'\\\xd5\n\x00\x00\xae')
f = io.BytesIO()
ef = codecs.EncodedFile(f, 'utf-8', 'latin1')
ef.write(b'\xc3\xbc')
self.assertEqual(f.getvalue(), b'\xfc')
all_unicode_encodings = [
"ascii",
"big5",
"big5hkscs",
"charmap",
"cp037",
"cp1006",
"cp1026",
"cp1140",
"cp1250",
"cp1251",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"cp424",
"cp437",
"cp500",
"cp720",
"cp737",
"cp775",
"cp850",
"cp852",
"cp855",
"cp856",
"cp857",
"cp858",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"euc_jis_2004",
"euc_jisx0213",
"euc_jp",
"euc_kr",
"gb18030",
"gb2312",
"gbk",
"hp_roman8",
"hz",
"idna",
"iso2022_jp",
"iso2022_jp_1",
"iso2022_jp_2",
"iso2022_jp_2004",
"iso2022_jp_3",
"iso2022_jp_ext",
"iso2022_kr",
"iso8859_1",
"iso8859_10",
"iso8859_11",
"iso8859_13",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_2",
"iso8859_3",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"johab",
"koi8_r",
"koi8_u",
"latin_1",
"mac_cyrillic",
"mac_greek",
"mac_iceland",
"mac_latin2",
"mac_roman",
"mac_turkish",
"palmos",
"ptcp154",
"punycode",
"raw_unicode_escape",
"shift_jis",
"shift_jis_2004",
"shift_jisx0213",
"tis_620",
"unicode_escape",
"unicode_internal",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
]
if hasattr(codecs, "mbcs_encode"):
all_unicode_encodings.append("mbcs")
# The following encoding is not tested, because it's not supposed
# to work:
# "undefined"
# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
"punycode",
"unicode_internal"
]
broken_incremental_coders = broken_unicode_with_streams + [
"idna",
]
class BasicUnicodeTest(unittest.TestCase, MixInCheckStateHandling):
def test_basics(self):
s = "abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
name = codecs.lookup(encoding).name
if encoding.endswith("_codec"):
name += "_codec"
elif encoding == "latin_1":
name = "latin_1"
self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
(b, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s), "%r != %r (encoding=%r)" % (size, len(s), encoding))
(chars, size) = codecs.getdecoder(encoding)(b)
self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
if encoding not in broken_unicode_with_streams:
# check stream reader/writer
q = Queue(b"")
writer = codecs.getwriter(encoding)(q)
encodedresult = b""
for c in s:
writer.write(c)
chunk = q.read()
self.assertTrue(type(chunk) is bytes, type(chunk))
encodedresult += chunk
q = Queue(b"")
reader = codecs.getreader(encoding)(q)
decodedresult = ""
for c in encodedresult:
q.write(bytes([c]))
decodedresult += reader.read()
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
if encoding not in broken_incremental_coders:
# check incremental decoder/encoder (fetched via the Python
# and C API) and iterencode()/iterdecode()
try:
encoder = codecs.getincrementalencoder(encoding)()
cencoder = _testcapi.codec_incrementalencoder(encoding)
except LookupError: # no IncrementalEncoder
pass
else:
# check incremental decoder/encoder
encodedresult = b""
for c in s:
encodedresult += encoder.encode(c)
encodedresult += encoder.encode("", True)
decoder = codecs.getincrementaldecoder(encoding)()
decodedresult = ""
for c in encodedresult:
decodedresult += decoder.decode(bytes([c]))
decodedresult += decoder.decode(b"", True)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
# check C API
encodedresult = b""
for c in s:
encodedresult += cencoder.encode(c)
encodedresult += cencoder.encode("", True)
cdecoder = _testcapi.codec_incrementaldecoder(encoding)
decodedresult = ""
for c in encodedresult:
decodedresult += cdecoder.decode(bytes([c]))
decodedresult += cdecoder.decode(b"", True)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
# check iterencode()/iterdecode()
result = "".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
self.assertEqual(result, s, "%r != %r (encoding=%r)" % (result, s, encoding))
# check iterencode()/iterdecode() with empty string
result = "".join(codecs.iterdecode(codecs.iterencode("", encoding), encoding))
self.assertEqual(result, "")
if encoding not in ("idna", "mbcs"):
# check incremental decoder/encoder with errors argument
try:
encoder = codecs.getincrementalencoder(encoding)("ignore")
cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = b"".join(encoder.encode(c) for c in s)
decoder = codecs.getincrementaldecoder(encoding)("ignore")
decodedresult = "".join(decoder.decode(bytes([c])) for c in encodedresult)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
encodedresult = b"".join(cencoder.encode(c) for c in s)
cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
decodedresult = "".join(cdecoder.decode(bytes([c])) for c in encodedresult)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
def test_seek(self):
# all codecs should be able to encode these
s = "%s\n%s\n" % (100*"abc123", 100*"def456")
for encoding in all_unicode_encodings:
if encoding == "idna": # FIXME: See SF bug #1163178
continue
if encoding in broken_unicode_with_streams:
continue
reader = codecs.getreader(encoding)(io.BytesIO(s.encode(encoding)))
for t in range(5):
# Test that calling seek resets the internal codec state and buffers
reader.seek(0, 0)
data = reader.read()
self.assertEqual(s, data)
def test_bad_decode_args(self):
for encoding in all_unicode_encodings:
decoder = codecs.getdecoder(encoding)
self.assertRaises(TypeError, decoder)
if encoding not in ("idna", "punycode"):
self.assertRaises(TypeError, decoder, 42)
def test_bad_encode_args(self):
for encoding in all_unicode_encodings:
encoder = codecs.getencoder(encoding)
self.assertRaises(TypeError, encoder)
def test_encoding_map_type_initialized(self):
from encodings import cp1140
# This used to crash, we are only verifying there's no crash.
table_type = type(cp1140.encoding_table)
self.assertEqual(table_type, table_type)
def test_decoder_state(self):
# Check that getstate() and setstate() handle the state properly
u = "abc123"
for encoding in all_unicode_encodings:
if encoding not in broken_incremental_coders:
self.check_state_handling_decode(encoding, u, u.encode(encoding))
self.check_state_handling_encode(encoding, u, u.encode(encoding))
class CharmapTest(unittest.TestCase):
def test_decode_with_string_map(self):
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict", "abc"),
("abc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab"),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab\ufffe"),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab"),
("ab", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab\ufffe"),
("ab", 3)
)
allbytes = bytes(range(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", ""),
("", len(allbytes))
)
class WithStmtTest(unittest.TestCase):
def test_encodedfile(self):
f = io.BytesIO(b"\xc3\xbc")
with codecs.EncodedFile(f, "latin-1", "utf-8") as ef:
self.assertEqual(ef.read(), b"\xfc")
def test_streamreaderwriter(self):
f = io.BytesIO(b"\xc3\xbc")
info = codecs.lookup("utf-8")
with codecs.StreamReaderWriter(f, info.streamreader,
info.streamwriter, 'strict') as srw:
self.assertEqual(srw.read(), "\xfc")
class TypesTest(unittest.TestCase):
def test_decode_unicode(self):
# Most decoders don't accept unicode input
decoders = [
codecs.utf_7_decode,
codecs.utf_8_decode,
codecs.utf_16_le_decode,
codecs.utf_16_be_decode,
codecs.utf_16_ex_decode,
codecs.utf_32_decode,
codecs.utf_32_le_decode,
codecs.utf_32_be_decode,
codecs.utf_32_ex_decode,
codecs.latin_1_decode,
codecs.ascii_decode,
codecs.charmap_decode,
]
if hasattr(codecs, "mbcs_decode"):
decoders.append(codecs.mbcs_decode)
for decoder in decoders:
self.assertRaises(TypeError, decoder, "xxx")
<|fim▁hole|> self.assertEqual(codecs.unicode_escape_decode(r"\u1234"), ("\u1234", 6))
self.assertEqual(codecs.unicode_escape_decode(br"\u1234"), ("\u1234", 6))
self.assertEqual(codecs.raw_unicode_escape_decode(r"\u1234"), ("\u1234", 6))
self.assertEqual(codecs.raw_unicode_escape_decode(br"\u1234"), ("\u1234", 6))
class SurrogateEscapeTest(unittest.TestCase):
def test_utf8(self):
# Bad byte
self.assertEqual(b"foo\x80bar".decode("utf-8", "surrogateescape"),
"foo\udc80bar")
self.assertEqual("foo\udc80bar".encode("utf-8", "surrogateescape"),
b"foo\x80bar")
# bad-utf-8 encoded surrogate
self.assertEqual(b"\xed\xb0\x80".decode("utf-8", "surrogateescape"),
"\udced\udcb0\udc80")
self.assertEqual("\udced\udcb0\udc80".encode("utf-8", "surrogateescape"),
b"\xed\xb0\x80")
def test_ascii(self):
# bad byte
self.assertEqual(b"foo\x80bar".decode("ascii", "surrogateescape"),
"foo\udc80bar")
self.assertEqual("foo\udc80bar".encode("ascii", "surrogateescape"),
b"foo\x80bar")
def test_charmap(self):
# bad byte: \xa5 is unmapped in iso-8859-3
self.assertEqual(b"foo\xa5bar".decode("iso-8859-3", "surrogateescape"),
"foo\udca5bar")
self.assertEqual("foo\udca5bar".encode("iso-8859-3", "surrogateescape"),
b"foo\xa5bar")
def test_latin1(self):
# Issue6373
self.assertEqual("\udce4\udceb\udcef\udcf6\udcfc".encode("latin1", "surrogateescape"),
b"\xe4\xeb\xef\xf6\xfc")
class BomTest(unittest.TestCase):
def test_seek0(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
self.addCleanup(support.unlink, support.TESTFN)
for encoding in tests:
# Check if the BOM is written only once
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# Check that the BOM is written after a seek(0)
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data[0])
self.assertNotEqual(f.tell(), 0)
f.seek(0)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# (StreamWriter) Check that the BOM is written after a seek(0)
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data[0])
self.assertNotEqual(f.writer.tell(), 0)
f.writer.seek(0)
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# Check that the BOM is not written after a seek() at a position
# different than the start
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.seek(f.tell())
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# (StreamWriter) Check that the BOM is not written after a seek()
# at a position different than the start
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data)
f.writer.seek(f.writer.tell())
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
bytes_transform_encodings = [
"base64_codec",
"uu_codec",
"quopri_codec",
"hex_codec",
]
try:
import zlib
except ImportError:
pass
else:
bytes_transform_encodings.append("zlib_codec")
try:
import bz2
except ImportError:
pass
else:
bytes_transform_encodings.append("bz2_codec")
class TransformCodecTest(unittest.TestCase):
def test_basics(self):
binput = bytes(range(256))
for encoding in bytes_transform_encodings:
# generic codecs interface
(o, size) = codecs.getencoder(encoding)(binput)
self.assertEqual(size, len(binput))
(i, size) = codecs.getdecoder(encoding)(o)
self.assertEqual(size, len(o))
self.assertEqual(i, binput)
def test_read(self):
for encoding in bytes_transform_encodings:
sin = codecs.encode(b"\x80", encoding)
reader = codecs.getreader(encoding)(io.BytesIO(sin))
sout = reader.read()
self.assertEqual(sout, b"\x80")
def test_readline(self):
for encoding in bytes_transform_encodings:
if encoding in ['uu_codec', 'zlib_codec']:
continue
sin = codecs.encode(b"\x80", encoding)
reader = codecs.getreader(encoding)(io.BytesIO(sin))
sout = reader.readline()
self.assertEqual(sout, b"\x80")
def test_main():
support.run_unittest(
UTF32Test,
UTF32LETest,
UTF32BETest,
UTF16Test,
UTF16LETest,
UTF16BETest,
UTF8Test,
UTF8SigTest,
UTF7Test,
UTF16ExTest,
ReadBufferTest,
RecodingTest,
PunycodeTest,
UnicodeInternalTest,
NameprepTest,
IDNACodecTest,
CodecsModuleTest,
StreamReaderTest,
EncodedFileTest,
BasicUnicodeTest,
CharmapTest,
WithStmtTest,
TypesTest,
SurrogateEscapeTest,
BomTest,
TransformCodecTest,
)
if __name__ == "__main__":
test_main()<|fim▁end|> | def test_unicode_escape(self):
# Escape-decoding a Unicode string is supported and gives the same
# result as decoding the equivalent ASCII bytes string. |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>"""
The server module is responsible for managing the threaded UDP socket server.
This server is used to communicate with Matlab/Simulink simulations. It will be renamed appropriately soon.
@todo: rename this module to indicate its role in any Matlab connections
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import object
import socketserver
import re
import struct
from re import split
from ast import literal_eval
from threading import Timer
from time import strftime
import numpy as np
import requests
from tower import utils
from tower.map.map_interface import MapInterface
from server_conf.config import settings
import logging
logger = logging.getLogger(__name__)
HOST = 'localhost'
PORT = 2002
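# A minimal client-side sketch of the exchange this server expects (for
# illustration only; 'get_elevation' and its arguments are hypothetical and
# depend on what MapInterface registers in its command dictionary):
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   client.sendto(b'get_elevation, lat, 36.0, lon, -120.0', (HOST, PORT))
#   header, _ = client.recvfrom(4)     # one packed float32: payload byte count
#   payload, _ = client.recvfrom(1024) # float32 response data, chunked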
# @todo: think about including in a separate module for exceptions
class CommandNotFound(Exception):
""" Command received does not match one listed in the command dictionary """
pass
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
"""
Threaded UDP server for receiving and responding to requests from client applications. We can run the server safely
by doing:
Example::
map_server = ThreadedUDPServer((HOST, PORT), UDP_Interrupt)
server_thread = None
logger.info('Instantiation successful')
# terminate with Ctrl-C
try:
server_thread = Thread(target=map_server.serve_forever)
server_thread.daemon = False
logger.info("Threaded server loop running in: {}".format(server_thread.name))
print("Threaded server loop running in: {}".format(server_thread.name))
server_thread.start()
except KeyboardInterrupt:
server_thread.kill()
map_server.shutdown()
sys.exit(0)
"""
def get_request(self):
"""
Override native get_request function in order to print out who is connecting to the server
@todo make this Python3 compatible by using superclasses. Will need to update socketServer package
:return:
"""
(data, self.socket), client_addr = socketserver.UDPServer.get_request(self)
logger.info("Server connected to by:{}".format(client_addr))
return (data, self.socket), client_addr
class UDP_Interrupt(socketserver.BaseRequestHandler):
"""
This class works similar to the TCP handler class, except that
self.request consists of a pair of data and client sockets, and since
there is no connection the client address must be given explicitly
when sending data back via sendto().
"""
def setup(self):
"""
Instantiate the connection with the worldEngine, the MapInterface.
:rtype : None
"""
# TODO : figure out how to make the map interface a singleton class
if not hasattr(self, 'mapInterface'):
self.mapInterface = MapInterface(settings['FILE_CONFIG']['filename'])
def handle(self):
"""
Handles UDP requests to the server.
The map interface class is responsible for parsing the request, and executing the requested function.
:return:
"""
socket = self.request[1]
data = self.request[0].strip()
logger.info("Address {} at {} wrote: '{}'".format(self.client_address[1], self.client_address[0], data))
cmd_strn, ret = self.command_service(data)
print(ret)
self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],
self.mapInterface.router[cmd_strn])
def command_service(self, rawCommand):
"""
Parse raw input and execute specified function with args
:param rawCommand: csv string from Matlab/Simulink of the form:
'command, namedArg1, arg1, namedArg2, arg2, ..., namedArgN, argN'
:return: the command and arguments as a dictionary
"""
pack = [x.strip() for x in split('[,()]*', rawCommand.strip())]
raw_cmd = pack[0]
argDict = {key: literal_eval(value) for key, value in utils.grouper(pack[1:], 2)}
cmd = self.mapInterface.commands[raw_cmd]
ret = cmd(**argDict)
logger.info("Command '{}' run with args {}".format(raw_cmd, argDict))
return raw_cmd, ret
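# Worked example (illustrative; 'get_elevation' is a hypothetical command):
# "get_elevation, lat, 36.0, lon, -120.0" is split on commas/parentheses into
# raw_cmd = 'get_elevation' and argDict = {'lat': 36.0, 'lon': -120.0}, and the
# registered MapInterface function is invoked as cmd(lat=36.0, lon=-120.0).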
def command_response(self, cmd_strn, returned_data, socket, client_ip, client_address):
"""
Serialise a command result as float32 bytes and send it back to the client,
preceded by a one-float length header and split into 1024-byte packets.
:param cmd_strn: name of the command whose result is being returned
:param returned_data: numpy array produced by the executed command
:return: None
"""
returned_data = returned_data.astype(np.float32)
response = returned_data.tostring('C')
response_length = len(response)
response_arr = [response]
if response_length > 256:
response_arr = list(self.split_by_n(response_arr[0], 256 * 4))
data = [response_length]
s = struct.pack('f' * len(data), *data)
socket.sendto(s, (client_ip, client_address))
for response_packet in response_arr:
socket.sendto(response_packet, (client_ip, client_address))
@staticmethod
def split_by_n(seq, n):
"""A generator to divide a sequence into chunks of n units."""
while seq:
yield seq[:n]
seq = seq[n:]
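# For illustration: list(UDP_Interrupt.split_by_n(b"abcdefg", 3)) yields
# [b"abc", b"def", b"g"].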
@staticmethod
def func_explode(s):
pattern = r'(\w[\w\d_]*)\((.*)\)$'
match = re.match(pattern, s)
if match:
return list(match.groups())
else:
return []
def finish(self):
pass
class Interrupt(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
<|fim▁hole|>
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.daemon = True
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
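# A minimal usage sketch (illustrative only): because _run() re-arms the timer
# after each call, Interrupt behaves as a repeating daemon timer, e.g. posting
# a heartbeat via call_request every five seconds until stopped:
#
#   heartbeat = Interrupt(5.0, call_request)
#   ...
#   heartbeat.stop()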
def call_request(url=None, data=None, headers=None):
if url is None:
url = 'http://httpbin.org/post'
if headers is None:
headers = {'content-type': 'application/json'}
time_stamp = strftime("%Y-%m-%d %H:%M:%S")
if data is None:
data = {'flightId': 0o001, 'time': time_stamp, 'latitude': 36, 'longitude': -120, 'altitude': 50, 'speed': 100,
'heading': 0, 'hasLanded': False, 'dataSource': "UCSC"}
r = requests.post(url, json=data, headers=headers)
print(r.status_code)<|fim▁end|> | def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs) |
<|file_name|>pogoInput.py<|end_file_name|><|fim▁begin|>import numpy as np
import warnings
import subprocess
import pogoFunctions as pF
from PolyInterface import poly
class PogoInput:
def __init__(self,
fileName,
elementTypes,
signals,
historyMeasurement,
nodes = None,
elements = None,
geometryFile = None,
precision=8,
targetMeshSize = 5e-5,
nDims=2,
nDofPerNode = None,
notes = None,
runName = 'pogoJob',
nt = 100,
dt = 1e-8,
elementTypeRefs = None,
materialTypeRefs = None,
orientationRefs = None,
elementParameters = None,
materials = [[0,7e10,0.34,2700],],
orientations = None,
boundaryConditions = None,
historyMeasurementFrequency = 20,
fieldStoreIncrements = None,
folderIn = None,
totalForce = False,
version = 1.03,
writeFile = True):
self.fileName = fileName
### Header
self.header = np.array(['']*20, dtype='str')
if version not in [1.03, 1.04]:
raise ValueError('Input file version must be 1.03 or 1.04.')
headerString = '%pogo-inp{}'.format(version)
for c1 in range(0, len(headerString)):
self.header[c1] = headerString[c1]<|fim▁hole|>
### Precision
if precision not in [4,8]:
raise ValueError('Precision must be 4 or 8.')
self.precision = np.array([precision,],dtype='int32')
self.nDims = np.array([nDims,],dtype='int32')
### Number of degrees of freedom per node
if nDofPerNode == None:
nDofPerNode = self.nDims
if nDofPerNode not in [1,2,3]:
raise ValueError('Number of degrees of freedom must be 1, 2 or 3')
self.nDofPerNode = np.array([nDofPerNode,],dtype='int32')
### Set notes
self.notes = np.array(['']*1024, dtype='str')
if notes != None:
if len(notes) > 1024:
notes = notes[:1024]
for character in range(len(notes)):
self.notes[character] = notes[character]
### Set runname
self.runName = np.array(['']*80, dtype='str')
if len(runName) > 80:
runName = runName[:80]
for character in range(0, len(runName)):
self.runName[character] = runName[character]
### Set time step and run time
self.nt = np.array([nt,],dtype='int32')
self.dt = np.array([dt,],dtype=self.getPrecString())
### Node generation if necessary
if not np.any(nodes) and not geometryFile:
raise ValueError('Either a poly file or node/element definitions are required')
elif geometryFile and targetMeshSize and not np.any(elements) and not np.any(nodes):
if geometryFile.split('.')[-1] == 'dxf':
print('Creating poly file from {}'.format(geometryFile))
poly.poly(geometryFile,elementSize = targetMeshSize,writeFile=True)
if geometryFile.split('.')[-1] == 'poly':
geometryFile = geometryFile[:-5]
if self.nDims == 2:
targetMeshArea = targetMeshSize*targetMeshSize
subprocess.call('triangle -q -j -a{:.12}F {}.poly'.format(targetMeshArea,geometryFile))
elif self.nDims == 3:
targetMeshVolume = targetMeshSize*targetMeshSize*targetMeshSize
### Add cwd
subprocess.call('tetgen {:.12}F {}.poly'.format(targetMeshVolume,geometryFile))
nodes = pF.loadNodeFile(geometryFile+'.1.node')
elements = pF.loadElementFile(geometryFile+'.1.ele')
### Number of nodes and node positions
if np.shape(nodes)[0] != nDims:
raise ValueError('nodes must be in shape (nDims, nNodes).')
self.nNodes = np.array([np.shape(nodes)[1],],dtype = 'int32')
self.nodes = nodes.astype(self.getPrecString()).T
### Number of elements and nodes per element
self.nElements = np.array([np.shape(elements)[1],],dtype='int32')
self.nNodesPerElement = np.array([np.shape(elements)[0],],dtype='int32')
### Element type refs
if elementTypeRefs == None:
elementTypeRefs = np.zeros(self.nElements)
if len(elementTypeRefs) != self.nElements:
raise ValueError('elementTypeRefs must be of length nElements.')
#if min(elementTypeRefs) != 0:
# raise ValueError('elementTypeRefs must be 1 indexed.')
self.elementTypeRefs = elementTypeRefs.astype('int32')# - 1
### Material type refs
if materialTypeRefs == None:
materialTypeRefs = np.zeros(self.nElements)
if len(materialTypeRefs) != self.nElements:
raise ValueError('materialTypeRefs must be of length nElements.')
#if min(materialTypeRefs) != 1:
# raise ValueError('materialTypeRefs must be 1 indexed.')
self.materialTypeRefs = materialTypeRefs.astype('int32') #- 1
### Element orientations
if orientationRefs == None:
orientationRefs = np.zeros(self.nElements,dtype = 'int32')
if len(orientationRefs)!= self.nElements:
raise ValueError('orientationRefs must be of length nElements.')
if min(orientationRefs) < 0: #unused values are set to 0 so -1 in zero indexing
raise ValueError('orientationRefs must be 1 indexed.')
self.orientationRefs = orientationRefs.astype('int32')# - 1
### Elements
if np.max(elements) > self.nNodes:
raise ValueError('elements points to nodes which are greater than nNodes.')
if np.min(elements) < 1:
raise ValueError('elements must be 1 indexed.')
self.elements = elements.astype('int32') - 1 #convert to zero indexing
self.elements = self.elements.T
### PML sets
self.nPmlSets = np.array([0,],dtype = 'int32')
self.pmlParams = np.array([0,],dtype = 'int32')
### Element types
self.nElementTypes = np.array([len(elementTypes),],dtype = 'int32')
if elementParameters == None:
elementParameters = np.array([0,]*len(elementTypes), dtype = 'int32')
if np.max(self.elementTypeRefs) > self.nElementTypes - 1:
raise ValueError('elementTypeRefs points to element types greater than the number of types of element.')
self.elementTypes = []
for ii,elementType in enumerate(elementTypes):
self.elementTypes.append(ElementType(elementType,elementParameters[ii],self.getPrecString()))
### Material types
self.nMaterials = np.array([len(materials),], dtype = 'int32')
self.materials = []
for material in materials:
self.materials.append(Material(material,self.getPrecString()))
### Orientations
if orientations == None:
self.nOr = np.array([0,],dtype ='int32')
self.orientations = None
else:
self.orientations = []
self.nOr = np.array([len(orientations),],dtype = 'int32')
for orientation in orientations:
self.orientations.append(Orientation(orientation,self.getPrecString()))
### Boundary conditions
if boundaryConditions == None:
self.nFixDof = np.array([0,],dtype ='int32')
self.boundaryConditions = None
else:
nSets = len(boundaryConditions) // 2
self.nFixDof = np.array([sum([len(boundaryConditions[c1*2]) for c1 in range(nSets)]),],dtype = 'int32')
self.boundaryConditions = []
for c1 in range(0,nSets):
#self.boundaryConditions.append(BoundaryCondition(boundaryConditions[c1]))
self.boundaryConditions.append(np.array([(boundaryConditions[c1*2]-1)*4 + boundaryConditions[c1*2+1]-1,],dtype='int32'))
### Input signals
self.nInputSignals = np.array([len(signals),],dtype = 'int32')
self.signals = []
for signal in signals:
self.signals.append(Signal(signal,totalForce,self.getPrecString(),dt))
### History measurements
if historyMeasurement == None:
warnings.warn('Warning : No history measurements requested.')
self.nMeas = 0
self.historyMeasurement = 0
else:
self.nMeas = np.array([len(historyMeasurement),],dtype = 'int32')
self.historyMeasurement = HistoryMeasurement(historyMeasurement,historyMeasurementFrequency)
### Field measurements
if fieldStoreIncrements == None:
self.nFieldStore = np.array([0,],dtype='int32')
self.fieldStoreIncrements = np.array([0,],dtype ='int32')
else:
self.nFieldStore = np.array([len(fieldStoreIncrements),],dtype = 'int32')
if np.max(fieldStoreIncrements) > nt or np.min(fieldStoreIncrements) < 1:
raise ValueError('fieldStoreIncrements out of range [1, nt].')
self.fieldStoreIncrements = np.array([fieldStoreIncrements-1,],dtype = 'int32')
### Write to file
if writeFile:
self.writeFile()
def getPrecString(self):
precString = 'float64'
if self.precision == 4:
precString = 'float32'
return precString
def writeFile(self):
with open(self.fileName + '.pogo-inp','wb') as f:
self.header.tofile(f)
self.precision.tofile(f)
self.nDims.tofile(f)
self.nDofPerNode.tofile(f)
self.notes.tofile(f)
self.runName.tofile(f)
self.nt.tofile(f)
self.dt.tofile(f)
self.nNodes.tofile(f)
self.nodes.tofile(f)
self.nElements.tofile(f)
self.nNodesPerElement.tofile(f)
self.elementTypeRefs.tofile(f)
self.materialTypeRefs.tofile(f)
self.orientationRefs.tofile(f)
self.elements.tofile(f)
self.nPmlSets.tofile(f)
self.pmlParams.tofile(f)
self.nElementTypes.tofile(f)
for elementType in self.elementTypes:
elementType.writeElementType(f)
self.nMaterials.tofile(f)
for material in self.materials:
material.writeMaterial(f)
self.nOr.tofile(f)
if not self.orientations == None:
for orientation in self.orientations:
orientation.writeOrientation(f)
self.nFixDof.tofile(f)
if not self.boundaryConditions == None:
for bc in self.boundaryConditions:
bc.tofile(f)
self.nInputSignals.tofile(f)
self.signals[0].nt.tofile(f)
self.signals[0].dt.tofile(f)
for signal in self.signals:
signal.writeSignal(f)
if self.nMeas>0:
self.historyMeasurement.writeHistory(f)
else:
np.array([0,], dtype='int32').tofile(f)
np.array([0,], dtype='int32').tofile(f)
self.nFieldStore.tofile(f)
self.fieldStoreIncrements.tofile(f)
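# A minimal construction sketch (illustrative only; all values are made up and
# 'block.poly' is a hypothetical Triangle geometry file). A signal is
# [nodes, amplitude, dof, type, shape] and a history measurement is a
# (nodes, dofs) pair, matching the Signal and HistoryMeasurement classes below:
#
#   tone = np.sin(2*np.pi*2e6*np.arange(100)*1e-8)
#   job = PogoInput('demoJob', ['CPE3'],
#                   [[[1, 2, 3], 1.0, 2, 0, tone]],
#                   [(np.array([10, 11]), np.array([2, 2]))],
#                   geometryFile='block.poly')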
class Material:
def __init__(self,materialInfo,precString):
self.matType = np.array([materialInfo[0],],dtype='int32')
self.matProps = np.array([materialInfo[1:],],dtype=precString)
self.nMatParams = np.array([len(materialInfo[1:]),],dtype='int32')
def writeMaterial(self,fileId):
self.matType.tofile(fileId)
self.nMatParams.tofile(fileId)
self.matProps.tofile(fileId)
class ElementType:
def __init__(self,elementType,elementParams,precString):
self.elTypeSave = np.array(['']*20,dtype='str')
for character in range(len(elementType)):
self.elTypeSave[character] = elementType[character]
if elementParams:
self.nParams = np.array([len(elementParams),],dtype='int32')
self.params = np.array(elementParams,dtype = precString)
else:
self.params = np.array([0,],dtype='int32')
self.nParams = np.array([0,],dtype='int32')
def writeElementType(self,fileId):
self.elTypeSave.tofile(fileId)
self.nParams.tofile(fileId)
self.params.tofile(fileId)
class Orientation:
def __init__(self,orInfo,precString):
self.paramType = np.array([orInfo[0],], dtype='int32')
self.nOrParams = np.array([len(orInfo[1:]),],dtype='int32')
self.paramValues = np.array([orInfo[1:],],dtype = precString)
def writeOrientation(self,fileId):
self.paramType.tofile(fileId)
self.nOrParams.tofile(fileId)
self.paramValues.tofile(fileId)
class BoundaryCondition:
def __init__(self,BCs):
self.nodes = np.array(BCs[0])
self.dof = np.array(BCs[1])
def writeBoundaryCondition(self,fileId):
dofOut = np.array([(self.nodes-1)*4 + self.dof-1,],dtype='int32')
dofOut.tofile(fileId)
class HistoryMeasurement:
nodes = np.array([],dtype='int32')
dofs = np.array([],dtype='int32')
def __init__(self,histInfo,frequency):
###Add Input checking
for history in histInfo:
self.nodes = np.hstack((self.nodes,history[0]))
self.dofs = np.hstack((self.dofs,history[1]))
self.frequency = np.array([frequency,],dtype = 'int32')
self.nMeas = np.array([len(self.nodes),],dtype = 'int32')
###Must Add Version 1.04 support
def writeHistory(self,fileId):
self.nMeas.tofile(fileId)
self.frequency.tofile(fileId)
outHist = (self.nodes*4 + self.dofs).astype('int32')# - 1
outHist.tofile(fileId)
class FieldMeasurement:
def __init__(self,increments=0):
###Add input checking
self.increments = np.array([increments - 1],dtype='int32')
class Signal:
def __init__(self, signalInfo, totalForce, precString,dt):
if signalInfo:
nNodes = len(signalInfo[0])
self.type = np.array([signalInfo[3],],dtype = 'int32')
# if len(np.unique(signalInfo[0])) != nNodes:
# errStr = 'Duplicate nodes cannot be specified for a signal'
# raise ValueError(errStr)
if np.size(signalInfo[1]) != 1 and len(signalInfo[1]) != nNodes:
raise ValueError('Signal amplitude must be a scalar or a vector of amplitudes for each node signal applied to.')
if signalInfo[3] not in [0,1]:
raise ValueError('Signal type must be 0 or 1, got {}.'.format(signalInfo[3]))
self.nNodes = np.array([len(signalInfo[0]),],dtype='int32')
self.nodes = np.array(signalInfo[0],dtype = 'int32')
if type(signalInfo[1]) is float:
if totalForce == True:
if signalInfo[3] == 1:
raise ValueError('totalForce not supported for displacement load.')
else:
ampVal = signalInfo[1]/nNodes
else:
ampVal = signalInfo[1]
amp = np.array(np.ones(nNodes)*ampVal, dtype=precString)
elif type(signalInfo[1]) is np.ndarray:
if len(signalInfo[1]) != self.nNodes:
raise ValueError('If signal amplitude is an array, a value must be specified for each node in the transducer.')
if totalForce == True:
warnings.warn('totalForce is not supported for loads specified for individual nodes.')
amp = np.array([signalInfo[1],], dtype=precString)
else:
raise ValueError('Signal amplitude not recognised')
self.amplitude = amp
self.dof = np.array(signalInfo[2],dtype ='int32')
self.shape = np.array(signalInfo[4],dtype = precString)
self.dt = np.array(dt,dtype=precString)
self.nt = np.array(len(signalInfo[4]),dtype = 'int32')
def writeSignal(self,fileId):
self.nNodes.tofile(fileId)
self.type.tofile(fileId)
dof = self.nodes*4 + self.dof-1
dof.tofile(fileId)
self.amplitude.tofile(fileId)
self.shape.tofile(fileId)<|fim▁end|> | |
<|file_name|>bcc_common.cc<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2015 PLUMgrid, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bcc_common.h"
#include "bpf_module.h"
extern "C" {
void * bpf_module_create_c(const char *filename, unsigned flags, const char *cflags[],
int ncflags, bool allow_rlimit, const char *dev_name) {
auto mod = new ebpf::BPFModule(flags, nullptr, true, "", allow_rlimit, dev_name);
if (mod->load_c(filename, cflags, ncflags) != 0) {
delete mod;
return nullptr;
}
return mod;
}
void * bpf_module_create_c_from_string(const char *text, unsigned flags, const char *cflags[],
int ncflags, bool allow_rlimit, const char *dev_name) {
auto mod = new ebpf::BPFModule(flags, nullptr, true, "", allow_rlimit, dev_name);
if (mod->load_string(text, cflags, ncflags) != 0) {
delete mod;
return nullptr;
}
return mod;
}
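// A minimal usage sketch (illustrative only; error handling elided, and the
// trivial program text is hypothetical):
//
//   const char *text = "int noop(void *ctx) { return 0; }";
//   void *mod = bpf_module_create_c_from_string(text, 0, nullptr, 0,
//                                               true, nullptr);
//   if (mod) {
//     size_t n = bpf_num_functions(mod);            // compiled BPF functions
//     const char *name = bpf_function_name(mod, 0); // e.g. "noop"
//   }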
bool bpf_module_rw_engine_enabled() {
return ebpf::bpf_module_rw_engine_enabled();
}
<|fim▁hole|> delete mod;
}
size_t bpf_num_functions(void *program) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return 0;
return mod->num_functions();
}
const char * bpf_function_name(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return nullptr;
return mod->function_name(id);
}
void * bpf_function_start(void *program, const char *name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return nullptr;
return mod->function_start(name);
}
void * bpf_function_start_id(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return nullptr;
return mod->function_start(id);
}
size_t bpf_function_size(void *program, const char *name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return 0;
return mod->function_size(name);
}
size_t bpf_function_size_id(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return 0;
return mod->function_size(id);
}
char * bpf_module_license(void *program) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return nullptr;
return mod->license();
}
unsigned bpf_module_kern_version(void *program) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return 0;
return mod->kern_version();
}
size_t bpf_num_tables(void *program) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->num_tables();
}
size_t bpf_table_id(void *program, const char *table_name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return ~0ull;
return mod->table_id(table_name);
}
int bpf_table_fd(void *program, const char *table_name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->table_fd(table_name);
}
int bpf_table_fd_id(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->table_fd(id);
}
int bpf_table_type(void *program, const char *table_name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->table_type(table_name);
}
int bpf_table_type_id(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->table_type(id);
}
size_t bpf_table_max_entries(void *program, const char *table_name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return 0;
return mod->table_max_entries(table_name);
}
size_t bpf_table_max_entries_id(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return 0;
return mod->table_max_entries(id);
}
int bpf_table_flags(void *program, const char *table_name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->table_flags(table_name);
}
int bpf_table_flags_id(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->table_flags(id);
}
const char * bpf_table_name(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return nullptr;
return mod->table_name(id);
}
const char * bpf_table_key_desc(void *program, const char *table_name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return nullptr;
return mod->table_key_desc(table_name);
}
const char * bpf_table_key_desc_id(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return nullptr;
return mod->table_key_desc(id);
}
const char * bpf_table_leaf_desc(void *program, const char *table_name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return nullptr;
return mod->table_leaf_desc(table_name);
}
const char * bpf_table_leaf_desc_id(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return nullptr;
return mod->table_leaf_desc(id);
}
size_t bpf_table_key_size(void *program, const char *table_name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return 0;
return mod->table_key_size(table_name);
}
size_t bpf_table_key_size_id(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return 0;
return mod->table_key_size(id);
}
size_t bpf_table_leaf_size(void *program, const char *table_name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return 0;
return mod->table_leaf_size(table_name);
}
size_t bpf_table_leaf_size_id(void *program, size_t id) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return 0;
return mod->table_leaf_size(id);
}
int bpf_table_key_snprintf(void *program, size_t id, char *buf, size_t buflen, const void *key) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->table_key_printf(id, buf, buflen, key);
}
int bpf_table_leaf_snprintf(void *program, size_t id, char *buf, size_t buflen, const void *leaf) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->table_leaf_printf(id, buf, buflen, leaf);
}
int bpf_table_key_sscanf(void *program, size_t id, const char *buf, void *key) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->table_key_scanf(id, buf, key);
}
int bpf_table_leaf_sscanf(void *program, size_t id, const char *buf, void *leaf) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->table_leaf_scanf(id, buf, leaf);
}
int bcc_func_load(void *program, int prog_type, const char *name,
const struct bpf_insn *insns, int prog_len,
const char *license, unsigned kern_version,
int log_level, char *log_buf, unsigned log_buf_size,
const char *dev_name) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return -1;
return mod->bcc_func_load(prog_type, name, insns, prog_len,
license, kern_version, log_level,
log_buf, log_buf_size, dev_name);
}
size_t bpf_perf_event_fields(void *program, const char *event) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod)
return 0;
return mod->perf_event_fields(event);
}
const char * bpf_perf_event_field(void *program, const char *event, size_t i) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod)
return nullptr;
return mod->perf_event_field(event, i);
}
}<|fim▁end|> | void bpf_module_destroy(void *program) {
auto mod = static_cast<ebpf::BPFModule *>(program);
if (!mod) return; |
<|file_name|>sale.ts<|end_file_name|><|fim▁begin|>/**
* Created by cghislai on 01/09/15.
*/
import {LocalAccountingTransaction} from './accountingTransaction';
import {LocalCompany} from './company';
import {LocalCustomer} from './customer';
import {InvoiceRef} from '../domain/invoice';
import {AccountingTransactionRef} from '../domain/accountingTransaction';
import * as Immutable from 'immutable';
export interface LocalSale extends Immutable.Map<string, any> {
id:number;
company:LocalCompany;
customer:LocalCustomer;
dateTime:Date;
invoiceRef:InvoiceRef; // Keep ref to avoid cyclic dependencies
vatExclusiveAmount:number;
vatAmount:number;
closed:boolean;
reference:string;
// FIXME: implement service in backend
accountingTransactionRef:AccountingTransactionRef;
discountRatio:number;
discountAmount:number;
totalPaid:number;
}
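// Immutable.Record only exposes keys listed in this default spec, so every field
// of LocalSale must appear here for createNewSale() to be able to populate it.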
var SaleRecord = Immutable.Record({
id: null,
company: null,
customer: null,
dateTime: null,
invoiceRef: null,
vatExclusiveAmount: null,
vatAmount: null,
closed: null,
reference: null,
accountingTransactionRef: null,
discountRatio: null,<|fim▁hole|>export class LocalSaleFactory {
static createNewSale(desc:any):LocalSale {
return <any>SaleRecord(desc);
}
}<|fim▁end|> | discountAmount: null,
totalPaid: null
});
|
<|file_name|>test.rs<|end_file_name|><|fim▁begin|>#[macro_use]<|fim▁hole|>use std::collections::HashMap;
#[test]
fn it_returns_a_hashmap() {
let map: HashMap<_, u32> = hashmap! {
key: 2u32,
key2: 33u32
};
let mut tmp_map: HashMap<_, u32> = HashMap::new();
tmp_map.insert("key", 2u32);
tmp_map.insert("key2", 33u32);
let cmp_map = tmp_map;
assert_eq!(map, cmp_map)
}<|fim▁end|> | extern crate hashmacro; |
<|file_name|>dataSource.js<|end_file_name|><|fim▁begin|>define(["require", "exports", "./selection", "./collection", "./utils/arrayUtils"], function (require, exports, selection_1, collection_1, arrayUtils_1) {
Object.defineProperty(exports, "__esModule", { value: true });
var DataSource = (function () {
function DataSource(selection, config) {
this.selection = selection || new selection_1.Selection('single');
this.selectionEventID = this.selection.addEventListener(this.selectionEventCallback.bind(this));
this.selection.overrideGetRowKey(this.getRowKey.bind(this));
this.selection.overrideGetRowKeys(this.getRowKeys.bind(this));
this.arrayUtils = new arrayUtils_1.ArrayUtils();
this.key = null;
this.mainArray = null;
this.config = config;
if (this.config) {
this.key = config.key || '__avgKey';
this.rowHeight = config.rowHeight || 25;<|fim▁hole|> this.rowHeightCallback = config.rowHeightCallback || function () { return null; };
}
else {
this.key = '__avgKey';
this.rowHeight = 25;
this.groupHeight = 25;
this.rowHeightCallback = function () { return null; };
}
this.eventIdCount = -1;
this.eventCallBacks = [];
this.entity = null;
this.collection = new collection_1.Collection(this);
}
DataSource.prototype.getSelection = function () {
return this.selection;
};
DataSource.prototype.getKey = function () {
return this.key;
};
DataSource.prototype.length = function () {
return this.collection.length;
};
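// Broadcasts an event to every registered listener; a callback that returns a
// falsy value is treated as dead and nulled out so it is skipped from then on.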
DataSource.prototype.triggerEvent = function (event) {
var _this = this;
this.eventCallBacks.forEach(function (FN, i) {
if (FN !== null) {
var alive = FN(event);
if (!alive) {
_this.eventCallBacks[i] = null;
}
}
});
};
DataSource.prototype.addEventListener = function (callback) {
this.eventIdCount++;
this.eventCallBacks.push(callback);
return this.eventIdCount;
};
DataSource.prototype.removeEventListener = function (id) {
this.eventCallBacks.splice(id, 1);
};
DataSource.prototype.setArray = function (array) {
this.collection = new collection_1.Collection(this);
this.selection.reset();
this.arrayUtils.resetGrouping();
this.arrayUtils.resetSort(this.key);
this.entity = null;
this.collection.setData(array);
this.mainArray = this.collection.getEntities();
this.triggerEvent('collection_changed');
};
DataSource.prototype.push = function (array) {
if (Array.isArray(array)) {
var grouping = this.arrayUtils.getGrouping();
var collection = this.collection.getEntities();
collection = collection.concat(array);
this.collection.setData(collection);
this.mainArray = this.collection.getEntities();
this.arrayUtils.runOrderbyOn(this.collection.getEntities());
var untouchedgrouped = this.collection.getEntities();
if (grouping.length > 0) {
var groupedArray = this.arrayUtils.group(untouchedgrouped, grouping, true);
this.collection.setData(groupedArray, untouchedgrouped);
}
this.triggerEvent('collection_updated');
}
};
DataSource.prototype.refresh = function (data) {
if (data) {
this.collection = new collection_1.Collection(this);
this.collection.setData(data);
this.mainArray = this.collection.getEntities();
this.entity = null;
}
var grouping = this.arrayUtils.getGrouping();
this.arrayUtils.runOrderbyOn(this.collection.getEntities());
if (grouping.length > 0) {
var unGroupedArray = this.collection.getEntities();
var groupedArray = this.arrayUtils.group(unGroupedArray, grouping, true);
this.collection.setData(groupedArray, unGroupedArray);
}
this.triggerEvent('collection_updated');
};
DataSource.prototype.select = function (row) {
this.entity = this.collection.getRow(row);
};
DataSource.prototype.query = function (options) {
if (options) {
var newArray = this.arrayUtils.query(this.mainArray, options);
this.collection.setData(newArray);
}
else {
this.collection.setData(this.mainArray);
}
this.orderBy(null, true);
this.triggerEvent('collection_filtered');
};
DataSource.prototype.orderBy = function (attribute, addToCurrentSort) {
var collection = this.collection.getEntities();
var result = this.arrayUtils.orderBy(collection, attribute, addToCurrentSort);
this.collection.setData(result.fixed, result.full);
this.triggerEvent('collection_sorted');
};
DataSource.prototype.getCurrentOrderBy = function () {
return this.arrayUtils.getOrderBy();
};
DataSource.prototype.getCurrentFilter = function () {
return this.arrayUtils.getCurrentFilter();
};
DataSource.prototype.getElement = function (row) {
if (row === undefined || row === null) {
throw new Error('row missing');
}
else {
return this.collection.getRow(row);
}
};
DataSource.prototype.updateRowData = function (attribute, data, rows) {
var entities = this.collection.getCurrentEntities();
rows.forEach(function (x) {
entities[x][attribute] = data;
});
};
DataSource.prototype.group = function (grouping, keepExpanded) {
var _this = this;
this.arrayUtils.resetSort();
grouping.forEach(function (group) {
_this.arrayUtils.setOrderBy(group.field, true);
});
this.arrayUtils.runOrderbyOn(this.collection.getEntities());
var ungroupedArray = this.collection.getEntities();
var groupedArray = this.arrayUtils.group(ungroupedArray, grouping, keepExpanded);
this.collection.setData(groupedArray, ungroupedArray);
this.triggerEvent('collection_grouped');
};
DataSource.prototype.groupCollapse = function (id) {
var groupedArray = this.arrayUtils.groupCollapse(id);
var ungroupedArray = this.collection.getEntities();
this.collection.setData(groupedArray, ungroupedArray);
if (id) {
this.triggerEvent('collection_collapsed');
}
else {
this.triggerEvent('collection_collapsed_all');
}
};
DataSource.prototype.groupExpand = function (id) {
var groupedArray = this.arrayUtils.groupExpand(id);
var ungroupedArray = this.collection.getEntities();
this.collection.setData(groupedArray, ungroupedArray);
if (id) {
this.triggerEvent('collection_expanded');
}
else {
this.triggerEvent('collection_expanded_all');
}
};
DataSource.prototype.getGrouping = function () {
return this.arrayUtils.getGrouping();
};
DataSource.prototype.addBlankRow = function () {
var newElement = {};
this.mainArray.unshift(newElement);
var collectionUngrouped = this.collection.getEntities();
var displayedCollection = this.collection.getCurrentEntities();
var index = collectionUngrouped.indexOf(newElement);
if (index === -1) {
collectionUngrouped.unshift(newElement);
}
displayedCollection.unshift(newElement);
this.collection.setData(displayedCollection, collectionUngrouped);
this.entity = newElement;
this.triggerEvent('collection_filtered');
};
DataSource.prototype.unshift = function (data) {
if (data) {
this.mainArray.unshift(data);
var displayedCollection = this.collection.getEntities();
var ungroupedCollection = this.collection.getCurrentEntities();
var index = displayedCollection.indexOf(data);
if (index === -1) {
displayedCollection.unshift(data);
}
ungroupedCollection.unshift(data);
this.collection.setData(ungroupedCollection, displayedCollection);
this.entity = data;
this.triggerEvent('collection_filtered');
}
};
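// Removes rows by key: accepts an array of row indices (or a single index when an
// entity is selected), resolves each index to its key, then splices the matching
// entities out of the collection and refreshes the view.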
DataSource.prototype.remove = function (rows) {
var _this = this;
var keysToDelete = new Set();
var returnArray = [];
if (Array.isArray(rows)) {
rows.forEach(function (row) {
keysToDelete.add(_this.getRowKey(row));
});
}
else {
if (this.entity && Number.isInteger(rows)) {
keysToDelete.add(this.getRowKey(rows));
}
}
if (keysToDelete.size > 0) {
var oldArray = this.collection.getEntities();
for (var i = 0; i < oldArray.length; i++) {
if (keysToDelete.has(oldArray[i][this.key]) === true) {
returnArray.push(oldArray.splice(i, 1)[0]);
i--;
}
}
this.collection.setData(oldArray);
this.refresh();
}
return returnArray;
};
DataSource.prototype.getCollectionStatus = function () {
var status = {
collectionLength: this.mainArray ? this.mainArray.length : 0,
filteredCollectionLength: this.collection.getEntities().length,
selectionLength: this.selection.getLength()
};
return status;
};
DataSource.prototype.setLocaleCompare = function (code, options) {
this.arrayUtils.setLocaleCompare(code, options);
};
DataSource.prototype.getRowHeightState = function () {
return this.collection.getRowHeightState();
};
DataSource.prototype.getRowKey = function (row) {
if (this.collection) {
return this.collection.getRowKey(row);
}
else {
return null;
}
};
DataSource.prototype.getRowKeys = function () {
if (this.collection) {
return this.collection.getRowKeys();
}
else {
return [];
}
};
DataSource.prototype.selectionEventCallback = function (e) {
this.triggerEvent(e);
return true;
};
return DataSource;
}());
exports.DataSource = DataSource;
});
//# sourceMappingURL=dataSource.js.map<|fim▁end|> | this.groupHeight = config.groupHeight || 25; |
<|file_name|>OculusRift.py<|end_file_name|><|fim▁begin|># start the service<|fim▁hole|>oculusrift = Runtime.start("oculusrift","OculusRift")<|fim▁end|> | |
<|file_name|>weave-ping.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# A Happy command line utility that tests Weave Ping among Weave nodes.
#
# The command is executed by instantiating and running WeavePing class.
#
from __future__ import absolute_import
from __future__ import print_function
import getopt
import sys
import set_test_path
from happy.Utils import *
import WeavePing
if __name__ == "__main__":
options = WeavePing.option()
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:s:c:tuwqp:i:a:e:n:CE:T:",
["help", "origin=", "server=", "count=", "tcp", "udp", "wrmp", "interval=", "quiet",
"tap=", "case", "case_cert_path=", "case_key_path="])
except getopt.GetoptError as err:
print(WeavePing.WeavePing.__doc__)
print(hred(str(err)))
sys.exit(hred("%s: Failed to parse arguments." % (__file__)))
for o, a in opts:
if o in ("-h", "--help"):<|fim▁hole|> elif o in ("-q", "--quiet"):
options["quiet"] = True
elif o in ("-t", "--tcp"):
options["tcp"] = True
elif o in ("-u", "--udp"):
options["udp"] = True
elif o in ("-w", "--wrmp"):
options["wrmp"] = True
elif o in ("-o", "--origin"):
options["client"] = a
elif o in ("-s", "--server"):
options["server"] = a
elif o in ("-c", "--count"):
options["count"] = a
elif o in ("-i", "--interval"):
options["interval"] = a
elif o in ("-p", "--tap"):
options["tap"] = a
elif o in ("-C", "--case"):
options["case"] = True
elif o in ("-E", "--case_cert_path"):
options["case_cert_path"] = a
elif o in ("-T", "--case_key_path"):
options["case_key_path"] = a
else:
assert False, "unhandled option"
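# Positional arguments: a single node name sets the ping origin; two names set
# the client and server explicitly.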
if len(args) == 1:
options["origin"] = args[0]
if len(args) == 2:
options["client"] = args[0]
options["server"] = args[1]
cmd = WeavePing.WeavePing(options)
cmd.start()<|fim▁end|> | print(WeavePing.WeavePing.__doc__)
sys.exit(0)
|
<|file_name|>organizations.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
u"""
.. module:: organizations
"""
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
from django.utils.text import slugify
from django.views.generic import View
from apps.volontulo.forms import VolounteerToOrganizationContactForm
from apps.volontulo.lib.email import send_mail
from apps.volontulo.models import Offer
from apps.volontulo.models import Organization
from apps.volontulo.models import UserProfile
from apps.volontulo.utils import correct_slug
def organizations_list(request):
u"""View responsible for listing all organizations.
:param request: WSGIRequest instance
"""
organizations = Organization.objects.all()
return render(
request,
"organizations/list.html",
{'organizations': organizations},
)
class OrganizationsCreate(View):
u"""Class view supporting creation of new organization."""
@staticmethod<|fim▁hole|> @login_required
def get(request):
u"""Method responsible for rendering form for new organization."""
return render(
request,
"organizations/organization_form.html",
{'organization': Organization()}
)
@staticmethod
@login_required
def post(request):
u"""Method responsible for saving new organization."""
if not (
request.POST.get('name') and
request.POST.get('address') and
request.POST.get('description')
):
messages.error(
request,
u"Należy wypełnić wszystkie pola formularza."
)
return render(
request,
"organizations/organization_form.html",
{'organization': Organization()}
)
organization = Organization(
name=request.POST.get('name'),
address=request.POST.get('address'),
description=request.POST.get('description'),
)
organization.save()
request.user.userprofile.organizations.add(organization)
messages.success(
request,
u"Organizacja została dodana."
)
return redirect(
'organization_view',
slug=slugify(organization.name),
id_=organization.id,
)
@correct_slug(Organization, 'organization_form', 'name')
@login_required
def organization_form(request, slug, id_): # pylint: disable=unused-argument
u"""View responsible for editing organization.
Edition will only work, if logged user has been registered as organization.
"""
org = Organization.objects.get(pk=id_)
users = [profile.user.email for profile in org.userprofiles.all()]
if (
request.user.is_authenticated() and
request.user.email not in users
):
messages.error(
request,
u'Nie masz uprawnień do edycji tej organizacji.'
)
return redirect(
reverse(
'organization_view',
args=[slugify(org.name), org.id]
)
)
if not (
request.user.is_authenticated() and
UserProfile.objects.get(user=request.user).organizations
):
return redirect('homepage')
if request.method == 'POST':
if (
request.POST.get('name') and
request.POST.get('address') and
request.POST.get('description')
):
org.name = request.POST.get('name')
org.address = request.POST.get('address')
org.description = request.POST.get('description')
org.save()
messages.success(
request,
u'Oferta została dodana/zmieniona.'
)
return redirect(
reverse(
'organization_view',
args=[slugify(org.name), org.id]
)
)
else:
messages.error(
request,
u"Należy wypełnić wszystkie pola formularza."
)
return render(
request,
"organizations/organization_form.html",
{'organization': org},
)
@correct_slug(Organization, 'organization_view', 'name')
def organization_view(request, slug, id_): # pylint: disable=unused-argument
u"""View responsible for viewing organization."""
org = get_object_or_404(Organization, id=id_)
offers = Offer.objects.filter(organization_id=id_)
allow_contact = True
allow_edit = False
allow_offer_create = False
if (
request.user.is_authenticated() and
request.user.userprofile in org.userprofiles.all()
):
allow_contact = False
allow_edit = True
allow_offer_create = True
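# Members of the organization may edit it and create offers; everyone else only
# gets the contact form.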
if request.method == 'POST':
form = VolounteerToOrganizationContactForm(request.POST)
if form.is_valid():
# send email to the first organization user (assumed to be the main user)
profile = Organization.objects.get(id=id_).userprofiles.all()[0]
send_mail(
request,
'volunteer_to_organisation',
[
profile.user.email,
request.POST.get('email'),
],
{k: v for k, v in request.POST.items()},
)
messages.success(request, u'Email został wysłany.')
else:
messages.error(
request,
u"Formularz zawiera nieprawidłowe dane: {}".format(form.errors)
)
return render(
request,
"organizations/organization_view.html",
{
'organization': org,
'contact_form': form,
'offers': offers,
'allow_contact': allow_contact,
'allow_edit': allow_edit,
'allow_offer_create': allow_offer_create,
},
)
return render(
request,
"organizations/organization_view.html",
{
'organization': org,
'contact_form': VolounteerToOrganizationContactForm(),
'offers': offers,
'allow_contact': allow_contact,
'allow_edit': allow_edit,
'allow_offer_create': allow_offer_create,
}
)<|fim▁end|> | |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db.models import Transform
from django.db.models import DateTimeField, TimeField
from django.utils.functional import cached_property
class TimeValue(Transform):
lookup_name = 'time'
function = 'time'
def as_sql(self, compiler, connection):<|fim▁hole|> @cached_property
def output_field(self):
return TimeField()
DateTimeField.register_lookup(TimeValue)<|fim▁end|> | lhs, params = compiler.compile(self.lhs)
return 'TIME({})'.format(lhs), params
|
<|file_name|>campaign_manager.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Campaign Manager hook."""
from typing import Any, Dict, List, Optional, Sequence, Union
from googleapiclient import http
from googleapiclient.discovery import Resource, build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class GoogleCampaignManagerHook(GoogleBaseHook):
"""Hook for Google Campaign Manager."""
_conn = None # type: Optional[Resource]
def __init__(
self,
api_version: str = "v3.3",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
def get_conn(self) -> Resource:
"""Retrieves connection to Campaign Manager."""
if not self._conn:
http_authorized = self._authorize()<|fim▁hole|> self._conn = build(
"dfareporting",
self.api_version,
http=http_authorized,
cache_discovery=False,
)
return self._conn
def delete_report(self, profile_id: str, report_id: str) -> Any:
"""
Deletes a report by its ID.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
"""
response = (
self.get_conn()
.reports()
.delete(profileId=profile_id, reportId=report_id)
.execute(num_retries=self.num_retries)
)
return response
def insert_report(self, profile_id: str, report: Dict[str, Any]) -> Any:
"""
Creates a report.
:param profile_id: The DFA user profile ID.
:param report: The report resource to be inserted.
"""
response = (
self.get_conn()
.reports()
.insert(profileId=profile_id, body=report)
.execute(num_retries=self.num_retries)
)
return response
def list_reports(
self,
profile_id: str,
max_results: Optional[int] = None,
scope: Optional[str] = None,
sort_field: Optional[str] = None,
sort_order: Optional[str] = None,
) -> List[dict]:
"""
Retrieves list of reports.
:param profile_id: The DFA user profile ID.
:param max_results: Maximum number of results to return.
:param scope: The scope that defines which results are returned.
:param sort_field: The field by which to sort the list.
:param sort_order: Order of sorted results.
"""
reports: List[dict] = []
conn = self.get_conn()
request = conn.reports().list(
profileId=profile_id,
maxResults=max_results,
scope=scope,
sortField=sort_field,
sortOrder=sort_order,
)
while request is not None:
response = request.execute(num_retries=self.num_retries)
reports.extend(response.get("items", []))
request = conn.reports().list_next(previous_request=request, previous_response=response)
return reports
def patch_report(self, profile_id: str, report_id: str, update_mask: dict) -> Any:
"""
Updates a report. This method supports patch semantics.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
:param update_mask: The relevant portions of a report resource,
according to the rules of patch semantics.
"""
response = (
self.get_conn()
.reports()
.patch(profileId=profile_id, reportId=report_id, body=update_mask)
.execute(num_retries=self.num_retries)
)
return response
def run_report(self, profile_id: str, report_id: str, synchronous: Optional[bool] = None) -> Any:
"""
Runs a report.
:param profile_id: The DFA profile ID.
:param report_id: The ID of the report.
:param synchronous: If set and true, tries to run the report synchronously.
"""
response = (
self.get_conn()
.reports()
.run(profileId=profile_id, reportId=report_id, synchronous=synchronous)
.execute(num_retries=self.num_retries)
)
return response
def update_report(self, profile_id: str, report_id: str) -> Any:
"""
Updates a report.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
"""
response = (
self.get_conn()
.reports()
.update(profileId=profile_id, reportId=report_id)
.execute(num_retries=self.num_retries)
)
return response
def get_report(self, file_id: str, profile_id: str, report_id: str) -> Any:
"""
Retrieves a report file.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
:param file_id: The ID of the report file.
"""
response = (
self.get_conn()
.reports()
.files()
.get(fileId=file_id, profileId=profile_id, reportId=report_id)
.execute(num_retries=self.num_retries)
)
return response
def get_report_file(self, file_id: str, profile_id: str, report_id: str) -> http.HttpRequest:
"""
Retrieves a media part of report file.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
:param file_id: The ID of the report file.
:return: googleapiclient.http.HttpRequest
"""
request = (
self.get_conn()
.reports()
.files()
.get_media(fileId=file_id, profileId=profile_id, reportId=report_id)
)
return request
@staticmethod
def _conversions_batch_request(
conversions: List[Dict[str, Any]],
encryption_entity_type: str,
encryption_entity_id: int,
encryption_source: str,
kind: str,
) -> Dict[str, Any]:
return {
"kind": kind,
"conversions": conversions,
"encryptionInfo": {
"kind": "dfareporting#encryptionInfo",
"encryptionEntityType": encryption_entity_type,
"encryptionEntityId": encryption_entity_id,
"encryptionSource": encryption_source,
},
}
def conversions_batch_insert(
self,
profile_id: str,
conversions: List[Dict[str, Any]],
encryption_entity_type: str,
encryption_entity_id: int,
encryption_source: str,
max_failed_inserts: int = 0,
) -> Any:
"""
Inserts conversions.
:param profile_id: User profile ID associated with this request.
:param conversions: Conversations to insert, should by type of Conversation:
https://developers.google.com/doubleclick-advertisers/v3.3/conversions#resource
:param encryption_entity_type: The encryption entity type. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_entity_id: The encryption entity ID. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_source: Describes whether the encrypted cookie was received from ad serving
(the %m macro) or from Data Transfer.
:param max_failed_inserts: The maximum number of conversions that failed to be inserted
"""
response = (
self.get_conn()
.conversions()
.batchinsert(
profileId=profile_id,
body=self._conversions_batch_request(
conversions=conversions,
encryption_entity_type=encryption_entity_type,
encryption_entity_id=encryption_entity_id,
encryption_source=encryption_source,
kind="dfareporting#conversionsBatchInsertRequest",
),
)
.execute(num_retries=self.num_retries)
)
if response.get('hasFailures', False):
errored_conversions = [stat['errors'] for stat in response['status'] if 'errors' in stat]
if len(errored_conversions) > max_failed_inserts:
raise AirflowException(errored_conversions)
return response
def conversions_batch_update(
self,
profile_id: str,
conversions: List[Dict[str, Any]],
encryption_entity_type: str,
encryption_entity_id: int,
encryption_source: str,
max_failed_updates: int = 0,
) -> Any:
"""
Updates existing conversions.
:param profile_id: User profile ID associated with this request.
:param conversions: Conversations to update, should by type of Conversation:
https://developers.google.com/doubleclick-advertisers/v3.3/conversions#resource
:param encryption_entity_type: The encryption entity type. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_entity_id: The encryption entity ID. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_source: Describes whether the encrypted cookie was received from ad serving
(the %m macro) or from Data Transfer.
:param max_failed_updates: The maximum number of conversions that failed to be updated
"""
response = (
self.get_conn()
.conversions()
.batchupdate(
profileId=profile_id,
body=self._conversions_batch_request(
conversions=conversions,
encryption_entity_type=encryption_entity_type,
encryption_entity_id=encryption_entity_id,
encryption_source=encryption_source,
kind="dfareporting#conversionsBatchUpdateRequest",
),
)
.execute(num_retries=self.num_retries)
)
if response.get('hasFailures', False):
errored_conversions = [stat['errors'] for stat in response['status'] if 'errors' in stat]
if len(errored_conversions) > max_failed_updates:
raise AirflowException(errored_conversions)
return response<|fim▁end|> | |
<|file_name|>FonttblDestinations.ts<|end_file_name|><|fim▁begin|>/*
The MIT License (MIT)
Copyright (c) 2015 Thomas Bluemel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import { Document } from "../../Document";
import { Helper, RTFJSError } from "../../Helper";
import { GlobalState } from "../Containers";
import { DestinationBase } from "./DestinationBase";
export class FonttblDestinationSub extends DestinationBase {
public index: number;
public fontname: string;
public altfontname: string;
public family: string;
public pitch: number;
public bias: number;
public charset: number;
private _fonttbl: FonttblDestination;
constructor(fonttbl: FonttblDestination) {
super("fonttbl:sub");
this._fonttbl = fonttbl;
this.index = null;
this.fontname = null;
this.altfontname = null;
this.family = null;
this.pitch = Helper.FONTPITCH.DEFAULT;
this.bias = 0;
this.charset = null;
}
public handleKeyword(keyword: string, param: number): boolean {
switch (keyword) {
case "f":
this.index = param;
return true;
case "fnil":
return true;
case "froman":
case "fswiss":
case "fmodern":
case "fscript":
case "fdecor":
case "ftech":
case "fbidi":
case "flomajor":
case "fhimajor":
case "fdbmajor":
case "fbimajor":
case "flominor":
case "fhiminor":
case "fdbminor":
case "fbiminor":
this.family = keyword.slice(1);
return true;
case "fprq":
switch (param) {<|fim▁hole|> this.pitch = Helper.FONTPITCH.FIXED;
break;
case 2:
this.pitch = Helper.FONTPITCH.VARIABLE;
break;
}
return true;
case "fbias":
if (param != null) {
this.bias = param;
}
return true;
case "fcharset":
if (param != null) {
this.charset = Helper._mapCharset(param);
if (this.charset == null) {
Helper.log("Unknown font charset: " + param);
}
}
return true;
case "cpg":
if (param != null) {
this.charset = param;
}
return true;
}
return false;
}
public appendText(text: string): void {
if (this.fontname == null) {
this.fontname = text;
} else {
this.fontname += text;
}
}
public apply(): void {
if (this.index == null) {
throw new RTFJSError("No font index provided");
}
if (this.fontname == null) {
throw new RTFJSError("No font name provided");
}
this._fonttbl.addSub(this);
delete this._fonttbl;
}
public setAltFontName(name: string): void {
this.altfontname = name;
}
}
export class FonttblDestination extends DestinationBase {
private _fonts: FonttblDestinationSub[];
private _sub: FonttblDestinationSub;
private inst: Document;
constructor(parser: GlobalState, inst: Document) {
super("fonttbl");
this._fonts = [];
this._sub = null;
this.inst = inst;
}
public sub(): FonttblDestinationSub {
return new FonttblDestinationSub(this);
}
public apply(): void {
Helper.log("[fonttbl] apply()");
for (const idx in this._fonts) {
Helper.log("[fonttbl][" + idx + "] index = " + this._fonts[idx].fontname
+ " alternative: " + this._fonts[idx].altfontname);
}
this.inst._fonts = this._fonts;
delete this._fonts;
}
public appendText(text: string): void {
this._sub.appendText(text);
this._sub.apply();
}
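// An \f control word opens a new font definition; this and all following keywords
// are forwarded to the sub-destination currently being built.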
public handleKeyword(keyword: string, param: number): void {
if (keyword === "f") {
this._sub = this.sub();
}
this._sub.handleKeyword(keyword, param);
}
public addSub(sub: FonttblDestinationSub): void {
this._fonts[sub.index] = sub;
}
}<|fim▁end|> | case 0:
this.pitch = Helper.FONTPITCH.DEFAULT;
break;
case 1: |
<|file_name|>element.rs<|end_file_name|><|fim▁begin|>//! Improvements to `xmltree::Element`.
use {Error, Result};
use std::str::FromStr;
use xmltree::Element;
/// An extension to the xmltree element to make it more ergonomic.
pub trait Extension {
/// Returns a child element by slash-separated names, or an error if the path does not exist.
///
/// # Examples
///
/// `Extension` is implemented for `xmltree::Element`:
///
/// ```
/// extern crate xmltree;
/// # extern crate riscan_pro;
/// # fn main () {
/// use xmltree::Element;
/// use riscan_pro::element::Extension;
/// use std::fs::File;
///
/// let file = File::open("data/project.RiSCAN/project.rsp").unwrap();
/// let element = Element::parse(file).unwrap();
/// let pop_matrix = element.child("pop/matrix").unwrap();
/// # }
/// ```
fn child(&self, path: &str) -> Result<&Element>;
/// Returns a vector of children, as selected by name.
///
/// # Examples
///
/// `Extension` is implemented for `xmltree::Element`:
///
/// ```
/// extern crate xmltree;
/// # extern crate riscan_pro;
/// # fn main () {
/// use xmltree::Element;
/// use riscan_pro::element::Extension;
/// use std::fs::File;
///
/// let file = File::open("data/project.RiSCAN/project.rsp").unwrap();
/// let element = Element::parse(file).unwrap();
/// let reflector_calibrations = element.children("calibrations/reflcalibs/reflcalib").unwrap();
/// # }
/// ```
fn children(&self, path: &str) -> Result<&Vec<Element>>;
/// Returns this element's inner text as a string, or returns an error if there is no text.
///
/// # Examples
///
/// `Extension` is implemented for `xmltree::Element`:
///
/// ```
/// extern crate xmltree;
/// # extern crate riscan_pro;
/// # fn main () {
/// use xmltree::Element;
/// use riscan_pro::element::Extension;
/// use std::fs::File;
///
/// let file = File::open("data/project.RiSCAN/project.rsp").unwrap();
/// let element = Element::parse(file).unwrap();
/// let pop_matrix_str = element.child("pop/matrix").unwrap().as_str().unwrap();
/// # }
/// ```
fn as_str(&self) -> Result<&str>;
/// Returns this element's noderef attribute as a string.
///
/// The noderef is trimmed to only use the last element.
///
/// # Examples
///
/// `Extension` is implemented for `xmltree::Element`:
///
/// ```
/// extern crate xmltree;
/// # extern crate riscan_pro;
/// # fn main () {
/// use xmltree::Element;
/// use riscan_pro::element::Extension;
/// use std::fs::File;
///
/// let file = File::open("data/project.RiSCAN/project.rsp").unwrap();
/// let element = Element::parse(file).unwrap();
/// let logo = element
/// .child("collections/overlays/overlay/overlayitem/source_ref")
/// .unwrap()
/// .noderef()
/// .unwrap();
/// assert_eq!("Logo", logo);
/// # }
/// ```
fn noderef(&self) -> Result<&str>;
/// Parses this element's inner text, or returns an error if there is no text or if the parse
/// fails.
///<|fim▁hole|> ///
/// ```
/// extern crate xmltree;
/// # extern crate riscan_pro;
/// # fn main () {
/// use xmltree::Element;
/// use riscan_pro::element::Extension;
/// use std::fs::File;
///
/// let file = File::open("data/project.RiSCAN/project.rsp").unwrap();
/// let element = Element::parse(file).unwrap();
/// let version: f64 = element.child("app_version").unwrap().parse_text().unwrap();
/// # }
/// ```
fn parse_text<T>(&self) -> Result<T>
where
T: FromStr,
Error: From<<T as FromStr>::Err>,
{
self.as_str().and_then(|s| s.parse().map_err(Error::from))
}
}
impl Extension for Element {
fn child(&self, path: &str) -> Result<&Element> {
burrow(self, path.split('/'))
}
fn children(&self, path: &str) -> Result<&Vec<Element>> {
let mut iter = path.split('/').rev();
let last = iter.next().ok_or_else(|| {
Error::MissingChild(self.name.clone(), String::new())
})?;
let element = burrow(self, iter.rev())?;
if element.children.iter().all(|child| child.name == last) {
Ok(&element.children)
} else {
Err(Error::MissingChild(element.name.clone(), last.to_string()))
}
}
fn as_str(&self) -> Result<&str> {
self.text.as_ref().map(|s| s.as_str()).ok_or_else(|| {
Error::NoElementText(self.clone())
})
}
fn noderef(&self) -> Result<&str> {
if let Some(noderef) = self.attributes.get("noderef").and_then(
|s| s.split('/').last(),
)
{
Ok(noderef)
} else {
Err(Error::MissingNoderef(self.clone()))
}
}
}
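// Walks `element` down through the children named by `iter`, failing with
// `Error::MissingChild` at the first name that has no matching child.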
fn burrow<'a, I: Iterator<Item = &'a str>>(mut element: &Element, iter: I) -> Result<&Element> {
for name in iter {
if let Some(child) = element.get_child(name) {
element = child;
} else {
return Err(Error::MissingChild(element.name.clone(), name.to_string()));
}
}
Ok(element)
}
#[cfg(test)]
mod tests {
use super::*;
fn project() -> Element {
use std::fs::File;
Element::parse(File::open("data/project.RiSCAN/project.rsp").unwrap()).unwrap()
}
#[test]
fn child() {
let project = project();
assert_eq!("pop", project.child("pop").unwrap().name);
assert_eq!("matrix", project.child("pop/matrix").unwrap().name);
assert_eq!(
"reflcalib",
project
.child("calibrations/reflcalibs/reflcalib")
.unwrap()
.name
);
assert!(project.child("").is_err());
assert!(project.child("not-an-element").is_err());
}
#[test]
fn children() {
let project = project();
let reflectors = project
.children("calibrations/reflcalibs/reflcalib")
.unwrap();
assert_eq!(4, reflectors.len());
assert!(project.children("pop").is_err());
assert!(project.children("").is_err());
assert!(project.children("not-an-element").is_err());
}
#[test]
fn as_str() {
let project = project();
assert!(project.as_str().is_err());
assert_eq!(
"RiSCAN PRO",
project.child("app_caption").unwrap().as_str().unwrap()
);
}
#[test]
fn noderef() {
let project = project();
assert_eq!(
"Logo",
project
.child("collections/overlays/overlay/overlayitem/source_ref")
.unwrap()
.noderef()
.unwrap()
);
assert_eq!(
"Infratec_VarioCAM_HD_15mm_11-16-2015_Preston",
project
.child("geometry_objects/images/image/camcalib_ref")
.unwrap()
.noderef()
.unwrap()
);
}
}<|fim▁end|> | /// # Examples
///
/// `Extension` is implemented for `xmltree::Element`: |
<|file_name|>drawing_tester.py<|end_file_name|><|fim▁begin|>import contextlib
import os
import shutil
import tempfile
import numpy
from PIL import Image
from kiva.fonttools import Font
from kiva.constants import MODERN
class DrawingTester(object):
""" Basic drawing tests for graphics contexts.
"""
def setUp(self):
self.directory = tempfile.mkdtemp()
self.filename = os.path.join(self.directory, 'rendered')
self.gc = self.create_graphics_context(300, 300)
self.gc.clear()
self.gc.set_stroke_color((1.0, 0.0, 0.0))
self.gc.set_fill_color((1.0, 0.0, 0.0))
self.gc.set_line_width(5)
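# Every test draws in red on the cleared (white) background; this is the color
# assertImageSavedWithContent scans for.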
def tearDown(self):
del self.gc
shutil.rmtree(self.directory)
def test_line(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(107, 204)
self.gc.line_to(107, 104)
self.gc.stroke_path()
def test_rectangle(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(107, 104)
self.gc.line_to(107, 184)
self.gc.line_to(187, 184)
self.gc.line_to(187, 104)
self.gc.line_to(107, 104)
self.gc.stroke_path()
def test_rect(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.rect(0, 0, 200, 200)
self.gc.stroke_path()
def test_circle(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.stroke_path()
def test_quarter_circle(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, numpy.pi / 2)
self.gc.stroke_path()
def test_text(self):
with self.draw_and_check():
font = Font(family=MODERN)
font.size = 24
self.gc.set_font(font)
self.gc.set_text_position(23, 67)
self.gc.show_text("hello kiva")
def test_circle_fill(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.fill_path()
def test_star_fill(self):<|fim▁hole|> self.gc.line_to(200, 100)
self.gc.line_to(100, 150)
self.gc.line_to(200, 150)
self.gc.line_to(100, 100)
self.gc.fill_path()
def test_star_eof_fill(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(100, 100)
self.gc.line_to(150, 200)
self.gc.line_to(200, 100)
self.gc.line_to(100, 150)
self.gc.line_to(200, 150)
self.gc.line_to(100, 100)
self.gc.eof_fill_path()
def test_circle_clip(self):
with self.draw_and_check():
self.gc.clip_to_rect(150, 150, 100, 100)
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.fill_path()
def test_text_clip(self):
with self.draw_and_check():
self.gc.clip_to_rect(23, 77, 100, 23)
font = Font(family=MODERN)
font.size = 24
self.gc.set_font(font)
self.gc.set_text_position(23, 67)
self.gc.show_text("hello kiva")
def test_star_clip(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(100, 100)
self.gc.line_to(150, 200)
self.gc.line_to(200, 100)
self.gc.line_to(100, 150)
self.gc.line_to(200, 150)
self.gc.line_to(100, 100)
self.gc.close_path()
self.gc.clip()
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.fill_path()
#### Required methods ####################################################
@contextlib.contextmanager
def draw_and_check(self):
""" A context manager to check the result.
"""
raise NotImplementedError()
def create_graphics_context(self, width, height):
""" Create the desired graphics context
"""
raise NotImplementedError()
class DrawingImageTester(DrawingTester):
""" Basic drawing tests for graphics contexts of gui toolkits.
"""
@contextlib.contextmanager
def draw_and_check(self):
yield
filename = "{0}.png".format(self.filename)
self.gc.save(filename)
self.assertImageSavedWithContent(filename)
def assertImageSavedWithContent(self, filename):
""" Load the image and check that there is some content in it.
"""
image = numpy.array(Image.open(filename))
# default is expected to be a totally white image
self.assertEqual(image.shape[:2], (300, 300))
if image.shape[2] == 3:
check = numpy.sum(image == [255, 0, 0], axis=2) == 3
elif image.shape[2] == 4:
check = numpy.sum(image == [255, 0, 0, 255], axis=2) == 4
else:
self.fail(
'Pixel size is not 3 or 4, but {0}'.format(image.shape[2]))
if check.any():
return
self.fail('The image looks empty, no red pixels where drawn')<|fim▁end|> | with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(100, 100)
self.gc.line_to(150, 200) |
<|file_name|>condition.js<|end_file_name|><|fim▁begin|>"use strict";
var cases = module.exports = [];
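// Each fixture pairs a FHIR Condition resource (input) with the problem
// structure the converter under test is expected to produce (result).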
cases[0] = {};
cases[0].resources = [{
"resource": {
"id": "Condition/c-0-0",
"resourceType": "Condition",
"status": "confirmed",
"onsetDateTime": "2012-08-05",
"dateAsserted": "2012-08-05",
"abatementBoolean": true,
"code": {
"coding": [{
"code": "233604007",
"system": "http://snomed.info/sct",
"display": "Pneumonia"
}]
}
}
}];
cases[0].input = cases[0].resources[0];
cases[0].result = {
"problem": {
"code": {
"name": "Pneumonia",
"code": "233604007",
"code_system_name": "SNOMED CT"
},
"date_time": {
"low": {
"date": "2012-08-05T00:00:00.000Z",
"precision": "day"
}
}
},
"status": {
"name": "Resolved"
}
};
cases[1] = {};
cases[1].resources = [{
"resource": {
"id": "Condition/c-1-0",
"resourceType": "Condition",
"status": "confirmed",
"onsetDateTime": "2007-01-03",
"dateAsserted": "2007-01-03",
"code": {
"coding": [{
"code": "195967001",
"system": "http://snomed.info/sct",
"display": "Asthma"
}]
}
}
}];
cases[1].input = cases[1].resources[0];
cases[1].result = {
"problem": {
"code": {
"name": "Asthma",
"code": "195967001",
"code_system_name": "SNOMED CT"
},
"date_time": {
"low": {
"date": "2007-01-03T00:00:00.000Z",
"precision": "day"
}
}
}
};
cases[2] = {};
cases[2].resources = [{<|fim▁hole|> "status": "confirmed",
"onsetDateTime": "2007-01-03",
"dateAsserted": "2007-01-03",
"abatementDate": "2012-09-05",
"code": {
"coding": [{
"code": "195967001",
"system": "http://snomed.info/sct",
"display": "Asthma"
}]
}
}
}];
cases[2].input = cases[2].resources[0];
cases[2].result = {
"problem": {
"code": {
"name": "Asthma",
"code": "195967001",
"code_system_name": "SNOMED CT"
},
"date_time": {
"low": {
"date": "2007-01-03T00:00:00.000Z",
"precision": "day"
},
"high": {
"date": "2012-09-05T00:00:00.000Z",
"precision": "day"
}
}
}
};<|fim▁end|> | "resource": {
"id": "Condition/c-2-0",
"resourceType": "Condition", |
<|file_name|>utils.js<|end_file_name|><|fim▁begin|>import { isObject, isFunction, keys } from 'lodash';
// Removes all functions in an object so you can test
// against objects without errors
export const withoutFunctions = (obj) => {
if (!isObject(obj)) {
return obj;
}
let temp = {}
const _keys = keys(obj)
_keys.forEach((k) => {
if (isObject(obj[k])) {
temp[k] = withoutFunctions(obj[k]);<|fim▁hole|> temp[k] = obj[k];
}
});
return temp;
}<|fim▁end|> | } else if (isFunction(obj[k])) {
temp[k] = null;
} else { |
<|file_name|>int.rs<|end_file_name|><|fim▁begin|>use arch::int;
pub fn mask_interrupt(i: u32, mask: bool) {
int::mask_interrupt(i, mask);
}
pub fn set_int(i: u32, idt_idx: u32) {
int::set_int(i, idt_idx);
}
pub fn fire_timer() {
int::fire_timer();
}
pub fn enable_interrupts() {
int::enable_interrupts();
}
pub fn disable_interrupts() {
int::disable_interrupts();
}<|fim▁hole|> match int_id {
80 => {
println!("[ TASK ] System call from user space");
}
81 => {
println!("[ OK ] Interrupt test passes");
},
33 => println!("Keyboard interrupt detected"),
13 => {
println!("GPF 0x{:x} err: 0x{:x}", retaddr, error_code);
disable_interrupts();
loop{}
}
14 => {
println!("PAGE FAULT 0x{:x}, addr: 0x{:x}", error_code, retaddr);
loop{};
},
_ => {
//println!("OTHER INTERRUPT {}", ctx.int_id);
//loop{};
}
}
}<|fim▁end|> |
pub fn interrupt_handler(int_id: u32, error_code: u32, retaddr: usize) { |
<|file_name|>recognize.spec.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Routes} from '../src/config';
import {recognize} from '../src/recognize';
import {ActivatedRouteSnapshot, RouterStateSnapshot} from '../src/router_state';
import {PRIMARY_OUTLET, Params} from '../src/shared';
import {DefaultUrlSerializer, UrlTree} from '../src/url_tree';
describe('recognize', () => {
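// Each spec parses a raw URL into a UrlTree, runs it through recognize(), and
// asserts on the shape of the resulting RouterStateSnapshot.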
it('should work', () => {
checkRecognize([{path: 'a', component: ComponentA}], 'a', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.root, '', {}, RootComponent);
checkActivatedRoute(s.firstChild(s.root) !, 'a', {}, ComponentA);
});
});
it('should freeze params object', () => {
checkRecognize([{path: 'a/:id', component: ComponentA}], 'a/10', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.root, '', {}, RootComponent);
const child = s.firstChild(s.root) !;
expect(Object.isFrozen(child.params)).toBeTruthy();
});
});
it('should support secondary routes', () => {
checkRecognize(
[
{path: 'a', component: ComponentA}, {path: 'b', component: ComponentB, outlet: 'left'},
{path: 'c', component: ComponentC, outlet: 'right'}
],
'a(left:b//right:c)', (s: RouterStateSnapshot) => {
const c = s.children(s.root);
checkActivatedRoute(c[0], 'a', {}, ComponentA);
checkActivatedRoute(c[1], 'b', {}, ComponentB, 'left');
checkActivatedRoute(c[2], 'c', {}, ComponentC, 'right');
});
});
it('should set url segment and index properly', () => {
const url = tree('a(left:b//right:c)');
recognize(
RootComponent,
[
{path: 'a', component: ComponentA}, {path: 'b', component: ComponentB, outlet: 'left'},
{path: 'c', component: ComponentC, outlet: 'right'}
],
url, 'a(left:b//right:c)')
.subscribe((s) => {
expect(s.root._urlSegment).toBe(url.root);
expect(s.root._lastPathIndex).toBe(-1);
const c = s.children(s.root);
expect(c[0]._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(c[0]._lastPathIndex).toBe(0);
expect(c[1]._urlSegment).toBe(url.root.children['left']);
expect(c[1]._lastPathIndex).toBe(0);
expect(c[2]._urlSegment).toBe(url.root.children['right']);
expect(c[2]._lastPathIndex).toBe(0);
});
});
it('should set url segment and index properly (nested case)', () => {
const url = tree('a/b/c');
recognize(
RootComponent,
[
{path: 'a/b', component: ComponentA, children: [{path: 'c', component: ComponentC}]},
],
url, 'a/b/c')
.subscribe((s: RouterStateSnapshot) => {
expect(s.root._urlSegment).toBe(url.root);
expect(s.root._lastPathIndex).toBe(-1);
const compA = s.firstChild(s.root) !;
expect(compA._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(compA._lastPathIndex).toBe(1);
const compC = s.firstChild(<any>compA) !;
expect(compC._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(compC._lastPathIndex).toBe(2);
});
});
it('should set url segment and index properly (wildcard)', () => {
const url = tree('a/b/c');
recognize(
RootComponent,
[
{path: 'a', component: ComponentA, children: [{path: '**', component: ComponentB}]},
],
url, 'a/b/c')
.subscribe((s: RouterStateSnapshot) => {
expect(s.root._urlSegment).toBe(url.root);
expect(s.root._lastPathIndex).toBe(-1);
const compA = s.firstChild(s.root) !;
expect(compA._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(compA._lastPathIndex).toBe(0);
const compC = s.firstChild(<any>compA) !;
expect(compC._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(compC._lastPathIndex).toBe(2);
});
});
it('should match routes in the depth first order', () => {
checkRecognize(
[
{path: 'a', component: ComponentA, children: [{path: ':id', component: ComponentB}]},
{path: 'a/:id', component: ComponentC}
],
'a/paramA', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.root, '', {}, RootComponent);
checkActivatedRoute(s.firstChild(s.root) !, 'a', {}, ComponentA);
checkActivatedRoute(
s.firstChild(<any>s.firstChild(s.root)) !, 'paramA', {id: 'paramA'}, ComponentB);
});
checkRecognize(
[{path: 'a', component: ComponentA}, {path: 'a/:id', component: ComponentC}], 'a/paramA',
(s: RouterStateSnapshot) => {
checkActivatedRoute(s.root, '', {}, RootComponent);
checkActivatedRoute(s.firstChild(s.root) !, 'a/paramA', {id: 'paramA'}, ComponentC);
});
});
it('should use outlet name when matching secondary routes', () => {
checkRecognize(
[
{path: 'a', component: ComponentA}, {path: 'b', component: ComponentB, outlet: 'left'},
{path: 'b', component: ComponentC, outlet: 'right'}
],
'a(right:b)', (s: RouterStateSnapshot) => {
const c = s.children(s.root);
checkActivatedRoute(c[0], 'a', {}, ComponentA);
checkActivatedRoute(c[1], 'b', {}, ComponentC, 'right');
});
});
it('should handle non top-level secondary routes', () => {
checkRecognize(
[
{
path: 'a',
component: ComponentA,
children: [
{path: 'b', component: ComponentB},
{path: 'c', component: ComponentC, outlet: 'left'}
]
},<|fim▁hole|> checkActivatedRoute(c[1], 'c', {}, ComponentC, 'left');
});
});
it('should sort routes by outlet name', () => {
checkRecognize(
[
{path: 'a', component: ComponentA}, {path: 'c', component: ComponentC, outlet: 'c'},
{path: 'b', component: ComponentB, outlet: 'b'}
],
'a(c:c//b:b)', (s: RouterStateSnapshot) => {
const c = s.children(s.root);
checkActivatedRoute(c[0], 'a', {}, ComponentA);
checkActivatedRoute(c[1], 'b', {}, ComponentB, 'b');
checkActivatedRoute(c[2], 'c', {}, ComponentC, 'c');
});
});
it('should support matrix parameters', () => {
checkRecognize(
[
{path: 'a', component: ComponentA, children: [{path: 'b', component: ComponentB}]},
{path: 'c', component: ComponentC, outlet: 'left'}
],
'a;a1=11;a2=22/b;b1=111;b2=222(left:c;c1=1111;c2=2222)', (s: RouterStateSnapshot) => {
const c = s.children(s.root);
checkActivatedRoute(c[0], 'a', {a1: '11', a2: '22'}, ComponentA);
checkActivatedRoute(s.firstChild(<any>c[0]) !, 'b', {b1: '111', b2: '222'}, ComponentB);
checkActivatedRoute(c[1], 'c', {c1: '1111', c2: '2222'}, ComponentC, 'left');
});
});
describe('data', () => {
it('should set static data', () => {
checkRecognize(
[{path: 'a', data: {one: 1}, component: ComponentA}], 'a', (s: RouterStateSnapshot) => {
const r: ActivatedRouteSnapshot = s.firstChild(s.root) !;
expect(r.data).toEqual({one: 1});
});
});
it('should merge componentless route\'s data', () => {
checkRecognize(
[{
path: 'a',
data: {one: 1},
children: [{path: 'b', data: {two: 2}, component: ComponentB}]
}],
'a/b', (s: RouterStateSnapshot) => {
const r: ActivatedRouteSnapshot = s.firstChild(<any>s.firstChild(s.root)) !;
expect(r.data).toEqual({one: 1, two: 2});
});
});
it('should set resolved data', () => {
checkRecognize(
[{path: 'a', resolve: {one: 'some-token'}, component: ComponentA}], 'a',
(s: RouterStateSnapshot) => {
const r: ActivatedRouteSnapshot = s.firstChild(s.root) !;
expect(r._resolve).toEqual({one: 'some-token'});
});
});
});
describe('empty path', () => {
describe('root', () => {
it('should work', () => {
checkRecognize([{path: '', component: ComponentA}], '', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, '', {}, ComponentA);
});
});
it('should match when terminal', () => {
checkRecognize(
[{path: '', pathMatch: 'full', component: ComponentA}], '',
(s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, '', {}, ComponentA);
});
});
it('should work (nested case)', () => {
checkRecognize(
[{path: '', component: ComponentA, children: [{path: '', component: ComponentB}]}], '',
(s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, '', {}, ComponentA);
checkActivatedRoute(s.firstChild(<any>s.firstChild(s.root)) !, '', {}, ComponentB);
});
});
it('should set url segment and index properly', () => {
const url = tree('');
recognize(
RootComponent,
[{path: '', component: ComponentA, children: [{path: '', component: ComponentB}]}], url,
'')
.forEach((s: RouterStateSnapshot) => {
expect(s.root._urlSegment).toBe(url.root);
expect(s.root._lastPathIndex).toBe(-1);
const c = s.firstChild(s.root) !;
expect(c._urlSegment).toBe(url.root);
expect(c._lastPathIndex).toBe(-1);
const c2 = s.firstChild(<any>s.firstChild(s.root)) !;
expect(c2._urlSegment).toBe(url.root);
expect(c2._lastPathIndex).toBe(-1);
});
});
it('should inherit params', () => {
checkRecognize(
[{
path: 'a',
component: ComponentA,
children: [
{path: '', component: ComponentB, children: [{path: '', component: ComponentC}]}
]
}],
'/a;p=1', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, 'a', {p: '1'}, ComponentA);
checkActivatedRoute(s.firstChild(s.firstChild(s.root) !) !, '', {p: '1'}, ComponentB);
checkActivatedRoute(
s.firstChild(s.firstChild(s.firstChild(s.root) !) !) !, '', {p: '1'}, ComponentC);
});
});
});
describe('aux split is in the middle', () => {
it('should match (non-terminal)', () => {
checkRecognize(
[{
path: 'a',
component: ComponentA,
children: [
{path: 'b', component: ComponentB},
{path: '', component: ComponentC, outlet: 'aux'}
]
}],
'a/b', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, 'a', {}, ComponentA);
const c = s.children(s.firstChild(s.root) !);
checkActivatedRoute(c[0], 'b', {}, ComponentB);
checkActivatedRoute(c[1], '', {}, ComponentC, 'aux');
});
});
    it('should match (non-terminal) when both primary and secondary match and primary has a child',
() => {
const config = [{
path: 'parent',
children: [
{
path: '',
component: ComponentA,
children: [
{path: 'b', component: ComponentB},
{path: 'c', component: ComponentC},
]
},
{
path: '',
component: ComponentD,
outlet: 'secondary',
}
]
}];
checkRecognize(config, 'parent/b', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.root, '', {}, RootComponent);
checkActivatedRoute(s.firstChild(s.root) !, 'parent', {}, undefined !);
const cc = s.children(s.firstChild(s.root) !);
checkActivatedRoute(cc[0], '', {}, ComponentA);
checkActivatedRoute(cc[1], '', {}, ComponentD, 'secondary');
checkActivatedRoute(s.firstChild(cc[0]) !, 'b', {}, ComponentB);
});
});
it('should match (terminal)', () => {
checkRecognize(
[{
path: 'a',
component: ComponentA,
children: [
{path: 'b', component: ComponentB},
{path: '', pathMatch: 'full', component: ComponentC, outlet: 'aux'}
]
}],
'a/b', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, 'a', {}, ComponentA);
const c = s.children(s.firstChild(s.root) !);
expect(c.length).toEqual(1);
checkActivatedRoute(c[0], 'b', {}, ComponentB);
});
});
it('should set url segment and index properly', () => {
const url = tree('a/b');
recognize(
RootComponent, [{
path: 'a',
component: ComponentA,
children: [
{path: 'b', component: ComponentB},
{path: '', component: ComponentC, outlet: 'aux'}
]
}],
url, 'a/b')
.forEach((s: RouterStateSnapshot) => {
expect(s.root._urlSegment).toBe(url.root);
expect(s.root._lastPathIndex).toBe(-1);
const a = s.firstChild(s.root) !;
expect(a._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(a._lastPathIndex).toBe(0);
const b = s.firstChild(a) !;
expect(b._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(b._lastPathIndex).toBe(1);
const c = s.children(a)[1];
expect(c._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(c._lastPathIndex).toBe(0);
});
});
it('should set url segment and index properly when nested empty-path segments', () => {
const url = tree('a');
recognize(
RootComponent, [{
path: 'a',
children: [
{path: '', component: ComponentB, children: [{path: '', component: ComponentC}]}
]
}],
url, 'a')
.forEach((s: RouterStateSnapshot) => {
expect(s.root._urlSegment).toBe(url.root);
expect(s.root._lastPathIndex).toBe(-1);
const a = s.firstChild(s.root) !;
expect(a._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(a._lastPathIndex).toBe(0);
const b = s.firstChild(a) !;
expect(b._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(b._lastPathIndex).toBe(0);
const c = s.firstChild(b) !;
expect(c._urlSegment).toBe(url.root.children[PRIMARY_OUTLET]);
expect(c._lastPathIndex).toBe(0);
});
});
it('should set url segment and index properly when nested empty-path segments (2)', () => {
const url = tree('');
recognize(
RootComponent, [{
path: '',
children: [
{path: '', component: ComponentB, children: [{path: '', component: ComponentC}]}
]
}],
url, '')
.forEach((s: RouterStateSnapshot) => {
expect(s.root._urlSegment).toBe(url.root);
expect(s.root._lastPathIndex).toBe(-1);
const a = s.firstChild(s.root) !;
expect(a._urlSegment).toBe(url.root);
expect(a._lastPathIndex).toBe(-1);
const b = s.firstChild(a) !;
expect(b._urlSegment).toBe(url.root);
expect(b._lastPathIndex).toBe(-1);
const c = s.firstChild(b) !;
expect(c._urlSegment).toBe(url.root);
expect(c._lastPathIndex).toBe(-1);
});
});
});
describe('aux split at the end (no right child)', () => {
it('should match (non-terminal)', () => {
checkRecognize(
[{
path: 'a',
component: ComponentA,
children: [
{path: '', component: ComponentB},
{path: '', component: ComponentC, outlet: 'aux'},
]
}],
'a', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, 'a', {}, ComponentA);
const c = s.children(s.firstChild(s.root) !);
checkActivatedRoute(c[0], '', {}, ComponentB);
checkActivatedRoute(c[1], '', {}, ComponentC, 'aux');
});
});
it('should match (terminal)', () => {
checkRecognize(
[{
path: 'a',
component: ComponentA,
children: [
{path: '', pathMatch: 'full', component: ComponentB},
{path: '', pathMatch: 'full', component: ComponentC, outlet: 'aux'},
]
}],
'a', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, 'a', {}, ComponentA);
const c = s.children(s.firstChild(s.root) !);
checkActivatedRoute(c[0], '', {}, ComponentB);
checkActivatedRoute(c[1], '', {}, ComponentC, 'aux');
});
});
    it('should work when only the primary outlet has an empty path', () => {
checkRecognize(
[{
path: 'a',
component: ComponentA,
children: [
{path: '', component: ComponentB},
{path: 'c', component: ComponentC, outlet: 'aux'},
]
}],
'a/(aux:c)', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, 'a', {}, ComponentA);
const c = s.children(s.firstChild(s.root) !);
checkActivatedRoute(c[0], '', {}, ComponentB);
checkActivatedRoute(c[1], 'c', {}, ComponentC, 'aux');
});
});
it('should work when split is at the root level', () => {
checkRecognize(
[
{path: '', component: ComponentA}, {path: 'b', component: ComponentB},
{path: 'c', component: ComponentC, outlet: 'aux'}
],
'(aux:c)', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.root, '', {}, RootComponent);
const children = s.children(s.root);
expect(children.length).toEqual(2);
checkActivatedRoute(children[0], '', {}, ComponentA);
checkActivatedRoute(children[1], 'c', {}, ComponentC, 'aux');
});
});
});
describe('split at the end (right child)', () => {
it('should match (non-terminal)', () => {
checkRecognize(
[{
path: 'a',
component: ComponentA,
children: [
{path: '', component: ComponentB, children: [{path: 'd', component: ComponentD}]},
{
path: '',
component: ComponentC,
outlet: 'aux',
children: [{path: 'e', component: ComponentE}]
},
]
}],
'a/(d//aux:e)', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, 'a', {}, ComponentA);
const c = s.children(s.firstChild(s.root) !);
checkActivatedRoute(c[0], '', {}, ComponentB);
checkActivatedRoute(s.firstChild(c[0]) !, 'd', {}, ComponentD);
checkActivatedRoute(c[1], '', {}, ComponentC, 'aux');
checkActivatedRoute(s.firstChild(c[1]) !, 'e', {}, ComponentE);
});
});
});
});
describe('wildcards', () => {
it('should support simple wildcards', () => {
checkRecognize(
[{path: '**', component: ComponentA}], 'a/b/c/d;a1=11', (s: RouterStateSnapshot) => {
checkActivatedRoute(s.firstChild(s.root) !, 'a/b/c/d', {a1: '11'}, ComponentA);
});
});
});
describe('componentless routes', () => {
it('should work', () => {
checkRecognize(
[{
path: 'p/:id',
children: [
{path: 'a', component: ComponentA},
{path: 'b', component: ComponentB, outlet: 'aux'}
]
}],
'p/11;pp=22/(a;pa=33//aux:b;pb=44)', (s: RouterStateSnapshot) => {
const p = s.firstChild(s.root) !;
checkActivatedRoute(p, 'p/11', {id: '11', pp: '22'}, undefined !);
const c = s.children(p);
checkActivatedRoute(c[0], 'a', {id: '11', pp: '22', pa: '33'}, ComponentA);
checkActivatedRoute(c[1], 'b', {id: '11', pp: '22', pb: '44'}, ComponentB, 'aux');
});
});
    it('should merge params until it encounters a normal route', () => {
checkRecognize(
[{
path: 'p/:id',
children: [{
path: 'a/:name',
children: [{
path: 'b',
component: ComponentB,
children: [{path: 'c', component: ComponentC}]
}]
}]
}],
'p/11/a/victor/b/c', (s: RouterStateSnapshot) => {
const p = s.firstChild(s.root) !;
checkActivatedRoute(p, 'p/11', {id: '11'}, undefined !);
const a = s.firstChild(p) !;
checkActivatedRoute(a, 'a/victor', {id: '11', name: 'victor'}, undefined !);
const b = s.firstChild(a) !;
checkActivatedRoute(b, 'b', {id: '11', name: 'victor'}, ComponentB);
const c = s.firstChild(b) !;
checkActivatedRoute(c, 'c', {}, ComponentC);
});
});
});
describe('empty URL leftovers', () => {
    it('should not throw when no children match', () => {
checkRecognize(
[{path: 'a', component: ComponentA, children: [{path: 'b', component: ComponentB}]}],
'/a', (s: RouterStateSnapshot) => {
const a = s.firstChild(s.root);
checkActivatedRoute(a !, 'a', {}, ComponentA);
});
});
    it('should not throw when no children match (aux routes)', () => {
checkRecognize(
[{
path: 'a',
component: ComponentA,
children: [
{path: 'b', component: ComponentB},
{path: '', component: ComponentC, outlet: 'aux'},
]
}],
'/a', (s: RouterStateSnapshot) => {
const a = s.firstChild(s.root) !;
checkActivatedRoute(a, 'a', {}, ComponentA);
checkActivatedRoute(a.children[0], '', {}, ComponentC, 'aux');
});
});
});
describe('custom path matchers', () => {
it('should use custom path matcher', () => {
const matcher = (s: any, g: any, r: any) => {
if (s[0].path === 'a') {
return {consumed: s.slice(0, 2), posParams: {id: s[1]}};
} else {
return null;
}
};
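      // Sketch of the custom-matcher contract assumed by this test: the
      // matcher receives the remaining UrlSegments and returns either
      // {consumed, posParams} on success or null on no match; here it
      // consumes two segments ('a' plus the id segment that follows it).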
checkRecognize(
[{
matcher: matcher,
component: ComponentA,
children: [{path: 'b', component: ComponentB}]
}] as any,
'/a/1;p=99/b', (s: RouterStateSnapshot) => {
const a = s.root.firstChild !;
checkActivatedRoute(a, 'a/1', {id: '1', p: '99'}, ComponentA);
checkActivatedRoute(a.firstChild !, 'b', {}, ComponentB);
});
});
});
describe('query parameters', () => {
it('should support query params', () => {
const config = [{path: 'a', component: ComponentA}];
checkRecognize(config, 'a?q=11', (s: RouterStateSnapshot) => {
expect(s.root.queryParams).toEqual({q: '11'});
expect(s.root.queryParamMap.get('q')).toEqual('11');
});
});
it('should freeze query params object', () => {
checkRecognize([{path: 'a', component: ComponentA}], 'a?q=11', (s: RouterStateSnapshot) => {
expect(Object.isFrozen(s.root.queryParams)).toBeTruthy();
});
});
});
describe('fragment', () => {
it('should support fragment', () => {
const config = [{path: 'a', component: ComponentA}];
checkRecognize(
config, 'a#f1', (s: RouterStateSnapshot) => { expect(s.root.fragment).toEqual('f1'); });
});
});
describe('error handling', () => {
    it('should error when two routes with the same outlet name are matched', () => {
recognize(
RootComponent,
[
{path: 'a', component: ComponentA}, {path: 'b', component: ComponentB, outlet: 'aux'},
{path: 'c', component: ComponentC, outlet: 'aux'}
],
tree('a(aux:b//aux:c)'), 'a(aux:b//aux:c)')
.subscribe((_) => {}, (s: RouterStateSnapshot) => {
expect(s.toString())
.toContain(
'Two segments cannot have the same outlet name: \'aux:b\' and \'aux:c\'.');
});
});
});
});
function checkRecognize(config: Routes, url: string, callback: any): void {
recognize(RootComponent, config, tree(url), url).subscribe(callback, e => { throw e; });
}
function checkActivatedRoute(
actual: ActivatedRouteSnapshot, url: string, params: Params, cmp: Function,
outlet: string = PRIMARY_OUTLET): void {
if (actual === null) {
expect(actual).not.toBeNull();
} else {
expect(actual.url.map(s => s.path).join('/')).toEqual(url);
expect(actual.params).toEqual(params);
expect(actual.component).toBe(cmp);
expect(actual.outlet).toEqual(outlet);
}
}
function tree(url: string): UrlTree {
return new DefaultUrlSerializer().parse(url);
}
class RootComponent {}
class ComponentA {}
class ComponentB {}
class ComponentC {}
class ComponentD {}
class ComponentE {}<|fim▁end|> | ],
'a/(b//left:c)', (s: RouterStateSnapshot) => {
const c = s.children(<any>s.firstChild(s.root));
checkActivatedRoute(c[0], 'b', {}, ComponentB, PRIMARY_OUTLET); |
<|file_name|>issue-17718-static-sync.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license<|fim▁hole|>// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(optin_builtin_traits)]
use std::marker::Sync;
struct Foo;
impl !Sync for Foo {}
static FOO: usize = 3;
static BAR: Foo = Foo;
//~^ ERROR: the trait `core::marker::Sync` is not implemented
fn main() {}<|fim▁end|> | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your |
<|file_name|>triqs_cthyb_qmc.cpp<|end_file_name|><|fim▁begin|>#if defined HAVE_CONFIG_H
#include "config.h"
#endif
#include <solver_core.hpp>
#include <triqs/operators/many_body_operator.hpp>
#include <triqs/hilbert_space/fundamental_operator_set.hpp>
#include <triqs/gfs.hpp>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <iomanip>
//#include <boost/mpi/communicator.hpp>
#include "triqs_cthyb_qmc.hpp"
using namespace std;
using namespace cthyb;
using triqs::utility::many_body_operator;
using triqs::utility::c;
using triqs::utility::c_dag;
using triqs::utility::n;
using namespace triqs::gfs;
using indices_type = triqs::utility::many_body_operator<double>::indices_t;
void ctqmc_triqs_run( bool rot_inv, bool leg_measure, bool hist, bool wrt_files, bool tot_not, /*boolean*/
int n_orbitals, int n_freq, int n_tau, int n_l, int n_cycles_, int cycle_length, int ntherm, int verbo, int seed, /*integer*/
double beta_, /*double*/
double *epsi, double *umat_ij, double *umat_ijkl, std::complex<double> *f_iw_ptr, std::complex<double> *g_iw_ptr, double *g_tau, double *gl, MPI_Fint *MPI_world_ptr ){ /*array pointers & simple pointers*/
cout.setf(ios::fixed); //cout.precision(17);
//Initialize Boost mpi environment
int rank;
boost::mpi::environment env;
//int rank = boost::mpi::communicator(MPI_Comm_f2c(*MPI_world_ptr), boost::mpi::comm_attach).rank();
{
    boost::mpi::communicator c(MPI_Comm_f2c(*MPI_world_ptr), boost::mpi::comm_attach);
//auto c_ = MPI_Comm_f2c(*MPI_world_ptr);
//boost::mpi::communicator c = boost::mpi::communicator(MPI_Comm_f2c(*MPI_world_ptr), boost::mpi::comm_attach);
rank=c.rank();
//MPI_Comm_rank(MPI_Comm_f2c(*MPI_world_ptr), &rank);
}
// Parameters relay from Fortran and default values affectation
  double beta = beta_; // Inverse temperature
  int num_orbitals = n_orbitals;
  //double U = U_; // Impurity-site interaction term
  double mu = 0.0; // Chemical potential: functionality available but not used here
  //bool n_n_only = !(rot_inv); // density-density only?
int Nfreq = n_freq; //Number of Matsubara frequencies
int Ntau = n_tau; //Number of imaginary times
int Nl = n_l; //Number of Legendre measures 30
int n_cycles = n_cycles_;
  int nspin = 2; // always 2: spin up and spin down
//Print informations about the Solver Parameters
if(rank==0 && verbo>0){
std::cout << endl <<" == Key Input Parameters for the TRIQS CTHYB solver == "<<endl<<endl;
std::cout <<" Beta = "<< beta <<" [eV]^(-1) "<<endl;
std::cout <<" Mu = "<< mu <<" [eV]"<<endl;
std::cout <<" Nflavour = "<< num_orbitals <<endl;
std::cout <<" Nfreq = "<< Nfreq << endl;
std::cout <<" Ntau = "<< Ntau << endl;
std::cout <<" Nl = "<< Nl << endl;
std::cout <<" Ncycles = "<< n_cycles << endl;
std::cout <<" Cycle length = "<< cycle_length << endl;
std::cout <<" Ntherm = "<< ntherm << endl;
std::cout <<" Verbosity (0->3) = "<< verbo << endl;
std::cout <<" Abinit Seed = "<< seed << endl <<endl;
for(int o = 0; o < num_orbitals; ++o){
std::cout << " e- level["<< o+1 <<"] = "<< setprecision(17) << epsi[o] <<" [eV]"<< endl; //ed != cste => vecteur a parcourir ensuite
}
std::cout << endl;
std::cout <<" U(i,j) [eV] = " <<endl<< endl<<"\t";
for(int o = 0; o < num_orbitals; ++o){
for(int oo = 0; oo < num_orbitals; ++oo){
        std::cout << setprecision(17) << fixed <<umat_ij[o+oo*num_orbitals] << "\t"; // flattened U matrix, column-major from Fortran
}
std::cout << endl<<"\t";
}
}
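  // Layout note (inferred from the indexing above): umat_ij arrives flattened
  // from Fortran in column-major order, so element (i,j) lives at
  // umat_ij[i + j*num_orbitals]; the 4-index umat_ijkl used below follows the
  // same convention with strides 1, n, n^2, n^3.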
// Hamiltonian definition
many_body_operator<double> H;
  //Spin-orbit general case (num_orbitals*2 = Nflavour)
  //Init basic Hamiltonian terms
  ///H = init_Hamiltonian( epsi, num_orbitals, U, J ); // U = constant
if(!rot_inv){
if(rank==0 && verbo>0) std::cout <<endl<<" == Density-Density Terms Included == "<<endl<<endl;
H = init_Hamiltonian( epsi, num_orbitals, umat_ij );
  //}else if(rank==0 && verbo>0) std::cout <<" == Rotationally Invariant Terms Not Included == "<< endl << endl;
  //Include rotationally invariant terms (J != 0): spin flips and pair hopping, beyond the density-density case
  }else{//if(rot_inv){
    if(rank==0 && verbo>0)std::cout <<" == Rotationally Invariant Terms Included == "<< endl << endl;
if(tot_not){
H = init_fullHamiltonian( epsi, num_orbitals, umat_ijkl );
}else{ //up and down separation
H = init_fullHamiltonianUpDown( epsi, num_orbitals, umat_ijkl );
}
}//else if(rank==0 && verbo>0) std::cout <<endl<<" == Density-Density Terms Not Included == "<<endl<<endl;
std::map<std::string, indices_type> gf_struct;
if( tot_not ){
std::map<std::string, indices_type> gf_struct_tmp{{"tot",indices_type{}}};//{{"tot",indices_type{}}}; //{0,1}
for(int o = 0; o < num_orbitals; ++o){
gf_struct_tmp["tot"].push_back(o);
}
gf_struct = gf_struct_tmp;
}else{ //spin orb case: general case
std::map<std::string, indices_type> gf_struct_tmp{{"up",indices_type{}},{"down",indices_type{}}};
for(int o = 0; o < num_orbitals; ++o){
gf_struct_tmp["up"].push_back(o);
gf_struct_tmp["down"].push_back(o);
}
gf_struct = gf_struct_tmp;
}
if(rank==0 && verbo>0) std::cout <<" == Green Function Structure Initialized == "<< endl << endl;
// Construct CTQMC solver with mesh parameters
solver_core solver(beta, gf_struct, Nfreq, Ntau, Nl);
if(rank==0 && verbo>0) std::cout <<" == Solver Core Initialized == "<< endl << endl;
//Fill in "hybridation+eps"=F(iw)~Delta(iw) coming from fortran inside delta_iw term RHS
gf<imfreq> delta_iw = gf<imfreq>{{beta, Fermion, Nfreq}, {num_orbitals,num_orbitals}};
auto w_mesh = delta_iw.mesh();
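  // The fermionic Matsubara mesh samples iw_n = (2n+1)*i*pi/beta for
  // n = 0..Nfreq-1; index_to_point(w_index) returns that complex frequency.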
  ofstream delta_init;
  if(rank==0) delta_init.open("delta_init_check"); // open the check file on a single rank only
if(rank==0){
delta_init << "# w_index l l' Im(iw) delta(iw)\n" << endl;
}
for(std::size_t w_index = 0; w_index < w_mesh.size(); ++w_index){<|fim▁hole|>
for(int o = 0; o < num_orbitals; ++o){
for(int oo = 0; oo < num_orbitals; ++oo){
cell(o,oo) = f_iw_ptr[o+oo*num_orbitals+w_index*num_orbitals*num_orbitals];
//cout <<"[IN C++]"<<" F[ w= "<< w_index <<" , l= "<< o <<" , l_= "<< oo <<" ] = "<< setprecision(15) << f_iw_ptr[o+oo*num_orbitals+w_index*num_orbitals*num_orbitals] << endl;
// if(o==oo){
//std::cout << w_index <<"\t"<< o <<"\t"<< oo <<"\t"<< imag(iw) <<"\t"<< setprecision(15) << f_iw_ptr[o+oo*num_orbitals+w_index*num_orbitals*num_orbitals] << endl;
// }
if(rank==0){
delta_init << w_index+1 <<"\t"<< o+1 <<"\t"<< oo+1 <<"\t"<< imag(iw) <<"\t"<< setprecision(17) << fixed << f_iw_ptr[o+oo*num_orbitals+w_index*num_orbitals*num_orbitals] << endl;
}
}
}
}
if(rank==0 && verbo>0) std::cout <<" == F(iw) Initialized == "<< endl << endl;
triqs::clef::placeholder<0> om_;
auto g0_iw = gf<imfreq>{{beta, Fermion, Nfreq}, {num_orbitals,num_orbitals}};
g0_iw(om_) << om_ + mu - delta_iw(om_); //calculate G0(iw)^-1 matrix
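  // i.e. [G0(iw)]^{-1} = iw + mu - Delta(iw): the Weiss field follows from the
  // hybridization function by the usual non-interacting Dyson relation.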
if(rank==0 && verbo>0) std::cout <<" == G0(iw)^-1 Initialized == "<< endl << endl;
if(tot_not){
    solver.G0_iw()[0] = triqs::gfs::inverse( g0_iw ); // invert the G0(iw)^-1 matrix and assign it to the solver
}else{
for (int i = 0; i < nspin; ++i){
solver.G0_iw()[i] = triqs::gfs::inverse( g0_iw );
}
}
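  // With tot_not the solver sees a single 'tot' block; otherwise the same
  // Weiss field is duplicated into the 'up' and 'down' spin blocks.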
if(rank==0 && verbo>0) std::cout <<" == G0(iw)^-1 Inverted => G0(iw) Constructed == "<< endl << endl;
// if(rank==0){ //Output Test of none interacting G0
//
// ofstream G0_up, G0_down;
//
// G0_up.open("G0_up_non_interagissant");
// G0_up << "# w_index l l' Im(iw) Real(G0) Im(G0)\n" << endl;
//
//
// //Report G0_iw
// int compteur = 0;
// for(std::size_t w_index = 0; w_index < w_mesh.size(); ++w_index){
// auto iw = w_mesh.index_to_point(w_index);
// for(int oo = 0; oo < num_orbitals; ++oo){
// for(int o = 0; o < num_orbitals; ++o){
//
//
//
// G0_up << fixed << w_index <<"\t"<< o <<"\t"<< oo <<"\t"<< fixed<< imag(iw) <<"\t"<< real(solver.G0_iw()[0].data()(w_index,o,oo)) <<"\t"<< imag(solver.G0_iw()[0].data()(w_index,o,oo)) << endl;
// compteur++;
// }
// }
// }
// }
if(rank==0 && verbo>0) std::cout <<" == Solver Parametrization == "<< endl << endl;
// Solver parameters
auto paramCTQMC = solve_parameters_t(H, n_cycles);
paramCTQMC.max_time = -1;
paramCTQMC.random_name = "";
paramCTQMC.random_seed = 123 * rank + 567;//seed;
paramCTQMC.length_cycle = cycle_length;
paramCTQMC.n_warmup_cycles = ntherm;
paramCTQMC.verbosity=verbo;
paramCTQMC.measure_g_l = leg_measure;
//paramCTQMC.move_double = true;
// paramCTQMC.make_histograms=hist;
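  // Note: the seed above (123*rank + 567) decorrelates the random streams of
  // the MPI walkers; the Abinit-provided `seed` argument is currently unused.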
if(rank==0 && verbo>0) std::cout <<" == Starting Solver [node "<< rank <<"] == "<< endl << endl;
// Solve!
solver.solve(paramCTQMC);
if(rank==0 && verbo>0) std::cout <<" == Reporting == "<< endl << endl;
// Report some stuff
// if(rank==0){
// ofstream glegendre, g_iw;
// g_iw.open("g_iw");
  //Report G_iw (note: this copies the non-interacting G0_iw into g_iw_ptr)
int compteur = 0;
//g_iw << fixed << setprecision(17) << w_index << "\t";
for(int oo = 0; oo < num_orbitals; ++oo){
for(int o = 0; o < num_orbitals; ++o){
      for(std::size_t w_index = 0; w_index < w_mesh.size(); ++w_index){ // for a given frequency
auto iw = w_mesh.index_to_point(w_index);
g_iw_ptr[compteur] = solver.G0_iw()[0].data()(w_index,o,oo);
//g_iw << fixed<< setprecision(17) << solver.G0_iw()[0].data()(w_index,o,oo);
compteur++;
}
}
//g_iw << endl;
}
//Report G_tau
compteur = 0;
for(int oo = 0; oo < num_orbitals; ++oo){
for(int o = 0; o < num_orbitals; ++o){
for(int tau = 0; tau < n_tau; ++tau){
g_tau[compteur]= solver.G_tau()[0].data()(tau,o,oo);
compteur++;
}
}
}
//Report G(l)
if( leg_measure ){
compteur = 0;
for(int oo = 0; oo < num_orbitals; ++oo){
for(int o = 0; o < num_orbitals; ++o){
for(int l = 0; l < n_l; ++l){
gl[compteur]= solver.G_l()[0].data()(l,o,oo);
compteur++;
}
}
}
}
//Write G(tau)
if( rank==0 && wrt_files ){
ofstream gtau;
gtau.open("Gtau_triqs.dat");
double _tau_=0.0;
for(int tau = 0; tau < n_tau; ++tau){
      _tau_=((tau*beta)/n_tau)*27.2113845; // convert to Hartree units (1 Ha = 27.2113845 eV)
gtau << fixed << setprecision(17) <<_tau_ << "\t";
for(int o = 0; o < num_orbitals; ++o){
for(int oo = 0; oo < num_orbitals; ++oo){
gtau << fixed<< setprecision(17) << solver.G_tau()[0].data()(tau,o,oo) <<"\t";
}
}
gtau << endl;
}
ofstream g_iw;
g_iw.open("giw");
for(std::size_t w_index = 0; w_index < w_mesh.size(); ++w_index){
auto iw = w_mesh.index_to_point(w_index);
for(int o = 0; o < num_orbitals; ++o){
for(int oo = 0; oo < num_orbitals; ++oo){
g_iw << fixed << setprecision(17) << solver.G0_iw()[0].data()(w_index,o,oo);
}
}
g_iw << endl;
}
}
//Write U(i,j)
if( rank==0 && wrt_files ){
ofstream umat;
umat.open("umat");
for(int o = 0; o < num_orbitals; ++o){
for(int oo = 0; oo < num_orbitals; ++oo){
umat <<"U( "<< o+1 <<" , "<< oo+1 <<" )= "<< fixed << setprecision(17) << umat_ij[o+(oo*num_orbitals)] <<endl;
}
}
}
if( leg_measure && rank==0 && wrt_files ){
ofstream g_l;
g_l.open("gl");
for(int l = 0; l < n_l; ++l){
for(int o = 0; o < num_orbitals; ++o){
for(int oo = 0; oo < num_orbitals; ++oo){
g_l << fixed << setprecision(17) << solver.G_l()[0].data()(l,o,oo) <<"\t";
}
}
g_l << endl;
}
}
if(rank==0 && verbo>0) std::cout <<" == CTHYB-QMC Process Finished [node "<< rank <<"] == "<< endl << endl;
}
/********************************************************/
/****************** Functions Used **********************/
/********************************************************/
// Hamiltonian Initialization with precalculate coeff (J=0 and U(i,j))
many_body_operator<double> init_Hamiltonian( double *eps, int nflavor, double *U){
many_body_operator<double> H; double coeff;
for(int i = 0; i < nflavor; ++i)
for(int j = 0; j < nflavor; ++j){
      // main-diagonal case:
if(i==j){
H += eps[i] * n("tot",i) ;
}
else{
coeff = double(U[i+j*nflavor])*0.5;
//cout << "U = "<< U[i+j*nflavor] <<" /2 = "<< coeff <<endl;
H += coeff * n("tot",i) * n("tot",j);
}
}
return H;
}
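// In formula form (given the flattened, column-major U layout noted earlier),
// the density-density Hamiltonian built above is
//   H = sum_i eps_i n_i + (1/2) sum_{i != j} U_ij n_i n_j,
// where the 1/2 compensates for each (i,j) pair being visited twice.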
// Hamiltonian Initialization with precalculated coeff U(i,j,k,l) in "tot" notation
many_body_operator<double> init_fullHamiltonian( double *eps, int nflavor, double *U){
many_body_operator<double> H; double coeff;
for(int i = 0; i < nflavor; ++i)
for(int j = 0; j < nflavor; ++j){
if(i==j){
H += eps[i] * n("tot",i) ; //Levels On diagonal Hamiltonian Matrix
}
for(int k = 0; k < nflavor; ++k)
for(int l = 0; l < nflavor; ++l){
          // two-body term; the 0.5 factor avoids double counting:
coeff = 0.5 * double( U[i+j*nflavor+k*nflavor*nflavor+l*nflavor*nflavor*nflavor] );
H += coeff * c_dag("tot",i)*c_dag("tot",j)*c("tot",l)*c("tot",k); //changing l <-> k order index to be in accordance with the Abinit definition
}
}
return H;
}
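// The rotationally invariant interaction above implements
//   H_int = sum_i eps_i n_i + (1/2) sum_{ijkl} U_ijkl c^+_i c^+_j c_l c_k,
// with the l <-> k swap matching the Abinit index convention, as noted inline.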
// Hamiltonian Initialization with precalculated coeff U(i,j,k,l) in "up" and "down" notation
many_body_operator<double> init_fullHamiltonianUpDown( double *eps, int nflavor, double *U ){
many_body_operator<double> H; double coeff;
for(int i = 0; i < nflavor; ++i)
for(int j = 0; j < nflavor; ++j){
if(i==j){
H += eps[i] * ( n("up",i) + n("down",i) ); //Levels On diagonal Hamiltonian Matrix
}
for(int k = 0; k < nflavor; ++k)
for(int l = 0; l < nflavor; ++l){
coeff = 0.5 * double(U[i+j*nflavor+k*nflavor*nflavor+l*nflavor*nflavor*nflavor]);
// for(int s = 0; s < nspin; ++s)
// for(int s1 = 0; s1 < nspin; ++s1){
H += coeff * c_dag("up",i) * c_dag("down",j) * c("up",l) * c("down",k);
H += coeff * c_dag("down",i) * c_dag("up",j) * c("down",l) * c("up",k);
// }
}
}
return H;
}<|fim▁end|> |
auto iw = w_mesh.index_to_point(w_index);
auto cell = delta_iw[w_index]; |