// NOTE: dataset-export table header (extraction artifact) commented out so the file parses:
// code | language | source | repo | path
import * as React from "react";
import * as ReactDOM from "react-dom/client";
import { act } from "@testing-library/react";
import { MemoryRouter, Routes, Route, Link } from "../../index";
/**
 * Dispatch a synthetic, cancelable "click" MouseEvent on the given anchor and
 * return it so callers can inspect `event.defaultPrevented` afterwards.
 * Extra MouseEvent fields (button, ctrlKey, ...) may be supplied via eventInit.
 */
function click(anchor: HTMLAnchorElement, eventInit?: MouseEventInit) {
  const defaults: MouseEventInit = {
    view: window,
    bubbles: true,
    cancelable: true,
  };
  const clickEvent = new MouseEvent("click", { ...defaults, ...eventInit });
  anchor.dispatchEvent(clickEvent);
  return clickEvent;
}
describe("A <Link> click", () => {
  // DOM container each test renders into; recreated per test so assertions
  // never observe markup left behind by a previous test.
  let node: HTMLDivElement;
  beforeEach(() => {
    node = document.createElement("div");
    document.body.appendChild(node);
  });
  afterEach(() => {
    document.body.removeChild(node);
    node = null!; // drop the reference; non-null assertion satisfies the type
  });
  it("navigates to the new page", () => {
    function Home() {
      return (
        <div>
          <h1>Home</h1>
          <Link to="../about">About</Link>
        </div>
      );
    }
    // React 18: render (and resulting effects) must be flushed inside act().
    act(() => {
      ReactDOM.createRoot(node).render(
        <MemoryRouter initialEntries={["/home"]}>
          <Routes>
            <Route path="home" element={<Home />} />
            <Route path="about" element={<h1>About</h1>} />
          </Routes>
        </MemoryRouter>,
      );
    });
    let anchor = node.querySelector("a");
    expect(anchor).not.toBeNull();
    let event: MouseEvent;
    act(() => {
      event = click(anchor);
    });
    // The router intercepts the click and performs an in-app navigation.
    expect(event.defaultPrevented).toBe(true);
    let h1 = node.querySelector("h1");
    expect(h1).not.toBeNull();
    expect(h1?.textContent).toEqual("About");
  });
  it("navigates to the new page when using an absolute URL on the same origin", () => {
    function Home() {
      return (
        <div>
          <h1>Home</h1>
          {/* Absolute URL, but same origin as the test environment — the
              router should still handle it client-side. */}
          <Link to="http://localhost/about">About</Link>
        </div>
      );
    }
    act(() => {
      ReactDOM.createRoot(node).render(
        <MemoryRouter initialEntries={["/home"]}>
          <Routes>
            <Route path="home" element={<Home />} />
            <Route path="about" element={<h1>About</h1>} />
          </Routes>
        </MemoryRouter>,
      );
    });
    let anchor = node.querySelector("a");
    expect(anchor).not.toBeNull();
    let event: MouseEvent;
    act(() => {
      event = click(anchor);
    });
    expect(event.defaultPrevented).toBe(true);
    let h1 = node.querySelector("h1");
    expect(h1).not.toBeNull();
    expect(h1?.textContent).toEqual("About");
  });
  describe("when an external absolute URL is specified", () => {
    // Cross-origin links must be left to the browser: no preventDefault.
    it("does not prevent default", () => {
      function Home() {
        return (
          <div>
            <h1>Home</h1>
            <Link to="https://remix.run">About</Link>
          </div>
        );
      }
      act(() => {
        ReactDOM.createRoot(node).render(
          <MemoryRouter initialEntries={["/home"]}>
            <Routes>
              <Route path="home" element={<Home />} />
              <Route path="about" element={<h1>About</h1>} />
            </Routes>
          </MemoryRouter>,
        );
      });
      let anchor = node.querySelector("a");
      expect(anchor).not.toBeNull();
      let event: MouseEvent;
      act(() => {
        event = click(anchor);
      });
      expect(event.defaultPrevented).toBe(false);
    });
    it("calls provided listener", () => {
      let handlerCalled;
      let defaultPrevented;
      function Home() {
        return (
          <div>
            <h1>Home</h1>
            <Link
              to="https://remix.run"
              onClick={(e) => {
                handlerCalled = true;
                defaultPrevented = e.defaultPrevented;
              }}
            >
              About
            </Link>
          </div>
        );
      }
      act(() => {
        ReactDOM.createRoot(node).render(
          <MemoryRouter initialEntries={["/home"]}>
            <Routes>
              <Route path="home" element={<Home />} />
              <Route path="about" element={<h1>About</h1>} />
            </Routes>
          </MemoryRouter>,
        );
      });
      act(() => {
        click(node.querySelector("a"));
      });
      // User-supplied onClick still fires, and sees the un-prevented event.
      expect(handlerCalled).toBe(true);
      expect(defaultPrevented).toBe(false);
    });
  });
  describe("when a same-origin/different-basename absolute URL is specified", () => {
    // Same origin but outside the router's basename — treated as external,
    // so the browser performs a full document navigation.
    it("does not prevent default", () => {
      function Home() {
        return (
          <div>
            <h1>Home</h1>
            <Link to="http://localhost/not/base">About</Link>
          </div>
        );
      }
      act(() => {
        ReactDOM.createRoot(node).render(
          <MemoryRouter initialEntries={["/base/home"]} basename="/base">
            <Routes>
              <Route path="home" element={<Home />} />
            </Routes>
          </MemoryRouter>,
        );
      });
      let anchor = node.querySelector("a");
      expect(anchor).not.toBeNull();
      let event: MouseEvent;
      act(() => {
        event = click(anchor);
      });
      expect(event.defaultPrevented).toBe(false);
    });
    it("calls provided listener", () => {
      let handlerCalled;
      let defaultPrevented;
      function Home() {
        return (
          <div>
            <h1>Home</h1>
            <Link
              to="http://localhost/not/base"
              onClick={(e) => {
                handlerCalled = true;
                defaultPrevented = e.defaultPrevented;
              }}
            >
              About
            </Link>
          </div>
        );
      }
      act(() => {
        ReactDOM.createRoot(node).render(
          <MemoryRouter initialEntries={["/base/home"]} basename="/base">
            <Routes>
              <Route path="home" element={<Home />} />
            </Routes>
          </MemoryRouter>,
        );
      });
      act(() => {
        click(node.querySelector("a"));
      });
      expect(handlerCalled).toBe(true);
      expect(defaultPrevented).toBe(false);
    });
  });
  describe("when reloadDocument is specified", () => {
    // reloadDocument opts out of client-side routing entirely.
    it("does not prevent default", () => {
      function Home() {
        return (
          <div>
            <h1>Home</h1>
            <Link reloadDocument to="../about">
              About
            </Link>
          </div>
        );
      }
      act(() => {
        ReactDOM.createRoot(node).render(
          <MemoryRouter initialEntries={["/home"]}>
            <Routes>
              <Route path="home" element={<Home />} />
              <Route path="about" element={<h1>About</h1>} />
            </Routes>
          </MemoryRouter>,
        );
      });
      let anchor = node.querySelector("a");
      expect(anchor).not.toBeNull();
      let event: MouseEvent;
      act(() => {
        event = click(anchor);
      });
      expect(event.defaultPrevented).toBe(false);
    });
    it("calls provided listener", () => {
      let handlerCalled;
      let defaultPrevented;
      function Home() {
        return (
          <div>
            <h1>Home</h1>
            <Link
              reloadDocument
              to="../about"
              onClick={(e) => {
                handlerCalled = true;
                defaultPrevented = e.defaultPrevented;
              }}
            >
              About
            </Link>
          </div>
        );
      }
      act(() => {
        ReactDOM.createRoot(node).render(
          <MemoryRouter initialEntries={["/home"]}>
            <Routes>
              <Route path="home" element={<Home />} />
              <Route path="about" element={<h1>About</h1>} />
            </Routes>
          </MemoryRouter>,
        );
      });
      act(() => {
        click(node.querySelector("a"));
      });
      expect(handlerCalled).toBe(true);
      expect(defaultPrevented).toBe(false);
    });
  });
  describe("when preventDefault is used on the click handler", () => {
    // A user handler calling preventDefault() must suppress the navigation.
    it("stays on the same page", () => {
      function Home() {
        function handleClick(event: React.MouseEvent<HTMLAnchorElement>) {
          event.preventDefault();
        }
        return (
          <div>
            <h1>Home</h1>
            <Link to="../about" onClick={handleClick}>
              About
            </Link>
          </div>
        );
      }
      act(() => {
        ReactDOM.createRoot(node).render(
          <MemoryRouter initialEntries={["/home"]}>
            <Routes>
              <Route path="home" element={<Home />} />
              <Route path="about" element={<h1>About</h1>} />
            </Routes>
          </MemoryRouter>,
        );
      });
      let anchor = node.querySelector("a");
      expect(anchor).not.toBeNull();
      act(() => {
        click(anchor);
      });
      // Still on the Home route — navigation was cancelled.
      let h1 = node.querySelector("h1");
      expect(h1).not.toBeNull();
      expect(h1?.textContent).toEqual("Home");
    });
  });
  describe("with a right click", () => {
    // Only main-button (button === 0) clicks should trigger navigation.
    it("stays on the same page", () => {
      function Home() {
        return (
          <div>
            <h1>Home</h1>
            <Link to="../about">About</Link>
          </div>
        );
      }
      act(() => {
        ReactDOM.createRoot(node).render(
          <MemoryRouter initialEntries={["/home"]}>
            <Routes>
              <Route path="home" element={<Home />} />
              <Route path="about" element={<h1>About</h1>} />
            </Routes>
          </MemoryRouter>,
        );
      });
      let anchor = node.querySelector("a");
      expect(anchor).not.toBeNull();
      act(() => {
        // https://developer.mozilla.org/en-US/docs/Web/API/MouseEvent/button
        let RightMouseButton = 2;
        click(anchor, { button: RightMouseButton });
      });
      let h1 = node.querySelector("h1");
      expect(h1).not.toBeNull();
      expect(h1?.textContent).toEqual("Home");
    });
  });
  describe("when the link is supposed to open in a new window", () => {
    // target="_blank" is left to the browser; the router must not hijack it.
    it("stays on the same page", () => {
      function Home() {
        return (
          <div>
            <h1>Home</h1>
            <Link to="../about" target="_blank">
              About
            </Link>
          </div>
        );
      }
      act(() => {
        ReactDOM.createRoot(node).render(
          <MemoryRouter initialEntries={["/home"]}>
            <Routes>
              <Route path="home" element={<Home />} />
              <Route path="about" element={<h1>About</h1>} />
            </Routes>
          </MemoryRouter>,
        );
      });
      let anchor = node.querySelector("a");
      expect(anchor).not.toBeNull();
      act(() => {
        click(anchor);
      });
      let h1 = node.querySelector("h1");
      expect(h1).not.toBeNull();
      expect(h1?.textContent).toEqual("Home");
    });
  });
  describe("when the modifier keys are used", () => {
    // Ctrl/Cmd-click conventionally opens a new tab; the router must defer
    // to the browser instead of navigating in-app.
    it("stays on the same page", () => {
      function Home() {
        return (
          <div>
            <h1>Home</h1>
            <Link to="../about">About</Link>
          </div>
        );
      }
      act(() => {
        ReactDOM.createRoot(node).render(
          <MemoryRouter initialEntries={["/home"]}>
            <Routes>
              <Route path="home" element={<Home />} />
              <Route path="about" element={<h1>About</h1>} />
            </Routes>
          </MemoryRouter>,
        );
      });
      let anchor = node.querySelector("a");
      expect(anchor).not.toBeNull();
      act(() => {
        click(anchor, { ctrlKey: true });
      });
      let h1 = node.querySelector("h1");
      expect(h1).not.toBeNull();
      expect(h1?.textContent).toEqual("Home");
    });
  });
});
from django.conf import settings
from django.core.management.base import BaseCommand
from iati.models import Activity
from iati.transaction.models import Transaction
class Command(BaseCommand):
    """Flag activities as searchable when they are reported by — or
    (transitively) funded by activities of — one of the organisations in
    ``settings.ROOT_ORGANISATIONS``; unflag everything else."""

    def update_searchable_activities(self):
        """
        Recompute the ``is_searchable`` flag for all activities based on
        the settings.ROOT_ORGANISATIONS list.
        """
        # Unset the flag on every currently-searchable activity that is not
        # reported by a root organisation.
        Activity.objects.filter(
            is_searchable=True
        ).exclude(
            reporting_organisations__ref__in=settings.ROOT_ORGANISATIONS
        ).update(
            is_searchable=False
        )
        # Set the flag on every root-organisation activity that lacks it.
        Activity.objects.filter(
            is_searchable=False,
            reporting_organisations__ref__in=settings.ROOT_ORGANISATIONS
        ).update(
            is_searchable=True
        )
        # Propagate the flag down to activities funded by root activities.
        activities = Activity.objects.filter(
            reporting_organisations__ref__in=settings.ROOT_ORGANISATIONS)
        for activity in activities:
            self.set_children_searchable(activity)

    def set_children_searchable(self, orig_activity):
        """
        Recursively mark activities funded by ``orig_activity`` searchable.

        The ``is_searchable`` flag itself serves as the visited set: already
        flagged activities are skipped, which also terminates the recursion
        on cyclic provider-activity links.
        """
        # All transactions naming this activity as their provider activity.
        provider_activity_transactions = Transaction.objects.filter(
            provider_organisation__provider_activity_id=orig_activity.id)
        for transaction in provider_activity_transactions:
            activity = transaction.activity
            if not activity.is_searchable:
                activity.is_searchable = True
                activity.save()
                self.set_children_searchable(activity)

    def handle(self, *args, **options):
        """Entry point for ``manage.py``; delegates to the update routine."""
        self.update_searchable_activities()
"""Add support for the Xiaomi TVs."""
import logging
import pymitv
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
# Name used for discovered TVs and as the config-entry default.
DEFAULT_NAME = "Xiaomi TV"
_LOGGER = logging.getLogger(__name__)
# Feature bitmask advertised to Home Assistant: volume stepping plus on/off.
SUPPORT_XIAOMI_TV = SUPPORT_VOLUME_STEP | SUPPORT_TURN_ON | SUPPORT_TURN_OFF
# No host is needed for configuration, however it can be set.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_HOST): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Xiaomi TV platform."""
    host = config.get(CONF_HOST)
    name = config.get(CONF_NAME)

    if host is None:
        # No host configured: fall back to scanning the network for TVs.
        add_entities(XiaomiTV(tv, DEFAULT_NAME) for tv in pymitv.Discover().scan())
        return

    # An explicit host skips discovery; verify a TV actually answers there.
    if pymitv.Discover().check_ip(host):
        # Register TV with Home Assistant.
        add_entities([XiaomiTV(host, name)])
    else:
        _LOGGER.error("Could not find Xiaomi TV with specified IP: %s", host)
class XiaomiTV(MediaPlayerEntity):
    """Represent the Xiaomi TV for Home Assistant."""

    def __init__(self, ip, name):
        """Receive IP address and name to construct class."""
        # Initialize the Xiaomi TV control client.
        self._tv = pymitv.TV(ip)
        # Default name value, only to be overridden by user.
        self._name = name
        # State is tracked optimistically (see assumed_state below).
        self._state = STATE_OFF

    @property
    def name(self):
        """Return the display name of this TV."""
        return self._name

    @property
    def state(self):
        """Return _state variable, containing the appropriate constant."""
        return self._state

    @property
    def assumed_state(self):
        """Indicate that state is assumed (the TV is never polled)."""
        return True

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_XIAOMI_TV

    def turn_off(self):
        """
        Instruct the TV to turn sleep.

        This is done instead of turning off, because the TV won't accept any
        input when turned off. Thus, the user would be unable to turn the TV
        back on, unless it's done manually.
        """
        if self._state != STATE_OFF:
            self._tv.sleep()
            self._state = STATE_OFF

    def turn_on(self):
        """Wake the TV back up from sleep."""
        if self._state != STATE_ON:
            self._tv.wake()
            self._state = STATE_ON

    def volume_up(self):
        """Increase volume by one."""
        self._tv.volume_up()

    def volume_down(self):
        """Decrease volume by one."""
        self._tv.volume_down()
"""Tests for letsencrypt.plugins.common."""
import unittest
import mock
from acme import challenges
from acme import jose
from letsencrypt import achallenges
from letsencrypt.tests import acme_util
from letsencrypt.tests import test_util
class NamespaceFunctionsTest(unittest.TestCase):
    """Tests for letsencrypt.plugins.common.*_namespace functions."""

    def test_option_namespace(self):
        # CLI option prefix: plugin name plus a trailing dash.
        from letsencrypt.plugins.common import option_namespace
        self.assertEqual(option_namespace("foo"), "foo-")

    def test_dest_namespace(self):
        # argparse dest prefix: plugin name plus a trailing underscore.
        from letsencrypt.plugins.common import dest_namespace
        self.assertEqual(dest_namespace("foo"), "foo_")

    def test_dest_namespace_with_dashes(self):
        # Dashes in the plugin name are normalized to underscores.
        from letsencrypt.plugins.common import dest_namespace
        self.assertEqual(dest_namespace("foo-bar"), "foo_bar_")
class PluginTest(unittest.TestCase):
    """Test for letsencrypt.plugins.common.Plugin."""

    def setUp(self):
        from letsencrypt.plugins.common import Plugin

        # Minimal concrete subclass: Plugin is abstract with respect to
        # add_parser_arguments, so supply one that registers a single option.
        class MockPlugin(Plugin):  # pylint: disable=missing-docstring
            @classmethod
            def add_parser_arguments(cls, add):
                add("foo-bar", dest="different_to_foo_bar", x=1, y=None)

        self.plugin_cls = MockPlugin
        self.config = mock.MagicMock()
        self.plugin = MockPlugin(config=self.config, name="mock")

    def test_init(self):
        self.assertEqual("mock", self.plugin.name)
        self.assertEqual(self.config, self.plugin.config)

    def test_option_namespace(self):
        self.assertEqual("mock-", self.plugin.option_namespace)

    def test_dest_namespace(self):
        self.assertEqual("mock_", self.plugin.dest_namespace)

    def test_dest(self):
        # Both dashed and underscored spellings map to the same dest.
        self.assertEqual("mock_foo_bar", self.plugin.dest("foo-bar"))
        self.assertEqual("mock_foo_bar", self.plugin.dest("foo_bar"))

    def test_conf(self):
        self.assertEqual(self.config.mock_foo_bar, self.plugin.conf("foo-bar"))

    def test_inject_parser_options(self):
        parser = mock.MagicMock()
        self.plugin_cls.inject_parser_options(parser, "mock")
        # note that inject_parser_options doesn't check if dest has
        # correct prefix
        parser.add_argument.assert_called_once_with(
            "--mock-foo-bar", dest="different_to_foo_bar", x=1, y=None)
class AddrTest(unittest.TestCase):
    """Tests for letsencrypt.client.plugins.common.Addr."""

    def setUp(self):
        from letsencrypt.plugins.common import Addr
        # One address per port flavor: absent, wildcard, and explicit.
        self.addr1 = Addr.fromstring("192.168.1.1")
        self.addr2 = Addr.fromstring("192.168.1.1:*")
        self.addr3 = Addr.fromstring("192.168.1.1:80")

    def test_fromstring(self):
        for addr, expected_port in ((self.addr1, ""),
                                    (self.addr2, "*"),
                                    (self.addr3, "80")):
            self.assertEqual(addr.get_addr(), "192.168.1.1")
            self.assertEqual(addr.get_port(), expected_port)

    def test_str(self):
        # str() round-trips the original input string.
        self.assertEqual(str(self.addr1), "192.168.1.1")
        self.assertEqual(str(self.addr2), "192.168.1.1:*")
        self.assertEqual(str(self.addr3), "192.168.1.1:80")

    def test_get_addr_obj(self):
        self.assertEqual(str(self.addr1.get_addr_obj("443")), "192.168.1.1:443")
        self.assertEqual(str(self.addr2.get_addr_obj("")), "192.168.1.1")
        self.assertEqual(str(self.addr1.get_addr_obj("*")), "192.168.1.1:*")

    def test_eq(self):
        self.assertEqual(self.addr1, self.addr2.get_addr_obj(""))
        self.assertNotEqual(self.addr1, self.addr2)
        self.assertFalse(self.addr1 == 3333)

    def test_set_inclusion(self):
        from letsencrypt.plugins.common import Addr
        # Equal addresses must hash identically for set membership to work.
        duplicates = set([Addr.fromstring("192.168.1.1"),
                          Addr.fromstring("192.168.1.1:*")])
        self.assertEqual(set([self.addr1, self.addr2]), duplicates)
class DvsniTest(unittest.TestCase):
    """Tests for letsencrypt.plugins.common.DvsniTest."""

    # Shared fixtures: an account key and two pending DVSNI challenges for
    # distinct domains, built once at class-definition time.
    auth_key = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))
    achalls = [
        achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(token=b'dvsni1'), "pending"),
            domain="encryption-example.demo", account_key=auth_key),
        achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(token=b'dvsni2'), "pending"),
            domain="letsencrypt.demo", account_key=auth_key),
    ]

    def setUp(self):
        from letsencrypt.plugins.common import Dvsni
        self.sni = Dvsni(configurator=mock.MagicMock())

    def test_add_chall(self):
        self.sni.add_chall(self.achalls[0], 0)
        self.assertEqual(1, len(self.sni.achalls))
        self.assertEqual([0], self.sni.indices)

    def test_setup_challenge_cert(self):
        # This is a helper function that can be used for handling
        # open context managers more elegantly. It avoids dealing with
        # __enter__ and __exit__ calls.
        # http://www.voidspace.org.uk/python/mock/helpers.html#mock.mock_open
        mock_open, mock_safe_open = mock.mock_open(), mock.mock_open()

        response = challenges.DVSNIResponse(validation=mock.Mock())
        achall = mock.MagicMock()
        achall.gen_cert_and_response.return_value = (response, "cert", "key")

        # Patch both file-writing paths so no real files are created; the
        # method must write the cert via open() and the key via safe_open().
        with mock.patch("letsencrypt.plugins.common.open",
                        mock_open, create=True):
            with mock.patch("letsencrypt.plugins.common.le_util.safe_open",
                            mock_safe_open):
                # pylint: disable=protected-access
                self.assertEqual(response, self.sni._setup_challenge_cert(
                    achall, "randomS1"))

        # pylint: disable=no-member
        mock_open.assert_called_once_with(self.sni.get_cert_path(achall), "wb")
        mock_open.return_value.write.assert_called_once_with("cert")
        # Key file must be created with restrictive permissions (0o400).
        mock_safe_open.assert_called_once_with(
            self.sni.get_key_path(achall), "wb", chmod=0o400)
        mock_safe_open.return_value.write.assert_called_once_with("key")
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
/*-------------------------------------------------------------------------
*
* ri_triggers.c
*
* Generic trigger procedures for referential integrity constraint
* checks.
*
* Note about memory management: the private hashtables kept here live
* across query and transaction boundaries, in fact they live as long as
* the backend does. This works because the hashtable structures
* themselves are allocated by dynahash.c in its permanent DynaHashCxt,
* and the SPI plans they point to are saved using SPI_keepplan().
* There is not currently any provision for throwing away a no-longer-needed
* plan --- consider improving this someday.
*
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
*
* src/backend/utils/adt/ri_triggers.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/htup_details.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_constraint.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/spi.h"
#include "lib/ilist.h"
#include "miscadmin.h"
#include "parser/parse_coerce.h"
#include "parser/parse_relation.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/guc.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/rls.h"
#include "utils/ruleutils.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
/*
* Local definitions
*/
#define RI_MAX_NUMKEYS INDEX_MAX_KEYS
#define RI_INIT_CONSTRAINTHASHSIZE 64
#define RI_INIT_QUERYHASHSIZE (RI_INIT_CONSTRAINTHASHSIZE * 4)
#define RI_KEYS_ALL_NULL 0
#define RI_KEYS_SOME_NULL 1
#define RI_KEYS_NONE_NULL 2
/* RI query type codes */
/* these queries are executed against the PK (referenced) table: */
#define RI_PLAN_CHECK_LOOKUPPK 1
#define RI_PLAN_CHECK_LOOKUPPK_FROM_PK 2
#define RI_PLAN_LAST_ON_PK RI_PLAN_CHECK_LOOKUPPK_FROM_PK
/* these queries are executed against the FK (referencing) table: */
#define RI_PLAN_CASCADE_ONDELETE 3
#define RI_PLAN_CASCADE_ONUPDATE 4
#define RI_PLAN_NO_ACTION 5
/* For RESTRICT, the same plan can be used for both ON DELETE and ON UPDATE triggers. */
#define RI_PLAN_RESTRICT 6
#define RI_PLAN_SETNULL_ONDELETE 7
#define RI_PLAN_SETNULL_ONUPDATE 8
#define RI_PLAN_SETDEFAULT_ONDELETE 9
#define RI_PLAN_SETDEFAULT_ONUPDATE 10
#define MAX_QUOTED_NAME_LEN (NAMEDATALEN*2+3)
#define MAX_QUOTED_REL_NAME_LEN (MAX_QUOTED_NAME_LEN*2)
#define RIAttName(rel, attnum) NameStr(*attnumAttName(rel, attnum))
#define RIAttType(rel, attnum) attnumTypeId(rel, attnum)
#define RIAttCollation(rel, attnum) attnumCollationId(rel, attnum)
#define RI_TRIGTYPE_INSERT 1
#define RI_TRIGTYPE_UPDATE 2
#define RI_TRIGTYPE_DELETE 3
/*
* RI_ConstraintInfo
*
* Information extracted from an FK pg_constraint entry. This is cached in
* ri_constraint_cache.
*
* Note that pf/pp/ff_eq_oprs may hold the overlaps operator instead of equals
* for the PERIOD part of a temporal foreign key.
*/
typedef struct RI_ConstraintInfo
{
Oid constraint_id; /* OID of pg_constraint entry (hash key) */
bool valid; /* successfully initialized? */
Oid constraint_root_id; /* OID of topmost ancestor constraint;
* same as constraint_id if not inherited */
uint32 oidHashValue; /* hash value of constraint_id */
uint32 rootHashValue; /* hash value of constraint_root_id */
NameData conname; /* name of the FK constraint */
Oid pk_relid; /* referenced relation */
Oid fk_relid; /* referencing relation */
char confupdtype; /* foreign key's ON UPDATE action */
char confdeltype; /* foreign key's ON DELETE action */
int ndelsetcols; /* number of columns referenced in ON DELETE
* SET clause */
int16 confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on
* delete */
char confmatchtype; /* foreign key's match type */
bool hasperiod; /* if the foreign key uses PERIOD */
int nkeys; /* number of key columns */
int16 pk_attnums[RI_MAX_NUMKEYS]; /* attnums of referenced cols */
int16 fk_attnums[RI_MAX_NUMKEYS]; /* attnums of referencing cols */
Oid pf_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = FK) */
Oid pp_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = PK) */
Oid ff_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (FK = FK) */
Oid period_contained_by_oper; /* anyrange <@ anyrange (or
* multiranges) */
Oid agged_period_contained_by_oper; /* fkattr <@ range_agg(pkattr) */
Oid period_intersect_oper; /* anyrange * anyrange (or
* multiranges) */
dlist_node valid_link; /* Link in list of valid entries */
} RI_ConstraintInfo;
/*
* RI_QueryKey
*
* The key identifying a prepared SPI plan in our query hashtable
*/
typedef struct RI_QueryKey
{
Oid constr_id; /* OID of pg_constraint entry */
int32 constr_queryno; /* query type ID, see RI_PLAN_XXX above */
} RI_QueryKey;
/*
* RI_QueryHashEntry
*/
typedef struct RI_QueryHashEntry
{
RI_QueryKey key;
SPIPlanPtr plan;
} RI_QueryHashEntry;
/*
* RI_CompareKey
*
* The key identifying an entry showing how to compare two values
*/
typedef struct RI_CompareKey
{
Oid eq_opr; /* the equality operator to apply */
Oid typeid; /* the data type to apply it to */
} RI_CompareKey;
/*
* RI_CompareHashEntry
*/
typedef struct RI_CompareHashEntry
{
RI_CompareKey key;
bool valid; /* successfully initialized? */
FmgrInfo eq_opr_finfo; /* call info for equality fn */
FmgrInfo cast_func_finfo; /* in case we must coerce input */
} RI_CompareHashEntry;
/*
* Local data
*/
static HTAB *ri_constraint_cache = NULL;
static HTAB *ri_query_cache = NULL;
static HTAB *ri_compare_cache = NULL;
static dclist_head ri_constraint_cache_valid_list;
/*
* Local function prototypes
*/
static bool ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
TupleTableSlot *oldslot,
const RI_ConstraintInfo *riinfo);
static Datum ri_restrict(TriggerData *trigdata, bool is_no_action);
static Datum ri_set(TriggerData *trigdata, bool is_set_null, int tgkind);
static void quoteOneName(char *buffer, const char *name);
static void quoteRelationName(char *buffer, Relation rel);
static void ri_GenerateQual(StringInfo buf,
const char *sep,
const char *leftop, Oid leftoptype,
Oid opoid,
const char *rightop, Oid rightoptype);
static void ri_GenerateQualCollation(StringInfo buf, Oid collation);
static int ri_NullCheck(TupleDesc tupDesc, TupleTableSlot *slot,
const RI_ConstraintInfo *riinfo, bool rel_is_pk);
static void ri_BuildQueryKey(RI_QueryKey *key,
const RI_ConstraintInfo *riinfo,
int32 constr_queryno);
static bool ri_KeysEqual(Relation rel, TupleTableSlot *oldslot, TupleTableSlot *newslot,
const RI_ConstraintInfo *riinfo, bool rel_is_pk);
static bool ri_CompareWithCast(Oid eq_opr, Oid typeid, Oid collid,
Datum lhs, Datum rhs);
static void ri_InitHashTables(void);
static void InvalidateConstraintCacheCallBack(Datum arg, int cacheid, uint32 hashvalue);
static SPIPlanPtr ri_FetchPreparedPlan(RI_QueryKey *key);
static void ri_HashPreparedPlan(RI_QueryKey *key, SPIPlanPtr plan);
static RI_CompareHashEntry *ri_HashCompareOp(Oid eq_opr, Oid typeid);
static void ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname,
int tgkind);
static const RI_ConstraintInfo *ri_FetchConstraintInfo(Trigger *trigger,
Relation trig_rel, bool rel_is_pk);
static const RI_ConstraintInfo *ri_LoadConstraintInfo(Oid constraintOid);
static Oid get_ri_constraint_root(Oid constrOid);
static SPIPlanPtr ri_PlanCheck(const char *querystr, int nargs, Oid *argtypes,
RI_QueryKey *qkey, Relation fk_rel, Relation pk_rel);
static bool ri_PerformCheck(const RI_ConstraintInfo *riinfo,
RI_QueryKey *qkey, SPIPlanPtr qplan,
Relation fk_rel, Relation pk_rel,
TupleTableSlot *oldslot, TupleTableSlot *newslot,
bool is_restrict,
bool detectNewRows, int expect_OK);
static void ri_ExtractValues(Relation rel, TupleTableSlot *slot,
const RI_ConstraintInfo *riinfo, bool rel_is_pk,
Datum *vals, char *nulls);
pg_noreturn static void ri_ReportViolation(const RI_ConstraintInfo *riinfo,
Relation pk_rel, Relation fk_rel,
TupleTableSlot *violatorslot, TupleDesc tupdesc,
int queryno, bool is_restrict, bool partgone);
/*
 * RI_FKey_check -
 *
 * Check foreign key existence (combined for INSERT and UPDATE).
 *
 * Shared implementation behind the RI_FKey_check_ins/_upd trigger entry
 * points: verifies that the new FK row's key values have a matching row in
 * the referenced (PK) table, locking that row FOR KEY SHARE so it cannot
 * disappear before this transaction completes.
 */
static Datum
RI_FKey_check(TriggerData *trigdata)
{
	const RI_ConstraintInfo *riinfo;
	Relation	fk_rel;
	Relation	pk_rel;
	TupleTableSlot *newslot;
	RI_QueryKey qkey;
	SPIPlanPtr	qplan;

	riinfo = ri_FetchConstraintInfo(trigdata->tg_trigger,
									trigdata->tg_relation, false);

	/* For UPDATE check the new row version; for INSERT, the inserted row. */
	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		newslot = trigdata->tg_newslot;
	else
		newslot = trigdata->tg_trigslot;

	/*
	 * We should not even consider checking the row if it is no longer valid,
	 * since it was either deleted (so the deferred check should be skipped)
	 * or updated (in which case only the latest version of the row should be
	 * checked).  Test its liveness according to SnapshotSelf.  We need pin
	 * and lock on the buffer to call HeapTupleSatisfiesVisibility.  Caller
	 * should be holding pin, but not lock.
	 */
	if (!table_tuple_satisfies_snapshot(trigdata->tg_relation, newslot, SnapshotSelf))
		return PointerGetDatum(NULL);

	/*
	 * Get the relation descriptors of the FK and PK tables.
	 *
	 * pk_rel is opened in RowShareLock mode since that's what our eventual
	 * SELECT FOR KEY SHARE will get on it.
	 */
	fk_rel = trigdata->tg_relation;
	pk_rel = table_open(riinfo->pk_relid, RowShareLock);

	switch (ri_NullCheck(RelationGetDescr(fk_rel), newslot, riinfo, false))
	{
		case RI_KEYS_ALL_NULL:

			/*
			 * No further check needed - an all-NULL key passes every type of
			 * foreign key constraint.
			 */
			table_close(pk_rel, RowShareLock);
			return PointerGetDatum(NULL);

		case RI_KEYS_SOME_NULL:

			/*
			 * This is the only case that differs between the three kinds of
			 * MATCH.
			 */
			switch (riinfo->confmatchtype)
			{
				case FKCONSTR_MATCH_FULL:

					/*
					 * Not allowed - MATCH FULL says either all or none of
					 * the attributes can be NULLs
					 */
					ereport(ERROR,
							(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
							 errmsg("insert or update on table \"%s\" violates foreign key constraint \"%s\"",
									RelationGetRelationName(fk_rel),
									NameStr(riinfo->conname)),
							 errdetail("MATCH FULL does not allow mixing of null and nonnull key values."),
							 errtableconstraint(fk_rel,
												NameStr(riinfo->conname))));
					/* not reached: ereport(ERROR) does not return */
					table_close(pk_rel, RowShareLock);
					return PointerGetDatum(NULL);

				case FKCONSTR_MATCH_SIMPLE:

					/*
					 * MATCH SIMPLE - if ANY column is null, the key passes
					 * the constraint.
					 */
					table_close(pk_rel, RowShareLock);
					return PointerGetDatum(NULL);

#ifdef NOT_USED
				case FKCONSTR_MATCH_PARTIAL:

					/*
					 * MATCH PARTIAL - all non-null columns must match. (not
					 * implemented, can be done by modifying the query below
					 * to only include non-null columns, or by writing a
					 * special version here)
					 */
					break;
#endif
			}
			/* every reachable match type above returned or errored */

		case RI_KEYS_NONE_NULL:

			/*
			 * Have a full qualified key - continue below for all three kinds
			 * of MATCH.
			 */
			break;
	}

	SPI_connect();

	/* Fetch or prepare a saved plan for the real check */
	ri_BuildQueryKey(&qkey, riinfo, RI_PLAN_CHECK_LOOKUPPK);

	if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
	{
		StringInfoData querybuf;
		char		pkrelname[MAX_QUOTED_REL_NAME_LEN];
		char		attname[MAX_QUOTED_NAME_LEN];
		char		paramname[16];
		const char *querysep;
		Oid			queryoids[RI_MAX_NUMKEYS];
		const char *pk_only;

		/* ----------
		 * The query string built is
		 *	SELECT 1 FROM [ONLY] <pktable> x WHERE pkatt1 = $1 [AND ...]
		 *		   FOR KEY SHARE OF x
		 * The type id's for the $ parameters are those of the
		 * corresponding FK attributes.
		 *
		 * But for temporal FKs we need to make sure
		 * the FK's range is completely covered.
		 * So we use this query instead:
		 *	SELECT 1
		 *	FROM	(
		 *		SELECT pkperiodatt AS r
		 *		FROM   [ONLY] pktable x
		 *		WHERE  pkatt1 = $1 [AND ...]
		 *		AND    pkperiodatt && $n
		 *		FOR KEY SHARE OF x
		 *	) x1
		 *	HAVING $n <@ range_agg(x1.r)
		 * Note if FOR KEY SHARE ever allows GROUP BY and HAVING
		 * we can make this a bit simpler.
		 * ----------
		 */
		initStringInfo(&querybuf);
		pk_only = pk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ?
			"" : "ONLY ";
		quoteRelationName(pkrelname, pk_rel);
		if (riinfo->hasperiod)
		{
			/* the PERIOD column is always the last key column */
			quoteOneName(attname,
						 RIAttName(pk_rel, riinfo->pk_attnums[riinfo->nkeys - 1]));

			appendStringInfo(&querybuf,
							 "SELECT 1 FROM (SELECT %s AS r FROM %s%s x",
							 attname, pk_only, pkrelname);
		}
		else
		{
			appendStringInfo(&querybuf, "SELECT 1 FROM %s%s x",
							 pk_only, pkrelname);
		}
		querysep = "WHERE";
		for (int i = 0; i < riinfo->nkeys; i++)
		{
			Oid			pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);
			Oid			fk_type = RIAttType(fk_rel, riinfo->fk_attnums[i]);

			quoteOneName(attname,
						 RIAttName(pk_rel, riinfo->pk_attnums[i]));
			sprintf(paramname, "$%d", i + 1);
			ri_GenerateQual(&querybuf, querysep,
							attname, pk_type,
							riinfo->pf_eq_oprs[i],
							paramname, fk_type);
			querysep = "AND";
			queryoids[i] = fk_type;
		}
		appendStringInfoString(&querybuf, " FOR KEY SHARE OF x");
		if (riinfo->hasperiod)
		{
			Oid			fk_type = RIAttType(fk_rel, riinfo->fk_attnums[riinfo->nkeys - 1]);

			appendStringInfoString(&querybuf, ") x1 HAVING ");
			sprintf(paramname, "$%d", riinfo->nkeys);
			ri_GenerateQual(&querybuf, "",
							paramname, fk_type,
							riinfo->agged_period_contained_by_oper,
							"pg_catalog.range_agg", ANYMULTIRANGEOID);
			appendStringInfoString(&querybuf, "(x1.r)");
		}

		/* Prepare and save the plan */
		qplan = ri_PlanCheck(querybuf.data, riinfo->nkeys, queryoids,
							 &qkey, fk_rel, pk_rel);
	}

	/*
	 * Now check that foreign key exists in PK table
	 *
	 * XXX detectNewRows must be true when a partitioned table is on the
	 * referenced side.  The reason is that our snapshot must be fresh in
	 * order for the hack in find_inheritance_children() to work.
	 */
	ri_PerformCheck(riinfo, &qkey, qplan,
					fk_rel, pk_rel,
					NULL, newslot,
					false,
					pk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE,
					SPI_OK_SELECT);

	if (SPI_finish() != SPI_OK_FINISH)
		elog(ERROR, "SPI_finish failed");

	table_close(pk_rel, RowShareLock);

	return PointerGetDatum(NULL);
}
/*
 * RI_FKey_check_ins -
 *
 * Check foreign key existence at insert event on FK table.
 * SQL-callable trigger entry point (PG_FUNCTION_ARGS).
 */
Datum
RI_FKey_check_ins(PG_FUNCTION_ARGS)
{
	/* Check that this is a valid trigger call on the right time and event. */
	ri_CheckTrigger(fcinfo, "RI_FKey_check_ins", RI_TRIGTYPE_INSERT);

	/* Share code with UPDATE case. */
	return RI_FKey_check((TriggerData *) fcinfo->context);
}
/*
 * RI_FKey_check_upd -
 *
 * Check foreign key existence at update event on FK table.
 * SQL-callable trigger entry point (PG_FUNCTION_ARGS).
 */
Datum
RI_FKey_check_upd(PG_FUNCTION_ARGS)
{
	/* Check that this is a valid trigger call on the right time and event. */
	ri_CheckTrigger(fcinfo, "RI_FKey_check_upd", RI_TRIGTYPE_UPDATE);

	/* Share code with INSERT case. */
	return RI_FKey_check((TriggerData *) fcinfo->context);
}
/*
 * ri_Check_Pk_Match
 *
 * Check to see if another PK row has been created that provides the same
 * key values as the "oldslot" that's been modified or deleted in our trigger
 * event. Returns true if a match is found in the PK table.
 *
 * We assume the caller checked that the oldslot contains no NULL key values,
 * since otherwise a match is impossible.
 */
static bool
ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
                  TupleTableSlot *oldslot,
                  const RI_ConstraintInfo *riinfo)
{
    SPIPlanPtr  qplan;
    RI_QueryKey qkey;
    bool        result;

    /* Only called for non-null rows */
    Assert(ri_NullCheck(RelationGetDescr(pk_rel), oldslot, riinfo, true) == RI_KEYS_NONE_NULL);

    SPI_connect();

    /*
     * Fetch or prepare a saved plan for checking PK table with values coming
     * from a PK row
     */
    ri_BuildQueryKey(&qkey, riinfo, RI_PLAN_CHECK_LOOKUPPK_FROM_PK);

    if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
    {
        StringInfoData querybuf;
        char        pkrelname[MAX_QUOTED_REL_NAME_LEN];
        char        attname[MAX_QUOTED_NAME_LEN];
        char        paramname[16];
        const char *querysep;
        const char *pk_only;
        Oid         queryoids[RI_MAX_NUMKEYS];

        /* ----------
         * The query string built is
         *  SELECT 1 FROM [ONLY] <pktable> x WHERE pkatt1 = $1 [AND ...]
         *      FOR KEY SHARE OF x
         * The type id's for the $ parameters are those of the
         * PK attributes themselves.
         *
         * But for temporal FKs we need to make sure
         * the old PK's range is completely covered.
         * So we use this query instead:
         *  SELECT 1
         *  FROM    (
         *      SELECT pkperiodatt AS r
         *      FROM   [ONLY] pktable x
         *      WHERE  pkatt1 = $1 [AND ...]
         *      AND    pkperiodatt && $n
         *      FOR KEY SHARE OF x
         *  ) x1
         *  HAVING $n <@ range_agg(x1.r)
         * Note if FOR KEY SHARE ever allows GROUP BY and HAVING
         * we can make this a bit simpler.
         * ----------
         */
        initStringInfo(&querybuf);
        /* Partitioned tables must be scanned with their children, so no ONLY */
        pk_only = pk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ?
            "" : "ONLY ";
        quoteRelationName(pkrelname, pk_rel);
        if (riinfo->hasperiod)
        {
            /* Temporal case: wrap the scan in a subquery exposing the period as "r" */
            quoteOneName(attname, RIAttName(pk_rel, riinfo->pk_attnums[riinfo->nkeys - 1]));
            appendStringInfo(&querybuf,
                             "SELECT 1 FROM (SELECT %s AS r FROM %s%s x",
                             attname, pk_only, pkrelname);
        }
        else
        {
            appendStringInfo(&querybuf, "SELECT 1 FROM %s%s x",
                             pk_only, pkrelname);
        }
        /* Append one "pkattN <op> $N" qual per key column */
        querysep = "WHERE";
        for (int i = 0; i < riinfo->nkeys; i++)
        {
            Oid         pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);

            quoteOneName(attname,
                         RIAttName(pk_rel, riinfo->pk_attnums[i]));
            sprintf(paramname, "$%d", i + 1);
            ri_GenerateQual(&querybuf, querysep,
                            attname, pk_type,
                            riinfo->pp_eq_oprs[i],
                            paramname, pk_type);
            querysep = "AND";
            queryoids[i] = pk_type;
        }
        appendStringInfoString(&querybuf, " FOR KEY SHARE OF x");
        if (riinfo->hasperiod)
        {
            Oid         fk_type = RIAttType(fk_rel, riinfo->fk_attnums[riinfo->nkeys - 1]);

            /* Close the subquery and reuse the last parameter ($nkeys) in HAVING */
            appendStringInfoString(&querybuf, ") x1 HAVING ");
            sprintf(paramname, "$%d", riinfo->nkeys);
            ri_GenerateQual(&querybuf, "",
                            paramname, fk_type,
                            riinfo->agged_period_contained_by_oper,
                            "pg_catalog.range_agg", ANYMULTIRANGEOID);
            appendStringInfoString(&querybuf, "(x1.r)");
        }

        /* Prepare and save the plan */
        qplan = ri_PlanCheck(querybuf.data, riinfo->nkeys, queryoids,
                             &qkey, fk_rel, pk_rel);
    }

    /*
     * We have a plan now. Run it.
     */
    result = ri_PerformCheck(riinfo, &qkey, qplan,
                             fk_rel, pk_rel,
                             oldslot, NULL,
                             false,
                             true,      /* treat like update */
                             SPI_OK_SELECT);

    if (SPI_finish() != SPI_OK_FINISH)
        elog(ERROR, "SPI_finish failed");

    return result;
}
/*
* RI_FKey_noaction_del -
*
* Give an error and roll back the current transaction if the
* delete has resulted in a violation of the given referential
* integrity constraint.
*/
Datum
RI_FKey_noaction_del(PG_FUNCTION_ARGS)
{
/* Check that this is a valid trigger call on the right time and event. */
ri_CheckTrigger(fcinfo, "RI_FKey_noaction_del", RI_TRIGTYPE_DELETE);
/* Share code with RESTRICT/UPDATE cases. */
return ri_restrict((TriggerData *) fcinfo->context, true);
}
/*
* RI_FKey_restrict_del -
*
* Restrict delete from PK table to rows unreferenced by foreign key.
*
* The SQL standard intends that this referential action occur exactly when
* the delete is performed, rather than after. This appears to be
* the only difference between "NO ACTION" and "RESTRICT". In Postgres
* we still implement this as an AFTER trigger, but it's non-deferrable.
*/
Datum
RI_FKey_restrict_del(PG_FUNCTION_ARGS)
{
/* Check that this is a valid trigger call on the right time and event. */
ri_CheckTrigger(fcinfo, "RI_FKey_restrict_del", RI_TRIGTYPE_DELETE);
/* Share code with NO ACTION/UPDATE cases. */
return ri_restrict((TriggerData *) fcinfo->context, false);
}
/*
* RI_FKey_noaction_upd -
*
* Give an error and roll back the current transaction if the
* update has resulted in a violation of the given referential
* integrity constraint.
*/
Datum
RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
{
/* Check that this is a valid trigger call on the right time and event. */
ri_CheckTrigger(fcinfo, "RI_FKey_noaction_upd", RI_TRIGTYPE_UPDATE);
/* Share code with RESTRICT/DELETE cases. */
return ri_restrict((TriggerData *) fcinfo->context, true);
}
/*
* RI_FKey_restrict_upd -
*
* Restrict update of PK to rows unreferenced by foreign key.
*
* The SQL standard intends that this referential action occur exactly when
* the update is performed, rather than after. This appears to be
* the only difference between "NO ACTION" and "RESTRICT". In Postgres
* we still implement this as an AFTER trigger, but it's non-deferrable.
*/
Datum
RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
{
/* Check that this is a valid trigger call on the right time and event. */
ri_CheckTrigger(fcinfo, "RI_FKey_restrict_upd", RI_TRIGTYPE_UPDATE);
/* Share code with NO ACTION/DELETE cases. */
return ri_restrict((TriggerData *) fcinfo->context, false);
}
/*
 * ri_restrict -
 *
 * Common code for ON DELETE RESTRICT, ON DELETE NO ACTION,
 * ON UPDATE RESTRICT, and ON UPDATE NO ACTION.
 */
static Datum
ri_restrict(TriggerData *trigdata, bool is_no_action)
{
    const RI_ConstraintInfo *riinfo;
    Relation    fk_rel;
    Relation    pk_rel;
    TupleTableSlot *oldslot;
    RI_QueryKey qkey;
    SPIPlanPtr  qplan;

    riinfo = ri_FetchConstraintInfo(trigdata->tg_trigger,
                                    trigdata->tg_relation, true);

    /*
     * Get the relation descriptors of the FK and PK tables and the old tuple.
     *
     * fk_rel is opened in RowShareLock mode since that's what our eventual
     * SELECT FOR KEY SHARE will get on it.
     */
    fk_rel = table_open(riinfo->fk_relid, RowShareLock);
    pk_rel = trigdata->tg_relation;
    oldslot = trigdata->tg_trigslot;

    /*
     * If another PK row now exists providing the old key values, we should
     * not do anything. However, this check should only be made in the NO
     * ACTION case; in RESTRICT cases we don't wish to allow another row to be
     * substituted.
     *
     * If the foreign key has PERIOD, we incorporate looking for replacement
     * rows in the main SQL query below, so we needn't do it here.
     */
    if (is_no_action && !riinfo->hasperiod &&
        ri_Check_Pk_Match(pk_rel, fk_rel, oldslot, riinfo))
    {
        table_close(fk_rel, RowShareLock);
        return PointerGetDatum(NULL);
    }

    SPI_connect();

    /*
     * Fetch or prepare a saved plan for the restrict lookup (it's the same
     * query for delete and update cases)
     */
    ri_BuildQueryKey(&qkey, riinfo, is_no_action ? RI_PLAN_NO_ACTION : RI_PLAN_RESTRICT);

    if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
    {
        StringInfoData querybuf;
        char        pkrelname[MAX_QUOTED_REL_NAME_LEN];
        char        fkrelname[MAX_QUOTED_REL_NAME_LEN];
        char        attname[MAX_QUOTED_NAME_LEN];
        char        periodattname[MAX_QUOTED_NAME_LEN];
        char        paramname[16];
        const char *querysep;
        Oid         queryoids[RI_MAX_NUMKEYS];
        const char *fk_only;

        /* ----------
         * The query string built is
         *  SELECT 1 FROM [ONLY] <fktable> x WHERE $1 = fkatt1 [AND ...]
         *      FOR KEY SHARE OF x
         * The type id's for the $ parameters are those of the
         * corresponding PK attributes.
         * ----------
         */
        initStringInfo(&querybuf);
        /* Partitioned tables must be scanned with their children, so no ONLY */
        fk_only = fk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ?
            "" : "ONLY ";
        quoteRelationName(fkrelname, fk_rel);
        appendStringInfo(&querybuf, "SELECT 1 FROM %s%s x",
                         fk_only, fkrelname);
        /* Append one "$N <op> fkattN" qual per key column */
        querysep = "WHERE";
        for (int i = 0; i < riinfo->nkeys; i++)
        {
            Oid         pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);
            Oid         fk_type = RIAttType(fk_rel, riinfo->fk_attnums[i]);

            quoteOneName(attname,
                         RIAttName(fk_rel, riinfo->fk_attnums[i]));
            sprintf(paramname, "$%d", i + 1);
            ri_GenerateQual(&querybuf, querysep,
                            paramname, pk_type,
                            riinfo->pf_eq_oprs[i],
                            attname, fk_type);
            querysep = "AND";
            queryoids[i] = pk_type;
        }

        /*----------
         * For temporal foreign keys, a reference could still be valid if the
         * referenced range didn't change too much. Also if a referencing
         * range extends past the current PK row, we don't want to check that
         * part: some other PK row should fulfill it. We only want to check
         * the part matching the PK record we've changed. Therefore to find
         * invalid records we do this:
         *
         *   SELECT 1 FROM [ONLY] <fktable> x WHERE $1 = x.fkatt1 [AND ...]
         *   -- begin temporal
         *   AND $n && x.fkperiod
         *   AND NOT coalesce((x.fkperiod * $n) <@
         *     (SELECT range_agg(r)
         *      FROM   (SELECT y.pkperiod r
         *              FROM   [ONLY] <pktable> y
         *              WHERE  $1 = y.pkatt1 [AND ...] AND $n && y.pkperiod
         *              FOR KEY SHARE OF y) y2), false)
         *   -- end temporal
         *   FOR KEY SHARE OF x
         *
         * We need the coalesce in case the first subquery returns no rows.
         * We need the second subquery because FOR KEY SHARE doesn't support
         * aggregate queries.
         */
        if (riinfo->hasperiod && is_no_action)
        {
            Oid         pk_period_type = RIAttType(pk_rel, riinfo->pk_attnums[riinfo->nkeys - 1]);
            Oid         fk_period_type = RIAttType(fk_rel, riinfo->fk_attnums[riinfo->nkeys - 1]);
            StringInfoData intersectbuf;
            StringInfoData replacementsbuf;
            char       *pk_only = pk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ?
                "" : "ONLY ";

            quoteOneName(attname, RIAttName(fk_rel, riinfo->fk_attnums[riinfo->nkeys - 1]));
            sprintf(paramname, "$%d", riinfo->nkeys);
            appendStringInfoString(&querybuf, " AND NOT coalesce(");

            /* Intersect the fk with the old pk range */
            initStringInfo(&intersectbuf);
            appendStringInfoChar(&intersectbuf, '(');
            ri_GenerateQual(&intersectbuf, "",
                            attname, fk_period_type,
                            riinfo->period_intersect_oper,
                            paramname, pk_period_type);
            appendStringInfoChar(&intersectbuf, ')');

            /* Find the remaining history */
            initStringInfo(&replacementsbuf);
            appendStringInfoString(&replacementsbuf, "(SELECT pg_catalog.range_agg(r) FROM ");
            quoteOneName(periodattname, RIAttName(pk_rel, riinfo->pk_attnums[riinfo->nkeys - 1]));
            quoteRelationName(pkrelname, pk_rel);
            appendStringInfo(&replacementsbuf, "(SELECT y.%s r FROM %s%s y",
                             periodattname, pk_only, pkrelname);

            /* Restrict pk rows to what matches */
            querysep = "WHERE";
            for (int i = 0; i < riinfo->nkeys; i++)
            {
                Oid         pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);

                quoteOneName(attname,
                             RIAttName(pk_rel, riinfo->pk_attnums[i]));
                sprintf(paramname, "$%d", i + 1);
                ri_GenerateQual(&replacementsbuf, querysep,
                                paramname, pk_type,
                                riinfo->pp_eq_oprs[i],
                                attname, pk_type);
                querysep = "AND";
                queryoids[i] = pk_type;
            }
            appendStringInfoString(&replacementsbuf, " FOR KEY SHARE OF y) y2)");

            /* Assemble: NOT coalesce((fk * $n) <@ (remaining history), false) */
            ri_GenerateQual(&querybuf, "",
                            intersectbuf.data, fk_period_type,
                            riinfo->agged_period_contained_by_oper,
                            replacementsbuf.data, ANYMULTIRANGEOID);
            /* end of coalesce: */
            appendStringInfoString(&querybuf, ", false)");
        }
        appendStringInfoString(&querybuf, " FOR KEY SHARE OF x");

        /* Prepare and save the plan */
        qplan = ri_PlanCheck(querybuf.data, riinfo->nkeys, queryoids,
                             &qkey, fk_rel, pk_rel);
    }

    /*
     * We have a plan now. Run it to check for existing references.
     */
    ri_PerformCheck(riinfo, &qkey, qplan,
                    fk_rel, pk_rel,
                    oldslot, NULL,
                    !is_no_action,
                    true,       /* must detect new rows */
                    SPI_OK_SELECT);

    if (SPI_finish() != SPI_OK_FINISH)
        elog(ERROR, "SPI_finish failed");

    table_close(fk_rel, RowShareLock);

    return PointerGetDatum(NULL);
}
/*
 * RI_FKey_cascade_del -
 *
 * Cascaded delete foreign key references at delete event on PK table.
 */
Datum
RI_FKey_cascade_del(PG_FUNCTION_ARGS)
{
    TriggerData *trigdata = (TriggerData *) fcinfo->context;
    const RI_ConstraintInfo *riinfo;
    Relation    fk_rel;
    Relation    pk_rel;
    TupleTableSlot *oldslot;
    RI_QueryKey qkey;
    SPIPlanPtr  qplan;

    /* Check that this is a valid trigger call on the right time and event. */
    ri_CheckTrigger(fcinfo, "RI_FKey_cascade_del", RI_TRIGTYPE_DELETE);

    riinfo = ri_FetchConstraintInfo(trigdata->tg_trigger,
                                    trigdata->tg_relation, true);

    /*
     * Get the relation descriptors of the FK and PK tables and the old tuple.
     *
     * fk_rel is opened in RowExclusiveLock mode since that's what our
     * eventual DELETE will get on it.
     */
    fk_rel = table_open(riinfo->fk_relid, RowExclusiveLock);
    pk_rel = trigdata->tg_relation;
    oldslot = trigdata->tg_trigslot;

    SPI_connect();

    /* Fetch or prepare a saved plan for the cascaded delete */
    ri_BuildQueryKey(&qkey, riinfo, RI_PLAN_CASCADE_ONDELETE);

    if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
    {
        StringInfoData querybuf;
        char        fkrelname[MAX_QUOTED_REL_NAME_LEN];
        char        attname[MAX_QUOTED_NAME_LEN];
        char        paramname[16];
        const char *querysep;
        Oid         queryoids[RI_MAX_NUMKEYS];
        const char *fk_only;

        /* ----------
         * The query string built is
         *  DELETE FROM [ONLY] <fktable> WHERE $1 = fkatt1 [AND ...]
         * The type id's for the $ parameters are those of the
         * corresponding PK attributes.
         * ----------
         */
        initStringInfo(&querybuf);
        /* Partitioned tables must be scanned with their children, so no ONLY */
        fk_only = fk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ?
            "" : "ONLY ";
        quoteRelationName(fkrelname, fk_rel);
        appendStringInfo(&querybuf, "DELETE FROM %s%s",
                         fk_only, fkrelname);
        /* Append one "$N <op> fkattN" qual per key column */
        querysep = "WHERE";
        for (int i = 0; i < riinfo->nkeys; i++)
        {
            Oid         pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);
            Oid         fk_type = RIAttType(fk_rel, riinfo->fk_attnums[i]);

            quoteOneName(attname,
                         RIAttName(fk_rel, riinfo->fk_attnums[i]));
            sprintf(paramname, "$%d", i + 1);
            ri_GenerateQual(&querybuf, querysep,
                            paramname, pk_type,
                            riinfo->pf_eq_oprs[i],
                            attname, fk_type);
            querysep = "AND";
            queryoids[i] = pk_type;
        }

        /* Prepare and save the plan */
        qplan = ri_PlanCheck(querybuf.data, riinfo->nkeys, queryoids,
                             &qkey, fk_rel, pk_rel);
    }

    /*
     * We have a plan now. Build up the arguments from the key values in the
     * deleted PK tuple and delete the referencing rows
     */
    ri_PerformCheck(riinfo, &qkey, qplan,
                    fk_rel, pk_rel,
                    oldslot, NULL,
                    false,
                    true,       /* must detect new rows */
                    SPI_OK_DELETE);

    if (SPI_finish() != SPI_OK_FINISH)
        elog(ERROR, "SPI_finish failed");

    table_close(fk_rel, RowExclusiveLock);

    return PointerGetDatum(NULL);
}
/*
 * RI_FKey_cascade_upd -
 *
 * Cascaded update foreign key references at update event on PK table.
 */
Datum
RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
{
    TriggerData *trigdata = (TriggerData *) fcinfo->context;
    const RI_ConstraintInfo *riinfo;
    Relation    fk_rel;
    Relation    pk_rel;
    TupleTableSlot *newslot;
    TupleTableSlot *oldslot;
    RI_QueryKey qkey;
    SPIPlanPtr  qplan;

    /* Check that this is a valid trigger call on the right time and event. */
    ri_CheckTrigger(fcinfo, "RI_FKey_cascade_upd", RI_TRIGTYPE_UPDATE);

    riinfo = ri_FetchConstraintInfo(trigdata->tg_trigger,
                                    trigdata->tg_relation, true);

    /*
     * Get the relation descriptors of the FK and PK tables and the new and
     * old tuple.
     *
     * fk_rel is opened in RowExclusiveLock mode since that's what our
     * eventual UPDATE will get on it.
     */
    fk_rel = table_open(riinfo->fk_relid, RowExclusiveLock);
    pk_rel = trigdata->tg_relation;
    newslot = trigdata->tg_newslot;
    oldslot = trigdata->tg_trigslot;

    SPI_connect();

    /* Fetch or prepare a saved plan for the cascaded update */
    ri_BuildQueryKey(&qkey, riinfo, RI_PLAN_CASCADE_ONUPDATE);

    if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
    {
        StringInfoData querybuf;
        StringInfoData qualbuf;
        char        fkrelname[MAX_QUOTED_REL_NAME_LEN];
        char        attname[MAX_QUOTED_NAME_LEN];
        char        paramname[16];
        const char *querysep;
        const char *qualsep;
        Oid         queryoids[RI_MAX_NUMKEYS * 2];
        const char *fk_only;

        /* ----------
         * The query string built is
         *  UPDATE [ONLY] <fktable> SET fkatt1 = $1 [, ...]
         *          WHERE $n = fkatt1 [AND ...]
         * The type id's for the $ parameters are those of the
         * corresponding PK attributes. Note that we are assuming
         * there is an assignment cast from the PK to the FK type;
         * else the parser will fail.
         * ----------
         */
        initStringInfo(&querybuf);
        initStringInfo(&qualbuf);
        /* Partitioned tables must be scanned with their children, so no ONLY */
        fk_only = fk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ?
            "" : "ONLY ";
        quoteRelationName(fkrelname, fk_rel);
        appendStringInfo(&querybuf, "UPDATE %s%s SET",
                         fk_only, fkrelname);

        /*
         * Parameters $1..$nkeys carry the new key values (SET list);
         * $nkeys+1..$2*nkeys carry the old values (WHERE clause).
         */
        querysep = "";
        qualsep = "WHERE";
        for (int i = 0, j = riinfo->nkeys; i < riinfo->nkeys; i++, j++)
        {
            Oid         pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);
            Oid         fk_type = RIAttType(fk_rel, riinfo->fk_attnums[i]);

            quoteOneName(attname,
                         RIAttName(fk_rel, riinfo->fk_attnums[i]));
            appendStringInfo(&querybuf,
                             "%s %s = $%d",
                             querysep, attname, i + 1);
            sprintf(paramname, "$%d", j + 1);
            ri_GenerateQual(&qualbuf, qualsep,
                            paramname, pk_type,
                            riinfo->pf_eq_oprs[i],
                            attname, fk_type);
            querysep = ",";
            qualsep = "AND";
            queryoids[i] = pk_type;
            queryoids[j] = pk_type;
        }
        appendBinaryStringInfo(&querybuf, qualbuf.data, qualbuf.len);

        /* Prepare and save the plan */
        qplan = ri_PlanCheck(querybuf.data, riinfo->nkeys * 2, queryoids,
                             &qkey, fk_rel, pk_rel);
    }

    /*
     * We have a plan now. Run it to update the existing references.
     */
    ri_PerformCheck(riinfo, &qkey, qplan,
                    fk_rel, pk_rel,
                    oldslot, newslot,
                    false,
                    true,       /* must detect new rows */
                    SPI_OK_UPDATE);

    if (SPI_finish() != SPI_OK_FINISH)
        elog(ERROR, "SPI_finish failed");

    table_close(fk_rel, RowExclusiveLock);

    return PointerGetDatum(NULL);
}
/*
* RI_FKey_setnull_del -
*
* Set foreign key references to NULL values at delete event on PK table.
*/
Datum
RI_FKey_setnull_del(PG_FUNCTION_ARGS)
{
/* Check that this is a valid trigger call on the right time and event. */
ri_CheckTrigger(fcinfo, "RI_FKey_setnull_del", RI_TRIGTYPE_DELETE);
/* Share code with UPDATE case */
return ri_set((TriggerData *) fcinfo->context, true, RI_TRIGTYPE_DELETE);
}
/*
* RI_FKey_setnull_upd -
*
* Set foreign key references to NULL at update event on PK table.
*/
Datum
RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
{
/* Check that this is a valid trigger call on the right time and event. */
ri_CheckTrigger(fcinfo, "RI_FKey_setnull_upd", RI_TRIGTYPE_UPDATE);
/* Share code with DELETE case */
return ri_set((TriggerData *) fcinfo->context, true, RI_TRIGTYPE_UPDATE);
}
/*
* RI_FKey_setdefault_del -
*
* Set foreign key references to defaults at delete event on PK table.
*/
Datum
RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
{
/* Check that this is a valid trigger call on the right time and event. */
ri_CheckTrigger(fcinfo, "RI_FKey_setdefault_del", RI_TRIGTYPE_DELETE);
/* Share code with UPDATE case */
return ri_set((TriggerData *) fcinfo->context, false, RI_TRIGTYPE_DELETE);
}
/*
* RI_FKey_setdefault_upd -
*
* Set foreign key references to defaults at update event on PK table.
*/
Datum
RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
{
/* Check that this is a valid trigger call on the right time and event. */
ri_CheckTrigger(fcinfo, "RI_FKey_setdefault_upd", RI_TRIGTYPE_UPDATE);
/* Share code with DELETE case */
return ri_set((TriggerData *) fcinfo->context, false, RI_TRIGTYPE_UPDATE);
}
/*
 * ri_set -
 *
 * Common code for ON DELETE SET NULL, ON DELETE SET DEFAULT, ON UPDATE SET
 * NULL, and ON UPDATE SET DEFAULT.
 */
static Datum
ri_set(TriggerData *trigdata, bool is_set_null, int tgkind)
{
    const RI_ConstraintInfo *riinfo;
    Relation    fk_rel;
    Relation    pk_rel;
    TupleTableSlot *oldslot;
    RI_QueryKey qkey;
    SPIPlanPtr  qplan;
    int32       queryno;

    riinfo = ri_FetchConstraintInfo(trigdata->tg_trigger,
                                    trigdata->tg_relation, true);

    /*
     * Get the relation descriptors of the FK and PK tables and the old tuple.
     *
     * fk_rel is opened in RowExclusiveLock mode since that's what our
     * eventual UPDATE will get on it.
     */
    fk_rel = table_open(riinfo->fk_relid, RowExclusiveLock);
    pk_rel = trigdata->tg_relation;
    oldslot = trigdata->tg_trigslot;

    SPI_connect();

    /*
     * Fetch or prepare a saved plan for the trigger.
     */
    /* Four distinct plans: {SET NULL, SET DEFAULT} x {UPDATE, DELETE} */
    switch (tgkind)
    {
        case RI_TRIGTYPE_UPDATE:
            queryno = is_set_null
                ? RI_PLAN_SETNULL_ONUPDATE
                : RI_PLAN_SETDEFAULT_ONUPDATE;
            break;
        case RI_TRIGTYPE_DELETE:
            queryno = is_set_null
                ? RI_PLAN_SETNULL_ONDELETE
                : RI_PLAN_SETDEFAULT_ONDELETE;
            break;
        default:
            elog(ERROR, "invalid tgkind passed to ri_set");
    }

    ri_BuildQueryKey(&qkey, riinfo, queryno);

    if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
    {
        StringInfoData querybuf;
        char        fkrelname[MAX_QUOTED_REL_NAME_LEN];
        char        attname[MAX_QUOTED_NAME_LEN];
        char        paramname[16];
        const char *querysep;
        const char *qualsep;
        Oid         queryoids[RI_MAX_NUMKEYS];
        const char *fk_only;
        int         num_cols_to_set;
        const int16 *set_cols;

        /* Decide which FK columns the SET list will assign to */
        switch (tgkind)
        {
            case RI_TRIGTYPE_UPDATE:
                num_cols_to_set = riinfo->nkeys;
                set_cols = riinfo->fk_attnums;
                break;
            case RI_TRIGTYPE_DELETE:

                /*
                 * If confdelsetcols are present, then we only update the
                 * columns specified in that array, otherwise we update all
                 * the referencing columns.
                 */
                if (riinfo->ndelsetcols != 0)
                {
                    num_cols_to_set = riinfo->ndelsetcols;
                    set_cols = riinfo->confdelsetcols;
                }
                else
                {
                    num_cols_to_set = riinfo->nkeys;
                    set_cols = riinfo->fk_attnums;
                }
                break;
            default:
                elog(ERROR, "invalid tgkind passed to ri_set");
        }

        /* ----------
         * The query string built is
         *  UPDATE [ONLY] <fktable> SET fkatt1 = {NULL|DEFAULT} [, ...]
         *          WHERE $1 = fkatt1 [AND ...]
         * The type id's for the $ parameters are those of the
         * corresponding PK attributes.
         * ----------
         */
        initStringInfo(&querybuf);
        /* Partitioned tables must be scanned with their children, so no ONLY */
        fk_only = fk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ?
            "" : "ONLY ";
        quoteRelationName(fkrelname, fk_rel);
        appendStringInfo(&querybuf, "UPDATE %s%s SET",
                         fk_only, fkrelname);

        /*
         * Add assignment clauses
         */
        /* The SET list uses literal NULL/DEFAULT, so it consumes no $ params */
        querysep = "";
        for (int i = 0; i < num_cols_to_set; i++)
        {
            quoteOneName(attname, RIAttName(fk_rel, set_cols[i]));
            appendStringInfo(&querybuf,
                             "%s %s = %s",
                             querysep, attname,
                             is_set_null ? "NULL" : "DEFAULT");
            querysep = ",";
        }

        /*
         * Add WHERE clause
         */
        qualsep = "WHERE";
        for (int i = 0; i < riinfo->nkeys; i++)
        {
            Oid         pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);
            Oid         fk_type = RIAttType(fk_rel, riinfo->fk_attnums[i]);

            quoteOneName(attname,
                         RIAttName(fk_rel, riinfo->fk_attnums[i]));
            sprintf(paramname, "$%d", i + 1);
            ri_GenerateQual(&querybuf, qualsep,
                            paramname, pk_type,
                            riinfo->pf_eq_oprs[i],
                            attname, fk_type);
            qualsep = "AND";
            queryoids[i] = pk_type;
        }

        /* Prepare and save the plan */
        qplan = ri_PlanCheck(querybuf.data, riinfo->nkeys, queryoids,
                             &qkey, fk_rel, pk_rel);
    }

    /*
     * We have a plan now. Run it to update the existing references.
     */
    ri_PerformCheck(riinfo, &qkey, qplan,
                    fk_rel, pk_rel,
                    oldslot, NULL,
                    false,
                    true,       /* must detect new rows */
                    SPI_OK_UPDATE);

    if (SPI_finish() != SPI_OK_FINISH)
        elog(ERROR, "SPI_finish failed");

    table_close(fk_rel, RowExclusiveLock);

    if (is_set_null)
        return PointerGetDatum(NULL);
    else
    {
        /*
         * If we just deleted or updated the PK row whose key was equal to the
         * FK columns' default values, and a referencing row exists in the FK
         * table, we would have updated that row to the same values it already
         * had --- and RI_FKey_fk_upd_check_required would hence believe no
         * check is necessary. So we need to do another lookup now and in
         * case a reference still exists, abort the operation. That is
         * already implemented in the NO ACTION trigger, so just run it. (This
         * recheck is only needed in the SET DEFAULT case, since CASCADE would
         * remove such rows in case of a DELETE operation or would change the
         * FK key values in case of an UPDATE, while SET NULL is certain to
         * result in rows that satisfy the FK constraint.)
         */
        return ri_restrict(trigdata, true);
    }
}
/*
 * RI_FKey_pk_upd_check_required -
 *
 * Check if we really need to fire the RI trigger for an update or delete to a PK
 * relation. This is called by the AFTER trigger queue manager to see if
 * it can skip queuing an instance of an RI trigger. Returns true if the
 * trigger must be fired, false if we can prove the constraint will still
 * be satisfied.
 *
 * newslot will be NULL if this is called for a delete.
 */
bool
RI_FKey_pk_upd_check_required(Trigger *trigger, Relation pk_rel,
                              TupleTableSlot *oldslot, TupleTableSlot *newslot)
{
    const RI_ConstraintInfo *riinfo = ri_FetchConstraintInfo(trigger, pk_rel, true);

    /*
     * A NULL anywhere in the old key means no FK row can have referenced
     * this row, so the trigger can be skipped.
     */
    if (ri_NullCheck(RelationGetDescr(pk_rel), oldslot, riinfo, true) != RI_KEYS_NONE_NULL)
        return false;

    /*
     * For an update (newslot non-NULL) whose key values are unchanged, the
     * constraint cannot have been affected; otherwise the trigger must fire.
     */
    if (newslot != NULL && ri_KeysEqual(pk_rel, oldslot, newslot, riinfo, true))
        return false;

    return true;
}
/*
 * RI_FKey_fk_upd_check_required -
 *
 * Check if we really need to fire the RI trigger for an update to an FK
 * relation. This is called by the AFTER trigger queue manager to see if
 * it can skip queuing an instance of an RI trigger. Returns true if the
 * trigger must be fired, false if we can prove the constraint will still
 * be satisfied.
 */
bool
RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
                              TupleTableSlot *oldslot, TupleTableSlot *newslot)
{
    const RI_ConstraintInfo *riinfo;
    int         ri_nullcheck;

    /*
     * AfterTriggerSaveEvent() handles things such that this function is never
     * called for partitioned tables.
     */
    Assert(fk_rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE);

    riinfo = ri_FetchConstraintInfo(trigger, fk_rel, false);

    /* Classify the NEW row's key columns: all-null, some-null, or none-null */
    ri_nullcheck = ri_NullCheck(RelationGetDescr(fk_rel), newslot, riinfo, false);

    /*
     * If all new key values are NULL, the row satisfies the constraint, so no
     * check is needed.
     */
    if (ri_nullcheck == RI_KEYS_ALL_NULL)
        return false;

    /*
     * If some new key values are NULL, the behavior depends on the match
     * type.
     */
    else if (ri_nullcheck == RI_KEYS_SOME_NULL)
    {
        switch (riinfo->confmatchtype)
        {
            case FKCONSTR_MATCH_SIMPLE:

                /*
                 * If any new key value is NULL, the row must satisfy the
                 * constraint, so no check is needed.
                 */
                return false;

            case FKCONSTR_MATCH_PARTIAL:

                /*
                 * Don't know, must run full check.
                 */
                break;

            case FKCONSTR_MATCH_FULL:

                /*
                 * If some new key values are NULL, the row fails the
                 * constraint. We must not throw error here, because the row
                 * might get invalidated before the constraint is to be
                 * checked, but we should queue the event to apply the check
                 * later.
                 */
                return true;
        }
    }

    /*
     * Continues here for no new key values are NULL, or we couldn't decide
     * yet.
     */

    /*
     * If the original row was inserted by our own transaction, we must fire
     * the trigger whether or not the keys are equal. This is because our
     * UPDATE will invalidate the INSERT so that the INSERT RI trigger will
     * not do anything; so we had better do the UPDATE check. (We could skip
     * this if we knew the INSERT trigger already fired, but there is no easy
     * way to know that.)
     */
    if (slot_is_current_xact_tuple(oldslot))
        return true;

    /* If all old and new key values are equal, no check is needed */
    if (ri_KeysEqual(fk_rel, oldslot, newslot, riinfo, false))
        return false;

    /* Else we need to fire the trigger. */
    return true;
}
/*
* RI_Initial_Check -
*
* Check an entire table for non-matching values using a single query.
* This is not a trigger procedure, but is called during ALTER TABLE
* ADD FOREIGN KEY to validate the initial table contents.
*
* We expect that the caller has made provision to prevent any problems
* caused by concurrent actions. This could be either by locking rel and
* pkrel at ShareRowExclusiveLock or higher, or by otherwise ensuring
* that triggers implementing the checks are already active.
* Hence, we do not need to lock individual rows for the check.
*
* If the check fails because the current user doesn't have permissions
* to read both tables, return false to let our caller know that they will
* need to do something else to check the constraint.
*/
bool
RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
{
	const RI_ConstraintInfo *riinfo;
	StringInfoData querybuf;
	char		pkrelname[MAX_QUOTED_REL_NAME_LEN];
	char		fkrelname[MAX_QUOTED_REL_NAME_LEN];
	char		pkattname[MAX_QUOTED_NAME_LEN + 3]; /* +3 leaves room for "pk." */
	char		fkattname[MAX_QUOTED_NAME_LEN + 3]; /* +3 leaves room for "fk." */
	RangeTblEntry *rte;
	RTEPermissionInfo *pk_perminfo;
	RTEPermissionInfo *fk_perminfo;
	List	   *rtes = NIL;
	List	   *perminfos = NIL;
	const char *sep;
	const char *fk_only;
	const char *pk_only;
	int			save_nestlevel;
	char		workmembuf[32];
	int			spi_result;
	SPIPlanPtr	qplan;

	/* "false" = the trigger's table is the FK (referencing) side here */
	riinfo = ri_FetchConstraintInfo(trigger, fk_rel, false);

	/*
	 * Check to make sure current user has enough permissions to do the test
	 * query.  (If not, caller can fall back to the trigger method, which
	 * works because it changes user IDs on the fly.)
	 *
	 * XXX are there any other show-stopper conditions to check?
	 */

	/* Build a fake RTE + RTEPermissionInfo pair for the PK table ... */
	pk_perminfo = makeNode(RTEPermissionInfo);
	pk_perminfo->relid = RelationGetRelid(pk_rel);
	pk_perminfo->requiredPerms = ACL_SELECT;
	perminfos = lappend(perminfos, pk_perminfo);
	rte = makeNode(RangeTblEntry);
	rte->rtekind = RTE_RELATION;
	rte->relid = RelationGetRelid(pk_rel);
	rte->relkind = pk_rel->rd_rel->relkind;
	rte->rellockmode = AccessShareLock;
	/* perminfoindex is 1-based: the entry we just appended to perminfos */
	rte->perminfoindex = list_length(perminfos);
	rtes = lappend(rtes, rte);

	/* ... and the same for the FK table */
	fk_perminfo = makeNode(RTEPermissionInfo);
	fk_perminfo->relid = RelationGetRelid(fk_rel);
	fk_perminfo->requiredPerms = ACL_SELECT;
	perminfos = lappend(perminfos, fk_perminfo);
	rte = makeNode(RangeTblEntry);
	rte->rtekind = RTE_RELATION;
	rte->relid = RelationGetRelid(fk_rel);
	rte->relkind = fk_rel->rd_rel->relkind;
	rte->rellockmode = AccessShareLock;
	rte->perminfoindex = list_length(perminfos);
	rtes = lappend(rtes, rte);

	/*
	 * Require SELECT on each key column of both tables.  Attribute numbers
	 * in selectedCols bitmapsets are offset by
	 * FirstLowInvalidHeapAttributeNumber, per the usual convention.
	 */
	for (int i = 0; i < riinfo->nkeys; i++)
	{
		int			attno;

		attno = riinfo->pk_attnums[i] - FirstLowInvalidHeapAttributeNumber;
		pk_perminfo->selectedCols = bms_add_member(pk_perminfo->selectedCols, attno);
		attno = riinfo->fk_attnums[i] - FirstLowInvalidHeapAttributeNumber;
		fk_perminfo->selectedCols = bms_add_member(fk_perminfo->selectedCols, attno);
	}

	/* Punt (caller falls back to row-at-a-time checks) if no permission */
	if (!ExecCheckPermissions(rtes, perminfos, false))
		return false;

	/*
	 * Also punt if RLS is enabled on either table unless this role has the
	 * bypassrls right or is the table owner of the table(s) involved which
	 * have RLS enabled.
	 */
	if (!has_bypassrls_privilege(GetUserId()) &&
		((pk_rel->rd_rel->relrowsecurity &&
		  !object_ownercheck(RelationRelationId, RelationGetRelid(pk_rel),
							 GetUserId())) ||
		 (fk_rel->rd_rel->relrowsecurity &&
		  !object_ownercheck(RelationRelationId, RelationGetRelid(fk_rel),
							 GetUserId()))))
		return false;

	/*----------
	 * The query string built is:
	 *	SELECT fk.keycols FROM [ONLY] relname fk
	 *	 LEFT OUTER JOIN [ONLY] pkrelname pk
	 *	 ON (pk.pkkeycol1=fk.keycol1 [AND ...])
	 *	 WHERE pk.pkkeycol1 IS NULL AND
	 * For MATCH SIMPLE:
	 *	 (fk.keycol1 IS NOT NULL [AND ...])
	 * For MATCH FULL:
	 *	 (fk.keycol1 IS NOT NULL [OR ...])
	 *
	 * We attach COLLATE clauses to the operators when comparing columns
	 * that have different collations.
	 *----------
	 */
	initStringInfo(&querybuf);
	appendStringInfoString(&querybuf, "SELECT ");
	sep = "";
	for (int i = 0; i < riinfo->nkeys; i++)
	{
		quoteOneName(fkattname,
					 RIAttName(fk_rel, riinfo->fk_attnums[i]));
		appendStringInfo(&querybuf, "%sfk.%s", sep, fkattname);
		sep = ", ";
	}

	quoteRelationName(pkrelname, pk_rel);
	quoteRelationName(fkrelname, fk_rel);

	/*
	 * For partitioned tables we must scan all partitions, so omit ONLY;
	 * otherwise restrict the scan to the named table itself.
	 */
	fk_only = fk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ?
		"" : "ONLY ";
	pk_only = pk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ?
		"" : "ONLY ";
	appendStringInfo(&querybuf,
					 " FROM %s%s fk LEFT OUTER JOIN %s%s pk ON",
					 fk_only, fkrelname, pk_only, pkrelname);

	/* Prime the attname buffers with the aliases; quoting fills offset 3+ */
	strcpy(pkattname, "pk.");
	strcpy(fkattname, "fk.");
	sep = "(";
	for (int i = 0; i < riinfo->nkeys; i++)
	{
		Oid			pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);
		Oid			fk_type = RIAttType(fk_rel, riinfo->fk_attnums[i]);
		Oid			pk_coll = RIAttCollation(pk_rel, riinfo->pk_attnums[i]);
		Oid			fk_coll = RIAttCollation(fk_rel, riinfo->fk_attnums[i]);

		quoteOneName(pkattname + 3,
					 RIAttName(pk_rel, riinfo->pk_attnums[i]));
		quoteOneName(fkattname + 3,
					 RIAttName(fk_rel, riinfo->fk_attnums[i]));
		ri_GenerateQual(&querybuf, sep,
						pkattname, pk_type,
						riinfo->pf_eq_oprs[i],
						fkattname, fk_type);
		/* Only need an explicit COLLATE when the two sides disagree */
		if (pk_coll != fk_coll)
			ri_GenerateQualCollation(&querybuf, pk_coll);
		sep = "AND";
	}

	/*
	 * It's sufficient to test any one pk attribute for null to detect a join
	 * failure.
	 */
	quoteOneName(pkattname, RIAttName(pk_rel, riinfo->pk_attnums[0]));
	appendStringInfo(&querybuf, ") WHERE pk.%s IS NULL AND (", pkattname);

	/*
	 * MATCH SIMPLE requires all FK columns non-null for the row to be
	 * checked at all (AND); MATCH FULL flags any partially-null row (OR).
	 */
	sep = "";
	for (int i = 0; i < riinfo->nkeys; i++)
	{
		quoteOneName(fkattname, RIAttName(fk_rel, riinfo->fk_attnums[i]));
		appendStringInfo(&querybuf,
						 "%sfk.%s IS NOT NULL",
						 sep, fkattname);
		switch (riinfo->confmatchtype)
		{
			case FKCONSTR_MATCH_SIMPLE:
				sep = " AND ";
				break;
			case FKCONSTR_MATCH_FULL:
				sep = " OR ";
				break;
		}
	}
	appendStringInfoChar(&querybuf, ')');

	/*
	 * Temporarily increase work_mem so that the check query can be executed
	 * more efficiently.  It seems okay to do this because the query is simple
	 * enough to not use a multiple of work_mem, and one typically would not
	 * have many large foreign-key validations happening concurrently.  So
	 * this seems to meet the criteria for being considered a "maintenance"
	 * operation, and accordingly we use maintenance_work_mem.  However, we
	 * must also set hash_mem_multiplier to 1, since it is surely not okay to
	 * let that get applied to the maintenance_work_mem value.
	 *
	 * We use the equivalent of a function SET option to allow the setting to
	 * persist for exactly the duration of the check query.  guc.c also takes
	 * care of undoing the setting on error.
	 */
	save_nestlevel = NewGUCNestLevel();

	snprintf(workmembuf, sizeof(workmembuf), "%d", maintenance_work_mem);
	(void) set_config_option("work_mem", workmembuf,
							 PGC_USERSET, PGC_S_SESSION,
							 GUC_ACTION_SAVE, true, 0, false);
	(void) set_config_option("hash_mem_multiplier", "1",
							 PGC_USERSET, PGC_S_SESSION,
							 GUC_ACTION_SAVE, true, 0, false);

	SPI_connect();

	/*
	 * Generate the plan.  We don't need to cache it, and there are no
	 * arguments to the plan.
	 */
	qplan = SPI_prepare(querybuf.data, 0, NULL);

	if (qplan == NULL)
		elog(ERROR, "SPI_prepare returned %s for %s",
			 SPI_result_code_string(SPI_result), querybuf.data);

	/*
	 * Run the plan.  For safety we force a current snapshot to be used. (In
	 * transaction-snapshot mode, this arguably violates transaction isolation
	 * rules, but we really haven't got much choice.)  We don't need to
	 * register the snapshot, because SPI_execute_snapshot will see to it. We
	 * need at most one tuple returned, so pass limit = 1.
	 */
	spi_result = SPI_execute_snapshot(qplan,
									  NULL, NULL,
									  GetLatestSnapshot(),
									  InvalidSnapshot,
									  true, false, 1);

	/* Check result */
	if (spi_result != SPI_OK_SELECT)
		elog(ERROR, "SPI_execute_snapshot returned %s", SPI_result_code_string(spi_result));

	/* Did we find a tuple violating the constraint? */
	if (SPI_processed > 0)
	{
		TupleTableSlot *slot;
		HeapTuple	tuple = SPI_tuptable->vals[0];
		TupleDesc	tupdesc = SPI_tuptable->tupdesc;
		RI_ConstraintInfo fake_riinfo;

		slot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);

		heap_deform_tuple(tuple, tupdesc,
						  slot->tts_values, slot->tts_isnull);
		ExecStoreVirtualTuple(slot);

		/*
		 * The columns to look at in the result tuple are 1..N, not whatever
		 * they are in the fk_rel.  Hack up riinfo so that the subroutines
		 * called here will behave properly.
		 *
		 * In addition to this, we have to pass the correct tupdesc to
		 * ri_ReportViolation, overriding its normal habit of using the pk_rel
		 * or fk_rel's tupdesc.
		 */
		memcpy(&fake_riinfo, riinfo, sizeof(RI_ConstraintInfo));
		for (int i = 0; i < fake_riinfo.nkeys; i++)
			fake_riinfo.fk_attnums[i] = i + 1;

		/*
		 * If it's MATCH FULL, and there are any nulls in the FK keys,
		 * complain about that rather than the lack of a match.  MATCH FULL
		 * disallows partially-null FK rows.
		 */
		if (fake_riinfo.confmatchtype == FKCONSTR_MATCH_FULL &&
			ri_NullCheck(tupdesc, slot, &fake_riinfo, false) != RI_KEYS_NONE_NULL)
			ereport(ERROR,
					(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
					 errmsg("insert or update on table \"%s\" violates foreign key constraint \"%s\"",
							RelationGetRelationName(fk_rel),
							NameStr(fake_riinfo.conname)),
					 errdetail("MATCH FULL does not allow mixing of null and nonnull key values."),
					 errtableconstraint(fk_rel,
										NameStr(fake_riinfo.conname))));

		/*
		 * We tell ri_ReportViolation we were doing the RI_PLAN_CHECK_LOOKUPPK
		 * query, which isn't true, but will cause it to use
		 * fake_riinfo.fk_attnums as we need.
		 */
		ri_ReportViolation(&fake_riinfo,
						   pk_rel, fk_rel,
						   slot, tupdesc,
						   RI_PLAN_CHECK_LOOKUPPK, false, false);

		ExecDropSingleTupleTableSlot(slot);
	}

	if (SPI_finish() != SPI_OK_FINISH)
		elog(ERROR, "SPI_finish failed");

	/*
	 * Restore work_mem and hash_mem_multiplier.
	 */
	AtEOXact_GUC(true, save_nestlevel);

	return true;
}
/*
* RI_PartitionRemove_Check -
*
* Verify no referencing values exist, when a partition is detached on
* the referenced side of a foreign key constraint.
*/
void
RI_PartitionRemove_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
{
	const RI_ConstraintInfo *riinfo;
	StringInfoData querybuf;
	char	   *constraintDef;
	char		pkrelname[MAX_QUOTED_REL_NAME_LEN];
	char		fkrelname[MAX_QUOTED_REL_NAME_LEN];
	char		pkattname[MAX_QUOTED_NAME_LEN + 3]; /* +3 leaves room for "pk." */
	char		fkattname[MAX_QUOTED_NAME_LEN + 3]; /* +3 leaves room for "fk." */
	const char *sep;
	const char *fk_only;
	int			save_nestlevel;
	char		workmembuf[32];
	int			spi_result;
	SPIPlanPtr	qplan;
	int			i;

	/* "false" = the trigger's table is the FK (referencing) side here */
	riinfo = ri_FetchConstraintInfo(trigger, fk_rel, false);

	/*
	 * We don't check permissions before displaying the error message, on the
	 * assumption that the user detaching the partition must have enough
	 * privileges to examine the table contents anyhow.
	 */

	/*----------
	 * The query string built is:
	 *	SELECT fk.keycols FROM [ONLY] relname fk
	 *	 JOIN pkrelname pk
	 *	 ON (pk.pkkeycol1=fk.keycol1 [AND ...])
	 *	 WHERE (<partition constraint>) AND
	 * For MATCH SIMPLE:
	 *	 (fk.keycol1 IS NOT NULL [AND ...])
	 * For MATCH FULL:
	 *	 (fk.keycol1 IS NOT NULL [OR ...])
	 *
	 * We attach COLLATE clauses to the operators when comparing columns
	 * that have different collations.
	 *----------
	 */
	initStringInfo(&querybuf);
	appendStringInfoString(&querybuf, "SELECT ");
	sep = "";
	for (i = 0; i < riinfo->nkeys; i++)
	{
		quoteOneName(fkattname,
					 RIAttName(fk_rel, riinfo->fk_attnums[i]));
		appendStringInfo(&querybuf, "%sfk.%s", sep, fkattname);
		sep = ", ";
	}

	quoteRelationName(pkrelname, pk_rel);
	quoteRelationName(fkrelname, fk_rel);

	/* Scan all partitions of a partitioned FK table; else restrict to ONLY */
	fk_only = fk_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ?
		"" : "ONLY ";
	appendStringInfo(&querybuf,
					 " FROM %s%s fk JOIN %s pk ON",
					 fk_only, fkrelname, pkrelname);

	/* Prime the attname buffers with the aliases; quoting fills offset 3+ */
	strcpy(pkattname, "pk.");
	strcpy(fkattname, "fk.");
	sep = "(";
	for (i = 0; i < riinfo->nkeys; i++)
	{
		Oid			pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);
		Oid			fk_type = RIAttType(fk_rel, riinfo->fk_attnums[i]);
		Oid			pk_coll = RIAttCollation(pk_rel, riinfo->pk_attnums[i]);
		Oid			fk_coll = RIAttCollation(fk_rel, riinfo->fk_attnums[i]);

		quoteOneName(pkattname + 3,
					 RIAttName(pk_rel, riinfo->pk_attnums[i]));
		quoteOneName(fkattname + 3,
					 RIAttName(fk_rel, riinfo->fk_attnums[i]));
		ri_GenerateQual(&querybuf, sep,
						pkattname, pk_type,
						riinfo->pf_eq_oprs[i],
						fkattname, fk_type);
		/* Only need an explicit COLLATE when the two sides disagree */
		if (pk_coll != fk_coll)
			ri_GenerateQualCollation(&querybuf, pk_coll);
		sep = "AND";
	}

	/*
	 * Start the WHERE clause with the partition constraint (except if this is
	 * the default partition and there's no other partition, because the
	 * partition constraint is the empty string in that case.)
	 */
	constraintDef = pg_get_partconstrdef_string(RelationGetRelid(pk_rel), "pk");
	if (constraintDef && constraintDef[0] != '\0')
		appendStringInfo(&querybuf, ") WHERE %s AND (",
						 constraintDef);
	else
		appendStringInfoString(&querybuf, ") WHERE (");

	/* MATCH SIMPLE: all keys non-null (AND); MATCH FULL: any non-null (OR) */
	sep = "";
	for (i = 0; i < riinfo->nkeys; i++)
	{
		quoteOneName(fkattname, RIAttName(fk_rel, riinfo->fk_attnums[i]));
		appendStringInfo(&querybuf,
						 "%sfk.%s IS NOT NULL",
						 sep, fkattname);
		switch (riinfo->confmatchtype)
		{
			case FKCONSTR_MATCH_SIMPLE:
				sep = " AND ";
				break;
			case FKCONSTR_MATCH_FULL:
				sep = " OR ";
				break;
		}
	}
	appendStringInfoChar(&querybuf, ')');

	/*
	 * Temporarily increase work_mem so that the check query can be executed
	 * more efficiently.  It seems okay to do this because the query is simple
	 * enough to not use a multiple of work_mem, and one typically would not
	 * have many large foreign-key validations happening concurrently.  So
	 * this seems to meet the criteria for being considered a "maintenance"
	 * operation, and accordingly we use maintenance_work_mem.  However, we
	 * must also set hash_mem_multiplier to 1, since it is surely not okay to
	 * let that get applied to the maintenance_work_mem value.
	 *
	 * We use the equivalent of a function SET option to allow the setting to
	 * persist for exactly the duration of the check query.  guc.c also takes
	 * care of undoing the setting on error.
	 */
	save_nestlevel = NewGUCNestLevel();

	snprintf(workmembuf, sizeof(workmembuf), "%d", maintenance_work_mem);
	(void) set_config_option("work_mem", workmembuf,
							 PGC_USERSET, PGC_S_SESSION,
							 GUC_ACTION_SAVE, true, 0, false);
	(void) set_config_option("hash_mem_multiplier", "1",
							 PGC_USERSET, PGC_S_SESSION,
							 GUC_ACTION_SAVE, true, 0, false);

	SPI_connect();

	/*
	 * Generate the plan.  We don't need to cache it, and there are no
	 * arguments to the plan.
	 */
	qplan = SPI_prepare(querybuf.data, 0, NULL);

	if (qplan == NULL)
		elog(ERROR, "SPI_prepare returned %s for %s",
			 SPI_result_code_string(SPI_result), querybuf.data);

	/*
	 * Run the plan.  For safety we force a current snapshot to be used. (In
	 * transaction-snapshot mode, this arguably violates transaction isolation
	 * rules, but we really haven't got much choice.)  We don't need to
	 * register the snapshot, because SPI_execute_snapshot will see to it. We
	 * need at most one tuple returned, so pass limit = 1.
	 */
	spi_result = SPI_execute_snapshot(qplan,
									  NULL, NULL,
									  GetLatestSnapshot(),
									  InvalidSnapshot,
									  true, false, 1);

	/* Check result */
	if (spi_result != SPI_OK_SELECT)
		elog(ERROR, "SPI_execute_snapshot returned %s", SPI_result_code_string(spi_result));

	/* Did we find a tuple that would violate the constraint? */
	if (SPI_processed > 0)
	{
		TupleTableSlot *slot;
		HeapTuple	tuple = SPI_tuptable->vals[0];
		TupleDesc	tupdesc = SPI_tuptable->tupdesc;
		RI_ConstraintInfo fake_riinfo;

		slot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);

		heap_deform_tuple(tuple, tupdesc,
						  slot->tts_values, slot->tts_isnull);
		ExecStoreVirtualTuple(slot);

		/*
		 * The columns to look at in the result tuple are 1..N, not whatever
		 * they are in the fk_rel.  Hack up riinfo so that ri_ReportViolation
		 * will behave properly.
		 *
		 * In addition to this, we have to pass the correct tupdesc to
		 * ri_ReportViolation, overriding its normal habit of using the pk_rel
		 * or fk_rel's tupdesc.
		 */
		memcpy(&fake_riinfo, riinfo, sizeof(RI_ConstraintInfo));
		for (i = 0; i < fake_riinfo.nkeys; i++)
			fake_riinfo.pk_attnums[i] = i + 1;

		/* queryno 0 and partgone=true select the "still referenced" report */
		ri_ReportViolation(&fake_riinfo, pk_rel, fk_rel,
						   slot, tupdesc, 0, false, true);
	}

	if (SPI_finish() != SPI_OK_FINISH)
		elog(ERROR, "SPI_finish failed");

	/*
	 * Restore work_mem and hash_mem_multiplier.
	 */
	AtEOXact_GUC(true, save_nestlevel);
}
/* ----------
* Local functions below
* ----------
*/
/*
* quoteOneName --- safely quote a single SQL name
*
* buffer must be MAX_QUOTED_NAME_LEN long (includes room for \0)
*/
static void
quoteOneName(char *buffer, const char *name)
{
	const char *src = name;
	char	   *dst = buffer;

	/*
	 * Unconditionally wrap the name in double quotes, doubling any embedded
	 * quote characters, rather than trying to decide whether quoting is
	 * actually needed.
	 */
	*dst++ = '"';
	for (; *src != '\0'; src++)
	{
		if (*src == '"')
			*dst++ = '"';
		*dst++ = *src;
	}
	*dst++ = '"';
	*dst = '\0';
}
/*
* quoteRelationName --- safely quote a fully qualified relation name
*
* buffer must be MAX_QUOTED_REL_NAME_LEN long (includes room for \0)
*/
static void
quoteRelationName(char *buffer, Relation rel)
{
	char	   *ptr;

	/* Emit "schema"."relname", quoting each part independently */
	quoteOneName(buffer, get_namespace_name(RelationGetNamespace(rel)));
	ptr = buffer + strlen(buffer);
	*ptr++ = '.';
	quoteOneName(ptr, RelationGetRelationName(rel));
}
/*
* ri_GenerateQual --- generate a WHERE clause equating two variables
*
* This basically appends " sep leftop op rightop" to buf, adding casts
* and schema qualification as needed to ensure that the parser will select
* the operator we specify. leftop and rightop should be parenthesized
* if they aren't variables or parameters.
*/
static void
ri_GenerateQual(StringInfo buf,
				const char *sep,
				const char *leftop, Oid leftoptype,
				Oid opoid,
				const char *rightop, Oid rightoptype)
{
	/* Emit " <sep> " then the schema-qualified, cast-safe operator clause */
	appendStringInfoChar(buf, ' ');
	appendStringInfoString(buf, sep);
	appendStringInfoChar(buf, ' ');
	generate_operator_clause(buf, leftop, leftoptype, opoid,
							 rightop, rightoptype);
}
/*
* ri_GenerateQualCollation --- add a COLLATE spec to a WHERE clause
*
* We only have to use this function when directly comparing the referencing
* and referenced columns, if they are of different collations; else the
* parser will fail to resolve the collation to use. We don't need to use
* this function for RI queries that compare a variable to a $n parameter.
* Since parameter symbols always have default collation, the effect will be
* to use the variable's collation.
*
* Note that we require that the collations of the referencing and the
* referenced column have the same notion of equality: Either they have to
* both be deterministic or else they both have to be the same. (See also
* ATAddForeignKeyConstraint().)
*/
static void
ri_GenerateQualCollation(StringInfo buf, Oid collation)
{
	HeapTuple	tp;
	Form_pg_collation colltup;
	char		namebuf[MAX_QUOTED_NAME_LEN];

	/* Noncollatable data types need no COLLATE clause */
	if (!OidIsValid(collation))
		return;

	tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collation));
	if (!HeapTupleIsValid(tp))
		elog(ERROR, "cache lookup failed for collation %u", collation);
	colltup = (Form_pg_collation) GETSTRUCT(tp);

	/*
	 * Always schema-qualify the collation name, for simplicity and so the
	 * generated query is independent of search_path.
	 */
	quoteOneName(namebuf, get_namespace_name(colltup->collnamespace));
	appendStringInfo(buf, " COLLATE %s", namebuf);
	quoteOneName(namebuf, NameStr(colltup->collname));
	appendStringInfo(buf, ".%s", namebuf);

	ReleaseSysCache(tp);
}
/* ----------
* ri_BuildQueryKey -
*
* Construct a hashtable key for a prepared SPI plan of an FK constraint.
*
* key: output argument, *key is filled in based on the other arguments
* riinfo: info derived from pg_constraint entry
* constr_queryno: an internal number identifying the query type
* (see RI_PLAN_XXX constants at head of file)
* ----------
*/
static void
ri_BuildQueryKey(RI_QueryKey *key, const RI_ConstraintInfo *riinfo,
				 int32 constr_queryno)
{
	/*
	 * For every query type except RI_PLAN_CHECK_LOOKUPPK_FROM_PK, the query
	 * runs against the *other* table of the FK relationship (not the one the
	 * trigger fired on), so it is identical for every member of an
	 * inheritance/partition tree.  Keying the plan cache on the root
	 * constraint's OID therefore lets all those members share one SPI plan,
	 * saving much work and memory with many similarly-constrained
	 * partitions.
	 *
	 * (Each constraint still needs its own RI_ConstraintInfo, though, since
	 * partitions may order their columns differently, giving different
	 * pk_attnums[]/fk_attnums[] contents.)
	 *
	 * We assume struct RI_QueryKey contains no padding bytes, else we'd need
	 * to use memset to clear them.
	 */
	if (constr_queryno == RI_PLAN_CHECK_LOOKUPPK_FROM_PK)
		key->constr_id = riinfo->constraint_id;
	else
		key->constr_id = riinfo->constraint_root_id;
	key->constr_queryno = constr_queryno;
}
/*
* Check that RI trigger function was called in expected context
*/
static void
ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname, int tgkind)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	/* Must be invoked through the trigger manager, never called directly */
	if (!CALLED_AS_TRIGGER(fcinfo))
		ereport(ERROR,
				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				 errmsg("function \"%s\" was not called by trigger manager", funcname)));

	/* RI triggers are always AFTER ROW */
	if (!TRIGGER_FIRED_AFTER(trigdata->tg_event) ||
		!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
		ereport(ERROR,
				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				 errmsg("function \"%s\" must be fired AFTER ROW", funcname)));

	/* Finally, the firing event must match the trigger kind we expect */
	if (tgkind == RI_TRIGTYPE_INSERT)
	{
		if (!TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
			ereport(ERROR,
					(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
					 errmsg("function \"%s\" must be fired for INSERT", funcname)));
	}
	else if (tgkind == RI_TRIGTYPE_UPDATE)
	{
		if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
			ereport(ERROR,
					(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
					 errmsg("function \"%s\" must be fired for UPDATE", funcname)));
	}
	else if (tgkind == RI_TRIGTYPE_DELETE)
	{
		if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
			ereport(ERROR,
					(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
					 errmsg("function \"%s\" must be fired for DELETE", funcname)));
	}
}
/*
* Fetch the RI_ConstraintInfo struct for the trigger's FK constraint.
*/
static const RI_ConstraintInfo *
ri_FetchConstraintInfo(Trigger *trigger, Relation trig_rel, bool rel_is_pk)
{
	Oid			constraintOid = trigger->tgconstraint;
	const RI_ConstraintInfo *riinfo;
	Oid			expected_fk_relid;
	Oid			expected_pk_relid;

	/*
	 * Check that the FK constraint's OID is available; it might not be if
	 * we've been invoked via an ordinary trigger or an old-style "constraint
	 * trigger".
	 */
	if (!OidIsValid(constraintOid))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
				 errmsg("no pg_constraint entry for trigger \"%s\" on table \"%s\"",
						trigger->tgname, RelationGetRelationName(trig_rel)),
				 errhint("Remove this referential integrity trigger and its mates, then do ALTER TABLE ADD CONSTRAINT.")));

	/* Find or create a hashtable entry for the constraint */
	riinfo = ri_LoadConstraintInfo(constraintOid);

	/*
	 * Cross-check against the trigger call data: trig_rel must be the side
	 * the caller says it is, and tgconstrrelid must be the other side.
	 */
	if (rel_is_pk)
	{
		expected_fk_relid = trigger->tgconstrrelid;
		expected_pk_relid = RelationGetRelid(trig_rel);
	}
	else
	{
		expected_fk_relid = RelationGetRelid(trig_rel);
		expected_pk_relid = trigger->tgconstrrelid;
	}
	if (riinfo->fk_relid != expected_fk_relid ||
		riinfo->pk_relid != expected_pk_relid)
		elog(ERROR, "wrong pg_constraint entry for trigger \"%s\" on table \"%s\"",
			 trigger->tgname, RelationGetRelationName(trig_rel));

	/* Reject match types we don't know, and the unimplemented one */
	if (riinfo->confmatchtype != FKCONSTR_MATCH_FULL &&
		riinfo->confmatchtype != FKCONSTR_MATCH_PARTIAL &&
		riinfo->confmatchtype != FKCONSTR_MATCH_SIMPLE)
		elog(ERROR, "unrecognized confmatchtype: %d",
			 riinfo->confmatchtype);

	if (riinfo->confmatchtype == FKCONSTR_MATCH_PARTIAL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("MATCH PARTIAL not yet implemented")));

	return riinfo;
}
/*
* Fetch or create the RI_ConstraintInfo struct for an FK constraint.
*/
static const RI_ConstraintInfo *
ri_LoadConstraintInfo(Oid constraintOid)
{
	RI_ConstraintInfo *riinfo;
	bool		found;
	HeapTuple	tup;
	Form_pg_constraint conForm;

	/*
	 * On the first call initialize the hashtable
	 */
	if (!ri_constraint_cache)
		ri_InitHashTables();

	/*
	 * Find or create a hash entry.  If we find a valid one, just return it.
	 */
	riinfo = (RI_ConstraintInfo *) hash_search(ri_constraint_cache,
											   &constraintOid,
											   HASH_ENTER, &found);
	if (!found)
		riinfo->valid = false;	/* new entry: not usable until fully filled */
	else if (riinfo->valid)
		return riinfo;

	/*
	 * Fetch the pg_constraint row so we can fill in the entry.
	 */
	tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constraintOid));
	if (!HeapTupleIsValid(tup)) /* should not happen */
		elog(ERROR, "cache lookup failed for constraint %u", constraintOid);
	conForm = (Form_pg_constraint) GETSTRUCT(tup);

	if (conForm->contype != CONSTRAINT_FOREIGN) /* should not happen */
		elog(ERROR, "constraint %u is not a foreign key constraint",
			 constraintOid);

	/* And extract data */
	Assert(riinfo->constraint_id == constraintOid);
	/* Resolve the topmost parent for plan-cache sharing (see ri_BuildQueryKey) */
	if (OidIsValid(conForm->conparentid))
		riinfo->constraint_root_id =
			get_ri_constraint_root(conForm->conparentid);
	else
		riinfo->constraint_root_id = constraintOid;
	/* Remember both hash values so invalidations on either OID find us */
	riinfo->oidHashValue = GetSysCacheHashValue1(CONSTROID,
												 ObjectIdGetDatum(constraintOid));
	riinfo->rootHashValue = GetSysCacheHashValue1(CONSTROID,
												  ObjectIdGetDatum(riinfo->constraint_root_id));
	memcpy(&riinfo->conname, &conForm->conname, sizeof(NameData));
	riinfo->pk_relid = conForm->confrelid;
	riinfo->fk_relid = conForm->conrelid;
	riinfo->confupdtype = conForm->confupdtype;
	riinfo->confdeltype = conForm->confdeltype;
	riinfo->confmatchtype = conForm->confmatchtype;
	riinfo->hasperiod = conForm->conperiod;
	DeconstructFkConstraintRow(tup,
							   &riinfo->nkeys,
							   riinfo->fk_attnums,
							   riinfo->pk_attnums,
							   riinfo->pf_eq_oprs,
							   riinfo->pp_eq_oprs,
							   riinfo->ff_eq_oprs,
							   &riinfo->ndelsetcols,
							   riinfo->confdelsetcols);

	/*
	 * For temporal FKs, get the operators and functions we need.  We ask the
	 * opclass of the PK element for these.  This all gets cached (as does the
	 * generated plan), so there's no performance issue.
	 */
	if (riinfo->hasperiod)
	{
		Oid			opclass = get_index_column_opclass(conForm->conindid, riinfo->nkeys);

		FindFKPeriodOpers(opclass,
						  &riinfo->period_contained_by_oper,
						  &riinfo->agged_period_contained_by_oper,
						  &riinfo->period_intersect_oper);
	}

	ReleaseSysCache(tup);

	/*
	 * For efficient processing of invalidation messages below, we keep a
	 * doubly-linked count list of all currently valid entries.  Mark the
	 * entry valid only now that it is completely filled in, so an error
	 * partway through leaves it invalid and it will be rebuilt next time.
	 */
	dclist_push_tail(&ri_constraint_cache_valid_list, &riinfo->valid_link);
	riinfo->valid = true;

	return riinfo;
}
/*
* get_ri_constraint_root
* Returns the OID of the constraint's root parent
*/
static Oid
get_ri_constraint_root(Oid constrOid)
{
	Oid			parentOid;

	/* Follow conparentid links upward until we hit a parentless constraint */
	do
	{
		HeapTuple	tuple;

		tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constrOid));
		if (!HeapTupleIsValid(tuple))
			elog(ERROR, "cache lookup failed for constraint %u", constrOid);
		parentOid = ((Form_pg_constraint) GETSTRUCT(tuple))->conparentid;
		ReleaseSysCache(tuple);

		if (OidIsValid(parentOid))
			constrOid = parentOid;	/* climb one level and keep going */
	} while (OidIsValid(parentOid));

	return constrOid;
}
/*
* Callback for pg_constraint inval events
*
* While most syscache callbacks just flush all their entries, pg_constraint
* gets enough update traffic that it's probably worth being smarter.
* Invalidate any ri_constraint_cache entry associated with the syscache
* entry with the specified hash value, or all entries if hashvalue == 0.
*
* Note: at the time a cache invalidation message is processed there may be
* active references to the cache. Because of this we never remove entries
* from the cache, but only mark them invalid, which is harmless to active
* uses. (Any query using an entry should hold a lock sufficient to keep that
* data from changing under it --- but we may get cache flushes anyway.)
*/
static void
InvalidateConstraintCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
{
	dlist_mutable_iter iter;

	Assert(ri_constraint_cache != NULL);

	/*
	 * If the list of currently valid entries gets excessively large, mark
	 * them all invalid so we can empty the list.  This avoids O(N^2)
	 * behavior when a session touches many foreign keys and also performs
	 * many ALTER TABLEs, e.g. during a restore from pg_dump.
	 */
	if (dclist_count(&ri_constraint_cache_valid_list) > 1000)
		hashvalue = 0;			/* treat it as a full cache reset */

	dclist_foreach_modify(iter, &ri_constraint_cache_valid_list)
	{
		RI_ConstraintInfo *entry = dclist_container(RI_ConstraintInfo,
													valid_link, iter.cur);
		bool		affected;

		/*
		 * An entry is affected if it matches the given hash value directly,
		 * or via its root constraint (the invalidation may be for a root
		 * whose children we also cache); hashvalue == 0 hits everything.
		 */
		affected = (hashvalue == 0 ||
					entry->oidHashValue == hashvalue ||
					entry->rootHashValue == hashvalue);
		if (!affected)
			continue;

		entry->valid = false;
		/* Invalid entries leave the valid-list immediately */
		dclist_delete_from(&ri_constraint_cache_valid_list, iter.cur);
	}
}
/*
* Prepare execution plan for a query to enforce an RI restriction
*/
static SPIPlanPtr
ri_PlanCheck(const char *querystr, int nargs, Oid *argtypes,
			 RI_QueryKey *qkey, Relation fk_rel, Relation pk_rel)
{
	SPIPlanPtr	qplan;
	Relation	query_rel;
	Oid			save_userid;
	int			save_sec_context;

	/*
	 * Use the query type code to determine whether the query is run against
	 * the PK or FK table; we'll do the check as that table's owner
	 */
	if (qkey->constr_queryno <= RI_PLAN_LAST_ON_PK)
		query_rel = pk_rel;
	else
		query_rel = fk_rel;

	/*
	 * Switch to proper UID to perform check as.  SECURITY_NOFORCE_RLS is set
	 * so that the table owner's FORCE ROW LEVEL SECURITY setting does not
	 * interfere with the check query's planning.
	 */
	GetUserIdAndSecContext(&save_userid, &save_sec_context);
	SetUserIdAndSecContext(RelationGetForm(query_rel)->relowner,
						   save_sec_context | SECURITY_LOCAL_USERID_CHANGE |
						   SECURITY_NOFORCE_RLS);

	/* Create the plan */
	qplan = SPI_prepare(querystr, nargs, argtypes);

	if (qplan == NULL)
		elog(ERROR, "SPI_prepare returned %s for %s", SPI_result_code_string(SPI_result), querystr);

	/* Restore UID and security context */
	SetUserIdAndSecContext(save_userid, save_sec_context);

	/*
	 * Save the plan: keep it beyond SPI_finish, and register it in the
	 * query-key hashtable so later trigger firings can reuse it.
	 */
	SPI_keepplan(qplan);
	ri_HashPreparedPlan(qkey, qplan);

	return qplan;
}
/*
* Perform a query to enforce an RI restriction
*/
static bool
ri_PerformCheck(const RI_ConstraintInfo *riinfo,
				RI_QueryKey *qkey, SPIPlanPtr qplan,
				Relation fk_rel, Relation pk_rel,
				TupleTableSlot *oldslot, TupleTableSlot *newslot,
				bool is_restrict,
				bool detectNewRows, int expect_OK)
{
	Relation	query_rel,
				source_rel;
	bool		source_is_pk;
	Snapshot	test_snapshot;
	Snapshot	crosscheck_snapshot;
	int			limit;
	int			spi_result;
	Oid			save_userid;
	int			save_sec_context;
	/* 2x size: some queries take both new-row and old-row key values */
	Datum		vals[RI_MAX_NUMKEYS * 2];
	char		nulls[RI_MAX_NUMKEYS * 2];

	/*
	 * Use the query type code to determine whether the query is run against
	 * the PK or FK table; we'll do the check as that table's owner
	 */
	if (qkey->constr_queryno <= RI_PLAN_LAST_ON_PK)
		query_rel = pk_rel;
	else
		query_rel = fk_rel;

	/*
	 * The values for the query are taken from the table on which the trigger
	 * is called - it is normally the other one with respect to query_rel. An
	 * exception is ri_Check_Pk_Match(), which uses the PK table for both (and
	 * sets queryno to RI_PLAN_CHECK_LOOKUPPK_FROM_PK).  We might eventually
	 * need some less klugy way to determine this.
	 */
	if (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK)
	{
		source_rel = fk_rel;
		source_is_pk = false;
	}
	else
	{
		source_rel = pk_rel;
		source_is_pk = true;
	}

	/*
	 * Extract the parameters to be passed into the query: new-row keys first,
	 * then (if both slots given, i.e. an UPDATE) old-row keys after them.
	 */
	if (newslot)
	{
		ri_ExtractValues(source_rel, newslot, riinfo, source_is_pk,
						 vals, nulls);
		if (oldslot)
			ri_ExtractValues(source_rel, oldslot, riinfo, source_is_pk,
							 vals + riinfo->nkeys, nulls + riinfo->nkeys);
	}
	else
	{
		ri_ExtractValues(source_rel, oldslot, riinfo, source_is_pk,
						 vals, nulls);
	}

	/*
	 * In READ COMMITTED mode, we just need to use an up-to-date regular
	 * snapshot, and we will see all rows that could be interesting. But in
	 * transaction-snapshot mode, we can't change the transaction snapshot. If
	 * the caller passes detectNewRows == false then it's okay to do the query
	 * with the transaction snapshot; otherwise we use a current snapshot, and
	 * tell the executor to error out if it finds any rows under the current
	 * snapshot that wouldn't be visible per the transaction snapshot.  Note
	 * that SPI_execute_snapshot will register the snapshots, so we don't need
	 * to bother here.
	 */
	if (IsolationUsesXactSnapshot() && detectNewRows)
	{
		CommandCounterIncrement();	/* be sure all my own work is visible */
		test_snapshot = GetLatestSnapshot();
		crosscheck_snapshot = GetTransactionSnapshot();
	}
	else
	{
		/* the default SPI behavior is okay */
		test_snapshot = InvalidSnapshot;
		crosscheck_snapshot = InvalidSnapshot;
	}

	/*
	 * If this is a select query (e.g., for a 'no action' or 'restrict'
	 * trigger), we only need to see if there is a single row in the table,
	 * matching the key.  Otherwise, limit = 0 - because we want the query to
	 * affect ALL the matching rows.
	 */
	limit = (expect_OK == SPI_OK_SELECT) ? 1 : 0;

	/* Switch to proper UID to perform check as */
	GetUserIdAndSecContext(&save_userid, &save_sec_context);
	SetUserIdAndSecContext(RelationGetForm(query_rel)->relowner,
						   save_sec_context | SECURITY_LOCAL_USERID_CHANGE |
						   SECURITY_NOFORCE_RLS);

	/* Finally we can run the query. */
	spi_result = SPI_execute_snapshot(qplan,
									  vals, nulls,
									  test_snapshot, crosscheck_snapshot,
									  false, false, limit);

	/* Restore UID and security context */
	SetUserIdAndSecContext(save_userid, save_sec_context);

	/* Check result */
	if (spi_result < 0)
		elog(ERROR, "SPI_execute_snapshot returned %s", SPI_result_code_string(spi_result));

	if (expect_OK >= 0 && spi_result != expect_OK)
		ereport(ERROR,
				(errcode(ERRCODE_INTERNAL_ERROR),
				 errmsg("referential integrity query on \"%s\" from constraint \"%s\" on \"%s\" gave unexpected result",
						RelationGetRelationName(pk_rel),
						NameStr(riinfo->conname),
						RelationGetRelationName(fk_rel)),
				 errhint("This is most likely due to a rule having rewritten the query.")));

	/*
	 * Report a violation for a SELECT-type check that went the "wrong" way:
	 * an FK-side lookup that found no PK row, or a PK-side restrict check
	 * that found a referencing row.
	 */
	/* XXX wouldn't it be clearer to do this part at the caller? */
	if (qkey->constr_queryno != RI_PLAN_CHECK_LOOKUPPK_FROM_PK &&
		expect_OK == SPI_OK_SELECT &&
		(SPI_processed == 0) == (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK))
		ri_ReportViolation(riinfo,
						   pk_rel, fk_rel,
						   newslot ? newslot : oldslot,
						   NULL,
						   qkey->constr_queryno, is_restrict, false);

	return SPI_processed != 0;
}
/*
* Extract fields from a tuple into Datum/nulls arrays
*/
static void
ri_ExtractValues(Relation rel, TupleTableSlot *slot,
				 const RI_ConstraintInfo *riinfo, bool rel_is_pk,
				 Datum *vals, char *nulls)
{
	/* Pick the key-column list for whichever side of the FK "rel" is */
	const int16 *attnums = rel_is_pk ? riinfo->pk_attnums : riinfo->fk_attnums;

	/* Copy each key column into the SPI parameter arrays ('n' = SQL NULL) */
	for (int i = 0; i < riinfo->nkeys; i++)
	{
		bool		isnull;

		vals[i] = slot_getattr(slot, attnums[i], &isnull);
		nulls[i] = isnull ? 'n' : ' ';
	}
}
/*
* Produce an error report
*
* If the failed constraint was on insert/update to the FK table,
* we want the key names and values extracted from there, and the error
* message to look like 'key blah is not present in PK'.
* Otherwise, the attr names and values come from the PK table and the
* message looks like 'key blah is still referenced from FK'.
*/
static void
ri_ReportViolation(const RI_ConstraintInfo *riinfo,
                   Relation pk_rel, Relation fk_rel,
                   TupleTableSlot *violatorslot, TupleDesc tupdesc,
                   int queryno, bool is_restrict, bool partgone)
{
    StringInfoData key_names;
    StringInfoData key_values;
    bool        onfk;
    const int16 *attnums;
    Oid         rel_oid;
    AclResult   aclresult;
    bool        has_perm = true;

    /*
     * Determine which relation to complain about.  If tupdesc wasn't passed
     * by caller, assume the violator tuple came from there.
     */
    onfk = (queryno == RI_PLAN_CHECK_LOOKUPPK);
    if (onfk)
    {
        attnums = riinfo->fk_attnums;
        rel_oid = fk_rel->rd_id;
        if (tupdesc == NULL)
            tupdesc = fk_rel->rd_att;
    }
    else
    {
        attnums = riinfo->pk_attnums;
        rel_oid = pk_rel->rd_id;
        if (tupdesc == NULL)
            tupdesc = pk_rel->rd_att;
    }

    /*
     * Check permissions- if the user does not have access to view the data in
     * any of the key columns then we don't include the errdetail() below.
     *
     * Check if RLS is enabled on the relation first.  If so, we don't return
     * any specifics to avoid leaking data.
     *
     * Check table-level permissions next and, failing that, column-level
     * privileges.
     *
     * When a partition at the referenced side is being detached/dropped, we
     * needn't check, since the user must be the table owner anyway.
     */
    if (partgone)
        has_perm = true;
    else if (check_enable_rls(rel_oid, InvalidOid, true) != RLS_ENABLED)
    {
        aclresult = pg_class_aclcheck(rel_oid, GetUserId(), ACL_SELECT);
        if (aclresult != ACLCHECK_OK)
        {
            /* Try for column-level permissions */
            for (int idx = 0; idx < riinfo->nkeys; idx++)
            {
                aclresult = pg_attribute_aclcheck(rel_oid, attnums[idx],
                                                  GetUserId(),
                                                  ACL_SELECT);

                /* No access to the key */
                if (aclresult != ACLCHECK_OK)
                {
                    has_perm = false;
                    break;
                }
            }
        }
    }
    else
        has_perm = false;       /* RLS enabled: never reveal key values */

    if (has_perm)
    {
        /* Get printable versions of the keys involved */
        initStringInfo(&key_names);
        initStringInfo(&key_values);
        for (int idx = 0; idx < riinfo->nkeys; idx++)
        {
            int         fnum = attnums[idx];
            Form_pg_attribute att = TupleDescAttr(tupdesc, fnum - 1);
            char       *name,
                       *val;
            Datum       datum;
            bool        isnull;

            name = NameStr(att->attname);

            datum = slot_getattr(violatorslot, fnum, &isnull);
            if (!isnull)
            {
                Oid         foutoid;
                bool        typisvarlena;

                /* Render the Datum via the column type's output function. */
                getTypeOutputInfo(att->atttypid, &foutoid, &typisvarlena);
                val = OidOutputFunctionCall(foutoid, datum);
            }
            else
                val = "null";

            if (idx > 0)
            {
                appendStringInfoString(&key_names, ", ");
                appendStringInfoString(&key_values, ", ");
            }
            appendStringInfoString(&key_names, name);
            appendStringInfoString(&key_values, val);
        }
    }

    /*
     * Pick the message wording for the failure direction: partition going
     * away, FK-side insert/update, RESTRICT action, or other PK-side change.
     */
    if (partgone)
        ereport(ERROR,
                (errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
                 errmsg("removing partition \"%s\" violates foreign key constraint \"%s\"",
                        RelationGetRelationName(pk_rel),
                        NameStr(riinfo->conname)),
                 errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
                           key_names.data, key_values.data,
                           RelationGetRelationName(fk_rel)),
                 errtableconstraint(fk_rel, NameStr(riinfo->conname))));
    else if (onfk)
        ereport(ERROR,
                (errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
                 errmsg("insert or update on table \"%s\" violates foreign key constraint \"%s\"",
                        RelationGetRelationName(fk_rel),
                        NameStr(riinfo->conname)),
                 has_perm ?
                 errdetail("Key (%s)=(%s) is not present in table \"%s\".",
                           key_names.data, key_values.data,
                           RelationGetRelationName(pk_rel)) :
                 errdetail("Key is not present in table \"%s\".",
                           RelationGetRelationName(pk_rel)),
                 errtableconstraint(fk_rel, NameStr(riinfo->conname))));
    else if (is_restrict)
        ereport(ERROR,
                (errcode(ERRCODE_RESTRICT_VIOLATION),
                 errmsg("update or delete on table \"%s\" violates RESTRICT setting of foreign key constraint \"%s\" on table \"%s\"",
                        RelationGetRelationName(pk_rel),
                        NameStr(riinfo->conname),
                        RelationGetRelationName(fk_rel)),
                 has_perm ?
                 errdetail("Key (%s)=(%s) is referenced from table \"%s\".",
                           key_names.data, key_values.data,
                           RelationGetRelationName(fk_rel)) :
                 errdetail("Key is referenced from table \"%s\".",
                           RelationGetRelationName(fk_rel)),
                 errtableconstraint(fk_rel, NameStr(riinfo->conname))));
    else
        ereport(ERROR,
                (errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
                 errmsg("update or delete on table \"%s\" violates foreign key constraint \"%s\" on table \"%s\"",
                        RelationGetRelationName(pk_rel),
                        NameStr(riinfo->conname),
                        RelationGetRelationName(fk_rel)),
                 has_perm ?
                 errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
                           key_names.data, key_values.data,
                           RelationGetRelationName(fk_rel)) :
                 errdetail("Key is still referenced from table \"%s\".",
                           RelationGetRelationName(fk_rel)),
                 errtableconstraint(fk_rel, NameStr(riinfo->conname))));
}
/*
* ri_NullCheck -
*
* Determine the NULL state of all key values in a tuple
*
* Returns one of RI_KEYS_ALL_NULL, RI_KEYS_NONE_NULL or RI_KEYS_SOME_NULL.
*/
static int
ri_NullCheck(TupleDesc tupDesc,
             TupleTableSlot *slot,
             const RI_ConstraintInfo *riinfo, bool rel_is_pk)
{
    const int16 *keycols = rel_is_pk ? riinfo->pk_attnums
                                     : riinfo->fk_attnums;
    int         nulls_seen = 0;

    /* Count NULL key columns, then classify. */
    for (int i = 0; i < riinfo->nkeys; i++)
    {
        if (slot_attisnull(slot, keycols[i]))
            nulls_seen++;
    }

    if (nulls_seen == riinfo->nkeys)
        return RI_KEYS_ALL_NULL;    /* also the zero-key case */
    if (nulls_seen == 0)
        return RI_KEYS_NONE_NULL;
    return RI_KEYS_SOME_NULL;
}
/*
* ri_InitHashTables -
*
* Initialize our internal hash tables.
*/
static void
ri_InitHashTables(void)
{
    HASHCTL     hash_ctl;

    /* Constraint-info cache, keyed by constraint OID. */
    hash_ctl.keysize = sizeof(Oid);
    hash_ctl.entrysize = sizeof(RI_ConstraintInfo);
    ri_constraint_cache = hash_create("RI constraint cache",
                                      RI_INIT_CONSTRAINTHASHSIZE,
                                      &hash_ctl, HASH_ELEM | HASH_BLOBS);

    /* Arrange to flush that cache whenever pg_constraint changes. */
    CacheRegisterSyscacheCallback(CONSTROID,
                                  InvalidateConstraintCacheCallBack,
                                  (Datum) 0);

    /* Prepared-plan cache, keyed by RI_QueryKey. */
    hash_ctl.keysize = sizeof(RI_QueryKey);
    hash_ctl.entrysize = sizeof(RI_QueryHashEntry);
    ri_query_cache = hash_create("RI query cache",
                                 RI_INIT_QUERYHASHSIZE,
                                 &hash_ctl, HASH_ELEM | HASH_BLOBS);

    /* Comparison-function cache, keyed by (operator OID, type OID). */
    hash_ctl.keysize = sizeof(RI_CompareKey);
    hash_ctl.entrysize = sizeof(RI_CompareHashEntry);
    ri_compare_cache = hash_create("RI compare cache",
                                   RI_INIT_QUERYHASHSIZE,
                                   &hash_ctl, HASH_ELEM | HASH_BLOBS);
}
/*
* ri_FetchPreparedPlan -
*
* Lookup for a query key in our private hash table of prepared
* and saved SPI execution plans. Return the plan if found or NULL.
*/
static SPIPlanPtr
ri_FetchPreparedPlan(RI_QueryKey *key)
{
    RI_QueryHashEntry *hentry;
    SPIPlanPtr  saved_plan;

    /* Create the hash tables on first use. */
    if (!ri_query_cache)
        ri_InitHashTables();

    /* Probe for the query key. */
    hentry = (RI_QueryHashEntry *) hash_search(ri_query_cache,
                                               key,
                                               HASH_FIND, NULL);
    if (!hentry)
        return NULL;

    /*
     * Check whether the plan is still valid.  If it isn't, we don't want to
     * simply rely on plancache.c to regenerate it; rather we should start
     * from scratch and rebuild the query text too.  This is to cover cases
     * such as table/column renames.  We depend on the plancache machinery to
     * detect possible invalidations, though.
     *
     * CAUTION: this check is only trustworthy if the caller has already
     * locked both FK and PK rels.
     */
    saved_plan = hentry->plan;
    if (saved_plan && SPI_plan_is_valid(saved_plan))
        return saved_plan;

    /*
     * Stale plan: drop it now so a bit of memory is freed before the caller
     * builds a replacement.
     */
    hentry->plan = NULL;
    if (saved_plan)
        SPI_freeplan(saved_plan);

    return NULL;
}
/*
* ri_HashPreparedPlan -
*
* Add another plan to our private SPI query plan hashtable.
*/
static void
ri_HashPreparedPlan(RI_QueryKey *key, SPIPlanPtr plan)
{
    bool        found;
    RI_QueryHashEntry *hentry;

    /* Create the hash tables on first use. */
    if (!ri_query_cache)
        ri_InitHashTables();

    /*
     * Enter (or re-use) the slot for this key.  An existing entry can only
     * be one that ri_FetchPreparedPlan already invalidated (plan == NULL).
     */
    hentry = (RI_QueryHashEntry *) hash_search(ri_query_cache,
                                               key,
                                               HASH_ENTER, &found);
    Assert(!found || hentry->plan == NULL);
    hentry->plan = plan;
}
/*
* ri_KeysEqual -
*
* Check if all key values in OLD and NEW are "equivalent":
* For normal FKs we check for equality.
* For temporal FKs we check that the PK side is a superset of its old value,
* or the FK side is a subset of its old value.
*
* Note: at some point we might wish to redefine this as checking for
* "IS NOT DISTINCT" rather than "=", that is, allow two nulls to be
* considered equal. Currently there is no need since all callers have
* previously found at least one of the rows to contain no nulls.
*/
static bool
ri_KeysEqual(Relation rel, TupleTableSlot *oldslot, TupleTableSlot *newslot,
             const RI_ConstraintInfo *riinfo, bool rel_is_pk)
{
    /* Key columns to compare: PK-side or FK-side attribute numbers. */
    const int16 *attnums;

    if (rel_is_pk)
        attnums = riinfo->pk_attnums;
    else
        attnums = riinfo->fk_attnums;

    /* XXX: could be worthwhile to fetch all necessary attrs at once */
    for (int i = 0; i < riinfo->nkeys; i++)
    {
        Datum       oldvalue;
        Datum       newvalue;
        bool        isnull;

        /*
         * Get one attribute's oldvalue. If it is NULL - they're not equal.
         */
        oldvalue = slot_getattr(oldslot, attnums[i], &isnull);
        if (isnull)
            return false;

        /*
         * Get one attribute's newvalue. If it is NULL - they're not equal.
         */
        newvalue = slot_getattr(newslot, attnums[i], &isnull);
        if (isnull)
            return false;

        if (rel_is_pk)
        {
            /*
             * If we are looking at the PK table, then do a bytewise
             * comparison.  We must propagate PK changes if the value is
             * changed to one that "looks" different but would compare as
             * equal using the equality operator.  This only makes a
             * difference for ON UPDATE CASCADE, but for consistency we treat
             * all changes to the PK the same.
             */
            CompactAttribute *att = TupleDescCompactAttr(oldslot->tts_tupleDescriptor, attnums[i] - 1);

            if (!datum_image_eq(oldvalue, newvalue, att->attbyval, att->attlen))
                return false;
        }
        else
        {
            Oid         eq_opr;

            /*
             * When comparing the PERIOD columns we can skip the check
             * whenever the referencing column stayed equal or shrank, so test
             * with the contained-by operator instead.
             */
            if (riinfo->hasperiod && i == riinfo->nkeys - 1)
                eq_opr = riinfo->period_contained_by_oper;
            else
                eq_opr = riinfo->ff_eq_oprs[i];

            /*
             * For the FK table, compare with the appropriate equality
             * operator.  Changes that compare equal will still satisfy the
             * constraint after the update.
             *
             * Note the argument order (new, old) matters for the PERIOD
             * contained-by case above; for plain equality it is symmetric.
             */
            if (!ri_CompareWithCast(eq_opr, RIAttType(rel, attnums[i]), RIAttCollation(rel, attnums[i]),
                                    newvalue, oldvalue))
                return false;
        }
    }

    /* Every key column passed its comparison. */
    return true;
}
/*
* ri_CompareWithCast -
*
* Call the appropriate comparison operator for two values.
* Normally this is equality, but for the PERIOD part of foreign keys
* it is ContainedBy, so the order of lhs vs rhs is significant.
* See below for how the collation is applied.
*
* NB: we have already checked that neither value is null.
*/
static bool
ri_CompareWithCast(Oid eq_opr, Oid typeid, Oid collid,
                   Datum lhs, Datum rhs)
{
    /* Look up (and cache) the operator's function info and any cast. */
    RI_CompareHashEntry *entry = ri_HashCompareOp(eq_opr, typeid);

    /* Do we need to cast the values? */
    if (OidIsValid(entry->cast_func_finfo.fn_oid))
    {
        /* Cast both sides so they match the operator's input type. */
        lhs = FunctionCall3(&entry->cast_func_finfo,
                            lhs,
                            Int32GetDatum(-1),  /* typmod */
                            BoolGetDatum(false));   /* implicit coercion */
        rhs = FunctionCall3(&entry->cast_func_finfo,
                            rhs,
                            Int32GetDatum(-1),  /* typmod */
                            BoolGetDatum(false));   /* implicit coercion */
    }

    /*
     * Apply the comparison operator.
     *
     * Note: This function is part of a call stack that determines whether an
     * update to a row is significant enough that it needs checking or action
     * on the other side of a foreign-key constraint.  Therefore, the
     * comparison here would need to be done with the collation of the *other*
     * table.  For simplicity (e.g., we might not even have the other table
     * open), we'll use our own collation.  This is fine because we require
     * that both collations have the same notion of equality (either they are
     * both deterministic or else they are both the same).
     *
     * With range/multirangetypes, the collation of the base type is stored as
     * part of the rangetype (pg_range.rngcollation), and always used, so
     * there is no danger of inconsistency even using a non-equals operator.
     * But if we support arbitrary types with PERIOD, we should perhaps just
     * always force a re-check.
     */
    return DatumGetBool(FunctionCall2Coll(&entry->eq_opr_finfo, collid, lhs, rhs));
}
/*
* ri_HashCompareOp -
*
* See if we know how to compare two values, and create a new hash entry
* if not.
*/
static RI_CompareHashEntry *
ri_HashCompareOp(Oid eq_opr, Oid typeid)
{
    RI_CompareKey key;
    RI_CompareHashEntry *entry;
    bool        found;

    /*
     * On the first call initialize the hashtable
     */
    if (!ri_compare_cache)
        ri_InitHashTables();

    /*
     * Find or create a hash entry.  Note we're assuming RI_CompareKey
     * contains no struct padding.
     */
    key.eq_opr = eq_opr;
    key.typeid = typeid;
    entry = (RI_CompareHashEntry *) hash_search(ri_compare_cache,
                                                &key,
                                                HASH_ENTER, &found);
    /* A freshly created entry starts out invalid until fully filled in. */
    if (!found)
        entry->valid = false;

    /*
     * If not already initialized, do so.  Since we'll keep this hash entry
     * for the life of the backend, put any subsidiary info for the function
     * cache structs into TopMemoryContext.
     */
    if (!entry->valid)
    {
        Oid         lefttype,
                    righttype,
                    castfunc;
        CoercionPathType pathtype;

        /* We always need to know how to call the equality operator */
        fmgr_info_cxt(get_opcode(eq_opr), &entry->eq_opr_finfo,
                      TopMemoryContext);

        /*
         * If we chose to use a cast from FK to PK type, we may have to apply
         * the cast function to get to the operator's input type.
         *
         * XXX eventually it would be good to support array-coercion cases
         * here and in ri_CompareWithCast().  At the moment there is no point
         * because cases involving nonidentical array types will be rejected
         * at constraint creation time.
         *
         * XXX perhaps also consider supporting CoerceViaIO?  No need at the
         * moment since that will never be generated for implicit coercions.
         */
        op_input_types(eq_opr, &lefttype, &righttype);
        Assert(lefttype == righttype);
        if (typeid == lefttype)
            castfunc = InvalidOid;  /* simplest case */
        else
        {
            pathtype = find_coercion_pathway(lefttype, typeid,
                                             COERCION_IMPLICIT,
                                             &castfunc);
            if (pathtype != COERCION_PATH_FUNC &&
                pathtype != COERCION_PATH_RELABELTYPE)
            {
                /*
                 * The declared input type of the eq_opr might be a
                 * polymorphic type such as ANYARRAY or ANYENUM, or other
                 * special cases such as RECORD; find_coercion_pathway
                 * currently doesn't subsume these special cases.
                 */
                if (!IsBinaryCoercible(typeid, lefttype))
                    elog(ERROR, "no conversion function from %s to %s",
                         format_type_be(typeid),
                         format_type_be(lefttype));
            }
        }
        /* Record the cast function, or InvalidOid if none is needed. */
        if (OidIsValid(castfunc))
            fmgr_info_cxt(castfunc, &entry->cast_func_finfo,
                          TopMemoryContext);
        else
            entry->cast_func_finfo.fn_oid = InvalidOid;
        entry->valid = true;
    }

    return entry;
}
/*
* Given a trigger function OID, determine whether it is an RI trigger,
* and if so whether it is attached to PK or FK relation.
*/
/*
 * Given a trigger function OID, determine whether it is an RI trigger,
 * and if so whether it is attached to PK or FK relation.
 */
int
RI_FKey_trigger_type(Oid tgfoid)
{
    switch (tgfoid)
    {
            /* Action triggers: these fire on the referenced (PK) relation. */
        case F_RI_FKEY_CASCADE_DEL:
        case F_RI_FKEY_CASCADE_UPD:
        case F_RI_FKEY_RESTRICT_DEL:
        case F_RI_FKEY_RESTRICT_UPD:
        case F_RI_FKEY_SETNULL_DEL:
        case F_RI_FKEY_SETNULL_UPD:
        case F_RI_FKEY_SETDEFAULT_DEL:
        case F_RI_FKEY_SETDEFAULT_UPD:
        case F_RI_FKEY_NOACTION_DEL:
        case F_RI_FKEY_NOACTION_UPD:
            return RI_TRIGGER_PK;

            /* Check triggers: these fire on the referencing (FK) relation. */
        case F_RI_FKEY_CHECK_INS:
        case F_RI_FKEY_CHECK_UPD:
            return RI_TRIGGER_FK;

        default:
            /* Not one of the built-in RI trigger functions. */
            return RI_TRIGGER_NONE;
    }
}
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2024-04-11 and added to Hugging Face Transformers on 2024-04-10.*
# RecurrentGemma
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
## Overview
The Recurrent Gemma model was proposed in [RecurrentGemma: Moving Past Transformers for Efficient Open Language Models](https://huggingface.co/papers/2404.07839) by the Griffin, RLHF and Gemma Teams of Google.
The abstract from the paper is the following:
*We introduce RecurrentGemma, an open language model which uses Google’s novel Griffin architecture. Griffin combines linear recurrences with local attention to achieve excellent performance on language. It has a fixed-sized state, which reduces memory use and enables efficient inference on long sequences. We provide a pre-trained model with 2B non-embedding parameters, and an instruction tuned variant. Both models achieve comparable performance to Gemma-2B despite being trained on fewer tokens.*
Tips:
- The original checkpoints can be converted using the conversion script [`src/transformers/models/recurrent_gemma/convert_recurrent_gemma_weights_to_hf.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/recurrent_gemma/convert_recurrent_gemma_to_hf.py).
This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The original code can be found [here](https://github.com/google-deepmind/recurrentgemma).
## RecurrentGemmaConfig
[[autodoc]] RecurrentGemmaConfig
## RecurrentGemmaModel
[[autodoc]] RecurrentGemmaModel
- forward
## RecurrentGemmaForCausalLM
[[autodoc]] RecurrentGemmaForCausalLM
- forward
/* Auto-generated by generate-wrappers.py script. Do not modify */
#if defined(__i386__) || defined(__i686__) || defined(__x86_64__)
#include <requantization/q31-ssse3.c>
#endif /* defined(__i386__) || defined(__i686__) || defined(__x86_64__) */
import unittest
from robot.utils.asserts import assert_equals, assert_true, assert_false
from robot.output.logger import Logger
from robot.output.console.verbose import VerboseOutput
class MessageMock:
    """Minimal stand-in for a log message: records when, how severe and what."""

    def __init__(self, timestamp, level, message):
        # Store the constructor arguments verbatim; tests read them back.
        self.message = message
        self.level = level
        self.timestamp = timestamp
class LoggerMock:
    """Logger double that checks messages arrive in the expected order.

    Each expectation is a ``(message, level)`` pair; ``message()`` consumes
    them front-to-back and remembers the last message seen in ``self.msg``.
    """

    def __init__(self, *expected):
        self.expected = list(expected)

    def message(self, msg):
        # Pop the next expectation and compare it with the incoming message.
        expected_message, expected_level = self.expected.pop(0)
        assert_equals(msg.level, expected_level)
        assert_equals(msg.message, expected_message)
        self.msg = msg

    def copy(self):
        # A fresh mock carrying the not-yet-consumed expectations.
        return LoggerMock(*self.expected)
class LoggerMock2(LoggerMock):
    """LoggerMock that also records output_file() and close() calls."""

    def output_file(self, name, path):
        # Rebinding the attribute deliberately shadows this method on the
        # instance so tests can read the recorded (name, path) tuple back.
        self.output_file = (name, path)

    def close(self):
        # Flag that close() was invoked.
        self.closed = True
class TestLogger(unittest.TestCase):
    """Unit tests for robot.output.logger.Logger.

    Logger is exercised through the LoggerMock/LoggerMock2 doubles defined
    above; the mocks themselves assert that each message arrives with the
    expected text and level.
    """

    def setUp(self):
        # Disable the automatic console logger so each test controls exactly
        # which loggers are registered.
        self.logger = Logger(register_console_logger=False)

    def test_write_to_one_logger(self):
        logger = LoggerMock(('Hello, world!', 'INFO'))
        self.logger.register_logger(logger)
        self.logger.write('Hello, world!', 'INFO')
        # Timestamps are generated by Logger and start with the year ('20..').
        assert_true(logger.msg.timestamp.startswith('20'))

    def test_write_to_one_logger_with_trace_level(self):
        logger = LoggerMock(('expected message', 'TRACE'))
        self.logger.register_logger(logger)
        self.logger.write('expected message', 'TRACE')
        assert_true(hasattr(logger, 'msg'))

    def test_write_to_multiple_loggers(self):
        logger = LoggerMock(('Hello, world!', 'INFO'))
        logger2 = logger.copy()
        logger3 = logger.copy()
        self.logger.register_logger(logger, logger2, logger3)
        self.logger.message(MessageMock('', 'INFO', 'Hello, world!'))
        # All registered loggers must receive the very same message object.
        assert_true(logger.msg is logger2.msg)
        assert_true(logger.msg is logger.msg)

    def test_write_multiple_messages(self):
        msgs = [('0', 'ERROR'), ('1', 'WARN'), ('2', 'INFO'), ('3', 'DEBUG'), ('4', 'TRACE')]
        logger = LoggerMock(*msgs)
        self.logger.register_logger(logger)
        for msg, level in msgs:
            self.logger.write(msg, level)
            assert_equals(logger.msg.message, msg)
            assert_equals(logger.msg.level, level)

    def test_all_methods(self):
        logger = LoggerMock2(('Hello, world!', 'INFO'))
        self.logger.register_logger(logger)
        self.logger.output_file('name', 'path')
        self.logger.close()
        assert_equals(logger.output_file, ('name', 'path'))
        assert_true(logger.closed)

    def test_registered_logger_does_not_need_all_methods(self):
        # LoggerMock lacks output_file()/close(); Logger must tolerate that.
        logger = LoggerMock(('Hello, world!', 'INFO'))
        self.logger.register_logger(logger)
        self.logger.output_file('name', 'path')
        self.logger.close()

    def test_close_removes_registered_loggers(self):
        logger = LoggerMock(('Hello, world!', 'INFO'))
        logger2 = LoggerMock2(('Hello, world!', 'INFO'))
        self.logger.register_logger(logger, logger2)
        self.logger.close()
        assert_equals(self.logger._loggers.all_loggers(), [])

    def test_registering_file_logger_with_none_path_does_nothing(self):
        # The string 'None' is the documented sentinel for "no log file".
        self.logger.register_file_logger('None')
        assert_equals(self.logger._loggers.all_loggers(), [])

    def test_cached_messages_are_given_to_registered_writers(self):
        # Messages written before registration are replayed to new loggers.
        self.logger.write('This is a cached message', 'INFO')
        self.logger.write('Another cached message', 'TRACE')
        logger = LoggerMock(('This is a cached message', 'INFO'),
                            ('Another cached message', 'TRACE'))
        self.logger.register_logger(logger)
        assert_equals(logger.msg.message, 'Another cached message')

    def test_message_cache_can_be_turned_off(self):
        self.logger.disable_message_cache()
        self.logger.write('This message is not cached', 'INFO')
        logger = LoggerMock(('', ''))
        self.logger.register_logger(logger)
        # With the cache disabled, registration must not replay anything.
        assert_false(hasattr(logger, 'msg'))

    def test_start_and_end_suite_test_and_keyword(self):
        class MyLogger:
            def start_suite(self, suite): self.started_suite = suite
            def end_suite(self, suite): self.ended_suite = suite
            def start_test(self, test): self.started_test = test
            def end_test(self, test): self.ended_test = test
            def start_keyword(self, keyword): self.started_keyword = keyword
            def end_keyword(self, keyword): self.ended_keyword = keyword
        logger = MyLogger()
        self.logger.register_logger(logger)
        # Drive every start_*/end_* hook and verify the argument is relayed.
        for name in 'suite', 'test', 'keyword':
            for stend in 'start', 'end':
                getattr(self.logger, stend + '_' + name)(name)
                assert_equals(getattr(logger, stend + 'ed_' + name), name)

    def test_verbose_console_output_is_automatically_registered(self):
        logger = Logger()
        assert_true(logger._loggers.all_loggers()[0].start_suite.__self__.__class__ is VerboseOutput)

    def test_loggercollection_is_iterable(self):
        logger = Logger()
        for log in logger._loggers:
            assert_true(log)

    def test_logger_is_iterable(self):
        logger = Logger()
        for log in logger:
            assert_true(log)
        assert_equals(list(logger), list(logger._loggers))

    def test_automatic_console_logger_can_be_disabled(self):
        logger = Logger()
        logger.unregister_console_logger()
        assert_equals(logger._loggers.all_loggers(), [])

    def test_automatic_console_logger_can_be_disabled_after_registering_logger(self):
        logger = Logger()
        mock = LoggerMock()
        logger.register_logger(mock)
        logger.unregister_console_logger()
        # Only the explicitly registered mock should remain.
        self._number_of_registered_loggers_should_be(1, logger)
        assert_true(logger._loggers.all_loggers()[0].message.__self__.__class__ is LoggerMock)

    def test_disabling_automatic_logger_multiple_times_has_no_effect(self):
        logger = Logger()
        logger.unregister_console_logger()
        self._number_of_registered_loggers_should_be(0, logger)
        logger.unregister_console_logger()
        logger.unregister_console_logger()
        self._number_of_registered_loggers_should_be(0, logger)
        logger.register_logger(LoggerMock())
        logger.unregister_console_logger()
        self._number_of_registered_loggers_should_be(1, logger)

    def test_registering_console_logger_disables_automatic_console_logger(self):
        logger = Logger()
        logger.register_console_logger(width=42)
        self._number_of_registered_loggers_should_be(1, logger)
        # The custom width must be propagated to the console writer.
        assert_equals(logger._loggers.all_loggers()[0].start_suite.__self__._writer._width, 42)

    def test_unregister_logger(self):
        logger1, logger2, logger3 = LoggerMock(), LoggerMock(), LoggerMock()
        self.logger.register_logger(logger1, logger2, logger3)
        self.logger.unregister_logger(logger2)
        self._number_of_registered_loggers_should_be(2)
        self.logger.unregister_logger(logger3, logger1)
        self._number_of_registered_loggers_should_be(0)

    def test_unregistering_non_registered_logger_is_ok(self):
        logger1, logger2 = LoggerMock(), LoggerMock()
        self.logger.register_logger(logger1)
        self.logger.unregister_logger(logger2)
        self.logger.unregister_logger(None)

    def test_registering_context_changing_logger(self):
        self.logger.register_context_changing_logger(LoggerMock())
        self._number_of_registered_loggers_should_be(1)

    # NOTE(review): 'chagning' is a typo for 'changing' in this test's name;
    # harmless for unittest discovery, kept as-is in this comment-only pass.
    def test_messages_to_context_chagning_loggers(self):
        log = LoggerMock(('msg', 'INFO'))
        self.logger.register_context_changing_logger(log)
        self.logger.write('msg', 'INFO')
        assert_true(log.msg is not None)

    def test_start_methods_are_called_first_for_context_changing_loggers(self):
        # SecondLogger asserts that FirstLogger (the context-changing one)
        # has already seen each start_* call when SecondLogger receives it.
        class FirstLogger:
            def start_suite(self, suite): self.suite = suite
            def start_test(self, test): self.test = test
            def start_keyword(self, kw): self.kw = kw
        class SecondLogger:
            def __init__(self, logger): self._reference = logger
            def start_suite(self, suite): assert_equals(suite, self._reference.suite)
            def start_test(self, test): assert_equals(test, self._reference.test)
            def start_keyword(self, kw): assert_equals(kw, self._reference.kw)
        log1 = FirstLogger()
        log2 = SecondLogger(log1)
        self.logger.register_logger(log2)
        self.logger.register_context_changing_logger(log1)
        self.logger.start_suite('Suite')
        self.logger.start_test('Test')
        self.logger.start_keyword('Keyword')

    def test_end_methods_are_called_last_for_context_changing_loggers(self):
        # Mirror image of the previous test: the context-changing logger
        # (log2) must receive end_* calls only after the ordinary one (log1).
        class FirstLogger:
            def end_suite(self, suite): self.suite = suite
            def end_test(self, test): self.test = test
            def end_keyword(self, kw): self.kw = kw
        class SecondLogger:
            def __init__(self, logger): self._reference = logger
            def end_suite(self, suite): self.suite = suite; assert_equals(suite, self._reference.suite)
            def end_test(self, test): assert_equals(test, self._reference.test)
            def end_keyword(self, kw): assert_equals(kw, self._reference.kw)
        log1 = FirstLogger()
        log2 = SecondLogger(log1)
        self.logger.register_logger(log1)
        self.logger.register_context_changing_logger(log2)
        self.logger.end_suite('Suite')
        self.logger.end_test('Test')
        self.logger.end_keyword('Keyword')
        assert_true(log2.suite is not None)

    def _number_of_registered_loggers_should_be(self, number, logger=None):
        # Helper assertion; defaults to the Logger created in setUp().
        logger = logger or self.logger
        assert_equals(len(logger._loggers.all_loggers()), number)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_DWARF_ENUMS_H
#define LIEF_DWARF_ENUMS_H
namespace LIEF {
namespace dwarf {
/// Pointer encodings used by DWARF-based exception handling
/// (``.eh_frame`` / ``.eh_frame_hdr``).  The values follow the
/// ``DW_EH_PE_*`` constants from the GCC/Linux Standard Base
/// exception-handling ABI: the low nibble selects the value format,
/// the high nibble selects how the value is applied/relocated.
enum class EH_ENCODING {
  ABSPTR = 0x00,   ///< Absolute, machine-sized pointer (DW_EH_PE_absptr).
  OMIT = 0xff,     ///< No value is present (DW_EH_PE_omit).
  ULEB128 = 0x01,  ///< Unsigned LEB128 value.
  UDATA2 = 0x02,   ///< Unsigned 2-byte value.
  UDATA4 = 0x03,   ///< Unsigned 4-byte value.
  UDATA8 = 0x04,   ///< Unsigned 8-byte value.
  SLEB128 = 0x09,  ///< Signed LEB128 value.
  SDATA2 = 0x0a,   ///< Signed 2-byte value.
  SDATA4 = 0x0b,   ///< Signed 4-byte value.
  SDATA8 = 0x0c,   ///< Signed 8-byte value.
  // NOTE(review): shares SLEB128's value here; in the LSB spec the
  // "signed" flag bit is 0x08 -- verify which meaning is intended.
  SIGNED = 0x09,
  PCREL = 0x10,    ///< Value is relative to the program counter.
  INDIRECT = 0x80, ///< Flag: value is the address of the real value.
  TEXTREL = 0x20,  ///< Value is relative to the text section.
  DATAREL = 0x30,  ///< Value is relative to the data section.
  FUNCREL = 0x40,  ///< Value is relative to the start of the function.
  ALIGNED = 0x50,  ///< Value is aligned to an address-unit boundary.
};
} // dwarf
} // LIEF
#endif
"""SCons.Tool.c++
Tool-specific initialization for generic Posix C++ compilers.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/c++.py 5134 2010/08/16 23:02:40 bdeegan"
import os.path
import SCons.Tool
import SCons.Defaults
import SCons.Util
# Compiler executables probed for by exists(), in preference order.
compilers = ['CC', 'c++']

# File suffixes recognized as C++ sources.
CXXSuffixes = ['.cpp', '.cc', '.cxx', '.c++', '.C++', '.mm']
# On case-sensitive filesystems, uppercase '.C' also denotes C++.
if SCons.Util.case_sensitive_suffixes('.c', '.C'):
    CXXSuffixes.append('.C')
def iscplusplus(source):
    """Return 1 if any node in *source* is built from a C++ source file.

    *source* may be None or empty (e.g. for SConf configure checks), in
    which case 0 is returned.  The check looks only at the first source
    file of each node and matches its suffix against CXXSuffixes.
    """
    if not source:
        # Source might be None for unusual cases like SConf.
        return 0
    for node in source:
        if not node.sources:
            continue
        extension = os.path.splitext(str(node.sources[0]))[1]
        if extension in CXXSuffixes:
            return 1
    return 0
def generate(env):
    """
    Add Builders and construction variables for generic POSIX C++ compilers
    to an Environment.
    """
    import SCons.Tool
    import SCons.Tool.cc
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    # Register the C++ compile actions and object emitters for every C++
    # suffix on both the static- and shared-object builders.
    for suffix in CXXSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CXXAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)

    # Pull in the construction variables shared with the plain C tool.
    SCons.Tool.cc.add_common_cc_variables(env)

    # Static- and shared-object compilation commands and flags.
    env['CXX'] = 'c++'
    env['CXXFLAGS'] = SCons.Util.CLVar('')
    env['CXXCOM'] = '$CXX -o $TARGET -c $CXXFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
    env['SHCXX'] = '$CXX'
    env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
    env['SHCXXCOM'] = '$SHCXX -o $TARGET -c $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'

    # Command-line prefixes/suffixes for preprocessor defines and includes.
    env['CPPDEFPREFIX'] = '-D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '-I'
    env['INCSUFFIX'] = ''

    # Object-file naming conventions.
    env['SHOBJSUFFIX'] = '.os'
    env['OBJSUFFIX'] = '.o'
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0

    env['CXXFILESUFFIX'] = '.cc'
def exists(env):
    """Return a true value if one of the known C++ compilers can be found."""
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
//===--- CodeCompletionContext.h ------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2022 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_IDE_CODECOMPLETIONCONTEXT
#define SWIFT_IDE_CODECOMPLETIONCONTEXT
#include "swift/IDE/CodeCompletionResult.h"
#include "swift/IDE/CodeCompletionResultSink.h"
namespace swift {
namespace ide {
class CodeCompletionCache;
struct RequestedCachedModule;
/// Per-invocation state for a code-completion request: accumulates results
/// and carries the flags/contextual-type information that shape them.
class CodeCompletionContext {
  friend class CodeCompletionResultBuilder;

  /// A set of current completion results.
  CodeCompletionResultSink CurrentResults;

public:
  CodeCompletionCache &Cache;
  CompletionKind CodeCompletionKind = CompletionKind::None;

  /// Module qualified nominal type decl names
  SmallVector<NullTerminatedStringRef, 2> LookedupNominalTypeNames;

  enum class TypeContextKind {
    /// There is no known contextual type. All types are equally good.
    None,

    /// There is a contextual type from e.g a single-expression closure/function
    /// body, where the return is implied. The context is a hint, and enables
    /// unresolved member completion, but should not hide any results.
    Implied,

    /// There are known contextual types, or there aren't but a nonvoid type is
    /// expected.
    Required,
  };

  TypeContextKind typeContextKind = TypeContextKind::None;

  /// Whether there may be members that can use implicit member syntax,
  /// e.g. `x = .foo`.
  bool MayUseImplicitMemberExpr = false;

  /// Flag to indicate that the completion is happening reusing ASTContext
  /// from the previous completion.
  /// NOTE: Do not use this to change the behavior. This is only for debugging.
  bool ReusingASTContext = false;

  CodeCompletionContext(CodeCompletionCache &Cache) : Cache(Cache) {}

  /// Whether results should carry annotated (typed) descriptions.
  void setAnnotateResult(bool flag) { CurrentResults.annotateResult = flag; }
  bool getAnnotateResult() const { return CurrentResults.annotateResult; }

  /// Whether object-literal completions (e.g. #colorLiteral) are offered.
  void setIncludeObjectLiterals(bool flag) {
    CurrentResults.includeObjectLiterals = flag;
  }
  bool includeObjectLiterals() const {
    return CurrentResults.includeObjectLiterals;
  }

  /// Whether initializers are suggested at top level.
  void setAddInitsToTopLevel(bool flag) {
    CurrentResults.addInitsToTopLevel = flag;
  }
  bool getAddInitsToTopLevel() const {
    return CurrentResults.addInitsToTopLevel;
  }

  /// Whether an additional call pattern omitting defaulted arguments is added.
  void setAddCallWithNoDefaultArgs(bool flag) {
    CurrentResults.addCallWithNoDefaultArgs = flag;
  }
  bool addCallWithNoDefaultArgs() const {
    return CurrentResults.addCallWithNoDefaultArgs;
  }

  /// Debug option: verify that result USRs round-trip back to declarations.
  void setVerifyUSRToDecl(bool flag) { CurrentResults.verifyUSRToDecl = flag; }
  bool verifyUSRToDecl() const { return CurrentResults.verifyUSRToDecl; }

  /// Allocate a string owned by the code completion context.
  StringRef copyString(StringRef Str) {
    return Str.copy(*CurrentResults.Allocator);
  }

  /// Sort code completion results in an implementation-defined order
  /// in place.
  static std::vector<CodeCompletionResult *>
  sortCompletionResults(ArrayRef<CodeCompletionResult *> Results);

  CodeCompletionResultSink &getResultSink() { return CurrentResults; }

  /// Add code completion results from the given requested modules to this
  /// context.
  void addResultsFromModules(ArrayRef<RequestedCachedModule> RequestedModules,
                             const ExpectedTypeContext &TypeContext,
                             const DeclContext *DC,
                             bool CanCurrDeclContextHandleAsync);
};
} // end namespace ide
} // end namespace swift
#endif // SWIFT_IDE_CODECOMPLETIONCONTEXT | c | github | https://github.com/apple/swift | include/swift/IDE/CodeCompletionContext.h |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration: relaxes the
    ``Comment.parent_content`` foreign key so that it may be blank/NULL."""

    def forwards(self, orm):
        # Changing field 'Comment.parent_content'
        # Allow a comment to exist without a parent Content row.
        db.alter_column('canvas_comment', 'parent_content_id', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, null=True, to=orm['canvas.Content']))

    def backwards(self, orm):
        # Changing field 'Comment.parent_content'
        # Restore the original NOT NULL foreign key constraint.
        db.alter_column('canvas_comment', 'parent_content_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['canvas.Content']))

    # Frozen ORM snapshot used by South to materialize the `orm` argument
    # above.  Auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'canvas.comment': {
            'Meta': {'object_name': 'Comment'},
            'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.FloatField', [], {}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.content': {
            'Meta': {'object_name': 'Content'},
            'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'used_as_stamp'", 'symmetrical': 'False', 'to': "orm['canvas.Content']"}),
            'timestamp': ('django.db.models.fields.FloatField', [], {}),
            'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.contentsticker': {
            'Meta': {'object_name': 'ContentSticker'},
            'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Content']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('django.db.models.fields.FloatField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'canvas.contenturlmapping': {
            'Meta': {'object_name': 'ContentUrlMapping'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'canvas.stashcontent': {
            'Meta': {'object_name': 'StashContent'},
            'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['canvas']
// RUN: %check_clang_tidy %s bugprone-unused-return-value %t \
// RUN: -config='{CheckOptions: \
// RUN: {bugprone-unused-return-value.CheckedFunctions: "::*"}}' \
// RUN: --
// Type with user-declared special members plus member compound-assignment
// and increment/decrement operators; used below to verify the check does
// not warn on calls whose result is consumed via these operators.
struct S1 {
  S1(){};
  S1(S1 const &);
  S1(S1 &&);
  S1 &operator=(S1 const &);
  S1 &operator=(S1 &&);
  S1 &operator+=(S1);
  S1 &operator++();
  S1 &operator++(int);
  S1 &operator--();
  S1 &operator--(int);
};
// Type whose operators are declared as free functions (see below) rather
// than members, to cover the non-member operator case.
struct S2 {
  S2(){};
  S2(S2 const &);
  S2(S2 &&);
};
// Free-function operator overloads for S2, and value/reference-returning
// helpers exercised in bar() below.
S2 &operator-=(S2&, int);
S2 &operator++(S2 &);
S2 &operator++(S2 &, int);
S1 returnValue();
S1 const &returnRef();
// Only the bare, discarded call to returnValue() should be diagnosed;
// every other use consumes the result via assignment or an operator.
// (CHECK lines use [[@LINE-1]], so no line may be inserted between a
// checked statement and its CHECK comment.)
void bar() {
  returnValue();
  // CHECK-MESSAGES: [[@LINE-1]]:3: warning: the value returned by this function should not be disregarded; neglecting it may lead to errors
  // Results consumed through member operators of S1 -- no warnings.
  S1 a{};
  a = returnValue();
  a.operator=(returnValue());
  a = returnRef();
  a.operator=(returnRef());
  a += returnRef();
  a++;
  ++a;
  a--;
  --a;
  // Results consumed through the free-function operators of S2 -- no warnings.
  S2 b{};
  b -= 1;
  b++;
  ++b;
}
{{-- Render a human-readable "source" for an exception stack frame and
     syntax-highlight it (full text shown via tooltip when truncated). --}}
@props(['frame'])

@php
    // A frame inside a class renders as Class[operator]callable(args),
    // using the *previous* frame's call-site info when available;
    // otherwise fall back to the frame's raw source string.
    if ($class = $frame->class()) {
        $source = $class;

        if ($previous = $frame->previous()) {
            $source .= $previous->operator();
            $source .= $previous->callable();
            $source .= '('.implode(', ', $previous->args()).')';
        }
    } else {
        $source = $frame->source();
    }
@endphp

<x-laravel-exceptions-renderer::syntax-highlight
    :code="$source"
    language="php"
    truncate
    class="text-xs min-w-0"
    data-tippy-content="{{ $source }}"
/>
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@file:Suppress("UNUSED_PARAMETER")
package org.springframework.docs.web.websocket.stomp.websocketstompdestinationseparator
import org.springframework.messaging.handler.annotation.DestinationVariable
import org.springframework.messaging.handler.annotation.MessageMapping
import org.springframework.stereotype.Controller
// Documentation sample: shows "." used as the STOMP destination separator.
// Messages sent to the "red" prefix + "blue.{green}" pattern are routed
// here, with the {green} segment bound through @DestinationVariable.
// (Comments kept outside the tag::snippet region so they do not appear
// in the rendered reference docs.)
// tag::snippet[]
@Controller
@MessageMapping("red")
class RedController {

	@MessageMapping("blue.{green}")
	fun handleGreen(@DestinationVariable green: String) {
		// ...
	}
}
// end::snippet[]
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Instruction collection.
"""
from qiskit.exceptions import QiskitError
from .instruction import Instruction
class InstructionSet:
    """A collection of instructions together with their contexts.

    The context of each instruction -- the quantum arguments (qargs) and
    classical arguments (cargs) it is attached to -- is kept in parallel
    lists with one entry per instruction.
    """

    def __init__(self):
        """Create an empty instruction collection."""
        self.instructions = []
        self.qargs = []
        self.cargs = []

    def __len__(self):
        """Number of instructions in the set."""
        return len(self.instructions)

    def __getitem__(self, i):
        """Instruction stored at position ``i``."""
        return self.instructions[i]

    def add(self, gate, qargs, cargs):
        """Append an instruction along with the qargs/cargs it acts on.

        Raises:
            QiskitError: if ``gate`` is not an :class:`Instruction`.
        """
        if not isinstance(gate, Instruction):
            raise QiskitError("attempt to add non-Instruction" +
                              " to InstructionSet")
        self.instructions.append(gate)
        self.qargs.append(qargs)
        self.cargs.append(cargs)

    def inverse(self):
        """Replace every instruction with its inverse, in place."""
        for position in range(len(self.instructions)):
            self.instructions[position] = self.instructions[position].inverse()
        return self

    def q_if(self, *qregs):
        """Add quantum controls to every instruction in the set."""
        for instruction in self.instructions:
            instruction.q_if(*qregs)
        return self

    def c_if(self, classical, val):
        """Condition every instruction on a classical register value."""
        for instruction in self.instructions:
            instruction.c_if(classical, val)
        return self
from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from xbrowse_server.phenotips.utilities import find_db_files
from xbrowse_server.phenotips.utilities import find_references
import os
'''
Please note: this is a beta version of this tool and under development
'''
class Command(BaseCommand):
    # Beta helper (Python 2): prepares the PhenoTips backend files when
    # switching database technology (e.g. HSQLDB -> PostgreSQL).

    def add_arguments(self, parser):
        # Both options are effectively required; handle() validates them.
        parser.add_argument('--installation_dir',
                            '-d',
                            dest='ins_dir',
                            help='The PhenoTips installation directory.'
                            )
        parser.add_argument('--temporary_dir',
                            '-t',
                            dest='temp_dir',
                            help='A directory to keep temporary files in.'
                            )

    def handle(self, *args, **options):
        '''
        This is a helper script that is required to be run when changing the backend
        DB between HSQLDB/Postgresql etc
        '''
        # Validate both directories before doing any work; exit with a
        # usage message on any missing/invalid input.
        if options['ins_dir'] is None or options['temp_dir'] is None:
            self.print_help()
            sys.exit()
        if not os.path.exists(options['ins_dir']):
            print '\n\nError: directory does not exist: please enter a valid PhenoTips installation directory (--installation_dir).'
            sys.exit()
        if not os.path.exists(options['temp_dir']):
            print '\n\nError: directory does not exist: please enter a valid temporary directory (--temporary_dir).'
            sys.exit()
        self.start(options['ins_dir'], options['temp_dir'])

    def start(self, install_dir, temp_dir):
        '''
        Start the application.
        '''
        # Locate the PhenoTips DB files, then rewrite their references
        # using the temporary directory as scratch space.
        files_to_adjust = find_db_files(install_dir)
        find_references(files_to_adjust, temp_dir)

    def print_help(self):
        '''
        Help message
        '''
        # NOTE(review): shadows BaseCommand.print_help(prog_name, subcommand)
        # with an argument-less signature; only called internally here.
        print '\n\nPrepared backend of PhenoTips for database technology change.\n'
        print 'Requires:\n'
        print '1.Valid PhenoTips installation directory'
        print '2.Valid temporary file directory'
        print '\n\n'
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from alpha_generic import *
import switcheroo
# Build a full-system Alpha Linux configuration whose tester can swap
# between three CPU models over the same DDR3 memory system.
root = LinuxAlphaFSSwitcheroo(
    mem_class=DDR3_1600_x64,
    cpu_classes=(AtomicSimpleCPU, TimingSimpleCPU, DerivO3CPU)
).create_root()

# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test
# CodeQL security analysis for pushes, pull requests and manual runs,
# delegated to the shared spring-io reusable workflow (pinned to a commit SHA).
name: "Run CodeQL Analysis"
on:
  push:
  pull_request:
  workflow_dispatch:
permissions: read-all
jobs:
  run-analysis:
    permissions:
      actions: read
      contents: read
      # Required so the reusable workflow can upload SARIF results.
      security-events: write
    uses: spring-io/github-actions/.github/workflows/codeql-analysis.yml@6e66995f7d29de1e4ff76e4f0def7a10163fe910
# Generate the Makefile for the -test-/scan_args C extension used by
# Ruby's internal test suite.
create_makefile("-test-/scan_args")
"""
support for presenting detailed information in failing assertions.
"""
from __future__ import absolute_import, division, print_function
import py
import sys
from _pytest.assertion import util
from _pytest.assertion import rewrite
from _pytest.assertion import truncate
def pytest_addoption(parser):
    # Register the --assert option in the "debugconfig" group; its value
    # selects between AST-rewriting ('rewrite', default) and plain asserts.
    group = parser.getgroup("debugconfig")
    group.addoption('--assert',
                    action="store",
                    dest="assertmode",
                    choices=("rewrite", "plain",),
                    default="rewrite",
                    metavar="MODE",
                    help="""Control assertion debugging tools. 'plain'
performs no assertion debugging. 'rewrite'
(the default) rewrites assert statements in
test modules on import to provide assert
expression information.""")
def register_assert_rewrite(*names):
    """Register one or more module names to be rewritten on import.

    This function will make sure that this module or all modules inside
    the package will get their assert statements rewritten.
    Thus you should make sure to call this before the module is
    actually imported, usually in your __init__.py if you are a plugin
    using a package.

    :raise TypeError: if the given module names are not strings.
    """
    if not all(isinstance(name, str) for name in names):
        msg = 'expected module names as *args, got {0} instead'
        raise TypeError(msg.format(repr(names)))
    # Prefer the real rewriting hook if one is already installed on
    # sys.meta_path; otherwise fall back to the no-op dummy.
    importhook = next(
        (hook for hook in sys.meta_path
         if isinstance(hook, rewrite.AssertionRewritingHook)),
        None)
    if importhook is None:
        importhook = DummyRewriteHook()
    importhook.mark_rewrite(*names)
class DummyRewriteHook(object):
    """Import-hook stand-in used when assertion rewriting is disabled.

    Offers the same ``mark_rewrite`` interface as the real rewriting
    hook but deliberately does nothing with the names.
    """

    def mark_rewrite(self, *names):
        """Accept and ignore the given module names."""
        return None
class AssertionState:
    """State for the assertion plugin."""

    def __init__(self, config, mode):
        # mode: the --assert option value ('rewrite' or 'plain').
        self.mode = mode
        # Dedicated trace channel for assertion-related debug output.
        self.trace = config.trace.root.get("assertion")
        # The installed AssertionRewritingHook, or None before/without one.
        self.hook = None
def install_importhook(config):
    """Try to install the rewrite hook, raise SystemError if it fails."""
    # Both Jython and CPython 2.6.0 have AST bugs that make the
    # assertion rewriting hook malfunction.
    if (sys.platform.startswith('java') or
            sys.version_info[:3] == (2, 6, 0)):
        raise SystemError('rewrite not supported')

    config._assertstate = AssertionState(config, 'rewrite')
    config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config)
    # Insert at the front so the rewriting hook sees imports before the
    # default path-based finders do.
    sys.meta_path.insert(0, hook)
    config._assertstate.trace('installed rewrite import hook')

    def undo():
        # Remove the hook at config teardown; it may already have been
        # removed elsewhere, hence the membership check.
        hook = config._assertstate.hook
        if hook is not None and hook in sys.meta_path:
            sys.meta_path.remove(hook)

    config.add_cleanup(undo)
    return hook
def pytest_collection(session):
    """Attach the current session to the rewrite hook, if one is active.

    This hook is only called when test modules are collected, so for
    example not in the master process of pytest-xdist (which does not
    collect test modules).
    """
    state = getattr(session.config, '_assertstate', None)
    if state and state.hook is not None:
        state.hook.set_session(session)
def pytest_runtest_setup(item):
    """Setup the pytest_assertrepr_compare hook

    The newinterpret and rewrite modules will use util._reprcompare if
    it exists to use custom reporting via the
    pytest_assertrepr_compare hook. This sets up this custom
    comparison for the test.
    """
    def callbinrepr(op, left, right):
        """Call the pytest_assertrepr_compare hook and prepare the result

        This uses the first result from the hook and then ensures the
        following:
        * Overly verbose explanations are truncated unless configured otherwise
          (eg. if running in verbose mode).
        * Embedded newlines are escaped to help util.format_explanation()
          later.
        * If the rewrite mode is used embedded %-characters are replaced
          to protect later % formatting.

        The result can be formatted by util.format_explanation() for
        pretty printing.
        """
        hook_result = item.ihook.pytest_assertrepr_compare(
            config=item.config, op=op, left=left, right=right)
        # Use the first non-empty explanation any hookimpl returned.
        for new_expl in hook_result:
            if new_expl:
                new_expl = truncate.truncate_if_required(new_expl, item)
                new_expl = [line.replace("\n", "\\n") for line in new_expl]
                res = py.builtin._totext("\n~").join(new_expl)
                if item.config.getvalue("assertmode") == "rewrite":
                    # Rewritten assertions later %-format the explanation,
                    # so literal percent signs must be doubled.
                    res = res.replace("%", "%%")
                return res
    # Installed module-globally; torn down in pytest_runtest_teardown.
    util._reprcompare = callbinrepr
def pytest_runtest_teardown(item):
    # Remove the per-test comparison callback installed by
    # pytest_runtest_setup so state cannot leak between tests.
    util._reprcompare = None
def pytest_sessionfinish(session):
    """Detach the session from the rewrite hook once the run is over."""
    state = getattr(session.config, '_assertstate', None)
    if state and state.hook is not None:
        state.hook.set_session(None)
# Expose this plugin's implementation for the pytest_assertrepr_compare hook
# (re-exported here so pluggy discovers the hookimpl on this module).
pytest_assertrepr_compare = util.assertrepr_compare
<!--
Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
-->
# curl C code style
Source code that has a common style is easier to read than code that uses
different styles in different places. It helps making the code feel like one
single code base. Easy-to-read is an important property of code and helps
making it easier to review when new things are added and it helps debugging
code when developers are trying to figure out why things go wrong. A unified
style is more important than individual contributors having their own personal
tastes satisfied.
Our C code has a few style rules. Most of them are verified and upheld by the
`scripts/checksrc.pl` script. Invoked with `make checksrc` or even by default
by the build system when built after `./configure --enable-debug` has been
used.
It is normally not a problem for anyone to follow the guidelines, as you just
need to copy the style already used in the source code and there are no
particularly unusual rules in our set of rules.
We also work hard on writing code that is warning-free on all the major
platforms and in general on as many platforms as possible. Code that causes
warnings is not accepted as-is.
## Readability
A primary characteristic for code is readability. The intent and meaning of
the code should be visible to the reader. Being clear and unambiguous beats
being clever and saving two lines of code. Write simple code. You and others
who come back to this code over the coming decades want to be able to quickly
understand it when debugging.
## Naming
Try using a non-confusing naming scheme for your new functions and variable
names. It does not necessarily have to mean that you should use the same as in
other places of the code, just that the names should be logical,
understandable and be named according to what they are used for. File-local
functions should be made static. We like lower case names.
See the [INTERNALS](https://curl.se/dev/internals.html#symbols) document on
how we name non-exported library-global symbols.
## Indenting
We use only spaces for indentation, never TABs. We use two spaces for each new
open brace.
```c
if(something_is_true) {
while(second_statement == fine) {
moo();
}
}
```
## Comments
Since we write C89 code, **//** comments are not allowed. They were not
introduced in the C standard until C99. We use only __/* comments */__.
```c
/* this is a comment */
```
## Long lines
Source code in curl may never be wider than 79 columns and there are two
reasons for maintaining this even in the modern era of large and high
resolution screens:
1. Narrower columns are easier to read than wide ones. There is a reason
newspapers have used columns for decades or centuries.
2. Narrower columns allow developers to easier show multiple pieces of code
next to each other in different windows. It allows two or three source
code windows next to each other on the same screen - as well as multiple
terminal and debugging windows.
## Braces
In if/while/do/for expressions, we write the open brace on the same line as
the keyword and we then set the closing brace on the same indentation level as
the initial keyword. Like this:
```c
if(age < 40) {
/* clearly a youngster */
}
```
You may omit the braces if they would contain only a one-line statement:
```c
if(!x)
continue;
```
For functions the opening brace should be on a separate line:
```c
int main(int argc, char **argv)
{
return 1;
}
```
## 'else' on the following line
When adding an **else** clause to a conditional expression using braces, we
add it on a new line after the closing brace. Like this:
```c
if(age < 40) {
/* clearly a youngster */
}
else {
/* probably grumpy */
}
```
## No space before parentheses
When writing expressions using if/while/do/for, there shall be no space
between the keyword and the open parenthesis. Like this:
```c
while(1) {
/* loop forever */
}
```
## Use boolean conditions
Rather than test a conditional value such as a bool against TRUE or FALSE, a
pointer against NULL or != NULL and an int against zero or not zero in
if/while conditions we prefer:
```c
result = do_something();
if(!result) {
/* something went wrong */
return result;
}
```
## No assignments in conditions
To increase readability and reduce complexity of conditionals, we avoid
assigning variables within if/while conditions. We frown upon this style:
```c
if((ptr = malloc(100)) == NULL)
return NULL;
```
and instead we encourage the above version to be spelled out more clearly:
```c
ptr = malloc(100);
if(!ptr)
return NULL;
```
## New block on a new line
We never write multiple statements on the same source line, even for short
if() conditions.
```c
if(a)
return TRUE;
else if(b)
return FALSE;
```
and NEVER:
```c
if(a) return TRUE;
else if(b) return FALSE;
```
## Space around operators
Please use spaces on both sides of operators in C expressions. Postfix **(),
[], ->, ., ++, --** and Unary **+, -, !, ~, &** operators excluded they should
have no space.
Examples:
```c
bla = func();
who = name[0];
age += 1;
true = !false;
size += -2 + 3 * (a + b);
ptr->member = a++;
struct.field = b--;
ptr = &address;
contents = *pointer;
complement = ~bits;
empty = (!*string) ? TRUE : FALSE;
```
## No parentheses for return values
We use the 'return' statement without extra parentheses around the value:
```c
int works(void)
{
return TRUE;
}
```
## Parentheses for sizeof arguments
When using the sizeof operator in code, we prefer it to be written with
parentheses around its argument:
```c
int size = sizeof(int);
```
## Column alignment
Some statements cannot be completed on a single line because the line would be
too long, the statement too hard to read, or due to other style guidelines
above. In such a case the statement spans multiple lines.
If a continuation line is part of an expression or sub-expression then you
should align on the appropriate column so that it is easy to tell what part of
the statement it is. Operators should not start continuation lines. In other
cases follow the 2-space indent guideline. Here are some examples from
libcurl:
```c
if(Curl_pipeline_wanted(handle->multi, CURLPIPE_HTTP1) &&
(handle->set.httpversion != CURL_HTTP_VERSION_1_0) &&
(handle->set.httpreq == HTTPREQ_GET ||
handle->set.httpreq == HTTPREQ_HEAD))
/* did not ask for HTTP/1.0 and a GET or HEAD */
return TRUE;
```
If no parenthesis, use the default indent:
```c
data->set.http_disable_hostname_check_before_authentication =
va_arg(param, long) ? TRUE : FALSE;
```
Function invoke with an open parenthesis:
```c
if(option) {
result = parse_login_details(option, strlen(option),
(userp ? &user : NULL),
(passwdp ? &passwd : NULL),
NULL);
}
```
Align with the "current open" parenthesis:
```c
DEBUGF(infof(data, "Curl_pp_readresp_ %d bytes of trailing "
"server response left\n",
(int)clipamount));
```
## Platform dependent code
Use **#ifdef HAVE_FEATURE** to do conditional code. We avoid checking for
particular operating systems or hardware in the #ifdef lines. The HAVE_FEATURE
shall be generated by the configure script for Unix-like systems and they are
hard-coded in the `config-[system].h` files for the others.
We also encourage use of macros/functions that possibly are empty or defined
to constants when libcurl is built without that feature, to make the code
seamless. Like this example where the **magic()** function works differently
depending on a build-time conditional:
```c
#ifdef HAVE_MAGIC
void magic(int a)
{
return a + 2;
}
#else
#define magic(x) 1
#endif
int content = magic(3);
```
## No typedefed structs
Use structs by all means, but do not typedef them. Use the `struct name` way
of identifying them:
```c
struct something {
void *valid;
size_t way_to_write;
};
struct something instance;
```
**Not okay**:
```c
typedef struct {
void *wrong;
size_t way_to_write;
} something;
something instance;
```
## Banned functions
To avoid footguns and unintended consequences we forbid the use of a number of
C functions. The `checksrc` script finds and yells about them if used. This
makes us write better code.
This is the full list of functions generally banned.
_access
_fstati64
_lseeki64
_mbscat
_mbsncat
_open
_tcscat
_tcsdup
_tcsncat
_tcsncpy
_waccess
_wcscat
_wcsdup
_wcsncat
_wfopen
_wfreopen
_wopen
accept
accept4
access
aprintf
atoi
atol
calloc
close
CreateFile
CreateFileA
CreateFileW
fclose
fdopen
fopen
fprintf
free
freeaddrinfo
freopen
fstat
getaddrinfo
gets
gmtime
llseek
LoadLibrary
LoadLibraryA
LoadLibraryEx
LoadLibraryExA
LoadLibraryExW
LoadLibraryW
localtime
lseek
malloc
mbstowcs
MoveFileEx
MoveFileExA
MoveFileExW
msnprintf
mvsnprintf
open
printf
realloc
recv
rename
send
snprintf
socket
socketpair
sprintf
sscanf
stat
strcat
strcpy
strdup
strerror
strncat
strncpy
strtok
strtok_r
strtol
strtoul
vaprintf
vfprintf
vprintf
vsnprintf
vsprintf
wcscpy
wcsdup
wcsncpy
wcstombs
WSASocket
WSASocketA
WSASocketW | unknown | github | https://github.com/curl/curl | docs/internals/CODE_STYLE.md |
import itertools
import unittest
from parameterized import parameterized
import torch
import torch.nn as nn
from nsoltAtomExtension2dLayer import NsoltAtomExtension2dLayer
# Parameter grids swept via @parameterized.expand in the tests below.
nchs = [ [3,3], [4,4] ]                   # (ps, pa) channel counts
datatype = [ torch.float, torch.double ]  # tensor dtypes under test
nrows = [ 4, 8, 16 ]                      # spatial rows of the coef. array
ncols = [ 4, 8, 16 ]                      # spatial cols of the coef. array
dir = [ 'Right', 'Left', 'Up', 'Down' ]   # shift directions
target = [ 'Sum', 'Difference' ]          # which channel half is shifted
class NsoltAtomExtention2dLayerTestCase(unittest.TestCase):
"""
NSOLTATOMEXTENSION2DLAYERTESTCASE
コンポーネント別に入力(nComponents=1のみサポート):
nSamples x nRows x nCols x nChsTotal
コンポーネント別に出力(nComponents=1のみサポート):
nSamples x nRows x nCols x nChsTotal
Requirements: Python 3.7.x, PyTorch 1.7.x
Copyright (c) 2020-2021, Shogo MURAMATSU
All rights reserved.
Contact address: Shogo MURAMATSU,
Faculty of Engineering, Niigata University,
8050 2-no-cho Ikarashi, Nishi-ku,
Niigata, 950-2181, JAPAN
http://msiplab.eng.niigata-u.ac.jp/
"""
@parameterized.expand(
list(itertools.product(nchs,target))
)
def testConstructor(self,nchs,target):
# Expctd values
expctdName = 'Qn'
expctdDirection = 'Right'
expctdTargetChannels = target
expctdDescription = "Right shift the " \
+ target.lower() \
+ "-channel Coefs. " \
+ "(ps,pa) = (" + str(nchs[0]) + "," + str(nchs[1]) + ")"
# Instantiation of target class
layer = NsoltAtomExtension2dLayer(
number_of_channels=nchs,
name=expctdName,
direction=expctdDirection,
target_channels=expctdTargetChannels
)
# Actual values
actualName = layer.name
actualDirection = layer.direction
actualTargetChannels = layer.target_channels
actualDescription = layer.description
# Evaluation
self.assertTrue(isinstance(layer, nn.Module))
self.assertEqual(actualName,expctdName)
self.assertEqual(actualDirection,expctdDirection)
self.assertEqual(actualTargetChannels,expctdTargetChannels)
self.assertEqual(actualDescription,expctdDescription)
@parameterized.expand(
list(itertools.product(nchs,nrows,ncols,dir,datatype))
)
def testPredictGrayscaleShiftDifferenceCoefs(self,
nchs, nrows, ncols, dir, datatype):
rtol,atol= 0,1e-8
# Parameters
nSamples = 8
nChsTotal = sum(nchs)
target = 'Difference'
# nSamples x nRows x nCols x nChsTotal
X = torch.randn(nSamples,nrows,ncols,nChsTotal,dtype=datatype,requires_grad=True)
# Expected values
if dir=='Right':
shift = ( 0, 0, 1, 0 )
elif dir=='Left':
shift = ( 0, 0, -1, 0 )
elif dir=='Down':
shift = ( 0, 1, 0, 0 )
elif dir=='Up':
shift = ( 0, -1, 0, 0 )
else:
shift = ( 0, 0, 0, 0 )
# nSamples x nRows x nCols x nChsTotal
ps, pa = nchs
Y = X
# Block butterfly
Ys = Y[:,:,:,:ps]
Ya = Y[:,:,:,ps:]
Y = torch.cat((Ys+Ya, Ys-Ya),dim=-1)
# Block circular shift
Y[:,:,:,ps:] = torch.roll(Y[:,:,:,ps:],shifts=shift,dims=(0,1,2,3))
# Block butterfly
Ys = Y[:,:,:,:ps]
Ya = Y[:,:,:,ps:]
Y = torch.cat((Ys+Ya ,Ys-Ya),dim=-1)
# Output
expctdZ = Y/2.
# Instantiation of target class
layer = NsoltAtomExtension2dLayer(
number_of_channels=nchs,
name='Qn~',
direction=dir,
target_channels=target
)
# Actual values
with torch.no_grad():
actualZ = layer.forward(X)
# Evaluation
self.assertEqual(actualZ.dtype,datatype)
self.assertTrue(torch.allclose(actualZ,expctdZ,rtol=rtol,atol=atol))
self.assertFalse(actualZ.requires_grad)
@parameterized.expand(
list(itertools.product(nchs,nrows,ncols,dir,datatype))
)
def testPredictGrayscaleShiftSumCoefs(self,
nchs, nrows, ncols, dir, datatype):
rtol, atol= 1e-5, 1e-8
# Parameters
nSamples = 8
nChsTotal = sum(nchs)
target = 'Sum'
# nSamples x nRows x nCols x nChsTotal
X = torch.randn(nSamples,nrows,ncols,nChsTotal,dtype=datatype,requires_grad=True)
# Expected values
if dir=='Right':
shift = ( 0, 0, 1, 0 )
elif dir=='Left':
shift = ( 0, 0, -1, 0 )
elif dir=='Down':
shift = ( 0, 1, 0, 0 )
elif dir=='Up':
shift = ( 0, -1, 0, 0 )
else:
shift = ( 0, 0, 0, 0 )
# nSamples x nRows x nCols x nChsTotal
ps, pa = nchs
Y = X
# Block butterfly
Ys = Y[:,:,:,:ps]
Ya = Y[:,:,:,ps:]
Y = torch.cat((Ys+Ya, Ys-Ya),dim=-1)
# Block circular shift
Y[:,:,:,:ps] = torch.roll(Y[:,:,:,:ps],shifts=shift,dims=(0,1,2,3))
# Block butterfly
Ys = Y[:,:,:,:ps]
Ya = Y[:,:,:,ps:]
Y = torch.cat((Ys+Ya, Ys-Ya),dim=-1)
# Output
expctdZ = Y/2.
# Instantiation of target class
layer = NsoltAtomExtension2dLayer(
number_of_channels=nchs,
name='Qn~',
direction=dir,
target_channels=target
)
# Actual values
with torch.no_grad():
actualZ = layer.forward(X)
# Evaluation
self.assertEqual(actualZ.dtype,datatype)
self.assertTrue(torch.allclose(actualZ,expctdZ,rtol=rtol,atol=atol))
self.assertFalse(actualZ.requires_grad)
@parameterized.expand(
list(itertools.product(nchs,nrows,ncols,dir,datatype))
)
def testBackwardGrayscaleShiftDifferenceCoefs(self,
nchs, nrows, ncols, dir, datatype):
rtol,atol = 1e-5,1e-8
# Parameters
nSamples = 8
nChsTotal = sum(nchs)
target = 'Difference'
# nSamples x nRows x nCols x nChsTotal
X = torch.zeros(nSamples,nrows,ncols,nChsTotal,dtype=datatype,requires_grad=True)
dLdZ = torch.randn(nSamples,nrows,ncols,nChsTotal,dtype=datatype)
# Expected values
if dir=='Right':
shift = ( 0, 0, -1, 0 ) # Reverse
elif dir=='Left':
shift = ( 0, 0, 1, 0 ) # Reverse
elif dir=='Down':
shift = ( 0, -1, 0, 0 ) # Reverse
elif dir=='Up':
shift = ( 0, 1, 0, 0 ) # Reverse
else:
shift = ( 0, 0, 0, 0 ) # Reverse
# nSamples x nRows x nCols x nChsTotal
ps, pa = nchs
Y = dLdZ
# Block butterfly
Ys = Y[:,:,:,:ps]
Ya = Y[:,:,:,ps:]
Y = torch.cat((Ys+Ya,Ys-Ya),dim=-1)
# Block circular shift
Y[:,:,:,ps:] = torch.roll(Y[:,:,:,ps:],shifts=shift,dims=(0,1,2,3))
# Block butterfly
Ys = Y[:,:,:,:ps]
Ya = Y[:,:,:,ps:]
Y = torch.cat((Ys+Ya,Ys-Ya),dim=-1)
# Output
expctddLdX = Y/2.
# Instantiation of target class
layer = NsoltAtomExtension2dLayer(
number_of_channels=nchs,
name='Qn',
direction=dir,
target_channels=target
)
# Actual values
Z = layer.forward(X)
Z.backward(dLdZ)
actualdLdX = X.grad
# Evaluation
self.assertEqual(actualdLdX.dtype,datatype)
self.assertTrue(torch.allclose(actualdLdX,expctddLdX,rtol=rtol,atol=atol))
self.assertTrue(Z.requires_grad)
@parameterized.expand(
list(itertools.product(nchs,nrows,ncols,dir,datatype))
)
def testBackwardGrayscaleShiftSumCoefs(self,
nchs, nrows, ncols, dir, datatype):
rtol,atol = 1e-5,1e-8
# Parameters
nSamples = 8
nChsTotal = sum(nchs)
target = 'Sum'
# nSamples x nRows x nCols x nChsTotal
X = torch.zeros(nSamples,nrows,ncols,nChsTotal,dtype=datatype,requires_grad=True)
dLdZ = torch.randn(nSamples,nrows,ncols,nChsTotal,dtype=datatype)
# Expected values
if dir=='Right':
shift = ( 0, 0, -1, 0) # Reverse
elif dir=='Left':
shift = ( 0, 0, 1, 0 ) # Reverse
elif dir=='Down':
shift = ( 0, -1, 0, 0 ) # Reverse
elif dir=='Up':
shift = ( 0, 1, 0, 0 ) # Reverse
else:
shift = ( 0, 0, 0, 0 )
# nSamples x nRows x nCols x nChsTotal
ps, pa = nchs
Y = dLdZ
# Block butterfly
Ys = Y[:,:,:,:ps]
Ya = Y[:,:,:,ps:]
Y = torch.cat((Ys+Ya, Ys-Ya),dim=-1)
# Block circular shift
Y[:,:,:,:ps] = torch.roll(Y[:,:,:,:ps],shifts=shift,dims=(0,1,2,3))
# Block butterfly
Ys = Y[:,:,:,:ps]
Ya = Y[:,:,:,ps:]
Y = torch.cat((Ys+Ya, Ys-Ya),dim=-1)
# Output
expctddLdX = Y/2.
# Instantiation of target class
layer = NsoltAtomExtension2dLayer(
number_of_channels=nchs,
name='Qn',
direction=dir,
target_channels=target
)
# Actual values
Z = layer.forward(X)
Z.backward(dLdZ)
actualdLdX = X.grad
# Evaluation
self.assertEqual(actualdLdX.dtype,datatype)
self.assertTrue(torch.allclose(actualdLdX,expctddLdX,rtol=rtol,atol=atol))
self.assertTrue(Z.requires_grad)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2015 RethinkDB, all rights reserved.
import time
import struct
from twisted.python import log
from twisted.internet import reactor, defer
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
from twisted.internet.defer import DeferredQueue, CancelledError
from twisted.internet.protocol import ClientFactory, Protocol
from twisted.internet.endpoints import clientFromString
from twisted.internet.error import TimeoutError
from . import ql2_pb2 as p
from .net import Query, Response, Cursor, maybe_profile
from .net import Connection as ConnectionBase
from .errors import *
__all__ = ['Connection']
pResponse = p.Response.ResponseType
pQuery = p.Query.QueryType
class DatabaseProtocol(Protocol):
    """Twisted protocol implementing the RethinkDB client wire format.

    Starts in WAITING_FOR_HANDSHAKE, drives the handshake object supplied
    by the factory, then switches to READY. In READY, server responses are
    framed as a 12-byte header (little-endian int64 query token + uint32
    payload length, struct format '<qL') followed by the payload, which is
    forwarded to the factory's response_handler.
    """

    WAITING_FOR_HANDSHAKE = 0
    READY = 1

    def __init__(self, factory):
        self.factory = factory
        self.state = DatabaseProtocol.WAITING_FOR_HANDSHAKE
        # Dispatch table: one data handler per protocol state.
        self._handlers = {
            DatabaseProtocol.WAITING_FOR_HANDSHAKE: self._handleHandshake,
            DatabaseProtocol.READY: self._handleResponse
        }
        self.buf = bytes()            # accumulated, not-yet-parsed bytes
        self.buf_expected_length = 0  # payload length of the frame being read
        self.buf_token = None         # token of the frame being read
        self.wait_for_handshake = Deferred()
        self._open = True

    def connectionMade(self):
        # Send immediately the handshake.
        self.factory.handshake.reset()
        self.transport.write(self.factory.handshake.next_message(None))
        # Defer a timer which will callback when timed out and errback the
        # wait_for_handshake. Otherwise, it will be cancelled in
        # handleHandshake.
        self._timeout_defer = reactor.callLater(self.factory.timeout,
                                                self._handleHandshakeTimeout)

    def connectionLost(self, reason):
        self._open = False

    def _handleHandshakeTimeout(self):
        # If we are here, we failed to do the handshake before the timeout.
        # We close the connection and raise an ReqlTimeoutError in the
        # wait_for_handshake deferred.
        self._open = False
        self.transport.loseConnection()
        self.wait_for_handshake.errback(ReqlTimeoutError())

    def _handleHandshake(self, data):
        """Feed handshake bytes; handshake responses are NUL-terminated."""
        try:
            self.buf += data
            end_index = self.buf.find(b'\0')
            if end_index != -1:
                response = self.buf[:end_index]
                self.buf = self.buf[end_index + 1:]
                request = self.factory.handshake.next_message(response)
                if request is None:
                    # We're now ready to work with real data.
                    self.state = DatabaseProtocol.READY
                    # We cancel the scheduled timeout.
                    self._timeout_defer.cancel()
                    # We callback our wait_for_handshake.
                    self.wait_for_handshake.callback(None)
                elif request != "":
                    # NOTE(review): 'request' is presumably bytes, so this
                    # comparison with "" is always true on Python 3 --
                    # confirm against the handshake implementation.
                    self.transport.write(request)
        except Exception as e:
            self.wait_for_handshake.errback(e)

    def _handleResponse(self, data):
        """Accumulate bytes and dispatch every complete response frame."""
        # If we have more than one response, we should handle all of them.
        self.buf += data
        while True:
            # 1. Read the header, until we read the length of the awaited payload.
            if self.buf_expected_length == 0:
                if len(self.buf) >= 12:
                    token, length = struct.unpack('<qL', self.buf[:12])
                    self.buf_token = token
                    self.buf_expected_length = length
                    self.buf = self.buf[12:]
                else:
                    # We quit the function, it is impossible to have read the
                    # entire payload at this point.
                    return
            # 2. Buffer the data, until the size of the data match the expected
            # length provided by the header.
            if len(self.buf) < self.buf_expected_length:
                return
            self.factory.response_handler(self.buf_token, self.buf[:self.buf_expected_length])
            self.buf = self.buf[self.buf_expected_length:]
            self.buf_token = None
            self.buf_expected_length = 0

    def dataReceived(self, data):
        try:
            if self._open:
                self._handlers[self.state](data)
        except Exception as e:
            # BUG FIX: drop the connection *before* raising. Previously the
            # loseConnection() call was placed after the raise statement and
            # was therefore unreachable, even though the error message
            # promises to drop the connection.
            self.transport.loseConnection()
            raise ReqlDriverError('Driver failed to handle received data.'
                'Error: {exc}. Dropping the connection.'.format(exc=str(e)))
class DatabaseProtoFactory(ClientFactory):
    """Factory producing DatabaseProtocol instances.

    Carries the handshake object, the handshake timeout, and the callback
    invoked with (token, payload) for every complete server response.
    """

    protocol = DatabaseProtocol

    def __init__(self, timeout, response_handler, handshake):
        self.response_handler = response_handler
        self.handshake = handshake
        self.timeout = timeout

    def startedConnecting(self, connector):
        # Nothing to track while the connection attempt is pending.
        pass

    def buildProtocol(self, addr):
        return DatabaseProtocol(self)

    def clientConnectionLost(self, connector, reason):
        # Reconnection policy is handled at a higher level.
        pass

    def clientConnectionFailed(self, connector, reason):
        pass
class CursorItems(DeferredQueue):
def __init__(self):
super(CursorItems, self).__init__()
def cancel_getters(self, err):
"""
Cancel all waiters.
"""
for waiter in self.waiting[:]:
if not waiter.called:
waiter.errback(err)
self.waiting.remove(waiter)
def extend(self, data):
for k in data:
self.put(k)
def __len__(self):
return len(self.pending)
def __getitem__(self, index):
return self.pending[index]
def __iter__(self):
return iter(self.pending)
class TwistedCursor(Cursor):
def __init__(self, *args, **kwargs):
kwargs.setdefault('items_type', CursorItems)
super(TwistedCursor, self).__init__(*args, **kwargs)
self.waiting = list()
def _extend(self, res):
Cursor._extend(self, res)
if self.error is not None:
self.items.cancel_getters(self.error)
for d in self.waiting[:]:
d.callback(None)
self.waiting.remove(d)
def _empty_error(self):
return RqlCursorEmpty()
@inlineCallbacks
def fetch_next(self, wait=True):
timeout = Cursor._wait_to_timeout(wait)
deadline = None if timeout is None else time.time() + timeout
def wait_canceller(d):
d.errback(ReqlTimeoutError())
while len(self.items) == 0 and self.error is None:
self._maybe_fetch_batch()
wait = Deferred(canceller=wait_canceller)
self.waiting.append(wait)
if deadline is not None:
timeout = max(0, deadline - time.time())
reactor.callLater(timeout, lambda: wait.cancel())
yield wait
returnValue(not self._is_empty() or self._has_error())
def _has_error(self):
return self.error and (not isinstance(self.error, RqlCursorEmpty))
def _is_empty(self):
return isinstance(self.error, RqlCursorEmpty) and len(self.items) == 0
def _get_next(self, timeout):
if len(self.items) == 0 and self.error is not None:
return defer.fail(self.error)
def raise_timeout(errback):
if isinstance(errback.value, CancelledError):
raise ReqlTimeoutError()
else:
raise errback.value
item_defer = self.items.get()
if timeout is not None:
item_defer.addErrback(raise_timeout)
timer = reactor.callLater(timeout, lambda: item_defer.cancel())
self._maybe_fetch_batch()
return item_defer
class ConnectionInstance(object):
def __init__(self, parent, start_reactor=False):
self._parent = parent
self._closing = False
self._connection = None
self._user_queries = {}
self._cursor_cache = {}
if start_reactor:
reactor.run()
def client_port(self):
if self.is_open():
return self._connection.transport.getHost().port
def client_address(self):
if self.is_open():
return self._connection.transport.getHost().host
def _handleResponse(self, token, data):
try:
cursor = self._cursor_cache.get(token)
if cursor is not None:
cursor._extend(data)
elif token in self._user_queries:
query, deferred = self._user_queries[token]
res = Response(token, data,
self._parent._get_json_decoder(query))
if res.type == pResponse.SUCCESS_ATOM:
deferred.callback(maybe_profile(res.data[0], res))
elif res.type in (pResponse.SUCCESS_SEQUENCE,
pResponse.SUCCESS_PARTIAL):
cursor = TwistedCursor(self, query, res)
deferred.callback(maybe_profile(cursor, res))
elif res.type == pResponse.WAIT_COMPLETE:
deferred.callback(None)
elif res.type == pResponse.SERVER_INFO:
deferred.callback(res.data[0])
else:
deferred.errback(res.make_error(query))
del self._user_queries[token]
elif not self._closing:
raise ReqlDriverError("Unexpected response received.")
except Exception as e:
if not self._closing:
self.close(exception=e)
@inlineCallbacks
def _connectTimeout(self, factory, timeout):
try:
# TODO: use ssl options
# TODO: this doesn't work for literal IPv6 addresses like '::1'
args = "tcp:%s:%d" % (self._parent.host, self._parent.port)
if timeout is not None:
args = args + (":timeout=%d" % timeout)
endpoint = clientFromString(reactor, args)
p = yield endpoint.connect(factory)
returnValue(p)
except TimeoutError:
raise ReqlTimeoutError()
@inlineCallbacks
def connect(self, timeout):
factory = DatabaseProtoFactory(timeout, self._handleResponse,
self._parent.handshake)
# We connect to the server, and send the handshake payload.
pConnection = None
try:
pConnection = yield self._connectTimeout(factory, timeout)
except Exception as e:
raise ReqlDriverError('Could not connect to {p.host}:{p.port}. Error: {exc}'
.format(p=self._parent, exc=str(e)))
# Now, we need to wait for the handshake.
try:
yield pConnection.wait_for_handshake
except ReqlAuthError as e:
raise
except ReqlTimeoutError as e:
raise ReqlTimeoutError(self._parent.host, self._parent.port)
except Exception as e:
raise ReqlDriverError('Connection interrupted during handshake with {p.host}:{p.port}. Error: {exc}'
.format(p=self._parent, exc=str(e)))
self._connection = pConnection
returnValue(self._parent)
def is_open(self):
return self._connection._open
def close(self, noreply_wait=False, token=None, exception=None):
d = defer.succeed(None)
self._closing = True
error_message = "Connection is closed"
if exception is not None:
error_message = "Connection is closed (reason: {exc})".format(exc=str(exception))
for cursor in list(self._cursor_cache.values()):
cursor._error(error_message)
for query, deferred in iter(self._user_queries.values()):
if not deferred.called:
deferred.errback(fail=ReqlDriverError(error_message))
self._user_queries = {}
self._cursor_cache = {}
if noreply_wait:
noreply = Query(pQuery.NOREPLY_WAIT, token, None, None)
d = self.run_query(noreply, False)
def closeConnection(res):
self._connection.transport.loseConnection()
return res
return d.addBoth(closeConnection)
@inlineCallbacks
def run_query(self, query, noreply):
response_defer = Deferred()
if not noreply:
self._user_queries[query.token] = (query, response_defer)
# Send the query
self._connection.transport.write(query.serialize(self._parent._get_json_encoder(query)))
if noreply:
returnValue(None)
else:
res = yield response_defer
returnValue(res)
class Connection(ConnectionBase):
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(ConnectionInstance, *args, **kwargs)
@inlineCallbacks
def reconnect(self, noreply_wait=True, timeout=None):
yield self.close(noreply_wait)
res = yield super(Connection, self).reconnect(noreply_wait, timeout)
returnValue(res)
@inlineCallbacks
def close(self, *args, **kwargs):
res = yield super(Connection, self).close(*args, **kwargs) or None
returnValue(res)
@inlineCallbacks
def noreply_wait(self, *args, **kwargs):
res = yield super(Connection, self).noreply_wait(*args, **kwargs)
returnValue(res)
@inlineCallbacks
def server(self, *args, **kwargs):
res = yield super(Connection, self).server(*args, **kwargs)
returnValue(res)
@inlineCallbacks
def _start(self, *args, **kwargs):
res = yield super(Connection, self)._start(*args, **kwargs)
returnValue(res)
@inlineCallbacks
def _continue(self, *args, **kwargs):
res = yield super(Connection, self)._continue(*args, **kwargs)
returnValue(res)
@inlineCallbacks
def _stop(self, *args, **kwargs):
res = yield super(Connection, self)._stop(*args, **kwargs)
returnValue(res) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
from __future__ import division,print_function,unicode_literals
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {'HIGH_ENTROPY_VA'} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
    '''
    Check for position independent executable (PIE), allowing for address space randomization.
    '''
    proc = subprocess.Popen([READELF_CMD, '-h', '-W', executable],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE)
    out, _ = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    # A PIE binary has ELF header type DYN (shared object).
    return any(
        len(fields) >= 2 and fields[0] == b'Type:' and fields[1] == b'DYN'
        for fields in (raw.split() for raw in out.split(b'\n'))
    )
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.split(b'\n'):
if line.startswith(b'Program Headers:'):
in_headers = True
if line == b'':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find(b'Type')
ofs_offset = line.find(b'Offset')
ofs_flags = line.find(b'Flg')
ofs_align = line.find(b'Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
raise ValueError('Cannot parse elfread -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
    '''
    Check that no sections are writable and executable (including the stack)
    '''
    headers = get_ELF_program_headers(executable)
    # A GNU_STACK program header must be present...
    stack_marked = any(typ == b'GNU_STACK' for (typ, _) in headers)
    # ...and no segment may be both writable and executable.
    wx_present = any(b'W' in flags and b'E' in flags for (_, flags) in headers)
    return stack_marked and not wx_present
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == b'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
    '''
    Check for use of stack canary
    '''
    proc = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE)
    out, _ = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    # Stack-protected binaries reference the __stack_chk_fail symbol.
    return any(b'__stack_chk_fail' in raw for raw in out.split(b'\n'))
def get_PE_dll_characteristics(executable):
    '''
    Get PE DllCharacteristics bits.
    Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
    and bits is the DllCharacteristics value.
    '''
    p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    arch = ''
    bits = 0
    # BUG FIX: communicate() yields bytes; splitting on '\n' and comparing
    # tokens to str literals fails under Python 3 (the ELF checks above all
    # use b'\n'). Decode once so the str comparisons below are valid.
    for line in stdout.decode(encoding='ascii', errors='replace').split('\n'):
        tokens = line.split()
        if len(tokens)>=2 and tokens[0] == 'architecture:':
            arch = tokens[1].rstrip(',')
        if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
            bits = int(tokens[1],16)
    return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
def check_PE_DYNAMIC_BASE(executable):
    '''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
    _, bits = get_PE_dll_characteristics(executable)
    required = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
    return (bits & required) == required
# On 64 bit, must support high-entropy 64-bit address space layout randomization in addition to DYNAMIC_BASE
# to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
    '''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
    arch, bits = get_PE_dll_characteristics(executable)
    if arch == 'i386:x86-64':
        required = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
    else:
        # Unnecessary on 32-bit
        assert(arch == 'i386')
        required = 0
    return (bits & required) == required
def check_PE_NX(executable):
    '''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
    _, bits = get_PE_dll_characteristics(executable)
    nx_flag = IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
    return (bits & nx_flag) == nx_flag
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
    '''Return 'PE', 'ELF' or None based on the file's magic bytes.'''
    # BUG FIX: previously this opened the *global* 'filename' instead of the
    # 'executable' parameter; it only worked because the caller's loop
    # variable happened to be named 'filename'.
    with open(executable, 'rb') as f:
        magic = f.read(4)
    if magic.startswith(b'MZ'):
        return 'PE'
    elif magic.startswith(b'\x7fELF'):
        return 'ELF'
    return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
warning = []
for (name, func) in CHECKS[etype]:
if not func(filename):
if name in NONFATAL:
warning.append(name)
else:
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
if warning:
print('%s: warning %s' % (filename, ' '.join(warning)))
except IOError:
print('%s: cannot open' % filename)
retval = 1
sys.exit(retval) | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
#include "state.h"
#include "../common/dictionary.h"
#include "../common/platform.h"
#include "huffman.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#ifdef BROTLI_REPORTING
/* When BROTLI_REPORTING is defined extra reporting module have to be linked. */
void BrotliDecoderOnStart(const BrotliDecoderState* s);
void BrotliDecoderOnFinish(const BrotliDecoderState* s);
#define BROTLI_DECODER_ON_START(s) BrotliDecoderOnStart(s);
#define BROTLI_DECODER_ON_FINISH(s) BrotliDecoderOnFinish(s);
#else
#if !defined(BROTLI_DECODER_ON_START)
#define BROTLI_DECODER_ON_START(s) (void)(s);
#endif
#if !defined(BROTLI_DECODER_ON_FINISH)
#define BROTLI_DECODER_ON_FINISH(s) (void)(s);
#endif
#endif
BROTLI_BOOL BrotliDecoderStateInit(BrotliDecoderState* s,
brotli_alloc_func alloc_func, brotli_free_func free_func, void* opaque) {
BROTLI_DECODER_ON_START(s);
if (!alloc_func) {
s->alloc_func = BrotliDefaultAllocFunc;
s->free_func = BrotliDefaultFreeFunc;
s->memory_manager_opaque = 0;
} else {
s->alloc_func = alloc_func;
s->free_func = free_func;
s->memory_manager_opaque = opaque;
}
s->error_code = 0; /* BROTLI_DECODER_NO_ERROR */
BrotliInitBitReader(&s->br);
s->state = BROTLI_STATE_UNINITED;
s->large_window = 0;
s->substate_metablock_header = BROTLI_STATE_METABLOCK_HEADER_NONE;
s->substate_uncompressed = BROTLI_STATE_UNCOMPRESSED_NONE;
s->substate_decode_uint8 = BROTLI_STATE_DECODE_UINT8_NONE;
s->substate_read_block_length = BROTLI_STATE_READ_BLOCK_LENGTH_NONE;
s->buffer_length = 0;
s->loop_counter = 0;
s->pos = 0;
s->rb_roundtrips = 0;
s->partial_pos_out = 0;
s->used_input = 0;
s->block_type_trees = NULL;
s->block_len_trees = NULL;
s->ringbuffer = NULL;
s->ringbuffer_size = 0;
s->new_ringbuffer_size = 0;
s->ringbuffer_mask = 0;
s->context_map = NULL;
s->context_modes = NULL;
s->dist_context_map = NULL;
s->context_map_slice = NULL;
s->dist_context_map_slice = NULL;
s->literal_hgroup.codes = NULL;
s->literal_hgroup.htrees = NULL;
s->insert_copy_hgroup.codes = NULL;
s->insert_copy_hgroup.htrees = NULL;
s->distance_hgroup.codes = NULL;
s->distance_hgroup.htrees = NULL;
s->is_last_metablock = 0;
s->is_uncompressed = 0;
s->is_metadata = 0;
s->should_wrap_ringbuffer = 0;
s->canny_ringbuffer_allocation = 1;
s->window_bits = 0;
s->max_distance = 0;
s->dist_rb[0] = 16;
s->dist_rb[1] = 15;
s->dist_rb[2] = 11;
s->dist_rb[3] = 4;
s->dist_rb_idx = 0;
s->block_type_trees = NULL;
s->block_len_trees = NULL;
s->mtf_upper_bound = 63;
s->compound_dictionary = NULL;
s->dictionary =
BrotliSharedDictionaryCreateInstance(alloc_func, free_func, opaque);
if (!s->dictionary) return BROTLI_FALSE;
s->metadata_start_func = NULL;
s->metadata_chunk_func = NULL;
s->metadata_callback_opaque = 0;
return BROTLI_TRUE;
}
void BrotliDecoderStateMetablockBegin(BrotliDecoderState* s) {
s->meta_block_remaining_len = 0;
s->block_length[0] = BROTLI_BLOCK_SIZE_CAP;
s->block_length[1] = BROTLI_BLOCK_SIZE_CAP;
s->block_length[2] = BROTLI_BLOCK_SIZE_CAP;
s->num_block_types[0] = 1;
s->num_block_types[1] = 1;
s->num_block_types[2] = 1;
s->block_type_rb[0] = 1;
s->block_type_rb[1] = 0;
s->block_type_rb[2] = 1;
s->block_type_rb[3] = 0;
s->block_type_rb[4] = 1;
s->block_type_rb[5] = 0;
s->context_map = NULL;
s->context_modes = NULL;
s->dist_context_map = NULL;
s->context_map_slice = NULL;
s->literal_htree = NULL;
s->dist_context_map_slice = NULL;
s->dist_htree_index = 0;
s->context_lookup = NULL;
s->literal_hgroup.codes = NULL;
s->literal_hgroup.htrees = NULL;
s->insert_copy_hgroup.codes = NULL;
s->insert_copy_hgroup.htrees = NULL;
s->distance_hgroup.codes = NULL;
s->distance_hgroup.htrees = NULL;
}
void BrotliDecoderStateCleanupAfterMetablock(BrotliDecoderState* s) {
BROTLI_DECODER_FREE(s, s->context_modes);
BROTLI_DECODER_FREE(s, s->context_map);
BROTLI_DECODER_FREE(s, s->dist_context_map);
BROTLI_DECODER_FREE(s, s->literal_hgroup.htrees);
BROTLI_DECODER_FREE(s, s->insert_copy_hgroup.htrees);
BROTLI_DECODER_FREE(s, s->distance_hgroup.htrees);
}
void BrotliDecoderStateCleanup(BrotliDecoderState* s) {
BrotliDecoderStateCleanupAfterMetablock(s);
BROTLI_DECODER_ON_FINISH(s);
BROTLI_DECODER_FREE(s, s->compound_dictionary);
BrotliSharedDictionaryDestroyInstance(s->dictionary);
s->dictionary = NULL;
BROTLI_DECODER_FREE(s, s->ringbuffer);
BROTLI_DECODER_FREE(s, s->block_type_trees);
}
BROTLI_BOOL BrotliDecoderHuffmanTreeGroupInit(BrotliDecoderState* s,
HuffmanTreeGroup* group, brotli_reg_t alphabet_size_max,
brotli_reg_t alphabet_size_limit, brotli_reg_t ntrees) {
/* 376 = 256 (1-st level table) + 4 + 7 + 15 + 31 + 63 (2-nd level mix-tables)
This number is discovered "unlimited" "enough" calculator; it is actually
a wee bigger than required in several cases (especially for alphabets with
less than 16 symbols). */
const size_t max_table_size = alphabet_size_limit + 376;
const size_t code_size = sizeof(HuffmanCode) * ntrees * max_table_size;
const size_t htree_size = sizeof(HuffmanCode*) * ntrees;
/* Pointer alignment is, hopefully, wider than sizeof(HuffmanCode). */
HuffmanCode** p = (HuffmanCode**)BROTLI_DECODER_ALLOC(s,
code_size + htree_size);
group->alphabet_size_max = (uint16_t)alphabet_size_max;
group->alphabet_size_limit = (uint16_t)alphabet_size_limit;
group->num_htrees = (uint16_t)ntrees;
group->htrees = p;
group->codes = p ? (HuffmanCode*)(&p[ntrees]) : NULL;
return !!p;
}
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif | c | github | https://github.com/nodejs/node | deps/brotli/c/dec/state.c |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/** Generated by rm commands */
public class PathIsNotEmptyDirectoryException extends PathExistsException {
/** @param path for the exception */
public PathIsNotEmptyDirectoryException(String path) {
super(path, "Directory is not empty");
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIsNotEmptyDirectoryException.java |
import subprocess
def sam2bam_sort(samfiles, thread=1, sortType=''):
    """Convert SAM files to BAM, sort them, index them, and clean up.

    * samfiles: list of input ``.sam`` paths
    * thread:   number of samtools threads (``-@``)
    * sortType: ``'name'`` sorts by read name (``samtools sort -n``);
                any other value sorts by coordinate.
    Returns the list of sorted BAM paths (``<name>.sort.bam``).
    The input SAM files and the intermediate unsorted BAMs are removed.

    NOTE(review): name-sorted BAMs are still passed to ``samtools index``,
    which only works on coordinate-sorted input -- confirm callers never
    rely on the index when sortType == 'name'.
    """
    if not samfiles:
        # Nothing to do; avoid invoking the shell with an empty command.
        return []
    tag = ' -n' if sortType == 'name' else ''
    sorted_files = []
    sam2bamCmds = []
    rmSamCmds = []
    sortBamCmds = []
    indexCmds = []
    rmBamCmds = []
    for sam in samfiles:
        bam = sam[:-3] + 'bam'
        sort = bam[:-3] + 'sort'
        sort_bam = sort + '.bam'
        sorted_files.append(sort_bam)
        # SAM -> BAM conversion (run serially, chained with &&).
        sam2bamCmds.append('samtools view -@ {thread} -bS {sam} '
                           '-o {bam}'.format(sam=sam, bam=bam, thread=thread))
        # Remove the redundant SAM (run as background jobs).
        rmSamCmds.append('rm {sam}'.format(sam=sam))
        # Sort the BAM (run serially, chained with &&).
        sortBamCmds.append('samtools sort{tag} -m 4G -@ {thread} -T {sort} {bam} '
                           '-o {sortbam}'.format(tag=tag, thread=thread, sort=sort,
                                                 bam=bam, sortbam=sort_bam))
        # Index the sorted BAM (run as background jobs).
        indexCmds.append('samtools index {sortbam}'.format(sortbam=sort_bam))
        # Remove the unsorted BAM (run as background jobs).
        rmBamCmds.append('rm {bam}'.format(bam=bam))
    subprocess.check_call(' && '.join(sam2bamCmds), shell=True)
    # Previously the trailing separator was sliced off ('[:-3]'), which left
    # the background 'rm' jobs unwaited-for; append 'wait' like the other
    # backgrounded batches so the removals finish before we proceed.
    subprocess.check_call(' & '.join(rmSamCmds) + ' & wait', shell=True)
    subprocess.check_call(' && '.join(sortBamCmds), shell=True)
    subprocess.check_call(' & '.join(indexCmds) + ' & wait', shell=True)
    subprocess.check_call(' & '.join(rmBamCmds) + ' & wait', shell=True)
    return sorted_files
def bam2sam(bamfiles):
    """Convert each BAM file back to SAM with ``samtools view``.

    Every conversion is launched as a background shell job and a trailing
    ``wait`` blocks until all have finished.  Returns the list of the
    generated ``.sam`` paths.
    """
    samFiles = [bam[:-3] + 'sam' for bam in bamfiles]
    jobs = ['samtools view {bam} > {sam} & '.format(bam=bam, sam=sam)
            for bam, sam in zip(bamfiles, samFiles)]
    subprocess.check_call(''.join(jobs) + 'wait', shell=True)
    return samFiles
#===========================================================================
#===========================================================================
def extract_bam(sortedBamFiles, extractType, seqType='pair', thread=1):
    """Extract the mapped or unmapped reads from sorted BAM files.

    * sortedBamFiles: list of sorted BAM paths
    * extractType: ``'map'`` keeps mapped reads (``-F`` drops flagged reads);
                   any other value keeps unmapped reads (``-f``)
    * seqType: ``'single'`` (flag 4: read unmapped) or ``'pair'``
               (flag 12: read and mate unmapped)
    * thread: number of samtools threads
    Returns the list of output BAM paths.
    Raises ValueError for an unrecognized seqType (previously this fell
    through and crashed later with a NameError on ``sam_tag``).
    """
    if seqType == 'single':
        sam_tag = '4'
    elif seqType == 'pair':
        sam_tag = '12'
    else:
        raise ValueError("seqType must be 'single' or 'pair', got %r" % (seqType,))
    flag_opt = '-F' if extractType == 'map' else '-f'
    suffix = 'map.bam' if extractType == 'map' else 'unmap.bam'
    returnFile = []
    cmds = []
    for bam in sortedBamFiles:
        filename = bam[:-3] + suffix
        returnFile.append(filename)
        cmds.append('samtools view -@ {thread} {opt} {tag} -bh {input} > {output}'.format(
            thread=str(thread), opt=flag_opt, tag=sam_tag, input=bam, output=filename))
    if cmds:
        # check_call (not call) so a failed samtools run is not silently
        # ignored -- consistent with every other helper in this module.
        subprocess.check_call(' && '.join(cmds), shell=True)
    return returnFile
def merge_bam(bamfiles, outputbam):
    """Merge ``bamfiles`` into the single file ``outputbam`` using
    ``samtools merge -f`` (``-f`` overwrites any existing output)."""
    merge_cmd = 'samtools merge -f {output} {input}'.format(
        output=outputbam, input=' '.join(bamfiles))
    subprocess.check_call(merge_cmd, shell=True)
def index_bam(bamFiles):
    """Index each BAM file with ``samtools index``.

    All index jobs are launched as background shell jobs and a trailing
    ``wait`` blocks until every one has finished.
    """
    cmd = ''
    for bam in bamFiles:
        cmd = cmd + 'samtools index {bam} & '.format(bam=bam)
    subprocess.check_call(cmd + 'wait', shell=True)
    # print() works under both Python 2 and 3 (was the py2-only: print 'done')
    print('done')
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.GroupRebalanceConfig;
import org.apache.kafka.clients.MockClient;
import org.apache.kafka.clients.consumer.CloseOptions;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.DisconnectException;
import org.apache.kafka.common.errors.FencedInstanceIdException;
import org.apache.kafka.common.errors.GroupMaxSizeReachedException;
import org.apache.kafka.common.errors.InconsistentGroupProtocolException;
import org.apache.kafka.common.errors.UnknownMemberIdException;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.internals.ClusterResourceListeners;
import org.apache.kafka.common.message.HeartbeatResponseData;
import org.apache.kafka.common.message.JoinGroupRequestData;
import org.apache.kafka.common.message.JoinGroupResponseData;
import org.apache.kafka.common.message.LeaveGroupRequestData;
import org.apache.kafka.common.message.LeaveGroupResponseData;
import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse;
import org.apache.kafka.common.message.SyncGroupRequestData;
import org.apache.kafka.common.message.SyncGroupResponseData;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.requests.FindCoordinatorResponse;
import org.apache.kafka.common.requests.HeartbeatRequest;
import org.apache.kafka.common.requests.HeartbeatResponse;
import org.apache.kafka.common.requests.JoinGroupRequest;
import org.apache.kafka.common.requests.JoinGroupResponse;
import org.apache.kafka.common.requests.LeaveGroupRequest;
import org.apache.kafka.common.requests.LeaveGroupResponse;
import org.apache.kafka.common.requests.RequestTestUtils;
import org.apache.kafka.common.requests.SyncGroupRequest;
import org.apache.kafka.common.requests.SyncGroupResponse;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.test.TestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;
import java.util.stream.Stream;
import static java.util.Collections.emptyMap;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
public class AbstractCoordinatorTest {
    // Presumably the empty assignment payload returned by the dummy
    // coordinator's sync-group handling -- usage is outside this chunk.
    private static final ByteBuffer EMPTY_DATA = ByteBuffer.wrap(new byte[0]);
    // Timing configuration shared by every test (all values in milliseconds).
    private static final int REBALANCE_TIMEOUT_MS = 60000;
    private static final int SESSION_TIMEOUT_MS = 10000;
    private static final int HEARTBEAT_INTERVAL_MS = 3000;
    private static final int RETRY_BACKOFF_MS = 100;
    private static final int RETRY_BACKOFF_MAX_MS = 1000;
    private static final int REQUEST_TIMEOUT_MS = 40000;
    // Group identity and protocol constants used by the dummy coordinator.
    private static final String GROUP_ID = "dummy-group";
    private static final String METRIC_GROUP_PREFIX = "consumer";
    private static final String PROTOCOL_TYPE = "dummy";
    private static final String PROTOCOL_NAME = "dummy-subprotocol";
    // Per-test fixtures, (re)created by setupCoordinator().
    private Node node;
    private Metrics metrics;
    private MockTime mockTime;
    private Node coordinatorNode;
    private MockClient mockClient;
    private DummyCoordinator coordinator;
    private ConsumerNetworkClient consumerClient;
    // Canned member/leader identity used in mocked broker responses.
    private final String memberId = "memberId";
    private final String leaderId = "leaderId";
    // -1 denotes "no generation" in join-group responses.
    private final int defaultGeneration = -1;
    @AfterEach
    public void closeCoordinator() {
        // Best-effort cleanup: closeQuietly swallows exceptions so a failing
        // close cannot mask the actual test result.
        Utils.closeQuietly(coordinator, "close coordinator");
        Utils.closeQuietly(consumerClient, "close consumer client");
    }
    // Convenience overload: default backoffs and rebalance timeout, no
    // group.instance.id, no custom heartbeat thread.
    private void setupCoordinator() {
        setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS,
            Optional.empty(), Optional.empty());
    }
    // Convenience overload: custom backoffs, otherwise defaults.
    private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs) {
        setupCoordinator(retryBackoffMs, retryBackoffMaxMs, REBALANCE_TIMEOUT_MS,
            Optional.empty(), Optional.empty());
    }
    // Builds the full fixture graph: mock time/client/metadata, a
    // ConsumerNetworkClient wired to the mock client, and the DummyCoordinator
    // under test. Every test calls one of the setupCoordinator overloads first.
    private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs, int rebalanceTimeoutMs, Optional<String> groupInstanceId, Optional<Supplier<BaseHeartbeatThread>> heartbeatThreadSupplier) {
        LogContext logContext = new LogContext();
        this.mockTime = new MockTime();
        ConsumerMetadata metadata = new ConsumerMetadata(retryBackoffMs, retryBackoffMaxMs, 60 * 60 * 1000L,
                false, false, new SubscriptionState(logContext, AutoOffsetResetStrategy.EARLIEST),
                logContext, new ClusterResourceListeners());
        this.mockClient = new MockClient(mockTime, metadata);
        this.consumerClient = new ConsumerNetworkClient(logContext,
                mockClient,
                metadata,
                mockTime,
                retryBackoffMs,
                REQUEST_TIMEOUT_MS,
                HEARTBEAT_INTERVAL_MS);
        metrics = new Metrics(mockTime);
        // Single-broker metadata; the lone node doubles as the group coordinator.
        mockClient.updateMetadata(RequestTestUtils.metadataUpdateWith(1, emptyMap()));
        this.node = metadata.fetch().nodes().get(0);
        // Coordinator connections use a distinct node id derived from the
        // broker id (Integer.MAX_VALUE - id), mirroring the client convention.
        this.coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
        GroupRebalanceConfig rebalanceConfig = new GroupRebalanceConfig(SESSION_TIMEOUT_MS,
                                                                        rebalanceTimeoutMs,
                                                                        HEARTBEAT_INTERVAL_MS,
                                                                        GROUP_ID,
                                                                        groupInstanceId,
                                                                        null,
                                                                        retryBackoffMs,
                                                                        retryBackoffMaxMs);
        this.coordinator = new DummyCoordinator(rebalanceConfig,
                                                consumerClient,
                                                metrics,
                                                mockTime,
                                                heartbeatThreadSupplier);
    }
    // Drives the coordinator through FindCoordinator -> JoinGroup (as a
    // follower) -> SyncGroup so that a test starts from an active group with
    // generation 1. Response order matters: MockClient consumes them FIFO.
    private void joinGroup() {
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        final int generation = 1;
        mockClient.prepareResponse(joinGroupFollowerResponse(generation, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.NONE));
        mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
        coordinator.ensureActiveGroup();
    }
    /**
     * Verifies that every coordinator sensor is registered at construction
     * time, that recording into each sensor is reflected in the derived
     * metric values, and that the windowed rate metrics decay and reset
     * once the sample window elapses.
     */
    @Test
    public void testMetrics() {
        setupCoordinator();
        // All sensors must exist before any activity is recorded.
        assertNotNull(getMetric("heartbeat-response-time-max"));
        assertNotNull(getMetric("heartbeat-rate"));
        assertNotNull(getMetric("heartbeat-total"));
        assertNotNull(getMetric("last-heartbeat-seconds-ago"));
        assertNotNull(getMetric("join-time-avg"));
        assertNotNull(getMetric("join-time-max"));
        assertNotNull(getMetric("join-rate"));
        assertNotNull(getMetric("join-total"));
        assertNotNull(getMetric("sync-time-avg"));
        assertNotNull(getMetric("sync-time-max"));
        assertNotNull(getMetric("sync-rate"));
        assertNotNull(getMetric("sync-total"));
        assertNotNull(getMetric("rebalance-latency-avg"));
        assertNotNull(getMetric("rebalance-latency-max"));
        assertNotNull(getMetric("rebalance-latency-total"));
        assertNotNull(getMetric("rebalance-rate-per-hour"));
        assertNotNull(getMetric("rebalance-total"));
        assertNotNull(getMetric("last-rebalance-seconds-ago"));
        assertNotNull(getMetric("failed-rebalance-rate-per-hour"));
        assertNotNull(getMetric("failed-rebalance-total"));
        // Heartbeat sensor: max/rate/total derived from three recordings.
        metrics.sensor("heartbeat-latency").record(1.0d);
        metrics.sensor("heartbeat-latency").record(6.0d);
        metrics.sensor("heartbeat-latency").record(2.0d);
        assertEquals(6.0d, getMetric("heartbeat-response-time-max").metricValue());
        assertEquals(0.1d, getMetric("heartbeat-rate").metricValue());
        assertEquals(3.0d, getMetric("heartbeat-total").metricValue());
        // -1 means no heartbeat has been sent yet.
        assertEquals(-1.0d, getMetric("last-heartbeat-seconds-ago").metricValue());
        coordinator.heartbeat().sentHeartbeat(mockTime.milliseconds());
        assertEquals(0.0d, getMetric("last-heartbeat-seconds-ago").metricValue());
        mockTime.sleep(10 * 1000L);
        assertEquals(10.0d, getMetric("last-heartbeat-seconds-ago").metricValue());
        // Join sensor.
        metrics.sensor("join-latency").record(1.0d);
        metrics.sensor("join-latency").record(6.0d);
        metrics.sensor("join-latency").record(2.0d);
        assertEquals(3.0d, getMetric("join-time-avg").metricValue());
        assertEquals(6.0d, getMetric("join-time-max").metricValue());
        assertEquals(0.1d, getMetric("join-rate").metricValue());
        assertEquals(3.0d, getMetric("join-total").metricValue());
        // Sync sensor.
        metrics.sensor("sync-latency").record(1.0d);
        metrics.sensor("sync-latency").record(6.0d);
        metrics.sensor("sync-latency").record(2.0d);
        assertEquals(3.0d, getMetric("sync-time-avg").metricValue());
        assertEquals(6.0d, getMetric("sync-time-max").metricValue());
        assertEquals(0.1d, getMetric("sync-rate").metricValue());
        assertEquals(3.0d, getMetric("sync-total").metricValue());
        // Rebalance sensor: avg/max/cumulative-total plus hourly rate.
        metrics.sensor("rebalance-latency").record(1.0d);
        metrics.sensor("rebalance-latency").record(6.0d);
        metrics.sensor("rebalance-latency").record(2.0d);
        assertEquals(3.0d, getMetric("rebalance-latency-avg").metricValue());
        assertEquals(6.0d, getMetric("rebalance-latency-max").metricValue());
        assertEquals(9.0d, getMetric("rebalance-latency-total").metricValue());
        assertEquals(3.0d, getMetric("rebalance-rate-per-hour").metricValue());
        assertEquals(3.0d, getMetric("rebalance-total").metricValue());
        // Failed-rebalance sensor.
        metrics.sensor("failed-rebalance").record(1.0d);
        metrics.sensor("failed-rebalance").record(6.0d);
        metrics.sensor("failed-rebalance").record(2.0d);
        assertEquals(3.0d, getMetric("failed-rebalance-rate-per-hour").metricValue());
        assertEquals(3.0d, getMetric("failed-rebalance-total").metricValue());
        // -1 means no rebalance has completed yet.
        assertEquals(-1.0d, getMetric("last-rebalance-seconds-ago").metricValue());
        coordinator.setLastRebalanceTime(mockTime.milliseconds());
        assertEquals(0.0d, getMetric("last-rebalance-seconds-ago").metricValue());
        mockTime.sleep(10 * 1000L);
        assertEquals(10.0d, getMetric("last-rebalance-seconds-ago").metricValue());
        // Advance to just inside the full sample window: rates decay but stay non-zero.
        long windowLength = metrics.config().samples() * 3600_000L;
        mockTime.sleep(windowLength - 10 * 1000L - 1);
        assertEquals(3d / metrics.config().samples(), (double) getMetric("failed-rebalance-rate-per-hour").metricValue(), 0.1d);
        assertEquals(3d / metrics.config().samples(), (double) getMetric("rebalance-rate-per-hour").metricValue(), 0.1d);
        // WindowLength have passed, triggering metric reset
        mockTime.sleep(1L);
        assertEquals(0d, getMetric("failed-rebalance-rate-per-hour").metricValue());
        assertEquals(0d, getMetric("rebalance-rate-per-hour").metricValue());
    }
    // Looks up a coordinator metric by name in the standard metric group;
    // returns null if it was never registered.
    private KafkaMetric getMetric(final String name) {
        return metrics.metrics().get(metrics.metricName(name, "consumer-coordinator-metrics"));
    }
    /**
     * While the coordinator connection is blacked out, repeated
     * FindCoordinator attempts must back off exponentially (with jitter),
     * so total elapsed mock time must fall inside the jittered bounds.
     */
    @Test
    public void testCoordinatorDiscoveryExponentialBackoff() {
        // With exponential backoff, we will get retries at 10, 20, 40, 80, 100 ms (with jitter)
        int shortRetryBackoffMs = 10;
        int shortRetryBackoffMaxMs = 100;
        setupCoordinator(shortRetryBackoffMs, shortRetryBackoffMaxMs);

        for (int i = 0; i < 5; i++) {
            mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        }

        // cut out the coordinator for 100 milliseconds to simulate a disconnect.
        // after backing off, we should be able to connect.
        mockClient.backoff(coordinatorNode, 100L);
        long initialTime = mockTime.milliseconds();
        coordinator.ensureCoordinatorReady(mockTime.timer(Long.MAX_VALUE));
        long endTime = mockTime.milliseconds();

        // Accumulate the jittered lower/upper bounds for the first 4 retries.
        long lowerBoundBackoffMs = 0;
        long upperBoundBackoffMs = 0;
        for (int i = 0; i < 4; i++) {
            lowerBoundBackoffMs += (long) (shortRetryBackoffMs * Math.pow(CommonClientConfigs.RETRY_BACKOFF_EXP_BASE, i) * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER));
            upperBoundBackoffMs += (long) (shortRetryBackoffMs * Math.pow(CommonClientConfigs.RETRY_BACKOFF_EXP_BASE, i) * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER));
        }

        long timeElapsed = endTime - initialTime;
        assertTrue(timeElapsed >= lowerBoundBackoffMs);
        // Allow one extra base backoff of slack on the upper bound.
        assertTrue(timeElapsed <= upperBoundBackoffMs + shortRetryBackoffMs);
    }
    /**
     * A pending wakeup must be ignored by the async coordinator-discovery
     * path but surface as a WakeupException from the blocking variant,
     * even with a zero timer.
     */
    @Test
    public void testWakeupFromEnsureCoordinatorReady() {
        setupCoordinator();

        consumerClient.wakeup();

        // No wakeup should occur from the async variation.
        coordinator.ensureCoordinatorReadyAsync();

        // But should wakeup in sync variation even if timer is 0.
        assertThrows(WakeupException.class, () ->
            coordinator.ensureCoordinatorReady(mockTime.timer(0))
        );
    }
    /**
     * A joinGroupIfNeeded call that times out must leave the in-flight
     * JoinGroup request pending, and a subsequent attempt must be able to
     * pick up the late response and complete the rebalance.
     */
    @Test
    public void testTimeoutAndRetryJoinGroupIfNeeded() throws Exception {
        setupCoordinator();
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(mockTime.timer(0));

        // Run the join on a separate thread so the test thread can advance mock time.
        ExecutorService executor = Executors.newFixedThreadPool(1);
        try {
            Timer firstAttemptTimer = mockTime.timer(REQUEST_TIMEOUT_MS);
            Future<Boolean> firstAttempt = executor.submit(() -> coordinator.joinGroupIfNeeded(firstAttemptTimer));

            mockTime.sleep(REQUEST_TIMEOUT_MS);
            // First attempt expires without a response but the request stays in flight.
            assertFalse(firstAttempt.get());
            assertTrue(consumerClient.hasPendingRequests(coordinatorNode));

            mockClient.respond(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
            mockClient.prepareResponse(syncGroupResponse(Errors.NONE));

            Timer secondAttemptTimer = mockTime.timer(REQUEST_TIMEOUT_MS);
            Future<Boolean> secondAttempt = executor.submit(() -> coordinator.joinGroupIfNeeded(secondAttemptTimer));

            assertTrue(secondAttempt.get());
        } finally {
            executor.shutdownNow();
            executor.awaitTermination(1000, TimeUnit.MILLISECONDS);
        }
    }
@Test
public void testGroupMaxSizeExceptionIsFatal() {
setupCoordinator();
mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(mockTime.timer(0));
mockClient.prepareResponse(joinGroupFollowerResponse(defaultGeneration, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.GROUP_MAX_SIZE_REACHED));
RequestFuture<ByteBuffer> future = coordinator.sendJoinGroupRequest();
assertTrue(consumerClient.poll(future, mockTime.timer(REQUEST_TIMEOUT_MS)));
assertInstanceOf(future.exception().getClass(), Errors.GROUP_MAX_SIZE_REACHED.exception());
assertFalse(future.isRetriable());
}
    /**
     * The JoinGroup request uses an extended timeout (rebalance timeout plus
     * a lapse), not the default request timeout: the future must still be
     * pending after REQUEST_TIMEOUT_MS and only fail (with a disconnect)
     * once the extended deadline passes.
     */
    @Test
    public void testJoinGroupRequestTimeout() {
        setupCoordinator();
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(mockTime.timer(0));

        RequestFuture<ByteBuffer> future = coordinator.sendJoinGroupRequest();

        // Not yet expired at the ordinary request timeout.
        mockTime.sleep(REQUEST_TIMEOUT_MS + 1);
        assertFalse(consumerClient.poll(future, mockTime.timer(0)));

        mockTime.sleep(REBALANCE_TIMEOUT_MS - REQUEST_TIMEOUT_MS + AbstractCoordinator.JOIN_GROUP_TIMEOUT_LAPSE);
        assertTrue(consumerClient.poll(future, mockTime.timer(0)));
        assertInstanceOf(DisconnectException.class, future.exception());
    }
    /**
     * When the rebalance timeout is shorter than the default request timeout,
     * the JoinGroup deadline must be lower-bounded by the request timeout:
     * the future stays pending past the (smaller) rebalance deadline and only
     * fails once the request timeout has fully elapsed.
     */
    @Test
    public void testJoinGroupRequestTimeoutLowerBoundedByDefaultRequestTimeout() {
        int rebalanceTimeoutMs = REQUEST_TIMEOUT_MS - 10000;
        setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, rebalanceTimeoutMs, Optional.empty(), Optional.empty());
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(mockTime.timer(0));

        RequestFuture<ByteBuffer> future = coordinator.sendJoinGroupRequest();
        long expectedRequestDeadline = mockTime.milliseconds() + REQUEST_TIMEOUT_MS;

        // Past the rebalance-based deadline but before the request timeout.
        mockTime.sleep(rebalanceTimeoutMs + AbstractCoordinator.JOIN_GROUP_TIMEOUT_LAPSE + 1);
        assertFalse(consumerClient.poll(future, mockTime.timer(0)));

        mockTime.sleep(expectedRequestDeadline - mockTime.milliseconds() + 1);
        assertTrue(consumerClient.poll(future, mockTime.timer(0)));
        assertInstanceOf(DisconnectException.class, future.exception());
    }
    /**
     * The client must tolerate the maximum allowed rebalance timeout
     * (Integer.MAX_VALUE) without overflowing the join deadline arithmetic.
     */
    @Test
    public void testJoinGroupRequestMaxTimeout() {
        // Ensure we can handle the maximum allowed rebalance timeout
        setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE,
            Optional.empty(), Optional.empty());

        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(mockTime.timer(0));

        RequestFuture<ByteBuffer> future = coordinator.sendJoinGroupRequest();
        assertFalse(consumerClient.poll(future, mockTime.timer(0)));

        // Sleep past Integer.MAX_VALUE ms (note the long literal to avoid overflow).
        mockTime.sleep(Integer.MAX_VALUE + 1L);
        assertTrue(consumerClient.poll(future, mockTime.timer(0)));
    }
    /**
     * A MEMBER_ID_REQUIRED error must be surfaced to the caller while the
     * assigned member id is retained, and the subsequent rejoin must carry
     * that member id in the JoinGroup request.
     */
    @Test
    public void testJoinGroupRequestWithMemberIdRequired() {
        setupCoordinator();
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(mockTime.timer(0));

        mockClient.prepareResponse(joinGroupFollowerResponse(defaultGeneration, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.MEMBER_ID_REQUIRED));

        // The retry must echo the member id assigned by the first response.
        mockClient.prepareResponse(body -> {
            if (!(body instanceof JoinGroupRequest)) {
                return false;
            }
            JoinGroupRequest joinGroupRequest = (JoinGroupRequest) body;
            return joinGroupRequest.data().memberId().equals(memberId);
        }, joinGroupResponse(Errors.UNKNOWN_MEMBER_ID));

        RequestFuture<ByteBuffer> future = coordinator.sendJoinGroupRequest();
        assertTrue(consumerClient.poll(future, mockTime.timer(REQUEST_TIMEOUT_MS)));
        assertEquals(Errors.MEMBER_ID_REQUIRED.message(), future.exception().getMessage());
        assertTrue(coordinator.rejoinNeededOrPending());
        // Member id and generation survive the error.
        assertTrue(coordinator.hasValidMemberId());
        assertTrue(coordinator.hasMatchingGenerationId(defaultGeneration));
        future = coordinator.sendJoinGroupRequest();
        assertTrue(consumerClient.poll(future, mockTime.timer(REBALANCE_TIMEOUT_MS)));
    }
    /**
     * A FENCED_INSTANCE_ID error on JoinGroup must surface with the broker
     * message and be treated as fatal (non-retriable).
     */
    @Test
    public void testJoinGroupRequestWithFencedInstanceIdException() {
        setupCoordinator();
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(mockTime.timer(0));

        mockClient.prepareResponse(joinGroupFollowerResponse(defaultGeneration, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.FENCED_INSTANCE_ID));

        RequestFuture<ByteBuffer> future = coordinator.sendJoinGroupRequest();
        assertTrue(consumerClient.poll(future, mockTime.timer(REQUEST_TIMEOUT_MS)));
        assertEquals(Errors.FENCED_INSTANCE_ID.message(), future.exception().getMessage());
        // Make sure the exception is fatal.
        assertFalse(future.isRetriable());
    }
    /**
     * Exercises protocol-type/name validation across JoinGroup and SyncGroup
     * responses: matching (or absent) values succeed; any mismatch raises
     * InconsistentGroupProtocolException.
     */
    @Test
    public void testJoinGroupProtocolTypeAndName() {
        final String wrongProtocolType = "wrong-type";
        final String wrongProtocolName = "wrong-name";

        // No Protocol Type in both JoinGroup and SyncGroup responses
        assertTrue(joinGroupWithProtocolTypeAndName(null, null, null));

        // Protocol Type in both JoinGroup and SyncGroup responses
        assertTrue(joinGroupWithProtocolTypeAndName(PROTOCOL_TYPE, PROTOCOL_TYPE, PROTOCOL_NAME));

        // Wrong protocol type in the JoinGroupResponse
        assertThrows(InconsistentGroupProtocolException.class,
            () -> joinGroupWithProtocolTypeAndName("wrong", null, null));

        // Correct protocol type in the JoinGroupResponse
        // Wrong protocol type in the SyncGroupResponse
        // Correct protocol name in the SyncGroupResponse
        assertThrows(InconsistentGroupProtocolException.class,
            () -> joinGroupWithProtocolTypeAndName(PROTOCOL_TYPE, wrongProtocolType, PROTOCOL_NAME));

        // Correct protocol type in the JoinGroupResponse
        // Correct protocol type in the SyncGroupResponse
        // Wrong protocol name in the SyncGroupResponse
        assertThrows(InconsistentGroupProtocolException.class,
            () -> joinGroupWithProtocolTypeAndName(PROTOCOL_TYPE, PROTOCOL_TYPE, wrongProtocolName));
    }
    /**
     * A disconnect during JoinGroup must not discard the previously assigned
     * member id: the retried JoinGroup is expected to carry the same id.
     */
    @Test
    public void testRetainMemberIdAfterJoinGroupDisconnect() {
        setupCoordinator();

        String memberId = "memberId";
        int generation = 5;

        // Rebalance once to initialize the generation and memberId
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        expectJoinGroup("", generation, memberId);
        expectSyncGroup(generation, memberId);
        ensureActiveGroup(generation, memberId);

        // Force a rebalance
        coordinator.requestRejoin("Manual test trigger");
        assertTrue(coordinator.rejoinNeededOrPending());

        // Disconnect during the JoinGroup and ensure that the retry preserves the memberId
        int rejoinedGeneration = 10;
        expectDisconnectInJoinGroup(memberId);
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        expectJoinGroup(memberId, rejoinedGeneration, memberId);
        expectSyncGroup(rejoinedGeneration, memberId);
        ensureActiveGroup(rejoinedGeneration, memberId);
    }
    /**
     * A disconnect during SyncGroup must not discard the assigned member id:
     * the rebalance restarts from JoinGroup with the same id.
     */
    @Test
    public void testRetainMemberIdAfterSyncGroupDisconnect() {
        setupCoordinator();

        String memberId = "memberId";
        int generation = 5;

        // Rebalance once to initialize the generation and memberId
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        expectJoinGroup("", generation, memberId);
        expectSyncGroup(generation, memberId);
        ensureActiveGroup(generation, memberId);

        // Force a rebalance
        coordinator.requestRejoin("Manual test trigger");
        assertTrue(coordinator.rejoinNeededOrPending());

        // Disconnect during the SyncGroup and ensure that the retry preserves the memberId
        int rejoinedGeneration = 10;
        expectJoinGroup(memberId, rejoinedGeneration, memberId);
        expectDisconnectInSyncGroup(rejoinedGeneration, memberId);
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));

        // Note that the consumer always starts from JoinGroup after a failed rebalance
        expectJoinGroup(memberId, rejoinedGeneration, memberId);
        expectSyncGroup(rejoinedGeneration, memberId);
        ensureActiveGroup(rejoinedGeneration, memberId);
    }
    /**
     * Verifies the "reason" field sent in JoinGroup requests: empty on the
     * initial join, reset after a successful rebalance, derived from the
     * failure on an error-triggered rejoin, and truncated to 255 characters
     * when a caller supplies an overlong reason.
     */
    @Test
    public void testRejoinReason() {
        setupCoordinator();

        String memberId = "memberId";
        int generation = 5;

        // test initial reason
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        expectJoinGroup("", "", generation, memberId);

        // successful sync group response should reset reason
        expectSyncGroup(generation, memberId);
        ensureActiveGroup(generation, memberId);
        assertEquals("", coordinator.rejoinReason());

        // force a rebalance
        expectJoinGroup(memberId, "Manual test trigger", generation, memberId);
        expectSyncGroup(generation, memberId);
        coordinator.requestRejoin("Manual test trigger");
        ensureActiveGroup(generation, memberId);
        assertEquals("", coordinator.rejoinReason());

        // max group size reached
        mockClient.prepareResponse(joinGroupFollowerResponse(defaultGeneration, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.GROUP_MAX_SIZE_REACHED));
        coordinator.requestRejoin("Manual test trigger 2");
        Throwable e = assertThrows(GroupMaxSizeReachedException.class,
            () -> coordinator.joinGroupIfNeeded(mockTime.timer(100L)));

        // next join group request should contain exception message
        expectJoinGroup(memberId, String.format("rebalance failed due to %s", e.getClass().getSimpleName()), generation, memberId);
        expectSyncGroup(generation, memberId);
        ensureActiveGroup(generation, memberId);
        assertEquals("", coordinator.rejoinReason());

        // check limit length of reason field
        final String reason = "Very looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong reason that is 271 characters long to make sure that length limit logic handles the scenario nicely";
        final String truncatedReason = reason.substring(0, 255);
        expectJoinGroup(memberId, truncatedReason, generation, memberId);
        expectSyncGroup(generation, memberId);
        coordinator.requestRejoin(reason);
        ensureActiveGroup(generation, memberId);
        assertEquals("", coordinator.rejoinReason());
    }
    // Runs a full ensureActiveGroup cycle and asserts the coordinator landed
    // on the expected generation/member id with no rejoin pending.
    private void ensureActiveGroup(
        int generation,
        String memberId
    ) {
        coordinator.ensureActiveGroup();
        assertEquals(generation, coordinator.generation().generationId);
        assertEquals(memberId, coordinator.generation().memberId);
        assertFalse(coordinator.rejoinNeededOrPending());
    }
    // Queues a successful SyncGroup response, matched only when the request
    // carries the expected generation, member id, and protocol type/name.
    private void expectSyncGroup(
        int expectedGeneration,
        String expectedMemberId
    ) {
        mockClient.prepareResponse(body -> {
            if (!(body instanceof SyncGroupRequest)) {
                return false;
            }
            SyncGroupRequestData syncGroupRequest = ((SyncGroupRequest) body).data();
            return syncGroupRequest.generationId() == expectedGeneration
                && syncGroupRequest.memberId().equals(expectedMemberId)
                && syncGroupRequest.protocolType().equals(PROTOCOL_TYPE)
                && syncGroupRequest.protocolName().equals(PROTOCOL_NAME);
        }, syncGroupResponse(Errors.NONE, PROTOCOL_TYPE, PROTOCOL_NAME));
    }
    // Queues a disconnect (null response, disconnected=true) for the next
    // SyncGroup request matching the expected generation/member id.
    private void expectDisconnectInSyncGroup(
        int expectedGeneration,
        String expectedMemberId
    ) {
        mockClient.prepareResponse(body -> {
            if (!(body instanceof SyncGroupRequest)) {
                return false;
            }
            SyncGroupRequestData syncGroupRequest = ((SyncGroupRequest) body).data();
            return syncGroupRequest.generationId() == expectedGeneration
                && syncGroupRequest.memberId().equals(expectedMemberId)
                && syncGroupRequest.protocolType().equals(PROTOCOL_TYPE)
                && syncGroupRequest.protocolName().equals(PROTOCOL_NAME);
        }, null, true);
    }
    // Queues a disconnect (null response, disconnected=true) for the next
    // JoinGroup request carrying the expected member id.
    private void expectDisconnectInJoinGroup(
        String expectedMemberId
    ) {
        mockClient.prepareResponse(body -> {
            if (!(body instanceof JoinGroupRequest)) {
                return false;
            }
            JoinGroupRequestData joinGroupRequest = ((JoinGroupRequest) body).data();
            return joinGroupRequest.memberId().equals(expectedMemberId)
                && joinGroupRequest.protocolType().equals(PROTOCOL_TYPE);
        }, null, true);
    }
    // Overload without a reason check: a null expectedReason matches any reason.
    private void expectJoinGroup(
        String expectedMemberId,
        int responseGeneration,
        String responseMemberId
    ) {
        expectJoinGroup(expectedMemberId, null, responseGeneration, responseMemberId);
    }
    // Queues a follower JoinGroup response for the next JoinGroup request
    // carrying the expected member id, protocol type, and (optionally) reason.
    private void expectJoinGroup(
        String expectedMemberId,
        String expectedReason,
        int responseGeneration,
        String responseMemberId
    ) {
        JoinGroupResponse response = joinGroupFollowerResponse(
            responseGeneration,
            responseMemberId,
            "leaderId",
            Errors.NONE,
            PROTOCOL_TYPE
        );

        mockClient.prepareResponse(body -> {
            if (!(body instanceof JoinGroupRequest)) {
                return false;
            }
            JoinGroupRequestData joinGroupRequest = ((JoinGroupRequest) body).data();

            // abstract coordinator never sets reason to null
            String actualReason = joinGroupRequest.reason();
            boolean isReasonMatching = expectedReason == null || expectedReason.equals(actualReason);

            return joinGroupRequest.memberId().equals(expectedMemberId)
                && joinGroupRequest.protocolType().equals(PROTOCOL_TYPE)
                && isReasonMatching;
        }, response);
    }
    /**
     * If the generation is reset (e.g. by leaving the group) before a
     * SyncGroup response with a mismatched protocol name arrives, the
     * protocol-name check must be skipped instead of throwing; the
     * subsequent retry then completes normally.
     */
    @Test
    public void testNoGenerationWillNotTriggerProtocolNameCheck() {
        final String wrongProtocolName = "wrong-name";

        setupCoordinator();
        mockClient.reset();
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(mockTime.timer(0));

        mockClient.prepareResponse(body -> {
            if (!(body instanceof JoinGroupRequest)) {
                return false;
            }
            JoinGroupRequest joinGroupRequest = (JoinGroupRequest) body;
            return joinGroupRequest.data().protocolType().equals(PROTOCOL_TYPE);
        }, joinGroupFollowerResponse(defaultGeneration, memberId,
            "memberid", Errors.NONE, PROTOCOL_TYPE));

        mockClient.prepareResponse(body -> {
            if (!(body instanceof SyncGroupRequest)) {
                return false;
            }
            // Reset the generation from inside the matcher, i.e. before the
            // (wrong-name) sync response is processed.
            coordinator.resetGenerationOnLeaveGroup();

            SyncGroupRequest syncGroupRequest = (SyncGroupRequest) body;
            return syncGroupRequest.data().protocolType().equals(PROTOCOL_TYPE)
                && syncGroupRequest.data().protocolName().equals(PROTOCOL_NAME);
        }, syncGroupResponse(Errors.NONE, PROTOCOL_TYPE, wrongProtocolName));

        // let the retry to complete successfully to break out of the while loop
        mockClient.prepareResponse(body -> {
            if (!(body instanceof JoinGroupRequest)) {
                return false;
            }
            JoinGroupRequest joinGroupRequest = (JoinGroupRequest) body;
            return joinGroupRequest.data().protocolType().equals(PROTOCOL_TYPE);
        }, joinGroupFollowerResponse(1, memberId,
            "memberid", Errors.NONE, PROTOCOL_TYPE));

        mockClient.prepareResponse(body -> {
            if (!(body instanceof SyncGroupRequest)) {
                return false;
            }
            SyncGroupRequest syncGroupRequest = (SyncGroupRequest) body;
            return syncGroupRequest.data().protocolType().equals(PROTOCOL_TYPE)
                && syncGroupRequest.data().protocolName().equals(PROTOCOL_NAME);
        }, syncGroupResponse(Errors.NONE, PROTOCOL_TYPE, PROTOCOL_NAME));

        // No exception shall be thrown as the generation is reset.
        coordinator.joinGroupIfNeeded(mockTime.timer(100L));
    }
/**
 * Drives a complete join (find coordinator, join-group, sync-group) where the
 * canned responses carry the given protocol type/name, and returns whether
 * {@code joinGroupIfNeeded} succeeded. Used to exercise the protocol
 * type/name validation paths.
 */
private boolean joinGroupWithProtocolTypeAndName(String joinGroupResponseProtocolType,
                                                 String syncGroupResponseProtocolType,
                                                 String syncGroupResponseProtocolName) {
    setupCoordinator();
    mockClient.reset();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(mockTime.timer(0));

    // Outgoing join-group request must carry the expected protocol type.
    mockClient.prepareResponse(body -> {
        if (!(body instanceof JoinGroupRequest)) {
            return false;
        }
        JoinGroupRequest joinGroupRequest = (JoinGroupRequest) body;
        return joinGroupRequest.data().protocolType().equals(PROTOCOL_TYPE);
    }, joinGroupFollowerResponse(defaultGeneration, memberId,
        "memberid", Errors.NONE, joinGroupResponseProtocolType));

    // Outgoing sync-group request must carry the expected protocol type and name;
    // the response carries the (possibly mismatching) values under test.
    mockClient.prepareResponse(body -> {
        if (!(body instanceof SyncGroupRequest)) {
            return false;
        }
        SyncGroupRequest syncGroupRequest = (SyncGroupRequest) body;
        return syncGroupRequest.data().protocolType().equals(PROTOCOL_TYPE)
            && syncGroupRequest.data().protocolName().equals(PROTOCOL_NAME);
    }, syncGroupResponse(Errors.NONE, syncGroupResponseProtocolType, syncGroupResponseProtocolName));
    return coordinator.joinGroupIfNeeded(mockTime.timer(5000L));
}
@Test
public void testSyncGroupRequestWithFencedInstanceIdException() {
    // A FENCED_INSTANCE_ID error on the sync-group response must surface as a
    // FencedInstanceIdException from ensureActiveGroup().
    setupCoordinator();

    final int unknownGeneration = -1;
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(
        joinGroupFollowerResponse(unknownGeneration, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.FENCED_INSTANCE_ID));

    assertThrows(FencedInstanceIdException.class, () -> coordinator.ensureActiveGroup());
}
@Test
public void testJoinGroupUnknownMemberResponseWithOldGeneration() throws InterruptedException {
    // An UNKNOWN_MEMBER_ID join error received for an old generation must not
    // reset the generation that was updated in the meantime.
    setupCoordinator();
    joinGroup();

    final AbstractCoordinator.Generation currGen = coordinator.generation();

    RequestFuture<ByteBuffer> future = coordinator.sendJoinGroupRequest();
    TestUtils.waitForCondition(() -> !mockClient.requests().isEmpty(), 2000,
        "The join-group request was not sent");

    // change the generation after the join-group request
    final AbstractCoordinator.Generation newGen = new AbstractCoordinator.Generation(
        currGen.generationId,
        currGen.memberId + "-new",
        currGen.protocolName);
    coordinator.setNewGeneration(newGen);

    mockClient.respond(joinGroupFollowerResponse(currGen.generationId + 1, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.UNKNOWN_MEMBER_ID));
    assertTrue(consumerClient.poll(future, mockTime.timer(REQUEST_TIMEOUT_MS)));
    // assertInstanceOf(expectedType, actual): assert that the future failed with
    // the UNKNOWN_MEMBER_ID exception type (arguments were previously reversed).
    assertInstanceOf(Errors.UNKNOWN_MEMBER_ID.exception().getClass(), future.exception());

    // the generation should not be reset
    assertEquals(newGen, coordinator.generation());
}
@Test
public void testSyncGroupUnknownMemberResponseWithOldGeneration() throws InterruptedException {
    // An UNKNOWN_MEMBER_ID sync error received for an old generation must not
    // reset the generation that was updated in the meantime.
    setupCoordinator();
    joinGroup();

    final AbstractCoordinator.Generation currGen = coordinator.generation();

    coordinator.setNewState(AbstractCoordinator.MemberState.PREPARING_REBALANCE);
    RequestFuture<ByteBuffer> future = coordinator.sendJoinGroupRequest();

    TestUtils.waitForCondition(() -> {
        consumerClient.poll(mockTime.timer(REQUEST_TIMEOUT_MS));
        return !mockClient.requests().isEmpty();
    }, 2000,
        "The join-group request was not sent");

    mockClient.respond(joinGroupFollowerResponse(currGen.generationId, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.NONE));
    assertTrue(mockClient.requests().isEmpty());

    TestUtils.waitForCondition(() -> {
        consumerClient.poll(mockTime.timer(REQUEST_TIMEOUT_MS));
        return !mockClient.requests().isEmpty();
    }, 2000,
        "The sync-group request was not sent");

    // change the generation after the sync-group request
    final AbstractCoordinator.Generation newGen = new AbstractCoordinator.Generation(
        currGen.generationId,
        currGen.memberId + "-new",
        currGen.protocolName);
    coordinator.setNewGeneration(newGen);

    mockClient.respond(syncGroupResponse(Errors.UNKNOWN_MEMBER_ID));
    assertTrue(consumerClient.poll(future, mockTime.timer(REQUEST_TIMEOUT_MS)));
    // assertInstanceOf(expectedType, actual): assert that the future failed with
    // the UNKNOWN_MEMBER_ID exception type (arguments were previously reversed).
    assertInstanceOf(Errors.UNKNOWN_MEMBER_ID.exception().getClass(), future.exception());

    // the generation should not be reset
    assertEquals(newGen, coordinator.generation());
}
@Test
public void testSyncGroupIllegalGenerationResponseWithOldGeneration() throws InterruptedException {
    // An ILLEGAL_GENERATION sync error received for an old generation must not
    // reset the generation that was updated in the meantime.
    setupCoordinator();
    joinGroup();

    final AbstractCoordinator.Generation currGen = coordinator.generation();

    coordinator.setNewState(AbstractCoordinator.MemberState.PREPARING_REBALANCE);
    RequestFuture<ByteBuffer> future = coordinator.sendJoinGroupRequest();

    TestUtils.waitForCondition(() -> {
        consumerClient.poll(mockTime.timer(REQUEST_TIMEOUT_MS));
        return !mockClient.requests().isEmpty();
    }, 2000,
        "The join-group request was not sent");

    mockClient.respond(joinGroupFollowerResponse(currGen.generationId, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.NONE));
    assertTrue(mockClient.requests().isEmpty());

    TestUtils.waitForCondition(() -> {
        consumerClient.poll(mockTime.timer(REQUEST_TIMEOUT_MS));
        return !mockClient.requests().isEmpty();
    }, 2000,
        "The sync-group request was not sent");

    // change the generation after the sync-group request
    final AbstractCoordinator.Generation newGen = new AbstractCoordinator.Generation(
        currGen.generationId,
        currGen.memberId + "-new",
        currGen.protocolName);
    coordinator.setNewGeneration(newGen);

    mockClient.respond(syncGroupResponse(Errors.ILLEGAL_GENERATION));
    assertTrue(consumerClient.poll(future, mockTime.timer(REQUEST_TIMEOUT_MS)));
    // assertInstanceOf(expectedType, actual): assert that the future failed with
    // the ILLEGAL_GENERATION exception type (arguments were previously reversed).
    assertInstanceOf(Errors.ILLEGAL_GENERATION.exception().getClass(), future.exception());

    // the generation should not be reset
    assertEquals(newGen, coordinator.generation());
}
@Test
public void testHeartbeatSentWhenCompletingRebalance() throws Exception {
    // Heartbeats must still be sent while the member is in COMPLETING_REBALANCE,
    // and a REBALANCE_IN_PROGRESS response must leave the generation intact.
    setupCoordinator();
    joinGroup();
    final AbstractCoordinator.Generation currGen = coordinator.generation();
    coordinator.setNewState(AbstractCoordinator.MemberState.COMPLETING_REBALANCE);
    // the heartbeat should be sent out during a rebalance
    mockTime.sleep(HEARTBEAT_INTERVAL_MS);
    TestUtils.waitForCondition(() -> !mockClient.requests().isEmpty(), 2000,
        "The heartbeat request was not sent");
    assertTrue(coordinator.heartbeat().hasInflight());
    mockClient.respond(heartbeatResponse(Errors.REBALANCE_IN_PROGRESS));
    assertEquals(currGen, coordinator.generation());
}
@Test
public void testHeartbeatIllegalGenerationResponseWithOldGeneration() throws InterruptedException {
    // An ILLEGAL_GENERATION heartbeat error that arrives after the generation
    // was already changed must be ignored and must not reset the new generation.
    setupCoordinator();
    joinGroup();
    final AbstractCoordinator.Generation currGen = coordinator.generation();
    // let the heartbeat thread send out a request
    mockTime.sleep(HEARTBEAT_INTERVAL_MS);
    TestUtils.waitForCondition(() -> !mockClient.requests().isEmpty(), 2000,
        "The heartbeat request was not sent");
    assertTrue(coordinator.heartbeat().hasInflight());
    // change the generation (bump the generation id) before the response is handled
    final AbstractCoordinator.Generation newGen = new AbstractCoordinator.Generation(
        currGen.generationId + 1,
        currGen.memberId,
        currGen.protocolName);
    coordinator.setNewGeneration(newGen);
    mockClient.respond(heartbeatResponse(Errors.ILLEGAL_GENERATION));
    // the heartbeat error code should be ignored
    TestUtils.waitForCondition(() -> {
        coordinator.pollHeartbeat(mockTime.milliseconds());
        return !coordinator.heartbeat().hasInflight();
    }, 2000,
        "The heartbeat response was not received");
    // the generation should not be reset
    assertEquals(newGen, coordinator.generation());
}
@Test
public void testHeartbeatUnknownMemberResponseWithOldGeneration() throws InterruptedException {
    // An UNKNOWN_MEMBER_ID heartbeat error that arrives after the generation
    // was already changed must be ignored and must not reset the new generation.
    setupCoordinator();
    joinGroup();
    final AbstractCoordinator.Generation currGen = coordinator.generation();
    // let the heartbeat request to send out a request
    mockTime.sleep(HEARTBEAT_INTERVAL_MS);
    TestUtils.waitForCondition(() -> !mockClient.requests().isEmpty(), 2000,
        "The heartbeat request was not sent");
    assertTrue(coordinator.heartbeat().hasInflight());
    // change the generation (new member id) before the response is handled
    final AbstractCoordinator.Generation newGen = new AbstractCoordinator.Generation(
        currGen.generationId,
        currGen.memberId + "-new",
        currGen.protocolName);
    coordinator.setNewGeneration(newGen);
    mockClient.respond(heartbeatResponse(Errors.UNKNOWN_MEMBER_ID));
    // the heartbeat error code should be ignored
    TestUtils.waitForCondition(() -> {
        coordinator.pollHeartbeat(mockTime.milliseconds());
        return !coordinator.heartbeat().hasInflight();
    }, 2000,
        "The heartbeat response was not received");
    // the generation should not be reset
    assertEquals(newGen, coordinator.generation());
}
@Test
public void testHeartbeatRebalanceInProgressResponseDuringRebalancing() throws InterruptedException {
    // A REBALANCE_IN_PROGRESS heartbeat error received while a rejoin has been
    // requested must not reset the generation, and the subsequent rejoin must
    // keep the same generation as well.
    setupCoordinator();
    joinGroup();
    final AbstractCoordinator.Generation currGen = coordinator.generation();
    // let the heartbeat request to send out a request
    mockTime.sleep(HEARTBEAT_INTERVAL_MS);
    TestUtils.waitForCondition(() -> !mockClient.requests().isEmpty(), 2000,
        "The heartbeat request was not sent");
    assertTrue(coordinator.heartbeat().hasInflight());
    mockClient.respond(heartbeatResponse(Errors.REBALANCE_IN_PROGRESS));
    coordinator.requestRejoin("test");
    // drive ensureActiveGroup with a short real-time timer until the heartbeat
    // response has been processed
    TestUtils.waitForCondition(() -> {
        coordinator.ensureActiveGroup(new MockTime(1L).timer(100L));
        return !coordinator.heartbeat().hasInflight();
    },
        2000,
        "The heartbeat response was not received");
    // the generation would not be reset while the rebalance is in progress
    assertEquals(currGen, coordinator.generation());
    mockClient.respond(joinGroupFollowerResponse(currGen.generationId, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    coordinator.ensureActiveGroup();
    assertEquals(currGen, coordinator.generation());
}
@Test
public void testHeartbeatInstanceFencedResponseWithOldGeneration() throws InterruptedException {
    // A FENCED_INSTANCE_ID heartbeat error that arrives after the generation
    // was already changed must be ignored and must not reset the new generation.
    setupCoordinator();
    joinGroup();
    final AbstractCoordinator.Generation currGen = coordinator.generation();
    // let the heartbeat request to send out a request
    mockTime.sleep(HEARTBEAT_INTERVAL_MS);
    TestUtils.waitForCondition(() -> !mockClient.requests().isEmpty(), 2000,
        "The heartbeat request was not sent");
    assertTrue(coordinator.heartbeat().hasInflight());
    // change the generation (new member id) before the response is handled
    final AbstractCoordinator.Generation newGen = new AbstractCoordinator.Generation(
        currGen.generationId,
        currGen.memberId + "-new",
        currGen.protocolName);
    coordinator.setNewGeneration(newGen);
    mockClient.respond(heartbeatResponse(Errors.FENCED_INSTANCE_ID));
    // the heartbeat error code should be ignored
    TestUtils.waitForCondition(() -> {
        coordinator.pollHeartbeat(mockTime.milliseconds());
        return !coordinator.heartbeat().hasInflight();
    }, 2000,
        "The heartbeat response was not received");
    // the generation should not be reset
    assertEquals(newGen, coordinator.generation());
}
@Test
public void testHeartbeatRequestWithFencedInstanceIdException() {
    // A FENCED_INSTANCE_ID heartbeat error is fatal: polling the heartbeat state
    // must eventually rethrow FencedInstanceIdException to the caller.
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    final int generation = -1;
    mockClient.prepareResponse(joinGroupFollowerResponse(generation, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    mockClient.prepareResponse(heartbeatResponse(Errors.FENCED_INSTANCE_ID));
    assertThrows(FencedInstanceIdException.class,
        () -> {
            coordinator.ensureActiveGroup();
            mockTime.sleep(HEARTBEAT_INTERVAL_MS);
            // poll repeatedly (real time, up to 1s) until the background
            // heartbeat failure surfaces through pollHeartbeat
            long startMs = System.currentTimeMillis();
            while (System.currentTimeMillis() - startMs < 1000) {
                Thread.sleep(10);
                coordinator.pollHeartbeat(mockTime.milliseconds());
            }
        },
        "Expected pollHeartbeat to raise fenced instance id exception in 1 second");
}
@Test
public void testJoinGroupRequestWithGroupInstanceIdNotFound() {
    // An UNKNOWN_MEMBER_ID join error marks the generation unknown and forces a rejoin.
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(mockTime.timer(0));

    mockClient.prepareResponse(
        joinGroupFollowerResponse(defaultGeneration, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.UNKNOWN_MEMBER_ID));

    final RequestFuture<ByteBuffer> joinFuture = coordinator.sendJoinGroupRequest();
    assertTrue(consumerClient.poll(joinFuture, mockTime.timer(REQUEST_TIMEOUT_MS)));

    assertEquals(Errors.UNKNOWN_MEMBER_ID.message(), joinFuture.exception().getMessage());
    assertTrue(coordinator.rejoinNeededOrPending());
    assertTrue(coordinator.hasUnknownGeneration());
}
@Test
public void testJoinGroupRequestWithRebalanceInProgress() {
    // A REBALANCE_IN_PROGRESS join error is retriable: the member must retry on
    // the next poll and eventually complete the join (onJoinPrepare/Complete
    // each invoked exactly once).
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(mockTime.timer(0));

    mockClient.prepareResponse(
        joinGroupFollowerResponse(defaultGeneration, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.REBALANCE_IN_PROGRESS));

    RequestFuture<ByteBuffer> future = coordinator.sendJoinGroupRequest();
    assertTrue(consumerClient.poll(future, mockTime.timer(REQUEST_TIMEOUT_MS)));
    // assertInstanceOf(expectedType, actual): assert that the future failed with
    // the REBALANCE_IN_PROGRESS exception type (arguments were previously reversed).
    assertInstanceOf(Errors.REBALANCE_IN_PROGRESS.exception().getClass(), future.exception());
    assertEquals(Errors.REBALANCE_IN_PROGRESS.message(), future.exception().getMessage());
    assertTrue(coordinator.rejoinNeededOrPending());

    // make sure we'll retry on next poll
    assertEquals(0, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);

    mockClient.prepareResponse(joinGroupFollowerResponse(defaultGeneration, memberId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    coordinator.ensureActiveGroup();

    // make sure both onJoinPrepare and onJoinComplete got called
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);
}
@Test
public void testLeaveGroupSentWithGroupInstanceIdUnSet() {
    // Exercise the leave-group path both without and with a group.instance.id.
    for (Optional<String> groupInstanceId : Arrays.asList(Optional.<String>empty(), Optional.of("groupInstanceId"))) {
        checkLeaveGroupRequestSent(groupInstanceId);
    }
}
@ParameterizedTest
@MethodSource("groupInstanceIdAndMembershipOperationMatrix")
public void testLeaveGroupSentWithGroupInstanceIdUnSetAndDifferentGroupMembershipOperation(Optional<String> groupInstanceId, CloseOptions.GroupMembershipOperation operation) {
    // Delegate to the shared helper; no custom heartbeat thread is supplied.
    final Optional<Supplier<BaseHeartbeatThread>> noHeartbeatThread = Optional.empty();
    checkLeaveGroupRequestSent(groupInstanceId, operation, noHeartbeatThread);
}
// Cartesian product of {empty, "groupInstanceId"} with every membership
// operation, in the same order as the original explicit listing.
private static Stream<Arguments> groupInstanceIdAndMembershipOperationMatrix() {
    return Stream.of(Optional.<String>empty(), Optional.of("groupInstanceId"))
        .flatMap(groupInstanceId -> Stream.of(
                CloseOptions.GroupMembershipOperation.DEFAULT,
                CloseOptions.GroupMembershipOperation.LEAVE_GROUP,
                CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP)
            .map(operation -> Arguments.of(groupInstanceId, operation)));
}
private void checkLeaveGroupRequestSent(Optional<String> groupInstanceId) {
    // Convenience overload: default membership operation, no custom heartbeat thread.
    final CloseOptions.GroupMembershipOperation defaultOperation = CloseOptions.GroupMembershipOperation.DEFAULT;
    checkLeaveGroupRequestSent(groupInstanceId, defaultOperation, Optional.empty());
}
/**
 * Joins the group and then closes the coordinator with the given membership
 * operation. A prepared matcher throws {@code e} as soon as a LeaveGroupRequest
 * goes out, so whether the exception is observed tells us whether a leave-group
 * request was actually sent for this combination of instance id and operation.
 */
private void checkLeaveGroupRequestSent(Optional<String> groupInstanceId, CloseOptions.GroupMembershipOperation operation, Optional<Supplier<BaseHeartbeatThread>> heartbeatThreadSupplier) {
    setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, groupInstanceId, heartbeatThreadSupplier);
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    final RuntimeException e = new RuntimeException();
    // raise the error when the coordinator tries to send leave group request.
    mockClient.prepareResponse(body -> {
        if (body instanceof LeaveGroupRequest)
            throw e;
        return false;
    }, heartbeatResponse(Errors.UNKNOWN_SERVER_ERROR));
    try {
        coordinator.ensureActiveGroup();
        coordinator.close(new MockTime().timer(0), operation);
        // if a leave was supposed to be sent, the matcher above must have thrown
        if (CloseOptions.GroupMembershipOperation.LEAVE_GROUP == operation ||
            (CloseOptions.GroupMembershipOperation.DEFAULT == operation && coordinator.isDynamicMember())) {
            fail("Expected leavegroup to raise an error.");
        }
    } catch (RuntimeException exception) {
        // the thrown exception must be exactly the one planted in the matcher
        if (CloseOptions.GroupMembershipOperation.LEAVE_GROUP == operation || coordinator.isDynamicMember()) {
            assertEquals(exception, e);
        } else {
            fail("Coordinator with group.instance.id set shouldn't send leave group request.");
        }
    }
}
@Test
public void testHandleNormalLeaveGroupResponse() {
    // A single-member NONE response completes the leave-group future successfully.
    LeaveGroupResponse response = leaveGroupResponse(Collections.singletonList(
        new MemberResponse()
            .setMemberId(memberId)
            .setErrorCode(Errors.NONE.code())));

    RequestFuture<Void> leaveGroupFuture = setupLeaveGroup(response);
    assertNotNull(leaveGroupFuture);
    assertTrue(leaveGroupFuture.succeeded());
}
@Test
public void testHandleNormalLeaveGroupResponseAndTruncatedLeaveReason() {
    // A leave reason longer than 255 characters is expected to be truncated to
    // its first 255 characters on the wire (verified inside setupLeaveGroup).
    MemberResponse memberResponse = new MemberResponse()
        .setMemberId(memberId)
        .setErrorCode(Errors.NONE.code());
    LeaveGroupResponse response =
        leaveGroupResponse(Collections.singletonList(memberResponse));
    // 271-character reason; must be passed through setupLeaveGroup byte-for-byte
    String leaveReason = "Very looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong leaveReason that is 271 characters long to make sure that length limit logic handles the scenario nicely";
    RequestFuture<Void> leaveGroupFuture = setupLeaveGroup(response, leaveReason, leaveReason.substring(0, 255));
    assertNotNull(leaveGroupFuture);
    assertTrue(leaveGroupFuture.succeeded());
}
@Test
public void testHandleMultipleMembersLeaveGroupResponse() {
    // Two member entries in the leave-group response fail the future with
    // IllegalStateException.
    MemberResponse member = new MemberResponse()
        .setMemberId(memberId)
        .setErrorCode(Errors.NONE.code());

    RequestFuture<Void> leaveGroupFuture =
        setupLeaveGroup(leaveGroupResponse(Arrays.asList(member, member)));
    assertNotNull(leaveGroupFuture);
    assertInstanceOf(IllegalStateException.class, leaveGroupFuture.exception());
}
@Test
public void testHandleLeaveGroupResponseWithEmptyMemberResponse() {
    // An empty member list in the leave-group response still counts as success.
    RequestFuture<Void> leaveGroupFuture =
        setupLeaveGroup(leaveGroupResponse(Collections.emptyList()));
    assertNotNull(leaveGroupFuture);
    assertTrue(leaveGroupFuture.succeeded());
}
@Test
public void testHandleLeaveGroupResponseWithException() {
    // An UNKNOWN_MEMBER_ID member-level error fails the future with
    // UnknownMemberIdException.
    MemberResponse failedMember = new MemberResponse()
        .setMemberId(memberId)
        .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code());

    RequestFuture<Void> leaveGroupFuture =
        setupLeaveGroup(leaveGroupResponse(Collections.singletonList(failedMember)));
    assertNotNull(leaveGroupFuture);
    assertInstanceOf(UnknownMemberIdException.class, leaveGroupFuture.exception());
}
private RequestFuture<Void> setupLeaveGroup(LeaveGroupResponse leaveGroupResponse) {
    // Convenience overload: the reason sent and the reason expected on the wire
    // are the same string.
    final String reason = "test maybe leave group";
    return setupLeaveGroup(leaveGroupResponse, reason, reason);
}
/**
 * Joins the group and then calls maybeLeaveGroup with {@code leaveReason}. The
 * prepared matcher checks that the outgoing LeaveGroupRequest carries this
 * member's id and {@code expectedLeaveReason}, and answers with the given
 * {@code leaveGroupResponse}. Returns the leave-group future for assertions.
 */
private RequestFuture<Void> setupLeaveGroup(LeaveGroupResponse leaveGroupResponse,
                                            String leaveReason,
                                            String expectedLeaveReason) {
    setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, Optional.empty(), Optional.empty());
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    mockClient.prepareResponse(body -> {
        if (!(body instanceof LeaveGroupRequest)) {
            return false;
        }
        LeaveGroupRequestData leaveGroupRequest = ((LeaveGroupRequest) body).data();
        // single-member leave: check the id and the (possibly truncated) reason
        return leaveGroupRequest.members().get(0).memberId().equals(memberId) &&
            leaveGroupRequest.members().get(0).reason().equals(expectedLeaveReason);
    }, leaveGroupResponse);
    coordinator.ensureActiveGroup();
    return coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, leaveReason);
}
@Test
public void testUncaughtExceptionInHeartbeatThread() throws Exception {
    // An exception thrown while the heartbeat thread sends a request must be
    // rethrown to the foreground from both timeToNextHeartbeat and pollHeartbeat.
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    final RuntimeException e = new RuntimeException();
    // raise the error when the background thread tries to send a heartbeat
    mockClient.prepareResponse(body -> {
        if (body instanceof HeartbeatRequest)
            throw e;
        return false;
    }, heartbeatResponse(Errors.UNKNOWN_SERVER_ERROR));
    coordinator.ensureActiveGroup();
    mockTime.sleep(HEARTBEAT_INTERVAL_MS);
    // poll (real time, up to 1s) until the background failure surfaces via
    // timeToNextHeartbeat
    try {
        long startMs = System.currentTimeMillis();
        while (System.currentTimeMillis() - startMs < 1000) {
            Thread.sleep(10);
            coordinator.timeToNextHeartbeat(0);
        }
        fail("Expected timeToNextHeartbeat to raise an error in 1 second");
    } catch (RuntimeException exception) {
        assertEquals(exception, e);
    }
    // the same failure must also surface via pollHeartbeat
    try {
        long startMs = System.currentTimeMillis();
        while (System.currentTimeMillis() - startMs < 1000) {
            Thread.sleep(10);
            coordinator.pollHeartbeat(mockTime.milliseconds());
        }
        fail("Expected pollHeartbeat to raise an error in 1 second");
    } catch (RuntimeException exception) {
        assertEquals(exception, e);
    }
}
@Test
public void testPollHeartbeatAwakesHeartbeatThread() throws Exception {
    // With a long retry backoff, pollHeartbeat must wake the heartbeat thread so
    // the next heartbeat request goes out promptly rather than after the backoff.
    final int longRetryBackoffMs = 10000;
    final int longRetryBackoffMaxMs = 10000;
    setupCoordinator(longRetryBackoffMs, longRetryBackoffMaxMs);
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    coordinator.ensureActiveGroup();
    final CountDownLatch heartbeatDone = new CountDownLatch(1);
    // NOTE(review): the latch is counted down for whichever request arrives next,
    // before the instanceof check; the match result itself still requires a
    // HeartbeatRequest.
    mockClient.prepareResponse(body -> {
        heartbeatDone.countDown();
        return body instanceof HeartbeatRequest;
    }, heartbeatResponse(Errors.NONE));
    mockTime.sleep(HEARTBEAT_INTERVAL_MS);
    coordinator.pollHeartbeat(mockTime.milliseconds());
    if (!heartbeatDone.await(1, TimeUnit.SECONDS)) {
        fail("Should have received a heartbeat request after calling pollHeartbeat");
    }
}
@Test
public void testLookupCoordinator() {
    // Covers three lookupCoordinator cases: no broker available (immediate
    // failure), a lookup already in flight (the same future is returned), and a
    // fresh request after the previous lookup completed.
    setupCoordinator();
    mockClient.backoff(node, 50);
    RequestFuture<Void> noBrokersAvailableFuture = coordinator.lookupCoordinator();
    assertTrue(noBrokersAvailableFuture.failed(), "Failed future expected");
    mockTime.sleep(50);
    RequestFuture<Void> future = coordinator.lookupCoordinator();
    assertFalse(future.isDone(), "Request not sent");
    assertSame(future, coordinator.lookupCoordinator(), "New request sent while one is in progress");
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(mockTime.timer(Long.MAX_VALUE));
    assertNotSame(future, coordinator.lookupCoordinator(), "New request not sent after previous completed");
}
@Test
public void testWakeupAfterJoinGroupSent() throws Exception {
    // A wakeup raised while the first join-group request is being sent must
    // abort ensureActiveGroup(); the retry completes the join and heartbeats start.
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));

    final int[] invocations = {0};
    mockClient.prepareResponse(body -> {
        invocations[0]++;
        boolean isJoinGroupRequest = body instanceof JoinGroupRequest;
        if (isJoinGroupRequest && invocations[0] == 1)
            // simulate wakeup before the request returns
            throw new WakeupException();
        return isJoinGroupRequest;
    }, joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();

    assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(),
        "Should have woken up from ensureActiveGroup()");

    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);
    assertFalse(heartbeatReceived.get());

    coordinator.ensureActiveGroup();
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);

    awaitFirstHeartbeat(heartbeatReceived);
}
@Test
public void testWakeupAfterJoinGroupSentExternalCompletion() throws Exception {
    // Like testWakeupAfterJoinGroupSent, but the pending join is completed by an
    // explicit poll() before ensureActiveGroup() is retried.
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    // stateful matcher: throw WakeupException on the first join-group only
    mockClient.prepareResponse(new MockClient.RequestMatcher() {
        private int invocations = 0;
        @Override
        public boolean matches(AbstractRequest body) {
            invocations++;
            boolean isJoinGroupRequest = body instanceof JoinGroupRequest;
            if (isJoinGroupRequest && invocations == 1)
                // simulate wakeup before the request returns
                throw new WakeupException();
            return isJoinGroupRequest;
        }
    }, joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();
    try {
        coordinator.ensureActiveGroup();
        fail("Should have woken up from ensureActiveGroup()");
    } catch (WakeupException ignored) {
    }
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);
    assertFalse(heartbeatReceived.get());
    // the join group completes in this poll()
    consumerClient.poll(mockTime.timer(0));
    coordinator.ensureActiveGroup();
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);
    awaitFirstHeartbeat(heartbeatReceived);
}
@Test
public void testWakeupAfterJoinGroupReceived() throws Exception {
    // A wakeup delivered right after the join-group response is received must
    // abort ensureActiveGroup(); retrying completes the rebalance.
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(request -> {
        boolean joinGroupSeen = request instanceof JoinGroupRequest;
        if (joinGroupSeen)
            // wakeup after the request returns
            consumerClient.wakeup();
        return joinGroupSeen;
    }, joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();

    assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(),
        "Should have woken up from ensureActiveGroup()");

    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);
    assertFalse(heartbeatReceived.get());

    coordinator.ensureActiveGroup();
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);

    awaitFirstHeartbeat(heartbeatReceived);
}
@Test
public void testWakeupAfterJoinGroupReceivedExternalCompletion() throws Exception {
    // Like testWakeupAfterJoinGroupReceived, but the pending join is completed by
    // an explicit poll() before ensureActiveGroup() is retried.
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(body -> {
        boolean isJoinGroupRequest = body instanceof JoinGroupRequest;
        if (isJoinGroupRequest)
            // wakeup after the request returns
            consumerClient.wakeup();
        return isJoinGroupRequest;
    }, joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();
    assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(), "Should have woken up from ensureActiveGroup()");
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);
    assertFalse(heartbeatReceived.get());
    // the join group completes in this poll()
    consumerClient.poll(mockTime.timer(0));
    coordinator.ensureActiveGroup();
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);
    awaitFirstHeartbeat(heartbeatReceived);
}
@Test
public void testWakeupAfterSyncGroupSentExternalCompletion() throws Exception {
    // A wakeup raised on the first sync-group request aborts ensureActiveGroup();
    // a mocked heartbeat thread verifies enable() was called, then a poll()
    // completes the pending sync before the retry.
    setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS,
        Optional.empty(), Optional.of(() -> mock(BaseHeartbeatThread.class)));
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    // stateful matcher: wake up on the first sync-group only
    mockClient.prepareResponse(new MockClient.RequestMatcher() {
        private int invocations = 0;
        @Override
        public boolean matches(AbstractRequest body) {
            invocations++;
            boolean isSyncGroupRequest = body instanceof SyncGroupRequest;
            if (isSyncGroupRequest && invocations == 1)
                // wakeup after the request returns
                consumerClient.wakeup();
            return isSyncGroupRequest;
        }
    }, syncGroupResponse(Errors.NONE));
    assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(), "Should have woken up from ensureActiveGroup()");
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);
    assertNotNull(coordinator.heartbeatThread());
    verify(coordinator.heartbeatThread()).enable();
    // the join group completes in this poll()
    consumerClient.poll(mockTime.timer(0));
    coordinator.ensureActiveGroup();
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);
}
@Test
public void testWakeupAfterSyncGroupReceived() {
    // A wakeup delivered when the sync-group response arrives aborts the first
    // ensureActiveGroup(); the rebalance completes on the retry.
    setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS,
        Optional.empty(), Optional.of(() -> mock(BaseHeartbeatThread.class)));
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(request -> {
        boolean syncGroupSeen = request instanceof SyncGroupRequest;
        if (syncGroupSeen)
            // wakeup after the request returns
            consumerClient.wakeup();
        return syncGroupSeen;
    }, syncGroupResponse(Errors.NONE));

    assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(),
        "Should have woken up from ensureActiveGroup()");

    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);
    assertNotNull(coordinator.heartbeatThread());
    verify(coordinator.heartbeatThread()).enable();

    coordinator.ensureActiveGroup();
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);
}
@Test
public void testWakeupAfterSyncGroupReceivedExternalCompletion() {
    // Same as testWakeupAfterSyncGroupReceived but expressed with assertThrows;
    // the mocked heartbeat thread must have been enabled before the retry.
    setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS,
        Optional.empty(), Optional.of(() -> mock(BaseHeartbeatThread.class)));
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(body -> {
        boolean isSyncGroupRequest = body instanceof SyncGroupRequest;
        if (isSyncGroupRequest)
            // wakeup after the request returns
            consumerClient.wakeup();
        return isSyncGroupRequest;
    }, syncGroupResponse(Errors.NONE));
    assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(), "Should have woken up from ensureActiveGroup()");
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);
    assertNotNull(coordinator.heartbeatThread());
    verify(coordinator.heartbeatThread()).enable();
    coordinator.ensureActiveGroup();
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);
}
@Test
public void testWakeupInOnJoinComplete() throws Exception {
    // A wakeup raised from inside onJoinComplete (via the wakeupOnJoinComplete
    // flag) aborts the first ensureActiveGroup(); clearing the flag and polling
    // lets the retry finish the join.
    setupCoordinator();
    coordinator.wakeupOnJoinComplete = true;
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();
    try {
        coordinator.ensureActiveGroup();
        fail("Should have woken up from ensureActiveGroup()");
    } catch (WakeupException ignored) {
    }
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);
    assertFalse(heartbeatReceived.get());
    // the join group completes in this poll()
    coordinator.wakeupOnJoinComplete = false;
    consumerClient.poll(mockTime.timer(0));
    coordinator.ensureActiveGroup();
    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);
    awaitFirstHeartbeat(heartbeatReceived);
}
@Test
public void testAuthenticationErrorInEnsureCoordinatorReady() {
    // A pending authentication failure on the node must propagate out of
    // ensureCoordinatorReady as an AuthenticationException.
    setupCoordinator();
    mockClient.createPendingAuthenticationError(node, 300);

    final Timer infiniteTimer = mockTime.timer(Long.MAX_VALUE);
    assertThrows(AuthenticationException.class,
        () -> coordinator.ensureCoordinatorReady(infiniteTimer),
        "Expected an authentication error.");
}
@Test
public void testBackoffAndRetryUponRetriableError() {
    // A retriable join error (COORDINATOR_LOAD_IN_PROGRESS) must be retried
    // after roughly RETRY_BACKOFF_MS, within the configured jitter.
    this.mockTime = new MockTime();
    long currentTimeMs = System.currentTimeMillis();
    this.mockTime.setCurrentTimeMs(System.currentTimeMillis());
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(mockTime.timer(0));
    // Retriable Exception
    mockClient.prepareResponse(joinGroupResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS));
    mockClient.prepareResponse(joinGroupResponse(Errors.NONE)); // Retry w/o error
    mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
    coordinator.joinGroupIfNeeded(mockTime.timer(REQUEST_TIMEOUT_MS));
    // elapsed mock time must equal one backoff, give or take the jitter delta
    assertEquals(RETRY_BACKOFF_MS, mockTime.milliseconds() - currentTimeMs,
        (int) (RETRY_BACKOFF_MS * CommonClientConfigs.RETRY_BACKOFF_JITTER) + 1);
}
@Test
public void testReturnUponRetriableErrorAndExpiredTimer() throws InterruptedException {
    setupCoordinator();
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(mockTime.timer(0));
    ExecutorService executor = Executors.newFixedThreadPool(1);
    Timer t = mockTime.timer(500);
    try {
        // Run the join on another thread while this one expires its timer and
        // queues only a retriable error; joinGroupIfNeeded should give up and
        // return false rather than keep retrying.
        // NOTE(review): the response is prepared after the sleep, so the
        // background thread races against this setup — confirm MockClient
        // tolerates the ordering.
        Future<Boolean> attempt = executor.submit(() -> coordinator.joinGroupIfNeeded(t));
        mockTime.sleep(500);
        mockClient.prepareResponse(joinGroupResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS));
        assertFalse(attempt.get());
    } catch (Exception e) {
        // Preserve the failure cause; the original bare fail() discarded the
        // exception and made diagnosing failed runs impossible.
        fail("Unexpected exception: " + e);
    } finally {
        executor.shutdownNow();
        executor.awaitTermination(1000, TimeUnit.MILLISECONDS);
    }
}
/**
 * Queues a request matcher that flips the returned flag as soon as the first
 * HeartbeatRequest arrives, answering it with UNKNOWN_SERVER_ERROR.
 */
private AtomicBoolean prepareFirstHeartbeat() {
    final AtomicBoolean received = new AtomicBoolean(false);
    mockClient.prepareResponse(request -> {
        if (request instanceof HeartbeatRequest) {
            received.set(true);
            return true;
        }
        return false;
    }, heartbeatResponse(Errors.UNKNOWN_SERVER_ERROR));
    return received;
}
/**
 * Advances the mock clock past one heartbeat interval, then blocks (up to 3s)
 * until the matcher installed by {@link #prepareFirstHeartbeat()} reports
 * that a heartbeat request was seen.
 */
private void awaitFirstHeartbeat(final AtomicBoolean heartbeatReceived) throws Exception {
    mockTime.sleep(HEARTBEAT_INTERVAL_MS);
    TestUtils.waitForCondition(heartbeatReceived::get,
        3000, "Should have received a heartbeat request after joining the group");
}
// Builds a FindCoordinator response for GROUP_ID pointing at the given node.
private FindCoordinatorResponse groupCoordinatorResponse(Node node, Errors error) {
    return FindCoordinatorResponse.prepareResponse(error, GROUP_ID, node);
}
// Builds a HeartbeatResponse carrying only the given error code.
private HeartbeatResponse heartbeatResponse(Errors error) {
    return new HeartbeatResponse(new HeartbeatResponseData().setErrorCode(error.code()));
}
// Convenience overload: follower join response with no protocol type set.
private JoinGroupResponse joinGroupFollowerResponse(int generationId,
                                                    String memberId,
                                                    String leaderId,
                                                    Errors error) {
    return joinGroupFollowerResponse(generationId, memberId, leaderId, error, null);
}
/**
 * Builds a follower JoinGroupResponse (empty member list, PROTOCOL_NAME) with
 * the given ids, error and protocol type, at the latest JoinGroup version.
 */
private JoinGroupResponse joinGroupFollowerResponse(int generationId,
                                                    String memberId,
                                                    String leaderId,
                                                    Errors error,
                                                    String protocolType) {
    JoinGroupResponseData data = new JoinGroupResponseData();
    data.setErrorCode(error.code());
    data.setGenerationId(generationId);
    data.setProtocolType(protocolType);
    data.setProtocolName(PROTOCOL_NAME);
    data.setMemberId(memberId);
    data.setLeader(leaderId);
    data.setMembers(Collections.emptyList());
    return new JoinGroupResponse(data, ApiKeys.JOIN_GROUP.latestVersion());
}
// A join response with unknown generation/member ids, used for error cases
// where the identity fields do not matter.
private JoinGroupResponse joinGroupResponse(Errors error) {
    return joinGroupFollowerResponse(JoinGroupRequest.UNKNOWN_GENERATION_ID,
        JoinGroupRequest.UNKNOWN_MEMBER_ID, JoinGroupRequest.UNKNOWN_MEMBER_ID, error);
}
// Convenience overload: sync response with no protocol type/name.
private SyncGroupResponse syncGroupResponse(Errors error) {
    return syncGroupResponse(error, null, null);
}
/**
 * Builds a SyncGroupResponse with an empty assignment and the given
 * error/protocol fields.
 */
private SyncGroupResponse syncGroupResponse(Errors error,
                                            String protocolType,
                                            String protocolName) {
    SyncGroupResponseData data = new SyncGroupResponseData();
    data.setErrorCode(error.code());
    data.setProtocolType(protocolType);
    data.setProtocolName(protocolName);
    data.setAssignment(new byte[0]);
    return new SyncGroupResponse(data);
}
// Builds a LeaveGroup response with top-level error NONE and the given
// per-member responses.
private LeaveGroupResponse leaveGroupResponse(List<MemberResponse> members) {
    return new LeaveGroupResponse(new LeaveGroupResponseData()
        .setErrorCode(Errors.NONE.code())
        .setMembers(members));
}
/**
 * Minimal concrete AbstractCoordinator used to drive the group protocol in
 * tests. It counts onJoinPrepare/onJoinComplete invocations and can be told
 * to throw a WakeupException from onJoinComplete.
 */
public static class DummyCoordinator extends AbstractCoordinator {

    private int onJoinPrepareInvokes = 0;
    private int onJoinCompleteInvokes = 0;
    // When true, onJoinComplete() throws WakeupException instead of counting.
    private boolean wakeupOnJoinComplete = false;

    DummyCoordinator(GroupRebalanceConfig rebalanceConfig,
                     ConsumerNetworkClient client,
                     Metrics metrics,
                     Time time,
                     Optional<Supplier<BaseHeartbeatThread>> heartbeatThreadSupplier) {
        super(rebalanceConfig, new LogContext(), client, metrics, METRIC_GROUP_PREFIX, time, Optional.empty(), heartbeatThreadSupplier);
    }

    @Override
    protected String protocolType() {
        return PROTOCOL_TYPE;
    }

    @Override
    protected JoinGroupRequestData.JoinGroupRequestProtocolCollection metadata() {
        // Advertise a single protocol (PROTOCOL_NAME) with empty metadata.
        return new JoinGroupRequestData.JoinGroupRequestProtocolCollection(
            Collections.singleton(new JoinGroupRequestData.JoinGroupRequestProtocol()
                .setName(PROTOCOL_NAME)
                .setMetadata(EMPTY_DATA.array())).iterator()
        );
    }

    @Override
    protected Map<String, ByteBuffer> onLeaderElected(String leaderId,
                                                      String protocol,
                                                      List<JoinGroupResponseData.JoinGroupResponseMember> allMemberMetadata,
                                                      boolean skipAssignment) {
        // Assign the same empty buffer to every member.
        Map<String, ByteBuffer> assignment = new HashMap<>();
        for (JoinGroupResponseData.JoinGroupResponseMember member : allMemberMetadata) {
            assignment.put(member.memberId(), EMPTY_DATA);
        }
        return assignment;
    }

    @Override
    protected boolean onJoinPrepare(Timer timer, int generation, String memberId) {
        onJoinPrepareInvokes++;
        return true;
    }

    @Override
    protected void onJoinComplete(int generation, String memberId, String protocol, ByteBuffer memberAssignment) {
        // The wakeup is thrown before the counter increments, so tests can
        // observe "prepared but not completed" states.
        if (wakeupOnJoinComplete)
            throw new WakeupException();

        onJoinCompleteInvokes++;
    }
}
} | java | github | https://github.com/apache/kafka | clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java |
"""Utilities for extracting common archive formats"""
__all__ = [
"unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
"UnrecognizedFormat", "extraction_drivers", "unpack_directory",
]
import zipfile
import tarfile
import os
import shutil
import posixpath
import contextlib
from pkg_resources import ensure_directory, ContextualZipFile
from distutils.errors import DistutilsError
class UnrecognizedFormat(DistutilsError):
    """Couldn't recognize the archive type

    Raised by the unpack_* drivers when the given file is not an archive of
    the kind they handle; ``unpack_archive`` treats it as "try the next
    driver".
    """
def default_filter(src, dst):
    """The default progress/filter callback; extracts every file.

    Returns `dst` unchanged, i.e. no entry is skipped and no destination is
    rewritten.  (The previous docstring claimed it "returns True", which was
    inaccurate -- callers use the return value as the extraction path.)
    """
    return dst
def unpack_archive(filename, extract_dir, progress_filter=default_filter,
                   drivers=None):
    """Extract `filename` into `extract_dir` using the first capable driver.

    `progress_filter` receives each archive-internal source path
    ('/'-separated) together with the filesystem destination chosen for it,
    and must return the path to actually extract to -- or a false value to
    skip that entry.  It therefore doubles as a progress hook and as a way
    to redirect or filter what gets extracted.

    `drivers`, when supplied, must be a non-empty sequence of callables with
    this function's signature (minus `drivers`); each must raise
    ``UnrecognizedFormat`` when it cannot handle the archive.  They are
    attempted in order, and ``UnrecognizedFormat`` is raised only if every
    driver refuses the file.  When omitted, the module-level
    ``extraction_drivers`` sequence is used (directory, zip, then tar).
    """
    for candidate in drivers or extraction_drivers:
        try:
            candidate(filename, extract_dir, progress_filter)
        except UnrecognizedFormat:
            pass  # this driver can't handle it; try the next one
        else:
            return
    raise UnrecognizedFormat(
        "Not a recognized archive type: %s" % filename
    )
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
    """Copy the directory `filename` into `extract_dir`, archive-style.

    Presents the same interface as the archive drivers so a plain directory
    can be "unpacked".  Raises ``UnrecognizedFormat`` if `filename` is not a
    directory.
    """
    if not os.path.isdir(filename):
        raise UnrecognizedFormat("%s is not a directory" % (filename,))

    # Map each walked directory to its ('/'-style source prefix, dest dir).
    mapping = {filename: ('', extract_dir)}
    for base, dirs, files in os.walk(filename):
        src_prefix, dst_dir = mapping[base]
        for name in dirs:
            mapping[os.path.join(base, name)] = (
                src_prefix + name + '/', os.path.join(dst_dir, name))
        for name in files:
            target = progress_filter(src_prefix + name,
                                     os.path.join(dst_dir, name))
            if not target:
                continue  # entry filtered out by the callback
            ensure_directory(target)
            source = os.path.join(base, name)
            shutil.copyfile(source, target)
            shutil.copystat(source, target)
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack zip `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
    by ``zipfile.is_zipfile()``).  See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.
    """
    if not zipfile.is_zipfile(filename):
        raise UnrecognizedFormat("%s is not a zip file" % (filename,))

    with ContextualZipFile(filename) as z:
        for info in z.infolist():
            name = info.filename

            # don't extract absolute paths or ones with .. in them
            if name.startswith('/') or '..' in name.split('/'):
                continue

            target = os.path.join(extract_dir, *name.split('/'))
            target = progress_filter(name, target)
            if not target:
                continue
            if name.endswith('/'):
                # directory
                ensure_directory(target)
            else:
                # file
                ensure_directory(target)
                data = z.read(info.filename)
                # 'with' guarantees the handle is closed even if write()
                # fails, replacing the open/try/finally boilerplate.
                with open(target, 'wb') as f:
                    f.write(data)
                del data
            # restore Unix permission bits stored in the upper 16 bits of
            # external_attr, when present
            unix_attributes = info.external_attr >> 16
            if unix_attributes:
                os.chmod(target, unix_attributes)
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
    by ``tarfile.open()``).  See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.
    """
    try:
        tarobj = tarfile.open(filename)
    except tarfile.TarError:
        raise UnrecognizedFormat(
            "%s is not a compressed or uncompressed tar file" % (filename,)
        )
    with contextlib.closing(tarobj):
        tarobj.chown = lambda *args: None # don't do any chowning!
        for member in tarobj:
            name = member.name
            # don't extract absolute paths or ones with .. in them
            if not name.startswith('/') and '..' not in name.split('/'):
                prelim_dst = os.path.join(extract_dir, *name.split('/'))

                # resolve any links and to extract the link targets as normal files
                # (symlink targets are resolved relative to the link's own
                # directory; a chain of links is followed until a real entry
                # or None)
                while member is not None and (member.islnk() or member.issym()):
                    linkpath = member.linkname
                    if member.issym():
                        linkpath = posixpath.join(posixpath.dirname(member.name), linkpath)
                    linkpath = posixpath.normpath(linkpath)
                    member = tarobj._getmember(linkpath)

                if member is not None and (member.isfile() or member.isdir()):
                    final_dst = progress_filter(name, prelim_dst)
                    if final_dst:
                        if final_dst.endswith(os.sep):
                            final_dst = final_dst[:-1]
                        try:
                            # XXX Ugh -- relies on a private TarFile helper;
                            # behavior may differ across Python versions
                            tarobj._extract_member(member, final_dst)
                        except tarfile.ExtractError:
                            pass    # chown/chmod/mkfifo/mknod/makedev failed
        return True
extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile | unknown | codeparrot/codeparrot-clean | ||
#ifndef C10_UTIL_TYPE_H_
#define C10_UTIL_TYPE_H_
#include <cstddef>
#include <string>
#ifdef __GXX_RTTI
#include <typeinfo>
#endif // __GXX_RTTI
#include <c10/macros/Macros.h>
namespace c10 {
/// Utility to demangle a C++ symbol name.
C10_API std::string demangle(const char* name);
/// Returns the printable name of the type.
///
/// The demangled name is computed once per instantiation and cached in a
/// heap-allocated string that is intentionally never freed (the `new`
/// bound to a static reference); this appears deliberate so the storage
/// cannot be destroyed during static teardown while late callers still
/// hold the pointer -- NOTE(review): confirm against c10 conventions.
template <typename T>
inline const char* demangle_type() {
#ifdef __GXX_RTTI
  static const auto& name = *(new std::string(demangle(typeid(T).name())));
  return name.c_str();
#else // __GXX_RTTI
  // Without RTTI there is no typeid() to demangle; return a fixed marker.
  return "(RTTI disabled, cannot show name)";
#endif // __GXX_RTTI
}
} // namespace c10
#endif // C10_UTIL_TYPE_H_ | c | github | https://github.com/pytorch/pytorch | c10/util/Type.h |
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2014 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from formencode import Invalid
from pylons import request, tmpl_context
from mediadrop.forms.login import LoginForm
from mediadrop.lib.base import BaseController
from mediadrop.lib.helpers import redirect, url_for
from mediadrop.lib.i18n import _
from mediadrop.lib.decorators import expose, observable
from mediadrop.lib.routing_helpers import dispatch_info_for_url, is_url_for_mediadrop_domain
from mediadrop.plugin import events
import logging
log = logging.getLogger(__name__)
login_form = LoginForm()
class LoginController(BaseController):
    """Serve the login form and the post-login/logout redirect hooks.

    Credential checking itself is performed by the repoze.who middleware
    (configured in mediadrop.lib.auth); this controller only renders the
    form and handles the URLs repoze.who routes to.
    """

    @expose('login.html')
    @observable(events.LoginController.login)
    def login(self, came_from=None, **kwargs):
        """Render the login form, showing a generic error after a failed try."""
        # Already-authenticated users are sent on immediately.
        if request.environ.get('repoze.who.identity'):
            redirect(came_from or '/')

        # the friendlyform plugin requires that these values are set in the
        # query string
        form_url = url_for('/login/submit',
            came_from=(came_from or '').encode('utf-8'),
            __logins=str(self._is_failed_login()))

        login_errors = None
        if self._is_failed_login():
            # One generic message plus dummy field errors so both inputs are
            # highlighted without revealing which value was wrong.
            login_errors = Invalid('dummy', None, {}, error_dict={
                '_form': Invalid(_('Invalid username or password.'), None, {}),
                'login': Invalid('dummy', None, {}),
                'password': Invalid('dummy', None, {}),
            })
        return dict(
            login_form = login_form,
            form_action = form_url,
            form_values = kwargs,
            login_errors = login_errors,
        )

    @expose()
    def login_handler(self):
        """This is a dummy method.

        Without a dummy method, Routes will throw a NotImplemented exception.
        Calls that would route to this method are intercepted by
        repoze.who, as defined in mediadrop.lib.auth
        """
        pass

    @expose()
    def logout_handler(self):
        """This is a dummy method.

        Without a dummy method, Routes will throw a NotImplemented exception.
        Calls that would route to this method are intercepted by
        repoze.who, as defined in mediadrop.lib.auth
        """
        pass

    @expose()
    @observable(events.LoginController.post_login)
    def post_login(self, came_from=None, **kwargs):
        """Dispatch after repoze.who processed the login attempt."""
        if not request.identity:
            # The FriendlyForm plugin will always issue a redirect to
            # /login/continue (post login url) even for failed logins.
            # If 'came_from' is a protected page (i.e. /admin) we could just
            # redirect there and the login form will be displayed again with
            # our login error message.
            # However if the user tried to login from the front page, this
            # mechanism doesn't work so go to the login method directly here.
            self._increase_number_of_failed_logins()
            return self.login(came_from=came_from)
        if came_from:
            url_mapper = request.environ['routes.url'].mapper
            target = dispatch_info_for_url(came_from, url_mapper)
            # Only follow 'came_from' when it stays on our own host and the
            # target action accepts GET; otherwise drop it.
            if not is_url_for_mediadrop_domain(came_from):
                log.debug('no redirect to %r because target url does match our hostname (prevents parameter base redirection attacks)' % came_from)
                came_from = None
            elif (target is not None) and getattr(target.action, '_request_method', None) not in ('GET', None):
                log.debug('no redirect to %r because target url does not allow GET requests' % came_from)
                came_from = None
        if came_from:
            redirect(came_from)
        # It is important to return absolute URLs (if app mounted in subdirectory)
        if request.perm.contains_permission(u'edit') or request.perm.contains_permission(u'admin'):
            redirect(url_for('/admin', qualified=True))
        redirect(url_for('/', qualified=True))

    @expose()
    @observable(events.LoginController.post_logout)
    def post_logout(self, came_from=None, **kwargs):
        """Always land on the front page after logout."""
        redirect('/')

    def _is_failed_login(self):
        # repoze.who.logins will always be an integer even if the HTTP login
        # counter variable contained a non-digit string
        return (request.environ.get('repoze.who.logins', 0) > 0)

    def _increase_number_of_failed_logins(self):
        # Bump the counter repoze.who stores in the WSGI environ.
        request.environ['repoze.who.logins'] += 1

    def __call__(self, environ, start_response):
        """Invoke the Controller"""
        # BaseController.__call__ dispatches to the Controller method
        # the request is routed to. This routing information is
        # available in environ['pylons.routes_dict']
        request.identity = request.environ.get('repoze.who.identity')
        tmpl_context.identity = request.identity
        return BaseController.__call__(self, environ, start_response)
<?php declare(strict_types=1);
/*
* This file is part of Composer.
*
* (c) Nils Adermann <naderman@naderman.de>
* Jordi Boggiano <j.boggiano@seld.be>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Composer\Test\Command;
use Composer\Test\TestCase;
use RuntimeException;
/**
 * Functional tests for the `composer repo` command: listing, adding,
 * removing, reordering and mutating repositories in composer.json, covering
 * both list-style and assoc-style repository storage.
 */
class RepositoryCommandTest extends TestCase
{
    public function testListWithNoRepositories(): void
    {
        $this->initTempComposer([]);

        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', 'action' => 'list']);
        $appTester->assertCommandIsSuccessful();

        self::assertSame('[packagist.org] composer https://repo.packagist.org', trim($appTester->getDisplay(true)));

        // composer.json should remain unchanged
        self::assertSame([], json_decode((string) file_get_contents('composer.json'), true));
    }

    public function testListWithRepositoriesAsList(): void
    {
        $this->initTempComposer([
            'repositories' => [
                ['type' => 'composer', 'url' => 'https://first.test'],
                ['name' => 'foo', 'type' => 'vcs', 'url' => 'https://old.example.org'],
                ['name' => 'bar', 'type' => 'vcs', 'url' => 'https://other.example.org'],
            ],
        ]);

        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', 'action' => 'list']);
        $appTester->assertCommandIsSuccessful();

        self::assertSame('[0] composer https://first.test
[foo] vcs https://old.example.org
[bar] vcs https://other.example.org
[packagist.org] disabled', trim($appTester->getDisplay(true)));
    }

    public function testListWithRepositoriesAsAssoc(): void
    {
        $this->initTempComposer([
            'repositories' => [
                ['type' => 'composer', 'url' => 'https://first.test'],
                'foo' => ['type' => 'vcs', 'url' => 'https://old.example.org'],
                'bar' => ['type' => 'vcs', 'url' => 'https://other.example.org'],
            ],
        ]);

        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', 'action' => 'list']);
        $appTester->assertCommandIsSuccessful();

        self::assertSame('[0] composer https://first.test
[foo] vcs https://old.example.org
[bar] vcs https://other.example.org
[packagist.org] disabled', trim($appTester->getDisplay(true)));
    }

    public function testAddRepositoryWithTypeAndUrl(): void
    {
        $this->initTempComposer([]);

        $appTester = $this->getApplicationTester();
        $result = $appTester->run([
            'command' => 'repo',
            'action' => 'add',
            'name' => 'foo',
            'arg1' => 'vcs',
            'arg2' => 'https://example.org/foo.git',
        ]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        $json = json_decode((string) file_get_contents('composer.json'), true);
        self::assertSame(['repositories' => [
            ['name' => 'foo', 'type' => 'vcs', 'url' => 'https://example.org/foo.git'],
        ]], $json);
    }

    public function testAddRepositoryWithJson(): void
    {
        $this->initTempComposer([]);

        $appTester = $this->getApplicationTester();
        // The repository definition may be passed as a single JSON blob.
        $appTester->run([
            'command' => 'repo',
            'action' => 'add',
            'name' => 'bar',
            'arg1' => '{"type":"composer","url":"https://repo.example.org"}',
        ]);
        $appTester->assertCommandIsSuccessful();

        $json = json_decode((string) file_get_contents('composer.json'), true);
        self::assertSame(['repositories' => [
            ['name' => 'bar', 'type' => 'composer', 'url' => 'https://repo.example.org'],
        ]], $json);
    }

    public function testRemoveRepository(): void
    {
        $this->initTempComposer(['repositories' => ['foo' => ['type' => 'vcs', 'url' => 'https://example.org']]], [], [], false);

        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', 'action' => 'remove', 'name' => 'foo']);
        $appTester->assertCommandIsSuccessful();

        $json = json_decode((string) file_get_contents('composer.json'), true);
        // repositories key may still exist as empty array depending on manipulator, accept either
        if (isset($json['repositories'])) {
            self::assertSame([], $json['repositories']);
        } else {
            self::assertSame([], $json);
        }
    }

    /**
     * @dataProvider provideTestSetAndGetUrlInRepositoryAssoc
     * @param array<string, mixed> $repositories
     */
    public function testSetAndGetUrlInRepositoryAssoc(array $repositories, string $name, string $index, string $newUrl): void
    {
        $this->initTempComposer(['repositories' => $repositories], [], [], false);

        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', 'action' => 'set-url', 'name' => $name, 'arg1' => $newUrl]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        $json = json_decode((string) file_get_contents('composer.json'), true);
        // calling it still in assoc means, the repository has not been converted, which is good
        self::assertSame($newUrl, $json['repositories'][$index]['url'] ?? null);

        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', 'action' => 'get-url', 'name' => $name]);
        $appTester->assertCommandIsSuccessful();
        self::assertSame($newUrl, trim($appTester->getDisplay(true)));
    }

    /**
     * @return iterable<array{0: array, 1: string, 2: string, 3: string}>
     */
    public static function provideTestSetAndGetUrlInRepositoryAssoc(): iterable
    {
        $repositories = [
            'first' => ['type' => 'composer', 'url' => 'https://first.test'],
            'foo' => ['type' => 'vcs', 'url' => 'https://old.example.org'],
            'bar' => ['type' => 'vcs', 'url' => 'https://other.example.org'],
        ];

        yield 'change first of three' => [
            $repositories,
            'first',
            'first',
            'https://new.example.org',
        ];

        yield 'change middle of three' => [
            $repositories,
            'foo',
            'foo',
            'https://new.example.org',
        ];

        yield 'change last of three' => [
            $repositories,
            'bar',
            'bar',
            'https://new.example.org',
        ];
    }

    /**
     * @dataProvider provideTestSetAndGetUrlInRepositoryList
     * @param list<array<string, mixed>> $repositories
     */
    public function testSetAndGetUrlInRepositoryList(array $repositories, string $name, int $index, string $newUrl): void
    {
        $this->initTempComposer(['repositories' => $repositories], [], [], false);

        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', 'action' => 'set-url', 'name' => $name, 'arg1' => $newUrl]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        $json = json_decode((string) file_get_contents('composer.json'), true);
        self::assertSame($name, $json['repositories'][$index]['name'] ?? null);
        self::assertSame($newUrl, $json['repositories'][$index]['url'] ?? null);

        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', 'action' => 'get-url', 'name' => $name]);
        $appTester->assertCommandIsSuccessful();
        self::assertSame($newUrl, trim($appTester->getDisplay(true)));
    }

    /**
     * @return iterable<array{0: array, 1: string, 2: int, 3: string}>
     */
    public static function provideTestSetAndGetUrlInRepositoryList(): iterable
    {
        $repositories = [
            ['name' => 'first', 'type' => 'composer', 'url' => 'https://first.test'],
            ['name' => 'foo', 'type' => 'vcs', 'url' => 'https://old.example.org'],
            ['name' => 'bar', 'type' => 'vcs', 'url' => 'https://other.example.org'],
        ];

        yield 'change first of three' => [
            $repositories,
            'first',
            0,
            'https://new.example.org',
        ];

        yield 'change middle of three' => [
            $repositories,
            'foo',
            1,
            'https://new.example.org',
        ];

        yield 'change last of three' => [
            $repositories,
            'bar',
            2,
            'https://new.example.org',
        ];
    }

    public function testDisableAndEnablePackagist(): void
    {
        $this->initTempComposer([]);

        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', 'action' => 'disable', 'name' => 'packagist']);
        $appTester->assertCommandIsSuccessful();

        $json = json_decode((string) file_get_contents('composer.json'), true);
        self::assertSame(['repositories' => [['packagist.org' => false]]], $json);

        // enable packagist should remove the override
        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', 'action' => 'enable', 'name' => 'packagist']);
        $appTester->assertCommandIsSuccessful();

        $json = json_decode((string) file_get_contents('composer.json'), true);
        self::assertSame([], $json);
    }

    public function testInvalidArgCombinationThrows(): void
    {
        $this->expectException(RuntimeException::class);
        $this->expectExceptionMessage('--file and --global can not be combined');

        $appTester = $this->getApplicationTester();
        $appTester->run(['command' => 'repo', '--file' => 'alt.composer.json', '--global' => true]);
    }

    public function testPrependRepositoryByNameListToAssoc(): void
    {
        $this->initTempComposer(['repositories' => [['type' => 'git', 'url' => 'example.tld']]], [], [], false);

        $appTester = $this->getApplicationTester();
        $appTester->run([
            'command' => 'repo',
            'action' => 'add',
            'name' => 'foo',
            'arg1' => 'path',
            'arg2' => 'foo/bar',
        ]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        $json = json_decode((string) file_get_contents('composer.json'), true);
        self::assertSame([
            'repositories' => [
                ['name' => 'foo', 'type' => 'path', 'url' => 'foo/bar'],
                ['type' => 'git', 'url' => 'example.tld'],
            ],
        ], $json);
    }

    public function testAppendRepositoryByNameListToAssoc(): void
    {
        $this->initTempComposer(['repositories' => [['type' => 'git', 'url' => 'example.tld']]], [], [], false);

        $appTester = $this->getApplicationTester();
        $appTester->run([
            'command' => 'repo',
            'action' => 'add',
            'name' => 'foo',
            'arg1' => 'path',
            'arg2' => 'foo/bar',
            '--append' => true,
        ]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        $json = json_decode((string) file_get_contents('composer.json'), true);
        self::assertSame([
            'repositories' => [
                ['type' => 'git', 'url' => 'example.tld'],
                ['name' => 'foo', 'type' => 'path', 'url' => 'foo/bar'],
            ],
        ], $json);
    }

    public function testPrependRepositoryAssocWithPackagistDisabled(): void
    {
        $this->initTempComposer(['repositories' => [['type' => 'git', 'url' => 'example.tld'], 'packagist.org' => false]]);

        $appTester = $this->getApplicationTester();
        $appTester->run([
            'command' => 'repo',
            'action' => 'add',
            'name' => 'foo',
            'arg1' => 'path',
            'arg2' => 'foo/bar',
        ]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        $json = json_decode((string) file_get_contents('composer.json'), true);
        self::assertSame([
            'repositories' => [
                ['name' => 'foo', 'type' => 'path', 'url' => 'foo/bar'],
                ['type' => 'git', 'url' => 'example.tld'],
                ['packagist.org' => false],
            ],
        ], $json);
    }

    public function testAppendRepositoryAssocWithPackagistDisabled(): void
    {
        $this->initTempComposer(['repositories' => [['type' => 'git', 'url' => 'example.tld'], 'packagist.org' => false]]);

        $appTester = $this->getApplicationTester();
        $appTester->run([
            'command' => 'repo',
            'action' => 'add',
            'name' => 'foo',
            'arg1' => 'path',
            'arg2' => 'foo/bar',
            '--append' => true,
        ]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        $json = json_decode((string) file_get_contents('composer.json'), true);
        self::assertSame([
            'repositories' => [
                ['type' => 'git', 'url' => 'example.tld'],
                ['packagist.org' => false],
                ['name' => 'foo', 'type' => 'path', 'url' => 'foo/bar'],
            ],
        ], $json);
    }

    public function testAddBeforeAndAfterByName(): void
    {
        // Start with two repos as named-list and a disabled packagist boolean
        $this->initTempComposer(['repositories' => [
            ['name' => 'alpha', 'type' => 'vcs', 'url' => 'https://example.org/a'],
            ['name' => 'omega', 'type' => 'vcs', 'url' => 'https://example.org/o'],
            'packagist.org' => false,
        ]]);

        // Insert before omega
        $appTester = $this->getApplicationTester();
        $appTester->run([
            'command' => 'repo',
            'action' => 'add',
            'name' => 'beta',
            'arg1' => 'vcs',
            'arg2' => 'https://example.org/b',
            '--before' => 'omega',
        ]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        // Insert after alpha
        $appTester = $this->getApplicationTester();
        $appTester->run([
            'command' => 'repo',
            'action' => 'add',
            'name' => 'gamma',
            'arg1' => 'vcs',
            'arg2' => 'https://example.org/g',
            '--after' => 'alpha',
        ]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        $json = json_decode((string) file_get_contents('composer.json'), true);
        // Expect order: alpha, gamma, beta, omega, then packagist.org boolean preserved
        self::assertSame([
            'repositories' => [
                ['name' => 'alpha', 'type' => 'vcs', 'url' => 'https://example.org/a'],
                ['name' => 'gamma', 'type' => 'vcs', 'url' => 'https://example.org/g'],
                ['name' => 'beta', 'type' => 'vcs', 'url' => 'https://example.org/b'],
                ['name' => 'omega', 'type' => 'vcs', 'url' => 'https://example.org/o'],
                ['packagist.org' => false],
            ],
        ], $json);
    }

    public function testAddSameNameReplacesExisting(): void
    {
        $this->initTempComposer([]);

        // first add
        $appTester = $this->getApplicationTester();
        $appTester->run([
            'command' => 'repo',
            'action' => 'add',
            'name' => 'foo',
            'arg1' => 'vcs',
            'arg2' => 'https://example.org/old',
        ]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        // second add with same name but different url
        $appTester = $this->getApplicationTester();
        $appTester->run([
            'command' => 'repo',
            'action' => 'add',
            'name' => 'foo',
            'arg1' => 'vcs',
            'arg2' => 'https://example.org/new',
            '--append' => true,
        ]);
        $appTester->assertCommandIsSuccessful($appTester->getDisplay());

        $json = json_decode((string) file_get_contents('composer.json'), true);
        // repositories can be stored as assoc or named-list depending on manipulator fallbacks
        // Validate there is only one "foo" and its url is the latest
        $countFoo = 0;
        $url = null;
        foreach ($json['repositories'] as $k => $repo) {
            if (is_string($k) && $k === 'foo' && is_array($repo)) {
                $countFoo++;
                $url = $repo['url'] ?? null;
            } elseif (is_array($repo) && isset($repo['name']) && $repo['name'] === 'foo') {
                $countFoo++;
                $url = $repo['url'] ?? null;
            }
        }
        self::assertSame(1, $countFoo, 'Exactly one repository entry with name foo should exist');
        self::assertSame('https://example.org/new', $url, 'The foo repository should have been updated to the new URL');
    }
}
#
# gdb helper commands and functions for Linux kernel debugging
#
# module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import cpus, utils
module_type = utils.CachedType("struct module")
def module_list():
    """Yield each entry on the kernel's global 'modules' list as a
    ``struct module *`` gdb.Value."""
    global module_type
    module_ptr_type = module_type.get_type().pointer()
    modules = gdb.parse_and_eval("modules")
    entry = modules['next']
    # The list is circular; stop when we come back around to the head.
    end_of_list = modules.address

    while entry != end_of_list:
        yield utils.container_of(entry, module_ptr_type, "list")
        entry = entry['next']
def find_module_by_name(name):
    """Return the loaded module whose name matches, or None if not found."""
    matches = (m for m in module_list() if m['name'].string() == name)
    return next(matches, None)
class LxModule(gdb.Function):
    """Find module by name and return the module variable.

    $lx_module("MODULE"): Given the name MODULE, iterate over all loaded
    modules of the target and return that module variable which MODULE
    matches.
    """

    def __init__(self):
        super(LxModule, self).__init__("lx_module")

    def invoke(self, mod_name):
        # gdb passes the argument as a gdb.Value; convert to a Python str.
        mod_name = mod_name.string()
        module = find_module_by_name(mod_name)
        if module:
            # Dereference so the convenience function yields the struct
            # itself rather than a pointer.
            return module.dereference()
        else:
            raise gdb.GdbError("Unable to find MODULE " + mod_name)


# Register the $lx_module convenience function with gdb on import.
LxModule()
class LxLsmod(gdb.Command):
    """List currently loaded modules."""

    # Lazily-resolved handle to "struct module_use" for walking source_list.
    _module_use_type = utils.CachedType("struct module_use")

    def __init__(self):
        super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        gdb.write(
            "Address{0} Module Size Used by\n".format(
                " " if utils.get_long_type().sizeof == 8 else ""))

        for module in module_list():
            # Sum the per-cpu reference counters (incs - decs) across all
            # possible CPUs to obtain the module's use count.
            ref = 0
            module_refptr = module['refptr']
            for cpu in cpus.cpu_list("cpu_possible_mask"):
                refptr = cpus.per_cpu(module_refptr, cpu)
                ref += refptr['incs']
                ref -= refptr['decs']

            gdb.write("{address} {name:<19} {size:>8} {ref}".format(
                address=str(module['module_core']).split()[0],
                name=module['name'].string(),
                size=str(module['core_size']),
                ref=str(ref)))

            # Walk source_list to print the modules that depend on this one,
            # comma-separated after the first entry.
            source_list = module['source_list']
            t = self._module_use_type.get_type().pointer()
            entry = source_list['next']
            first = True
            while entry != source_list.address:
                use = utils.container_of(entry, t, "source_list")
                gdb.write("{separator}{name}".format(
                    separator=" " if first else ",",
                    name=use['source']['name'].string()))
                first = False
                entry = entry['next']

            gdb.write("\n")


# Register the lx-lsmod command with gdb on import.
LxLsmod()
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_classic.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain_classic.agents.types import AGENT_TO_CLASS
from langchain_classic.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__name__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
    config: dict,
    llm: BaseLanguageModel,
    tools: list[Tool],
    **kwargs: Any,
) -> BaseSingleActionAgent | BaseMultiActionAgent:
    """Instantiate an agent from its config dict plus an LLM and tools."""
    config_type = config.pop("_type")
    agent_cls = AGENT_TO_CLASS.get(config_type)
    if agent_cls is None:
        raise ValueError(f"Loading {config_type} agent not supported")
    # Explicit kwargs take precedence over values from the config dict.
    return agent_cls.from_llm_and_tools(llm, tools, **{**config, **kwargs})
@deprecated("0.1.0", removal="1.0")
def load_agent_from_config(
    config: dict,
    llm: BaseLanguageModel | None = None,
    tools: list[Tool] | None = None,
    **kwargs: Any,
) -> BaseSingleActionAgent | BaseMultiActionAgent:
    """Load agent from Config Dict.

    Args:
        config: Config dict to load agent from. The caller's dict is not
            modified; a shallow copy is taken before keys are consumed.
        llm: Language model to use as the agent.
        tools: List of tools this agent has access to.
        kwargs: Additional keyword arguments passed to the agent executor.

    Returns:
        An agent executor.

    Raises:
        ValueError: If agent type is not specified in the config.
    """
    if "_type" not in config:
        msg = "Must specify an agent Type in config"
        raise ValueError(msg)

    # Fix: work on a shallow copy so the pops/deletes below do not mutate
    # the caller's dict (the original silently consumed its keys).
    config = dict(config)

    load_from_tools = config.pop("load_from_llm_and_tools", False)
    if load_from_tools:
        if llm is None:
            msg = (
                "If `load_from_llm_and_tools` is set to True, then LLM must be provided"
            )
            raise ValueError(msg)
        if tools is None:
            msg = (
                "If `load_from_llm_and_tools` is set to True, "
                "then tools must be provided"
            )
            raise ValueError(msg)
        return _load_agent_from_tools(config, llm, tools, **kwargs)

    config_type = config.pop("_type")
    if config_type not in AGENT_TO_CLASS:
        msg = f"Loading {config_type} agent not supported"
        raise ValueError(msg)

    agent_cls = AGENT_TO_CLASS[config_type]
    # Resolve the nested LLM chain, given inline or as a file path.
    if "llm_chain" in config:
        config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
    elif "llm_chain_path" in config:
        config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
    else:
        msg = "One of `llm_chain` and `llm_chain_path` should be specified."
        raise ValueError(msg)

    if "output_parser" in config:
        logger.warning(
            "Currently loading output parsers on agent is not supported, "
            "will just use the default one.",
        )
        del config["output_parser"]

    combined_config = {**config, **kwargs}
    return agent_cls(**combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent(
    path: str | Path,
    **kwargs: Any,
) -> BaseSingleActionAgent | BaseMultiActionAgent:
    """Unified method for loading an agent from LangChainHub or local fs.

    Args:
        path: Path to the agent file.
        kwargs: Additional keyword arguments passed to the agent executor.

    Returns:
        An agent executor.

    Raises:
        RuntimeError: If loading from the deprecated github-based
            Hub is attempted.
    """
    # The legacy hub used "lc://" URIs; those are no longer served.
    if isinstance(path, str) and path.startswith("lc://"):
        msg = (
            "Loading from the deprecated github-based Hub is no longer supported. "
            "Please use the new LangChain Hub at https://smith.langchain.com/hub "
            "instead."
        )
        raise RuntimeError(msg)
    return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
    file: str | Path,
    **kwargs: Any,
) -> BaseSingleActionAgent | BaseMultiActionAgent:
    """Read an agent config from a .json or .yaml file and load it."""
    valid_suffixes = {"json", "yaml"}
    file_path = Path(file) if isinstance(file, str) else file
    suffix = file_path.suffix[1:]
    # Reject unknown extensions up front, then parse with the matching loader.
    if suffix not in valid_suffixes:
        msg = f"Unsupported file type, must be one of {valid_suffixes}."
        raise ValueError(msg)
    with file_path.open() as f:
        config = json.load(f) if suffix == "json" else yaml.safe_load(f)
    return load_agent_from_config(config, **kwargs)
import os
import re, unicodedata
import tg
import gettext
import math
import inspect
# Sentinel type: used where "no default was supplied" must be distinguishable
# from a legitimate None/False default value.
class NoDefault(object):
    """A dummy value used for parameters with no default."""
def slugify(value, type, models):
    """Build a slug of the form "<type>-<text>-<count>" for a product.

    ``value`` may be a plain string or a {lang: text} mapping; for a
    mapping the last (key, value) pair iterated wins (NOTE(review): relies
    on dict iteration order — callers presumably pass single-key dicts,
    confirm). ``count`` is the number of existing products sharing the
    same localized name and type, keeping repeated names unique.
    """
    if isinstance(value, dict):
        for k, v in value.iteritems():
            key = k
            value = v
        counter = models.Product.query.find({'name.%s' % key: value, 'type': type}).count()
    else:
        counter = models.Product.query.find({'name.%s' % tg.config.lang: value, 'type': type}).count()
    value = type + '-' + value
    # Strip accents, drop anything but word chars/whitespace/hyphens, then
    # collapse runs of whitespace/hyphens into single hyphens.
    # Fix: raw strings — '\w' and '\s' are invalid escapes in plain strings.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    value = re.sub(r'[-\s]+', '-', value)
    value = value + '-' + str(counter)
    return value
def slugify_category(value, models):
    """Build a slug for a category name, appending a counter only when the
    localized name already exists.

    ``value`` may be a plain string or a {lang: text} mapping; for a
    mapping the last (key, value) pair iterated wins (NOTE(review): relies
    on dict iteration order — confirm single-key callers).
    """
    if isinstance(value, dict):
        for k, v in value.iteritems():
            key = k
            value = v
        counter = models.Category.query.find({'name.%s' % key: value}).count()
    else:
        counter = models.Category.query.find({'name.%s' % tg.config.lang: value}).count()
    # Strip accents, drop non word/space/hyphen chars, collapse separators.
    # Fix: raw strings — '\w' and '\s' are invalid escapes in plain strings.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    value = re.sub(r'[-\s]+', '-', value)
    if counter != 0:
        value += '-' + str(counter)
    return value
def short_lang(languages_list):
    """Return the primary language code of the first locale in the list.

    "en_US" becomes "en". When the list is empty (IndexError) or is not
    indexable at all (TypeError), fall back to the site's configured
    default language.
    """
    try:
        locale = languages_list[0]
    except (IndexError, TypeError):
        return tg.config.lang
    return locale.split("_")[0]
def internationalise(value):
    """Wrap a plain string as a {lang: value} mapping; pass dicts through
    untouched (they are assumed to already be localized)."""
    return value if isinstance(value, dict) else {tg.config.lang: value}
def preferred_language():
    """Short code ("en", "fr", ...) of the user's preferred UI language."""
    active_langs = tg.i18n.get_lang(all=False)
    return short_lang(active_langs)
class with_currency(object):
    """Decorator factory: converts the named float arguments of the wrapped
    function to integer cents before the call, then converts the integer
    result back to a float amount (truncated to whole cents)."""

    @staticmethod
    def float2cur(n):
        # float amount -> integer cents, rounded
        return int(round(n * 100.0))

    @staticmethod
    def cur2float(n):
        # integer cents -> float amount; floor drops fractional cents
        return math.floor(float(n)) / 100.0

    def __init__(self, *args):
        self.currencies = args

    def __call__(self, f):
        def _decorated(*args, **kwargs):
            # Resolve every argument by name so positional and keyword
            # call styles are handled uniformly.
            named_params = inspect.getcallargs(f, *args, **kwargs)
            for name in self.currencies:
                named_params[name] = self.float2cur(named_params[name])
            return self.cur2float(f(**named_params))
        return _decorated
@with_currency('price')
def apply_vat(price, vat):
    # 'price' arrives as integer cents (converted by the decorator); 'vat'
    # is a multiplier, e.g. 1.22 for 22% VAT. The decorator converts the
    # result back to a float amount.
    return price*vat


@with_currency('total', 'discount')
def apply_discount(total, discount):
    # Both amounts are integer cents here; plain subtraction.
    return total - discount


def apply_percentage_discount(total, percentage):
    # Compose the helpers: compute the absolute discount, then subtract it.
    discount = get_percentage_discount(total, percentage)
    return apply_discount(total, discount)


@with_currency('total')
def get_percentage_discount(total, percentage):
    # 'percentage' is on a 0-100 scale.
    return total * (percentage / 100.0)
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "v0alpha1.Repeating-an-empty-row.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "",
"version": "v0",
"spec": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {},
"layout": {
"kind": "RowsLayout",
"spec": {
"rows": [
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Row title $row",
"collapse": false,
"repeat": {
"mode": "variable",
"value": "row"
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": []
}
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "utc",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Repeating an empty row",
"variables": [
{
"kind": "CustomVariable",
"spec": {
"name": "vertical",
"query": "1,2,3",
"current": {
"text": [
"All"
],
"value": [
"$__all"
]
},
"options": [
{
"selected": true,
"text": "All",
"value": "$__all"
},
{
"selected": false,
"text": "1",
"value": "1"
},
{
"selected": false,
"text": "2",
"value": "2"
},
{
"selected": false,
"text": "3",
"value": "3"
}
],
"multi": true,
"includeAll": true,
"hide": "dontHide",
"skipUrlSync": false,
"allowCustomValue": true
}
},
{
"kind": "CustomVariable",
"spec": {
"name": "horizontal",
"query": "1,2,3",
"current": {
"text": [
"All"
],
"value": [
"$__all"
]
},
"options": [
{
"selected": true,
"text": "All",
"value": "$__all"
},
{
"selected": false,
"text": "1",
"value": "1"
},
{
"selected": false,
"text": "2",
"value": "2"
},
{
"selected": false,
"text": "3",
"value": "3"
}
],
"multi": true,
"includeAll": true,
"hide": "dontHide",
"skipUrlSync": false,
"allowCustomValue": true
}
},
{
"kind": "CustomVariable",
"spec": {
"name": "row",
"query": "1,2,3",
"current": {
"text": [
"All"
],
"value": [
"$__all"
]
},
"options": [
{
"selected": true,
"text": "All",
"value": "$__all"
},
{
"selected": false,
"text": "1",
"value": "1"
},
{
"selected": false,
"text": "2",
"value": "2"
},
{
"selected": false,
"text": "3",
"value": "3"
}
],
"multi": true,
"includeAll": true,
"hide": "dontHide",
"skipUrlSync": false,
"allowCustomValue": true
}
}
]
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/e2e-repeats/v0alpha1.Repeating-an-empty-row.v42.v2beta1.json |
#!/usr/bin/env python
# straight outta them aubio docs
def apply_filter(path):
    """Run the audio file at ``path`` through an A-weighting filter and
    write the result to ``filtered_<name>.wav`` in the working directory."""
    from aubio import source, sink, digital_filter
    from os.path import basename, splitext

    # open input file, get its samplerate
    reader = source(path)
    samplerate = reader.samplerate

    # 7-tap digital filter configured as an A-weighting curve
    weighting = digital_filter(7)
    weighting.set_a_weighting(samplerate)

    # create output file
    out_name = "filtered_" + splitext(basename(path))[0] + ".wav"
    writer = sink(out_name, samplerate)

    # stream hop-sized chunks through the filter; the final (short) read
    # signals end of input
    total_frames = 0
    while True:
        samples, read = reader()
        writer(weighting(samples), read)
        total_frames += read
        if read < reader.hop_size:
            break

    duration = total_frames / float(samplerate)
    print("read {:s}".format(reader.uri))
    print("applied A-weighting filtered ({:d} Hz)".format(samplerate))
    print("wrote {:s} ({:.2f} s)".format(writer.uri, duration))


if __name__ == '__main__':
    import sys
    for f in sys.argv[1:]:
        apply_filter(f)
<!doctype html>
<!-- #docregion -->
<html lang="en">
<head>
<base href="/" />
<title>Drag and Drop Disabled Sorting Example</title>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
</head>
<body>
<cdk-drag-drop-disabled-sorting-example> </cdk-drag-drop-disabled-sorting-example>
</body>
</html>
<!-- #enddocregion --> | html | github | https://github.com/angular/angular | adev/src/content/examples/drag-drop/src/disable-sorting/index.html |
"""A basic example of using the SQLAlchemy Sharding API.
Sharding refers to horizontally scaling data across multiple
databases.
The basic components of a "sharded" mapping are:
* multiple databases, each assigned a 'shard id'
* a function which can return a single shard id, given an instance
to be saved; this is called "shard_chooser"
* a function which can return a list of shard ids which apply to a particular
instance identifier; this is called "id_chooser". If it returns all shard ids,
all shards will be searched.
* a function which can return a list of shard ids to try, given a particular
Query ("query_chooser"). If it returns all shard ids, all shards will be
queried and the results joined together.
In this example, four sqlite databases will store information about weather
data on a database-per-continent basis. We provide example shard_chooser,
id_chooser and query_chooser functions. The query_chooser illustrates
inspection of the SQL expression element in order to attempt to determine a
single shard being requested.
The construction of generic sharding routines is an ambitious approach
to the issue of organizing instances among multiple databases. For a
more plain-spoken alternative, the "distinct entity" approach
is a simple method of assigning objects to different tables (and potentially
database nodes) in an explicit way - described on the wiki at
`EntityName <http://www.sqlalchemy.org/trac/wiki/UsageRecipes/EntityName>`_.
.. autosource::
""" | unknown | codeparrot/codeparrot-clean | ||
from ipv8.database import database_blob
from ipv8.keyvault.crypto import default_eccrypto
from ipv8.peer import Peer
from ipv8.test.base import TestBase
from pony.orm import db_session
from tribler_core.modules.metadata_store.community.gigachannel_community import GigaChannelCommunity
from tribler_core.modules.metadata_store.orm_bindings.channel_node import LEGACY_ENTRY, NEW
from tribler_core.modules.metadata_store.serialization import REGULAR_TORRENT
from tribler_core.modules.metadata_store.store import MetadataStore
from tribler_core.tests.tools.base_test import MockObject
from tribler_core.utilities.path_util import Path
from tribler_core.utilities.random_utils import random_infohash
EMPTY_BLOB = database_blob(b"")
class TestGigaChannelUnits(TestBase):
    """
    Unit tests for the GigaChannel community which do not need a real Session.
    """

    def setUp(self):
        super(TestGigaChannelUnits, self).setUp()
        # Monotonic counter so each node's metadata store gets its own DB file.
        self.count = 0
        self.initialize(GigaChannelCommunity, 2)

    def create_node(self, *args, **kwargs):
        # Give every community node a private MetadataStore backed by a
        # fresh database file and a fresh curve25519 key.
        metadata_store = MetadataStore(
            Path(self.temporary_directory()) / ("%d.db" % self.count),
            Path(self.temporary_directory()),
            default_eccrypto.generate_key(u"curve25519"),
        )
        kwargs['metadata_store'] = metadata_store
        node = super(TestGigaChannelUnits, self).create_node(*args, **kwargs)
        self.count += 1
        return node

    def add_random_torrent(self, metadata_cls, name="test", channel=None):
        # Create and sign a minimal torrent entry, optionally inside channel.
        d = {"infohash": random_infohash(), "title": name, "tags": "", "size": 1234, "status": NEW}
        if channel:
            d.update({"origin_id": channel.id_})
        torrent_metadata = metadata_cls.from_dict(d)
        torrent_metadata.sign()

    async def test_send_random_subscribed_channel(self):
        """
        Test whether sending a single channel with a single torrent to another peer works correctly
        """
        with db_session:
            channel = self.nodes[0].overlay.metadata_store.ChannelMetadata.create_channel("test", "bla")
            self.add_random_torrent(self.nodes[0].overlay.metadata_store.TorrentMetadata, channel=channel)
            channel.commit_channel_torrent()

        # We must change the key for the first node so the created channel becomes foreign
        self.nodes[0].overlay.metadata_store.ChannelNode._my_key = default_eccrypto.generate_key(u"curve25519")

        await self.nodes[0].overlay.prepare_gossip_blob_cache()
        self.nodes[0].overlay.send_random_to(Peer(self.nodes[1].my_peer.public_key, self.nodes[1].endpoint.wan_address))
        await self.deliver_messages(timeout=0.5)

        with db_session:
            self.assertEqual(len(self.nodes[1].overlay.metadata_store.ChannelMetadata.select()), 1)
            channel = self.nodes[1].overlay.metadata_store.ChannelMetadata.select()[:][0]
            self.assertEqual(channel.contents_len, 1)

    async def test_send_random_personal_channel(self):
        """
        Test whether sending the personal channel works correctly
        """
        with db_session:
            channel = self.nodes[0].overlay.metadata_store.ChannelMetadata.create_channel("test", "bla")
            self.add_random_torrent(self.nodes[0].overlay.metadata_store.TorrentMetadata, channel=channel)
            channel.commit_channel_torrent()

        await self.nodes[0].overlay.prepare_gossip_blob_cache()
        self.nodes[0].overlay.send_random_to(Peer(self.nodes[1].my_peer.public_key, self.nodes[1].endpoint.wan_address))
        await self.deliver_messages(timeout=0.5)

        with db_session:
            self.assertEqual(len(self.nodes[1].overlay.metadata_store.ChannelMetadata.select()), 1)
            channel = self.nodes[1].overlay.metadata_store.ChannelMetadata.select()[:][0]
            self.assertEqual(channel.contents_len, 1)

    async def test_send_personal_and_random_channels(self):
        """
        Test whether sending the personal channel works correctly
        """
        with db_session:
            # Add non-personal channel
            channel = self.nodes[0].overlay.metadata_store.ChannelMetadata.create_channel("non-personal", "bla")
            self.add_random_torrent(self.nodes[0].overlay.metadata_store.TorrentMetadata, channel=channel)
            channel.commit_channel_torrent()

            # Add personal channel
            self.nodes[0].overlay.metadata_store.ChannelNode._my_key = default_eccrypto.generate_key(u"curve25519")
            # After the previous line the previously created channel becomes non-personal
            channel = self.nodes[0].overlay.metadata_store.ChannelMetadata.create_channel("personal", "bla")
            self.add_random_torrent(self.nodes[0].overlay.metadata_store.TorrentMetadata, channel=channel)
            channel.commit_channel_torrent()

        await self.nodes[0].overlay.prepare_gossip_blob_cache()
        self.nodes[0].overlay.send_random_to(Peer(self.nodes[1].my_peer.public_key, self.nodes[1].endpoint.wan_address))
        await self.deliver_messages(timeout=0.5)

        with db_session:
            self.assertEqual(len(self.nodes[1].overlay.metadata_store.ChannelMetadata.select()), 2)
            channels = self.nodes[1].overlay.metadata_store.ChannelMetadata.select()[:]
            self.assertEqual(channels[0].contents_len, 1)
            self.assertEqual(channels[1].contents_len, 1)

    async def test_send_random_multiple_torrents(self):
        """
        Test whether sending a single channel with a multiple torrents to another peer works correctly
        """
        with db_session:
            channel = self.nodes[0].overlay.metadata_store.ChannelMetadata.create_channel("test", "bla")
            for _ in range(10):
                self.add_random_torrent(self.nodes[0].overlay.metadata_store.TorrentMetadata, channel=channel)
            channel.commit_channel_torrent()

        await self.nodes[0].overlay.prepare_gossip_blob_cache()
        self.nodes[0].overlay.send_random_to(Peer(self.nodes[1].my_peer.public_key, self.nodes[1].endpoint.wan_address))
        await self.deliver_messages(timeout=0.5)

        with db_session:
            # Gossip sends only a sample, so the receiver gets some but not
            # all of the 10 torrents.
            channel = self.nodes[1].overlay.metadata_store.ChannelMetadata.get()
            torrents1 = self.nodes[1].overlay.metadata_store.TorrentMetadata.select()[:]
            self.assertLess(channel.contents_len, 10)
            self.assertLess(0, channel.contents_len)

        # We must delete the old and create all-new torrent entries for the next test.
        # Otherwise, it becomes non-deterministic.
        with db_session:
            channel = self.nodes[0].overlay.metadata_store.ChannelMetadata.get()
            self.nodes[0].overlay.metadata_store.TorrentMetadata.select(
                lambda g: g.metadata_type == REGULAR_TORRENT
            ).delete()
            self.nodes[1].overlay.metadata_store.TorrentMetadata.select().delete()
            for _ in range(10):
                self.add_random_torrent(self.nodes[0].overlay.metadata_store.TorrentMetadata, channel=channel)
            channel.commit_channel_torrent()

        # Initiate the gossip again. This time, it should be sent from the blob cache
        # so the torrents on the receiving end should not change this time.
        self.nodes[0].overlay.send_random_to(Peer(self.nodes[1].my_peer.public_key, self.nodes[1].endpoint.wan_address))
        await self.deliver_messages(timeout=0.5)

        with db_session:
            torrents2 = self.nodes[1].overlay.metadata_store.TorrentMetadata.select()[:]
            self.assertEqual(len(torrents1), len(torrents2))

        # Refreshing the blob cache makes the new torrent batch visible.
        await self.nodes[0].overlay.prepare_gossip_blob_cache()
        self.nodes[0].overlay.send_random_to(Peer(self.nodes[1].my_peer.public_key, self.nodes[1].endpoint.wan_address))
        await self.deliver_messages(timeout=0.5)

        with db_session:
            torrents3 = self.nodes[1].overlay.metadata_store.TorrentMetadata.select()[:]
            self.assertLess(len(torrents2), len(torrents3))

    async def test_send_and_get_channel_update_back(self):
        """
        Test if sending back information on updated version of a channel works
        """
        with db_session:
            # Add channel to node 0
            channel = self.nodes[0].overlay.metadata_store.ChannelMetadata.create_channel("test", "bla")
            for _ in range(20):
                self.add_random_torrent(self.nodes[0].overlay.metadata_store.TorrentMetadata, channel=channel)
            channel.commit_channel_torrent()
            channel_v1_dict = channel.to_dict()
            channel_v1_dict.pop("health")
            # Commit once more so node 0 holds a newer channel version.
            self.add_random_torrent(self.nodes[0].overlay.metadata_store.TorrentMetadata, channel=channel)
            channel.commit_channel_torrent()

        with db_session:
            # Add the outdated version of the channel to node 1
            self.nodes[1].overlay.metadata_store.ChannelMetadata.from_dict(channel_v1_dict)

        # node1 --outdated_channel--> node0
        await self.nodes[1].overlay.prepare_gossip_blob_cache()
        self.nodes[1].overlay.send_random_to(Peer(self.nodes[0].my_peer.public_key, self.nodes[0].endpoint.wan_address))
        await self.deliver_messages(timeout=0.5)

        with db_session:
            # Node 0 should have replied with the newer version; timestamps
            # on both sides now agree.
            self.assertEqual(
                self.nodes[1].overlay.metadata_store.ChannelMetadata.select()[:][0].timestamp,
                self.nodes[0].overlay.metadata_store.ChannelMetadata.select()[:][0].timestamp,
            )

    async def test_gigachannel_search(self):
        """
        Scenario: Node 0 is setup with a channel with 20 ubuntu related torrents. Node 1 searches for 'ubuntu' and
        expects to receive some results. The search results are processed by node 1 when it receives and adds to its
        database. Max number of results is 5, so we expect 5 torrents are added the database.
        """

        def mock_notify(overlay, args):
            overlay.notified_results = True
            self.assertTrue("results" in args[0])

        self.nodes[1].overlay.notifier = MockObject()
        self.nodes[1].overlay.notifier.notify = lambda sub, args: mock_notify(self.nodes[1].overlay, args)

        await self.introduce_nodes()

        with db_session:
            # add some free-for-all entries
            self.nodes[0].overlay.metadata_store.TorrentMetadata.add_ffa_from_dict(
                dict(title="ubuntu legacy", infohash=random_infohash())
            )
            self.nodes[0].overlay.metadata_store.ChannelMetadata(
                title="ubuntu legacy chan", infohash=random_infohash(), public_key=b"", status=LEGACY_ENTRY, id_=0
            )
            channel = self.nodes[0].overlay.metadata_store.ChannelMetadata.create_channel("ubuntu", "ubuntu")
            for i in range(20):
                self.add_random_torrent(
                    self.nodes[0].overlay.metadata_store.TorrentMetadata, name="ubuntu %s" % i, channel=channel
                )
            channel.commit_channel_torrent()

        # Node 1 has no torrents and searches for 'ubuntu'
        with db_session:
            torrents = self.nodes[1].overlay.metadata_store.TorrentMetadata.select()[:]
            self.assertEqual(len(torrents), 0)

        self.nodes[1].overlay.send_search_request(u'"ubuntu"*')
        await self.deliver_messages(timeout=0.5)

        with db_session:
            torrents = self.nodes[1].overlay.metadata_store.TorrentMetadata.select()[:]
            self.assertEqual(len(torrents), 5)

            # Only non-legacy FFA torrents should be sent on search
            torrents_ffa = self.nodes[1].overlay.metadata_store.TorrentMetadata.select(
                lambda g: g.public_key == EMPTY_BLOB
            )[:]
            self.assertEqual(len(torrents_ffa), 1)
            # Legacy FFA channel should not be sent
            channels_ffa = self.nodes[1].overlay.metadata_store.ChannelMetadata.select(
                lambda g: g.public_key == EMPTY_BLOB
            )[:]
            self.assertEqual(len(channels_ffa), 0)

        self.assertTrue(self.nodes[1].overlay.notified_results)

    async def test_gigachannel_search_reject_stale_result(self):
        """
        Scenario: If two search requests are sent one after another, the response for the first query becomes stale and
        is rejected.
        """
        await self.introduce_nodes()
        with db_session:
            channel = self.nodes[0].overlay.metadata_store.ChannelMetadata.create_channel("linux", "ubuntu")
            for i in range(10):
                self.add_random_torrent(
                    self.nodes[0].overlay.metadata_store.TorrentMetadata, name="ubuntu %s" % i, channel=channel
                )
            for i in range(10):
                self.add_random_torrent(
                    self.nodes[0].overlay.metadata_store.TorrentMetadata, name="debian %s" % i, channel=channel
                )
            channel.commit_channel_torrent()

        # Assert Node 1 has no previous torrents in the database
        with db_session:
            torrents = self.nodes[1].overlay.metadata_store.TorrentMetadata.select()[:]
            self.assertEqual(len(torrents), 0)

        # Node 1 sent two consecutive queries
        self.nodes[1].overlay.send_search_request(u'"ubuntu"*')
        self.nodes[1].overlay.send_search_request(u'"debian"*')
        await self.deliver_messages(timeout=0.5)

        # Assert that only the last result is accepted
        with db_session:
            torrents = self.nodes[1].overlay.metadata_store.TorrentMetadata.select()[:]
            self.assertEqual(len(torrents), 5)
            for torrent in torrents:
                self.assertIn("debian", torrent.to_simple_dict()['name'])

    async def test_gigachannel_search_with_no_result(self):
        """
        Test giga channel search which yields no result
        """
        await self.introduce_nodes()
        # Both node 0 and node 1 have no torrents in the database
        with db_session:
            torrents = self.nodes[1].overlay.metadata_store.TorrentMetadata.select()[:]
            torrents2 = self.nodes[1].overlay.metadata_store.TorrentMetadata.select()[:]
            self.assertEqual(len(torrents), 0)
            self.assertEqual(len(torrents2), 0)

        # Node 1 searches for 'A ubuntu'
        query = u'"\xc1 ubuntu"*'
        self.nodes[1].overlay.send_search_request(query)
        await self.deliver_messages(timeout=0.5)

        # Expect no data received in search and nothing processed to the database
        with db_session:
            torrents = self.nodes[1].overlay.metadata_store.TorrentMetadata.select()[:]
            self.assertEqual(len(torrents), 0)
import z3
from miasm2.core.asmblock import AsmLabel
from miasm2.expression.expression import *
from miasm2.ir.translators.translator import Translator
from miasm2.ir.translators.z3_ir import Z3Mem
# Some examples of use/unit tests.
def equiv(z3_expr1, z3_expr2):
    """True iff the two z3 expressions are semantically equal, i.e. their
    inequality is unsatisfiable."""
    solver = z3.Solver()
    solver.add(z3.Not(z3_expr1 == z3_expr2))
    return solver.check() == z3.unsat
def check_interp(interp, constraints, bits=32, valbits=8):
    """Checks that a list of @constraints (addr, value) (as python ints)
    match a z3 FuncInterp (@interp).
    """
    # Convert expected values to valbits-wide z3 bitvectors keyed by
    # concrete address.
    constraints = dict((addr,
                        z3.BitVecVal(val, valbits))
                       for addr, val in constraints)
    l = interp.as_list()
    for entry in l:
        # Skip anything that is not an [addr, value] pair (e.g. the
        # interpretation's default/'else' value).
        if not isinstance(entry, list) or len(entry) < 2:
            continue
        addr, value = entry[0], entry[1]
        if addr.as_long() in constraints:
            assert equiv(value, constraints[addr.as_long()])
# equiv short test
# --------------------------------------------------------------------------
# a + 3 - 1 must simplify to a + 2.
assert equiv(z3.BitVec('a', 32) + z3.BitVecVal(3, 32) - z3.BitVecVal(1, 32),
             z3.BitVec('a', 32) + z3.BitVecVal(2, 32))

# Z3Mem short tests
# --------------------------------------------------------------------------
mem = Z3Mem(endianness='<')  # little endian
eax = z3.BitVec('EAX', 32)
assert equiv(
    # @32[EAX]
    mem.get(eax, 32),
    # @16[EAX+2] . @16[EAX]
    z3.Concat(mem.get(eax+2, 16),
              mem.get(eax, 16)))

# --------------------------------------------------------------------------
# A zero-extended 16-bit address is NOT the same symbolic address as AX.
ax = z3.BitVec('AX', 16)
assert not equiv(
    # @16[EAX] with EAX = ZeroExtend(AX)
    mem.get(z3.ZeroExt(16, ax), 16),
    # @16[AX]
    mem.get(ax, 16))

# TranslatorZ3 tests
# --------------------------------------------------------------------------
e = ExprId('x', 32)
ez3 = Translator.to_language('z3').from_expr(e)
z3_e = z3.BitVec('x', 32)
assert equiv(ez3, z3_e)

# --------------------------------------------------------------------------
four = ExprInt(4, 32)
five = ExprInt(5, 32)
e2 = (e + five + four) * five
ez3 = Translator.to_language('z3').from_expr(e2)
z3_four = z3.BitVecVal(4, 32)
z3_five = z3.BitVecVal(5, 32)
z3_e2 = (z3_e + z3_five + z3_four) * z3_five
assert equiv(ez3, z3_e2)

# --------------------------------------------------------------------------
emem = ExprMem(ExprInt(0xdeadbeef, 32), size=32)
emem2 = ExprMem(ExprInt(0xfee1dead, 32), size=32)
e3 = (emem + e) * ExprInt(2, 32) * emem2
ez3 = Translator.to_language('z3').from_expr(e3)
mem = Z3Mem()
z3_emem = mem.get(z3.BitVecVal(0xdeadbeef, 32), 32)
z3_emem2 = mem.get(z3.BitVecVal(0xfee1dead, 32), 32)
z3_e3 = (z3_emem + z3_e) * z3.BitVecVal(2, 32) * z3_emem2
assert equiv(ez3, z3_e3)

# --------------------------------------------------------------------------
e4 = emem * five
ez3 = Translator.to_language('z3').from_expr(e4)
z3_e4 = z3_emem * z3_five
assert equiv(ez3, z3_e4)
# Solve constraint and check endianness
solver = z3.Solver()
solver.add(ez3 == 10)
solver.check()
model = solver.model()
check_interp(model[mem.get_mem_array(32)],
             [(0xdeadbeef, 2), (0xdeadbeef + 3, 0)])

# --------------------------------------------------------------------------
# Same expression, big-endian translator: byte order must be mirrored.
ez3 = Translator.to_language("z3", endianness=">").from_expr(e4)
memb = Z3Mem(endianness=">")
z3_emem = memb.get(z3.BitVecVal(0xdeadbeef, 32), 32)
z3_e4 = z3_emem * z3_five
assert equiv(ez3, z3_e4)
# Solve constraint and check endianness
solver = z3.Solver()
solver.add(ez3 == 10)
solver.check()
model = solver.model()
check_interp(model[memb.get_mem_array(32)],
             [(0xdeadbeef, 0), (0xdeadbeef + 3, 2)])

# --------------------------------------------------------------------------
e5 = ExprSlice(ExprCompose(e, four), 0, 32) * five
ez3 = Translator.to_language('z3').from_expr(e5)
z3_e5 = z3.Extract(31, 0, z3.Concat(z3_four, z3_e)) * z3_five
assert equiv(ez3, z3_e5)

# --------------------------------------------------------------------------
# Parity
seven = ExprInt(7, 32)
one0seven = ExprInt(0x107, 32)
for miasm_int, res in [(five, 1), (four, 0), (seven, 0), (one0seven, 0)]:
    e6 = ExprOp('parity', miasm_int)
    ez3 = Translator.to_language('z3').from_expr(e6)
    z3_e6 = z3.BitVecVal(res, 1)
    assert equiv(ez3, z3_e6)

# --------------------------------------------------------------------------
# '-'
for miasm_int, res in [(five, -5), (four, -4)]:
    e6 = ExprOp('-', miasm_int)
    ez3 = Translator.to_language('z3').from_expr(e6)
    z3_e6 = z3.BitVecVal(res, 32)
    assert equiv(ez3, z3_e6)

# --------------------------------------------------------------------------
# A label with a known offset translates to that concrete value.
e7 = ExprId(AsmLabel("label_histoire", 0xdeadbeef), 32)
ez3 = Translator.to_language('z3').from_expr(e7)
z3_e7 = z3.BitVecVal(0xdeadbeef, 32)
assert equiv(ez3, z3_e7)

# Should just not throw anything to pass
e8 = ExprId(AsmLabel("label_jambe"), 32)
ez3 = Translator.to_language('z3').from_expr(e8)
assert not equiv(ez3, z3_e7)

# --------------------------------------------------------------------------
# bsr, bsf
# bsf(0x1138) == 3
bsf1 = Translator.to_language('z3').from_expr(ExprOp("bsf", ExprInt(0x1138, 32)))
bsf2 = z3.BitVecVal(3, 32)
assert(equiv(bsf1, bsf2))
# bsr(0x11300) == 0x10
bsr1 = Translator.to_language('z3').from_expr(ExprOp("bsr", ExprInt(0x11300, 32)))
bsr2 = z3.BitVecVal(0x10, 32)
assert(equiv(bsr1, bsr2))
# bsf(0x80000) == bsr(0x80000)
bsf3 = Translator.to_language('z3').from_expr(ExprOp("bsf", ExprInt(0x80000, 32)))
bsr3 = Translator.to_language('z3').from_expr(ExprOp("bsr", ExprInt(0x80000, 32)))
assert(equiv(bsf3, bsr3))

print "TranslatorZ3 tests are OK."
# -*- coding: utf-8 -*-
# Author: Douglas Creager <dcreager@dcreager.net>
# This file is placed into the public domain.
# Calculates the current version number. If possible, this is the
# output of “git describe”, modified to conform to the versioning
# scheme that setuptools uses. If “git describe” returns an error
# (most likely because we're in an unpacked copy of a release tarball,
# rather than in a git working copy), then we fall back on reading the
# contents of the RELEASE-VERSION file.
#
# To use this script, simply import it your setup.py file, and use the
# results of get_git_version() as your package version:
#
# from version import *
#
# setup(
# version=get_git_version(),
# .
# .
# .
# )
#
# This will automatically update the RELEASE-VERSION file, if
# necessary. Note that the RELEASE-VERSION file should *not* be
# checked into git; please add it to your top-level .gitignore file.
#
# You'll probably want to distribute the RELEASE-VERSION file in your
# sdist tarballs; to do this, just create a MANIFEST.in file that
# contains the following line:
#
# include RELEASE-VERSION
# Fix: a parenthesized string without a trailing comma is just a string,
# not a tuple; add the comma so __all__ is a proper 1-tuple.
__all__ = ("get_git_version",)
from subprocess import Popen, PIPE
def call_git_describe(abbrev=4):
    """Return the stripped output of ``git describe --abbrev=<abbrev>``,
    or None when git is missing or the command fails (e.g. not a git
    working copy)."""
    try:
        p = Popen(['git', 'describe', '--abbrev=%d' % abbrev],
                  stdout=PIPE, stderr=PIPE)
        # communicate() reads both pipes and reaps the child, avoiding the
        # zombie process the original left behind.
        stdout, _ = p.communicate()
        if p.returncode != 0 or not stdout:
            return None
        return stdout.splitlines()[0].strip()
    except OSError:
        # git executable not found / not runnable
        return None
def read_release_version():
    """Return the version recorded in RELEASE-VERSION, or None when the
    file is missing or empty."""
    try:
        with open("RELEASE-VERSION", "r") as f:
            lines = f.readlines()
    except IOError:
        # File absent or unreadable; caller falls back to git describe.
        return None
    if not lines:
        return None
    return lines[0].strip()
def write_release_version(version):
    """Persist ``version`` to the RELEASE-VERSION file, newline-terminated.

    Uses a context manager so the file is closed even if the write fails.
    """
    with open("RELEASE-VERSION", "w") as f:
        f.write("%s\n" % version)
def get_git_version(abbrev=4):
    """Determine the package version.

    Prefers the live `git describe` output, falls back to the cached
    RELEASE-VERSION file, and refreshes that cache whenever it is stale.
    Raises ValueError when neither source yields a version.
    """
    # Cached value from a previous run (may be None).
    cached = read_release_version()
    # Live value from git (None outside a git working copy).
    current = call_git_describe(abbrev)
    if current is None:
        current = cached
    if current is None:
        raise ValueError("Cannot find the version number!")
    # Keep the RELEASE-VERSION file in sync for sdist consumers.
    if current != cached:
        write_release_version(current)
    return current
if __name__ == "__main__":
    # print() with a single argument is valid under both Python 2 and 3,
    # unlike the old `print x` statement form.
    print(get_git_version())
# Set up and run tests of the 'bundle-uri' command in protocol v2
#
# The test that includes this script should set BUNDLE_URI_PROTOCOL
# to one of "file", "git", or "http".
# Variables describing the repository under test; the case below fills them
# in per protocol. (Fixed: these previously initialized BUNDLE_URI_TEST_*
# names that nothing else ever read.)
BUNDLE_URI_PARENT=
BUNDLE_URI_REPO_URI=
BUNDLE_URI_BUNDLE_URI=

case "$BUNDLE_URI_PROTOCOL" in
file)
	BUNDLE_URI_PARENT=file_parent
	BUNDLE_URI_REPO_URI="file://$PWD/file_parent"
	BUNDLE_URI_BUNDLE_URI="$BUNDLE_URI_REPO_URI/fake.bdl"
	test_set_prereq BUNDLE_URI_FILE
	;;
git)
	. "$TEST_DIRECTORY"/lib-git-daemon.sh
	start_git_daemon --export-all --enable=receive-pack
	BUNDLE_URI_PARENT="$GIT_DAEMON_DOCUMENT_ROOT_PATH/parent"
	BUNDLE_URI_REPO_URI="$GIT_DAEMON_URL/parent"
	BUNDLE_URI_BUNDLE_URI="$BUNDLE_URI_REPO_URI/fake.bdl"
	test_set_prereq BUNDLE_URI_GIT
	;;
http)
	. "$TEST_DIRECTORY"/lib-httpd.sh
	start_httpd
	BUNDLE_URI_PARENT="$HTTPD_DOCUMENT_ROOT_PATH/http_parent"
	BUNDLE_URI_REPO_URI="$HTTPD_URL/smart/http_parent"
	# Fixed typo: this previously expanded the undefined BUNDLE_URI_REPO_URL,
	# producing the bundle URI "/fake.bdl"; file) and git) both use _URI.
	BUNDLE_URI_BUNDLE_URI="$BUNDLE_URI_REPO_URI/fake.bdl"
	test_set_prereq BUNDLE_URI_HTTP
	;;
*)
	BUG "Need to pass valid BUNDLE_URI_PROTOCOL (was \"$BUNDLE_URI_PROTOCOL\")"
	;;
esac
# Create the parent repository that every later test fetches/clones from,
# and enable bundle URI advertisement over protocol v2.
test_expect_success "setup protocol v2 $BUNDLE_URI_PROTOCOL:// tests" '
	git init "$BUNDLE_URI_PARENT" &&
	test_commit -C "$BUNDLE_URI_PARENT" one &&
	git -C "$BUNDLE_URI_PARENT" config uploadpack.advertiseBundleURIs true
'

case "$BUNDLE_URI_PROTOCOL" in
http)
	# Serving over HTTP additionally requires receive-pack to be enabled.
	test_expect_success "setup config for $BUNDLE_URI_PROTOCOL:// tests" '
		git -C "$BUNDLE_URI_PARENT" config http.receivepack true
	'
	;;
*)
	;;
esac
BUNDLE_URI_BUNDLE_URI_ESCAPED=$(echo "$BUNDLE_URI_BUNDLE_URI" | test_uri_escape)

# With advertisement switched off, the server must not mention bundle-uri.
test_expect_success "connect with $BUNDLE_URI_PROTOCOL:// using protocol v2: no bundle-uri" '
	test_when_finished "rm -f log" &&
	test_when_finished "git -C \"$BUNDLE_URI_PARENT\" config uploadpack.advertiseBundleURIs true" &&
	git -C "$BUNDLE_URI_PARENT" config uploadpack.advertiseBundleURIs false &&
	GIT_TRACE_PACKET="$PWD/log" \
	git \
		-c protocol.version=2 \
		ls-remote --symref "$BUNDLE_URI_REPO_URI" \
		>actual 2>err &&
	# Server responded using protocol v2
	grep "< version 2" log &&
	! grep bundle-uri log
'

# With advertisement on (the setup default) the capability must be offered.
test_expect_success "connect with $BUNDLE_URI_PROTOCOL:// using protocol v2: have bundle-uri" '
	test_when_finished "rm -f log" &&
	GIT_TRACE_PACKET="$PWD/log" \
	git \
		-c protocol.version=2 \
		ls-remote --symref "$BUNDLE_URI_REPO_URI" \
		>actual 2>err &&
	# Server responded using protocol v2
	grep "< version 2" log &&
	# Server advertised bundle-uri capability
	grep "< bundle-uri" log
'

# transfer.bundleURI gates whether the client *requests* bundle URIs, and an
# explicit --bundle-uri override suppresses the request entirely.
test_expect_success "clone with $BUNDLE_URI_PROTOCOL:// using protocol v2: request bundle-uris" '
	test_when_finished "rm -rf log* cloned*" &&
	GIT_TRACE_PACKET="$PWD/log" \
	git \
		-c transfer.bundleURI=false \
		-c protocol.version=2 \
		clone "$BUNDLE_URI_REPO_URI" cloned \
		>actual 2>err &&
	# Server responded using protocol v2
	grep "< version 2" log &&
	# Server advertised bundle-uri capability
	grep "< bundle-uri" log &&
	# Client did not issue bundle-uri command
	! grep "> command=bundle-uri" log &&
	GIT_TRACE_PACKET="$PWD/log" \
	git \
		-c transfer.bundleURI=true \
		-c protocol.version=2 \
		clone "$BUNDLE_URI_REPO_URI" cloned2 \
		>actual 2>err &&
	# Server responded using protocol v2
	grep "< version 2" log &&
	# Server advertised bundle-uri capability
	grep "< bundle-uri" log &&
	# Client issued bundle-uri command
	grep "> command=bundle-uri" log &&
	GIT_TRACE_PACKET="$PWD/log3" \
	git \
		-c transfer.bundleURI=true \
		-c protocol.version=2 \
		clone --bundle-uri="$BUNDLE_URI_BUNDLE_URI" \
		"$BUNDLE_URI_REPO_URI" cloned3 \
		>actual 2>err &&
	# Server responded using protocol v2
	grep "< version 2" log3 &&
	# Server advertised bundle-uri capability
	grep "< bundle-uri" log3 &&
	# Client did not issue bundle-uri command (--bundle-uri override)
	! grep "> command=bundle-uri" log3
'
# The remaining tests will all assume transfer.bundleURI=true
#
# This test can be removed when transfer.bundleURI is enabled by default.
test_expect_success 'enable transfer.bundleURI for remaining tests' '
	git config --global transfer.bundleURI true
'

# A single advertised bundle must come back verbatim from `ls-remote`.
test_expect_success "test bundle-uri with $BUNDLE_URI_PROTOCOL:// using protocol v2" '
	test_config -C "$BUNDLE_URI_PARENT" \
		bundle.only.uri "$BUNDLE_URI_BUNDLE_URI_ESCAPED" &&
	# All data about bundle URIs
	cat >expect <<-EOF &&
	[bundle]
		version = 1
		mode = all
	[bundle "only"]
		uri = $BUNDLE_URI_BUNDLE_URI_ESCAPED
	EOF
	test-tool bundle-uri \
		ls-remote \
		"$BUNDLE_URI_REPO_URI" \
		>actual &&
	test_cmp_config_output expect actual
'

# Unknown bundle.<id>.* keys on the server must be silently dropped.
test_expect_success "test bundle-uri with $BUNDLE_URI_PROTOCOL:// using protocol v2 and extra data" '
	test_config -C "$BUNDLE_URI_PARENT" \
		bundle.only.uri "$BUNDLE_URI_BUNDLE_URI_ESCAPED" &&
	# Extra data should be ignored
	test_config -C "$BUNDLE_URI_PARENT" bundle.only.extra bogus &&
	# All data about bundle URIs
	cat >expect <<-EOF &&
	[bundle]
		version = 1
		mode = all
	[bundle "only"]
		uri = $BUNDLE_URI_BUNDLE_URI_ESCAPED
	EOF
	test-tool bundle-uri \
		ls-remote \
		"$BUNDLE_URI_REPO_URI" \
		>actual &&
	test_cmp_config_output expect actual
'

# Multiple advertised bundles must all survive the round-trip.
test_expect_success "test bundle-uri with $BUNDLE_URI_PROTOCOL:// using protocol v2 with list" '
	test_config -C "$BUNDLE_URI_PARENT" \
		bundle.bundle1.uri "$BUNDLE_URI_BUNDLE_URI_ESCAPED-1.bdl" &&
	test_config -C "$BUNDLE_URI_PARENT" \
		bundle.bundle2.uri "$BUNDLE_URI_BUNDLE_URI_ESCAPED-2.bdl" &&
	test_config -C "$BUNDLE_URI_PARENT" \
		bundle.bundle3.uri "$BUNDLE_URI_BUNDLE_URI_ESCAPED-3.bdl" &&
	# All data about bundle URIs
	cat >expect <<-EOF &&
	[bundle]
		version = 1
		mode = all
	[bundle "bundle1"]
		uri = $BUNDLE_URI_BUNDLE_URI_ESCAPED-1.bdl
	[bundle "bundle2"]
		uri = $BUNDLE_URI_BUNDLE_URI_ESCAPED-2.bdl
	[bundle "bundle3"]
		uri = $BUNDLE_URI_BUNDLE_URI_ESCAPED-3.bdl
	EOF
	test-tool bundle-uri \
		ls-remote \
		"$BUNDLE_URI_REPO_URI" \
		>actual &&
	test_cmp_config_output expect actual
'
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from java.lang import String
# Global workaround for os.listdir bug http://bugs.jython.org/issue1593
# This bug has been fixed in Jython 2.5.2.
if sys.version_info[:3] < (2, 5, 2):
    # Keep a reference to the original so the wrapper can delegate to it.
    os._orig_listdir = os.listdir

    def listdir(path):
        # Jython's os.listdir can return java.lang.String-backed entries for
        # unicode paths; coerce each entry back to a Python unicode object.
        # (Python 2/Jython only: relies on the `unicode` builtin.)
        items = os._orig_listdir(path)
        if isinstance(path, unicode):
            items = [unicode(String(i).toString()) for i in items]
        return items

    os.listdir = listdir
import sys
import atexit
import signal
from vexbot._version import __version__ as version
from vexbot.adapters.irc import IrcInterface
"""
try:
pkg_resources.get_distribution('irc3')
except pkg_resources.DistributionNotFound:
_IRC3_INSTALLED = False
if _IRC3_INSTALLED:
import irc3
else:
pass
"""
import irc3
from irc3 import utils
def main(**kwargs):
    """Entry point for the vexbot IRC adapter.

    Builds an irc3 configuration from the command line (plus any keyword
    overrides), guarantees the message-echo plugin is included, then runs
    the IrcInterface and exits the process when it returns.
    """
    config = _from_argv(irc3.IrcBot, kwargs=kwargs)
    # Ensure the echo-to-message plugin is loaded exactly once.
    if 'includes' not in config:
        config['includes'] = []
    message_plug = 'vexbot.adapters.irc.echo_to_message'
    if message_plug not in config['includes']:
        config['includes'].append(message_plug)
    service_name = config.get('service_name', 'irc')
    connection = config.get('connection', {})
    interface = IrcInterface(service_name, irc_config=config, connection=connection)
    interface.run()
    sys.exit()
# NOTE: This code is from `irc3`
def _from_argv(cls, argv=None, **kwargs) -> dict:
    """Parse command-line options (via docopt) and config files into a
    single configuration dict for `cls` (an irc3 bot class).

    `argv` defaults to sys.argv[1:]; entries in `kwargs` override parsed
    values. May mutate `cls.logging_config` when logging options are given.
    """
    # TODO: Add in publish ports and all that jazz.
    doc = """
    Run an __main__.py instance from a config file

    Usage: __main__.py [options] <config>...

    Options:

    -h, --help          Display this help and exit
    --version           Output version information and exit
    --logdir DIRECTORY  Log directory to use instead of stderr
    --logdate           Show datetimes in console output
    --host HOST         Server name or ip
    --port PORT         Server port
    -v,--verbose        Increase verbosity
    -r,--raw            Show raw irc log on the console
    -d,--debug          Add some debug commands/utils
    """
    import os
    import docopt
    # BUGFIX: `config` was referenced below but never imported, raising
    # NameError whenever a logging option was used; the helpers live in the
    # irc3.config module.
    from irc3 import config
    args = argv or sys.argv[1:]
    args = docopt.docopt(doc, args, version=version)
    cfg = utils.parse_config(
        cls.server and 'server' or 'bot', *args['<config>'])
    cfg.update(
        verbose=args['--verbose'],
        debug=args['--debug'],
    )
    cfg.update(kwargs)
    if args['--host']:  # pragma: no cover
        host = args['--host']
        cfg['host'] = host
        # Local connections are assumed to be plaintext.
        if host in ('127.0.0.1', 'localhost'):
            cfg['ssl'] = False
    if args['--port']:  # pragma: no cover
        cfg['port'] = args['--port']
    if args['--logdir'] or 'logdir' in cfg:
        logdir = os.path.expanduser(args['--logdir'] or cfg.get('logdir'))
        cls.logging_config = config.get_file_config(logdir)
    if args['--logdate']:  # pragma: no cover
        fmt = cls.logging_config['formatters']['console']
        fmt['format'] = config.TIMESTAMPED_FMT
    if args.get('--help-page'):  # pragma: no cover
        for v in cls.logging_config['handlers'].values():
            v['level'] = 'ERROR'
    if args['--raw']:
        cfg['raw'] = True
    return cfg
# Allow running the adapter directly as a script.
if __name__ == '__main__':
    main()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
    """Train and evaluate a 3-layer DNN on Iris, demonstrating how to pass
    a custom RunConfig (num_cores / gpu_memory_fraction) to an estimator.

    NOTE(review): this example targets old APIs -- `sklearn.cross_validation`
    (removed in modern scikit-learn) and `tf.contrib.learn` (removed in
    TF 2.x); it only runs against the library versions it was written for.
    """
    # Load dataset.
    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = cross_validation.train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=42)
    # You can define your configurations by providing a RunConfig object to
    # estimator to control session configurations, e.g. num_cores
    # and gpu_memory_fraction
    run_config = tf.contrib.learn.estimators.RunConfig(
        num_cores=3, gpu_memory_fraction=0.6)
    # Build 3 layer DNN with 10, 20, 10 units respectively.
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
        x_train)
    classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                                hidden_units=[10, 20, 10],
                                                n_classes=3,
                                                config=run_config)
    # Fit and predict.
    classifier.fit(x_train, y_train, steps=200)
    predictions = list(classifier.predict(x_test, as_iterable=True))
    score = metrics.accuracy_score(y_test, predictions)
    print('Accuracy: {0:f}'.format(score))


if __name__ == '__main__':
    tf.app.run()
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.shortcuts import render_to_kmz
from django.contrib.gis.tests.utils import no_oracle
from django.db.models import Count, Min
from django.test import TestCase, skipUnlessDBFeature
if HAS_GEOS:
from .models import City, PennsylvaniaCity, State, Truth
@skipUnlessDBFeature("gis_enabled")
class GeoRegressionTests(TestCase):
    """Regression tests for assorted GeoDjango tickets; each test docstring
    references the Trac ticket it guards against."""

    fixtures = ['initial']

    def test_update(self):
        "Testing GeoQuerySet.update(). See #10411."
        pnt = City.objects.get(name='Pueblo').point
        bak = pnt.clone()
        pnt.y += 0.005
        pnt.x += 0.005
        City.objects.filter(name='Pueblo').update(point=pnt)
        self.assertEqual(pnt, City.objects.get(name='Pueblo').point)
        # Restore the original point and confirm the round-trip.
        City.objects.filter(name='Pueblo').update(point=bak)
        self.assertEqual(bak, City.objects.get(name='Pueblo').point)

    def test_kmz(self):
        "Testing `render_to_kmz` with non-ASCII data. See #11624."
        name = "Åland Islands"
        places = [{
            'name': name,
            'description': name,
            'kml': '<Point><coordinates>5.0,23.0</coordinates></Point>'
        }]
        render_to_kmz('gis/kml/placemarks.kml', {'places': places})

    @skipUnlessDBFeature("supports_extent_aggr")
    def test_extent(self):
        "Testing `extent` on a table with a single point. See #11827."
        pnt = City.objects.get(name='Pueblo').point
        # For a single point the extent degenerates to (x, y, x, y).
        ref_ext = (pnt.x, pnt.y, pnt.x, pnt.y)
        extent = City.objects.filter(name='Pueblo').extent()
        for ref_val, val in zip(ref_ext, extent):
            self.assertAlmostEqual(ref_val, val, 4)

    def test_unicode_date(self):
        "Testing dates are converted properly, even on SpatiaLite. See #16408."
        founded = datetime(1857, 5, 23)
        PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)',
                                        founded=founded)
        self.assertEqual(founded, PennsylvaniaCity.objects.datetimes('founded', 'day')[0])
        self.assertEqual(founded, PennsylvaniaCity.objects.aggregate(Min('founded'))['founded__min'])

    def test_empty_count(self):
        "Testing that PostGISAdapter.__eq__ does check empty strings. See #13670."
        # contrived example, but need a geo lookup paired with an id__in lookup
        pueblo = City.objects.get(name='Pueblo')
        state = State.objects.filter(poly__contains=pueblo.point)
        cities_within_state = City.objects.filter(id__in=state)
        # .count() should not throw TypeError in __eq__
        self.assertEqual(cities_within_state.count(), 1)

    # TODO: fix on Oracle -- get the following error because the SQL is ordered
    # by a geometry object, which Oracle apparently doesn't like:
    #  ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
    @no_oracle
    def test_defer_or_only_with_annotate(self):
        "Regression for #16409. Make sure defer() and only() work with annotate()"
        self.assertIsInstance(list(City.objects.annotate(Count('point')).defer('name')), list)
        self.assertIsInstance(list(City.objects.annotate(Count('point')).only('name')), list)

    def test_boolean_conversion(self):
        "Testing Boolean value conversion with the spatial backend, see #15169."
        t1 = Truth.objects.create(val=True)
        t2 = Truth.objects.create(val=False)
        val1 = Truth.objects.get(pk=t1.pk).val
        val2 = Truth.objects.get(pk=t2.pk).val
        # verify types -- shouldn't be 0/1
        self.assertIsInstance(val1, bool)
        self.assertIsInstance(val2, bool)
        # verify values
        self.assertEqual(val1, True)
        self.assertEqual(val2, False)
"""
Video Maker transforms screenshots taken during a test into a HTML 5
compatible video, so that one can watch the screen activity of the
whole test from inside your own browser.
This relies on generally available multimedia libraries, frameworks
and tools.
"""
import os
import time
import glob
import logging
import re
__all__ = ['GstPythonVideoMaker', 'video_maker']
#
# Check what kind of video libraries tools we have available
#
# Gstreamer python bindings are our first choice
try:
import gst
GST_PYTHON_INSTALLED = True
except ImportError:
GST_PYTHON_INSTALLED = False
#
# PIL is also required to normalize images
#
try:
import PIL.Image
PIL_INSTALLED = True
except ImportError:
PIL_INSTALLED = False
#
# We only do video
#
CONTAINER_PREFERENCE = ['ogg', 'webm']
ENCODER_PREFERENCE = ['theora', 'vp8']
class GstPythonVideoMaker(object):
    '''
    Makes a movie out of screendump images using gstreamer-python.

    All JPEG files in an input directory are first normalized to a common
    size (GStreamer requires uniform frames) and then encoded into an
    HTML5-friendly container (Ogg/Theora or WebM/VP8, whichever the local
    GStreamer installation supports).
    '''

    CONTAINER_MAPPING = {'ogg': 'oggmux',
                         'webm': 'webmmux'}
    ENCODER_MAPPING = {'theora': 'theoraenc',
                       'vp8': 'vp8enc'}
    CONTAINER_ENCODER_MAPPING = {'ogg': 'theora',
                                 'webm': 'vp8'}

    def __init__(self, verbose=False):
        # Fail fast when the required multimedia/imaging bindings are absent.
        if not GST_PYTHON_INSTALLED:
            raise ValueError('gstreamer-python library was not found')
        if not PIL_INSTALLED:
            raise ValueError('python-imaging library was not found')
        self.verbose = verbose

    def get_most_common_image_size(self, input_dir):
        '''
        Find the most common image size.

        Returns the (width, height) tuple occurring most often among the
        *.jpg files in input_dir, or None when there are no images.
        '''
        image_sizes = {}
        image_files = glob.glob(os.path.join(input_dir, '*.jpg'))
        for f in image_files:
            i = PIL.Image.open(f)
            # dict.get() replaces the deprecated (and Python 3-removed)
            # dict.has_key() idiom used previously.
            image_sizes[i.size] = image_sizes.get(i.size, 0) + 1
        most_common_size_counter = 0
        most_common_size = None
        for image_size, image_counter in image_sizes.items():
            if image_counter > most_common_size_counter:
                most_common_size_counter = image_counter
                most_common_size = image_size
        return most_common_size

    def normalize_images(self, input_dir):
        '''
        GStreamer requires all images to be the same size, so we do it here.

        Every JPEG in input_dir is resized in place to the most common size;
        800x600 is used when no common size can be determined.
        '''
        image_size = self.get_most_common_image_size(input_dir)
        if image_size is None:
            image_size = (800, 600)
        if self.verbose:
            logging.debug('Normalizing image files to size: %s', image_size)
        image_files = glob.glob(os.path.join(input_dir, '*.jpg'))
        for f in image_files:
            i = PIL.Image.open(f)
            if i.size != image_size:
                i.resize(image_size).save(f)

    def has_element(self, kind):
        '''
        Returns True if a gstreamer element is available.
        '''
        return gst.element_factory_find(kind) is not None

    def get_container_name(self):
        '''
        Gets the best video container available, based on preference order.
        '''
        for c in CONTAINER_PREFERENCE:
            element_kind = self.CONTAINER_MAPPING.get(c, c)
            if self.has_element(element_kind):
                return element_kind
        raise ValueError('No suitable container format was found')

    def get_encoder_name(self):
        '''
        Gets the best video encoder available, based on preference order.
        '''
        for c in ENCODER_PREFERENCE:
            element_kind = self.ENCODER_MAPPING.get(c, c)
            if self.has_element(element_kind):
                return element_kind
        raise ValueError('No suitable encoder format was found')

    def get_element(self, name):
        '''
        Makes and returns an element from the gst factory interface.
        '''
        if self.verbose:
            logging.debug('GStreamer element requested: %s', name)
        return gst.element_factory_make(name, name)

    def start(self, input_dir, output_file):
        '''
        Process the input files and output the video file.
        '''
        self.normalize_images(input_dir)
        file_list = glob.glob(os.path.join(input_dir, '*.jpg'))
        no_files = len(file_list)
        if no_files == 0:
            if self.verbose:
                logging.debug("Number of files to encode as video is zero")
            return
        # Collect the numeric indexes of the NNNN.jpg frames so the source
        # element can start at the first frame actually present.
        index_list = []
        for ifile in file_list:
            index_list.append(int(re.findall(r"/+.*/(\d{4})\.jpg", ifile)[0]))
        index_list.sort()
        if self.verbose:
            logging.debug('Number of files to encode as video: %s', no_files)
        pipeline = gst.Pipeline("pipeline")
        source = self.get_element("multifilesrc")
        source_location = os.path.join(input_dir, "%04d.jpg")
        if self.verbose:
            logging.debug("Source location: %s", source_location)
        source.set_property('location', source_location)
        source.set_property('index', index_list[0])
        source_caps = gst.Caps()
        source_caps.append('image/jpeg,framerate=(fraction)4/1')
        source.set_property('caps', source_caps)
        decoder = self.get_element("jpegdec")
        # Attempt to auto detect the chosen encoder/mux based on output_file
        encoder = None
        container = None
        for container_name in self.CONTAINER_ENCODER_MAPPING:
            if output_file.endswith('.%s' % container_name):
                enc_name = self.CONTAINER_ENCODER_MAPPING[container_name]
                enc_name_gst = self.ENCODER_MAPPING[enc_name]
                encoder = self.get_element(enc_name_gst)
                cont_name_gst = self.CONTAINER_MAPPING[container_name]
                container = self.get_element(cont_name_gst)
        # If auto detection fails, choose from the list of preferred codec/mux
        if encoder is None:
            encoder = self.get_element(self.get_encoder_name())
        if container is None:
            container = self.get_element(self.get_container_name())
        output = self.get_element("filesink")
        output.set_property('location', output_file)
        pipeline.add_many(source, decoder, encoder, container, output)
        gst.element_link_many(source, decoder, encoder, container, output)
        pipeline.set_state(gst.STATE_PLAYING)
        # Poll until the source element has consumed every frame.
        while True:
            if source.get_property('index') <= no_files:
                if self.verbose:
                    logging.debug("Currently processing image number: %s",
                                  source.get_property('index'))
                time.sleep(1)
            else:
                break
        # Give the pipeline a moment to flush before tearing it down.
        time.sleep(3)
        pipeline.set_state(gst.STATE_NULL)
def video_maker(input_dir, output_file, verbose=False):
    '''
    Instantiates and runs a video maker.

    :param input_dir: directory containing the numbered *.jpg screenshots.
    :param output_file: path of the video file to produce (.ogg or .webm).
    :param verbose: enable debug logging in the maker (new parameter;
                    defaults to False so existing callers are unaffected).
    '''
    v = GstPythonVideoMaker(verbose=verbose)
    v.start(input_dir, output_file)
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 3:
        # print() with a single argument works under both Python 2 and 3.
        print('Usage: %s <input_dir> <output_file>' % sys.argv[0])
    else:
        video_maker(sys.argv[1], sys.argv[2])
// @validatePreserveExistingMemoizationGuarantees
import {useMemo} from 'react';
import {Stringify} from 'shared-runtime';
// derived from https://github.com/facebook/react/issues/32261
function Component({items}) {
  // Memoized map of item id -> render-prop that produces a <Stringify>
  // forwarding the ref it receives; recomputed only when `items` changes.
  const record = useMemo(
    () =>
      Object.fromEntries(
        items.map(item => [item.id, ref => <Stringify ref={ref} {...item} />])
      ),
    [items]
  );
  // Without a declaration for Object.entries(), this would be assumed to mutate
  // `record`, meaning existing memoization couldn't be preserved
  return (
    <div>
      {Object.keys(record).map(id => (
        <Stringify key={id} render={record[id]} />
      ))}
    </div>
  );
}
// Harness entry point: render Component once with two sample items.
export const FIXTURE_ENTRYPOINT = {
  fn: Component,
  params: [
    {
      items: [
        {id: '0', name: 'Hello'},
        {id: '1', name: 'World!'},
      ],
    },
  ],
};
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_TPU_GRAPH_REWRITE_HOST_TRAINING_LOOP_OPTIMIZATION_UTIL_H_
#define TENSORFLOW_CORE_TPU_GRAPH_REWRITE_HOST_TRAINING_LOOP_OPTIMIZATION_UTIL_H_
#include <optional>
#include <string>
#include <unordered_set>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace tpu {
// Enter/Exit node bookkeeping for one argument of a host training while loop.
struct LoopArgInfo {
  std::string enter_node_name;
  // Exit nodes are optional for loop invariant while loop args.
  std::optional<std::string> exit_node_name;
};

// Everything DetectHostTrainingLoop records about one detected loop.
struct HostTrainingLoopInfo {
  // Name and attribute information about the function in which
  // host training loop is included. If host training loop is not
  // inside a function call, then `function_name` and `function_attrs`
  // are nullopt.
  std::optional<std::string> encapsulating_function_name;
  std::optional<AttrValueMap> encapsulating_function_attrs;

  // TPU Compile node as within a host training loop.
  std::string compile_node_name;

  // Name of the while loop in which TPU compile op is located.
  std::string while_loop_name;

  // Name of the node that represents loop condition.
  std::string loop_cond_node_name;

  // Exit and Enter node names for each loop arguments.
  std::vector<LoopArgInfo> loop_arguments;

  // Nodes belonging to the detected loop (non-owning pointers into `graph`).
  std::unordered_set<Node*> loop_nodes;  // NOLINT
};

// Walks through the `graph`, recursively if functional nodes exist, and
// identifies all host training loops. Host training loops are the inner
// most while loops that encapsulates TPUCompileOp node. This would be
// later used/analyzed to introduce host loop specific optimizations such
// as adding sharded weight update.
absl::Status DetectHostTrainingLoop(
    const std::string* current_function_name,
    const AttrValueMap* current_function_attr,
    const FunctionLibraryDefinition* library, Graph* graph,
    FunctionLibraryRuntime* flr,
    std::vector<HostTrainingLoopInfo>* host_training_loops_info);

// Injects VariableReshardOps to before and after TPUExecute op inside
// host training loop body. This effectively applies sharded weight update
// on model weight variables.
absl::Status AddReshardOp(Graph* graph,
                          const HostTrainingLoopInfo& host_loop_info);
} // namespace tpu
} // namespace tensorflow
#endif // TENSORFLOW_CORE_TPU_GRAPH_REWRITE_HOST_TRAINING_LOOP_OPTIMIZATION_UTIL_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/tpu/graph_rewrite/host_training_loop_optimization_util.h |
#!/usr/bin/python
# This script is used to generate luabinding glue codes.
# Android ndk version must be ndk-r9b.
import sys
import os, os.path
import shutil
import ConfigParser
import subprocess
import re
from contextlib import contextmanager
def _check_ndk_root_env():
''' Checking the environment NDK_ROOT, which will be used for building
'''
try:
NDK_ROOT = os.environ['NDK_ROOT']
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment."
sys.exit(1)
return NDK_ROOT
def _check_python_bin_env():
''' Checking the environment PYTHON_BIN, which will be used for building
'''
try:
PYTHON_BIN = os.environ['PYTHON_BIN']
except Exception:
print "PYTHON_BIN not defined, use current python."
PYTHON_BIN = sys.executable
return PYTHON_BIN
class CmdError(Exception):
    """Raised by _run_cmd when a shell command exits with non-zero status."""
    pass
@contextmanager
def _pushd(newDir):
previousDir = os.getcwd()
os.chdir(newDir)
yield
os.chdir(previousDir)
def _run_cmd(command):
ret = subprocess.call(command, shell=True)
if ret != 0:
message = "Error running command"
raise CmdError(message)
def main():
    """Generate the lua binding glue code for every configured module.

    Reads NDK_ROOT / PYTHON_BIN from the environment, locates the prebuilt
    llvm toolchain inside the NDK, writes tools/tolua/userconf.ini, and
    invokes the bindings generator once per *.ini file listed in cmd_args.
    Exits with status 1 on any unsupported platform, missing toolchain, or
    generator failure.
    """
    cur_platform = '??'
    llvm_path = '??'
    ndk_root = _check_ndk_root_env()
    # del the " in the path
    ndk_root = re.sub(r"\"", "", ndk_root)
    python_bin = _check_python_bin_env()

    platform = sys.platform
    if platform == 'win32':
        cur_platform = 'windows'
    elif platform == 'darwin':
        cur_platform = platform
    elif 'linux' in platform:
        cur_platform = 'linux'
    else:
        print 'Your platform is not supported!'
        sys.exit(1)

    # Locate the prebuilt llvm toolchain inside the NDK: llvm-3.3 is
    # preferred with llvm-3.4 as a fallback, and 32-bit prebuilts are tried
    # before 64-bit ones.
    if platform == 'win32':
        x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s' % cur_platform))
        if not os.path.exists(x86_llvm_path):
            x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s' % cur_platform))
    else:
        x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86')))
        if not os.path.exists(x86_llvm_path):
            x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s-%s' % (cur_platform, 'x86')))
    x64_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86_64')))
    if not os.path.exists(x64_llvm_path):
        x64_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s-%s' % (cur_platform, 'x86_64')))

    if os.path.isdir(x86_llvm_path):
        llvm_path = x86_llvm_path
    elif os.path.isdir(x64_llvm_path):
        llvm_path = x64_llvm_path
    else:
        print 'llvm toolchain not found!'
        print 'path: %s or path: %s are not valid! ' % (x86_llvm_path, x64_llvm_path)
        sys.exit(1)

    project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
    cocos_root = os.path.abspath(os.path.join(project_root, ''))
    cxx_generator_root = os.path.abspath(os.path.join(project_root, 'tools/bindings-generator'))

    # save config to file
    config = ConfigParser.ConfigParser()
    config.set('DEFAULT', 'androidndkdir', ndk_root)
    config.set('DEFAULT', 'clangllvmdir', llvm_path)
    config.set('DEFAULT', 'cocosdir', cocos_root)
    config.set('DEFAULT', 'cxxgeneratordir', cxx_generator_root)
    config.set('DEFAULT', 'extra_flags', '')

    # To fix parse error on windows, we must define __WCHAR_MAX__ and undefine __MINGW32__ .
    if platform == 'win32':
        config.set('DEFAULT', 'extra_flags', '-D__WCHAR_MAX__=0x7fffffff -U__MINGW32__')

    conf_ini_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'userconf.ini'))

    print 'generating userconf.ini...'
    with open(conf_ini_file, 'w') as configfile:
        config.write(configfile)

    # set proper environment variables
    if 'linux' in platform or platform == 'darwin':
        os.putenv('LD_LIBRARY_PATH', '%s/libclang' % cxx_generator_root)
    if platform == 'win32':
        path_env = os.environ['PATH']
        os.putenv('PATH', r'%s;%s\libclang;%s\tools\win32;' % (path_env, cxx_generator_root, cxx_generator_root))

    try:
        tolua_root = '%s/tools/tolua' % project_root
        output_dir = '%s/cocos/scripting/lua-bindings/auto' % project_root
        # Maps each generator .ini file to (config section, output file stem).
        cmd_args = {'cocos2dx.ini' : ('cocos2d-x', 'lua_cocos2dx_auto'), \
                    'cocos2dx_extension.ini' : ('cocos2dx_extension', 'lua_cocos2dx_extension_auto'), \
                    'cocos2dx_ui.ini' : ('cocos2dx_ui', 'lua_cocos2dx_ui_auto'), \
                    'cocos2dx_studio.ini' : ('cocos2dx_studio', 'lua_cocos2dx_studio_auto'), \
                    'cocos2dx_spine.ini' : ('cocos2dx_spine', 'lua_cocos2dx_spine_auto'), \
                    'cocos2dx_physics.ini' : ('cocos2dx_physics', 'lua_cocos2dx_physics_auto'), \
                    'cocos2dx_experimental_video.ini' : ('cocos2dx_experimental_video', 'lua_cocos2dx_experimental_video_auto'), \
                    'cocos2dx_experimental.ini' : ('cocos2dx_experimental', 'lua_cocos2dx_experimental_auto'), \
                    'cocos2dx_controller.ini' : ('cocos2dx_controller', 'lua_cocos2dx_controller_auto'), \
                    'cocos2dx_cocosbuilder.ini': ('cocos2dx_cocosbuilder', 'lua_cocos2dx_cocosbuilder_auto'), \
                    'cocos2dx_cocosdenshion.ini': ('cocos2dx_cocosdenshion', 'lua_cocos2dx_cocosdenshion_auto'), \
                    'cocos2dx_3d.ini': ('cocos2dx_3d', 'lua_cocos2dx_3d_auto'), \
                    'cocos2dx_audioengine.ini': ('cocos2dx_audioengine', 'lua_cocos2dx_audioengine_auto'), \
                    'cocos2dx_csloader.ini' : ('cocos2dx_csloader', 'lua_cocos2dx_csloader_auto'), \
                    'cocos2dx_experimental_webview.ini' : ('cocos2dx_experimental_webview', 'lua_cocos2dx_experimental_webview_auto'), \
                    'cocos2dx_physics3d.ini' : ('cocos2dx_physics3d', 'lua_cocos2dx_physics3d_auto'), \
                    'cocos2dx_navmesh.ini' : ('cocos2dx_navmesh', 'lua_cocos2dx_navmesh_auto'), \
                    }
        target = 'lua'
        generator_py = '%s/generator.py' % cxx_generator_root
        for key in cmd_args.keys():
            args = cmd_args[key]
            cfg = '%s/%s' % (tolua_root, key)
            print 'Generating bindings for %s...' % (key[:-4])
            command = '%s %s %s -s %s -t %s -o %s -n %s' % (python_bin, generator_py, cfg, args[0], target, output_dir, args[1])
            _run_cmd(command)

        print '---------------------------------'
        print 'Generating lua bindings succeeds.'
        print '---------------------------------'

    except Exception as e:
        # _run_cmd raises CmdError; anything else is unexpected and re-raised.
        if e.__class__.__name__ == 'CmdError':
            print '---------------------------------'
            print 'Generating lua bindings fails.'
            print '---------------------------------'
            sys.exit(1)
        else:
            raise
# -------------- main --------------
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import asyncore
import mimetypes
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from email.header import Header
from email.mime.text import MIMEText
from smtplib import SMTP, SMTPException
from ssl import SSLError
from django.core import mail
from django.core.mail import (
EmailMessage, EmailMultiAlternatives, mail_admins, mail_managers,
send_mail, send_mass_mail,
)
from django.core.mail.backends import console, dummy, filebased, locmem, smtp
from django.core.mail.message import BadHeaderError, sanitize_address
from django.test import SimpleTestCase, override_settings
from django.utils._os import upath
from django.utils.encoding import force_bytes, force_text
from django.utils.six import PY3, StringIO, binary_type
from django.utils.translation import ugettext_lazy
if PY3:
from email.utils import parseaddr
from email import message_from_bytes, message_from_binary_file
else:
from email.Utils import parseaddr
from email import (
message_from_string as message_from_bytes,
message_from_file as message_from_binary_file,
)
class HeadersCheckMixin(object):
    def assertMessageHasHeaders(self, message, headers):
        """
        Assert that every (header-name, header-value) pair in ``headers``
        appears on ``message``.

        ``message`` may be an instance of an email.Message subclass or a
        bytestring containing the contents of an email message.
        ``headers`` should be a set of (header-name, header-value) tuples.
        """
        # Accept raw message bytes by parsing them first.
        parsed = message_from_bytes(message) if isinstance(message, binary_type) else message
        present = set(parsed.items())
        self.assertTrue(
            headers.issubset(present),
            msg='Message is missing '
                'the following headers: %s' % (headers - present),
        )
class MailTests(HeadersCheckMixin, SimpleTestCase):
    """
    Non-backend specific tests.
    """
    def test_ascii(self):
        # Plain ASCII message: headers and payload pass through unchanged.
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
        message = email.message()
        self.assertEqual(message['Subject'], 'Subject')
        self.assertEqual(message.get_payload(), 'Content')
        self.assertEqual(message['From'], 'from@example.com')
        self.assertEqual(message['To'], 'to@example.com')
    def test_multiple_recipients(self):
        # Multiple "to" addresses are joined with ", " in the To header.
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'])
        message = email.message()
        self.assertEqual(message['Subject'], 'Subject')
        self.assertEqual(message.get_payload(), 'Content')
        self.assertEqual(message['From'], 'from@example.com')
        self.assertEqual(message['To'], 'to@example.com, other@example.com')
    def test_cc(self):
        """Regression test for #7722"""
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com')
        self.assertEqual(email.recipients(), ['to@example.com', 'cc@example.com'])
        # Test multiple CC with multiple To
        email = EmailMessage(
            'Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'],
            cc=['cc@example.com', 'cc.other@example.com']
        )
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
        self.assertEqual(
            email.recipients(),
            ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com']
        )
        # Testing with Bcc
        email = EmailMessage(
            'Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'],
            cc=['cc@example.com', 'cc.other@example.com'], bcc=['bcc@example.com']
        )
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
        self.assertEqual(
            email.recipients(),
            ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com']
        )
    def test_reply_to(self):
        # reply_to is rendered as a joined Reply-To header.
        email = EmailMessage(
            'Subject', 'Content', 'from@example.com', ['to@example.com'],
            reply_to=['reply_to@example.com'],
        )
        message = email.message()
        self.assertEqual(message['Reply-To'], 'reply_to@example.com')
        email = EmailMessage(
            'Subject', 'Content', 'from@example.com', ['to@example.com'],
            reply_to=['reply_to1@example.com', 'reply_to2@example.com']
        )
        message = email.message()
        self.assertEqual(message['Reply-To'], 'reply_to1@example.com, reply_to2@example.com')
    def test_recipients_as_tuple(self):
        # Tuples are accepted anywhere a list of addresses is.
        email = EmailMessage(
            'Subject', 'Content', 'from@example.com', ('to@example.com', 'other@example.com'),
            cc=('cc@example.com', 'cc.other@example.com'), bcc=('bcc@example.com',)
        )
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
        self.assertEqual(
            email.recipients(),
            ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com']
        )
    def test_recipients_as_string(self):
        # A bare string (instead of a list/tuple) must raise TypeError.
        with self.assertRaisesMessage(TypeError, '"to" argument must be a list or tuple'):
            EmailMessage(to='foo@example.com')
        with self.assertRaisesMessage(TypeError, '"cc" argument must be a list or tuple'):
            EmailMessage(cc='foo@example.com')
        with self.assertRaisesMessage(TypeError, '"bcc" argument must be a list or tuple'):
            EmailMessage(bcc='foo@example.com')
        with self.assertRaisesMessage(TypeError, '"reply_to" argument must be a list or tuple'):
            EmailMessage(reply_to='reply_to@example.com')
    def test_header_injection(self):
        # Newlines in a header value must be rejected (header injection).
        email = EmailMessage('Subject\nInjection Test', 'Content', 'from@example.com', ['to@example.com'])
        with self.assertRaises(BadHeaderError):
            email.message()
        email = EmailMessage(
            ugettext_lazy('Subject\nInjection Test'), 'Content', 'from@example.com', ['to@example.com']
        )
        with self.assertRaises(BadHeaderError):
            email.message()
    def test_space_continuation(self):
        """
        Test for space continuation character in long (ASCII) subject headers (#7747)
        """
        email = EmailMessage(
            'Long subject lines that get wrapped should contain a space '
            'continuation character to get expected behavior in Outlook and Thunderbird',
            'Content', 'from@example.com', ['to@example.com']
        )
        message = email.message()
        # Note that in Python 3, maximum line length has increased from 76 to 78
        self.assertEqual(
            message['Subject'].encode(),
            b'Long subject lines that get wrapped should contain a space continuation\n'
            b' character to get expected behavior in Outlook and Thunderbird'
        )
    def test_message_header_overrides(self):
        """
        Specifying dates or message-ids in the extra headers overrides the
        default values (#9233)
        """
        headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        email = EmailMessage('subject', 'content', 'from@example.com', ['to@example.com'], headers=headers)
        self.assertMessageHasHeaders(email.message(), {
            ('Content-Transfer-Encoding', '7bit'),
            ('Content-Type', 'text/plain; charset="utf-8"'),
            ('From', 'from@example.com'),
            ('MIME-Version', '1.0'),
            ('Message-ID', 'foo'),
            ('Subject', 'subject'),
            ('To', 'to@example.com'),
            ('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
        })
    def test_from_header(self):
        """
        Make sure we can manually set the From header (#9214)
        """
        email = EmailMessage(
            'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        message = email.message()
        self.assertEqual(message['From'], 'from@example.com')
    def test_to_header(self):
        """
        Make sure we can manually set the To header (#17444)
        """
        email = EmailMessage('Subject', 'Content', 'bounce@example.com',
                             ['list-subscriber@example.com', 'list-subscriber2@example.com'],
                             headers={'To': 'mailing-list@example.com'})
        message = email.message()
        self.assertEqual(message['To'], 'mailing-list@example.com')
        self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
        # If we don't set the To header manually, it should default to the `to` argument to the constructor
        email = EmailMessage('Subject', 'Content', 'bounce@example.com',
                             ['list-subscriber@example.com', 'list-subscriber2@example.com'])
        message = email.message()
        self.assertEqual(message['To'], 'list-subscriber@example.com, list-subscriber2@example.com')
        self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
    def test_reply_to_header(self):
        """
        Specifying 'Reply-To' in headers should override reply_to.
        """
        email = EmailMessage(
            'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
            reply_to=['foo@example.com'], headers={'Reply-To': 'override@example.com'},
        )
        message = email.message()
        self.assertEqual(message['Reply-To'], 'override@example.com')
    def test_multiple_message_call(self):
        """
        Regression for #13259 - Make sure that headers are not changed when
        calling EmailMessage.message()
        """
        email = EmailMessage(
            'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        message = email.message()
        self.assertEqual(message['From'], 'from@example.com')
        message = email.message()
        self.assertEqual(message['From'], 'from@example.com')
    def test_unicode_address_header(self):
        """
        Regression for #11144 - When a to/from/cc header contains unicode,
        make sure the email addresses are parsed correctly (especially with
        regards to commas)
        """
        email = EmailMessage(
            'Subject', 'Content', 'from@example.com',
            ['"Firstname Sürname" <to@example.com>', 'other@example.com'],
        )
        self.assertEqual(
            email.message()['To'],
            '=?utf-8?q?Firstname_S=C3=BCrname?= <to@example.com>, other@example.com'
        )
        email = EmailMessage(
            'Subject', 'Content', 'from@example.com',
            ['"Sürname, Firstname" <to@example.com>', 'other@example.com'],
        )
        self.assertEqual(
            email.message()['To'],
            '=?utf-8?q?S=C3=BCrname=2C_Firstname?= <to@example.com>, other@example.com'
        )
    def test_unicode_headers(self):
        # Non-ASCII subject and extra headers are RFC 2047 encoded.
        email = EmailMessage("Gżegżółka", "Content", "from@example.com", ["to@example.com"],
                             headers={"Sender": '"Firstname Sürname" <sender@example.com>',
                                      "Comments": 'My Sürname is non-ASCII'})
        message = email.message()
        self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
        self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <sender@example.com>')
        self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
    def test_safe_mime_multipart(self):
        """
        Make sure headers can be set with a different encoding than utf-8 in
        SafeMIMEMultipart as well
        """
        headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        from_email, to = 'from@example.com', '"Sürname, Firstname" <to@example.com>'
        text_content = 'This is an important message.'
        html_content = '<p>This is an <strong>important</strong> message.</p>'
        msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
        msg.attach_alternative(html_content, "text/html")
        msg.encoding = 'iso-8859-1'
        self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <to@example.com>')
        self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
    def test_encoding(self):
        """
        Regression for #12791 - Encode body correctly with other encodings
        than utf-8
        """
        email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', 'from@example.com', ['other@example.com'])
        email.encoding = 'iso-8859-1'
        message = email.message()
        self.assertMessageHasHeaders(message, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable'),
            ('Subject', 'Subject'),
            ('From', 'from@example.com'),
            ('To', 'other@example.com')})
        self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
        # Make sure MIME attachments also works correctly with other encodings than utf-8
        text_content = 'Firstname Sürname is a great guy.'
        html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
        msg = EmailMultiAlternatives('Subject', text_content, 'from@example.com', ['to@example.com'])
        msg.encoding = 'iso-8859-1'
        msg.attach_alternative(html_content, "text/html")
        payload0 = msg.message().get_payload(0)
        self.assertMessageHasHeaders(payload0, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable')})
        self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.'))
        payload1 = msg.message().get_payload(1)
        self.assertMessageHasHeaders(payload1, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/html; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable')})
        self.assertTrue(
            payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>')
        )
    def test_attachments(self):
        """Regression test for #9367"""
        headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
        text_content = 'This is an important message.'
        html_content = '<p>This is an <strong>important</strong> message.</p>'
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
        msg.attach_alternative(html_content, "text/html")
        msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
        msg_bytes = msg.message().as_bytes()
        message = message_from_bytes(msg_bytes)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_content_type(), 'multipart/mixed')
        self.assertEqual(message.get_default_type(), 'text/plain')
        payload = message.get_payload()
        self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
        self.assertEqual(payload[1].get_content_type(), 'application/pdf')
    def test_non_ascii_attachment_filename(self):
        """Regression test for #14964"""
        headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
        content = 'This is the message.'
        msg = EmailMessage(subject, content, from_email, [to], headers=headers)
        # Unicode in file name
        msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
        msg_bytes = msg.message().as_bytes()
        message = message_from_bytes(msg_bytes)
        payload = message.get_payload()
        self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
    def test_attach_file(self):
        """
        Test attaching a file against different mimetypes and make sure that
        a file will be attached and sent properly even if an invalid mimetype
        is specified.
        """
        files = (
            # filename, actual mimetype
            ('file.txt', 'text/plain'),
            ('file.png', 'image/png'),
            ('file_txt', None),
            ('file_png', None),
            ('file_txt.png', 'image/png'),
            ('file_png.txt', 'text/plain'),
        )
        test_mimetypes = ['text/plain', 'image/png', None]
        for basename, real_mimetype in files:
            for mimetype in test_mimetypes:
                email = EmailMessage('subject', 'body', 'from@example.com', ['to@example.com'])
                self.assertEqual(mimetypes.guess_type(basename)[0], real_mimetype)
                self.assertEqual(email.attachments, [])
                file_path = os.path.join(os.path.dirname(upath(__file__)), 'attachments', basename)
                email.attach_file(file_path, mimetype=mimetype)
                self.assertEqual(len(email.attachments), 1)
                self.assertIn(basename, email.attachments[0])
                msgs_sent_num = email.send()
                self.assertEqual(msgs_sent_num, 1)
    def test_dummy_backend(self):
        """
        Make sure that dummy backends returns correct number of sent messages
        """
        connection = dummy.EmailBackend()
        email = EmailMessage(
            'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        self.assertEqual(connection.send_messages([email, email, email]), 3)
    def test_arbitrary_keyword(self):
        """
        Make sure that get_connection() accepts arbitrary keyword that might be
        used with custom backends.
        """
        c = mail.get_connection(fail_silently=True, foo='bar')
        self.assertTrue(c.fail_silently)
    def test_custom_backend(self):
        """Test custom backend defined in this suite."""
        conn = mail.get_connection('mail.custombackend.EmailBackend')
        self.assertTrue(hasattr(conn, 'test_outbox'))
        email = EmailMessage(
            'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        conn.send_messages([email])
        self.assertEqual(len(conn.test_outbox), 1)
    def test_backend_arg(self):
        """Test backend argument of mail.get_connection()"""
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
        self.assertIsInstance(
            mail.get_connection('django.core.mail.backends.locmem.EmailBackend'),
            locmem.EmailBackend
        )
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
        self.assertIsInstance(
            mail.get_connection('django.core.mail.backends.console.EmailBackend'),
            console.EmailBackend
        )
        tmp_dir = tempfile.mkdtemp()
        try:
            self.assertIsInstance(
                mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir),
                filebased.EmailBackend
            )
        finally:
            shutil.rmtree(tmp_dir)
        self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
    @override_settings(
        EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
        ADMINS=[('nobody', 'nobody@example.com')],
        MANAGERS=[('nobody', 'nobody@example.com')])
    def test_connection_arg(self):
        """Test connection argument to send_mail(), et. al."""
        mail.outbox = []
        # Send using non-default connection
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, 'Subject')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        send_mass_mail([
            ('Subject1', 'Content1', 'from1@example.com', ['to1@example.com']),
            ('Subject2', 'Content2', 'from2@example.com', ['to2@example.com']),
        ], connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 2)
        self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
        self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        mail_admins('Admin message', 'Content', connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        mail_managers('Manager message', 'Content', connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
    def test_dont_mangle_from_in_body(self):
        # Regression for #13433 - Make sure that EmailMessage doesn't mangle
        # 'From ' in message body.
        email = EmailMessage(
            'Subject', 'From the future', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        self.assertNotIn(b'>From the future', email.message().as_bytes())
    def test_dont_base64_encode(self):
        # Ticket #3472
        # Shouldn't use Base64 encoding at all
        msg = EmailMessage(
            'Subject', 'UTF-8 encoded body', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        self.assertNotIn(b'Content-Transfer-Encoding: base64', msg.message().as_bytes())
        # Ticket #11212
        # Shouldn't use quoted printable, should detect it can represent content with 7 bit data
        msg = EmailMessage(
            'Subject', 'Body with only ASCII characters.', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        s = msg.message().as_bytes()
        self.assertNotIn(b'Content-Transfer-Encoding: quoted-printable', s)
        self.assertIn(b'Content-Transfer-Encoding: 7bit', s)
        # Shouldn't use quoted printable, should detect it can represent content with 8 bit data
        msg = EmailMessage(
            'Subject', 'Body with latin characters: àáä.', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        s = msg.message().as_bytes()
        self.assertNotIn(b'Content-Transfer-Encoding: quoted-printable', s)
        self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
        msg = EmailMessage(
            'Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'bounce@example.com',
            ['to@example.com'], headers={'From': 'from@example.com'},
        )
        s = msg.message().as_bytes()
        self.assertNotIn(b'Content-Transfer-Encoding: quoted-printable', s)
        self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
    def test_dont_base64_encode_message_rfc822(self):
        # Ticket #18967
        # Shouldn't use base64 encoding for a child EmailMessage attachment.
        # Create a child message first
        child_msg = EmailMessage(
            'Child Subject', 'Some body of child message', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        child_s = child_msg.message().as_string()
        # Now create a parent
        parent_msg = EmailMessage(
            'Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        # Attach to parent as a string
        parent_msg.attach(content=child_s, mimetype='message/rfc822')
        parent_s = parent_msg.message().as_string()
        # Verify that the child message header is not base64 encoded
        self.assertIn(str('Child Subject'), parent_s)
        # Feature test: try attaching email.Message object directly to the mail.
        parent_msg = EmailMessage(
            'Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        parent_msg.attach(content=child_msg.message(), mimetype='message/rfc822')
        parent_s = parent_msg.message().as_string()
        # Verify that the child message header is not base64 encoded
        self.assertIn(str('Child Subject'), parent_s)
        # Feature test: try attaching Django's EmailMessage object directly to the mail.
        parent_msg = EmailMessage(
            'Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        parent_msg.attach(content=child_msg, mimetype='message/rfc822')
        parent_s = parent_msg.message().as_string()
        # Verify that the child message header is not base64 encoded
        self.assertIn(str('Child Subject'), parent_s)
    def test_sanitize_address(self):
        """
        Email addresses are properly sanitized.
        """
        # Simple ASCII address - string form
        self.assertEqual(sanitize_address('to@example.com', 'ascii'), 'to@example.com')
        self.assertEqual(sanitize_address('to@example.com', 'utf-8'), 'to@example.com')
        # Bytestrings are transformed to normal strings.
        self.assertEqual(sanitize_address(b'to@example.com', 'utf-8'), 'to@example.com')
        # Simple ASCII address - tuple form
        self.assertEqual(
            sanitize_address(('A name', 'to@example.com'), 'ascii'),
            'A name <to@example.com>'
        )
        # Expected output differs between Python versions, hence the branch.
        if PY3:
            self.assertEqual(
                sanitize_address(('A name', 'to@example.com'), 'utf-8'),
                '=?utf-8?q?A_name?= <to@example.com>'
            )
        else:
            self.assertEqual(
                sanitize_address(('A name', 'to@example.com'), 'utf-8'),
                'A name <to@example.com>'
            )
        # Unicode characters are supported in RFC-6532.
        self.assertEqual(
            sanitize_address('tó@example.com', 'utf-8'),
            '=?utf-8?b?dMOz?=@example.com'
        )
        self.assertEqual(
            sanitize_address(('Tó Example', 'tó@example.com'), 'utf-8'),
            '=?utf-8?q?T=C3=B3_Example?= <=?utf-8?b?dMOz?=@example.com>'
        )
class PythonGlobalState(SimpleTestCase):
    """
    Tests for #12422 -- Django smarts (#2472/#11212) with charset of utf-8 text
    parts shouldn't pollute global email Python package charset registry when
    django.mail.message is imported.
    """
    # Each test builds a text/plain part straight through the stdlib email
    # package; the stock base64 transfer encoding for utf-8 must remain.
    def test_utf8(self):
        part = MIMEText('UTF-8 encoded body', 'plain', 'utf-8')
        self.assertIn('Content-Transfer-Encoding: base64', part.as_string())
    def test_7bit(self):
        part = MIMEText('Body with only ASCII characters.', 'plain', 'utf-8')
        self.assertIn('Content-Transfer-Encoding: base64', part.as_string())
    def test_8bit_latin(self):
        part = MIMEText('Body with latin characters: àáä.', 'plain', 'utf-8')
        self.assertIn(str('Content-Transfer-Encoding: base64'), part.as_string())
    def test_8bit_non_latin(self):
        part = MIMEText('Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'plain', 'utf-8')
        self.assertIn(str('Content-Transfer-Encoding: base64'), part.as_string())
class BaseEmailBackendTests(HeadersCheckMixin, object):
email_backend = None
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
self.longMessage = True
self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
def get_mailbox_content(self):
raise NotImplementedError('subclasses of BaseEmailBackendTests must provide a get_mailbox_content() method')
def flush_mailbox(self):
raise NotImplementedError('subclasses of BaseEmailBackendTests may require a flush_mailbox() method')
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(
len(mailbox), 1,
"Expected exactly one message, got %d.\n%r" % (len(mailbox), [m.as_string() for m in mailbox])
)
return mailbox[0]
def test_send(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "from@example.com")
self.assertEqual(message.get_all("to"), ["to@example.com"])
def test_send_unicode(self):
email = EmailMessage('Chère maman', 'Je t\'aime très fort', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
self.assertEqual(force_text(message.get_payload(decode=True)), 'Je t\'aime très fort')
def test_send_long_lines(self):
"""
Email line length is limited to 998 chars by the RFC:
https://tools.ietf.org/html/rfc5322#section-2.1.1
Message body containing longer lines are converted to Quoted-Printable
to avoid having to insert newlines, which could be hairy to do properly.
"""
email = EmailMessage('Subject', "Comment ça va? " * 100, 'from@example.com', ['to@example.com'])
email.send()
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', 'quoted-printable'),
})
def test_send_many(self):
email1 = EmailMessage('Subject', 'Content1', 'from@example.com', ['to@example.com'])
email2 = EmailMessage('Subject', 'Content2', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email1, email2])
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), "Content1")
self.assertEqual(messages[1].get_payload(), "Content2")
def test_send_verbose_name(self):
email = EmailMessage("Subject", "Content", '"Firstname Sürname" <from@example.com>',
["to@example.com"])
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <from@example.com>")
def test_plaintext_send_mail(self):
"""
Test send_mail without the html_message
regression test for adding html_message parameter to send_mail()
"""
send_mail('Subject', 'Content', 'sender@example.com', ['nobody@example.com'])
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message.get_content_type(), 'text/plain')
def test_html_send_mail(self):
"""Test html_message argument to send_mail"""
send_mail('Subject', 'Content', 'sender@example.com', ['nobody@example.com'], html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(MANAGERS=[('nobody', 'nobody@example.com')])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(ADMINS=[('nobody', 'nobody@example.com')])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins """
mail_admins('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(
ADMINS=[('nobody', 'nobody+admin@example.com')],
MANAGERS=[('nobody', 'nobody+manager@example.com')])
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazy translated subject = bad output
Regression for #13494
"""
mail_managers(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.flush_mailbox()
mail_admins(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
@override_settings(ADMINS=[], MANAGERS=[])
def test_empty_admins(self):
"""
Test that mail_admins/mail_managers doesn't connect to the mail server
if there are no recipients (#9383)
"""
mail_admins('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
mail_managers('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', 'from@example.com'),
('To', 'to@example.com'),
('Cc', 'cc@example.com')})
self.assertIn('\nDate: ', message.as_string())
def test_idn_send(self):
"""
Regression test for #14301
"""
self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
self.flush_mailbox()
m = EmailMessage('Subject', 'Content', 'from@öäü.com', ['to@öäü.com'], cc=['cc@öäü.com'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
self.assertEqual(message.get('cc'), 'cc@xn--4ca9at.com')
def test_recipient_without_domain(self):
"""
Regression test for #15042
"""
self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), "tester")
self.assertEqual(message.get('to'), "django")
def test_lazy_addresses(self):
"""
Email sending should support lazy email addresses (#24416).
"""
_ = ugettext_lazy
self.assertTrue(send_mail('Subject', 'Content', _('tester'), [_('django')]))
message = self.get_the_message()
self.assertEqual(message.get('from'), 'tester')
self.assertEqual(message.get('to'), 'django')
self.flush_mailbox()
m = EmailMessage(
'Subject', 'Content', _('tester'), [_('to1'), _('to2')],
cc=[_('cc1'), _('cc2')],
bcc=[_('bcc')],
reply_to=[_('reply')],
)
self.assertEqual(m.recipients(), ['to1', 'to2', 'cc1', 'cc2', 'bcc'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('from'), 'tester')
self.assertEqual(message.get('to'), 'to1, to2')
self.assertEqual(message.get('cc'), 'cc1, cc2')
self.assertEqual(message.get('Reply-To'), 'reply')
def test_close_connection(self):
    """
    Test that connection can be closed (even when not explicitly opened)
    """
    conn = mail.get_connection(username='', password='')
    # close() on a never-opened connection must be a no-op, not an error.
    conn.close()
def test_use_as_contextmanager(self):
    """
    Test that the connection can be used as a contextmanager.
    """
    opened = [False]
    closed = [False]
    conn = mail.get_connection(username='', password='')

    # Instrument open/close so we can observe when the context-manager
    # protocol invokes them.
    def open():
        opened[0] = True
    conn.open = open

    def close():
        closed[0] = True
    conn.close = close

    with conn as same_conn:
        # Entering the context opens the connection and yields it back.
        self.assertTrue(opened[0])
        self.assertIs(same_conn, conn)
        self.assertFalse(closed[0])
    # Leaving the context closes the connection.
    self.assertTrue(closed[0])
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
    email_backend = 'django.core.mail.backends.locmem.EmailBackend'

    def get_mailbox_content(self):
        # The locmem backend stores EmailMessage objects in mail.outbox.
        return [m.message() for m in mail.outbox]

    def flush_mailbox(self):
        mail.outbox = []

    def tearDown(self):
        super(LocmemBackendTests, self).tearDown()
        mail.outbox = []

    def test_locmem_shared_messages(self):
        """
        Make sure that the locmem backend populates the outbox.
        """
        # Two separate backend instances must share the single global outbox.
        connection = locmem.EmailBackend()
        connection2 = locmem.EmailBackend()
        email = EmailMessage(
            'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        connection.send_messages([email])
        connection2.send_messages([email])
        self.assertEqual(len(mail.outbox), 2)

    def test_validate_multiline_headers(self):
        # Ticket #18861 - Validate emails when using the locmem backend
        with self.assertRaises(BadHeaderError):
            send_mail('Subject\nMultiline', 'Content', 'from@example.com', ['to@example.com'])
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
    email_backend = 'django.core.mail.backends.filebased.EmailBackend'

    def setUp(self):
        super(FileBackendTests, self).setUp()
        # Point EMAIL_FILE_PATH at a throwaway directory for each test.
        self.tmp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tmp_dir)
        self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
        self._settings_override.enable()

    def tearDown(self):
        self._settings_override.disable()
        super(FileBackendTests, self).tearDown()

    def flush_mailbox(self):
        for filename in os.listdir(self.tmp_dir):
            os.unlink(os.path.join(self.tmp_dir, filename))

    def get_mailbox_content(self):
        # Each file may hold several messages separated by a line of 79
        # dashes; parse every non-empty chunk into a Message object.
        messages = []
        for filename in os.listdir(self.tmp_dir):
            with open(os.path.join(self.tmp_dir, filename), 'rb') as fp:
                session = fp.read().split(force_bytes('\n' + ('-' * 79) + '\n', encoding='ascii'))
            messages.extend(message_from_bytes(m) for m in session if m)
        return messages

    def test_file_sessions(self):
        """Make sure opening a connection creates a new file"""
        msg = EmailMessage(
            'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
            headers={'From': 'from@example.com'},
        )
        connection = mail.get_connection()
        connection.send_messages([msg])

        self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
        with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp:
            message = message_from_binary_file(fp)
        self.assertEqual(message.get_content_type(), 'text/plain')
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get('from'), 'from@example.com')
        self.assertEqual(message.get('to'), 'to@example.com')

        # A second connection writes to a new file ...
        connection2 = mail.get_connection()
        connection2.send_messages([msg])
        self.assertEqual(len(os.listdir(self.tmp_dir)), 2)

        # ... while reusing the first connection appends to its file.
        connection.send_messages([msg])
        self.assertEqual(len(os.listdir(self.tmp_dir)), 2)

        msg.connection = mail.get_connection()
        self.assertTrue(connection.open())
        msg.send()
        self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
        msg.send()
        self.assertEqual(len(os.listdir(self.tmp_dir)), 3)

        connection.close()
class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase):
    email_backend = 'django.core.mail.backends.console.EmailBackend'

    def setUp(self):
        super(ConsoleBackendTests, self).setUp()
        # Capture stdout so the console backend's output can be inspected.
        self.__stdout = sys.stdout
        self.stream = sys.stdout = StringIO()

    def tearDown(self):
        del self.stream
        sys.stdout = self.__stdout
        del self.__stdout
        super(ConsoleBackendTests, self).tearDown()

    def flush_mailbox(self):
        self.stream = sys.stdout = StringIO()

    def get_mailbox_content(self):
        # Messages are separated on the stream by a line of 79 dashes.
        messages = self.stream.getvalue().split(str('\n' + ('-' * 79) + '\n'))
        return [message_from_bytes(force_bytes(m)) for m in messages if m]

    def test_console_stream_kwarg(self):
        """
        Test that the console backend can be pointed at an arbitrary stream.
        """
        s = StringIO()
        connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
        send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
        message = force_bytes(s.getvalue().split('\n' + ('-' * 79) + '\n')[0])
        self.assertMessageHasHeaders(message, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="utf-8"'),
            ('Content-Transfer-Encoding', '7bit'),
            ('Subject', 'Subject'),
            ('From', 'from@example.com'),
            ('To', 'to@example.com')})
        self.assertIn(b'\nDate: ', message)
class FakeSMTPChannel(smtpd.SMTPChannel):
    """SMTP channel that tolerates undecodable bytes from TLS handshakes."""

    def collect_incoming_data(self, data):
        # A TLS/SSL handshake against this plain-text server sends bytes the
        # base channel cannot decode; those tests only care that a connection
        # attempt was made, so the error is swallowed.
        try:
            super(FakeSMTPChannel, self).collect_incoming_data(data)
        except UnicodeDecodeError:
            # ignore decode error in SSL/TLS connection tests as we only care
            # whether the connection attempt was made
            pass
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
    """
    Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
    http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
    """
    channel_class = FakeSMTPChannel

    def __init__(self, *args, **kwargs):
        threading.Thread.__init__(self)
        # New kwarg added in Python 3.5; default switching to False in 3.6.
        if sys.version_info >= (3, 5):
            kwargs['decode_data'] = True
        smtpd.SMTPServer.__init__(self, *args, **kwargs)
        # _sink collects received messages; guarded by sink_lock because the
        # asyncore loop runs on this thread while tests read from another.
        self._sink = []
        self.active = False
        self.active_lock = threading.Lock()
        self.sink_lock = threading.Lock()

    def process_message(self, peer, mailfrom, rcpttos, data):
        if PY3:
            data = data.encode('utf-8')
        m = message_from_bytes(data)
        maddr = parseaddr(m.get('from'))[1]

        if mailfrom != maddr:
            # According to the spec, mailfrom does not necessarily match the
            # From header - on Python 3 this is the case where the local part
            # isn't encoded, so try to correct that.
            lp, domain = mailfrom.split('@', 1)
            lp = Header(lp, 'utf-8').encode()
            mailfrom = '@'.join([lp, domain])

        if mailfrom != maddr:
            return "553 '%s' != '%s'" % (mailfrom, maddr)
        with self.sink_lock:
            self._sink.append(m)

    def get_sink(self):
        # Return a copy so callers can iterate without holding the lock.
        with self.sink_lock:
            return self._sink[:]

    def flush_sink(self):
        with self.sink_lock:
            self._sink[:] = []

    def start(self):
        assert not self.active
        self.__flag = threading.Event()
        threading.Thread.start(self)
        # Block until run() has actually begun on the new thread.
        self.__flag.wait()

    def run(self):
        self.active = True
        self.__flag.set()
        while self.active and asyncore.socket_map:
            with self.active_lock:
                asyncore.loop(timeout=0.1, count=1)
        asyncore.close_all()

    def stop(self):
        if self.active:
            self.active = False
            self.join()
class SMTPBackendTestsBase(SimpleTestCase):
    """Starts one FakeSMTPServer per test class and wires settings to it."""

    @classmethod
    def setUpClass(cls):
        super(SMTPBackendTestsBase, cls).setUpClass()
        # Bind to port 0 so the OS picks a free port, then point the email
        # settings at whatever port was actually assigned.
        cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
        cls._settings_override = override_settings(
            EMAIL_HOST="127.0.0.1",
            EMAIL_PORT=cls.server.socket.getsockname()[1])
        cls._settings_override.enable()
        cls.server.start()

    @classmethod
    def tearDownClass(cls):
        cls._settings_override.disable()
        cls.server.stop()
        super(SMTPBackendTestsBase, cls).tearDownClass()
class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase):
    """Exercises the SMTP backend against the in-process FakeSMTPServer."""

    email_backend = 'django.core.mail.backends.smtp.EmailBackend'

    def setUp(self):
        super(SMTPBackendTests, self).setUp()
        self.server.flush_sink()

    def tearDown(self):
        self.server.flush_sink()
        super(SMTPBackendTests, self).tearDown()

    def flush_mailbox(self):
        self.server.flush_sink()

    def get_mailbox_content(self):
        return self.server.get_sink()

    @override_settings(
        EMAIL_HOST_USER="not empty username",
        EMAIL_HOST_PASSWORD="not empty password")
    def test_email_authentication_use_settings(self):
        backend = smtp.EmailBackend()
        self.assertEqual(backend.username, 'not empty username')
        self.assertEqual(backend.password, 'not empty password')

    @override_settings(
        EMAIL_HOST_USER="not empty username",
        EMAIL_HOST_PASSWORD="not empty password")
    def test_email_authentication_override_settings(self):
        backend = smtp.EmailBackend(username='username', password='password')
        self.assertEqual(backend.username, 'username')
        self.assertEqual(backend.password, 'password')

    @override_settings(
        EMAIL_HOST_USER="not empty username",
        EMAIL_HOST_PASSWORD="not empty password")
    def test_email_disabled_authentication(self):
        # Passing empty strings explicitly disables authentication even when
        # credentials exist in settings.
        backend = smtp.EmailBackend(username='', password='')
        self.assertEqual(backend.username, '')
        self.assertEqual(backend.password, '')

    def test_auth_attempted(self):
        """
        Test that opening the backend with non empty username/password tries
        to authenticate against the SMTP server.
        """
        backend = smtp.EmailBackend(
            username='not empty username', password='not empty password')
        try:
            with self.assertRaisesMessage(SMTPException, 'SMTP AUTH extension not supported by server.'):
                backend.open()
        finally:
            backend.close()

    def test_server_open(self):
        """
        Test that open() tells us whether it opened a connection.
        """
        backend = smtp.EmailBackend(username='', password='')
        self.assertFalse(backend.connection)
        opened = backend.open()
        backend.close()
        self.assertTrue(opened)

    @override_settings(EMAIL_USE_TLS=True)
    def test_email_tls_use_settings(self):
        backend = smtp.EmailBackend()
        self.assertTrue(backend.use_tls)

    @override_settings(EMAIL_USE_TLS=True)
    def test_email_tls_override_settings(self):
        backend = smtp.EmailBackend(use_tls=False)
        self.assertFalse(backend.use_tls)

    def test_email_tls_default_disabled(self):
        backend = smtp.EmailBackend()
        self.assertFalse(backend.use_tls)

    @override_settings(EMAIL_USE_SSL=True)
    def test_email_ssl_use_settings(self):
        backend = smtp.EmailBackend()
        self.assertTrue(backend.use_ssl)

    @override_settings(EMAIL_USE_SSL=True)
    def test_email_ssl_override_settings(self):
        backend = smtp.EmailBackend(use_ssl=False)
        self.assertFalse(backend.use_ssl)

    def test_email_ssl_default_disabled(self):
        backend = smtp.EmailBackend()
        self.assertFalse(backend.use_ssl)

    @override_settings(EMAIL_SSL_CERTFILE='foo')
    def test_email_ssl_certfile_use_settings(self):
        backend = smtp.EmailBackend()
        self.assertEqual(backend.ssl_certfile, 'foo')

    @override_settings(EMAIL_SSL_CERTFILE='foo')
    def test_email_ssl_certfile_override_settings(self):
        backend = smtp.EmailBackend(ssl_certfile='bar')
        self.assertEqual(backend.ssl_certfile, 'bar')

    def test_email_ssl_certfile_default_disabled(self):
        backend = smtp.EmailBackend()
        self.assertEqual(backend.ssl_certfile, None)

    @override_settings(EMAIL_SSL_KEYFILE='foo')
    def test_email_ssl_keyfile_use_settings(self):
        backend = smtp.EmailBackend()
        self.assertEqual(backend.ssl_keyfile, 'foo')

    @override_settings(EMAIL_SSL_KEYFILE='foo')
    def test_email_ssl_keyfile_override_settings(self):
        backend = smtp.EmailBackend(ssl_keyfile='bar')
        self.assertEqual(backend.ssl_keyfile, 'bar')

    def test_email_ssl_keyfile_default_disabled(self):
        backend = smtp.EmailBackend()
        self.assertEqual(backend.ssl_keyfile, None)

    @override_settings(EMAIL_USE_TLS=True)
    def test_email_tls_attempts_starttls(self):
        # The fake server does not advertise STARTTLS, so opening must fail.
        backend = smtp.EmailBackend()
        self.assertTrue(backend.use_tls)
        try:
            with self.assertRaisesMessage(SMTPException, 'STARTTLS extension not supported by server.'):
                backend.open()
        finally:
            backend.close()

    @override_settings(EMAIL_USE_SSL=True)
    def test_email_ssl_attempts_ssl_connection(self):
        # The fake server is plain-text, so an SSL handshake must fail.
        backend = smtp.EmailBackend()
        self.assertTrue(backend.use_ssl)
        try:
            with self.assertRaises(SSLError):
                backend.open()
        finally:
            backend.close()

    def test_connection_timeout_default(self):
        """Test that the connection's timeout value is None by default."""
        connection = mail.get_connection('django.core.mail.backends.smtp.EmailBackend')
        self.assertEqual(connection.timeout, None)

    def test_connection_timeout_custom(self):
        """Test that the timeout parameter can be customized."""
        class MyEmailBackend(smtp.EmailBackend):
            def __init__(self, *args, **kwargs):
                kwargs.setdefault('timeout', 42)
                super(MyEmailBackend, self).__init__(*args, **kwargs)

        myemailbackend = MyEmailBackend()
        myemailbackend.open()
        self.assertEqual(myemailbackend.timeout, 42)
        self.assertEqual(myemailbackend.connection.timeout, 42)
        myemailbackend.close()

    @override_settings(EMAIL_TIMEOUT=10)
    def test_email_timeout_override_settings(self):
        backend = smtp.EmailBackend()
        self.assertEqual(backend.timeout, 10)

    def test_email_msg_uses_crlf(self):
        """#23063 -- Test that RFC-compliant messages are sent over SMTP."""
        send = SMTP.send
        try:
            # Capture every raw chunk written to the SMTP socket.
            smtp_messages = []

            def mock_send(self, s):
                smtp_messages.append(s)
                return send(self, s)
            SMTP.send = mock_send

            email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
            mail.get_connection().send_messages([email])

            # Find the actual message
            msg = None
            for i, m in enumerate(smtp_messages):
                if m[:4] == 'data':
                    msg = smtp_messages[i + 1]
                    break

            self.assertTrue(msg)

            if PY3:
                msg = msg.decode('utf-8')
            # Ensure that the message only contains CRLF and not combinations of CRLF, LF, and CR.
            msg = msg.replace('\r\n', '')
            self.assertNotIn('\r', msg)
            self.assertNotIn('\n', msg)
        finally:
            SMTP.send = send
class SMTPBackendStoppedServerTest(SMTPBackendTestsBase):
    """
    This test requires a separate class, because it shuts down the
    FakeSMTPServer started in setUpClass(). It cannot be restarted
    ("RuntimeError: threads can only be started once").
    """
    def test_server_stopped(self):
        """
        Test that closing the backend while the SMTP server is stopped doesn't
        raise an exception.
        """
        backend = smtp.EmailBackend(username='', password='')
        backend.open()

        self.server.stop()
        # Closing after the server has gone away must not blow up.
        backend.close()
#!/opt/mesosphere/bin/python3
import logging
import os
import sys
import time
def main(directory, max_files, managed_file):
    '''
    Finds all files under the given `directory` and checks if the number
    of files exceeds the `max_files`. If so, deletes files starting from
    the oldest (except for the `managed_file`) until the `max_files` is
    no longer exceeded.

    @type directory: str, absolute path of the directory to clean up
    @type max_files: int, maximum number of files to keep
    @type managed_file: str, one file (i.e. leading log) to exempt from cleanup
    '''
    logging.basicConfig(format='[%(levelname)s] %(message)s',
                        level=logging.INFO)

    # For simplicity, convert all paths to absolute paths.
    directory = os.path.abspath(directory)
    managed_file = os.path.abspath(managed_file)

    # Build a list of all files.
    # TODO(josephw): Do we want to delete directories too?
    all_files = []
    for root, subdirectories, files in os.walk(directory):
        all_files += [os.path.join(root, name) for name in files]

    # Exempt the one managed file. This is presumably the leading log file.
    all_files.remove(managed_file)

    logging.info("Found %d files in log directory (max %d)",
                 len(all_files), max_files)
    if len(all_files) <= max_files:
        return

    # Sort oldest-first by modification time and drop enough files from the
    # front of the list to get back under the limit.
    oldest_first = sorted(all_files, key=lambda x: os.stat(x).st_mtime_ns)
    to_delete = oldest_first[0:len(all_files) - max_files]

    # Timestamp of the newest file being removed: everything deleted was last
    # modified at or before this time.
    delete_mtime_threshold = time.strftime(
        "%a, %d %b %Y %H:%M:%S +0000",
        time.gmtime(os.path.getmtime(to_delete[-1])))
    # Bug fix: the original passed a str.format()-style "{0}" placeholder as a
    # logging argument; logging uses %-formatting, so this raised a formatting
    # error and the timestamp was never rendered. The wording is also
    # corrected: deleted files were modified at or before the threshold.
    logging.info("Deleting all files modified at or before: %s",
                 delete_mtime_threshold)

    for path in to_delete:
        logging.info("Deleting old file inside log directory: %s", path)
        os.remove(path)
if __name__ == '__main__':
    # Expect exactly three arguments: <directory> <max-files> <managed-file>.
    if len(sys.argv) != 4:
        print("Usage: delete-oldest-unmanaged-files.py <directory> <max-files> <managed-file>")
        sys.exit(1)

    main(sys.argv[1], int(sys.argv[2]), sys.argv[3])
# Copyright (c) 2011 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This modules stubs out functions in oslo_concurrency.processutils."""
import re
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
import six
LOG = logging.getLogger(__name__)

# Module-level state shared by the fake: configured (pattern, handler) pairs
# and a log of every faked command string, in execution order.
_fake_execute_repliers = []
_fake_execute_log = []
def fake_execute_get_log():
    # Return the shared module-level list of faked command strings recorded
    # so far; callers should treat it as read-only.
    return _fake_execute_log
def fake_execute_clear_log():
    """Reset the recorded command log to a fresh empty list."""
    global _fake_execute_log
    _fake_execute_log = []
def fake_execute_set_repliers(repliers):
    """Allows the client to configure replies to commands."""
    # `repliers` is a list of (regex_pattern, handler) pairs; the first
    # pattern matching a faked command string selects its handler.
    global _fake_execute_repliers
    _fake_execute_repliers = repliers
def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
    """Fallback reply handler for commands with no configured replier.

    Mimics a command that succeeded silently by returning empty strings
    for both stdout and stderr.
    """
    stdout, stderr = '', ''
    return stdout, stderr
def fake_execute(*cmd_parts, **kwargs):
    """This function stubs out execute.

    It optionally executes a preconfigued function to return expected data.
    """
    global _fake_execute_repliers

    # Mirror the keyword arguments accepted by processutils.execute so the
    # configured reply handlers receive the same call signature.
    process_input = kwargs.get('process_input', None)
    check_exit_code = kwargs.get('check_exit_code', 0)
    delay_on_retry = kwargs.get('delay_on_retry', True)
    attempts = kwargs.get('attempts', 1)
    run_as_root = kwargs.get('run_as_root', False)
    cmd_str = ' '.join(str(part) for part in cmd_parts)

    LOG.debug("Faking execution of cmd (subprocess): %s", cmd_str)
    _fake_execute_log.append(cmd_str)

    # Pick the first configured replier whose regex matches the command;
    # fall back to the default (empty stdout/stderr) handler.
    reply_handler = fake_execute_default_reply_handler
    for fake_replier in _fake_execute_repliers:
        if re.match(fake_replier[0], cmd_str):
            reply_handler = fake_replier[1]
            LOG.debug('Faked command matched %s', fake_replier[0])
            break

    if isinstance(reply_handler, six.string_types):
        # If the reply handler is a string, return it as stdout
        reply = reply_handler, ''
    else:
        try:
            # Alternative is a function, so call it
            reply = reply_handler(cmd_parts,
                                  process_input=process_input,
                                  delay_on_retry=delay_on_retry,
                                  attempts=attempts,
                                  run_as_root=run_as_root,
                                  check_exit_code=check_exit_code)
        except processutils.ProcessExecutionError as e:
            LOG.debug('Faked command raised an exception %s', e)
            raise

    LOG.debug("Reply to faked command is stdout='%(stdout)s' "
              "stderr='%(stderr)s'", {'stdout': reply[0], 'stderr': reply[1]})

    # Replicate the sleep call in the real function
    greenthread.sleep(0)
    return reply
def stub_out_processutils_execute(stubs):
    """Replace processutils.execute with fake_execute and reset fake state."""
    fake_execute_set_repliers([])
    fake_execute_clear_log()
    stubs.Set(processutils, 'execute', fake_execute)
# Copyright (c) Microsoft. All rights reserved. Licensed under the MIT license. See full license at the bottom of this file.
from django.conf.urls import patterns, url
from contacts import views
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# on newer Django this would need to become a plain list of url() entries —
# confirm the project's target Django version before changing.
urlpatterns = patterns('',
    # The home view ('/contacts/')
    url(r'^$', views.index, name='index'),
    # Used to start OAuth2 flow ('/contacts/connect/')
    url(r'^connect/$', views.connect, name='connect'),
    # Used as redirect target in OAuth2 flow ('/contacts/authorize/')
    url(r'^authorize/$', views.authorize, name='authorize'),
    # Displays a form to create a new contact ('/contacts/new/')
    url(r'^new/$', views.new, name='new'),
    # Invoked to create a new contact in Office 365 ('/contacts/create/')
    url(r'^create/$', views.create, name='create'),
    # Displays an existing contact in an editable form ('/contacts/edit/<contact_id>/')
    url(r'^edit/(?P<contact_id>.+)/$', views.edit, name='edit'),
    # Invoked to update an existing contact ('/contacts/update/<contact_id>/')
    url(r'^update/(?P<contact_id>.+)/$', views.update, name='update'),
    # Invoked to delete an existing contact ('/contacts/delete/<contact_id>/')
    url(r'^delete/(?P<contact_id>.+)/$', views.delete, name='delete'),
)
# MIT License:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# ""Software""), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | unknown | codeparrot/codeparrot-clean | ||
//===--- PrimitiveParsing.h - Primitive parsing routines --------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Primitive parsing routines useful in various places in the compiler.
///
//===----------------------------------------------------------------------===//
#ifndef SWIFT_BASIC_PRIMITIVEPARSING_H
#define SWIFT_BASIC_PRIMITIVEPARSING_H
#include "llvm/ADT/StringRef.h"
#include "swift/Basic/LLVM.h"
namespace swift {
/// Measures the newline sequence (if any) at the start of the buffer and
/// returns its length in bytes. NOTE(review): exact handling of the various
/// line-break forms lives in the out-of-line definition — confirm there.
unsigned measureNewline(const char *BufferPtr, const char *BufferEnd);

/// Convenience overload of \c measureNewline for a StringRef.
static inline unsigned measureNewline(StringRef S) {
  return measureNewline(S.data(), S.data() + S.size());
}

/// Returns true if \p S begins with a line break ("\n" or "\r\n").
static inline bool startsWithNewline(StringRef S) {
  return S.starts_with("\n") || S.starts_with("\r\n");
}
/// Breaks a given string to lines and trims leading whitespace from them.
void trimLeadingWhitespaceFromLines(StringRef Text, unsigned WhitespaceToTrim,
                                    SmallVectorImpl<StringRef> &Lines);

/// Splits \p Text into lines without trimming any leading whitespace
/// (implemented as trimming zero columns).
static inline void splitIntoLines(StringRef Text,
                                  SmallVectorImpl<StringRef> &Lines) {
  trimLeadingWhitespaceFromLines(Text, 0, Lines);
}
} // end namespace swift
#endif // SWIFT_BASIC_PRIMITIVEPARSING_H | c | github | https://github.com/apple/swift | include/swift/Basic/PrimitiveParsing.h |
import inspect
import torch
from torch.utils._pytree import register_pytree_node, SequenceKey
__all__ = ["pytree_register_structseq", "all_return_types"]
all_return_types = []
# error: Module has no attribute "_return_types"
return_types = torch._C._return_types # type: ignore[attr-defined]
def pytree_register_structseq(cls):
    """Register a structseq class (e.g. a torch return type) with pytree.

    A structseq is flattened to the plain list of its elements with no
    context, and rebuilt by passing that list back to the class constructor.
    """
    def structseq_flatten(structseq):
        # A structseq is tuple-like; its children are simply its elements.
        return list(structseq), None

    def structseq_flatten_with_keys(structseq):
        values, context = structseq_flatten(structseq)
        # Key each child by its positional index within the structseq.
        return [(SequenceKey(i), v) for i, v in enumerate(values)], context

    def structseq_unflatten(values, context):
        return cls(values)

    register_pytree_node(
        cls,
        structseq_flatten,
        structseq_unflatten,
        flatten_with_keys_fn=structseq_flatten_with_keys,
    )
# Re-export every attribute of torch._C._return_types from this module,
# collect the public ones in __all__/all_return_types, and register each
# structseq class with pytree.
for name in dir(return_types):
    if name.startswith("__"):
        continue

    _attr = getattr(return_types, name)
    globals()[name] = _attr

    # Underscore-prefixed names are re-exported but kept out of the public
    # __all__ / all_return_types lists.
    if not name.startswith("_"):
        __all__.append(name)
        all_return_types.append(_attr)

    # Today everything in torch.return_types is a structseq, aka a "namedtuple"-like
    # thing defined by the Python C-API. We're going to need to modify this when that
    # is no longer the case.
    # NB: I don't know how to check that something is a "structseq" so we do a fuzzy
    # check for tuple
    if inspect.isclass(_attr) and issubclass(_attr, tuple):
        pytree_register_structseq(_attr)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, division, print_function
from functools import wraps
#------------------------------------------------------------------------------
def memoize(max_cache_size=1000):
    """Python 2.4 compatible memoize decorator.

    It creates a cache that has a maximum size. If the cache exceeds the max,
    it is thrown out and a new one made. With such behavior, it is wise to set
    the cache just a little larger that the maximum expected need.

    Parameters:
      max_cache_size - the size to which a cache can grow
    """
    def wrapper(f):
        @wraps(f)
        def fn(*args, **kwargs):
            if kwargs:
                # Sort keyword items so that calls differing only in keyword
                # order share one cache entry (dict iteration order is
                # otherwise arbitrary on older Pythons).
                key = (args, tuple(sorted(kwargs.items())))
            else:
                key = args
            try:
                return fn.cache[key]
            except KeyError:
                # Cache miss: evict the whole cache when full (cheap policy,
                # per the docstring), then compute and store the result.
                if fn.count >= max_cache_size:
                    fn.cache = {}
                    fn.count = 0
                result = f(*args, **kwargs)
                fn.cache[key] = result
                fn.count += 1
                return result
            except TypeError:
                # Unhashable arguments cannot be cached; just call through.
                return f(*args, **kwargs)
        # Per-function cache state, exposed on the wrapper for inspection.
        fn.cache = {}
        fn.count = 0
        return fn
    return wrapper
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_COFF_BINARY_H
#define LIEF_COFF_BINARY_H
#include "LIEF/visibility.h"
#include "LIEF/iterators.hpp"
#include "LIEF/span.hpp"
#include "LIEF/COFF/String.hpp"
#include "LIEF/asm/Instruction.hpp"

#include <algorithm>
#include <cstdint>
#include <memory>
#include <ostream>
#include <string>
#include <unordered_map>
#include <vector>
namespace LIEF {
namespace assembly {
class Engine;
}
namespace COFF {
class Header;
class Parser;
class Section;
class Relocation;
class Symbol;
/// Class that represents a COFF Binary
/// Class that represents a COFF Binary
class LIEF_API Binary {
  public:
  friend class Parser;

  /// Internal container used to store COFF's section
  using sections_t = std::vector<std::unique_ptr<Section>>;

  /// Iterator that outputs Section& object
  using it_sections = ref_iterator<sections_t&, Section*>;

  /// Iterator that outputs const Section& object
  using it_const_sections = const_ref_iterator<const sections_t&, const Section*>;

  /// Internal container used to store COFF's relocations
  using relocations_t = std::vector<std::unique_ptr<Relocation>>;

  /// Iterator that outputs Relocation& object
  using it_relocations = ref_iterator<relocations_t&, Relocation*>;

  /// Iterator that outputs const Relocation& object
  using it_const_relocations = const_ref_iterator<const relocations_t&, const Relocation*>;

  /// Internal container used to store COFF's strings
  using strings_table_t = std::vector<String>;

  /// Iterator that outputs String& object
  using it_strings_table = ref_iterator<strings_table_t&>;

  /// Iterator that outputs const String& object
  using it_const_strings_table = const_ref_iterator<const strings_table_t&>;

  /// Internal container used to store COFF's symbols
  using symbols_t = std::vector<std::unique_ptr<Symbol>>;

  /// Iterator that outputs Symbol& object
  using it_symbols = ref_iterator<symbols_t&, Symbol*>;

  /// Iterator that outputs Symbol& object
  using it_const_symbols = const_ref_iterator<const symbols_t&, const Symbol*>;

  /// Instruction iterator
  using instructions_it = iterator_range<assembly::Instruction::Iterator>;

  /// Iterator which outputs COFF symbols representing functions
  using it_functions = filter_iterator<symbols_t&, Symbol*>;

  /// Iterator which outputs COFF symbols representing functions
  using it_const_function = const_filter_iterator<const symbols_t&, const Symbol*>;

  /// The COFF header
  const Header& header() const {
    return *header_;
  }

  Header& header() {
    return *header_;
  }

  /// Iterator over the different sections located in this COFF binary
  it_sections sections() {
    return sections_;
  }

  it_const_sections sections() const {
    return sections_;
  }

  /// Iterator over **all** the relocations used by this COFF binary
  it_relocations relocations() {
    return relocations_;
  }

  it_const_relocations relocations() const {
    return relocations_;
  }

  /// Iterator over the COFF's symbols
  it_symbols symbols() {
    return symbols_;
  }

  it_const_symbols symbols() const {
    return symbols_;
  }

  /// Iterator over the COFF's strings
  it_const_strings_table string_table() const {
    return strings_table_;
  }

  it_strings_table string_table() {
    return strings_table_;
  }

  /// Try to find the COFF string at the given offset in the COFF string table.
  ///
  /// \warning This offset must include the first 4 bytes holding the size of
  ///          the table. Hence, the first string starts a the offset 4.
  String* find_string(uint32_t offset) {
    // Linear scan: the table is small enough that no index is kept.
    auto it = std::find_if(strings_table_.begin(), strings_table_.end(),
      [offset] (const String& item) {
        return offset == item.offset();
      }
    );
    return it == strings_table_.end() ? nullptr : &*it;
  }

  const String* find_string(uint32_t offset) const {
    return const_cast<Binary*>(this)->find_string(offset);
  }

  /// Iterator over the functions implemented in this COFF
  it_const_function functions() const;

  it_functions functions();

  /// Try to find the function (symbol) with the given name
  const Symbol* find_function(const std::string& name) const;

  Symbol* find_function(const std::string& name) {
    return const_cast<Symbol*>(static_cast<const Binary*>(this)->find_function(name));
  }

  /// Try to find the function (symbol) with the given **demangled** name
  const Symbol* find_demangled_function(const std::string& name) const;

  Symbol* find_demangled_function(const std::string& name) {
    return const_cast<Symbol*>(static_cast<const Binary*>(this)->find_demangled_function(name));
  }

  /// Disassemble code for the given symbol
  ///
  /// ```cpp
  /// const Symbol* func = binary->find_demangled_function("int __cdecl my_function(int, int)");
  /// auto insts = binary->disassemble(*func);
  /// for (std::unique_ptr<assembly::Instruction> inst : insts) {
  ///   std::cout << inst->to_string() << '\n';
  /// }
  /// ```
  ///
  /// \see LIEF::assembly::Instruction
  instructions_it disassemble(const Symbol& symbol) const;

  /// Disassemble code for the given symbol name
  ///
  /// ```cpp
  /// auto insts = binary->disassemble("main");
  /// for (std::unique_ptr<assembly::Instruction> inst : insts) {
  ///   std::cout << inst->to_string() << '\n';
  /// }
  /// ```
  ///
  /// \see LIEF::assembly::Instruction
  instructions_it disassemble(const std::string& symbol) const;

  /// Disassemble code provided by the given buffer at the specified
  /// `address` parameter.
  ///
  /// \see LIEF::assembly::Instruction
  instructions_it disassemble(const uint8_t* buffer, size_t size,
                              uint64_t address = 0) const;

  /// Disassemble code provided by the given vector of bytes at the specified
  /// `address` parameter.
  ///
  /// \see LIEF::assembly::Instruction
  instructions_it disassemble(const std::vector<uint8_t>& buffer,
                              uint64_t address = 0) const {
    return disassemble(buffer.data(), buffer.size(), address);
  }

  instructions_it disassemble(LIEF::span<const uint8_t> buffer,
                              uint64_t address = 0) const {
    return disassemble(buffer.data(), buffer.size(), address);
  }

  instructions_it disassemble(LIEF::span<uint8_t> buffer, uint64_t address = 0) const {
    return disassemble(buffer.data(), buffer.size(), address);
  }

  /// Human-readable representation of this binary
  std::string to_string() const;

  LIEF_API friend std::ostream& operator<<(std::ostream& os, const Binary& bin) {
    os << bin.to_string();
    return os;
  }

  ~Binary();

  private:
  Binary();

  // Parsed structures; populated by the Parser (friend class).
  std::unique_ptr<Header> header_;
  sections_t sections_;
  relocations_t relocations_;
  strings_table_t strings_table_;
  symbols_t symbols_;

  // Lazily-created disassembler engines, cached per key; mutable so the
  // const disassemble() overloads can populate the cache.
  mutable std::unordered_map<uint32_t, std::unique_ptr<assembly::Engine>> engines_;
  assembly::Engine* get_engine(uint64_t address) const;

  template<uint32_t Key, class F>
  LIEF_LOCAL assembly::Engine* get_cache_engine(uint64_t address, F&& f) const;
};
}
}
#endif | unknown | github | https://github.com/nodejs/node | deps/LIEF/include/LIEF/COFF/Binary.hpp |
// Side-effect-only module: importing it enables the legacy-mode flag.
import { enable_legacy_mode_flag } from './index.js';
enable_legacy_mode_flag();
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service methods for typed instances."""
__author__ = 'Sean Lip'
import copy
import inspect
from extensions.objects.models import objects
class Registry(object):
    """Registry of all objects."""

    # Dict mapping object class names to their classes.
    objects_dict = {}

    @classmethod
    def _refresh_registry(cls):
        """Rebuild the name -> class mapping from the objects module."""
        cls.objects_dict.clear()

        # Register every class in the objects module that descends from
        # BaseObject, skipping BaseObject itself and test helpers.
        for member_name, member_cls in inspect.getmembers(
                objects, inspect.isclass):
            if member_name.endswith('_test') or member_name == 'BaseObject':
                continue

            ancestor_names = [
                ancestor.__name__ for ancestor in inspect.getmro(member_cls)]
            if 'BaseObject' not in ancestor_names:
                continue

            cls.objects_dict[member_cls.__name__] = member_cls

    @classmethod
    def get_all_object_classes(cls):
        """Get the dict of all object classes."""
        cls._refresh_registry()
        return copy.deepcopy(cls.objects_dict)

    @classmethod
    def get_object_class_by_type(cls, obj_type):
        """Gets an object class by its type. Types are CamelCased.

        Refreshes once if the class is not found; subsequently, throws an
        error.
        """
        if obj_type not in cls.objects_dict:
            cls._refresh_registry()
        if obj_type not in cls.objects_dict:
            raise TypeError('\'%s\' is not a valid object class.' % obj_type)
        return cls.objects_dict[obj_type]
def get_all_object_editor_js_templates():
    """Returns a string containing the JS templates for all objects.

    Only objects that declare an editor JS template contribute; fragments
    are concatenated in registry iteration order.
    """
    # dict.iteritems() is Python-2-only; .values() works on both Python 2
    # and 3 (the key was unused anyway). Collecting fragments and joining
    # once also avoids quadratic repeated string concatenation.
    template_fragments = []
    for obj_cls in Registry.get_all_object_classes().values():
        if obj_cls.has_editor_js_template():
            template_fragments.append(obj_cls.get_editor_js_template())
    return ''.join(template_fragments)
"""
Support for Insteon switch devices via local hub support.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.insteon_local/
"""
import json
import logging
import os
from datetime import timedelta
from homeassistant.components.switch import SwitchDevice
from homeassistant.loader import get_component
import homeassistant.util as util
# Pending configurator requests awaiting user input, keyed by Insteon device id.
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['insteon_local']
DOMAIN = 'switch'
# File (under the HA config dir) that persists the device-id -> name mapping.
INSTEON_LOCAL_SWITCH_CONF = 'insteon_local_switch.conf'
# Throttle bounds for InsteonLocalSwitchDevice.update().
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Insteon local switch platform.

    If a saved device->name mapping exists, set those switches up directly;
    otherwise query the hub for linked devices and ask the user (via the
    configurator) to name each switch-type device.
    """
    insteonhub = hass.data['insteon_local']
    conf_switches = config_from_file(hass.config.path(
        INSTEON_LOCAL_SWITCH_CONF))
    if len(conf_switches):
        # Previously configured devices: create entities without user input.
        for device_id in conf_switches:
            setup_switch(
                device_id, conf_switches[device_id], insteonhub, hass,
                add_devices)
    else:
        # No saved config: discover switch devices linked to the hub and
        # prompt the user to name each one.
        linked = insteonhub.get_linked()
        for device_id in linked:
            if linked[device_id]['cat_type'] == 'switch'\
                    and device_id not in conf_switches:
                request_configuration(device_id, insteonhub,
                                      linked[device_id]['model_name'] + ' ' +
                                      linked[device_id]['sku'],
                                      hass, add_devices)
def request_configuration(device_id, insteonhub, model, hass,
                          add_devices_callback):
    """Request configuration steps from the user.

    Opens a configurator card asking for a friendly name for the device;
    the callback then creates the entity via setup_switch().
    """
    configurator = get_component('configurator')
    # We got an error if this method is called while we are configuring
    if device_id in _CONFIGURING:
        configurator.notify_errors(
            _CONFIGURING[device_id], 'Failed to register, please try again.')
        return

    def insteon_switch_config_callback(data):
        """The actions to do when our configuration callback is called."""
        setup_switch(device_id, data.get('name'), insteonhub, hass,
                     add_devices_callback)

    # Remember the request id so setup_switch() can close the card later.
    _CONFIGURING[device_id] = configurator.request_config(
        hass, 'Insteon Switch ' + model + ' addr: ' + device_id,
        insteon_switch_config_callback,
        description=('Enter a name for ' + model + ' addr: ' + device_id),
        entity_picture='/static/images/config_insteon.png',
        submit_caption='Confirm',
        fields=[{'id': 'name', 'name': 'Name', 'type': ''}]
    )
def setup_switch(device_id, name, insteonhub, hass, add_devices_callback):
    """Create the switch entity, persisting its name and closing any
    open configurator request for this device first."""
    # Dismiss the configurator card if one was open for this device.
    if device_id in _CONFIGURING:
        pending_request = _CONFIGURING.pop(device_id)
        get_component('configurator').request_done(pending_request)
        _LOGGER.info("Device configuration done!")

    conf_path = hass.config.path(INSTEON_LOCAL_SWITCH_CONF)
    conf_switch = config_from_file(conf_path)
    if device_id not in conf_switch:
        # First time we see this device: persist its friendly name.
        conf_switch[device_id] = name
        if not config_from_file(conf_path, conf_switch):
            _LOGGER.error("Failed to save configuration file")

    add_devices_callback(
        [InsteonLocalSwitchDevice(insteonhub.switch(device_id), name)])
def config_from_file(filename, config=None):
    """Small configuration file management function.

    With ``config`` given: write it to ``filename`` as JSON; return True
    on success, False on failure.  Without ``config``: read the file and
    return the parsed dict; an absent, unreadable or corrupt file yields
    an empty dict, so callers can always treat the result as a mapping
    (the original returned False here, which crashed callers that do
    ``len(result)`` or ``device_id in result``).
    """
    if config:
        # We're writing configuration
        try:
            with open(filename, 'w') as fdesc:
                fdesc.write(json.dumps(config))
        except IOError as error:
            _LOGGER.error("Saving configuration file failed: %s", error)
            return False
        return True
    # We're reading config
    if not os.path.isfile(filename):
        return {}
    try:
        with open(filename, 'r') as fdesc:
            return json.loads(fdesc.read())
    except (IOError, ValueError) as error:
        # ValueError covers corrupt/partial JSON; fall back to "no config".
        _LOGGER.error("Reading config file failed: %s", error)
        return {}
class InsteonLocalSwitchDevice(SwitchDevice):
    """An abstract Class for an Insteon node."""

    def __init__(self, node, name):
        """Initialize the device."""
        self.node = node
        self.node.deviceName = name
        # Cached on/off state; refreshed by update() and set optimistically
        # by turn_on()/turn_off().
        self._state = False

    @property
    def name(self):
        """Return the name of the node."""
        return self.node.deviceName

    @property
    def unique_id(self):
        """Return the ID of this Insteon node."""
        return 'insteon_local_{}'.format(self.node.device_id)

    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update(self):
        """Get the updated status of the switch."""
        resp = self.node.status(0)
        if 'cmd2' in resp:
            # cmd2 is a hex string carrying the on-level; any non-zero
            # level counts as "on".
            self._state = int(resp['cmd2'], 16) > 0

    @property
    def is_on(self):
        """Return the boolean response if the node is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn device on."""
        self.node.on()
        self._state = True

    def turn_off(self, **kwargs):
        """Turn device off."""
        self.node.off()
        self._state = False
import unittest, random, sys, time, os
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i, h2o_exec as h2e
# FIX!. enums may only work if 0 based
# try -5,5 etc
# maybe call GLM on it after doing factor (check # of coefficients)
DO_CASE = 1
# Number of chunks to rebalance the parsed frame into (artificially high).
REBALANCE_CHUNKS = 100
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
    """Write a rowCount x colCount CSV of small ints to csvPathname.

    Values are int(triangular(0, 4, 2.5)), i.e. integers in [0, 4] biased
    toward 2; output is deterministic for a given SEED.
    """
    r1 = random.Random(SEED)
    # 'with' guarantees the file is closed even on error (the original
    # opened with "w+" and closed manually, and computed an unused row sum).
    with open(csvPathname, "w") as dsf:
        for _ in range(rowCount):
            rowData = [int(r1.triangular(0, 4, 2.5)) for _ in range(colCount)]
            dsf.write(",".join(str(v) for v in rowData) + "\n")
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(3)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rebalance_int2enum(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100000, 30, 'cC', 100),
]
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_' + str(SEEDPERFILE) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "\nCreating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=20)
hex_key=parseResult['destination_key']
inspect = h2o_cmd.runInspect(key=hex_key)
print "\n" + csvFilename
print "Rebalancing it to create an artificially large # of chunks"
rb_key = "rb_%s" % (hex_key)
start = time.time()
print "Rebalancing %s to %s with %s chunks" % (hex_key, rb_key, REBALANCE_CHUNKS)
rebalanceResult = h2o.nodes[0].rebalance(source=hex_key, after=rb_key, chunks=REBALANCE_CHUNKS)
elapsed = time.time() - start
print "rebalance end on ", csvFilename, 'took', elapsed, 'seconds',\
print "Now doing to_enum across all columns of %s" % hex_key
for column_index in range(colCount):
# is the column index 1-base in to_enum
result = h2o.nodes[0].to_enum(None, src_key=hex_key, column_index=column_index+1)
# print "\nto_enum result:", h2o.dump_json(result)
summaryResult = h2o_cmd.runSummary(key=hex_key)
# check that it at least is an enum column now, with no na's
# just look at the column we touched
column = summaryResult['summaries'][column_index]
colname = column['colname']
coltype = column['type']
nacnt = column['nacnt']
stats = column['stats']
stattype = stats['type']
cardinality = stats['cardinality']
if stattype != 'Enum':
raise Exception("column %s, which has name %s, didn't convert to Enum, is %s %s" (column_index, colname, stattype, coltype))
if nacnt!=0:
raise Exception("column %s, which has name %s, somehow got NAs after convert to Enum %s" (column_index, colname, nacnt))
if cardinality!=4:
raise Exception("column %s, which has name %s, should have cardinality 4, got: %s" (column_index, colname, cardinality))
h2o_cmd.infoFromSummary(summaryResult)
# Allow running this test file directly via h2o's unittest wrapper.
if __name__ == '__main__':
    h2o.unit_main()
@use '@angular/docs/styles/media-queries' as mq;

// Shared width for the version-select button and its dropdown panel.
$ver-dropdown-width: 200px;

:host {
  display: flex;
  flex-flow: column;
  align-items: center;
  padding: var(--layout-padding) 0px;
  container: update-guide-page / inline-size;
  .docs-viewer {
    padding-inline: var(--layout-padding);
    //applying styles when TOC position got translated to the top right
    @include mq.for-large-desktop-up {
      // take the available space except a reserved area for TOC
      margin-left: -16rem;
      width: calc(100% - 16rem);
      box-sizing: border-box;
    }
    .page-header {
      margin-top: 0px;
    }
  }
}
// Main content column; direct children lose their inline padding on
// extra-large screens.
.page {
  max-width: var(--page-width);
  & > * {
    @include mq.for-extra-large-desktop-up {
      padding-inline: 0px;
    }
  }
  @include mq.for-tablet-landscape-down {
    width: 100%;
  }
}

h3,
h4 {
  margin-block-start: 2em;
}

// Version-range picker form at the top of the guide.
.wizard {
  padding-inline: 1rem;
  .show-button {
    display: block;
    margin-block-start: 2rem;
  }
}

.adev-version-selector {
  display: flex;
  gap: 1rem;
}
// Overlay panel listing the selectable versions.
.adev-template-dropdown {
  border: 1px solid var(--senary-contrast);
  border-radius: 0.25rem;
  padding: 0;
  transform: translateY(-0.7rem);
  max-height: 200px;
  overflow-y: auto;
  width: $ver-dropdown-width;
  box-sizing: border-box;
  background: var(--page-background);
  li {
    list-style: none;
    box-sizing: border-box;
    button {
      background: var(--page-background);
      font-size: 0.875rem;
      width: 100%;
      text-align: left;
      padding-block: 0.5rem;
      color: var(--quaternary-contrast);
      transition:
        color 0.3s ease,
        background 0.3s ease;
      font-weight: 400;
      &:hover {
        background: var(--senary-contrast);
        color: var(--primary-contrast);
      }
    }
  }
}

// Trigger button that opens the dropdown above.
.adev-template-select {
  margin-block-end: 0.5rem;
  // cdk select button
  button {
    font-size: 0.875rem;
    border: 1px solid var(--senary-contrast);
    border-radius: 0.25rem;
    width: $ver-dropdown-width;
    display: inline-flex;
    justify-content: space-between;
    align-items: center;
    padding-block: 0.5rem;
    font-weight: 400;
    transition: border 0.3s ease;
    span {
      color: var(--primary-contrast);
      transition: color 0.3s ease;
      margin-inline-start: 0.1rem;
    }
    docs-icon {
      font-size: 1.3rem;
      color: var(--quaternary-contrast);
      transition: color 0.3s ease;
    }
  }
}
// One migration recommendation row: checkbox + text + complexity badge.
.adev-recommendation-item {
  display: flex;
  mat-checkbox {
    margin-top: 0.5rem;
  }
  .adev-recommendation-content {
    flex: 1;
    display: flex;
    align-items: center;
    justify-content: space-between;
    gap: 1rem;
  }
  .adev-complexity-badge {
    display: inline-block;
    padding: 0.25rem 0.5rem;
    border-radius: 0.25rem;
    font-size: 0.75rem;
    font-weight: 500;
    text-transform: uppercase;
    white-space: nowrap;
    flex-shrink: 0;
    // Badge tint is driven by --badge-color set in the modifier classes below.
    --badge-color: var(--primary-text, black);
    color: var(--badge-color);
    background: color-mix(in srgb, var(--badge-color) 10%, var(--page-background));
    .docs-dark-mode & {
      background: color-mix(in srgb, var(--badge-color) 17%, var(--page-background));
    }
    @include mq.for-tablet-landscape-down {
      display: none;
    }
  }
  .adev-complexity-1 { --badge-color: var(--super-green); }
  .adev-complexity-2 { --badge-color: var(--bright-blue); }
  .adev-complexity-3 { --badge-color: var(--symbolic-orange); }
  // Code blocks are generable from the markdown, we need to opt-out of the scoping
  ::ng-deep code {
    cursor: pointer;
  }
}
# Copyright 2025 Westlake Representational Learning Lab (Fajie Yuan Lab) team and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evolla model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
# Module-level logger (HF logging wrapper).
logger = logging.get_logger(__name__)
class SaProtConfig(PreTrainedConfig):
    r"""This is the configuration class to store the configuration of a [`EvollaSaProtProteinEncoder`]. It is used to instantiate a
    SaProt model according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 446):
            Vocabulary size of the protein sequence model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`EvollaModel`].
        mask_token_id (`int`, *optional*, defaults to 4):
            The id of the *mask* token in the protein sequence model.
        pad_token_id (`int`, *optional*, defaults to 1):
            The id of the *padding* token in the protein sequence model.
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the protein sequence model layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 33):
            Number of hidden layers in the protein sequence model.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the protein sequence model.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimensionality of the intermediate layers in the protein sequence model.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the hidden layers in the protein sequence model.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities in the protein sequence model.
        max_position_embeddings (`int`, *optional*, defaults to 1026):
            The maximum sequence length that the protein sequence model might ever be used with. Typically set this to
            something large just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value for the layer normalization layer in the protein sequence model.
        position_embedding_type (`str`, *optional*, defaults to `"rotary"`):
            The type of position embedding to use in the protein sequence model. Currently only `"rotary"` is supported.
        emb_layer_norm_before (`bool`, *optional*, defaults to `False`):
            Whether to apply layer normalization before the position embedding in the protein sequence model.
        token_dropout (`bool`, *optional*, defaults to `True`):
            Whether to apply dropout to the tokens in the protein sequence model.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder.
        add_cross_attention (`bool`, *optional*, defaults to `False`):
            Whether cross-attention layers should be added to the model."""

    def __init__(
        self,
        vocab_size=446,
        mask_token_id=4,
        pad_token_id=1,
        hidden_size=1280,
        num_hidden_layers=33,
        num_attention_heads=20,
        intermediate_size=5120,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        position_embedding_type="rotary",
        emb_layer_norm_before=False,
        token_dropout=True,
        is_decoder=False,
        add_cross_attention=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.is_decoder = is_decoder
        self.add_cross_attention = add_cross_attention
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
class EvollaConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`EvollaModel`]. It is used to instantiate an
    Evolla model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Evolla-10B.
    e.g. [westlake-repl/Evolla-10B-hf](https://huggingface.co/westlake-repl/Evolla-10B-hf)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        protein_encoder_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`SaProtConfig`].
        vocab_size (`int`, *optional*, defaults to 128256):
            Vocabulary size of the Evolla llama model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`EvollaModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the llama layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimensionality of the intermediate layers in the llama model.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the llama model.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the llama model.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            Number of key-value pairs for each attention layer in the llama model.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the llama model. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"silu"` are supported.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value for the RMS-norm layer in the llama model.
        rope_parameters (`RopeParameters` or `dict`, *optional*):
            The RoPE configuration (scaling/theta) for the llama model.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the attention layer.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention layer.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the MLP layer.
        aligner_ffn_mult (`int`, *optional*, defaults to 4):
            The FFN multiplier for the aligner layer.
        aligner_enable_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the aligner layer.
        aligner_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities in the aligner layer.
        aligner_num_add_layers (`int`, *optional*, defaults to 8):
            The number of additional layers for the aligner layer.
        resampler_depth (`int`, *optional*, defaults to 6):
            The depth of the resampler layer in the llama model.
        resampler_dim_head (`int`, *optional*, defaults to 64):
            The dimension of the heads in the resampler layer in the llama model.
        resampler_heads (`int`, *optional*, defaults to 8):
            The number of heads in the resampler layer in the llama model.
        resampler_num_latents (`int`, *optional*, defaults to 64):
            The number of latents in the resampler layer in the llama model.
        resampler_ff_mult (`int`, *optional*, defaults to 4):
            The FFN multiplier for the resampler layer.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        pad_token_id (`int`, *optional*):
            The id of the *padding* token.
        bos_token_id (`int`, *optional*, defaults to 128000):
            The id of the *beginning-of-sequence* token.
        eos_token_id (`int`, *optional*, defaults to 128009):
            The id of the *end-of-sequence* token.
        use_cache (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the input and output word embeddings.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether to only use the decoder in an encoder-decoder architecture, otherwise it has no effect on
            decoder-only or encoder-only architectures.
        add_cross_attention (`bool`, *optional*, defaults to `False`):
            Whether cross-attention layers should be added to the model.
    Example:
    ```python
    >>> from transformers import EvollaModel, EvollaConfig
    >>> # Initializing a Evolla evolla-10b style configuration
    >>> configuration = EvollaConfig()
    >>> # Initializing a model from the evolla-10b style configuration
    >>> model = EvollaModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "EvollaModel"
    sub_configs = {"protein_encoder_config": SaProtConfig}
    # Fallback RoPE base frequency when rope_parameters does not supply one.
    default_theta = 500000.0

    def __init__(
        self,
        protein_encoder_config: dict | None = None,
        vocab_size: int | None = 128256,  # llama vocab size
        hidden_size: int | None = 4096,  # llama hidden size
        intermediate_size: int | None = 14336,  # llama intermediate size
        num_hidden_layers: int | None = 32,  # llama num layers
        num_attention_heads: int | None = 32,  # llama num heads
        num_key_value_heads: int | None = 8,  # llama num key-value heads
        hidden_act: str | None = "silu",  # llama activation function
        max_position_embeddings: int | None = 8192,  # llama rope max length
        rms_norm_eps: int | None = 1e-05,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        mlp_bias: bool | None = False,
        aligner_ffn_mult: int | None = 4,
        aligner_enable_bias: bool | None = True,
        aligner_attention_probs_dropout_prob: float | None = 0.1,
        aligner_num_add_layers: int | None = 8,
        resampler_depth: int | None = 6,
        resampler_dim_head: int | None = 64,
        resampler_heads: int | None = 8,
        resampler_num_latents: int | None = 64,
        resampler_ff_mult: int | None = 4,
        initializer_range: float | None = 0.02,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 128000,
        eos_token_id: int | None = 128009,
        use_cache: bool | None = False,
        tie_word_embeddings: bool | None = False,
        is_decoder: bool | None = False,
        add_cross_attention: bool | None = False,
        **kwargs,
    ):
        self.is_decoder = is_decoder
        self.add_cross_attention = add_cross_attention
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.rms_norm_eps = rms_norm_eps
        self.tie_word_embeddings = tie_word_embeddings
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.aligner_ffn_mult = aligner_ffn_mult
        self.aligner_enable_bias = aligner_enable_bias
        self.aligner_attention_probs_dropout_prob = aligner_attention_probs_dropout_prob
        self.aligner_num_add_layers = aligner_num_add_layers
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.resampler_depth = resampler_depth
        self.resampler_dim_head = resampler_dim_head
        self.resampler_heads = resampler_heads
        self.resampler_num_latents = resampler_num_latents
        self.resampler_ff_mult = resampler_ff_mult
        self.rope_parameters = rope_parameters

        # Subconfig
        if protein_encoder_config is None:
            protein_encoder_config = {}
            logger.info("`protein_encoder_config` is `None`. Initializing the `SaProtConfig` with default values.")
        self.protein_encoder_config = SaProtConfig(**protein_encoder_config)

        # (A duplicate `self.tie_word_embeddings = tie_word_embeddings`
        # assignment that previously lived here was removed; the attribute
        # is already set above.)
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(**kwargs)
# Public export surface of this module.
__all__ = ["EvollaConfig"]
#!/usr/bin/env python
# Copyright (c) 2010 ActiveState Software Inc.
# See LICENSE.txt for license details.
"""Less support for CodeIntel"""
import logging
from codeintel2.common import _xpcom_
from codeintel2.lang_css import CSSLexer, CSSLangIntel, CSSBuffer
from codeintel2.lang_css import isident, WHITESPACE
from codeintel2.accessor import AccessorCache
from codeintel2.common import Trigger, TRG_FORM_CPLN, TRG_FORM_CALLTIP
from codeintel2.util import OrdPunctLast
# ... and a whole lot more?
if _xpcom_:
from xpcom.server import UnwrapObject
#---- globals
log = logging.getLogger("codeintel.less")
# log.setLevel(logging.DEBUG)
#---- language support
class LessLexer(CSSLexer):
    """CSS lexer configured for Less syntax."""

    # This must be defined as "Less" in order to get autocompletion working.
    lang = "Less"

    def __init__(self):
        CSSLexer.__init__(self)
        # Switch the underlying Scintilla CSS lexer into Less mode.
        self._properties['lexer.css.less.language'] = '1'
class SCSSLexer(CSSLexer):
    """CSS lexer configured for SCSS syntax."""

    # This must be defined as "SCSS" in order to get autocompletion working.
    lang = "SCSS"

    def __init__(self):
        CSSLexer.__init__(self)
        # Switch the underlying Scintilla CSS lexer into SCSS mode.
        self._properties['lexer.css.scss.language'] = '1'
class SassLexer(CSSLexer):
    """CSS lexer configured for Sass (indented) syntax."""

    # This must be defined as "Sass" in order to get autocompletion working.
    lang = "Sass"

    def __init__(self):
        CSSLexer.__init__(self)
        # self._properties.setProperty('lexer.css.sass.language', '1')
        # Switch the underlying Scintilla CSS lexer into Sass mode.
        self._properties['lexer.css.sass.language'] = '1'
# Module-wide switch for the verbose trigger-evaluation debugging below.
DebugStatus = False


def _OrdPunctLastOnSecondItem(value):
    # Sort key helper: applies OrdPunctLast to the second element of a pair.
    return OrdPunctLast(value[1])
class _NestedCSSLangIntel(CSSLangIntel):
    def _trg_from_pos(self, buf, pos, implicit=True, DEBUG=False, ac=None, styleClassifier=None):
        """Determine the completion/calltip trigger (if any) at `pos`.

        Walks backwards through the buffer via an AccessorCache, dispatching
        on the lexer style of the preceding character. Returns a Trigger or
        None. NOTE(review): largely mirrors the CSS implementation, but
        ambiguous contexts fall through to "tag-or-property-names" because
        Less/SCSS allow nested rules.
        """
        # DEBUG = True # not using 'logging' system, because want to be fast
        if DEBUG:
            print("\n----- %s _trg_from_pos(pos=%r, implicit=%r) -----"\
                  % (self.lang, pos, implicit))
        try:
            if pos == 0:
                return None
            if ac is None:
                ac = AccessorCache(buf.accessor, pos, fetchsize=50)
            else:
                ac.resetToPosition(pos)
            # Ensure this variable is initialized as False, it is used by UDL
            # for checking if the css style is inside of a html tag, example:
            #   <p style="mycss: value;" />
            # When it's found that it is such a case, this value is set True
            ac.is_html_style_attribute = False
            last_pos, last_char, last_style = ac.getPrevPosCharStyle()
            if DEBUG:
                print(" _trg_from_pos:: last_pos: %s" % last_pos)
                print(" last_char: %r" % last_char)
                print(" last_style: %s" % last_style)
            # The easy ones are triggering after any of '#.[: '.
            # For speed, let's get the common ' ' out of the way. The only
            # trigger on space is 'complete-property-values'.
            if styleClassifier.is_default(last_style):
                if DEBUG:
                    print(" _trg_from_pos:: Default style: %d, ch: %r" % (last_style, last_char))
                # Move backwards resolving ambiguity, default on "property-
                # values"
                min_pos = max(0, pos - 200)
                while last_pos > min_pos:
                    last_pos, last_char, last_style = ac.getPrevPosCharStyle()
                    if styleClassifier.is_operator(last_style, ac) or styleClassifier.is_value(last_style, ac):
                        if DEBUG:
                            print(" _trg_from_pos: space => property-values")
                        return Trigger("CSS", TRG_FORM_CPLN, "property-values",
                                       pos, implicit, extra={"ac": ac})
                    elif styleClassifier.is_tag(last_style, ac):
                        # Now we need to move further back to see which
                        # region we're in.
                        if DEBUG:
                            print(" _trg_from_pos: space => tag-names")
                        return self._get_property_name_trigger_check_context(ac, styleClassifier, pos, implicit)
                    elif styleClassifier.is_identifier(last_style, ac):
                        if DEBUG:
                            print(" _trg_from_pos: space => property-names")
                        return Trigger(
                            self.lang, TRG_FORM_CPLN, "tag-or-property-names",
                            pos, implicit, extra={"ac": ac})
                if DEBUG:
                    print(" _trg_from_pos: couldn't resolve space, settling on property-names")
                return Trigger("CSS", TRG_FORM_CPLN, "property-values",
                               pos, implicit, extra={"ac": ac})
            elif styleClassifier.is_operator(last_style, ac):
                # anchors
                if DEBUG:
                    print(" _trg_from_pos:: OPERATOR style")
                if last_char == '#':
                    return Trigger("CSS", TRG_FORM_CPLN, "anchors",
                                   pos, implicit, extra={"ac": ac})
                elif last_char == ':':
                    # ':' is either a pseudo-class selector or a
                    # property/value separator — disambiguate by the style
                    # of what precedes it.
                    try:
                        p, ch, style = ac.getPrevPosCharStyle(
                            ignore_styles=styleClassifier.ignore_styles)
                        if DEBUG:
                            print(" _trg_from_pos:: Looking at p: %d, ch: %r, style: %d" % (p, ch, style))
                    except IndexError:
                        style = None
                    if DEBUG:
                        print(" _trg_from_pos:: style: %r" % (style))
                    if style is None or \
                       not styleClassifier.is_identifier(style, ac):
                        # if style is None or \
                        #    not styleClassifier.is_css_style(style) or \
                        #    styleClassifier.is_class(style, ac):
                        # complete for pseudo-class-names
                        return Trigger(
                            "CSS", TRG_FORM_CPLN, "pseudo-class-names",
                            pos, implicit, extra={"ac": ac})
                    else:
                        # if styleClassifier.is_identifier(style, ac):
                        # calltip for property-values
                        return Trigger(
                            "CSS", TRG_FORM_CALLTIP, "property-values",
                            pos, implicit, extra={"ac": ac})
                # class-names
                elif last_char == '.':
                    return Trigger("CSS", TRG_FORM_CPLN, "class-names",
                                   pos, implicit, extra={"ac": ac})
                # at-rule
                elif last_char == '@':
                    # p, ch, style = ac.getPrevPosCharStyle(ignore_styles=styleClassifier.comment_styles)
                    # XXX - Should check not beyond first rule set
                    #     - Should check not within a rule block.
                    return Trigger("CSS", TRG_FORM_CPLN, "at-rule",
                                   pos, implicit, extra={"ac": ac})
            # Not quite like CSS: don't handle </
            # tag-names
            elif styleClassifier.is_tag(last_style, ac):
                # We trigger on tag names of specified length >= 1 char
                if DEBUG:
                    print(" _trg_from_pos:: TAG style")
                p, ch, style = last_pos, last_char, last_style
                try:
                    # Scan back to the start of the tag word.
                    while p >= 0:
                        if DEBUG:
                            print(" _trg_from_pos:: Looking at p: %d, ch: %r, style: %d" % (p, ch, style))
                        if not isident(ch):
                            p += 1
                            break
                        elif style != last_style:
                            if DEBUG:
                                print(" _trg_from_pos:: Current style is not a tag: %d" % (style))
                            return None
                        p, ch, style = ac.getPrevPosCharStyle()
                except IndexError:
                    p = 0
                return self._get_property_name_trigger_check_context(ac, styleClassifier, p, implicit)
            elif styleClassifier.is_identifier(last_style, ac):
                if DEBUG:
                    print(" _trg_from_pos:: IDENTIFIER style")
                # property-names
                # print "here", accessor.text_range(0, pos)
                # We trigger on identifier names with any length >= 1 char
                pos = last_pos
                while pos >= 0:
                    pos, ch, style = ac.getPrevPosCharStyle()
                    if not isident(ch):
                        break
                    elif style != last_style:
                        return None
                return self._get_property_name_trigger_check_context(ac, styleClassifier, pos + 1, implicit)
            elif styleClassifier.is_value(last_style, ac):
                p, ch, style = ac.getPrevPosCharStyle(
                    ignore_styles=styleClassifier.comment_styles)
                if DEBUG:
                    print(" _trg_from_pos:: VALUE style")
                    print(" _trg_from_pos:: p: %s" % p)
                    print(" _trg_from_pos:: ch: %r" % ch)
                    print(" _trg_from_pos:: style: %s" % style)
                    ac.dump()
                # Implicit triggering only happens on a whitespace character
                # after any one of these ":,%) " characters
                # Note: last_char can be a value style yet also be whitespace
                #       in straight CSS.
                if last_char in WHITESPACE:
                    return Trigger("CSS", TRG_FORM_CPLN, "property-values",
                                   last_pos+1, implicit, extra={"ac": ac})
                elif ch in WHITESPACE or ch in ":,%)":
                    # Check to ensure this is not a pseudo-class! Bug:
                    #   http://bugs.activestate.com/show_bug.cgi?id=71073
                    if ch == ":":
                        # Last style must be an identifier then!
                        pp, pch, pstyle = ac.getPrevPosCharStyle(
                            ignore_styles=styleClassifier.ignore_styles)
                        if DEBUG:
                            print("pp: %d, pch: %r, pstyle: %d" % (pp, pch,
                                                                   pstyle))
                        if not styleClassifier.is_identifier(pstyle, ac):
                            # This is likely a pseudo-class definition then,
                            # no trigger here.
                            if DEBUG:
                                print("pseudo-class style found, no trigger.")
                            return None
                    return Trigger("CSS", TRG_FORM_CPLN, "property-values",
                                   p+1, implicit, extra={"ac": ac})
                # For explicit, we can also be inside a property already
                if not implicit and isident(ch):
                    # If there is already part of a value there, we need to move
                    # the trigger point "p" to the start of the value.
                    while isident(ch):
                        p, ch, style = ac.getPrevPosCharStyle()
                    return Trigger("CSS", TRG_FORM_CPLN, "property-values",
                                   p+1, implicit, extra={"ac": ac})
                return None
            elif DEBUG:
                print(" _trg_from_pos:: Unexpected style: %d, ch: %r" % (last_style, last_char))
            # XXX "at-property-names" - Might be used later
            # elif last_style == SCE_CSS_DIRECTIVE:
            #     # property-names
            #     # We trigger on identifier names with length == 3
            #     #print "here", accessor.text_range(0, pos)
            #     if pos >= 4 and accessor.char_at_pos(pos - 4) == ' ' and \
            #        self._is_ident_of_length(accessor, pos, length=3):
            #         # We are good for completion
            #         if DEBUG:
            #             print "Got a trigger for 'at-property-names'"
            #         return Trigger("CSS", TRG_FORM_CPLN, "at-property-names",
            #                        pos-3, implicit, extra={"ac": ac})
        except IndexError:
            # Wen't out of range of buffer before we found anything useful
            pass
        if DEBUG:
            print("----- CSS trg_from_pos() -----")
        return None
    def _get_property_name_trigger_check_context(self, ac,
                                                 styleClassifier, pos, implicit):
        # Decide what kind of name completion fits at `pos` by scanning
        # backwards (at most 200 chars): tag names at the top level,
        # tag-or-property names inside a rule block.
        min_pos = pos - 200
        if min_pos < 1:
            min_pos = 1
        try:
            ac.resetToPosition(pos)
        except IndexError:
            # We're at the start, so return tags only
            return Trigger("CSS", TRG_FORM_CPLN, "tag-names",
                           pos, implicit, extra={"ac": ac})
        # States:
        #
        last_pos, last_ch, last_style = ac.getCurrentPosCharStyle()
        # print "_get_property_name_trigger_check_context: last_pos:%d,
        # last_ch:%c, last_style:%d" % (last_pos, last_ch, last_style)
        cpln_type = None
        p = last_pos
        while p > min_pos:
            try:
                p, ch, style = ac.getPrevPosCharStyle()
            except IndexError:
                # NOTE(review): this resets p to the original last_pos, which
                # is never updated inside the loop; if getPrevPosCharStyle
                # keeps raising, the loop may not terminate - confirm.
                p, ch, style = last_pos, last_ch, last_style
            if ch == '\n' and styleClassifier.is_default(style):
                # Main heuristic: if the tag starts on col 1, assume we're at
                # the top-level
                if (styleClassifier.is_tag(last_style)
                        or styleClassifier.is_operator(last_style)):
                    cpln_type = "tag-names"
                    break
                elif styleClassifier.is_default(last_style):
                    cpln_type = "tag-or-property-names"
                    break
            elif ch == '{' and styleClassifier.is_operator(style):
                # Inside a declaration block; nested rules are possible, so
                # both tags and properties are plausible completions.
                cpln_type = "tag-or-property-names"
                break
            if p < min_pos:
                break
            last_ch = ch
            last_style = style
        if cpln_type is None:
            # Ran out of scan window without a decision; fall back on position.
            if p <= 0:
                cpln_type = "tag-names"
            else:
                cpln_type = "tag-or-property-names"
        if cpln_type == "tag-or-property-names":
            lang = self.lang
        else:
            lang = "CSS"  # Use the parent class.
        return Trigger(lang, TRG_FORM_CPLN, cpln_type,
                       pos, implicit, extra={"ac": ac})
    def _async_eval_at_trg(self, buf, trg, ctlr, styleClassifier):
        # Resolve the ambiguous "tag-or-property-names" trigger by offering
        # both CSS property names and HTML tag names; any other trigger is
        # delegated to the plain CSS implementation.
        if _xpcom_:
            trg = UnwrapObject(trg)
            ctlr = UnwrapObject(ctlr)
        # Handle ambiguous property-names here
        DEBUG = DebugStatus
        # DEBUG = True
        if DEBUG:
            print("Less: _async_eval_at_trg: trg: %s(%r)" % (trg, trg))
        if trg.id != (self.lang, TRG_FORM_CPLN, "tag-or-property-names"):
            # Not our special case - let the base CSS langintel handle it.
            CSSLangIntel._async_eval_at_trg(
                self, buf, trg, ctlr, styleClassifier)
            return
        if DEBUG:
            print("\n----- async_eval_at_trg(trg=%r) -----"\
                  % (trg))
        # Setup the AccessorCache
        extra = trg.extra
        ac = None
        # print "Extra: %r" % (extra)
        if isinstance(extra, dict):
            extra = extra.get("extra", None)
            if isinstance(extra, dict):
                ac = extra.get("ac", None)
                if ac and DEBUG:
                    print(" _async_eval_at_trg:: Trigger had existing AC")
                    ac.dump()
        if ac is None:
            if DEBUG:
                print(" _async_eval_at_trg:: Created new trigger!")
            ac = AccessorCache(buf.accessor, trg.pos, fetchsize=20)
        ctlr.start(buf, trg)
        pos = trg.pos
        try:
            # Merge property names (with trailing ": ") and HTML tag names
            # into one sorted completion list.
            cplns1 = [("property", v + ": ") for v in self.CSS_PROPERTY_NAMES]
            cplns2 = [("element", v) for v in self.CSS_HTML_TAG_NAMES]
            cplns = sorted(cplns1 + cplns2, key=_OrdPunctLastOnSecondItem)
            # Note: we add the colon as well - see bug 89913.
            ctlr.set_cplns(cplns)
            # print " _async_eval_at_trg:: cplns:", cplns
            ctlr.done("success")
            trg.retriggerOnCompletion = True
        except IndexError:
            # Tried to go out of range of buffer, nothing appropriate found
            if DEBUG:
                print(" _async_eval_at_trg:: ** Out of range error **")
            ctlr.done("success")
class _NestedSassLangIntel(_NestedCSSLangIntel):
    """ The difference here is that we don't want to put up triggers in
    the leading whitespace of a line.
    """
    def _trg_from_pos(self, buf, pos, implicit=True, DEBUG=False, ac=None, styleClassifier=None):
        # Suppress triggers while the caret is still in a line's leading
        # whitespace (indentation is significant in Sass); otherwise defer
        # to the nested-CSS implementation.
        # DEBUG = True # not using 'logging' system, because want to be fast
        if DEBUG:
            print("\n----- %s _trg_from_pos(pos=%r, implicit=%r) -----"\
                  % (self.lang, pos, implicit))
        try:
            if pos == 0:
                return None
            if ac is None:
                ac = AccessorCache(buf.accessor, pos, fetchsize=50)
            else:
                ac.resetToPosition(pos)
            # Ensure this variable is initialized as False, it is used by UDL
            # for checking if the css style is inside of a html tag, example:
            #   <p style="mycss: value;" />
            # When it's found that it is such a case, this value is set True
            ac.is_html_style_attribute = False
            last_pos, last_char, last_style = ac.getPrevPosCharStyle()
            if DEBUG:
                print(" _trg_from_pos:: last_pos: %s" % last_pos)
                print(" last_char: %r" % last_char)
                print(" last_style: %s" % last_style)
            # All we want to know with sass is if we're in the white-space on
            # or after the start of a line. If yes, don't trigger, because
            # the user might want to just type more spaces.
            if styleClassifier.is_default(last_style):
                if DEBUG:
                    print(" _trg_from_pos:: Default style: %d, ch: %r" % (last_style, last_char))
                if last_char == '\n':
                    # SASS: we don't want to put up a box until we start typing
                    # something.
                    if DEBUG:
                        print("Found \\n at current pos, don't trigger.")
                    return None
                # Walk further back (bounded window): hitting a newline while
                # still in default style means we are in leading whitespace.
                min_pos = max(0, pos - 200)
                while last_pos > min_pos:
                    last_pos, last_char, last_style = ac.getPrevPosCharStyle()
                    if styleClassifier.is_default(last_style):
                        if last_char == '\n':
                            return None
                    else:
                        break
            # Fallback and do SCSS/Less/CSS triggering.
            # TODO: Support ":color blue" colon-first notation.
            # TODO: After ",\n", offer tag-names if the above line starts with a tab.
            # Otherwise, indent the same level, and then offer tag-names.
            return _NestedCSSLangIntel._trg_from_pos(self, buf, pos, implicit=implicit, DEBUG=DEBUG, ac=None, styleClassifier=styleClassifier)
        except IndexError:
            # Ran off the start of the buffer while scanning; no trigger.
            pass
class LessLangIntel(_NestedCSSLangIntel):
    """Code-intelligence handler for the Less CSS dialect."""
    # This must be defined as "Less" in order to get autocompletion working.
    lang = "Less"
class SCSSLangIntel(_NestedCSSLangIntel):
    """Code-intelligence handler for the SCSS dialect."""
    lang = "SCSS"
class SassLangIntel(_NestedSassLangIntel):
    """Code-intelligence handler for Sass (indentation-based syntax)."""
    lang = "Sass"
class LessBuffer(CSSBuffer):
    """Buffer specialization for Less documents."""
    lang = "Less"
class SCSSBuffer(CSSBuffer):
    """Buffer specialization for SCSS documents."""
    lang = "SCSS"
class SassBuffer(CSSBuffer):
    """Buffer specialization for Sass documents."""
    lang = "Sass"
    # Remove the space character from the fillup/stop sets - presumably
    # because whitespace is significant in Sass, a space must not complete
    # or cancel an active completion. NOTE(review): confirm intent.
    cpln_fillup_chars = CSSBuffer.cpln_fillup_chars.replace(" ", "")
    cpln_stop_chars = CSSBuffer.cpln_stop_chars.replace(" ", "")
#---- registration
def register(mgr):
    """Register Less, SCSS and Sass language support with the Manager."""
    lang_table = (
        ("Less", LessLexer, LessBuffer, LessLangIntel),
        ("SCSS", SCSSLexer, SCSSBuffer, SCSSLangIntel),
        ("Sass", SassLexer, SassBuffer, SassLangIntel),
    )
    for lang_name, lexer_cls, buf_cls, intel_cls in lang_table:
        mgr.set_lang_info(lang_name,
                          silvercity_lexer=lexer_cls(),
                          buf_class=buf_cls,
                          langintel_class=intel_cls,
                          is_cpln_lang=True)
"""Unittest main program"""
import sys
import os
import types
from django.utils.unittest import loader, runner
try:
from django.utils.unittest.signals import installHandler
except ImportError:
installHandler = None
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s test_module.TestClass - run tests from
test_module.TestClass
%(progName)s test_module.TestClass.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
class TestProgram(object):
    """A command-line program that runs a set of tests; this is primarily
    for making test modules conveniently executable.

    NOTE(review): this file uses Python 2-only syntax (print statements,
    ``basestring``, ``except ..., msg``) and cannot run on Python 3.
    """
    USAGE = USAGE_FROM_MODULE
    # defaults for testing
    failfast = catchbreak = buffer = progName = None
    def __init__(self, module='__main__', defaultTest=None,
                 argv=None, testRunner=None,
                 testLoader=loader.defaultTestLoader, exit=True,
                 verbosity=1, failfast=None, catchbreak=None, buffer=None):
        # Resolve a dotted module path string to the module object itself.
        if isinstance(module, basestring):
            self.module = __import__(module)
            for part in module.split('.')[1:]:
                self.module = getattr(self.module, part)
        else:
            self.module = module
        if argv is None:
            argv = sys.argv
        self.exit = exit
        self.verbosity = verbosity
        self.failfast = failfast
        self.catchbreak = catchbreak
        self.buffer = buffer
        self.defaultTest = defaultTest
        self.testRunner = testRunner
        self.testLoader = testLoader
        self.progName = os.path.basename(argv[0])
        # Side effect: constructing a TestProgram parses args and runs the
        # tests immediately (and may call sys.exit).
        self.parseArgs(argv)
        self.runTests()
    def usageExit(self, msg=None):
        # Print usage (plus an optional error message) and exit with status 2.
        if msg:
            print msg
        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
                 'buffer': ''}
        # "!= False" keeps the option listed when the flag is None (unset).
        if self.failfast != False:
            usage['failfast'] = FAILFAST
        if self.catchbreak != False and installHandler is not None:
            usage['catchbreak'] = CATCHBREAK
        if self.buffer != False:
            usage['buffer'] = BUFFEROUTPUT
        print self.USAGE % usage
        sys.exit(2)
    def parseArgs(self, argv):
        # "discover" subcommand gets its own option parser.
        if len(argv) > 1 and argv[1].lower() == 'discover':
            self._do_discovery(argv[2:])
            return
        import getopt
        long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
        try:
            options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
            for opt, value in options:
                if opt in ('-h','-H','--help'):
                    self.usageExit()
                if opt in ('-q','--quiet'):
                    self.verbosity = 0
                if opt in ('-v','--verbose'):
                    self.verbosity = 2
                # Command-line flags only apply when the corresponding
                # constructor argument was left as None.
                if opt in ('-f','--failfast'):
                    if self.failfast is None:
                        self.failfast = True
                    # Should this raise an exception if -f is not valid?
                if opt in ('-c','--catch'):
                    if self.catchbreak is None and installHandler is not None:
                        self.catchbreak = True
                    # Should this raise an exception if -c is not valid?
                if opt in ('-b','--buffer'):
                    if self.buffer is None:
                        self.buffer = True
                    # Should this raise an exception if -b is not valid?
            if len(args) == 0 and self.defaultTest is None:
                # createTests will load tests from self.module
                self.testNames = None
            elif len(args) > 0:
                self.testNames = args
                if __name__ == '__main__':
                    # to support python -m unittest ...
                    self.module = None
            else:
                self.testNames = (self.defaultTest,)
            self.createTests()
        except getopt.error, msg:
            self.usageExit(msg)
    def createTests(self):
        # Build self.test either from the whole module or from explicit names.
        if self.testNames is None:
            self.test = self.testLoader.loadTestsFromModule(self.module)
        else:
            self.test = self.testLoader.loadTestsFromNames(self.testNames,
                                                           self.module)
    def _do_discovery(self, argv, Loader=loader.TestLoader):
        # handle command line args for test discovery
        self.progName = '%s discover' % self.progName
        import optparse
        parser = optparse.OptionParser()
        parser.prog = self.progName
        parser.add_option('-v', '--verbose', dest='verbose', default=False,
                          help='Verbose output', action='store_true')
        if self.failfast != False:
            parser.add_option('-f', '--failfast', dest='failfast', default=False,
                              help='Stop on first fail or error',
                              action='store_true')
        if self.catchbreak != False and installHandler is not None:
            parser.add_option('-c', '--catch', dest='catchbreak', default=False,
                              help='Catch ctrl-C and display results so far',
                              action='store_true')
        if self.buffer != False:
            parser.add_option('-b', '--buffer', dest='buffer', default=False,
                              help='Buffer stdout and stderr during tests',
                              action='store_true')
        parser.add_option('-s', '--start-directory', dest='start', default='.',
                          help="Directory to start discovery ('.' default)")
        parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
                          help="Pattern to match tests ('test*.py' default)")
        parser.add_option('-t', '--top-level-directory', dest='top', default=None,
                          help='Top level directory of project (defaults to start directory)')
        options, args = parser.parse_args(argv)
        if len(args) > 3:
            self.usageExit()
        # Positional arguments override -s/-p/-t, in that order.
        for name, value in zip(('start', 'pattern', 'top'), args):
            setattr(options, name, value)
        # only set options from the parsing here
        # if they weren't set explicitly in the constructor
        if self.failfast is None:
            self.failfast = options.failfast
        if self.catchbreak is None and installHandler is not None:
            self.catchbreak = options.catchbreak
        if self.buffer is None:
            self.buffer = options.buffer
        if options.verbose:
            self.verbosity = 2
        start_dir = options.start
        pattern = options.pattern
        top_level_dir = options.top
        # This local deliberately shadows the module-level 'loader' import.
        loader = Loader()
        self.test = loader.discover(start_dir, pattern, top_level_dir)
    def runTests(self):
        # Install the ctrl-C handler if requested, pick a runner, and run.
        if self.catchbreak:
            installHandler()
        if self.testRunner is None:
            self.testRunner = runner.TextTestRunner
        if isinstance(self.testRunner, (type, types.ClassType)):
            try:
                testRunner = self.testRunner(verbosity=self.verbosity,
                                             failfast=self.failfast,
                                             buffer=self.buffer)
            except TypeError:
                # didn't accept the verbosity, buffer or failfast arguments
                testRunner = self.testRunner()
        else:
            # it is assumed to be a TestRunner instance
            testRunner = self.testRunner
        self.result = testRunner.run(self.test)
        if self.exit:
            # Exit status 0 on success, 1 on any failure/error.
            sys.exit(not self.result.wasSuccessful())
# Backwards-compatible alias: unittest.main is the TestProgram class itself.
main = TestProgram

def main_():
    # Entry point for "python -m ...": switch to the discovery-capable usage
    # text and load tests via command-line names rather than a module.
    TestProgram.USAGE = USAGE_AS_MAIN
    main(module=None)
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.benchmark.index.mapper;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.index.mapper.LuceneDocument;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.xcontent.XContentType;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.zip.GZIPInputStream;
import static java.nio.charset.StandardCharsets.UTF_8;
@Fork(value = 1)
@Warmup(iterations = 5)
@Measurement(iterations = 5)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@State(Scope.Benchmark)
public class BeatsMapperBenchmark {
@Param({ "1600172297" })
private long seed;
private Random random;
private MapperService mapperService;
private SourceToParse[] sources;
@Setup
public void setUp() throws IOException {
this.random = new Random(seed);
this.mapperService = MapperServiceFactory.create(readSampleMapping());
this.sources = generateRandomDocuments(10_000);
}
private static String readSampleMapping() throws IOException {
// Uncompressed mapping is around 1mb and 29k lines.
// It is unlikely that it will be modified so keeping the compressed version instead to minimize the repo size.
return readCompressedMapping("filebeat-mapping-8.1.2.json.gz");
}
private static String readCompressedMapping(String resource) throws IOException {
try (var in = new GZIPInputStream(BeatsMapperBenchmark.class.getResourceAsStream(resource))) {
return new String(in.readAllBytes(), UTF_8);
}
}
private SourceToParse[] generateRandomDocuments(int count) {
var docs = new SourceToParse[count];
for (int i = 0; i < count; i++) {
docs[i] = generateRandomDocument();
}
return docs;
}
private SourceToParse generateRandomDocument() {
return new SourceToParse(
UUIDs.randomBase64UUID(),
new BytesArray(
"{ \"@timestamp\": "
+ System.currentTimeMillis()
+ ", \"log.file.path\": \""
+ randomFrom("logs-1.log", "logs-2.log", "logs-3.log")
+ "\", \"log.level\": \""
+ "INFO"
+ "\", \"log.logger\": \""
+ "some.package.for.logging.requests"
+ "\", \"client.ip\": \""
+ randomIp()
+ "\", \"http.request.method\": \""
+ randomFrom("GET", "POST")
+ "\", \"http.request.id\": \""
+ random.nextInt()
+ "\", \"http.request.bytes\": "
+ random.nextInt(1024)
+ ", \"url.path\": \""
+ randomString(1024)
+ "\", \"http.response.status_code\": "
+ randomFrom(200, 204, 300, 404, 500)
+ ", \"http.response.bytes\": "
+ random.nextInt(1024)
+ ", \"http.response.mime_type\": \""
+ randomFrom("application/json", "application/xml")
+ "\"}"
),
XContentType.JSON
);
}
private String randomIp() {
return "" + random.nextInt(255) + '.' + random.nextInt(255) + '.' + random.nextInt(255) + '.' + random.nextInt(255);
}
private String randomString(int maxLength) {
var length = random.nextInt(maxLength);
var builder = new StringBuilder(length);
for (int i = 0; i < length; i++) {
builder.append((byte) (32 + random.nextInt(94)));
}
return builder.toString();
}
@SafeVarargs
@SuppressWarnings("varargs")
private <T> T randomFrom(T... items) {
return items[random.nextInt(items.length)];
}
@Benchmark
public List<LuceneDocument> benchmarkParseKeywordFields() {
return mapperService.documentMapper().parse(randomFrom(sources)).docs();
}
} | java | github | https://github.com/elastic/elasticsearch | benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/BeatsMapperBenchmark.java |
# coding: utf-8
# Copyright 2014 Globo.com Player authors. All rights reserved.
# Use of this source code is governed by a MIT License
# license that can be found in the LICENSE file.
import sys
# NOTE(review): despite the name, this holds the full sys.version_info tuple,
# not only the major version; comparisons below rely on tuple ordering.
PYTHON_MAJOR_VERSION = sys.version_info
import os
import posixpath
try:
    from cookielib import CookieJar
except ImportError:
    from http.cookiejar import CookieJar
try:
    # Python 2: build a cookie-aware urllib2 opener so cookies set by the
    # server are replayed on subsequent requests.
    import urlparse as url_parser
    import urllib2
    cj = CookieJar()
    cookieProcessor = urllib2.HTTPCookieProcessor(cj)
    opener = urllib2.build_opener(cookieProcessor)
    urlopen = opener.open
except ImportError:
    # Python 3: plain urlopen; no cookie processor is installed here.
    import urllib.parse as url_parser
    from urllib.request import urlopen as url_opener
    urlopen = url_opener
from m3u8.model import M3U8, Playlist, IFramePlaylist, Media, Segment
from m3u8.parser import parse, is_url
__all__ = ('M3U8', 'Playlist', 'IFramePlaylist', 'Media',
           'Segment', 'loads', 'load', 'parse')
def loads(content):
    '''
    Given a string with m3u8 content, return an M3U8 object.

    Raises ValueError if the content is invalid.
    '''
    return M3U8(content)
def load(uri):
    '''
    Load an m3u8 playlist from *uri* and return an M3U8 object.

    *uri* may be an HTTP(S) URL or a local file path. Raises ValueError
    for invalid playlist content, or IOError when the request fails.
    '''
    loader = _load_from_uri if is_url(uri) else _load_from_file
    return loader(uri)
def getCookieProcessor():
    # Expose the module-level cookie processor. NOTE(review): cookieProcessor
    # is only defined on the Python 2 import path above; on Python 3 calling
    # this raises NameError - confirm whether that is acceptable.
    return cookieProcessor
# Support for python3 inspired by https://github.com/szemtiv/m3u8/
def _load_from_uri(uri):
    '''
    Fetch *uri* and parse the response body into an M3U8 object.

    The base URI for relative segment paths is derived from the final
    (post-redirect) URL of the response.
    '''
    resource = urlopen(uri)
    # Bug fix: the previous code called _url_for(uri), which opened the URI a
    # second time just to learn the final URL. The already-open response
    # knows it via geturl().
    base_uri = _parsed_url(resource.geturl())
    if PYTHON_MAJOR_VERSION < (3,):
        content = _read_python2x(resource)
    else:
        content = _read_python3x(resource)
    return M3U8(content, base_uri=base_uri)
def _url_for(uri):
    # Return the final URL after any redirects. Note this performs a full
    # request of its own each time it is called.
    return urlopen(uri).geturl()
def _parsed_url(url):
parsed_url = url_parser.urlparse(url)
prefix = parsed_url.scheme + '://' + parsed_url.netloc
base_path = posixpath.normpath(parsed_url.path + '/..')
return url_parser.urljoin(prefix, base_path)
def _read_python2x(resource):
    # Python 2: response bodies are byte strings; just strip whitespace.
    return resource.read().strip()
def _read_python3x(resource):
    # Python 3: decode bytes using the response charset, defaulting to UTF-8.
    return resource.read().decode(resource.headers.get_content_charset(failobj="utf-8"))
def _load_from_file(uri):
    # Read a local m3u8 file; its directory becomes the base URI for
    # relative segment paths.
    with open(uri) as fileobj:
        content = fileobj.read().strip()
    return M3U8(content, base_uri=os.path.dirname(uri))
#!/usr/bin/env bash
# Copyright 2023 The Cockroach Authors.
#
# Use of this software is governed by the CockroachDB Software License
# included in the /LICENSE file.
set -exuo pipefail

# Four dirname calls: resolve from .../build/teamcity/cockroach/nightlies/<script>
# up to the build/ directory, where the support libraries live.
dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))"

source "$dir/teamcity-support.sh" # For $root
source "$dir/teamcity-bazel-support.sh" # For run_bazel

# Forward CI configuration/credentials into the bazel docker container
# (FIPS_ENABLED=1 selects the FIPS build flavor), then run the shared
# nightly roachtest driver.
BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e FIPS_ENABLED=1 -e LITERAL_ARTIFACTS_DIR=$root/artifacts -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e GOOGLE_KMS_KEY_A -e GOOGLE_KMS_KEY_B -e GOOGLE_CREDENTIALS_ASSUME_ROLE -e GOOGLE_SERVICE_ACCOUNT -e SLACK_TOKEN -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e SELECT_PROBABILITY -e ROACHTEST_FORCE_RUN_INVALID_RELEASE_BRANCH -e SELECTIVE_TESTS -e SNOWFLAKE_USER -e SNOWFLAKE_PVT_KEY -e DATADOG_ALWAYS_UPLOAD -e TEAMCITY_BUILD_PROPERTIES_FILE -e DD_API_KEY" \
run_bazel build/teamcity/cockroach/nightlies/roachtest_nightly_impl.sh
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lxd_profile
short_description: Manage LXD profiles
version_added: "2.2"
description:
- Management of LXD profiles
author: "Hiroaki Nakamura (@hnakamur)"
options:
name:
description:
- Name of a profile.
required: true
description:
description:
- Description of the profile.
version_added: "2.5"
config:
description:
- 'The config for the container (e.g. {"limits.memory": "4GB"}).
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
- If the profile already exists and its "config" value in metadata
obtained from
GET /1.0/profiles/<name>
U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
      are different, then this module tries to apply the configurations.
- Not all config values are supported to apply the existing profile.
Maybe you need to delete and recreate a profile.
required: false
devices:
description:
- 'The devices for the profile
(e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
required: false
new_name:
description:
- A new name of a profile.
- If this parameter is specified a profile will be renamed to this name.
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
required: false
state:
choices:
- present
- absent
description:
- Define the state of a profile.
required: false
default: present
url:
description:
- The unix domain socket path or the https URL for the LXD server.
required: false
default: unix:/var/lib/lxd/unix.socket
snap_url:
description:
- The unix domain socket path when LXD is installed by snap package manager.
required: false
default: unix:/var/snap/lxd/common/lxd/unix.socket
version_added: '2.8'
client_key:
description:
- The client certificate key file path.
required: false
default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
aliases: [ key_file ]
client_cert:
description:
- The client certificate file path.
required: false
default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
aliases: [ cert_file ]
trust_password:
description:
- The client trusted password.
- You need to set this password on the LXD server before
running this module using the following command.
lxc config set core.trust_password <some random password>
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
- If trust_password is set, this module send a request for
authentication before sending any requests.
required: false
notes:
- Profiles must have a unique name. If you attempt to create a profile
with a name that already existed in the users namespace the module will
simply return as "unchanged".
'''
EXAMPLES = '''
# An example for creating a profile
- hosts: localhost
connection: local
tasks:
- name: Create a profile
lxd_profile:
name: macvlan
state: present
config: {}
description: my macvlan profile
devices:
eth0:
nictype: macvlan
parent: br0
type: nic
# An example for creating a profile via http connection
- hosts: localhost
connection: local
tasks:
- name: create macvlan profile
lxd_profile:
url: https://127.0.0.1:8443
# These client_cert and client_key values are equal to the default values.
#client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
#client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
trust_password: mypassword
name: macvlan
state: present
config: {}
description: my macvlan profile
devices:
eth0:
nictype: macvlan
parent: br0
type: nic
# An example for deleting a profile
- hosts: localhost
connection: local
tasks:
- name: Delete a profile
lxd_profile:
name: macvlan
state: absent
# An example for renaming a profile
- hosts: localhost
connection: local
tasks:
- name: Rename a profile
lxd_profile:
name: macvlan
new_name: macvlan2
state: present
'''
RETURN = '''
old_state:
description: The old state of the profile
returned: success
type: str
sample: "absent"
logs:
description: The logs of requests and responses.
returned: when ansible-playbook is invoked with -vvvv.
type: list
sample: "(too long to be placed here)"
actions:
description: List of actions performed for the profile.
returned: success
type: list
sample: '["create"]'
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.lxd import LXDClient, LXDClientException
# PROFILES_STATES is the list of supported values for the 'state' parameter.
PROFILES_STATES = [
    'present', 'absent'
]
# CONFIG_PARAMS lists the module parameters that map directly onto keys of
# the LXD profile document (see LXDProfileManagement._build_config).
CONFIG_PARAMS = [
    'config', 'description', 'devices'
]
class LXDProfileManagement(object):
    def __init__(self, module):
        """Management of LXC containers via Ansible.

        :param module: Processed Ansible Module.
        :type module: ``object``
        """
        self.module = module
        self.name = self.module.params['name']
        self._build_config()
        self.state = self.module.params['state']
        self.new_name = self.module.params.get('new_name', None)
        self.key_file = self.module.params.get('client_key', None)
        self.cert_file = self.module.params.get('client_cert', None)
        # Request/response logs are captured only at -vvvv verbosity.
        self.debug = self.module._verbosity >= 4
        try:
            # Prefer the snap socket when it exists on this host.
            if os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
                self.url = self.module.params['snap_url']
            else:
                self.url = self.module.params['url']
        except Exception as e:
            # NOTE(review): a generic Exception has no .msg attribute, so this
            # handler can itself raise AttributeError - confirm intent.
            self.module.fail_json(msg=e.msg)
        try:
            self.client = LXDClient(
                self.url, key_file=self.key_file, cert_file=self.cert_file,
                debug=self.debug
            )
        except LXDClientException as e:
            self.module.fail_json(msg=e.msg)
        self.trust_password = self.module.params.get('trust_password', None)
        # Record of performed operations ("create", "rename", ...), also used
        # to derive the 'changed' result flag.
        self.actions = []
    def _build_config(self):
        # Collect the profile-document parameters (config/description/devices)
        # that were actually supplied into self.config.
        self.config = {}
        for attr in CONFIG_PARAMS:
            param_val = self.module.params.get(attr, None)
            if param_val is not None:
                self.config[attr] = param_val
    def _get_profile_json(self):
        # 404 is an expected answer (profile absent), not a failure.
        return self.client.do(
            'GET', '/1.0/profiles/{0}'.format(self.name),
            ok_error_codes=[404]
        )
    @staticmethod
    def _profile_json_to_module_state(resp_json):
        # Map an LXD API response to the module's present/absent vocabulary.
        if resp_json['type'] == 'error':
            return 'absent'
        return 'present'
    def _update_profile(self):
        # Reconcile the requested state against the observed old_state.
        if self.state == 'present':
            if self.old_state == 'absent':
                if self.new_name is None:
                    self._create_profile()
                else:
                    self.module.fail_json(
                        msg='new_name must not be set when the profile does not exist and the specified state is present',
                        changed=False)
            else:
                if self.new_name is not None and self.new_name != self.name:
                    self._rename_profile()
                if self._needs_to_apply_profile_configs():
                    self._apply_profile_configs()
        elif self.state == 'absent':
            if self.old_state == 'present':
                if self.new_name is None:
                    self._delete_profile()
                else:
                    self.module.fail_json(
                        msg='new_name must not be set when the profile exists and the specified state is absent',
                        changed=False)
    def _create_profile(self):
        config = self.config.copy()
        config['name'] = self.name
        self.client.do('POST', '/1.0/profiles', config)
        self.actions.append('create')
    def _rename_profile(self):
        config = {'name': self.new_name}
        self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
        self.actions.append('rename')
        # Subsequent requests must address the profile by its new name.
        self.name = self.new_name
    def _needs_to_change_profile_config(self, key):
        # A key the caller did not ask to manage never forces a change.
        if key not in self.config:
            return False
        old_configs = self.old_profile_json['metadata'].get(key, None)
        return self.config[key] != old_configs
    def _needs_to_apply_profile_configs(self):
        return (
            self._needs_to_change_profile_config('config') or
            self._needs_to_change_profile_config('description') or
            self._needs_to_change_profile_config('devices')
        )
    def _apply_profile_configs(self):
        # NOTE(review): this PUTs a copy of the whole GET response envelope
        # (including 'type'/'metadata') with the requested keys overlaid on
        # top - confirm the LXD API accepts that document shape.
        config = self.old_profile_json.copy()
        for k, v in self.config.items():
            config[k] = v
        self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config)
        self.actions.append('apply_profile_configs')
    def _delete_profile(self):
        self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name))
        self.actions.append('delete')
    def run(self):
        """Run the main method."""
        try:
            if self.trust_password is not None:
                self.client.authenticate(self.trust_password)
            self.old_profile_json = self._get_profile_json()
            self.old_state = self._profile_json_to_module_state(self.old_profile_json)
            self._update_profile()
            # Any recorded action means the remote state changed.
            state_changed = len(self.actions) > 0
            result_json = {
                'changed': state_changed,
                'old_state': self.old_state,
                'actions': self.actions
            }
            if self.client.debug:
                result_json['logs'] = self.client.logs
            self.module.exit_json(**result_json)
        except LXDClientException as e:
            # Report the failure together with whatever actions already ran.
            state_changed = len(self.actions) > 0
            fail_params = {
                'msg': e.msg,
                'changed': state_changed,
                'actions': self.actions
            }
            if self.client.debug:
                fail_params['logs'] = e.kwargs['logs']
            self.module.fail_json(**fail_params)
def main():
    """Ansible Main module."""
    # NOTE(review): os.environ['HOME'] raises KeyError when HOME is unset
    # (e.g. some service environments) because the defaults below are
    # evaluated eagerly - confirm this is acceptable.
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(
                type='str',
                required=True
            ),
            new_name=dict(
                type='str',
            ),
            config=dict(
                type='dict',
            ),
            description=dict(
                type='str',
            ),
            devices=dict(
                type='dict',
            ),
            state=dict(
                choices=PROFILES_STATES,
                default='present'
            ),
            url=dict(
                type='str',
                default='unix:/var/lib/lxd/unix.socket'
            ),
            snap_url=dict(
                type='str',
                default='unix:/var/snap/lxd/common/lxd/unix.socket'
            ),
            client_key=dict(
                type='str',
                default='{0}/.config/lxc/client.key'.format(os.environ['HOME']),
                aliases=['key_file']
            ),
            client_cert=dict(
                type='str',
                default='{0}/.config/lxc/client.crt'.format(os.environ['HOME']),
                aliases=['cert_file']
            ),
            trust_password=dict(type='str', no_log=True)
        ),
        supports_check_mode=False,
    )
    lxd_manage = LXDProfileManagement(module=module)
    lxd_manage.run()

if __name__ == '__main__':
    main()
# coding: utf-8
"""
平安证券 - 对账单 - 分析
===============================================================================
"""
import os
import pandas as pd
# path = r"C:\Users\Mike\Desktop\pazq_records"
def read_data(path):
    """Read all statement exports (*.xls files, actually GBK-encoded
    tab-separated text) under *path* and return one cleaned DataFrame.

    Cleaning: strips the '"=' Excel-protection wrapper from column names
    and cell values, sorts by trade date descending, drops the remark and
    trailing empty columns, and converts the money/quantity columns to float.
    Returns an empty DataFrame when no .xls files are found.
    """
    files = [os.path.join(path, name) for name in os.listdir(path)
             if name.endswith(".xls")]
    # Read everything first and concatenate once: DataFrame.append was
    # removed in pandas 2.0 and appending in a loop was O(n^2) anyway.
    frames = [pd.read_csv(f, encoding='gbk', sep='\t') for f in files]
    if not frames:
        return pd.DataFrame()
    res = pd.concat(frames, ignore_index=True)
    res.columns = [x.strip('"=') for x in res.columns]
    for col in res.columns:
        res[col] = res[col].astype(str)
        res[col] = res[col].apply(lambda x: x.strip('"='))
    res.sort_values("发生日期", ascending=False, inplace=True)
    res.reset_index(drop=True, inplace=True)
    res.drop(['备注', 'Unnamed: 21'], axis=1, inplace=True)
    float_col = ['发生金额', '成交均价', '成交数量', '成交金额', '股份余额',
                 '手续费', '印花税', '资金余额', '委托价格', '委托数量', '过户费']
    for col in float_col:
        res[col] = res[col].astype(float)
    return res
def cal_gain(data):
    """Compute the overall profit/loss from the transaction records.

    Total gain is the negated sum of the bank-transfer-out and
    bank-transfer-in amounts, rounded to 4 decimal places.
    """
    per_business = data.groupby('业务名称').sum()['发生金额']
    total_gain = -(per_business['银证转出'] + per_business['银证转入'])
    return round(total_gain, 4)
def cal_share_gain(data):
    """计算个股操作盈亏 — per-share trading P&L.

    For each share: gain = sell-clearing total - buy-clearing total.
    Shares with only one side present (e.g. still held, or bought before
    the statement window) are logged and skipped.

    :param data: DataFrame with 证券代码/证券名称/业务名称/成交金额 columns
    :return: list of (share_name, gain) tuples
    """
    # Rows without a security code (cash transfers etc.) carry the string
    # 'nan' after read_data's astype(str) normalisation.
    trades = data[data['证券代码'] != "nan"]
    # Select the amount column before summing so string columns are not
    # aggregated.
    sums = trades.groupby(['证券名称', '业务名称'])['成交金额'].sum()
    share_gains = []
    for share in sums.index.levels[0]:
        try:
            print(share, " - 总盈亏:")
            # KeyError here means one side of the trade is missing.
            gain = sums[share]['证券卖出清算'] - sums[share]['证券买入清算']
            print(gain, '\n')
            share_gains.append((share, gain))
        except KeyError:
            # Was a bare `except:` (swallowed even KeyboardInterrupt) with
            # a typo'd message; only the missing-key case is expected.
            print("\nerror: ", sums[share])
    return share_gains
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "v0alpha1.multi-lane-annotations.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana",
"version": "v0",
"datasource": {
"name": "-- Grafana --"
},
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-1": {
"kind": "Panel",
"spec": {
"id": 1,
"title": "Time series",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "PD8C576611E62080A"
},
"spec": {
"noise": 10,
"scenarioId": "random_walk",
"spread": 10
}
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "PD8C576611E62080A"
},
"spec": {
"rawFrameContent": "[\n {\n \"fields\": [\n {\n \"name\": \"type\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\",\n \"Milestones\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"color\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\",\n \"#F2495C\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"time\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n 1720697881000,\n 1728580067000,\n 1730129182000,\n 1730734644000,\n 1732542118000,\n 1736426576000,\n 1736874473000,\n 1738676647000,\n 1740067713000,\n 1740684246000,\n 1743733609000,\n 1744034815000,\n 1745335229000,\n 1745936130000,\n 1746107356000,\n 1747055303000,\n 1747946736000,\n 1748446531000,\n 1750098297000,\n 1750963080000,\n 1750963096000,\n 1752067555000,\n 1752080606000,\n 1753125884000,\n 1754395568000,\n 1754407010000,\n 1756228450000,\n 1757947221000,\n 1759763050000,\n 1759924632000,\n 1761572602000\n ],\n \"type\": \"time\"\n },\n {\n \"name\": \"timeEnd\",\n \"config\": {\n \"custom\": {}\n },\n 
\"values\": [\n 1729081505000,\n 1730129307000,\n 1730838127000,\n 1732543887000,\n 1736528756000,\n 1736874600000,\n 1740064375000,\n 1740170732000,\n 1742397235000,\n 1743733611000,\n 1744038997000,\n 1745341972000,\n 1745936141000,\n 1746118905000,\n 1747055307000,\n 1747853613000,\n 1748446520000,\n 1750187872000,\n 1750963071000,\n 1751038503000,\n 1752067563000,\n 1752166243000,\n 1753396603000,\n 1754395592000,\n 1754407004000,\n 1756228445000,\n 1758134868000,\n 1759763044000,\n 1761572592000,\n 1761572593000,\n null\n ],\n \"type\": \"number\"\n },\n {\n \"name\": \"title\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"0.1.0\",\n \"1.0.2\",\n \"1.0.3\",\n \"1.0.4\",\n \"1.0.5\",\n \"1.0.6\",\n \"1.0.7\",\n \"1.0.8\",\n \"1.0.9\",\n \"1.0.10\",\n \"1.0.11\",\n \"1.0.12\",\n \"1.0.13\",\n \"1.0.14\",\n \"1.0.15\",\n \"1.0.16\",\n \"1.0.17\",\n \"1.0.18\",\n \"1.0.19\",\n \"1.0.20\",\n \"1.0.21\",\n \"1.0.22\",\n \"1.0.23\",\n \"1.0.24\",\n \"1.0.25\",\n \"1.0.26\",\n \"1.0.27\",\n \"1.0.28\",\n \"1.0.29\",\n \"1.0.30\",\n \"1.0.31\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"text\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n false\n ],\n \"type\": \"boolean\"\n },\n {\n \"name\": \"source\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n {\n \"datasource\": {\n \"type\": \"grafana-github-datasource\",\n \"uid\": \"feyypehpl45xcf\"\n },\n \"enable\": true,\n \"hide\": false,\n \"iconColor\": \"red\",\n \"mappings\": {\n \"text\": {\n \"source\": \"field\",\n \"value\": \"closed\"\n },\n \"time\": {\n \"source\": \"field\",\n \"value\": \"created_at\"\n },\n \"timeEnd\": {\n \"source\": \"field\",\n \"value\": \"closed_at\"\n },\n \"title\": {\n \"source\": \"field\",\n \"value\": 
\"title\"\n }\n },\n \"name\": \"Milestones\",\n \"target\": {\n \"options\": {\n \"query\": \"\"\n },\n \"owner\": \"grafana\",\n \"queryType\": \"Milestones\",\n \"refId\": \"Anno\",\n \"repository\": \"logs-drilldown\"\n }\n },\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null\n ],\n \"type\": \"other\"\n },\n {\n \"name\": \"isRegion\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n true,\n false\n ],\n \"type\": \"boolean\"\n }\n ],\n \"length\": 31,\n \"meta\": {\n \"dataTopic\": \"annotations\"\n }\n },\n {\n \"fields\": [\n {\n \"name\": \"type\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"Issues\",\n \"Issues\",\n \"Issues\",\n \"Issues\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"color\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"#FADE2A\",\n \"#FADE2A\",\n \"#FADE2A\",\n \"#FADE2A\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"time\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n 1760112753000,\n 1759930212000,\n 1759924899000,\n 1759850404000\n ],\n \"type\": \"time\"\n },\n {\n \"name\": \"timeEnd\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n 1760972421000,\n null,\n null,\n 1759926839000\n ],\n \"type\": \"number\"\n },\n {\n \"name\": \"title\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"[BUG]: Error when data source configured for multi tenant queries\",\n \"[FEAT]: Allow users without explore permissions to use logs drilldown\",\n \"Sync Logs panel and label/fields empty states\",\n \"fix: labels clear variable state not showing\"\n ],\n 
\"type\": \"string\"\n },\n {\n \"name\": \"text\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"[BUG]: Error when data source configured for multi tenant queries\",\n \"[FEAT]: Allow users without explore permissions to use logs drilldown\",\n \"Sync Logs panel and label/fields empty states\",\n \"fix: labels clear variable state not showing\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"tags\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n [\n \"bug\",\n \"needs-triage\"\n ],\n [\n \"enhancement\",\n \"needs-triage\"\n ],\n [\n \"needs-triage\"\n ],\n [\n \"bug\"\n ]\n ],\n \"type\": \"other\"\n },\n {\n \"name\": \"source\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n {\n \"datasource\": {\n \"type\": \"grafana-github-datasource\",\n \"uid\": \"feyypehpl45xcf\"\n },\n \"enable\": true,\n \"hide\": false,\n \"iconColor\": \"yellow\",\n \"mappings\": {\n \"tags\": {\n \"source\": \"field\",\n \"value\": \"labels\"\n },\n \"time\": {\n \"source\": \"field\",\n \"value\": \"created_at\"\n },\n \"timeEnd\": {\n \"source\": \"field\",\n \"value\": \"closed_at\"\n }\n },\n \"name\": \"Issues\",\n \"target\": {\n \"options\": {\n \"query\": \"\"\n },\n \"owner\": \"grafana\",\n \"queryType\": \"Issues\",\n \"refId\": \"Anno\",\n \"repository\": \"logs-drilldown\"\n }\n },\n null,\n null,\n null\n ],\n \"type\": \"other\"\n },\n {\n \"name\": \"isRegion\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n true,\n false,\n false,\n true\n ],\n \"type\": \"boolean\"\n }\n ],\n \"length\": 4,\n \"meta\": {\n \"dataTopic\": \"annotations\"\n }\n },\n {\n \"fields\": [\n {\n \"name\": \"type\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"Pulls\",\n \"Pulls\",\n \"Pulls\",\n \"Pulls\",\n \"Pulls\",\n \"Pulls\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"color\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"#5794F2\",\n \"#5794F2\",\n \"#5794F2\",\n \"#5794F2\",\n \"#5794F2\",\n \"#5794F2\"\n ],\n 
\"type\": \"string\"\n },\n {\n \"name\": \"time\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n 1759941908000,\n 1759854730000,\n 1759852733000,\n 1759841416000,\n 1759786844000,\n 1759762979000\n ],\n \"type\": \"time\"\n },\n {\n \"name\": \"timeEnd\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n 1760110687000,\n 1759857566000,\n 1759926838000,\n 1759843529000,\n 1759848237000,\n 1759769543000\n ],\n \"type\": \"number\"\n },\n {\n \"name\": \"title\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"feat(EmptyLogs): add button to fix with assistant\",\n \"docs: Update troubleshooting page\",\n \"fix: unexpected clear variable behavior\",\n \"docs: update stale readme, fix docker install script\",\n \"docs: Update install and troubleshooting\",\n \"fix: fix RegExp.source removing flags, use toString instead\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"text\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"feat(EmptyLogs): add button to fix with assistant\",\n \"docs: Update troubleshooting page\",\n \"fix: unexpected clear variable behavior\",\n \"docs: update stale readme, fix docker install script\",\n \"docs: Update install and troubleshooting\",\n \"fix: fix RegExp.source removing flags, use toString instead\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"source\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n {\n \"datasource\": {\n \"type\": \"grafana-github-datasource\",\n \"uid\": \"feyypehpl45xcf\"\n },\n \"enable\": true,\n \"hide\": false,\n \"iconColor\": \"blue\",\n \"mappings\": {\n \"time\": {\n \"source\": \"field\",\n \"value\": \"created_at\"\n },\n \"timeEnd\": {\n \"source\": \"field\",\n \"value\": \"merged_at\"\n }\n },\n \"name\": \"Pulls\",\n \"target\": {\n \"options\": {\n \"query\": \"\",\n \"timeField\": 1\n },\n \"owner\": \"grafana\",\n \"queryType\": \"Pull_Requests\",\n \"refId\": \"Anno\",\n \"repository\": \"logs-drilldown\"\n }\n },\n null,\n null,\n null,\n 
null,\n null\n ],\n \"type\": \"other\"\n },\n {\n \"name\": \"isRegion\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n true,\n true,\n true,\n true,\n true,\n true\n ],\n \"type\": \"boolean\"\n }\n ],\n \"length\": 6,\n \"meta\": {\n \"dataTopic\": \"annotations\"\n }\n },\n {\n \"fields\": [\n {\n \"name\": \"type\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"Commits\",\n \"Commits\",\n \"Commits\",\n \"Commits\",\n \"Commits\",\n \"Commits\",\n \"Commits\",\n \"Commits\",\n \"Commits\",\n \"Commits\",\n \"Commits\",\n \"Commits\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"color\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"#73BF69\",\n \"#73BF69\",\n \"#73BF69\",\n \"#73BF69\",\n \"#73BF69\",\n \"#73BF69\",\n \"#73BF69\",\n \"#73BF69\",\n \"#73BF69\",\n \"#73BF69\",\n \"#73BF69\",\n \"#73BF69\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"time\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n 1760349982000,\n 1760110687000,\n 1759950041000,\n 1759948586000,\n 1759933371000,\n 1759926838000,\n 1759857566000,\n 1759850172000,\n 1759848237000,\n 1759843529000,\n 1759769543000,\n 1759762725000\n ],\n \"type\": \"time\"\n },\n {\n \"name\": \"title\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"Matias Chomicki\",\n \"Matias Chomicki\",\n \"github-actions[bot]\",\n \"Liza Detrick\",\n \"grafana-plugins-platform-bot[bot]\",\n \"Galen Kistler\",\n \"J Stickler\",\n \"Piotr Jamróz\",\n \"J Stickler\",\n \"Galen Kistler\",\n \"Galen Kistler\",\n \"Galen Kistler\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"text\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"feat(LogsListScene): add defaultDisplayedFields support (#1554)\\n\\n* feat(LogsListScene): add defaultDisplayedFields support\\n\\n* chore: update mock\\n\\n* fix(Table): omit unsupported field\\n\\n* fix(LogListScene): reset default displayed fields when switching viz\\n\\n* chore: lint\",\n \"feat(EmptyLogs): add 
button to fix with assistant (#1571)\\n\\n* feat(LogListScene): add error type to identify empty results\\n\\n* feat(LogsPanelError): use assistant to investigate no results\\n\\n* fix(Shoo): shoo\\n\\n* feat(EmbeddedLogs): customize empty logs prompt\\n\\n* feat(LogsPanelError): implement custom CTA\\n\\n* chore: spelling\\n\\n* chore: add unit test\\n\\n* chore: remove it.only\\n\\n* fix(variableHelpers): exclude primary label from variables to clear\\n\\n* feat(embedding): refactor options\\n\\n* feat(assistant): create service\\n\\n* feat(assistant): integrate with empty layout and no matching labels\\n\\n* chore: fix types\\n\\n* chore: update tests\\n\\n* chore: remove quotes\\n\\n* chore: prevent access to an undefined object\\n\\n* chore: move label to constants\\n\\n* Revert \\\"chore: move label to constants\\\"\\n\\nThis reverts commit fbdf96415e5b2af7a5cae4eec6e6d5a30bc05d1c.\\n\\n* chore: use name instead of label\\n\\n* Revert \\\"fix(Shoo): shoo\\\"\\n\\nThis reverts commit 75993f7a845b514cb5da9667e94a2f0a3c913f24.\\n\\n* chore: more unit tests\\n\\n* test(variableHelpers): test getVariablesThatCanBeCleared\",\n \"chore: bump @grafana/create-plugin configuration to 5.26.9 (#1559)\",\n \"feat(table): preferences (#1534)\",\n \"chore(version): bump version to v1.0.29\",\n \"fix: unexpected clear variable behavior (#1567)\\n\\n* fix: show clear ui when query contains clearable label filters\",\n \"docs: Update troubleshooting page (#1568)\",\n \"fix: validate primary label correctly (#1561)\\n\\n* fix: properly clear variables for legacy urls\",\n \"docs: Update install and troubleshooting (#1564)\\n\\nCo-authored-by: Galen Kistler \u003c109082771+gtk-grafana@users.noreply.github.com\u003e\",\n \"docs: update stale readme, fix docker install script (#1565)\\n\\n* docs: update stale readme\\n\\n* chore: fix docker script\",\n \"fix: fix RegExp.source removing flags, use toString instead (#1563)\\n\\n* fix: fix RegExp.source removing flags, use toString 
instead\",\n \"fix: stale urls (#1562)\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"id\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n \"cf08043276597abe16f4ae9dbe97bd988d31b7ed\",\n \"01e343dacde3b9fcd862ec2cb2b04fbea4eb6d63\",\n \"9e67734938b980a80496d4cdc60efe7dbc4ba74e\",\n \"924ebd235b6ea99d297d8e04e08ac8ea90a32968\",\n \"23b9b5dc0504a4c397b326562cb796cfdc0531df\",\n \"089fdfa26c7daad5543be1aae2377850348663c3\",\n \"437a568a7a7279c436a674c7b1b0d11a4971d47b\",\n \"ee12a20b32db26b82fe32ed9d5868bd2dde4a4d0\",\n \"657880b399682101616b3413e04c2b664d018ef3\",\n \"fc776d767ebf2a64fb9fad7ab0f479cdaf536cdb\",\n \"a9207d235af9b2f5a4915f072801128a41c4063b\",\n \"e2eae8b8f45dc685659689f85962b0f0a58f13b1\"\n ],\n \"type\": \"string\"\n },\n {\n \"name\": \"source\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n {\n \"datasource\": {\n \"type\": \"grafana-github-datasource\",\n \"uid\": \"feyypehpl45xcf\"\n },\n \"enable\": true,\n \"hide\": false,\n \"iconColor\": \"green\",\n \"mappings\": {\n \"text\": {\n \"source\": \"field\",\n \"value\": \"message\"\n },\n \"time\": {\n \"source\": \"field\",\n \"value\": \"committed_at\"\n },\n \"title\": {\n \"source\": \"field\",\n \"value\": \"author\"\n }\n },\n \"name\": \"Commits\",\n \"target\": {\n \"options\": {\n \"gitRef\": \"main\"\n },\n \"owner\": \"grafana\",\n \"queryType\": \"Commits\",\n \"refId\": \"Anno\",\n \"repository\": \"logs-drilldown\"\n }\n },\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null,\n null\n ],\n \"type\": \"other\"\n },\n {\n \"name\": \"isRegion\",\n \"config\": {\n \"custom\": {}\n },\n \"values\": [\n false,\n false,\n false,\n false,\n false,\n false,\n false,\n false,\n false,\n false,\n false,\n false\n ],\n \"type\": \"boolean\"\n }\n ],\n \"length\": 12,\n \"meta\": {\n \"dataTopic\": \"annotations\"\n }\n }\n]",
"scenarioId": "raw_frame"
}
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 30
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "12.3.0-pre",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
},
"panel-10": {
"kind": "Panel",
"spec": {
"id": 10,
"title": "Status history (multi-lane)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "datasource",
"version": "v0",
"datasource": {
"name": "-- Dashboard --"
},
"spec": {
"panelId": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "status-history",
"version": "12.3.0-pre",
"spec": {
"options": {
"annotations": {
"multiLane": true
},
"colWidth": 0.9,
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
}
},
"overrides": []
}
}
}
}
},
"panel-11": {
"kind": "Panel",
"spec": {
"id": 11,
"title": "xymark",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "PD8C576611E62080A"
},
"spec": {
"rawFrameContent": "[\n {\n \"schema\": {\n \"refId\": \"A\",\n \"meta\": {\n \"type\": \"timeseries\"\n },\n \"fields\": [\n {\n \"name\": \"time\",\n \"type\": \"time\",\n \"config\": {\n \"interval\": 604800000\n }\n },\n {\n \"name\": \"A-series\",\n \"type\": \"number\",\n \"labels\": {},\n \"config\": {}\n }\n ]\n },\n \"data\": {\n \"values\": [\n [\n 1755870318505,\n 1756475118505,\n 1757079918505,\n 1757684718505,\n 1758289518505,\n 1758894318505,\n 1759499118505,\n 1760103918505,\n 1760708718505,\n 1761313518505\n ],\n [\n 49.57457814271496,\n 64.78808691382616,\n 94.88860442042386,\n 96.59132232810856,\n 58.57144477681538,\n 79.33618638515327,\n 89.64117713117561,\n 134.51905322565585,\n 122.83710544843791,\n 79.84039369237018\n ]\n ]\n }\n }\n]",
"scenarioId": "raw_frame"
}
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "PD8C576611E62080A"
},
"spec": {
"rawFrameContent": "[\n {\n \"schema\": {\n \"refId\": \"B\",\n \"name\": \"xymark\",\n \"meta\": {\n \"dataTopic\": \"annotations\"\n },\n \"fields\": [\n {\n \"name\": \"xMin\",\n \"type\": \"time\",\n \"config\": {}\n },\n {\n \"name\": \"xMax\",\n \"type\": \"time\",\n \"config\": {}\n },\n {\n \"name\": \"yMin\",\n \"type\": \"number\",\n \"config\": {}\n },\n {\n \"name\": \"yMax\",\n \"type\": \"number\",\n \"config\": {}\n },\n {\n \"name\": \"color\",\n \"type\": \"string\",\n \"config\": {}\n },\n {\n \"name\": \"fillOpacity\",\n \"type\": \"number\",\n \"config\": {}\n },\n {\n \"name\": \"lineWidth\",\n \"type\": \"number\",\n \"config\": {}\n },\n {\n \"name\": \"lineStyle\",\n \"type\": \"string\",\n \"config\": {}\n }\n ]\n },\n \"data\": {\n \"values\": [\n [\n 1757684718505\n ],\n [\n 1758894318505\n ],\n [\n 70\n ],\n [\n 120\n ],\n [\n \"#f00\"\n ],\n [\n 0.1\n ],\n [\n 1\n ],\n [\n \"dash\"\n ]\n ]\n }\n }\n]",
"scenarioId": "raw_frame"
}
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"maxDataPoints": 5
}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "12.3.0-pre",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "Time series (multi-lane)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "datasource",
"version": "v0",
"datasource": {
"name": "-- Dashboard --"
},
"spec": {
"panelId": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "12.3.0-pre",
"spec": {
"options": {
"annotations": {
"multiLane": true
},
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
},
"panel-3": {
"kind": "Panel",
"spec": {
"id": 3,
"title": "Candlestick",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "datasource",
"version": "v0",
"datasource": {
"name": "-- Dashboard --"
},
"spec": {
"panelId": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "candlestick",
"version": "12.3.0-pre",
"spec": {
"options": {
"candleStyle": "candles",
"colorStrategy": "open-close",
"colors": {
"down": "red",
"up": "green"
},
"includeAllFields": false,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mode": "candles+volume",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Candlestick (multi-lane)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "datasource",
"version": "v0",
"datasource": {
"name": "-- Dashboard --"
},
"spec": {
"panelId": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "candlestick",
"version": "12.3.0-pre",
"spec": {
"options": {
"annotations": {
"multiLane": true
},
"candleStyle": "candles",
"colorStrategy": "open-close",
"colors": {
"down": "red",
"up": "green"
},
"includeAllFields": false,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mode": "candles+volume",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
},
"panel-5": {
"kind": "Panel",
"spec": {
"id": 5,
"title": "State timeline",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "datasource",
"version": "v0",
"datasource": {
"name": "-- Dashboard --"
},
"spec": {
"panelId": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "state-timeline",
"version": "12.3.0-pre",
"spec": {
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
}
},
"overrides": []
}
}
}
}
},
"panel-6": {
"kind": "Panel",
"spec": {
"id": 6,
"title": "State timeline (multi-lane)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "datasource",
"version": "v0",
"datasource": {
"name": "-- Dashboard --"
},
"spec": {
"panelId": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "state-timeline",
"version": "12.3.0-pre",
"spec": {
"options": {
"alignValue": "left",
"annotations": {
"multiLane": true
},
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineWidth": 0,
"spanNulls": false
}
},
"overrides": []
}
}
}
}
},
"panel-7": {
"kind": "Panel",
"spec": {
"id": 7,
"title": "Heatmap",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "datasource",
"version": "v0",
"datasource": {
"name": "-- Dashboard --"
},
"spec": {
"panelId": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "heatmap",
"version": "12.3.0-pre",
"spec": {
"options": {
"calculate": false,
"cellGap": 1,
"color": {
"exponent": 0.5,
"fill": "dark-orange",
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Oranges",
"steps": 64
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": true
},
"rowsFrame": {
"layout": "auto"
},
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"reverse": false
}
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
}
}
}
}
},
"panel-8": {
"kind": "Panel",
"spec": {
"id": 8,
"title": "Heatmap (multi-lane)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "datasource",
"version": "v0",
"datasource": {
"name": "-- Dashboard --"
},
"spec": {
"panelId": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "heatmap",
"version": "12.3.0-pre",
"spec": {
"options": {
"annotations": {
"multiLane": true
},
"calculate": false,
"cellGap": 1,
"color": {
"exponent": 0.5,
"fill": "dark-orange",
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Oranges",
"steps": 64
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": true
},
"rowsFrame": {
"layout": "auto"
},
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"reverse": false
}
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
}
}
}
}
},
"panel-9": {
"kind": "Panel",
"spec": {
"id": 9,
"title": "Status history",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "datasource",
"version": "v0",
"datasource": {
"name": "-- Dashboard --"
},
"spec": {
"panelId": 1
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "status-history",
"version": "12.3.0-pre",
"spec": {
"options": {
"colWidth": 0.9,
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"rowHeight": 0.9,
"showValue": "auto",
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
},
"custom": {
"axisPlacement": "auto",
"fillOpacity": 70,
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
}
},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-1"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 0,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 8,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-3"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 8,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 16,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-5"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 16,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-6"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 24,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-7"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 24,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-8"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 32,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-9"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 32,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-10"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 40,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-11"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"gdev",
"panel-tests",
"graph-ng"
],
"timeSettings": {
"timezone": "browser",
"from": "2025-08-22T13:45:18.505Z",
"to": "2025-10-28T04:38:37.130Z",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Multi-lane annotations",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/annotations/v0alpha1.multi-lane-annotations.v42.v2beta1.json |
#include "test/jemalloc_test.h"
#include "jemalloc/internal/prof_data.h"
/*
 * Verify that a sampled allocation carries a valid profiling tctx across
 * rallocx(), and that the global sampled-object count rises by one on
 * allocation, stays flat across reallocation, and returns to the base
 * level after deallocation.
 */
TEST_BEGIN(test_prof_realloc) {
	tsd_t *tsd;
	int flags;
	void *p, *q;
	prof_info_t prof_info_p, prof_info_q;
	prof_cnt_t cnt_0, cnt_1, cnt_2, cnt_3;

	test_skip_if(!config_prof);

	tsd = tsd_fetch();
	/* Bypass the tcache so each transition is visible immediately. */
	flags = MALLOCX_TCACHE_NONE;

	prof_cnt_all(&cnt_0);
	p = mallocx(1024, flags);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	prof_info_get(tsd, p, NULL, &prof_info_p);
	/* A real tctx must differ from the reserved sentinel (uintptr_t)1U. */
	expect_ptr_ne(prof_info_p.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
	    "Expected valid tctx");
	prof_cnt_all(&cnt_1);
	expect_u64_eq(cnt_0.curobjs + 1, cnt_1.curobjs,
	    "Allocation should have increased sample size");

	q = rallocx(p, 2048, flags);
	/*
	 * Fix: validate the rallocx() result q.  The original re-checked p
	 * (already validated above) and misnamed the function ("rmallocx")
	 * in the failure message.
	 */
	expect_ptr_not_null(q, "Unexpected rallocx() failure");
	expect_ptr_ne(p, q, "Expected move");
	prof_info_get(tsd, q, NULL, &prof_info_q);
	expect_ptr_ne(prof_info_q.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
	    "Expected valid tctx");
	prof_cnt_all(&cnt_2);
	expect_u64_eq(cnt_1.curobjs, cnt_2.curobjs,
	    "Reallocation should not have changed sample size");

	dallocx(q, flags);
	prof_cnt_all(&cnt_3);
	expect_u64_eq(cnt_0.curobjs, cnt_3.curobjs,
	    "Sample size should have returned to base level");
}
TEST_END
int
main(void) {
	/*
	 * Profiling counters are process-global state, so run without
	 * allowing reentrant test execution.
	 */
	return test_no_reentrancy(
	    test_prof_realloc);
}
"""Dependency injector dynamic container unit tests for async resources."""
import unittest2 as unittest
# Runtime import to get asyncutils module
import os
# Absolute path of the directory one level above this test file; appended to
# sys.path so the shared "asyncutils" test helpers can be imported below.
_TOP_DIR = os.path.abspath(
    os.path.sep.join((
        os.path.dirname(__file__),
        '../',
    )),
)
import sys
sys.path.append(_TOP_DIR)
from asyncutils import AsyncTestCase
from dependency_injector import (
containers,
providers,
)
class AsyncResourcesTest(AsyncTestCase):
    """Container-wide init/shutdown handling of async Resource providers."""

    @unittest.skipIf(sys.version_info[:2] <= (3, 5), 'Async test')
    def test_async_init_resources(self):
        """init_resources()/shutdown_resources() must drive every async
        generator resource exactly once per cycle."""

        def _make_counting_initializer():
            # Build an async-generator initializer that records how many
            # times it was started and how many times it was finalized.
            async def _initializer():
                _initializer.init_counter += 1
                yield
                _initializer.shutdown_counter += 1

            _initializer.init_counter = 0
            _initializer.shutdown_counter = 0
            return _initializer

        _init1 = _make_counting_initializer()
        _init2 = _make_counting_initializer()

        class Container(containers.DeclarativeContainer):
            resource1 = providers.Resource(_init1)
            resource2 = providers.Resource(_init2)

        container = Container()

        def _assert_counters(inits, shutdowns):
            # Both resources are expected to advance in lock-step.
            self.assertEqual(_init1.init_counter, inits)
            self.assertEqual(_init1.shutdown_counter, shutdowns)
            self.assertEqual(_init2.init_counter, inits)
            self.assertEqual(_init2.shutdown_counter, shutdowns)

        # Nothing runs at declaration/instantiation time.
        _assert_counters(0, 0)

        # First cycle: initialize, then shut down.
        self._run(container.init_resources())
        _assert_counters(1, 0)
        self._run(container.shutdown_resources())
        _assert_counters(1, 1)

        # Resources must be restartable after a shutdown.
        self._run(container.init_resources())
        self._run(container.shutdown_resources())
        _assert_counters(2, 2)
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
//
// Code generated by generate-staticcheck; DO NOT EDIT.
//
//go:build bazel
package s1037
import (
util "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/staticcheck"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/simple"
)
// Analyzer is the staticcheck S1037 analyzer, looked up by name from the
// simple.Analyzers list at init time.
var Analyzer *analysis.Analyzer

func init() {
	for _, analyzer := range simple.Analyzers {
		if analyzer.Analyzer.Name == "S1037" {
			Analyzer = analyzer.Analyzer
			break
		}
	}
	// NOTE(review): if "S1037" were ever absent, Analyzer would be nil
	// when passed to MungeAnalyzer — assumed present in this generated
	// file; confirm against the staticcheck version in use.
	util.MungeAnalyzer(Analyzer)
}
# -*- coding: utf-8 -*-
#
# DataShape documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 12 15:38:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'DataShape'
copyright = u'2013, Continuum Analytics'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1-dev'
# The full version, including alpha/beta/rc tags.
release = '0.0.1-dev'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'DataShapedoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'DataShape.tex', u'DataShape Documentation',
     u'Continuum Analytics', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'datashape', u'DataShape Documentation',
     [u'Continuum Analytics'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'DataShape', u'DataShape Documentation',
     u'Continuum Analytics', 'DataShape', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
# Use the named-key form {name: (target, inventory)}: the positional
# {uri: None} style is deprecated and rejected by modern Sphinx releases,
# while named keys are accepted by all versions.
intersphinx_mapping = {'python': ('http://docs.python.org/', None)}
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Insecure client-server interoperability as a unit test."""
from concurrent import futures
import unittest
import grpc
from src.proto.grpc.testing import test_pb2
from tests.interop import _intraop_test_case
from tests.interop import methods
from tests.interop import server
class InsecureIntraopTest(_intraop_test_case.IntraopTestCase,
                          unittest.TestCase):
    """Runs the shared intraop interop cases over an insecure channel."""

    def setUp(self):
        """Start an in-process test server and connect a stub to it."""
        executor = futures.ThreadPoolExecutor(max_workers=10)
        server = grpc.server(executor)
        test_pb2.add_TestServiceServicer_to_server(methods.TestService(),
                                                   server)
        # Bind to an OS-assigned port, then point the stub at it.
        bound_port = server.add_insecure_port('[::]:0')
        server.start()
        self.server = server
        channel = grpc.insecure_channel('localhost:%d' % bound_port)
        self.stub = test_pb2.TestServiceStub(channel)
# Allow running this module directly as a standalone test script.
if __name__ == '__main__':
    unittest.main(verbosity=2)
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.identity.groups import constants
# URLs for the identity "Groups" panel, resolved once at import time.
# The args=[1] routes target the group with id "1" used throughout the tests.
GROUPS_INDEX_URL = reverse(constants.GROUPS_INDEX_URL)
GROUP_CREATE_URL = reverse(constants.GROUPS_CREATE_URL)
GROUP_UPDATE_URL = reverse(constants.GROUPS_UPDATE_URL, args=[1])
GROUP_MANAGE_URL = reverse(constants.GROUPS_MANAGE_URL, args=[1])
GROUP_ADD_MEMBER_URL = reverse(constants.GROUPS_ADD_MEMBER_URL, args=[1])
class GroupsViewTests(test.BaseAdminViewTests):
    """View tests for the identity Groups panel.

    Each test records keystone API expectations with mox (via the
    ``test.create_stubs`` decorator), then calls ``self.mox.ReplayAll()``
    before driving the view through the Django test client.  The stub
    calls must be recorded in the order the view issues them.
    """

    def _get_domain_id(self):
        # Domain selected in the session, or None when no domain context is set.
        return self.request.session.get('domain_context', None)

    def _get_groups(self, domain_id):
        # All fixture groups, or only those in domain_id when one is given.
        if not domain_id:
            groups = self.groups.list()
        else:
            groups = [group for group in self.groups.list()
                      if group.domain_id == domain_id]
        return groups

    @test.create_stubs({api.keystone: ('group_list',)})
    def test_index(self):
        """Index page lists groups and shows the edit/delete/create actions."""
        domain_id = self._get_domain_id()
        groups = self._get_groups(domain_id)
        api.keystone.group_list(IgnoreArg(), domain=domain_id) \
            .AndReturn(groups)
        self.mox.ReplayAll()
        res = self.client.get(GROUPS_INDEX_URL)
        self.assertTemplateUsed(res, constants.GROUPS_INDEX_VIEW_TEMPLATE)
        self.assertItemsEqual(res.context['table'].data, groups)
        if domain_id:
            # With a domain context, every listed group must belong to it.
            for group in res.context['table'].data:
                self.assertItemsEqual(group.domain_id, domain_id)
        self.assertContains(res, 'Create Group')
        self.assertContains(res, 'Edit')
        self.assertContains(res, 'Delete Group')

    def test_index_with_domain(self):
        # Same as test_index but with a domain context in the session.
        domain = self.domains.get(id="1")
        self.setSessionValues(domain_context=domain.id,
                              domain_context_name=domain.name)
        self.test_index()

    @test.create_stubs({api.keystone: ('group_list',
                                       'keystone_can_edit_group')})
    def test_index_with_keystone_can_edit_group_false(self):
        """All mutating actions are hidden when keystone is read-only."""
        domain_id = self._get_domain_id()
        groups = self._get_groups(domain_id)
        api.keystone.group_list(IgnoreArg(), domain=domain_id) \
            .AndReturn(groups)
        api.keystone.keystone_can_edit_group() \
            .MultipleTimes().AndReturn(False)
        self.mox.ReplayAll()
        res = self.client.get(GROUPS_INDEX_URL)
        self.assertTemplateUsed(res, constants.GROUPS_INDEX_VIEW_TEMPLATE)
        self.assertItemsEqual(res.context['table'].data, groups)
        self.assertNotContains(res, 'Create Group')
        self.assertNotContains(res, 'Edit')
        self.assertNotContains(res, 'Delete Group')

    @test.create_stubs({api.keystone: ('group_create', )})
    def test_create(self):
        """Posting the create form calls group_create with the form data."""
        domain_id = self._get_domain_id()
        group = self.groups.get(id="1")
        api.keystone.group_create(IsA(http.HttpRequest),
                                  description=group.description,
                                  domain_id=domain_id,
                                  name=group.name).AndReturn(group)
        self.mox.ReplayAll()
        formData = {'method': 'CreateGroupForm',
                    'name': group.name,
                    'description': group.description}
        res = self.client.post(GROUP_CREATE_URL, formData)
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)

    def test_create_with_domain(self):
        # Same as test_create but with a domain context in the session.
        domain = self.domains.get(id="1")
        self.setSessionValues(domain_context=domain.id,
                              domain_context_name=domain.name)
        self.test_create()

    @test.create_stubs({api.keystone: ('group_get',
                                       'group_update')})
    def test_update(self):
        """Posting the update form calls group_update with the new values."""
        group = self.groups.get(id="1")
        test_description = 'updated description'
        api.keystone.group_get(IsA(http.HttpRequest), '1').AndReturn(group)
        api.keystone.group_update(IsA(http.HttpRequest),
                                  description=test_description,
                                  group_id=group.id,
                                  name=group.name).AndReturn(None)
        self.mox.ReplayAll()
        formData = {'method': 'UpdateGroupForm',
                    'group_id': group.id,
                    'name': group.name,
                    'description': test_description}
        res = self.client.post(GROUP_UPDATE_URL, formData)
        self.assertNoFormErrors(res)

    @test.create_stubs({api.keystone: ('group_list',
                                       'group_delete')})
    def test_delete_group(self):
        """The table delete action calls group_delete and redirects to index."""
        domain_id = self._get_domain_id()
        group = self.groups.get(id="2")
        api.keystone.group_list(IgnoreArg(), domain=domain_id) \
            .AndReturn(self.groups.list())
        api.keystone.group_delete(IgnoreArg(), group.id)
        self.mox.ReplayAll()
        formData = {'action': 'groups__delete__%s' % group.id}
        res = self.client.post(GROUPS_INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, GROUPS_INDEX_URL)

    @test.create_stubs({api.keystone: ('group_get',
                                       'user_list',)})
    def test_manage(self):
        """Manage page lists the members of the group."""
        group = self.groups.get(id="1")
        group_members = self.users.list()
        api.keystone.group_get(IsA(http.HttpRequest), group.id).\
            AndReturn(group)
        api.keystone.user_list(IgnoreArg(),
                               group=group.id).\
            AndReturn(group_members)
        self.mox.ReplayAll()
        res = self.client.get(GROUP_MANAGE_URL)
        self.assertTemplateUsed(res, constants.GROUPS_MANAGE_VIEW_TEMPLATE)
        self.assertItemsEqual(res.context['table'].data, group_members)

    @test.create_stubs({api.keystone: ('user_list',
                                       'remove_group_user')})
    def test_remove_user(self):
        """The remove-member action calls remove_group_user for that user."""
        group = self.groups.get(id="1")
        user = self.users.get(id="2")
        api.keystone.user_list(IgnoreArg(),
                               group=group.id).\
            AndReturn(self.users.list())
        api.keystone.remove_group_user(IgnoreArg(),
                                       group_id=group.id,
                                       user_id=user.id)
        self.mox.ReplayAll()
        formData = {'action': 'group_members__removeGroupMember__%s' % user.id}
        res = self.client.post(GROUP_MANAGE_URL, formData)
        self.assertRedirectsNoFollow(res, GROUP_MANAGE_URL)
        self.assertMessageCount(success=1)

    @test.create_stubs({api.keystone: ('group_get',
                                       'user_list',
                                       'add_group_user')})
    def test_add_user(self):
        """The add-member action calls add_group_user for the chosen user."""
        group = self.groups.get(id="1")
        user = self.users.get(id="2")
        api.keystone.group_get(IsA(http.HttpRequest), group.id).\
            AndReturn(group)
        # Stub order: full user list for the domain, then current members
        # (all users except the first two).
        api.keystone.user_list(IgnoreArg(),
                               domain=group.domain_id).\
            AndReturn(self.users.list())
        api.keystone.user_list(IgnoreArg(),
                               group=group.id).\
            AndReturn(self.users.list()[2:])
        api.keystone.add_group_user(IgnoreArg(),
                                    group_id=group.id,
                                    user_id=user.id)
        self.mox.ReplayAll()
        formData = {'action': 'group_non_members__addMember__%s' % user.id}
        res = self.client.post(GROUP_ADD_MEMBER_URL, formData)
        self.assertRedirectsNoFollow(res, GROUP_MANAGE_URL)
        self.assertMessageCount(success=1)
"""
SIMBAD_report.py: CCP4 GUI Project
This library is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License
version 3, modified in accordance with the provisions of the
license to address the requirements of UK law.
You should have received a copy of the modified GNU Lesser General
Public License along with this library. If not, copies may be
downloaded from http://www.ccp4.ac.uk/ccp4license.php
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
"""
import os
import re
if __name__ == '__main__':
    # When run standalone (outside the ccp4i2 GUI), extend sys.path so the
    # ccp4i2 report/core modules and the bundled site-packages can be
    # imported from the CCP4 installation pointed to by $CCP4.
    import sys
    ccp4 = os.environ['CCP4']
    sys.path.append(os.path.join(ccp4, 'share', 'ccp4i2', 'report'))
    sys.path.append(os.path.join(ccp4, 'share', 'ccp4i2', 'core'))
    sys.path.append(os.path.join(ccp4, 'lib', 'python2.7', 'site-packages'))
from lxml import etree as ET
from report.CCP4ReportParser import Report
from simbad.util import SIMBAD_DIRNAME, SIMBAD_PYRVAPI_SHAREDIR
class SIMBAD_report(Report):
TASKNAME = 'SIMBAD'
RUNNING = True
def __init__(self, xmlnode=None, jobInfo={}, **kw):
Report.__init__(self, xmlnode=xmlnode, jobInfo=jobInfo, **kw)
repdir = os.path.join(jobInfo.get('fileroot', None), SIMBAD_DIRNAME, SIMBAD_PYRVAPI_SHAREDIR)
self.get_tables_as_elements(repdir)
#print("JMHT WRITING REPORT %s" % self.e1_dict)
self.addDiv(style='clear:both;')
for e1 in xmlnode:
# Process each tab separately
if e1.tag == 'tab':
self.report_section(e1, self)
return
def get_tables_as_elements(self, repdir):
"""Get tables as xmltree elements by parsing task.tsk file and .table files"""
try:
t1_list = list()
with open(os.path.join(repdir, 'task.tsk')) as istream:
#print("JMHT CHECKING task.tsk %s\n" % os.path.join(repdir, 'task.tsk'))
for s1 in re.findall('<table .+?</table>', istream.read(), re.S):
t1 = ET.fromstring(s1)
if len(t1): t1_list.append(t1)
for f1 in os.listdir(repdir):
if f1.endswith('.table'):
t1 = ET.parse(os.path.join(repdir, f1)).getroot()
if len(t1): t1_list.append(t1)
self.e1_dict = dict()
for t1 in t1_list:
tid = t1.get('id', None)
if tid and tid.endswith('-grid'):
tags = [t2.tag for t2 in t1]
if tags == ['thead', 'tbody']:
assert len(t1) == 2
e1 = t1
else:
tset = set(tags)
tag = tset.pop()
assert not tset and tag == 'tr'
e1 = ET.Element('table')
e1.append(t1)
e1.attrib.update(t1.attrib)
t1.attrib.clear()
t1.tag = 'tbody'
for e2 in e1.iter():
e2.attrib.pop('class', None)
e1.find('tbody').set('class', 'fancy')
self.e1_dict[tid[:-5]] = e1
if len(self.e1_dict.keys()): return True
return False
except Exception as e:
print "EXCEPTION: {0}".format(e)
return
def report_section(self, e1, r0, sort=False):
"""
"""
elems = list()
title = 'Untitled'
state = False
cou = 0
#print("Processing tag %s id %s\n%s" % (e1.tag, e1.get('id'),ET.tostring(e1)))
for e2 in e1:
row = e2.get('row', '_')
col = e2.get('col', '_')
if row.isdigit() : row = int(row)
if col.isdigit() : col = int(col)
if e2.get('id') or e2.tag == 'text':
elems.append([row, col, e2])
if e2.tag == 'table':
cou += 1
elif e2.tag == 'name':
title = e2.text.strip()
elif e2.tag == 'open':
state = e2.text.strip() == 'true'
if elems:
# strip out anything we can't deal with here
if any([x in title.lower() for x in ['downloads', 'log files', 'graph']]): return
#print "GOT ELEMS ",[g[2].get('id') for g in elems],title
r1 = r0.addFold(label=title, initiallyOpen=state)
#for row, col, e2 in sorted(grid):
if sorted: elems = sorted(elems)
for _,_,e2 in elems:
id2 = e2.get('id')
#print "PROCESSING ",id2, e2.tag
if e2.tag == 'section':
self.report_section(e2, r1)
elif e2.tag == 'table':
if id2 and id2 in self.e1_dict:
if id2 == 'mrbump_table':
r1.append("The table below details the Molecular Replacement results from MrBUMP")
if cou > 1:
r1.append(e2.findtext('legend').strip())
r1.append(ET.tostring(self.e1_dict[id2]))
elif e2.tag == 'text':
for t in e2.itertext(): r1.append(t)
else:
pass
if __name__ == '__main__':
    # Run with no arguments in the CCP4 job directory (the one that holds the SIMBAD directory)
    def test2():
        """Standalone smoke test: build the i2 report HTML from an rvapi XML."""
        import argparse
        parser = argparse.ArgumentParser(
            description='test of morda report generator',
            formatter_class=argparse.ArgumentDefaultsHelpFormatter
        )
        parser.add_argument(
            '-w', '--wrkdir',
            help='''a directory, containing the subdirectory
                  report/ generated by rvapi''',
            default='.',
            metavar='<dir>'
        )
        parser.add_argument(
            '-i', '--xml',
            help='input xml-file generated previously by rvapi',
            default='program.xml',
            metavar='<file>'
        )
        parser.add_argument(
            '-o', '--html',
            help='output html-file, a report file for i2',
            default='areport.html',
            metavar='<file>'
        )
        opt = parser.parse_args()
        xmlnode = ET.parse(opt.xml).getroot()
        jobInfo = dict(fileroot=os.path.abspath(opt.wrkdir))
        report = SIMBAD_report(xmlnode, jobInfo)
        if len(report.errReport):
            # Surface any errors collected by the report machinery.
            print 'ERROR REPORT'
            print report.errReport.report()
        htmlbase = 'file://' + \
            os.environ['CCP4'] + '/share/ccp4i2/docs/report_files'
        htmlstr = ET.tostring(report.as_etree(htmlBase=htmlbase))
        with open(opt.html, 'w') as ostream:
            # Python 2 print-chevron: write the HTML with one tag per line.
            print >> ostream, htmlstr.replace('><', '>\n<')
    test2()
# #from CCP4ReportParser import Report
# # class AMPLE_report(Report):
# # # Specify which gui task and/or pluginscript this applies to
# # TASKNAME = 'AMPLE'
# # RUNNING = False
# # def __init__(self,xmlnode=None,jobInfo={},jobStatus=None,**kw):
# # Report. __init__(self,xmlnode=xmlnode,jobInfo=jobInfo, jobStatus=jobStatus, **kw)
# # clearingDiv = self.addDiv(style="clear:both;")
# # self.addDefaultReport(self)
# #
# # def addDefaultReport(self, parent=None):
# # if parent is None: parent=self
# # if len(self.xmlnode.xpath("LogText")) > 0:
# # newFold = parent.addFold(label="Log text", initiallyOpen=True)
# # newFold.addPre(text = self.xmlnode.xpath("LogText")[0].text) | unknown | codeparrot/codeparrot-clean | ||
#
# Copyright 2009, 2013 Red Hat, Inc.
# Cole Robinson <crobinso@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
from .device import VirtualDevice
from .xmlbuilder import XMLProperty
class VirtualInputDevice(VirtualDevice):
    """Parameters for an <input> device (mouse/tablet/keyboard) in the
    libvirt domain XML.
    """
    virtual_device_type = VirtualDevice.VIRTUAL_DEV_INPUT

    # Values for the <input type=.../> attribute.
    TYPE_MOUSE = "mouse"
    TYPE_TABLET = "tablet"
    TYPE_KEYBOARD = "keyboard"
    # Sentinel meaning "no explicit value; let the default callback decide".
    TYPE_DEFAULT = "default"
    TYPES = [TYPE_MOUSE, TYPE_TABLET, TYPE_KEYBOARD, TYPE_DEFAULT]

    # Values for the <input bus=.../> attribute.
    BUS_PS2 = "ps2"
    BUS_USB = "usb"
    BUS_XEN = "xen"
    BUS_DEFAULT = "default"
    BUSES = [BUS_PS2, BUS_USB, BUS_XEN, BUS_DEFAULT]

    # Maps to the type= attribute; defaults to "mouse" when left unset.
    type = XMLProperty("./@type",
                       default_cb=lambda s: s.TYPE_MOUSE,
                       default_name=TYPE_DEFAULT)
    def _default_bus(self):
        # Tablets get the usb bus, xen connections the xen bus,
        # everything else falls back to ps2.
        if self.type == self.TYPE_TABLET:
            return self.BUS_USB
        if self.conn.is_xen():
            return self.BUS_XEN
        return self.BUS_PS2
    bus = XMLProperty("./@bus",
                      default_cb=_default_bus,
                      default_name=BUS_DEFAULT)

# Register this class with the VirtualDevice factory machinery.
VirtualInputDevice.register_type()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Command-line wrapper for the tracetool machinery.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
import sys
import getopt
from tracetool import error_write, out
import tracetool.backend
import tracetool.format
# Name the script was invoked as; filled in by main() and used in help output.
_SCRIPT = ""
def error_opt(msg = None):
    # Print usage information to stderr, preceded by an optional error
    # message.  Exits with status 1 when msg is given, 0 otherwise
    # (i.e. a plain --help request).
    if msg is not None:
        error_write("Error: " + msg + "\n")
    # Build the backend/format listings dynamically from the registries.
    backend_descr = "\n".join([ " %-15s %s" % (n, d)
                                for n,d in tracetool.backend.get_list() ])
    format_descr = "\n".join([ " %-15s %s" % (n, d)
                               for n,d in tracetool.format.get_list() ])
    error_write("""\
Usage: %(script)s --format=<format> --backend=<backend> [<options>]
Backends:
%(backends)s
Formats:
%(formats)s
Options:
    --help This help message.
    --list-backends Print list of available backends.
    --check-backend Check if the given backend is valid.
    --binary <path> Full path to QEMU binary.
    --target-type <type> QEMU emulator target type ('system' or 'user').
    --target-name <name> QEMU emulator target name.
    --probe-prefix <prefix> Prefix for dtrace probe names
    (default: qemu-<target-type>-<target-name>).\
""" % {
    "script" : _SCRIPT,
    "backends" : backend_descr,
    "formats" : format_descr,
    })
    if msg is None:
        sys.exit(0)
    else:
        sys.exit(1)
def main(args):
    """Command-line entry point.

    args is an argv-style list; args[0] becomes the script name shown in
    help output.  Reads trace-event definitions from stdin and writes the
    generated code to stdout.
    """
    global _SCRIPT
    _SCRIPT = args[0]
    long_opts = [ "backend=", "format=", "help", "list-backends", "check-backend" ]
    long_opts += [ "binary=", "target-type=", "target-name=", "probe-prefix=" ]
    try:
        opts, args = getopt.getopt(args[1:], "", long_opts)
    except getopt.GetoptError as err:
        error_opt(str(err))
    check_backend = False
    arg_backend = ""
    arg_format = ""
    binary = None
    target_type = None
    target_name = None
    probe_prefix = None
    for opt, arg in opts:
        if opt == "--help":
            error_opt()
        elif opt == "--backend":
            arg_backend = arg
        elif opt == "--format":
            arg_format = arg
        elif opt == "--list-backends":
            # Print the public backends and stop; nothing is generated.
            public_backends = tracetool.backend.get_list(only_public = True)
            out(", ".join([ b for b,_ in public_backends ]))
            sys.exit(0)
        elif opt == "--check-backend":
            check_backend = True
        elif opt == "--binary":
            binary = arg
        elif opt == '--target-type':
            target_type = arg
        elif opt == '--target-name':
            target_name = arg
        elif opt == '--probe-prefix':
            probe_prefix = arg
        else:
            error_opt("unhandled option: %s" % opt)
    # arg_backend is initialized to "" and never None, so the original
    # "is None" comparison could never fire; test for emptiness instead.
    if not arg_backend:
        error_opt("backend not set")
    if check_backend:
        # Exit status communicates validity; no output is produced.
        if tracetool.backend.exists(arg_backend):
            sys.exit(0)
        else:
            sys.exit(1)
    if arg_format == "stap":
        # The SystemTAP tapset generator needs a binary path and either an
        # explicit probe prefix or enough information to derive one.
        if binary is None:
            error_opt("--binary is required for SystemTAP tapset generator")
        if probe_prefix is None and target_type is None:
            error_opt("--target-type is required for SystemTAP tapset generator")
        if probe_prefix is None and target_name is None:
            error_opt("--target-name is required for SystemTAP tapset generator")
        if probe_prefix is None:
            probe_prefix = ".".join([ "qemu", target_type, target_name ])
    try:
        tracetool.generate(sys.stdin, arg_format, arg_backend,
                           binary = binary, probe_prefix = probe_prefix)
    except tracetool.TracetoolError as e:
        error_opt(str(e))
# Allow direct execution: forward the full argv (including the script name).
if __name__ == "__main__":
    main(sys.argv)
groups:
- name: yolo
rules:
- record: strawberry{flavor="sweet"}
expr: 1 | unknown | github | https://github.com/prometheus/prometheus | model/rulefmt/testdata/invalid_record_name.bad.yaml |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from measurements import smoothness
from metrics import power
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core import wpr_modes
from telemetry.page import page
from telemetry.unittest_util import options_for_unittests
from telemetry.unittest_util import page_test_test_case
class FakeTracingController(object):
  """Test double for the platform tracing controller.

  Records the category filter handed to Start() so tests can assert on
  the tracing configuration a measurement requested.
  """

  def __init__(self):
    # No trace has been requested yet.
    self.category_filter = None

  def Start(self, _options, category_filter, _timeout):
    # Capture the filter; no actual tracing takes place.
    self.category_filter = category_filter
class FakePlatform(object):
  """Minimal platform double exposing only what these tests touch."""

  def CanMonitorPower(self):
    # Power monitoring is always reported as unavailable in unit tests.
    return False

  def __init__(self):
    self.tracing_controller = FakeTracingController()
class FakeBrowser(object):
  # Browser double; only the .platform attribute is consulted by the tests.
  def __init__(self):
    self.platform = FakePlatform()
class AnimatedPage(page.Page):
  # Local file:// page whose interaction is a short idle wait with no
  # scroll or gesture; used by testSmoothnessForPageWithNoGesture below.
  def __init__(self, page_set):
    super(AnimatedPage, self).__init__(
      url='file://animated_page.html',
      page_set=page_set, base_dir=page_set.base_dir)
  def RunPageInteractions(self, action_runner):
    # Just wait 0.2s; the page itself provides the animation.
    action_runner.Wait(.2)
class FakeTab(object):
  """Tab double: provides the browser/platform chain and swallows JS."""

  def ExecuteJavaScript(self, js):
    # Deliberately a no-op; callers only need the call to succeed.
    pass

  def __init__(self):
    self.browser = FakeBrowser()
class SmoothnessUnitTest(page_test_test_case.PageTestTestCase):
  """Smoke test for smoothness measurement
  Runs smoothness measurement on a simple page and verifies
  that all metrics were added to the results. The test is purely functional,
  i.e. it only checks if the metrics are present and non-zero.
  """
  def testSyntheticDelayConfiguration(self):
    # Per-category synthetic delays on the page must be translated into
    # DELAY(...) entries of the tracing category filter.
    test_page = page.Page('http://dummy', None)
    test_page.synthetic_delays = {
        'cc.BeginMainFrame': { 'target_duration': 0.012 },
        'cc.DrawAndSwap': { 'target_duration': 0.012, 'mode': 'alternating' },
        'gpu.PresentingFrame': { 'target_duration': 0.012 }
    }
    tab = FakeTab()
    measurement = smoothness.Smoothness()
    measurement.WillStartBrowser(tab.browser.platform)
    measurement.WillNavigateToPage(test_page, tab)
    measurement.WillRunActions(test_page, tab)
    expected_category_filter = set([
        'DELAY(cc.BeginMainFrame;0.012000;static)',
        'DELAY(cc.DrawAndSwap;0.012000;alternating)',
        'DELAY(gpu.PresentingFrame;0.012000;static)',
        'benchmark'
    ])
    tracing_controller = tab.browser.platform.tracing_controller
    actual_category_filter = (
        tracing_controller.category_filter.included_categories)
    # FIXME: Put blink.console into the expected above and remove these two
    # remove entries when the blink.console change has rolled into chromium.
    actual_category_filter.remove('webkit.console')
    actual_category_filter.remove('blink.console')
    # Log both sides before asserting so failures are easy to diagnose.
    if expected_category_filter != actual_category_filter:
      sys.stderr.write("Expected category filter: %s\n" %
                       repr(expected_category_filter))
      sys.stderr.write("Actual category filter filter: %s\n" %
                       repr(actual_category_filter))
    self.assertEquals(expected_category_filter, actual_category_filter)

  def setUp(self):
    # Run against live pages; WPR archive replay is disabled.
    self._options = options_for_unittests.GetCopy()
    self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF

  def testSmoothness(self):
    ps = self.CreatePageSetFromFileInUnittestDataDir('scrollable_page.html')
    measurement = smoothness.Smoothness()
    results = self.RunMeasurement(measurement, ps, options=self._options)
    self.assertEquals(0, len(results.failures))
    frame_times = results.FindAllPageSpecificValuesNamed('frame_times')
    self.assertEquals(len(frame_times), 1)
    self.assertGreater(frame_times[0].GetRepresentativeNumber(), 0)
    mean_frame_time = results.FindAllPageSpecificValuesNamed('mean_frame_time')
    self.assertEquals(len(mean_frame_time), 1)
    self.assertGreater(mean_frame_time[0].GetRepresentativeNumber(), 0)
    frame_time_discrepancy = results.FindAllPageSpecificValuesNamed(
        'frame_time_discrepancy')
    self.assertEquals(len(frame_time_discrepancy), 1)
    self.assertGreater(frame_time_discrepancy[0].GetRepresentativeNumber(), 0)
    percentage_smooth = results.FindAllPageSpecificValuesNamed(
        'percentage_smooth')
    self.assertEquals(len(percentage_smooth), 1)
    self.assertGreaterEqual(percentage_smooth[0].GetRepresentativeNumber(), 0)
    # Input-event latency is only reported on some configurations, so it
    # is checked conditionally.
    mean_input_event_latency = results.FindAllPageSpecificValuesNamed(
        'mean_input_event_latency')
    if mean_input_event_latency:
      self.assertEquals(len(mean_input_event_latency), 1)
      self.assertGreater(
          mean_input_event_latency[0].GetRepresentativeNumber(), 0)
  @decorators.Enabled('android') # SurfaceFlinger is android-only
  def testSmoothnessSurfaceFlingerMetricsCalculated(self):
    ps = self.CreatePageSetFromFileInUnittestDataDir('scrollable_page.html')
    measurement = smoothness.Smoothness()
    results = self.RunMeasurement(measurement, ps, options=self._options)
    self.assertEquals(0, len(results.failures))
    avg_surface_fps = results.FindAllPageSpecificValuesNamed('avg_surface_fps')
    self.assertEquals(1, len(avg_surface_fps))
    # BUGFIX: GetRepresentativeNumber was previously not *called* here (and
    # below) -- the bound method object was compared, making the asserts
    # vacuous.  The missing () have been added.
    self.assertGreater(avg_surface_fps[0].GetRepresentativeNumber(), 0)
    jank_count = results.FindAllPageSpecificValuesNamed('jank_count')
    self.assertEquals(1, len(jank_count))
    self.assertGreater(jank_count[0].GetRepresentativeNumber(), -1)
    max_frame_delay = results.FindAllPageSpecificValuesNamed('max_frame_delay')
    self.assertEquals(1, len(max_frame_delay))
    self.assertGreater(max_frame_delay[0].GetRepresentativeNumber(), 0)
    frame_lengths = results.FindAllPageSpecificValuesNamed('frame_lengths')
    self.assertEquals(1, len(frame_lengths))
    self.assertGreater(frame_lengths[0].GetRepresentativeNumber(), 0)
  @decorators.Disabled('mac', 'chromeos') # http://crbug.com/403903
  def testSmoothnessForPageWithNoGesture(self):
    ps = self.CreateEmptyPageSet()
    ps.AddUserStory(AnimatedPage(ps))
    measurement = smoothness.Smoothness()
    results = self.RunMeasurement(measurement, ps, options=self._options)
    self.assertEquals(0, len(results.failures))
    percentage_smooth = results.FindAllPageSpecificValuesNamed(
        'percentage_smooth')
    self.assertEquals(len(percentage_smooth), 1)
    self.assertGreaterEqual(percentage_smooth[0].GetRepresentativeNumber(), 0)

  def testCleanUpTrace(self):
    self.TestTracingCleanedUp(smoothness.Smoothness, self._options)

  def testCleanUpPowerMetric(self):
    # Verifies the power metric is started and stopped even when the page
    # interaction raises.
    class FailPage(page.Page):
      def __init__(self, page_set):
        # pylint: disable=bad-super-call
        super(FailPage, self).__init__(
          url='file://blank.html',
          page_set=page_set, base_dir=page_set.base_dir)
      def RunPageInteractions(self, _):
        raise exceptions.IntentionalException
    class FakePowerMetric(power.PowerMetric):
      start_called = False
      # BUGFIX: stop_called previously defaulted to True, which made the
      # final assertTrue below vacuous; it must start out False so the test
      # actually proves Stop() ran.
      stop_called = False
      def Start(self, _1, _2):
        self.start_called = True
      def Stop(self, _1, _2):
        self.stop_called = True
    ps = self.CreateEmptyPageSet()
    ps.AddUserStory(FailPage(ps))
    class BuggyMeasurement(smoothness.Smoothness):
      fake_power = None
      # Inject fake power metric.
      def WillStartBrowser(self, platform):
        self.fake_power = self._power_metric = FakePowerMetric(platform)
    measurement = BuggyMeasurement()
    try:
      self.RunMeasurement(measurement, ps)
    except exceptions.IntentionalException:
      pass
    self.assertTrue(measurement.fake_power.start_called)
    self.assertTrue(measurement.fake_power.stop_called)
# Sphinx configuration for the WaterButler documentation build.
import os
import sys

# Make the repository root importable *before* importing waterbutler, so
# autodoc and the version lookup use the local source tree rather than an
# installed copy.  (The insert previously ran after the import, which
# defeated its purpose.)
sys.path.insert(0, os.path.abspath(".."))

from waterbutler.version import __version__

master_doc = "index"
project = "WaterButler"
copyright = "2018, Center For Open Science"
version = release = __version__
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.coverage",
    "sphinx.ext.extlinks",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
]
primary_domain = 'py'
default_role = 'py:obj'
autodoc_member_order = "bysource"
autoclass_content = "both"
# Without this line sphinx includes a copy of object.__init__'s docstring
# on any class that doesn't define __init__.
# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
autodoc_docstring_signature = False
coverage_skip_undoc_in_source = True
coverage_ignore_modules = []
# I wish this could go in a per-module file...
coverage_ignore_classes = []
coverage_ignore_functions = []
html_favicon = 'favicon.ico'
# HACK: sphinx has limited support for substitutions with the |version|
# variable, but there doesn't appear to be any way to use this in a link
# target.
# http://stackoverflow.com/questions/1227037/substitutions-inside-links-in-rest-sphinx
# The extlink extension can be used to do link substitutions, but it requires a
# portion of the url to be literally contained in the document. Therefore,
# this link must be referenced as :current_tarball:`z`
extlinks = {}
intersphinx_mapping = {
    'python': ('https://docs.python.org/3.5', None),
    'tornado': ('http://www.tornadoweb.org/en/stable/', None),
    'aiohttp': ('https://aiohttp.readthedocs.org/en/v0.18.2/', None),
}
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for merge layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
class MergeLayersTest(test.TestCase):
  """Functional coverage for the keras merge layers.

  Each test builds a small functional-API model around one merge op,
  checks the symbolic output shape, then runs predict() and compares the
  result against the equivalent numpy computation.
  """

  def test_merge_add(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      i3 = keras.layers.Input(shape=(4, 5))
      o = keras.layers.add([i1, i2, i3])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      model = keras.models.Model([i1, i2, i3], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      x3 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2, x3])
      self.assertEqual(out.shape, (2, 4, 5))
      self.assertAllClose(out, x1 + x2 + x3, atol=1e-4)
      # test masking: Add must propagate a mask when one input is masked.
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      m1 = keras.layers.Masking()(i1)
      layer = keras.layers.Add()
      o = layer([m1, i2])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      mask = layer.output_mask
      self.assertListEqual(mask.get_shape().as_list(), [None, 4])

  def test_merge_elementwise_errors(self):
    # Mismatched shapes and non-list / single-element inputs are rejected.
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 6))
    with self.assertRaises(ValueError):
      keras.layers.add([i1, i2])
    with self.assertRaises(ValueError):
      keras.layers.add(i1)
    with self.assertRaises(ValueError):
      keras.layers.add([i1])

  def test_merge_multiply(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      i3 = keras.layers.Input(shape=(4, 5))
      o = keras.layers.multiply([i1, i2, i3])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      model = keras.models.Model([i1, i2, i3], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      x3 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2, x3])
      self.assertEqual(out.shape, (2, 4, 5))
      self.assertAllClose(out, x1 * x2 * x3, atol=1e-4)

  def test_merge_average(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      o = keras.layers.average([i1, i2])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      model = keras.models.Model([i1, i2], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 4, 5))
      self.assertAllClose(out, 0.5 * (x1 + x2), atol=1e-4)

  def test_merge_maximum(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      o = keras.layers.maximum([i1, i2])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      model = keras.models.Model([i1, i2], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 4, 5))
      self.assertAllClose(out, np.maximum(x1, x2), atol=1e-4)

  def test_merge_concatenate(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      # Concatenating along axis 1 doubles that dimension (4 -> 8).
      o = keras.layers.concatenate([i1, i2], axis=1)
      self.assertListEqual(o.get_shape().as_list(), [None, 8, 5])
      model = keras.models.Model([i1, i2], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 8, 5))
      self.assertAllClose(out, np.concatenate([x1, x2], axis=1), atol=1e-4)

  def test_concatenate_errors(self):
    # Off-axis shape mismatches and non-list inputs must raise.
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(3, 5))
    with self.assertRaises(ValueError):
      keras.layers.concatenate([i1, i2], axis=-1)
    with self.assertRaises(ValueError):
      keras.layers.concatenate(i1, axis=-1)
    with self.assertRaises(ValueError):
      keras.layers.concatenate([i1], axis=-1)

  def test_merge_dot(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4,))
      i2 = keras.layers.Input(shape=(4,))
      o = keras.layers.dot([i1, i2], axes=1)
      self.assertListEqual(o.get_shape().as_list(), [None, 1])
      model = keras.models.Model([i1, i2], o)
      x1 = np.random.random((2, 4))
      x2 = np.random.random((2, 4))
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 1))
      # Dot over axis 1 is a per-sample inner product.
      expected = np.zeros((2, 1))
      expected[0, 0] = np.dot(x1[0], x2[0])
      expected[1, 0] = np.dot(x1[1], x2[1])
      self.assertAllClose(out, expected, atol=1e-4)
      # Test with negative tuple of axes.
      o = keras.layers.dot([i1, i2], axes=(-1, -1))
      self.assertListEqual(o.get_shape().as_list(), [None, 1])
      model = keras.models.Model([i1, i2], o)
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 1))
      self.assertAllClose(out, expected, atol=1e-4)
      # test _compute_output_shape
      layer = keras.layers.Dot(axes=-1)
      self.assertEqual(layer._compute_output_shape([(4, 5), (4, 5)]), (4, 1))

  def test_dot_errors(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 6))
    i3 = keras.layers.Input(shape=(4, 6))
    with self.assertRaises(ValueError):
      keras.layers.dot([i1, i2], axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot(i1, axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot([i1], axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot([i1, i2, i3], axes=-1)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v0alpha1",
"metadata": {
"name": "test-v2alpha1-complete",
"labels": {
"category": "test"
},
"annotations": {
"description": "Complete example of v2alpha1 dashboard features"
}
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": false,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
},
{
"datasource": {
"type": "prometheus",
"uid": "gdev-prometheus"
},
"enable": true,
"hide": false,
"iconColor": "yellow",
"name": "Prometheus Annotations",
"target": {
"expr": "changes(process_start_time_seconds[1m])",
"refId": "Anno"
}
}
]
},
"description": "This dashboard demonstrates all features that need to be converted from v2alpha1 to v2beta1",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 2,
"liveNow": true,
"panels": [
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 2,
"title": "Conditional Row",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "gdev-prometheus"
},
"description": "This panel demonstrates conditional rendering features",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [
{
"options": {
"0": {
"color": "red",
"text": "Down"
},
"1": {
"color": "green",
"text": "Up"
}
},
"type": "value"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": 0
},
{
"color": "green",
"value": 1
}
]
}
}
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"textMode": "auto"
},
"pluginVersion": "12.1.0-pre",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "gdev-prometheus"
},
"expr": "up{job=\"grafana\"}",
"refId": "A"
}
],
"title": "Panel with Conditional Rendering",
"transformations": [
{
"id": "reduce",
"options": {
"includeTimeField": false,
"mode": "reduceFields",
"reducers": [
"mean"
]
}
}
],
"type": "stat"
}
],
"preload": true,
"refresh": "10s",
"schemaVersion": 42,
"tags": [
"test",
"example",
"migration"
],
"templating": {
"list": [
{
"current": {
"text": "false",
"value": "false"
},
"description": "Toggle feature on/off",
"hide": 0,
"label": "Enable Feature",
"name": "switch_var",
"options": [
{
"selected": false,
"text": "true",
"value": "true"
},
{
"selected": true,
"text": "false",
"value": "false"
}
],
"query": "",
"skipUrlSync": false,
"type": "switch"
},
{
"allowCustomValue": false,
"current": {
"text": "All",
"value": [
"$__all"
]
},
"datasource": {
"type": "prometheus",
"uid": "gdev-prometheus"
},
"definition": "up",
"description": "Shows all up metrics",
"hide": 0,
"includeAll": true,
"label": "Prometheus Query",
"multi": true,
"name": "prometheus_query",
"options": [],
"query": {
"expr": "up"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"staticOptions": [
{
"text": "All",
"value": "$__all"
},
{
"text": "host1",
"value": "host1"
}
],
"staticOptionsOrder": "before",
"type": "query"
},
{
"current": {
"text": "server1",
"value": "server1"
},
"description": "A simple text variable",
"hide": 0,
"label": "Text Variable",
"name": "text_var",
"query": "server1,server2,server3",
"skipUrlSync": false,
"type": "textbox"
},
{
"current": {
"text": "production",
"value": "production"
},
"description": "A constant value",
"hide": 2,
"label": "Constant",
"name": "constant_var",
"query": "production",
"skipUrlSync": true,
"type": "constant"
},
{
"allowCustomValue": false,
"current": {
"text": "gdev-prometheus",
"value": "gdev-prometheus"
},
"description": "Select a datasource",
"hide": 0,
"includeAll": false,
"label": "Datasource",
"multi": false,
"name": "ds_var",
"options": [
{
"text": "gdev-prometheus",
"value": "gdev-prometheus"
}
],
"query": "prometheus",
"refresh": 0,
"regex": "/^gdev-/",
"skipUrlSync": false,
"type": "datasource"
},
{
"auto": true,
"auto_count": 30,
"auto_min": "10s",
"current": {
"text": "5m",
"value": "5m"
},
"description": "Time interval selection",
"hide": 0,
"label": "Interval",
"name": "interval",
"options": [
{
"text": "1m",
"value": "1m"
},
{
"text": "5m",
"value": "5m"
},
{
"text": "10m",
"value": "10m"
},
{
"text": "30m",
"value": "30m"
},
{
"text": "1h",
"value": "1h"
},
{
"text": "6h",
"value": "6h"
},
{
"text": "12h",
"value": "12h"
},
{
"text": "1d",
"value": "1d"
}
],
"query": "1m,5m,10m,30m,1h,6h,12h,1d",
"skipUrlSync": false,
"type": "interval"
},
{
"allValue": "*",
"allowCustomValue": true,
"current": {
"text": [
"Production"
],
"value": [
"prod"
]
},
"description": "Custom multi-value variable",
"hide": 0,
"includeAll": true,
"label": "Custom Options",
"multi": true,
"name": "custom_var",
"options": [
{
"text": "Production",
"value": "prod"
},
{
"text": "Staging",
"value": "staging"
},
{
"text": "Development",
"value": "dev"
}
],
"query": "prod : Production, staging : Staging, dev : Development",
"skipUrlSync": false,
"type": "custom"
},
{
"current": {
"text": "instance",
"value": "instance"
},
"datasource": {
"type": "prometheus",
"uid": "gdev-prometheus"
},
"description": "Group metrics by label",
"hide": 0,
"label": "Group By",
"multi": false,
"name": "group_by",
"options": [],
"skipUrlSync": false,
"type": "groupby"
},
{
"allowCustomValue": false,
"baseFilters": [
{
"condition": "AND",
"key": "job",
"operator": "=",
"value": "grafana"
}
],
"datasource": {
"type": "prometheus",
"uid": "gdev-prometheus"
},
"defaultKeys": [
{
"expandable": true,
"text": "job",
"value": "job"
},
{
"expandable": true,
"text": "instance",
"value": "instance"
}
],
"hide": 0,
"label": "Filters",
"name": "filters",
"skipUrlSync": false,
"type": "adhoc"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "browser",
"title": "Test: Complete V2alpha1 Dashboard Example",
"weekStart": "monday"
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v2beta1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/v2beta1.complete.v0alpha1.json |
steps:
- label: pr-upgrade
command: ".buildkite/scripts/run-pr-upgrade-tests.sh"
agents:
image: "docker.elastic.co/ci-agent-images/eck-region/buildkite-agent:1.15"
memory: "4G" | unknown | github | https://github.com/elastic/elasticsearch | .buildkite/pipelines/pull-request/pr-upgrade.yml |
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).
//! Types for individual calendars
pub(crate) mod buddhist;
pub(crate) mod chinese;
pub(crate) mod chinese_based;
pub(crate) mod coptic;
pub(crate) mod dangi;
pub(crate) mod ethiopian;
pub(crate) mod gregorian;
pub(crate) mod hebrew;
pub(crate) mod hijri;
pub(crate) mod indian;
pub(crate) mod iso;
pub(crate) mod japanese;
pub(crate) mod julian;
pub(crate) mod persian;
pub(crate) mod roc;
pub use buddhist::Buddhist;
pub use chinese::Chinese;
pub use coptic::Coptic;
pub use dangi::Dangi;
pub use ethiopian::{Ethiopian, EthiopianEraStyle};
pub use gregorian::Gregorian;
pub use hebrew::Hebrew;
pub use hijri::{
HijriSimulated, HijriTabular, HijriTabularEpoch, HijriTabularLeapYears, HijriUmmAlQura,
};
pub use indian::Indian;
pub use iso::Iso;
pub use japanese::{Japanese, JapaneseExtended};
pub use julian::Julian;
pub use persian::Persian;
pub use roc::Roc;
pub use crate::any_calendar::{AnyCalendar, AnyCalendarKind};
/// Internal scaffolding types
///
/// These exist so that public traits can be effectively sealed: they remain
/// visible and nameable downstream, but implementing them outside this crate
/// is explicitly unsupported.
pub mod scaffold {
    /// Trait marking other traits that are considered unstable and should not generally be
    /// implemented outside of the calendar crate.
    ///
    /// <div class="stab unstable">
    /// 🚧 This trait is considered unstable; it may change at any time, in breaking or non-breaking ways,
    /// including in SemVer minor releases. Do not implement this trait in userland unless you are prepared for things to occasionally break.
    /// </div>
    pub trait UnstableSealed {}
}
# Copyright 2020-2021 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Refer to the README and COPYING files for full details of the license.
from collections import namedtuple
from contextlib import contextmanager
import os
import stat
import tempfile
import time
import pytest
from vdsm.common import exception
from vdsm.common import password
from vdsm.supervdsm_api import virt
from vdsm.virt import filedata
# Core
class VariableData(filedata._FileSystemData):
    """In-memory _FileSystemData double.

    Keeps the decoded payload in ``self.data`` instead of touching the
    file system, so encode/decode behavior can be tested in isolation.
    """
    def __init__(self):
        # Path is never accessed; compression is off so stored bytes
        # round-trip unchanged.
        super().__init__('/does-not-exist', compress=False)
        self.data = None

    def _retrieve(self, last_modified=-1):
        # NOTE(review): mirrors _FileSystemData's retrieval hook -- appears
        # to return the raw (already decoded) payload; confirm against the
        # base class contract.
        return self.data

    def _store(self, data):
        self.data = data
def test_invalid_data():
    """Malformed base64 payloads must be rejected with ExternalDataFailed."""
    data = VariableData()
    bad_payloads = [
        '!@#$%^&*()',  # Not base64
        'aaa!ccc',     # Mixed
        '=aaaa',       # Padding character at the beginning
    ]
    for payload in bad_payloads:
        with pytest.raises(exception.ExternalDataFailed):
            data.store(payload)
def test_invalid_compression():
    """Bad compression markers or non-bzip2 content must be rejected."""
    data = VariableData()
    for payload in (
        '=X=aaaa',  # Unknown format
        '=0=aaaa',  # Content is not bzip2
    ):
        with pytest.raises(exception.ExternalDataFailed):
            data.store(payload)
def test_legacy_data():
    # Older producers emitted base64 with embedded newlines; store() must
    # still decode such payloads.
    data = VariableData()
    # Data with line ends
    data.store('''
    MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx
    MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE=
    ''')
    # The payload decodes to a run of ASCII '1' characters.
    assert data.data == b'11111111111111111111111111111111111111111111' + \
        b'111111111111111111111111111111111111111111111111111'
def test_compressed():
    """A valid '=0='-prefixed bzip2 payload is decompressed on store()."""
    data = VariableData()
    encoded = '=0=QlpoOTFBWSZTWU7wmXMAAAEBADgAIAAhsQZiEji7kinChIJ3hMuY'
    data.store(encoded)
    assert data.data == b'abcabcabc'
# File data
FILE_DATA = 'hello'        # payload written to the primary test file
FILE_DATA_2 = 'world'      # payload for the nested file in directory tests
ENCODED_DATA = 'aGVsbG8='  # base64(FILE_DATA), uncompressed form
# Compressed ('=0=' bzip2) encoding of FILE_DATA.
ENCODED_DATA_BZ2 = \
    '=0=QlpoOTFBWSZTWRkxZT0AAACBAAJEoAAhmmgzTQczi7kinChIDJiynoA='
# rwxr-x--x: mode the directory round-trip test expects to survive.
DIRECTORY_MODE = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IXOTH
UUID = '12345678-1234-1234-1234-1234567890ab'
def test_file_data_read():
    """retrieve() returns the base64 encoding of the file's content."""
    with tempfile.TemporaryDirectory() as d:
        path = os.path.join(d, 'test')
        # Use a context manager so the file is flushed and closed
        # deterministically instead of relying on GC finalization.
        with open(path, 'w') as f:
            f.write(FILE_DATA)
        data = filedata.FileData(path, compress=False)
        assert data.retrieve() == ENCODED_DATA
def test_file_data_write():
    """store() decodes base64 input and writes it to the backing file."""
    with tempfile.TemporaryDirectory() as d:
        path = os.path.join(d, 'test')
        data = filedata.FileData(path)
        data.store(ENCODED_DATA)
        # Read through a context manager so the handle is closed promptly.
        with open(path) as f:
            assert f.read() == FILE_DATA
def test_file_data_modified():
    """last_modified() reflects the backing file's mtime."""
    with tempfile.TemporaryDirectory() as d:
        path = os.path.join(d, 'test')
        # Close explicitly so the mtime is final before it is compared.
        with open(path, 'w') as f:
            f.write(FILE_DATA)
        data = filedata.FileData(path, compress=False)
        assert data.last_modified() == os.stat(path).st_mtime
@pytest.mark.parametrize("last_modified, is_none", [
    pytest.param(
        0,
        False,
        id="forced read"
    ),
    pytest.param(
        time.time() - 0.1,  # file mtime may differ from system time a bit
        False,
        id="new data"
    ),
    pytest.param(
        time.time() + 1000,
        False,
        id="future time"
    ),
    pytest.param(
        None,
        True,
        id="current data"
    ),
])
def test_file_data_conditional_read(last_modified, is_none):
    # retrieve(last_modified=...) returns None only when the file has not
    # changed relative to the given timestamp; the None parameter case asks
    # with the file's own mtime, i.e. "no new data".
    with tempfile.TemporaryDirectory() as d:
        path = os.path.join(d, 'test')
        open(path, 'w').write(FILE_DATA)
        data = filedata.FileData(path, compress=True)
        if last_modified is None:
            last_modified = data.last_modified()
        encoded = data.retrieve(last_modified=last_modified)
        if is_none:
            assert encoded is None
        else:
            assert encoded == ENCODED_DATA_BZ2
def test_file_data_no_data():
    """Missing files always fail; empty files fail unless allow_empty."""
    with tempfile.TemporaryDirectory() as d:
        path = os.path.join(d, 'test')
        # file does not exist
        data = filedata.FileData(path, compress=False)
        with pytest.raises(exception.ExternalDataFailed):
            data.retrieve()
        # file is empty; create it via a context manager so it is closed
        # deterministically.
        with open(path, 'w') as f:
            f.write('')
        data = filedata.FileData(path, compress=False, allow_empty=False)
        with pytest.raises(exception.ExternalDataFailed):
            data.retrieve()
        data = filedata.FileData(path, compress=False, allow_empty=True)
        assert data.retrieve() == ''
# Directory data
# Bundle of paths used by the directory fixtures below: a UUID-named root
# directory, a file inside it, a subdirectory, and a nested file.
Paths = namedtuple("Paths", ['directory', 'path', 'subdirectory', 'subpath'])
@contextmanager
def temporary_directory(monkeypatch=None):
    """Yield a Paths tuple rooted in a fresh temporary directory.

    Only the names are computed; nothing under the temp root is created,
    so callers build exactly the layout they need.  When monkeypatch is
    given, filedata.constants.P_LIBVIRT_SWTPM is redirected at the temp
    root so production path lookups resolve into it.
    """
    with tempfile.TemporaryDirectory() as d:
        directory = os.path.join(d, UUID)
        path = os.path.join(directory, 'file1')
        subdirectory = os.path.join(directory, 'data')
        subpath = os.path.join(subdirectory, 'file2')
        if monkeypatch is not None:
            monkeypatch.setattr(filedata.constants, 'P_LIBVIRT_SWTPM',
                                os.path.dirname(directory))
        yield Paths(directory=directory,
                    path=path, subdirectory=subdirectory, subpath=subpath)
@contextmanager
def directory_data(monkeypatch=None):
    """Yield a Paths tuple with the directory tree created and populated."""
    with temporary_directory(monkeypatch) as d:
        os.mkdir(d.directory)
        os.chmod(d.directory, DIRECTORY_MODE)
        os.mkdir(d.subdirectory)
        # Close the handles deterministically so the data is flushed
        # before the tests read the files back.
        with open(d.path, 'w') as f:
            f.write(FILE_DATA)
        with open(d.subpath, 'w') as f:
            f.write(FILE_DATA_2)
        yield d
def test_directory_data_read_write():
    """Data retrieved from one directory can be restored into another."""
    with directory_data() as d:
        data = filedata.DirectoryData(d.directory)
        encoded = data.retrieve()
        assert encoded is not None
    with temporary_directory() as d:
        data = filedata.DirectoryData(d.directory)
        data.store(encoded)
        # Close the handles deterministically instead of relying on GC.
        with open(d.path) as f:
            assert f.read() == FILE_DATA
        with open(d.subpath) as f:
            assert f.read() == FILE_DATA_2
        # Exactly the two stored files must exist -- nothing extra.
        n = 0
        for _root, _dirs, files in os.walk(d.directory):
            n += len(files)
        assert n == 2
        # Directory permissions must be restored as well.
        permissions = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
        assert os.stat(d.directory).st_mode & permissions == DIRECTORY_MODE
def test_directory_data_rewrite():
    """store() replaces the whole directory content, dropping stale files."""
    with directory_data() as d:
        data = filedata.DirectoryData(d.directory)
        encoded = data.retrieve()
    with temporary_directory() as d:
        os.mkdir(d.directory)
        # Pre-populate the target with files that must be overwritten or
        # removed by store(); close handles so the writes are flushed.
        old_path = os.path.join(d.directory, 'old')
        with open(old_path, 'w') as f:
            f.write("invalid")
        with open(d.path, 'w') as f:
            f.write("invalid")
        data = filedata.DirectoryData(d.directory)
        data.store(encoded)
        assert not os.path.exists(old_path)
        with open(d.path) as f:
            assert f.read() == FILE_DATA
        with open(d.subpath) as f:
            assert f.read() == FILE_DATA_2
        # Only the two restored files remain.
        n = 0
        for _root, _dirs, files in os.walk(d.directory):
            n += len(files)
        assert n == 2
def test_directory_data_modified():
    """last_modified() reports the newest mtime among the stored files."""
    with directory_data() as d:
        directory = filedata.DirectoryData(d.directory)
        directory.retrieve()
        newest_mtime = max(os.stat(d.path).st_mtime,
                           os.stat(d.subpath).st_mtime)
        assert directory.last_modified() == newest_mtime
def test_directory_data_no_data():
    """Missing directories fail; empty ones fail unless allow_empty is set."""
    # A non-existent directory always fails.
    missing = filedata.DirectoryData('/this-directory-must-not-exist')
    with pytest.raises(exception.ExternalDataFailed):
        missing.retrieve()
    # An existing but empty directory fails only when empty data is refused.
    with tempfile.TemporaryDirectory() as tmp_dir:
        strict = filedata.DirectoryData(tmp_dir, allow_empty=False)
        with pytest.raises(exception.ExternalDataFailed):
            strict.retrieve()
        lenient = filedata.DirectoryData(tmp_dir, allow_empty=True)
        assert lenient.retrieve() is not None
# Monitor
def data_retriever(directory):
    """Return a retriever callable suitable for filedata.Monitor.

    The callable accepts the previously observed modification time and
    returns an (encoded_data, last_modified) pair for *directory*.
    """
    directory_data_source = filedata.DirectoryData(directory)

    def retriever(last_modified):
        payload = directory_data_source.retrieve(last_modified=last_modified)
        return payload, directory_data_source.last_modified()

    return retriever
def test_monitor_read():
    """Monitor returns data that can be stored back into a fresh directory."""
    with directory_data() as d:
        monitor = filedata.Monitor(data_retriever(d.directory))
        encoded = monitor.data()
        assert encoded is not None
    with temporary_directory() as d:
        data = filedata.DirectoryData(d.directory)
        data.store(encoded)
        # Close the handles deterministically instead of relying on GC.
        with open(d.path) as f:
            assert f.read() == FILE_DATA
        with open(d.subpath) as f:
            assert f.read() == FILE_DATA_2
        # Exactly the two stored files must exist.
        n = 0
        for _root, _dirs, files in os.walk(d.directory):
            n += len(files)
        assert n == 2
def test_monitor_repeated_read():
    """Unchanged data is reported only once; force=True re-reads it."""
    with directory_data() as d:
        monitor = filedata.Monitor(data_retriever(d.directory))
        first_data = monitor.data()
        first_hash = monitor.data_hash()
        assert first_data is not None
        assert first_hash is not None
        # A second poll without changes yields no data but keeps the hash.
        assert monitor.data() is None
        assert monitor.data_hash() == first_hash
        # Forcing a read returns the same content and hash again.
        assert monitor.data(force=True) == first_data
        assert monitor.data_hash() == first_hash
def test_monitor_data_change():
    """After a file changes, the monitor returns fresh data and a new hash."""
    with directory_data() as d:
        monitor = filedata.Monitor(data_retriever(d.directory))
        data = monitor.data()
        hash_ = monitor.data_hash()
        # Modify one of the files; close the handle so the change is
        # flushed before the monitor polls again.
        with open(d.subpath, 'a') as f:
            f.write('\n')
        new_data = monitor.data()
        new_hash = monitor.data_hash()
        assert new_data is not None
        assert new_data != data
        assert new_hash is not None
        assert new_hash != hash_
        # Nothing changed since the last poll: no new data is reported.
        assert monitor.data() is None
        assert monitor.data_hash() == new_hash
def test_monitor_no_data():
    """A failing retriever propagates ExternalDataFailed out of data()."""
    broken_retriever = data_retriever('/this-directory-must-not-exist')
    monitor = filedata.Monitor(broken_retriever)
    with pytest.raises(exception.ExternalDataFailed):
        monitor.data()
# Supervdsm API
def test_supervdsm_read_write(monkeypatch):
    # Read TPM data through the supervdsm API, then write it back into a
    # fresh directory tree and verify the round trip.
    with directory_data(monkeypatch):
        encoded, _modified = virt.read_tpm_data(UUID, -1)
        # The returned blob must be password protected.
        assert password.unprotect(encoded)
    with temporary_directory(monkeypatch):
        virt.write_tpm_data(UUID, encoded)
        assert encoded == virt.read_tpm_data(UUID, -1)[0]
def test_supervdsm_invalid_vmid(monkeypatch):
    # A VM id containing a path traversal component ('../') must be
    # rejected by write_tpm_data().
    with directory_data(monkeypatch):
        encoded, _modified = virt.read_tpm_data(UUID, -1)
    with pytest.raises(exception.ExternalDataFailed):
        virt.write_tpm_data('../foo', encoded)
def test_supervdsm_symlink(monkeypatch):
    # Data containing a symlink entry must be refused on write, so a
    # malicious payload cannot make vdsm write through arbitrary paths.
    with directory_data(monkeypatch) as d:
        os.symlink('/foo', os.path.join(d.directory, 'bar'))
        encoded = filedata.DirectoryData(d.directory).retrieve()
    with temporary_directory(monkeypatch):
        with pytest.raises(exception.ExternalDataFailed):
            virt.write_tpm_data(UUID, encoded)
import errno
import os
import sys
import tempfile
import unittest
from os.path import join
# Make it possible to run out of the working copy.
sys.path.insert(
0,
os.path.join(
os.path.dirname(__file__),
os.pardir,
"lib",
))
import opensub # noqa
import opensub.main # noqa
def _test_data_dir():
return os.path.join(
os.path.dirname(__file__),
"test-data",
)
class LookIntoArchive(unittest.TestCase):
    # Verifies that SubtitleArchive can enumerate the files inside a
    # downloaded zip archive.
    def test__extract_filenames_from_zip(self):
        """Should see filenames in the archive."""
        expected = [
            "Birdman of Alcatraz - 1.srt",
            "Birdman of Alcatraz - 2.srt",
        ]
        # Graft a local fixture archive onto the object instead of
        # downloading from the (dummy) URL.
        test_file = os.path.join(_test_data_dir(), "4130212.zip")
        with open(test_file, "rb") as tfile:
            archive = opensub.SubtitleArchive(url="http://127.0.0.1/dummy/")
            archive.tempfile = tfile
            # yield_open() yields objects exposing .name for each member.
            subtitle_names = [sfile.name for sfile in archive.yield_open()]
        self.assertEqual(subtitle_names, expected)
# Yeah, I know that multiple asserts are not recommended in a single
# test method, but I couldn't bear the repetitive code. In the
# traceback you'll see which assert failed anyway... -- rubasov
class DefaultTemplate(unittest.TestCase):
    # Exercises the default output-name template: the subtitle is saved
    # next to the video, using the video's base name and the subtitle's
    # extension.
    def setUp(self):
        self.template = "{video/dir}{video/base}{subtitle/ext}"
    def _assertEqual(self, video, subtitle, expected):
        # Build a filename from the template and compare after path
        # normalization, keeping the checks platform independent.
        builder = opensub.FilenameBuilder(template=self.template)
        fname = builder.build(video=video, subtitle=subtitle)
        self.assertEqual(os.path.normpath(fname), expected)
    def test__combinations(self):
        """Zillion combinations of templating input."""
        self._assertEqual(
            "video.avi",
            "subtitle.srt",
            "video.srt",
        )
        self._assertEqual(
            "video",
            "subtitle.srt",
            "video.srt",
        )
        self._assertEqual(
            "video.avi",
            "subtitle",
            "video",
        )
        self._assertEqual(
            "foo.bar.avi",
            "baz.qux.srt",
            "foo.bar.srt",
        )
        self._assertEqual(
            ".video.avi",
            ".subtitle.srt",
            ".video.srt",
        )
        self._assertEqual(
            join("dir", "video.avi"),
            "subtitle.srt",
            join("dir", "video.srt"),
        )
        self._assertEqual(
            "video.avi",
            join("dir", "subtitle.srt"),
            "video.srt",
        )
        self._assertEqual(
            join("", "dir", "video.avi"),
            "subtitle.srt",
            join("", "dir", "video.srt"),
        )
        self._assertEqual(
            "video.avi",
            join("", "dir", "subtitle.srt"),
            "video.srt",
        )
        self._assertEqual(
            join("", "video.avi"),
            "subtitle.srt",
            join("", "video.srt"),
        )
        self._assertEqual(
            "video.avi",
            join("", "subtitle.srt"),
            "video.srt",
        )
    def _assertRaises(self, video, subtitle, expected):
        # Building with invalid inputs must raise `expected`.
        builder = opensub.FilenameBuilder(template=self.template)
        with self.assertRaises(expected):
            builder.build(video=video, subtitle=subtitle)
    def test__empty_string_is_invalid_path(self):
        """Fail on empty string."""
        self._assertRaises("", "junk", Exception)
        self._assertRaises("junk", "", Exception)
class RoundTrip(unittest.TestCase):
    # Template built purely from the subtitle's own path components, so
    # rebuilding a name from a subtitle must reproduce the subtitle path.
    def setUp(self):
        self.template = "{subtitle/dir}{subtitle/base}{subtitle/ext}"
    def _assertEqual(self, video, subtitle, expected):
        builder = opensub.FilenameBuilder(template=self.template)
        fname = builder.build(video=video, subtitle=subtitle)
        self.assertEqual(os.path.normpath(fname), expected)
    def test__roundtrip_safety(self):
        """A break-to-pieces-assemble cycle should result in the original."""
        self._assertEqual(
            "junk",
            "subtitle.srt",
            "subtitle.srt",
        )
        self._assertEqual(
            "junk",
            join("dir", "subtitle.srt"),
            join("dir", "subtitle.srt"),
        )
        self._assertEqual(
            "junk",
            join("", "dir", "subtitle.srt"),
            join("", "dir", "subtitle.srt"),
        )
        self._assertEqual(
            "junk",
            join("", "subtitle.srt"),
            join("", "subtitle.srt"),
        )
class Extract(unittest.TestCase):
    # Template that drops the directory part: extraction into the
    # current working directory under the original file name.
    def setUp(self):
        self.template = "{subtitle/base}{subtitle/ext}"
    def test__extract_to_current_dir(self):
        """Extract subtitles by their original names."""
        builder = opensub.FilenameBuilder(template=self.template)
        fname = builder.build(
            video="junk",
            subtitle=join("", "dir", "subdir", "subtitle.srt"),
        )
        self.assertEqual(os.path.normpath(fname), "subtitle.srt")
class NumberedTemplate(unittest.TestCase):
    # Template with a formatted numeric placeholder ({num:02}).
    def setUp(self):
        self.template = "episode{num:02}{subtitle/ext}"
    def test__number_formatting(self):
        """Can use numbered templates."""
        builder = opensub.FilenameBuilder(template=self.template)
        fname = builder.build(
            video="junk",
            subtitle="subtitle.srt",
            num=7,
        )
        self.assertEqual(os.path.normpath(fname), "episode07.srt")
    def test__missing_value_for_template_variable(self):
        """Fail on missing value for template variable."""
        # 'num' is deliberately omitted here.
        builder = opensub.FilenameBuilder(template=self.template)
        with self.assertRaises(Exception):
            builder.build(
                video="junk",
                subtitle="subtitle.srt",
            )
class SafeOpen(unittest.TestCase):
    def test__no_overwrite(self):
        """Do not overwrite existing files by default."""
        # Use the context manager so the temporary file is removed even
        # when one of the assertions below fails (the original closed it
        # only on the success path).
        with tempfile.NamedTemporaryFile() as tmpfile:
            with self.assertRaises(OSError) as cm:
                opensub.main.safe_open(tmpfile.name)
            self.assertEqual(cm.exception.errno, errno.EEXIST)
# Allow running this module directly as a script.
if __name__ == "__main__":
    unittest.main()
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import datetime
import os
import subprocess
def get_version(version=None):
    """Return a PEP 386-compliant version string built from VERSION.

    *version* is a 5-tuple ``(major, minor, micro, stage, serial)`` where
    stage is one of 'alpha', 'beta', 'rc' or 'final'.  When omitted, the
    geonode package version is used.
    """
    if version is None:
        from geonode import __version__ as version
    else:
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')
    # Main part: X.Y, or X.Y.Z when the micro number is non-zero.
    significant = version[:2] if version[2] == 0 else version[:3]
    main = '.'.join(str(part) for part in significant)
    # Sub part: '.devN' for pre-alpha snapshots (alpha serial 0),
    # '{a|b|c}N' for alpha/beta/rc releases, empty for final releases.
    if version[3] == 'alpha' and version[4] == 0:
        changeset = get_git_changeset()
        sub = '.dev%s' % changeset if changeset else ''
    elif version[3] != 'final':
        stage_letter = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[version[3]]
        sub = stage_letter + str(version[4])
    else:
        sub = ''
    return main + sub
def get_git_changeset():
    """Returns a numeric identifier of the latest git changeset.
    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
    """
    # Repository root: two directory levels above this file.
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    git_show = subprocess.Popen('git show --pretty=format:%ct --quiet HEAD',
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                shell=True, cwd=repo_dir, universal_newlines=True)
    # First line of the output is the commit time (seconds since epoch).
    timestamp = git_show.communicate()[0].partition('\n')[0]
    try:
        timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
    except ValueError:
        # Not a git checkout, or git failed -- no changeset id available.
        return None
    return timestamp.strftime('%Y%m%d%H%M%S')
"""
sentry.utils.auth
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.core.urlresolvers import reverse
from sentry.models import User
def parse_auth_header(header):
    """Parse a Sentry auth header into a dict of key/value strings.

    The header looks like ``Sentry key1=value1, key2=value2``; the scheme
    word before the first space is discarded.
    """
    fields = header.split(' ', 1)[1].split(',')
    return dict(field.strip().split('=') for field in fields)
def get_auth_providers():
    # Providers whose required settings (cfg_names) are all configured.
    # NOTE(review): dict.iteritems() is Python 2 only; switch to items()
    # when porting to Python 3.
    return [
        key for key, cfg_names
        in settings.AUTH_PROVIDERS.iteritems()
        if all(getattr(settings, c, None) for c in cfg_names)
    ]
def get_login_redirect(request):
    """Return the post-login destination: the URL stashed in the session
    under '_next', falling back to the Sentry root."""
    default = reverse('sentry')
    login_url = request.session.pop('_next', None) or default
    # Reject URLs containing '//' (absolute/protocol-relative targets --
    # open-redirect protection) and redirects back to the login page
    # itself (loop protection).
    if '//' in login_url:
        login_url = default
    elif login_url.startswith(reverse('sentry-login')):
        login_url = default
    return login_url
def find_users(username, with_valid_password=True):
    """
    Return a list of users that match a username
    and falling back to email
    """
    # NOTE(review): returns None -- not [] -- when nothing matches and the
    # input contains no '@'; the docstring promises a list.  Callers only
    # rely on falsiness, so confirm before changing the return value.
    qs = User.objects
    if with_valid_password:
        # Skip accounts whose password is the '!' marker (presumably the
        # "unusable password" sentinel -- verify against the auth backend).
        qs = qs.exclude(password='!')
    try:
        # First, assume username is an iexact match for username
        user = qs.get(username__iexact=username)
        return [user]
    except User.DoesNotExist:
        # If not, we can take a stab at guessing it's an email address
        if '@' in username:
            # email isn't guaranteed unique
            return list(qs.filter(email__iexact=username))
    return None
class EmailAuthBackend(ModelBackend):
    """
    Authenticate against django.contrib.auth.models.User.
    Supports authenticating via an email address or a username.
    """
    def authenticate(self, username=None, password=None):
        # Several users may match (email is not unique); accept the first
        # whose password checks out.
        users = find_users(username)
        if users:
            for user in users:
                try:
                    # Empty password hashes never authenticate.
                    if user.password and user.check_password(password):
                        return user
                except ValueError:
                    # NOTE(review): presumably raised for malformed or
                    # unsupported password hashes -- skip such accounts.
                    continue
        return None
"""
'library' XBlock (LibraryRoot)
"""
import logging
from xblock.core import XBlock
from xblock.fields import Boolean, List, Scope, String
from xblock.fragment import Fragment
from xmodule.studio_editable import StudioEditableModule
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
# (actual translation happens later, in Django-aware rendering code).
_ = lambda text: text
class LibraryRoot(XBlock):
    """
    The LibraryRoot is the root XBlock of a content library. All other blocks in
    the library are its children. It contains metadata such as the library's
    display_name.
    """
    resources_dir = None
    display_name = String(
        help=_("The display name for this component."),
        default="Library",
        display_name=_("Library Display Name"),
        scope=Scope.settings
    )
    advanced_modules = List(
        display_name=_("Advanced Module List"),
        help=_("Enter the names of the advanced components to use in your library."),
        scope=Scope.settings,
        xml_node=True,
    )
    # Per-user flag controlling whether previews of children are rendered.
    show_children_previews = Boolean(
        display_name="Hide children preview",
        help="Choose if preview of library contents is shown",
        scope=Scope.user_state,
        default=True
    )
    has_children = True
    has_author_view = True
    def __unicode__(self):
        return u"Library: {}".format(self.display_name)
    def __str__(self):
        return unicode(self).encode('utf-8')
    def author_view(self, context):
        """
        Renders the Studio preview view.
        """
        fragment = Fragment()
        self.render_children(context, fragment, can_reorder=False, can_add=True)
        return fragment
    def render_children(self, context, fragment, can_reorder=False, can_add=False):  # pylint: disable=unused-argument
        """
        Renders the children of the module with HTML appropriate for Studio. Reordering is not supported.
        """
        contents = []
        paging = context.get('paging', None)
        children_count = len(self.children)  # pylint: disable=no-member
        item_start, item_end = 0, children_count
        # TODO sort children
        if paging:
            # Restrict rendering to the requested page; a missing page
            # size means "everything on one page".
            page_number = paging.get('page_number', 0)
            raw_page_size = paging.get('page_size', None)
            page_size = raw_page_size if raw_page_size is not None else children_count
            item_start, item_end = page_size * page_number, page_size * (page_number + 1)
        children_to_show = self.children[item_start:item_end]  # pylint: disable=no-member
        force_render = context.get('force_render', None)
        context['can_move'] = False
        for child_key in children_to_show:
            # Children must have a separate context from the library itself. Make a copy.
            child_context = context.copy()
            child_context['show_preview'] = self.show_children_previews
            child_context['can_edit_visibility'] = False
            child = self.runtime.get_block(child_key)
            child_view_name = StudioEditableModule.get_preview_view_name(child)
            # A single child may be force-rendered regardless of the
            # per-user preview setting.
            if unicode(child.location) == force_render:
                child_context['show_preview'] = True
            if child_context['show_preview']:
                rendered_child = self.runtime.render_child(child, child_view_name, child_context)
            else:
                # Cheap placeholder instead of the full preview.
                rendered_child = self.runtime.render_child_placeholder(child, child_view_name, child_context)
            fragment.add_frag_resources(rendered_child)
            contents.append({
                'id': unicode(child.location),
                'content': rendered_child.content,
            })
        fragment.add_content(
            self.runtime.render_template("studio_render_paged_children_view.html", {
                'items': contents,
                'xblock_context': context,
                'can_add': can_add,
                'first_displayed': item_start,
                'total_children': children_count,
                'displayed_children': len(children_to_show),
                'previews': self.show_children_previews
            })
        )
    @property
    def display_org_with_default(self):
        """
        Org display names are not implemented. This just provides API compatibility with CourseDescriptor.
        Always returns the raw 'org' field from the key.
        """
        return self.scope_ids.usage_id.course_key.org
    @property
    def display_number_with_default(self):
        """
        Display numbers are not implemented. This just provides API compatibility with CourseDescriptor.
        Always returns the raw 'library' field from the key.
        """
        return self.scope_ids.usage_id.course_key.library
    @XBlock.json_handler
    def trigger_previews(self, request_body, suffix):  # pylint: disable=unused-argument
        """ Enable or disable previews in studio for library children. """
        self.show_children_previews = request_body.get('showChildrenPreviews', self.show_children_previews)
        return {'showChildrenPreviews': self.show_children_previews}
/*
* Copyright 2010-2025 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.typeRelationChecker
import org.jetbrains.kotlin.analysis.api.symbols.KaCallableSymbol
import org.jetbrains.kotlin.analysis.test.framework.base.AbstractAnalysisApiBasedTest
import org.jetbrains.kotlin.analysis.test.framework.projectStructure.KtTestModule
import org.jetbrains.kotlin.analysis.test.framework.services.expressionMarkerProvider
import org.jetbrains.kotlin.psi.KtCallableDeclaration
import org.jetbrains.kotlin.psi.KtExpression
import org.jetbrains.kotlin.psi.KtFile
import org.jetbrains.kotlin.test.services.TestServices
import org.jetbrains.kotlin.test.services.assertions
import org.jetbrains.kotlin.types.Variance
/**
 * Base test for `KaCallableSymbol.canBeCalledAsExtensionOn`: resolves the callable
 * declaration at the caret and the type of the selected expression, records whether
 * the callable can be invoked as an extension on that type, and compares the
 * rendered result against the expected test output file.
 */
abstract class AbstractCanBeCalledAsExtensionOnTest : AbstractAnalysisApiBasedTest() {
    override fun doTestByMainFile(mainFile: KtFile, mainModule: KtTestModule, testServices: TestServices) {
        val callable = testServices.expressionMarkerProvider.getBottommostElementOfTypeAtCaret<KtCallableDeclaration>(mainFile)
        val expression = testServices.expressionMarkerProvider.getTopmostSelectedElementOfType<KtExpression>(mainFile)
        val actual = copyAwareAnalyzeForTest(mainFile) { _ ->
            val callableSymbol = callable.symbol as KaCallableSymbol
            val expressionType = expression.expressionType ?: error("Expression type should not be null for ${expression.text}")
            val canBeCalled = callableSymbol.canBeCalledAsExtensionOn(expressionType)
            buildString {
                appendLine("CAN_BE_CALLED_AS_EXTENSION_ON: $canBeCalled")
                appendLine("CALLABLE: ${callableSymbol.render()}")
                appendLine("TYPE: ${expressionType.render(position = Variance.INVARIANT)}")
            }
        }
        testServices.assertions.assertEqualsToTestOutputFile(actual)
    }
}
// Standalone-component bootstrap: mount the drag&drop horizontal sorting
// example as the application root (no NgModule involved).
import {bootstrapApplication} from '@angular/platform-browser';
import {CdkDragDropHorizontalSortingExample} from './app/app';
bootstrapApplication(CdkDragDropHorizontalSortingExample);
from boto.swf.exceptions import SWFResponseError
from freezegun import freeze_time
from moto import mock_swf_deprecated
from moto.swf import swf_backend
from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION
# PollForActivityTask endpoint
@mock_swf_deprecated
def test_poll_for_activity_task_when_one():
    # Schedule an activity via a decision, poll for it, and check that an
    # ActivityTaskStarted event is recorded with the poller's identity.
    conn = setup_workflow()
    decision_token = conn.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    conn.respond_decision_task_completed(decision_token, decisions=[
        SCHEDULE_ACTIVITY_TASK_DECISION
    ])
    resp = conn.poll_for_activity_task(
        "test-domain", "activity-task-list", identity="surprise")
    resp["activityId"].should.equal("my-activity-001")
    resp["taskToken"].should_not.be.none
    resp = conn.get_workflow_execution_history(
        "test-domain", conn.run_id, "uid-abcd1234")
    resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted")
    resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal(
        {"identity": "surprise", "scheduledEventId": 5}
    )
@mock_swf_deprecated
def test_poll_for_activity_task_when_none():
    # No activity scheduled: polling returns the empty-task marker.
    conn = setup_workflow()
    resp = conn.poll_for_activity_task("test-domain", "activity-task-list")
    resp.should.equal({"startedEventId": 0})
@mock_swf_deprecated
def test_poll_for_activity_task_on_non_existent_queue():
    # Polling an unknown task list behaves like polling an empty queue.
    conn = setup_workflow()
    resp = conn.poll_for_activity_task("test-domain", "non-existent-queue")
    resp.should.equal({"startedEventId": 0})
# CountPendingActivityTasks endpoint
@mock_swf_deprecated
def test_count_pending_activity_tasks():
    # One scheduled, un-polled activity must be reported as pending.
    conn = setup_workflow()
    decision_token = conn.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    conn.respond_decision_task_completed(decision_token, decisions=[
        SCHEDULE_ACTIVITY_TASK_DECISION
    ])
    resp = conn.count_pending_activity_tasks(
        "test-domain", "activity-task-list")
    resp.should.equal({"count": 1, "truncated": False})
@mock_swf_deprecated
def test_count_pending_decision_tasks_on_non_existent_task_list():
    # An unknown task list has zero pending tasks.
    # NOTE(review): despite the name, this counts pending *activity*
    # tasks (matching the section above); consider renaming.
    conn = setup_workflow()
    resp = conn.count_pending_activity_tasks("test-domain", "non-existent")
    resp.should.equal({"count": 0, "truncated": False})
# RespondActivityTaskCompleted endpoint
@mock_swf_deprecated
def test_respond_activity_task_completed():
    # Completing an activity task records an ActivityTaskCompleted event
    # carrying the result and the scheduled/started event ids.
    conn = setup_workflow()
    decision_token = conn.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    conn.respond_decision_task_completed(decision_token, decisions=[
        SCHEDULE_ACTIVITY_TASK_DECISION
    ])
    activity_token = conn.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]
    resp = conn.respond_activity_task_completed(
        activity_token, result="result of the task")
    resp.should.be.none
    resp = conn.get_workflow_execution_history(
        "test-domain", conn.run_id, "uid-abcd1234")
    resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted")
    resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal(
        {"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6}
    )
@mock_swf_deprecated
def test_respond_activity_task_completed_on_closed_workflow_execution():
    # Responding on an already-closed workflow execution must be rejected.
    conn = setup_workflow()
    decision_token = conn.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    conn.respond_decision_task_completed(decision_token, decisions=[
        SCHEDULE_ACTIVITY_TASK_DECISION
    ])
    activity_token = conn.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]
    # bad: we're closing workflow execution manually, but endpoints are not
    # coded for now..
    wfe = swf_backend.domains[0].workflow_executions[-1]
    wfe.execution_status = "CLOSED"
    # /bad
    conn.respond_activity_task_completed.when.called_with(
        activity_token
    ).should.throw(SWFResponseError, "WorkflowExecution=")
@mock_swf_deprecated
def test_respond_activity_task_completed_with_task_already_completed():
    # A second completion with the same token must fail: the task is gone.
    conn = setup_workflow()
    decision_token = conn.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    conn.respond_decision_task_completed(decision_token, decisions=[
        SCHEDULE_ACTIVITY_TASK_DECISION
    ])
    activity_token = conn.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]
    conn.respond_activity_task_completed(activity_token)
    conn.respond_activity_task_completed.when.called_with(
        activity_token
    ).should.throw(SWFResponseError, "Unknown activity, scheduledEventId = 5")
# RespondActivityTaskFailed endpoint
@mock_swf_deprecated
def test_respond_activity_task_failed():
    # Failing an activity task records an ActivityTaskFailed event with
    # the supplied reason and details.
    conn = setup_workflow()
    decision_token = conn.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    conn.respond_decision_task_completed(decision_token, decisions=[
        SCHEDULE_ACTIVITY_TASK_DECISION
    ])
    activity_token = conn.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]
    resp = conn.respond_activity_task_failed(activity_token,
                                             reason="short reason",
                                             details="long details")
    resp.should.be.none
    resp = conn.get_workflow_execution_history(
        "test-domain", conn.run_id, "uid-abcd1234")
    resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed")
    resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal(
        {"reason": "short reason", "details": "long details",
         "scheduledEventId": 5, "startedEventId": 6}
    )
@mock_swf_deprecated
def test_respond_activity_task_completed_with_wrong_token():
    # NB: we just test ONE failure case for RespondActivityTaskFailed
    # because the safeguards are shared with RespondActivityTaskCompleted, so
    # no need to retest everything end-to-end.
    # NOTE(review): the name says "completed" but the call under test is
    # respond_activity_task_failed (see the NB above).
    conn = setup_workflow()
    decision_token = conn.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    conn.respond_decision_task_completed(decision_token, decisions=[
        SCHEDULE_ACTIVITY_TASK_DECISION
    ])
    conn.poll_for_activity_task("test-domain", "activity-task-list")
    conn.respond_activity_task_failed.when.called_with(
        "not-a-correct-token"
    ).should.throw(SWFResponseError, "Invalid token")
# RecordActivityTaskHeartbeat endpoint
@mock_swf_deprecated
def test_record_activity_task_heartbeat():
    # A heartbeat on a live task reports whether a cancel was requested.
    conn = setup_workflow()
    decision_token = conn.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    conn.respond_decision_task_completed(decision_token, decisions=[
        SCHEDULE_ACTIVITY_TASK_DECISION
    ])
    activity_token = conn.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]
    resp = conn.record_activity_task_heartbeat(activity_token)
    resp.should.equal({"cancelRequested": False})
@mock_swf_deprecated
def test_record_activity_task_heartbeat_with_wrong_token():
    # A heartbeat with an invalid token must be rejected.
    conn = setup_workflow()
    decision_token = conn.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    conn.respond_decision_task_completed(decision_token, decisions=[
        SCHEDULE_ACTIVITY_TASK_DECISION
    ])
    conn.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]
    conn.record_activity_task_heartbeat.when.called_with(
        "bad-token", details="some progress details"
    ).should.throw(SWFResponseError)
@mock_swf_deprecated
def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout():
    # Freeze time past the heartbeat timeout: the resulting
    # ActivityTaskTimedOut event must carry the last heartbeat details.
    conn = setup_workflow()
    decision_token = conn.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    conn.respond_decision_task_completed(decision_token, decisions=[
        SCHEDULE_ACTIVITY_TASK_DECISION
    ])
    with freeze_time("2015-01-01 12:00:00"):
        activity_token = conn.poll_for_activity_task(
            "test-domain", "activity-task-list")["taskToken"]
        conn.record_activity_task_heartbeat(
            activity_token, details="some progress details")
    with freeze_time("2015-01-01 12:05:30"):
        # => Activity Task Heartbeat timeout reached!!
        resp = conn.get_workflow_execution_history(
            "test-domain", conn.run_id, "uid-abcd1234")
        resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut")
        attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"]
        attrs["details"].should.equal("some progress details")
/* Spacing for the example app shell: padded nav links, a tighter page
   heading, and breathing room below the form and the nav. */
nav a {
  padding: 0.7rem;
}
h1 {
  margin-bottom: 0.3rem;
}
form {
  margin-bottom: 2rem;
}
nav {
  padding-bottom: 3rem;
}
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
from caching_file_system import CachingFileSystem
from extensions_paths import SERVER2
from file_system import FileNotFoundError, StatInfo
from local_file_system import LocalFileSystem
from mock_file_system import MockFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
from test_object_store import TestObjectStore
def _CreateLocalFs():
  # Local file system rooted at SERVER2's test_data/file_system/ fixtures.
  return LocalFileSystem.Create(SERVER2, 'test_data', 'file_system/')
class CachingFileSystemTest(unittest.TestCase):
  def setUp(self):
    # Per-test backing storage for the object stores created by
    # _CreateCachingFileSystem.
    # Use this to make sure that every time _CreateCachingFileSystem is called
    # the underlying object store data is the same, within each test.
    self._object_store_dbs = {}
  def _CreateCachingFileSystem(self, fs, start_empty=False):
    # Wrap |fs| in a CachingFileSystem whose object stores are backed by
    # self._object_store_dbs, so cache contents persist across calls
    # within a single test.
    def store_type_constructor(namespace, start_empty=False):
      '''Returns an ObjectStore backed onto test-lifetime-persistent objects
      in |_object_store_dbs|.
      '''
      if namespace not in self._object_store_dbs:
        self._object_store_dbs[namespace] = {}
      db = self._object_store_dbs[namespace]
      if start_empty:
        db.clear()
      return TestObjectStore(namespace, init=db)
    object_store_creator = ObjectStoreCreator(start_empty=start_empty,
                                              store_type=store_type_constructor)
    return CachingFileSystem(fs, object_store_creator)
  def testReadFiles(self):
    # Reading several files at once returns a path -> contents mapping.
    file_system = self._CreateCachingFileSystem(
        _CreateLocalFs(), start_empty=False)
    expected = {
        './test1.txt': 'test1\n',
        './test2.txt': 'test2\n',
        './test3.txt': 'test3\n',
    }
    self.assertEqual(
        expected,
        file_system.Read(['./test1.txt', './test2.txt', './test3.txt']).Get())
  def testListDir(self):
    # Prime the read cache with a directory listing paired with the
    # current stat version, then verify ReadSingle serves the cached
    # value -- including after the cached value is changed, proving the
    # cache (not the disk) is consulted while the version matches.
    file_system = self._CreateCachingFileSystem(
        _CreateLocalFs(), start_empty=False)
    expected = ['dir/'] + ['file%d.html' % i for i in range(7)]
    file_system._read_cache.Set(
        'list/',
        (expected, file_system.Stat('list/').version))
    self.assertEqual(expected, sorted(file_system.ReadSingle('list/').Get()))
    expected.remove('file0.html')
    file_system._read_cache.Set(
        'list/',
        (expected, file_system.Stat('list/').version))
    self.assertEqual(expected, sorted(file_system.ReadSingle('list/').Get()))
def testCaching(self):
test_fs = TestFileSystem({
'bob': {
'bob0': 'bob/bob0 contents',
'bob1': 'bob/bob1 contents',
'bob2': 'bob/bob2 contents',
'bob3': 'bob/bob3 contents',
}
})
mock_fs = MockFileSystem(test_fs)
def create_empty_caching_fs():
return self._CreateCachingFileSystem(mock_fs, start_empty=True)
file_system = create_empty_caching_fs()
# The stat/read should happen before resolving the Future, and resolving
# the future shouldn't do any additional work.
get_future = file_system.ReadSingle('bob/bob0')
self.assertTrue(*mock_fs.CheckAndReset(read_count=1))
self.assertEqual('bob/bob0 contents', get_future.Get())
self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=1, stat_count=1))
# Resource has been cached, so test resource is not re-fetched.
self.assertEqual('bob/bob0 contents',
file_system.ReadSingle('bob/bob0').Get())
self.assertTrue(*mock_fs.CheckAndReset())
# Test if the Stat version is the same the resource is not re-fetched.
file_system = create_empty_caching_fs()
self.assertEqual('bob/bob0 contents',
file_system.ReadSingle('bob/bob0').Get())
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1))
# Test if there is a newer version, the resource is re-fetched.
file_system = create_empty_caching_fs()
test_fs.IncrementStat();
future = file_system.ReadSingle('bob/bob0')
self.assertTrue(*mock_fs.CheckAndReset(read_count=1, stat_count=1))
self.assertEqual('bob/bob0 contents', future.Get())
self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=1))
# Test directory and subdirectory stats are cached.
file_system = create_empty_caching_fs()
file_system._stat_cache.Del('bob/bob0')
file_system._read_cache.Del('bob/bob0')
file_system._stat_cache.Del('bob/bob1')
test_fs.IncrementStat();
futures = (file_system.ReadSingle('bob/bob1'),
file_system.ReadSingle('bob/bob0'))
self.assertTrue(*mock_fs.CheckAndReset(read_count=2))
self.assertEqual(('bob/bob1 contents', 'bob/bob0 contents'),
tuple(future.Get() for future in futures))
self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=2, stat_count=1))
self.assertEqual('bob/bob1 contents',
file_system.ReadSingle('bob/bob1').Get())
self.assertTrue(*mock_fs.CheckAndReset())
# Test a more recent parent directory doesn't force a refetch of children.
file_system = create_empty_caching_fs()
file_system._read_cache.Del('bob/bob0')
file_system._read_cache.Del('bob/bob1')
futures = (file_system.ReadSingle('bob/bob1'),
file_system.ReadSingle('bob/bob2'),
file_system.ReadSingle('bob/bob3'))
self.assertTrue(*mock_fs.CheckAndReset(read_count=3))
self.assertEqual(
('bob/bob1 contents', 'bob/bob2 contents', 'bob/bob3 contents'),
tuple(future.Get() for future in futures))
self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=3, stat_count=1))
test_fs.IncrementStat(path='bob/bob0')
file_system = create_empty_caching_fs()
self.assertEqual('bob/bob1 contents',
file_system.ReadSingle('bob/bob1').Get())
self.assertEqual('bob/bob2 contents',
file_system.ReadSingle('bob/bob2').Get())
self.assertEqual('bob/bob3 contents',
file_system.ReadSingle('bob/bob3').Get())
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1))
file_system = create_empty_caching_fs()
file_system._stat_cache.Del('bob/bob0')
future = file_system.ReadSingle('bob/bob0')
self.assertTrue(*mock_fs.CheckAndReset(read_count=1))
self.assertEqual('bob/bob0 contents', future.Get())
self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=1, stat_count=1))
self.assertEqual('bob/bob0 contents',
file_system.ReadSingle('bob/bob0').Get())
self.assertTrue(*mock_fs.CheckAndReset())
# Test skip_not_found caching behavior.
file_system = create_empty_caching_fs()
future = file_system.ReadSingle('bob/no_file', skip_not_found=True)
self.assertTrue(*mock_fs.CheckAndReset(read_count=1))
self.assertEqual(None, future.Get())
self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=1, stat_count=1))
future = file_system.ReadSingle('bob/no_file', skip_not_found=True)
# There shouldn't be another read/stat from the file system;
# we know the file is not there.
self.assertTrue(*mock_fs.CheckAndReset())
future = file_system.ReadSingle('bob/no_file')
self.assertTrue(*mock_fs.CheckAndReset(read_count=1))
# Even though we cached information about non-existent files,
# trying to read one without specifiying skip_not_found should
# still raise an error.
self.assertRaises(FileNotFoundError, future.Get)
def testCachedStat(self):
test_fs = TestFileSystem({
'bob': {
'bob0': 'bob/bob0 contents',
'bob1': 'bob/bob1 contents'
}
})
mock_fs = MockFileSystem(test_fs)
file_system = self._CreateCachingFileSystem(mock_fs, start_empty=False)
self.assertEqual(StatInfo('0'), file_system.Stat('bob/bob0'))
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1))
self.assertEqual(StatInfo('0'), file_system.Stat('bob/bob0'))
self.assertTrue(*mock_fs.CheckAndReset())
# Caching happens on a directory basis, so reading other files from that
# directory won't result in a stat.
self.assertEqual(StatInfo('0'), file_system.Stat('bob/bob1'))
self.assertEqual(
StatInfo('0', child_versions={'bob0': '0', 'bob1': '0'}),
file_system.Stat('bob/'))
self.assertTrue(*mock_fs.CheckAndReset())
# Even though the stat is bumped, the object store still has it cached so
# this won't update.
test_fs.IncrementStat()
self.assertEqual(StatInfo('0'), file_system.Stat('bob/bob0'))
self.assertEqual(StatInfo('0'), file_system.Stat('bob/bob1'))
self.assertEqual(
StatInfo('0', child_versions={'bob0': '0', 'bob1': '0'}),
file_system.Stat('bob/'))
self.assertTrue(*mock_fs.CheckAndReset())
def testFreshStat(self):
test_fs = TestFileSystem({
'bob': {
'bob0': 'bob/bob0 contents',
'bob1': 'bob/bob1 contents'
}
})
mock_fs = MockFileSystem(test_fs)
def run_expecting_stat(stat):
def run():
file_system = self._CreateCachingFileSystem(mock_fs, start_empty=True)
self.assertEqual(
StatInfo(stat, child_versions={'bob0': stat, 'bob1': stat}),
file_system.Stat('bob/'))
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1))
self.assertEqual(StatInfo(stat), file_system.Stat('bob/bob0'))
self.assertEqual(StatInfo(stat), file_system.Stat('bob/bob0'))
self.assertTrue(*mock_fs.CheckAndReset())
run()
run()
run_expecting_stat('0')
test_fs.IncrementStat()
run_expecting_stat('1')
def testSkipNotFound(self):
caching_fs = self._CreateCachingFileSystem(TestFileSystem({
'bob': {
'bob0': 'bob/bob0 contents',
'bob1': 'bob/bob1 contents'
}
}))
def read_skip_not_found(paths):
return caching_fs.Read(paths, skip_not_found=True).Get()
self.assertEqual({}, read_skip_not_found(('grub',)))
self.assertEqual({}, read_skip_not_found(('bob/bob2',)))
self.assertEqual({
'bob/bob0': 'bob/bob0 contents',
}, read_skip_not_found(('bob/bob0', 'bob/bob2')))
def testWalkCaching(self):
test_fs = TestFileSystem({
'root': {
'file1': 'file1',
'file2': 'file2',
'dir1': {
'dir1_file1': 'dir1_file1',
'dir2': {},
'dir3': {
'dir3_file1': 'dir3_file1',
'dir3_file2': 'dir3_file2'
}
}
}
})
mock_fs = MockFileSystem(test_fs)
file_system = self._CreateCachingFileSystem(mock_fs, start_empty=True)
for walkinfo in file_system.Walk(''):
pass
self.assertTrue(*mock_fs.CheckAndReset(
read_resolve_count=5, read_count=5, stat_count=5))
all_dirs, all_files = [], []
for root, dirs, files in file_system.Walk(''):
all_dirs.extend(dirs)
all_files.extend(files)
self.assertEqual(sorted(['root/', 'dir1/', 'dir2/', 'dir3/']),
sorted(all_dirs))
self.assertEqual(
sorted(['file1', 'file2', 'dir1_file1', 'dir3_file1', 'dir3_file2']),
sorted(all_files))
# All data should be cached.
self.assertTrue(*mock_fs.CheckAndReset())
# Starting from a different root should still pull cached data.
for walkinfo in file_system.Walk('root/dir1/'):
pass
self.assertTrue(*mock_fs.CheckAndReset())
# TODO(ahernandez): Test with a new instance CachingFileSystem so a
# different object store is utilized.
def testVersionedStat(self):
test_fs = TestFileSystem({
'bob': {
'bob0': 'bob/bob0 contents',
'bob1': 'bob/bob1 contents'
}
})
# Create a versioned FileSystem and verify that multiple CachingFileSystem
# instances wrapping it will share the same stat cache.
mock_fs = MockFileSystem(test_fs)
mock_fs.SetVersion('abcdefg')
def run_and_expect_stat_count(paths, stat_count=0):
file_system = self._CreateCachingFileSystem(mock_fs, start_empty=True)
[file_system.Stat(path) for path in paths]
self.assertTrue(*mock_fs.CheckAndReset(stat_count=stat_count))
run_and_expect_stat_count(['bob/', 'bob/bob0', 'bob/bob1'], stat_count=1)
run_and_expect_stat_count(['bob/', 'bob/bob0', 'bob/bob1'], stat_count=0)
# Run the suite when executed directly. The original line had dataset-export
# residue ("| unknown | codeparrot/codeparrot-clean |") fused after
# unittest.main(), which is a syntax error; it has been removed.
if __name__ == '__main__':
  unittest.main()