code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
""" Dropbox OAuth1 backend, docs at: http://psa.matiasaguirre.net/docs/backends/dropbox.html """ from social.backends.oauth import BaseOAuth1, BaseOAuth2 class DropboxOAuth(BaseOAuth1): """Dropbox OAuth authentication backend""" name = 'dropbox' ID_KEY = 'uid' AUTHORIZATION_URL = 'https://www.dropbox.com/1/oauth/authorize' REQUEST_TOKEN_URL = 'https://api.dropbox.com/1/oauth/request_token' REQUEST_TOKEN_METHOD = 'POST' ACCESS_TOKEN_URL = 'https://api.dropbox.com/1/oauth/access_token' ACCESS_TOKEN_METHOD = 'POST' REDIRECT_URI_PARAMETER_NAME = 'oauth_callback' EXTRA_DATA = [ ('id', 'id'), ('expires', 'expires') ] def get_user_details(self, response): """Return user details from Dropbox account""" fullname, first_name, last_name = self.get_user_names( response.get('display_name') ) return {'username': str(response.get('uid')), 'email': response.get('email'), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name} def user_data(self, access_token, *args, **kwargs): """Loads user data from service""" return self.get_json('https://api.dropbox.com/1/account/info', auth=self.oauth_auth(access_token)) class DropboxOAuth2(BaseOAuth2): name = 'dropbox-oauth2' ID_KEY = 'uid' AUTHORIZATION_URL = 'https://www.dropbox.com/1/oauth2/authorize' ACCESS_TOKEN_URL = 'https://api.dropbox.com/1/oauth2/token' ACCESS_TOKEN_METHOD = 'POST' REDIRECT_STATE = False EXTRA_DATA = [ ('uid', 'username'), ] def get_user_details(self, response): """Return user details from Dropbox account""" fullname, first_name, last_name = self.get_user_names( response.get('display_name') ) return {'username': str(response.get('uid')), 'email': response.get('email'), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name} def user_data(self, access_token, *args, **kwargs): """Loads user data from service""" return self.get_json( 'https://api.dropbox.com/1/account/info', headers={'Authorization': 'Bearer {0}'.format(access_token)} )
unknown
codeparrot/codeparrot-clean
#! /usr/bin/env python import paramiko import time def connect(command, ip, user, passwd, port=22): remote_conn_pre = paramiko.SSHClient() remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy()) remote_conn_pre.connect(ip, username=user, password=passwd, look_for_keys=False, allow_agent=False, port=port) remote_conn = remote_conn_pre.invoke_shell() output = remote_conn.recv(5000) time.sleep(1) output = remote_conn.send('terminal length 0\n') time.sleep(1) output = remote_conn.recv(65535) for cmd in command: output = remote_conn.send(cmd + '\n') time.sleep(1) output = remote_conn.recv(65535) return output def show_ver(): ip_addr = '50.76.53.27' username = 'pyclass' password = '88newclass' port = 8022 cmd_list = ['show version'] connection = connect(cmd_list, ip_addr, username, password, port) print connection def change_logging(): ip_addr = '50.76.53.27' username = 'pyclass' password = '88newclass' port = 8022 cmd_list = ['config t', 'logging buffered 51212', 'end', 'show run | i logging'] connection = connect(cmd_list, ip_addr, username, password, port) print connection if __name__ == '__main__': print show_ver() print print change_logging() print
unknown
codeparrot/codeparrot-clean
from django.test import TestCase import json import logging from readthedocs.projects.models import Project from readthedocs.projects import tasks log = logging.getLogger(__name__) class GitLabWebHookTest(TestCase): fixtures = ["eric", "test_data"] def tearDown(self): tasks.update_docs = self.old_bd def setUp(self): self.old_bd = tasks.update_docs def mock(*args, **kwargs): log.info("Mocking for great profit and speed.") tasks.update_docs = mock tasks.update_docs.delay = mock self.client.login(username='eric', password='test') self.payload = { "object_kind": "push", "before": "95790bf891e76fee5e1747ab589903a6a1f80f22", "after": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7", "ref": "refs/heads/awesome", "user_id": 4, "user_name": "John Smith", "user_email": "john@example.com", "project_id": 15, "repository": { "name": "Diaspora", "url": "git@github.com:rtfd/readthedocs.org.git", "description": "", "homepage": "http://github.com/rtfd/readthedocs.org", "git_http_url": "http://github.com/rtfd/readthedocs.org.git", "git_ssh_url": "git@github.com:rtfd/readthedocs.org.git", "visibility_level":0 }, "commits": [ { "id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327", "message": "Update Catalan translation to e38cb41.", "timestamp": "2011-12-12T14:27:31+02:00", "url": "http://github.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327", "author": { "name": "Jordi Mallach", "email": "jordi@softcatala.org" } }, { "id": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7", "message": "fixed readme", "timestamp": "2012-01-03T23:36:29+02:00", "url": "http://github.com/mike/diaspora/commit/da1560886d4f094c3e6c9ef40349f7d38b5d27d7", "author": { "name": "GitLab dev user", "email": "gitlabdev@dv6700.(none)" } } ], "total_commits_count": 4 } def test_gitlab_post_commit_hook_builds_branch_docs_if_it_should(self): """ Test the github post commit hook to see if it will only build versions that are set to be built if the branch they refer to is updated. Otherwise it is no op. 
""" r = self.client.post('/gitlab/', {'payload': json.dumps(self.payload)}) self.assertEqual(r.status_code, 200) self.assertEqual(r.content, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [awesome]') self.payload['ref'] = 'refs/heads/not_ok' r = self.client.post('/gitlab/', {'payload': json.dumps(self.payload)}) self.assertEqual(r.status_code, 200) self.assertEqual(r.content, '(URL Build) Not Building: github.com/rtfd/readthedocs.org [not_ok]') self.payload['ref'] = 'refs/heads/unknown' r = self.client.post('/gitlab/', {'payload': json.dumps(self.payload)}) self.assertEqual(r.status_code, 200) self.assertEqual(r.content, '(URL Build) Not Building: github.com/rtfd/readthedocs.org []') def test_gitlab_post_commit_knows_default_branches(self): """ Test the gitlab post commit hook so that the default branch will be respected and built as the latest version. """ rtd = Project.objects.get(slug='read-the-docs') old_default = rtd.default_branch rtd.default_branch = 'master' rtd.save() self.payload['ref'] = 'refs/heads/master' r = self.client.post('/gitlab/', {'payload': json.dumps(self.payload)}) self.assertEqual(r.status_code, 200) self.assertEqual(r.content, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [latest]') rtd.default_branch = old_default rtd.save() class PostCommitTest(TestCase): fixtures = ["eric", "test_data"] def setUp(self): def mock(*args, **kwargs): pass tasks.UpdateDocsTask.run = mock tasks.UpdateDocsTask.delay = mock self.client.login(username='eric', password='test') self.payload = { "after": "5ad757394b926e5637ffeafe340f952ef48bd270", "base_ref": "refs/heads/master", "before": "5b4e453dc913b08642b1d4fb10ed23c9d6e5b129", "commits": [ { "added": [], "author": { "email": "eric@ericholscher.com", "name": "Eric Holscher", "username": "ericholscher" }, "distinct": False, "id": "11f229c6a78f5bc8cb173104a3f7a68cdb7eb15a", "message": "Fix it on the front list as well.", "modified": [ 
"readthedocs/templates/core/project_list_detailed.html" ], "removed": [], "timestamp": "2011-09-12T19:38:55-07:00", "url": ("https://github.com/wraithan/readthedocs.org/" "commit/11f229c6a78f5bc8cb173104a3f7a68cdb7eb15a") }, ], "compare": ("https://github.com/wraithan/readthedocs.org/compare/" "5b4e453...5ad7573"), "created": False, "deleted": False, "forced": False, "pusher": { "name": "none" }, "ref": "refs/heads/awesome", "repository": { "created_at": "2011/09/09 14:20:13 -0700", "description": "source code to readthedocs.org", "fork": True, "forks": 0, "has_downloads": True, "has_issues": False, "has_wiki": True, "homepage": "http://rtfd.org/", "language": "Python", "name": "readthedocs.org", "open_issues": 0, "owner": { "email": "XWraithanX@gmail.com", "name": "wraithan" }, "private": False, "pushed_at": "2011/09/12 22:33:34 -0700", "size": 140, "url": "https://github.com/rtfd/readthedocs.org", "watchers": 1 } } def test_github_post_commit_hook_builds_branch_docs_if_it_should(self): """ Test the github post commit hook to see if it will only build versions that are set to be built if the branch they refer to is updated. Otherwise it is no op. 
""" r = self.client.post('/github/', {'payload': json.dumps(self.payload)}) self.assertEqual(r.status_code, 200) self.assertEqual(r.content, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [awesome]') self.payload['ref'] = 'refs/heads/not_ok' r = self.client.post('/github/', {'payload': json.dumps(self.payload)}) self.assertEqual(r.status_code, 200) self.assertEqual(r.content, '(URL Build) Not Building: github.com/rtfd/readthedocs.org [not_ok]') self.payload['ref'] = 'refs/heads/unknown' r = self.client.post('/github/', {'payload': json.dumps(self.payload)}) self.assertEqual(r.status_code, 200) self.assertEqual(r.content, '(URL Build) Not Building: github.com/rtfd/readthedocs.org []') def test_github_post_commit_knows_default_branches(self): """ Test the github post commit hook so that the default branch will be respected and built as the latest version. """ rtd = Project.objects.get(slug='read-the-docs') old_default = rtd.default_branch rtd.default_branch = 'master' rtd.save() self.payload['ref'] = 'refs/heads/master' r = self.client.post('/github/', {'payload': json.dumps(self.payload)}) self.assertEqual(r.status_code, 200) self.assertEqual(r.content, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [latest]') rtd.default_branch = old_default rtd.save() def test_core_commit_hook(self): rtd = Project.objects.get(slug='read-the-docs') rtd.default_branch = 'master' rtd.save() r = self.client.post('/build/%s' % rtd.pk, {'version_slug': 'master'}) self.assertEqual(r.status_code, 302) self.assertEqual(r._headers['location'][1], 'http://testserver/projects/read-the-docs/builds/')
unknown
codeparrot/codeparrot-clean
contact_links: - name: Ask the community url: https://github.com/twbs/bootstrap/discussions/new about: Ask and discuss questions with other Bootstrap community members.
unknown
github
https://github.com/twbs/bootstrap
.github/ISSUE_TEMPLATE/config.yml
#ifndef RBIMPL_ATTR_ALLOC_SIZE_H /*-*-C++-*-vi:se ft=cpp:*/ #define RBIMPL_ATTR_ALLOC_SIZE_H /** * @file * @author Ruby developers <ruby-core@ruby-lang.org> * @copyright This file is a part of the programming language Ruby. * Permission is hereby granted, to either redistribute and/or * modify this file, provided that the conditions mentioned in the * file COPYING are met. Consult the file for details. * @warning Symbols prefixed with either `RBIMPL` or `rbimpl` are * implementation details. Don't take them as canon. They could * rapidly appear then vanish. The name (path) of this header file * is also an implementation detail. Do not expect it to persist * at the place it is now. Developers are free to move it anywhere * anytime at will. * @note To ruby-core: remember that this header can be possibly * recursively included from extension libraries written in C++. * Do not expect for instance `__VA_ARGS__` is always available. * We assume C99 for ruby itself but we don't assume languages of * extension libraries. They could be written in C++98. * @brief Defines #RBIMPL_ATTR_ALLOC_SIZE. */ #include "ruby/internal/has/attribute.h" /** Wraps (or simulates) `__attribute__((alloc_size))` */ #if RBIMPL_HAS_ATTRIBUTE(alloc_size) # define RBIMPL_ATTR_ALLOC_SIZE(tuple) __attribute__((__alloc_size__ tuple)) #else # define RBIMPL_ATTR_ALLOC_SIZE(tuple) /* void */ #endif #endif /* RBIMPL_ATTR_ALLOC_SIZE_H */
c
github
https://github.com/ruby/ruby
include/ruby/internal/attr/alloc_size.h
//! Parks the runtime. //! //! A combination of the various resource driver park handles. use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::{Arc, Condvar, Mutex}; use crate::runtime::driver::{self, Driver}; use crate::util::TryLock; use std::sync::atomic::Ordering::SeqCst; use std::time::{Duration, Instant}; #[cfg(loom)] use crate::runtime::park::CURRENT_THREAD_PARK_COUNT; pub(crate) struct Parker { inner: Arc<Inner>, } pub(crate) struct Unparker { inner: Arc<Inner>, } struct Inner { /// Avoids entering the park if possible state: AtomicUsize, /// Used to coordinate access to the driver / `condvar` mutex: Mutex<()>, /// `Condvar` to block on if the driver is unavailable. condvar: Condvar, /// Resource (I/O, time, ...) driver shared: Arc<Shared>, } const EMPTY: usize = 0; const PARKED_CONDVAR: usize = 1; const PARKED_DRIVER: usize = 2; const NOTIFIED: usize = 3; /// Shared across multiple Parker handles struct Shared { /// Shared driver. Only one thread at a time can use this driver: TryLock<Driver>, } impl Parker { pub(crate) fn new(driver: Driver) -> Parker { Parker { inner: Arc::new(Inner { state: AtomicUsize::new(EMPTY), mutex: Mutex::new(()), condvar: Condvar::new(), shared: Arc::new(Shared { driver: TryLock::new(driver), }), }), } } pub(crate) fn unpark(&self) -> Unparker { Unparker { inner: self.inner.clone(), } } pub(crate) fn park(&mut self, handle: &driver::Handle) { self.inner.park(handle); } /// Parks the current thread for up to `duration`. /// /// This function tries to acquire the driver lock. If it succeeds, it /// parks using the driver. Otherwise, it fails back to using a condvar, /// unless the duration is zero, in which case it returns immediately. 
pub(crate) fn park_timeout(&mut self, handle: &driver::Handle, duration: Duration) { if let Some(mut driver) = self.inner.shared.driver.try_lock() { self.inner.park_driver(&mut driver, handle, Some(duration)); } else if !duration.is_zero() { self.inner.park_condvar(Some(duration)); } else { // https://github.com/tokio-rs/tokio/issues/6536 // Hacky, but it's just for loom tests. The counter gets incremented during // `park_timeout`, but we still have to increment the counter if we can't acquire the // lock. #[cfg(loom)] CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst)); } } pub(crate) fn shutdown(&mut self, handle: &driver::Handle) { self.inner.shutdown(handle); } } impl Clone for Parker { fn clone(&self) -> Parker { Parker { inner: Arc::new(Inner { state: AtomicUsize::new(EMPTY), mutex: Mutex::new(()), condvar: Condvar::new(), shared: self.inner.shared.clone(), }), } } } impl Unparker { pub(crate) fn unpark(&self, driver: &driver::Handle) { self.inner.unpark(driver); } } impl Inner { /// Parks the current thread for at most `dur`. fn park(&self, handle: &driver::Handle) { // If we were previously notified then we consume this notification and // return quickly. if self .state .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) .is_ok() { return; } if let Some(mut driver) = self.shared.driver.try_lock() { self.park_driver(&mut driver, handle, None); } else { self.park_condvar(None); } } /// Parks the current thread using a condvar for up to `duration`. /// /// If `duration` is `None`, parks indefinitely until notified. /// /// # Panics /// /// Panics if `duration` is `Some` and the duration is zero. fn park_condvar(&self, duration: Option<Duration>) { // Otherwise we need to coordinate going to sleep let mut m = self.mutex.lock(); match self .state .compare_exchange(EMPTY, PARKED_CONDVAR, SeqCst, SeqCst) { Ok(_) => {} Err(NOTIFIED) => { // We must read here, even though we know it will be `NOTIFIED`. 
// This is because `unpark` may have been called again since we read // `NOTIFIED` in the `compare_exchange` above. We must perform an // acquire operation that synchronizes with that `unpark` to observe // any writes it made before the call to unpark. To do that we must // read from the write it made to `state`. let old = self.state.swap(EMPTY, SeqCst); debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); return; } Err(actual) => panic!("inconsistent park state; actual = {actual}"), } let timeout_at = duration.map(|d| { Instant::now() .checked_add(d) // best effort to avoid overflow and still provide a usable timeout .unwrap_or(Instant::now() + Duration::from_secs(1)) }); loop { let is_timeout; (m, is_timeout) = match timeout_at { Some(timeout_at) => { let dur = timeout_at.saturating_duration_since(Instant::now()); if !dur.is_zero() { // Ideally, we would use `condvar.wait_timeout_until` here, but it is not available // in `loom`. So we manually compute the timeout. let (m, res) = self.condvar.wait_timeout(m, dur).unwrap(); (m, res.timed_out()) } else { (m, true) } } None => (self.condvar.wait(m).unwrap(), false), }; if is_timeout { match self.state.swap(EMPTY, SeqCst) { PARKED_CONDVAR => return, // timed out, and no notification received NOTIFIED => return, // notification and timeout happened concurrently actual @ (PARKED_DRIVER | EMPTY) => { panic!("inconsistent park_timeout state, actual = {actual}") } invalid => panic!("invalid park_timeout state, actual = {invalid}"), } } else if self .state .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) .is_ok() { // got a notification return; } // spurious wakeup, go back to sleep } } fn park_driver( &self, driver: &mut Driver, handle: &driver::Handle, duration: Option<Duration>, ) { if duration.as_ref().is_some_and(Duration::is_zero) { // zero duration doesn't actually park the thread, it just // polls the I/O events, timers, etc. 
driver.park_timeout(handle, Duration::ZERO); return; } match self .state .compare_exchange(EMPTY, PARKED_DRIVER, SeqCst, SeqCst) { Ok(_) => {} Err(NOTIFIED) => { // We must read here, even though we know it will be `NOTIFIED`. // This is because `unpark` may have been called again since we read // `NOTIFIED` in the `compare_exchange` above. We must perform an // acquire operation that synchronizes with that `unpark` to observe // any writes it made before the call to unpark. To do that we must // read from the write it made to `state`. let old = self.state.swap(EMPTY, SeqCst); debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); return; } Err(actual) => panic!("inconsistent park state; actual = {actual}"), } if let Some(duration) = duration { debug_assert_ne!(duration, Duration::ZERO); driver.park_timeout(handle, duration); } else { driver.park(handle); } match self.state.swap(EMPTY, SeqCst) { NOTIFIED => {} // got a notification, hurray! PARKED_DRIVER => {} // no notification, alas n => panic!("inconsistent park_timeout state: {n}"), } } fn unpark(&self, driver: &driver::Handle) { // To ensure the unparked thread will observe any writes we made before // this call, we must perform a release operation that `park` can // synchronize with. To do that we must write `NOTIFIED` even if `state` // is already `NOTIFIED`. That is why this must be a swap rather than a // compare-and-swap that returns if it reads `NOTIFIED` on failure. match self.state.swap(NOTIFIED, SeqCst) { EMPTY => {} // no one was waiting NOTIFIED => {} // already unparked PARKED_CONDVAR => self.unpark_condvar(), PARKED_DRIVER => driver.unpark(), actual => panic!("inconsistent state in unpark; actual = {actual}"), } } fn unpark_condvar(&self) { // There is a period between when the parked thread sets `state` to // `PARKED` (or last checked `state` in the case of a spurious wake // up) and when it actually waits on `cvar`. 
If we were to notify // during this period it would be ignored and then when the parked // thread went to sleep it would never wake up. Fortunately, it has // `lock` locked at this stage so we can acquire `lock` to wait until // it is ready to receive the notification. // // Releasing `lock` before the call to `notify_one` means that when the // parked thread wakes it doesn't get woken only to have to wait for us // to release `lock`. drop(self.mutex.lock()); self.condvar.notify_one(); } fn shutdown(&self, handle: &driver::Handle) { if let Some(mut driver) = self.shared.driver.try_lock() { driver.shutdown(handle); } self.condvar.notify_all(); } }
rust
github
https://github.com/tokio-rs/tokio
tokio/src/runtime/scheduler/multi_thread/park.rs
# -*- coding: utf-8 -*- """ JSON 2 HTML Converter ===================== (c) Varun Malhotra 2013-2021 Source Code: https://github.com/softvar/json2html Contributors: ------------- 1. Michel Müller (@muellermichel), https://github.com/softvar/json2html/pull/2 2. Daniel Lekic (@lekic), https://github.com/softvar/json2html/pull/17 LICENSE: MIT -------- """ import sys from collections import OrderedDict import json as json_parser if sys.version_info[:2] < (3, 0): from cgi import escape as html_escape text = unicode text_types = (unicode, str) else: from html import escape as html_escape text = str text_types = (str,) class Json2Html: def convert(self, json="", table_attributes='border="1"', clubbing=True, encode=False, escape=True): """ Convert JSON to HTML Table format """ # table attributes such as class, id, data-attr-*, etc. # eg: table_attributes = 'class = "table table-bordered sortable"' self.table_init_markup = "<table %s>" % table_attributes self.clubbing = clubbing self.escape = escape json_input = None if not json: json_input = {} elif type(json) in text_types: try: json_input = json_parser.loads(json, object_pairs_hook=OrderedDict) except ValueError as e: #so the string passed here is actually not a json string # - let's analyze whether we want to pass on the error or use the string as-is as a text node if u"Expecting property name" in text(e): #if this specific json loads error is raised, then the user probably actually wanted to pass json, but made a mistake raise e json_input = json else: json_input = json converted = self.convert_json_node(json_input) if encode: return converted.encode('ascii', 'xmlcharrefreplace') return converted def column_headers_from_list_of_dicts(self, json_input): """ This method is required to implement clubbing. 
It tries to come up with column headers for your input """ if not json_input \ or not hasattr(json_input, '__getitem__') \ or not hasattr(json_input[0], 'keys'): return None column_headers = json_input[0].keys() for entry in json_input: if not hasattr(entry, 'keys') \ or not hasattr(entry, '__iter__') \ or len(entry.keys()) != len(column_headers): return None for header in column_headers: if header not in entry: return None return column_headers def convert_json_node(self, json_input): """ Dispatch JSON input according to the outermost type and process it to generate the super awesome HTML format. We try to adhere to duck typing such that users can just pass all kinds of funky objects to json2html that *behave* like dicts and lists and other basic JSON types. """ if type(json_input) in text_types: if self.escape: return html_escape(text(json_input)) else: return text(json_input) if hasattr(json_input, 'items'): return self.convert_object(json_input) if hasattr(json_input, '__iter__') and hasattr(json_input, '__getitem__'): return self.convert_list(json_input) return text(json_input) def convert_list(self, list_input): """ Iterate over the JSON list and process it to generate either an HTML table or a HTML list, depending on what's inside. If suppose some key has array of objects and all the keys are same, instead of creating a new row for each such entry, club such values, thus it makes more sense and more readable table. 
@example: jsonObject = { "sampleData": [ {"a":1, "b":2, "c":3}, {"a":5, "b":6, "c":7} ] } OUTPUT: _____________________________ | | | | | | | a | c | b | | sampleData |---|---|---| | | 1 | 3 | 2 | | | 5 | 7 | 6 | ----------------------------- @contributed by: @muellermichel """ if not list_input: return "" converted_output = "" column_headers = None if self.clubbing: column_headers = self.column_headers_from_list_of_dicts(list_input) if column_headers is not None: converted_output += self.table_init_markup converted_output += '<thead>' converted_output += '<tr><th>' + '</th><th>'.join(column_headers) + '</th></tr>' converted_output += '</thead>' converted_output += '<tbody>' for list_entry in list_input: converted_output += '<tr><td>' converted_output += '</td><td>'.join([self.convert_json_node(list_entry[column_header]) for column_header in column_headers]) converted_output += '</td></tr>' converted_output += '</tbody>' converted_output += '</table>' return converted_output #so you don't want or need clubbing eh? This makes @muellermichel very sad... ;( #alright, let's fall back to a basic list here... converted_output = '<ul><li>' converted_output += '</li><li>'.join([self.convert_json_node(child) for child in list_input]) converted_output += '</li></ul>' return converted_output def convert_object(self, json_input): """ Iterate over the JSON object and process it to generate the super awesome HTML Table format """ if not json_input: return "" #avoid empty tables converted_output = self.table_init_markup + "<tr>" converted_output += "</tr><tr>".join([ "<th>%s</th><td>%s</td>" %( self.convert_json_node(k), self.convert_json_node(v) ) for k, v in json_input.items() ]) converted_output += '</tr></table>' return converted_output json2html = Json2Html()
unknown
codeparrot/codeparrot-clean
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- from __future__ import absolute_import import logging log = logging.getLogger(__name__) import atexit import uuid from ..utils.multi_dict import MultiDict class WebSocketManager(object): def __init__(self): self.sockets = {} self.topic_clientid_map = MultiDict() self.clientid_topic_map = MultiDict() self.auth_functions = {} atexit.register(self._atexit) def _atexit(self): if len(self.sockets) != 0: log.warning("Not all websocket connections were closed properly") def remove_clientid(self, clientid): topics = self.clientid_topic_map.get(clientid, []) for topic in topics: self.topic_clientid_map.remove_val(topic, clientid) def remove_topic(self, topic): clientids = self.topic_clientid_map.get(topic) for clientid in clientids: self.clientid_topic_map.remove_val(clientid, topic) def subscribe_socket(self, socket, topic, clientid=None): if clientid is None : clientid = str(uuid.uuid4()) self.subscribe(clientid, topic) self.add_socket(socket, clientid) def can_subscribe(self, clientid, topic): #auth goes here return True def register_auth(self, authtype, func): self.auth_functions[authtype] = func def auth(self, authtoken, topic): #authtoken - some string, whatever you want it to be #topic - string topic, of syntax type:value. 
#topic type maps to auth function authtype, topic = topic.split(":", 1) if self.auth_functions.get(authtype): return self.auth_functions[authtype](authtoken, topic) else: return True def subscribe(self, clientid, topic): if self.can_subscribe(clientid, topic): log.debug("subscribe %s, %s", topic, clientid) self.topic_clientid_map.add(topic, clientid) self.clientid_topic_map.add(clientid, topic) def add_socket(self, socket, clientid): log.debug("add socket %s", clientid) self.sockets[clientid] = socket def remove_socket(self, clientid): log.debug("remove socket %s", clientid) self.sockets.pop(clientid, None) def send(self, topic, msg, exclude=None): if exclude is None: exclude = set() log.debug("sending to %s", self.topic_clientid_map.get(topic, [])) for clientid in tuple(self.topic_clientid_map.get(topic, [])): socket = self.sockets.get(clientid, None) if not socket: continue if clientid in exclude: continue try: socket.write_message(topic + ":" + msg) except Exception as e: #what exception is this?if a client disconnects log.exception(e) self.remove_socket(clientid) self.remove_clientid(clientid)
unknown
codeparrot/codeparrot-clean
// Copyright 2014 The Cockroach Authors. // // Use of this software is governed by the CockroachDB Software License // included in the /LICENSE file. package storage import ( "bytes" "context" "fmt" "hash/fnv" "math" "math/rand" "reflect" "slices" "strconv" "strings" "testing" "time" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/isolation" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/storage/fs" "github.com/cockroachdb/cockroach/pkg/storage/mvccencoding" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/testutils/echotest" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/zerofields" "github.com/cockroachdb/cockroach/pkg/util/admission" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/iterutil" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/objstorage" "github.com/kr/pretty" "github.com/stretchr/testify/require" ) // Constants for system-reserved keys in the KV map. 
var ( localMax = keys.LocalMax keyMax = roachpb.KeyMax testKey1 = roachpb.Key("/db1") testKey2 = roachpb.Key("/db2") testKey3 = roachpb.Key("/db3") testKey4 = roachpb.Key("/db4") testKey5 = roachpb.Key("/db5") testKey6 = roachpb.Key("/db6") txn1ID = uuid.MakeV4() txn2ID = uuid.MakeV4() txn1TS = hlc.Timestamp{Logical: 1} txn2TS = hlc.Timestamp{Logical: 2} txn1 = &roachpb.Transaction{TxnMeta: enginepb.TxnMeta{Key: roachpb.Key("a"), ID: txn1ID, Epoch: 1, WriteTimestamp: txn1TS, MinTimestamp: txn1TS, CoordinatorNodeID: 1}, ReadTimestamp: txn1TS} txn1Commit = &roachpb.Transaction{TxnMeta: enginepb.TxnMeta{Key: roachpb.Key("a"), ID: txn1ID, Epoch: 1, WriteTimestamp: txn1TS, MinTimestamp: txn1TS, CoordinatorNodeID: 1}, ReadTimestamp: txn1TS, Status: roachpb.COMMITTED} txn1Abort = &roachpb.Transaction{TxnMeta: enginepb.TxnMeta{Key: roachpb.Key("a"), ID: txn1ID, Epoch: 1, WriteTimestamp: txn1TS, MinTimestamp: txn1TS, CoordinatorNodeID: 1}, Status: roachpb.ABORTED} txn1e2 = &roachpb.Transaction{TxnMeta: enginepb.TxnMeta{Key: roachpb.Key("a"), ID: txn1ID, Epoch: 2, WriteTimestamp: txn1TS, MinTimestamp: txn1TS, CoordinatorNodeID: 1}, ReadTimestamp: txn1TS} txn1e2Commit = &roachpb.Transaction{TxnMeta: enginepb.TxnMeta{Key: roachpb.Key("a"), ID: txn1ID, Epoch: 2, WriteTimestamp: txn1TS, MinTimestamp: txn1TS, CoordinatorNodeID: 1}, ReadTimestamp: txn1TS, Status: roachpb.COMMITTED} txn2 = &roachpb.Transaction{TxnMeta: enginepb.TxnMeta{Key: roachpb.Key("a"), ID: txn2ID, WriteTimestamp: txn2TS, MinTimestamp: txn2TS, CoordinatorNodeID: 2}, ReadTimestamp: txn2TS} txn2Commit = &roachpb.Transaction{TxnMeta: enginepb.TxnMeta{Key: roachpb.Key("a"), ID: txn2ID, WriteTimestamp: txn2TS, MinTimestamp: txn2TS, CoordinatorNodeID: 2}, ReadTimestamp: txn2TS, Status: roachpb.COMMITTED} value1 = roachpb.MakeValueFromString("testValue1") value2 = roachpb.MakeValueFromString("testValue2") value3 = roachpb.MakeValueFromString("testValue3") value4 = roachpb.MakeValueFromString("testValue4") value5 = 
roachpb.MakeValueFromString("testValue5") value6 = roachpb.MakeValueFromString("testValue6") tsvalue1 = timeSeriesRowAsValue(testtime, 1000, []tsSample{ {1, 1, 5, 5, 5}, }...) tsvalue2 = timeSeriesRowAsValue(testtime, 1000, []tsSample{ {1, 1, 15, 15, 15}, }...) ) // createTestPebbleEngine returns a new in-memory Pebble storage engine. func createTestPebbleEngine(opts ...ConfigOption) Engine { return NewDefaultInMemForTesting(opts...) } // makeTxn creates a new transaction using the specified base // txn and timestamp. func makeTxn(baseTxn roachpb.Transaction, ts hlc.Timestamp) *roachpb.Transaction { txn := baseTxn.Clone() txn.ReadTimestamp = ts txn.WriteTimestamp = ts return txn } func mvccVersionKey(key roachpb.Key, ts hlc.Timestamp) MVCCKey { return MVCCKey{Key: key, Timestamp: ts} } type mvccKeys []MVCCKey func (n mvccKeys) Len() int { return len(n) } func (n mvccKeys) Swap(i, j int) { n[i], n[j] = n[j], n[i] } func (n mvccKeys) Less(i, j int) bool { return n[i].Less(n[j]) } func TestMVCCStatsAddSubForward(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) goldMS := enginepb.MVCCStats{ ContainsEstimates: 1, KeyBytes: 1, KeyCount: 1, ValBytes: 1, ValCount: 1, IntentBytes: 1, IntentCount: 1, RangeKeyCount: 1, RangeKeyBytes: 1, RangeValCount: 1, RangeValBytes: 1, LockBytes: 1, LockCount: 1, LockAge: 1, GCBytesAge: 1, LiveBytes: 1, LiveCount: 1, SysBytes: 1, SysCount: 1, LastUpdateNanos: 1, AbortSpanBytes: 1, } require.NoError(t, zerofields.NoZeroField(&goldMS)) ms := goldMS zeroWithLU := enginepb.MVCCStats{ ContainsEstimates: 0, LastUpdateNanos: ms.LastUpdateNanos, } ms.Subtract(goldMS) require.Equal(t, zeroWithLU, ms) ms.Add(goldMS) require.Equal(t, goldMS, ms) // Double-add double-sub guards against mistaking `+=` for `=`. ms = zeroWithLU ms.Add(goldMS) ms.Add(goldMS) ms.Subtract(goldMS) ms.Subtract(goldMS) require.Equal(t, zeroWithLU, ms) // Run some checks for Forward. 
goldDelta := enginepb.MVCCStats{ KeyBytes: 42, LockCount: 11, LastUpdateNanos: 1e9 - 1000, } delta := goldDelta for _, ns := range []int64{1, 1e9 - 1001, 1e9 - 1000, 1e9 - 1, 1e9, 1e9 + 1, 2e9 - 1} { oldDelta := delta delta.AgeTo(ns) require.GreaterOrEqual(t, delta.LastUpdateNanos, ns, "LastUpdateNanos") shouldAge := ns/1e9-oldDelta.LastUpdateNanos/1e9 > 0 didAge := delta.LockAge != oldDelta.LockAge && delta.GCBytesAge != oldDelta.GCBytesAge require.Equal(t, shouldAge, didAge) } expDelta := goldDelta expDelta.LastUpdateNanos = 2e9 - 1 expDelta.GCBytesAge = 42 expDelta.LockAge = 11 require.Equal(t, expDelta, delta) delta.AgeTo(2e9) expDelta.LastUpdateNanos = 2e9 expDelta.GCBytesAge += 42 expDelta.LockAge += 11 require.Equal(t, expDelta, delta) { // Verify that AgeTo can go backwards in time. // Works on a copy. tmpDelta := delta expDelta := expDelta tmpDelta.AgeTo(2e9 - 1) expDelta.LastUpdateNanos = 2e9 - 1 expDelta.GCBytesAge -= 42 expDelta.LockAge -= 11 require.Equal(t, expDelta, tmpDelta) } delta.AgeTo(3e9 - 1) delta.Forward(5) // should be noop expDelta.LastUpdateNanos = 3e9 - 1 require.Equal(t, expDelta, delta) // Check that Add calls Forward appropriately. mss := []enginepb.MVCCStats{goldMS, goldMS} mss[0].LastUpdateNanos = 2e9 - 1 mss[1].LastUpdateNanos = 10e9 + 1 expMS := goldMS expMS.Add(goldMS) expMS.LastUpdateNanos = 10e9 + 1 expMS.LockAge += 9 // from aging 9 ticks from 2E9-1 to 10E9+1 expMS.GCBytesAge += 3 * 9 // ditto for i := range mss[:1] { ms := mss[(1+i)%2] ms.Add(mss[i]) require.Equal(t, expMS, ms) } // Finally, check Forward with negative counts (can happen). 
neg := zeroWithLU neg.Subtract(goldMS) exp := neg neg.AgeTo(2e9) exp.LastUpdateNanos = 2e9 exp.GCBytesAge = -7 exp.LockAge = -3 require.Equal(t, exp, neg) } func TestMVCCStatsHasUserDataCloseTo(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ms1 := enginepb.MVCCStats{ ContainsEstimates: 10, KeyBytes: 10, KeyCount: 10, ValBytes: 10, ValCount: 10, IntentBytes: 10, IntentCount: 10, RangeKeyCount: 10, RangeKeyBytes: 10, RangeValCount: 10, RangeValBytes: 10, LockBytes: 10, LockCount: 10, LockAge: 10, GCBytesAge: 10, LiveBytes: 10, LiveCount: 10, SysBytes: 10, SysCount: 10, LastUpdateNanos: 10, AbortSpanBytes: 10, } require.NoError(t, zerofields.NoZeroField(&ms1)) ms2 := ms1 require.True(t, ms1.HasUserDataCloseTo(ms2, 1, 2)) require.True(t, ms1.HasUserDataCloseTo(ms2, 0, 0)) ms2.KeyCount += 5 require.True(t, ms1.HasUserDataCloseTo(ms2, 6, 0)) require.True(t, ms1.HasUserDataCloseTo(ms2, 5, 0)) require.False(t, ms1.HasUserDataCloseTo(ms2, 4, 0)) ms2.ValBytes += 20 require.True(t, ms1.HasUserDataCloseTo(ms2, 5, 21)) require.True(t, ms1.HasUserDataCloseTo(ms2, 5, 20)) require.False(t, ms1.HasUserDataCloseTo(ms2, 5, 19)) } func TestMVCCGetNotExist(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) engine := NewDefaultInMemForTesting() defer engine.Close() valueRes, err := MVCCGet(context.Background(), engine, testKey1, hlc.Timestamp{Logical: 1}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if valueRes.Value.Exists() { t.Fatal("the value should be empty") } } func TestMVCCGetNoMoreOldVersion(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() // Need to handle the case here where the scan takes us to the // next key, which may not match the key we're looking for. 
In // other words, if we're looking for a<T=2>, and we have the // following keys: // // a: MVCCMetadata(a) // a<T=3> // b: MVCCMetadata(b) // b<T=1> // // If we search for a<T=2>, the scan should not return "b". if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 3}, value1, MVCCWriteOptions{}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 1}, value2, MVCCWriteOptions{}); err != nil { t.Fatal(err) } valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if valueRes.Value.Exists() { t.Fatal("the value should be empty") } } func TestMVCCGetWithValueHeader(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1, Logical: 1}, value1, MVCCWriteOptions{LocalTimestamp: hlc.ClockTimestamp{WallTime: 1}}); err != nil { t.Fatal(err) } valueRes, vh, err := MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if !valueRes.Value.Exists() { t.Fatal("the value should not be empty") } require.Equal(t, hlc.ClockTimestamp{WallTime: 1}, vh.LocalTimestamp) _, _, err = MVCCDelete(ctx, engine, testKey1, hlc.Timestamp{WallTime: 3}, MVCCWriteOptions{LocalTimestamp: hlc.ClockTimestamp{WallTime: 2, Logical: 1}}) if err != nil { t.Fatal(err) } // Read the latest version which should be deleted. valueRes, vh, err = MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if valueRes.Value.Exists() { t.Fatal("the value should be empty") } require.Zero(t, vh.LocalTimestamp) // Read the latest version with tombstone. 
valueRes, vh, err = MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{Tombstones: true}) if err != nil { t.Fatal(err) } else if !valueRes.Value.Exists() || len(valueRes.Value.Value.RawBytes) != 0 { t.Fatalf("the value should be non-nil with empty RawBytes; got %+v", valueRes.Value.Value) } require.Equal(t, hlc.ClockTimestamp{WallTime: 2, Logical: 1}, vh.LocalTimestamp) // Read the old version which should still exist. for _, logical := range []int32{0, math.MaxInt32} { valueRes, vh, err := MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2, Logical: logical}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if !valueRes.Value.Exists() { t.Fatal("the value should not be empty") } require.Equal(t, hlc.ClockTimestamp{WallTime: 1}, vh.LocalTimestamp) } } func TestMVCCValueHeaderOriginTimestamp(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() // Put a value with a non-zero origin timestamp. _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{OriginTimestamp: hlc.Timestamp{WallTime: 1}}) require.NoError(t, err) valueRes, vh, err := MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 3}, MVCCGetOptions{}) require.NoError(t, err) require.NotNil(t, valueRes.Value) require.Equal(t, hlc.Timestamp{WallTime: 1}, vh.OriginTimestamp) // Ensure a regular put has no origin timestamp. _, err = MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, value1, MVCCWriteOptions{}) require.NoError(t, err) valueRes, vh, err = MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 3}, MVCCGetOptions{}) require.NoError(t, err) require.Zero(t, vh.OriginTimestamp) } // TestMVCCValueHeadersForRangefeeds tests that the value headers used by // rangefeeds are set correctly. 
func TestMVCCValueHeadersForRangefeeds(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) for _, omitInRangefeeds := range []bool{false, true} { for _, originID := range []uint32{0, 1} { t.Run(fmt.Sprintf("omitInRangefeeds=%t/originID=%d", omitInRangefeeds, originID), func(t *testing.T) { ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() // Transactional put txn := *txn1 _, err := MVCCPut(ctx, engine, testKey1, txn.WriteTimestamp, value1, MVCCWriteOptions{Txn: &txn, OmitInRangefeeds: omitInRangefeeds, OriginID: originID}) require.NoError(t, err) txnCommit := txn txnCommit.Status = roachpb.COMMITTED txnCommit.WriteTimestamp = txn.WriteTimestamp _, _, _, _, err = MVCCResolveWriteIntent(ctx, engine, nil, roachpb.MakeLockUpdate(&txnCommit, roachpb.Span{Key: testKey1}), MVCCResolveWriteIntentOptions{}) require.NoError(t, err) valueRes, vh, err := MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{}) require.NoError(t, err) require.NotNil(t, valueRes.Value) require.Equal(t, omitInRangefeeds, vh.OmitInRangefeeds) require.Equal(t, originID, vh.OriginID) txn = *txn2 // Transactional delete _, _, err = MVCCDelete(ctx, engine, testKey1, txn.WriteTimestamp, MVCCWriteOptions{Txn: &txn, OmitInRangefeeds: omitInRangefeeds, OriginID: originID}) require.NoError(t, err) txnCommit = txn txnCommit.Status = roachpb.COMMITTED txnCommit.WriteTimestamp = txn.WriteTimestamp _, _, _, _, err = MVCCResolveWriteIntent(ctx, engine, nil, roachpb.MakeLockUpdate(&txnCommit, roachpb.Span{Key: testKey1}), MVCCResolveWriteIntentOptions{}) require.NoError(t, err) // Read the latest version with tombstone. 
valueRes, vh, err = MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{Tombstones: true}) require.NoError(t, err) require.NotNil(t, valueRes.Value) require.Zero(t, len(valueRes.Value.Value.RawBytes)) require.Equal(t, omitInRangefeeds, vh.OmitInRangefeeds) require.Equal(t, originID, vh.OriginID) // Non-transactional put (e.g. 1PC put) writeTs := hlc.Timestamp{Logical: 3} _, err = MVCCPut(ctx, engine, testKey1, writeTs, value2, MVCCWriteOptions{OmitInRangefeeds: omitInRangefeeds, OriginID: originID}) require.NoError(t, err) valueRes, vh, err = MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{}) require.NoError(t, err) require.NotNil(t, valueRes.Value) require.Equal(t, omitInRangefeeds, vh.OmitInRangefeeds) require.Equal(t, originID, vh.OriginID) // Non-transactional delete (e.g. 1PC delete) writeTs = hlc.Timestamp{Logical: 4} _, _, err = MVCCDelete(ctx, engine, testKey1, writeTs, MVCCWriteOptions{OmitInRangefeeds: omitInRangefeeds, OriginID: originID}) require.NoError(t, err) // Read the latest version with tombstone. valueRes, vh, err = MVCCGetWithValueHeader(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{Tombstones: true}) require.NoError(t, err) require.NotNil(t, valueRes.Value) require.Zero(t, len(valueRes.Value.Value.RawBytes)) require.Equal(t, omitInRangefeeds, vh.OmitInRangefeeds) require.Equal(t, originID, vh.OriginID) }) } } } // TestMVCCWriteWithOlderTimestampAfterDeletionOfNonexistentKey tests a write // that comes after a delete on a nonexistent key, with the write holding a // timestamp earlier than the delete timestamp. The delete must write a // tombstone with its timestamp in order to prevent the write from succeeding. 
func TestMVCCWriteWithOlderTimestampAfterDeletionOfNonexistentKey(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) engine := NewDefaultInMemForTesting() defer engine.Close() _, _, err := MVCCDelete(context.Background(), engine, testKey1, hlc.Timestamp{WallTime: 3}, MVCCWriteOptions{}) require.NoError(t, err) _, err = MVCCPut(context.Background(), engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}) require.ErrorAs(t, err, new(*kvpb.WriteTooOldError)) require.Regexp(t, err, "WriteTooOldError: write for key \"/db1\" at timestamp 0.000000001,0 too old; must write at or above 0.000000003,1") // The attempted write at ts(1,0) failed, so we should not be able to see it. valueRes, err := MVCCGet(context.Background(), engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{}) require.NoError(t, err) require.False(t, valueRes.Value.IsPresent()) } func TestMVCCInlineWithTxn(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() // Put an inline value. if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{}, value1, MVCCWriteOptions{}); err != nil { t.Fatal(err) } // Now verify inline get. valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(value1, valueRes.Value.Value) { t.Errorf("the inline value should be %v; got %v", value1, valueRes.Value.Value) } // Verify inline get with txn does still work (this will happen on a // scan if the distributed sender is forced to wrap it in a txn). if _, err = MVCCGet(ctx, engine, testKey1, hlc.Timestamp{}, MVCCGetOptions{ Txn: txn1, }); err != nil { t.Error(err) } // Verify inline put with txn is an error. 
_, err = MVCCPut(ctx, engine, testKey2, hlc.Timestamp{}, value2, MVCCWriteOptions{Txn: txn2}) if !testutils.IsError(err, "writes not allowed within transactions") { t.Errorf("unexpected error: %+v", err) } } func TestMVCCDeleteMissingKey(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) DisableMetamorphicSimpleValueEncoding(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() if _, _, err := MVCCDelete(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCWriteOptions{}); err != nil { t.Fatal(err) } require.Empty(t, mvccGetRaw(t, engine, mvccKey(testKey1))) } func TestMVCCGetAndDeleteInTxn(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() txn := makeTxn(*txn1, hlc.Timestamp{WallTime: 1}) txn.Sequence++ if _, err := MVCCPut(ctx, engine, testKey1, txn.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn}); err != nil { t.Fatal(err) } if valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, MVCCGetOptions{ Txn: txn, }); err != nil { t.Fatal(err) } else if !valueRes.Value.Exists() { t.Fatal("the value should not be empty") } txn.Sequence++ txn.WriteTimestamp = hlc.Timestamp{WallTime: 3} if _, _, err := MVCCDelete(ctx, engine, testKey1, txn.ReadTimestamp, MVCCWriteOptions{Txn: txn}); err != nil { t.Fatal(err) } // Read the latest version which should be deleted. if valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{ Txn: txn, }); err != nil { t.Fatal(err) } else if valueRes.Value.Exists() { t.Fatal("the value should be empty") } // Read the latest version with tombstone. 
if valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 4}, MVCCGetOptions{ Tombstones: true, Txn: txn, }); err != nil { t.Fatal(err) } else if !valueRes.Value.Exists() || len(valueRes.Value.Value.RawBytes) != 0 { t.Fatalf("the value should be non-nil with empty RawBytes; got %+v", valueRes.Value.Value) } // Read the old version which shouldn't exist, as within a // transaction, we delete previous values. if valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, MVCCGetOptions{}); err != nil { t.Fatal(err) } else if valueRes.Value.Exists() { t.Fatalf("expected value nil, got: %s", valueRes.Value.Value) } } func TestMVCCGetLockConflictError(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() if _, err := MVCCPut(ctx, engine, testKey1, txn1.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn1}); err != nil { t.Fatal(err) } if _, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{}); err == nil { t.Fatal("cannot read the value of a write intent without TxnID") } if _, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{ Txn: txn2, }); err == nil { t.Fatal("cannot read the value of a write intent from a different TxnID") } } func mkVal(s string, ts hlc.Timestamp) roachpb.Value { v := roachpb.MakeValueFromString(s) v.Timestamp = ts return v } func TestMVCCScanLockConflictError(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() ts := []hlc.Timestamp{{Logical: 1}, {Logical: 2}, {Logical: 3}, {Logical: 4}, {Logical: 5}, {Logical: 6}, {Logical: 7}} txn1ts := makeTxn(*txn1, ts[2]) txn2ts := makeTxn(*txn2, ts[5]) txnMap := map[int]*roachpb.Transaction{ 2: txn1ts, 5: txn2ts, 6: txn2ts, 7: txn2ts, } fixtureKVs := []roachpb.KeyValue{ {Key: testKey1, Value: 
mkVal("testValue1 pre", ts[0])}, {Key: testKey4, Value: mkVal("testValue4 pre", ts[1])}, {Key: testKey1, Value: mkVal("testValue1", ts[2])}, {Key: testKey2, Value: mkVal("testValue2", ts[3])}, {Key: testKey3, Value: mkVal("testValue3", ts[4])}, {Key: testKey4, Value: mkVal("testValue4", ts[5])}, {Key: testKey5, Value: mkVal("testValue5", ts[5])}, {Key: testKey6, Value: mkVal("testValue5", ts[5])}, } for i, kv := range fixtureKVs { v := *protoutil.Clone(&kv.Value).(*roachpb.Value) v.Timestamp = hlc.Timestamp{} if _, err := MVCCPut(ctx, engine, kv.Key, kv.Value.Timestamp, v, MVCCWriteOptions{Txn: txnMap[i]}); err != nil { t.Fatal(err) } } scanCases := []struct { name string consistent bool txn *roachpb.Transaction expIntents []roachpb.Intent expValues []roachpb.KeyValue }{ { name: "consistent-all-keys", consistent: true, txn: nil, expIntents: []roachpb.Intent{ roachpb.MakeIntent(&txn1ts.TxnMeta, testKey1), roachpb.MakeIntent(&txn2ts.TxnMeta, testKey4), }, // would be []roachpb.KeyValue{fixtureKVs[3], fixtureKVs[4]} without LockConflictError expValues: nil, }, { name: "consistent-txn1", consistent: true, txn: txn1ts, expIntents: []roachpb.Intent{ roachpb.MakeIntent(&txn2ts.TxnMeta, testKey4), roachpb.MakeIntent(&txn2ts.TxnMeta, testKey5), }, expValues: nil, // []roachpb.KeyValue{fixtureKVs[2], fixtureKVs[3], fixtureKVs[4]}, }, { name: "consistent-txn2", consistent: true, txn: txn2ts, expIntents: []roachpb.Intent{ roachpb.MakeIntent(&txn1ts.TxnMeta, testKey1), }, expValues: nil, // []roachpb.KeyValue{fixtureKVs[3], fixtureKVs[4], fixtureKVs[5]}, }, { name: "inconsistent-all-keys", consistent: false, txn: nil, expIntents: []roachpb.Intent{ roachpb.MakeIntent(&txn1ts.TxnMeta, testKey1), roachpb.MakeIntent(&txn2ts.TxnMeta, testKey4), roachpb.MakeIntent(&txn2ts.TxnMeta, testKey5), roachpb.MakeIntent(&txn2ts.TxnMeta, testKey6), }, expValues: []roachpb.KeyValue{fixtureKVs[0], fixtureKVs[3], fixtureKVs[4], fixtureKVs[1]}, }, } for _, scan := range scanCases { t.Run(scan.name, 
func(t *testing.T) { res, err := MVCCScan(ctx, engine, testKey1, testKey6.Next(), hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Inconsistent: !scan.consistent, Txn: scan.txn, MaxLockConflicts: 2}) var lcErr *kvpb.LockConflictError _ = errors.As(err, &lcErr) if (err == nil) != (lcErr == nil) { t.Errorf("unexpected error: %+v", err) } if lcErr == nil != !scan.consistent { t.Fatalf("expected lock conflict error; got %s", err) } locks := roachpb.AsLocks(res.Intents) kvs := res.KVs if len(locks) > 0 != !scan.consistent { t.Fatalf("expected different intents slice; got %+v", locks) } if scan.consistent { locks = lcErr.Locks } expLocks := roachpb.AsLocks(scan.expIntents) if !reflect.DeepEqual(locks, expLocks) { t.Fatalf("expected locks:\n%+v;\n got\n%+v", expLocks, locks) } if !reflect.DeepEqual(kvs, scan.expValues) { t.Fatalf("expected values %+v; got %+v", scan.expValues, kvs) } }) } } // TestMVCCGetInconsistent verifies the behavior of get with // consistent set to false. func TestMVCCGetInconsistent(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() // Put two values to key 1, the latest with a txn. if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil { t.Fatal(err) } txn1ts := makeTxn(*txn1, hlc.Timestamp{WallTime: 2}) if _, err := MVCCPut(ctx, engine, testKey1, txn1ts.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn1ts}); err != nil { t.Fatal(err) } // A get with consistent=false should fail in a txn. if _, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{ Inconsistent: true, Txn: txn1, }); err == nil { t.Error("expected an error getting with consistent=false in txn") } // Inconsistent get will fetch value1 for any timestamp. 
for _, ts := range []hlc.Timestamp{{WallTime: 1}, {WallTime: 2}} { res, err := MVCCGet(ctx, engine, testKey1, ts, MVCCGetOptions{Inconsistent: true}) if ts.Less(hlc.Timestamp{WallTime: 2}) { if err != nil { t.Fatal(err) } } else { if res.Intent == nil || !res.Intent.Key.Equal(testKey1) { t.Fatalf("expected %v, but got %v", testKey1, res.Intent) } } if !bytes.Equal(res.Value.Value.RawBytes, value1.RawBytes) { t.Errorf("@%s expected %q; got %q", ts, value1.RawBytes, res.Value.Value.RawBytes) } } // Write a single intent for key 2 and verify get returns empty. if _, err := MVCCPut(ctx, engine, testKey2, txn2.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn2}); err != nil { t.Fatal(err) } res, err := MVCCGet(ctx, engine, testKey2, hlc.Timestamp{WallTime: 2}, MVCCGetOptions{Inconsistent: true}) if res.Intent == nil || !res.Intent.Key.Equal(testKey2) { t.Fatal(err) } if res.Value.Exists() { t.Errorf("expected empty val; got %+v", res.Value.Value) } } // TestMVCCGetProtoInconsistent verifies the behavior of MVCCGetProto with // consistent set to false. func TestMVCCGetProtoInconsistent(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() bytes1, err := protoutil.Marshal(&value1) if err != nil { t.Fatal(err) } bytes2, err := protoutil.Marshal(&value2) if err != nil { t.Fatal(err) } v1 := roachpb.MakeValueFromBytes(bytes1) v2 := roachpb.MakeValueFromBytes(bytes2) // Put two values to key 1, the latest with a txn. if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, v1, MVCCWriteOptions{}); err != nil { t.Fatal(err) } txn1ts := makeTxn(*txn1, hlc.Timestamp{WallTime: 2}) if _, err := MVCCPut(ctx, engine, testKey1, txn1ts.ReadTimestamp, v2, MVCCWriteOptions{Txn: txn1ts}); err != nil { t.Fatal(err) } // An inconsistent get should fail in a txn. 
if _, err := MVCCGetProto(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, nil, MVCCGetOptions{ Inconsistent: true, Txn: txn1, }); err == nil { t.Error("expected an error getting inconsistently in txn") } else if errors.HasType(err, (*kvpb.LockConflictError)(nil)) { t.Error("expected non-LockConflictError with inconsistent read in txn") } // Inconsistent get will fetch value1 for any timestamp. for _, ts := range []hlc.Timestamp{{WallTime: 1}, {WallTime: 2}} { val := roachpb.Value{} found, err := MVCCGetProto(ctx, engine, testKey1, ts, &val, MVCCGetOptions{ Inconsistent: true, }) if ts.Less(hlc.Timestamp{WallTime: 2}) { if err != nil { t.Fatal(err) } } else if err != nil { t.Fatal(err) } if !found { t.Errorf("expected to find result with inconsistent read") } valBytes, err := val.GetBytes() if err != nil { t.Fatal(err) } if !bytes.Equal(valBytes, []byte("testValue1")) { t.Errorf("@%s expected %q; got %q", ts, []byte("value1"), valBytes) } } { // Write a single intent for key 2 and verify get returns empty. if _, err := MVCCPut(ctx, engine, testKey2, txn2.ReadTimestamp, v1, MVCCWriteOptions{Txn: txn2}); err != nil { t.Fatal(err) } val := roachpb.Value{} found, err := MVCCGetProto(ctx, engine, testKey2, hlc.Timestamp{WallTime: 2}, &val, MVCCGetOptions{ Inconsistent: true, }) if err != nil { t.Fatal(err) } if found { t.Errorf("expected no result; got %+v", val) } } { // Write a malformed value (not an encoded MVCCKeyValue) and a // write intent to key 3; the parse error is returned instead of the // write intent. 
if _, err := MVCCPut(ctx, engine, testKey3, hlc.Timestamp{WallTime: 1}, value3, MVCCWriteOptions{}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey3, txn1ts.ReadTimestamp, v2, MVCCWriteOptions{Txn: txn1ts}); err != nil { t.Fatal(err) } val := roachpb.Value{} found, err := MVCCGetProto(ctx, engine, testKey3, hlc.Timestamp{WallTime: 1}, &val, MVCCGetOptions{ Inconsistent: true, }) if err == nil { t.Errorf("expected error reading malformed data") } else if !strings.HasPrefix(err.Error(), "proto: ") { t.Errorf("expected proto error, got %s", err) } if !found { t.Errorf("expected to find result with malformed data") } } } // Regression test for #28205: MVCCGet and MVCCScan, FindSplitKey, and // ComputeStatsForIter need to invalidate the cached iterator data. func TestMVCCInvalidateIterator(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) for _, which := range []string{"get", "scan", "findSplitKey", "computeStats"} { t.Run(which, func(t *testing.T) { engine := NewDefaultInMemForTesting() defer engine.Close() ctx := context.Background() ts1 := hlc.Timestamp{WallTime: 1} ts2 := hlc.Timestamp{WallTime: 2} key := roachpb.Key("a") if _, err := MVCCPut(ctx, engine, key, ts1, value1, MVCCWriteOptions{}); err != nil { t.Fatal(err) } var iterOptions IterOptions switch which { case "get": iterOptions.Prefix = true case "computeStats": iterOptions.KeyTypes = IterKeyTypePointsAndRanges iterOptions.UpperBound = roachpb.KeyMax case "scan", "findSplitKey": iterOptions.UpperBound = roachpb.KeyMax } // Use a batch which internally caches the iterator. batch := engine.NewBatch() defer batch.Close() { // Seek the iter to a valid position. 
iter, err := batch.NewMVCCIterator(context.Background(), MVCCKeyAndIntentsIterKind, iterOptions) if err != nil { t.Fatal(err) } iter.SeekGE(MakeMVCCMetadataKey(key)) iter.Close() } var err error switch which { case "get": _, err = MVCCGet(ctx, batch, key, ts2, MVCCGetOptions{}) case "scan": _, err = MVCCScan(ctx, batch, key, roachpb.KeyMax, ts2, MVCCScanOptions{}) case "findSplitKey": _, err = MVCCFindSplitKey(ctx, batch, roachpb.RKeyMin, roachpb.RKeyMax, 64<<20) case "computeStatsForIter": iter, err := batch.NewMVCCIterator(context.Background(), MVCCKeyAndIntentsIterKind, iterOptions) if err != nil { t.Fatal(err) } iter.SeekGE(MVCCKey{Key: iterOptions.LowerBound}) _, err = ComputeStatsForIter(iter, 0) if err != nil { t.Fatal(err) } iter.Close() } if err != nil { t.Fatal(err) } // Verify that the iter is invalid. iter, err := batch.NewMVCCIterator(context.Background(), MVCCKeyAndIntentsIterKind, iterOptions) if err != nil { t.Fatal(err) } defer iter.Close() if ok, _ := iter.Valid(); ok { t.Fatalf("iterator should not be valid") } }) } } func mvccScanTest(ctx context.Context, t *testing.T, engine Engine) { if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, value4, MVCCWriteOptions{}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 1}, value2, MVCCWriteOptions{}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 3}, value3, MVCCWriteOptions{}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey3, hlc.Timestamp{WallTime: 1}, value3, MVCCWriteOptions{}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey3, hlc.Timestamp{WallTime: 4}, value2, MVCCWriteOptions{}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey4, hlc.Timestamp{WallTime: 1}, value4, MVCCWriteOptions{}); 
err != nil {
	t.Fatal(err)
}
if _, err := MVCCPut(ctx, engine, testKey4, hlc.Timestamp{WallTime: 5}, value1, MVCCWriteOptions{}); err != nil {
	t.Fatal(err)
}
// Scan [testKey2, testKey4) at time 1: expect the two original values and no
// resume span (the scan was not limited).
res, err := MVCCScan(ctx, engine, testKey2, testKey4, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{})
if err != nil {
	t.Fatal(err)
}
kvs := res.KVs
resumeSpan := res.ResumeSpan
if len(kvs) != 2 ||
	!bytes.Equal(kvs[0].Key, testKey2) ||
	!bytes.Equal(kvs[1].Key, testKey3) ||
	!bytes.Equal(kvs[0].Value.RawBytes, value2.RawBytes) ||
	!bytes.Equal(kvs[1].Value.RawBytes, value3.RawBytes) {
	t.Fatal("the value should not be empty")
}
if resumeSpan != nil {
	t.Fatalf("resumeSpan = %+v", resumeSpan)
}
// Same span at time 4: newer versions written earlier in this fixture
// (outside this chunk) are expected to be visible — presumably
// testKey2@t3=value3 and testKey3@t4=value2; verify against the setup above.
res, err = MVCCScan(ctx, engine, testKey2, testKey4, hlc.Timestamp{WallTime: 4}, MVCCScanOptions{})
if err != nil {
	t.Fatal(err)
}
kvs = res.KVs
resumeSpan = res.ResumeSpan
if len(kvs) != 2 ||
	!bytes.Equal(kvs[0].Key, testKey2) ||
	!bytes.Equal(kvs[1].Key, testKey3) ||
	!bytes.Equal(kvs[0].Value.RawBytes, value3.RawBytes) ||
	!bytes.Equal(kvs[1].Value.RawBytes, value2.RawBytes) {
	t.Fatal("the value should not be empty")
}
if resumeSpan != nil {
	t.Fatalf("resumeSpan = %+v", resumeSpan)
}
// Scan from testKey4 to the end of the keyspace.
res, err = MVCCScan(
	ctx, engine, testKey4, keyMax, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{},
)
if err != nil {
	t.Fatal(err)
}
kvs = res.KVs
resumeSpan = res.ResumeSpan
if len(kvs) != 1 ||
	!bytes.Equal(kvs[0].Key, testKey4) ||
	!bytes.Equal(kvs[0].Value.RawBytes, value4.RawBytes) {
	t.Fatal("the value should not be empty")
}
if resumeSpan != nil {
	t.Fatalf("resumeSpan = %+v", resumeSpan)
}
// A transactional read of testKey1 must not disturb the subsequent
// non-transactional scan of the same key.
if _, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{
	Txn: txn2,
}); err != nil {
	t.Fatal(err)
}
res, err = MVCCScan(ctx, engine, localMax, testKey2, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{})
if err != nil {
	t.Fatal(err)
}
kvs = res.KVs
if len(kvs) != 1 ||
	!bytes.Equal(kvs[0].Key, testKey1) ||
	!bytes.Equal(kvs[0].Value.RawBytes, value1.RawBytes) {
	t.Fatal("the value should not be empty")
}
}

// TestMVCCScan runs the shared mvccScanTest fixture against a fresh
// in-memory engine.
func TestMVCCScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	mvccScanTest(ctx, t, engine)
}

// TestMVCCScanMaxNum exercises the MaxKeys limit: a positive limit truncates
// the result and produces a resume span, a negative limit returns nothing,
// and a limit that exactly covers the span produces no resume span.
func TestMVCCScanMaxNum(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 1}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey3, hlc.Timestamp{WallTime: 1}, value3, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey4, hlc.Timestamp{WallTime: 1}, value4, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey6, hlc.Timestamp{WallTime: 1}, value4, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	// MaxKeys: 1 stops after testKey2 and resumes at [testKey3, testKey4).
	res, err := MVCCScan(ctx, engine, testKey2, testKey4,
		hlc.Timestamp{WallTime: 1}, MVCCScanOptions{MaxKeys: 1})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 1 ||
		!bytes.Equal(res.KVs[0].Key, testKey2) ||
		!bytes.Equal(res.KVs[0].Value.RawBytes, value2.RawBytes) {
		t.Fatal("the value should not be empty")
	}
	if expected := (roachpb.Span{Key: testKey3, EndKey: testKey4}); !res.ResumeSpan.EqualValue(expected) {
		t.Fatalf("expected = %+v, resumeSpan = %+v", expected, res.ResumeSpan)
	}
	// A negative MaxKeys returns no rows and the untouched span as resume span.
	res, err = MVCCScan(ctx, engine, testKey2, testKey4,
		hlc.Timestamp{WallTime: 1}, MVCCScanOptions{MaxKeys: -1})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 0 {
		t.Fatal("the value should be empty")
	}
	if expected := (roachpb.Span{Key: testKey2, EndKey: testKey4}); !res.ResumeSpan.EqualValue(expected) {
		t.Fatalf("expected = %+v, resumeSpan = %+v", expected, res.ResumeSpan)
	}
	// Note: testKey6, though not scanned directly, is important in testing that
	// the computed resume span does not extend beyond the upper bound of a scan.
	res, err = MVCCScan(ctx, engine, testKey4, testKey5,
		hlc.Timestamp{WallTime: 1}, MVCCScanOptions{MaxKeys: 1})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 1 {
		t.Fatalf("expected 1 key but got %d", len(res.KVs))
	}
	if res.ResumeSpan != nil {
		t.Fatalf("resumeSpan = %+v", res.ResumeSpan)
	}
	// Same property for a reverse scan bounded below by testKey5.
	res, err = MVCCScan(ctx, engine, testKey5, testKey6.Next(),
		hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Reverse: true, MaxKeys: 1})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 1 {
		t.Fatalf("expected 1 key but got %d", len(res.KVs))
	}
	if res.ResumeSpan != nil {
		t.Fatalf("resumeSpan = %+v", res.ResumeSpan)
	}
}

// TestMVCCScanWithKeyPrefix verifies that a scan skips older versions of a
// key and advances to the next distinct key (prefix relationship between
// "/a" and "/aa" must not confuse version iteration).
func TestMVCCScanWithKeyPrefix(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	// Let's say you have:
	// a
	// a<T=2>
	// a<T=1>
	// aa
	// aa<T=3>
	// aa<T=2>
	// b
	// b<T=5>
	// In this case, if we scan from "a"-"b", we wish to skip
	// a<T=2> and a<T=1> and find "aa'.
	if _, err := MVCCPut(ctx, engine, roachpb.Key("/a"), hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, roachpb.Key("/a"), hlc.Timestamp{WallTime: 2}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, roachpb.Key("/aa"), hlc.Timestamp{WallTime: 2}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, roachpb.Key("/aa"), hlc.Timestamp{WallTime: 3}, value3, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, roachpb.Key("/b"), hlc.Timestamp{WallTime: 1}, value3, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	res, err := MVCCScan(ctx, engine, roachpb.Key("/a"), roachpb.Key("/b"),
		hlc.Timestamp{WallTime: 2}, MVCCScanOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 2 ||
		!bytes.Equal(res.KVs[0].Key, roachpb.Key("/a")) ||
		!bytes.Equal(res.KVs[1].Key, roachpb.Key("/aa")) ||
		!bytes.Equal(res.KVs[0].Value.RawBytes, value2.RawBytes) ||
		!bytes.Equal(res.KVs[1].Value.RawBytes, value2.RawBytes) {
		t.Fatal("the value should not be empty")
	}
}

// TestMVCCScanInTxn verifies that a scan inside a transaction sees that
// transaction's own uncommitted write, while a non-transactional scan over
// the same span fails on the intent.
func TestMVCCScanInTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 1}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	txn := makeTxn(*txn1, hlc.Timestamp{WallTime: 1})
	if _, err := MVCCPut(ctx, engine, testKey3, txn.ReadTimestamp, value3, MVCCWriteOptions{Txn: txn}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey4, hlc.Timestamp{WallTime: 1}, value4, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	res, err := MVCCScan(ctx, engine, testKey2, testKey4,
		hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Txn: txn1})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 2 ||
		!bytes.Equal(res.KVs[0].Key, testKey2) ||
		!bytes.Equal(res.KVs[1].Key, testKey3) ||
		!bytes.Equal(res.KVs[0].Value.RawBytes, value2.RawBytes) ||
		!bytes.Equal(res.KVs[1].Value.RawBytes, value3.RawBytes) {
		t.Fatal("the value should not be empty")
	}
	// Without the transaction, the intent on testKey3 must surface as an error.
	if _, err := MVCCScan(
		ctx, engine, testKey2, testKey4, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{},
	); err == nil {
		t.Fatal("expected error on uncommitted write intent")
	}
}

// TestMVCCScanInconsistent writes several values, some as intents and
// verifies that the scan sees only the committed versions.
func TestMVCCScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	// A scan with consistent=false should fail in a txn.
	if _, err := MVCCScan(
		ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 1},
		MVCCScanOptions{Inconsistent: true, Txn: txn1},
	); err == nil {
		t.Error("expected an error scanning with consistent=false in txn")
	}
	ts1 := hlc.Timestamp{WallTime: 1}
	ts2 := hlc.Timestamp{WallTime: 2}
	ts3 := hlc.Timestamp{WallTime: 3}
	ts4 := hlc.Timestamp{WallTime: 4}
	ts5 := hlc.Timestamp{WallTime: 5}
	ts6 := hlc.Timestamp{WallTime: 6}
	if _, err := MVCCPut(ctx, engine, testKey1, ts1, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	// Intent of txn1 on testKey1 at ts2 shadows the committed ts1 value.
	txn1ts2 := makeTxn(*txn1, ts2)
	if _, err := MVCCPut(ctx, engine, testKey1, txn1ts2.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn1ts2}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, ts3, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, ts4, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	// Intent of txn2 on testKey3 at ts5 with no committed version beneath it.
	txn2ts5 := makeTxn(*txn2, ts5)
	if _, err := MVCCPut(ctx, engine, testKey3, txn2ts5.ReadTimestamp, value3, MVCCWriteOptions{Txn: txn2ts5}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey4, ts6, value4, MVCCWriteOptions{}); err
!= nil {
	t.Fatal(err)
}
// Both intents should be reported by the inconsistent scan at time 7.
expIntents := []roachpb.Intent{
	roachpb.MakeIntent(&txn1ts2.TxnMeta, testKey1),
	roachpb.MakeIntent(&txn2ts5.TxnMeta, testKey3),
}
res, err := MVCCScan(
	ctx, engine, testKey1, testKey4.Next(), hlc.Timestamp{WallTime: 7},
	MVCCScanOptions{Inconsistent: true},
)
if err != nil {
	t.Fatal(err)
}
if !reflect.DeepEqual(res.Intents, expIntents) {
	t.Fatalf("expected %v, but found %v", expIntents, res.Intents)
}
makeTimestampedValue := func(v roachpb.Value, ts hlc.Timestamp) roachpb.Value {
	v.Timestamp = ts
	return v
}
// Only committed versions are returned: testKey1's intent is skipped in
// favor of the ts1 value, and testKey3 (intent only) is omitted entirely.
expKVs := []roachpb.KeyValue{
	{Key: testKey1, Value: makeTimestampedValue(value1, ts1)},
	{Key: testKey2, Value: makeTimestampedValue(value2, ts4)},
	{Key: testKey4, Value: makeTimestampedValue(value4, ts6)},
}
if !reflect.DeepEqual(res.KVs, expKVs) {
	t.Errorf("expected key values equal %v != %v", res.KVs, expKVs)
}
// Now try a scan at a historical timestamp. Only txn1's intent (ts2) is at
// or below the read timestamp, so only it is reported.
expIntents = expIntents[:1]
res, err = MVCCScan(ctx, engine, testKey1, testKey4.Next(), hlc.Timestamp{WallTime: 3},
	MVCCScanOptions{Inconsistent: true})
// BUG FIX: the original discarded the scan error and called t.Fatal(err) on
// an intents mismatch, which would print a nil error instead of the actual
// difference. Check the error first, then report the mismatch properly.
if err != nil {
	t.Fatal(err)
}
if !reflect.DeepEqual(res.Intents, expIntents) {
	t.Fatalf("expected %v, but found %v", expIntents, res.Intents)
}
expKVs = []roachpb.KeyValue{
	{Key: testKey1, Value: makeTimestampedValue(value1, ts1)},
	{Key: testKey2, Value: makeTimestampedValue(value1, ts3)},
}
if !reflect.DeepEqual(res.KVs, expKVs) {
	// BUG FIX: the original printed res.Intents here although it compared
	// res.KVs; print the values actually being compared.
	t.Errorf("expected key values equal %v != %v", res.KVs, expKVs)
}
}

// TestMVCCDeleteRange deletes subranges of keys at various limits and
// verifies the returned keys, counts, resume spans, and remaining contents.
func TestMVCCDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 1}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey3, hlc.Timestamp{WallTime: 1}, value3, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey4,
hlc.Timestamp{WallTime: 1}, value4, MVCCWriteOptions{}); err != nil {
	t.Fatal(err)
}
if _, err := MVCCPut(ctx, engine, testKey5, hlc.Timestamp{WallTime: 1}, value5, MVCCWriteOptions{}); err != nil {
	t.Fatal(err)
}
if _, err := MVCCPut(ctx, engine, testKey6, hlc.Timestamp{WallTime: 1}, value6, MVCCWriteOptions{}); err != nil {
	t.Fatal(err)
}
// Attempt to delete two keys.
deleted, resumeSpan, num, _, err := MVCCDeleteRange(
	ctx, engine, testKey2, testKey6, 2, hlc.Timestamp{WallTime: 2}, MVCCWriteOptions{}, false)
if err != nil {
	t.Fatal(err)
}
// returnKeys=false, so the deleted-keys slice must be nil.
if deleted != nil {
	t.Fatal("the value should be empty")
}
if num != 2 {
	t.Fatalf("incorrect number of keys deleted: %d", num)
}
if expected := (roachpb.Span{Key: testKey4, EndKey: testKey6}); !resumeSpan.EqualValue(expected) {
	t.Fatalf("expected = %+v, resumeSpan = %+v", expected, resumeSpan)
}
res, _ := MVCCScan(ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{})
if len(res.KVs) != 4 ||
	!bytes.Equal(res.KVs[0].Key, testKey1) ||
	!bytes.Equal(res.KVs[1].Key, testKey4) ||
	!bytes.Equal(res.KVs[2].Key, testKey5) ||
	!bytes.Equal(res.KVs[3].Key, testKey6) ||
	!bytes.Equal(res.KVs[0].Value.RawBytes, value1.RawBytes) ||
	!bytes.Equal(res.KVs[1].Value.RawBytes, value4.RawBytes) ||
	!bytes.Equal(res.KVs[2].Value.RawBytes, value5.RawBytes) ||
	!bytes.Equal(res.KVs[3].Value.RawBytes, value6.RawBytes) {
	t.Fatal("the value should not be empty")
}
// Try again, but with tombstones set to true to fetch the deleted keys as well.
kvs := []roachpb.KeyValue{}
if _, err = MVCCIterate(
	ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 2},
	MVCCScanOptions{Tombstones: true},
	func(kv roachpb.KeyValue) error {
		kvs = append(kvs, kv)
		return nil
	},
); err != nil {
	t.Fatal(err)
}
// The two deleted keys appear as tombstones (nil RawBytes).
if len(kvs) != 6 ||
	!bytes.Equal(kvs[0].Key, testKey1) ||
	!bytes.Equal(kvs[1].Key, testKey2) ||
	!bytes.Equal(kvs[2].Key, testKey3) ||
	!bytes.Equal(kvs[3].Key, testKey4) ||
	!bytes.Equal(kvs[4].Key, testKey5) ||
	!bytes.Equal(kvs[5].Key, testKey6) ||
	!bytes.Equal(kvs[0].Value.RawBytes, value1.RawBytes) ||
	!bytes.Equal(kvs[1].Value.RawBytes, nil) ||
	!bytes.Equal(kvs[2].Value.RawBytes, nil) ||
	!bytes.Equal(kvs[3].Value.RawBytes, value4.RawBytes) ||
	!bytes.Equal(kvs[4].Value.RawBytes, value5.RawBytes) ||
	!bytes.Equal(kvs[5].Value.RawBytes, value6.RawBytes) {
	t.Fatal("the value should not be empty")
}
// Attempt to delete no keys.
deleted, resumeSpan, num, _, err = MVCCDeleteRange(
	ctx, engine, testKey2, testKey6, -1, hlc.Timestamp{WallTime: 2}, MVCCWriteOptions{}, false)
if err != nil {
	t.Fatal(err)
}
if deleted != nil {
	t.Fatal("the value should be empty")
}
if num != 0 {
	t.Fatalf("incorrect number of keys deleted: %d", num)
}
// With a negative limit nothing is processed; the whole span is the resume span.
if expected := (roachpb.Span{Key: testKey2, EndKey: testKey6}); !resumeSpan.EqualValue(expected) {
	t.Fatalf("expected = %+v, resumeSpan = %+v", expected, resumeSpan)
}
res, _ = MVCCScan(ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{})
if len(res.KVs) != 4 ||
	!bytes.Equal(res.KVs[0].Key, testKey1) ||
	!bytes.Equal(res.KVs[1].Key, testKey4) ||
	!bytes.Equal(res.KVs[2].Key, testKey5) ||
	!bytes.Equal(res.KVs[3].Key, testKey6) ||
	!bytes.Equal(res.KVs[0].Value.RawBytes, value1.RawBytes) ||
	!bytes.Equal(res.KVs[1].Value.RawBytes, value4.RawBytes) ||
	!bytes.Equal(res.KVs[2].Value.RawBytes, value5.RawBytes) ||
	!bytes.Equal(res.KVs[3].Value.RawBytes, value6.RawBytes) {
	t.Fatal("the value should not be empty")
}
deleted, resumeSpan, num, _, err = MVCCDeleteRange(ctx, engine, testKey4,
keyMax, 0, hlc.Timestamp{WallTime: 2}, MVCCWriteOptions{}, false)
if err != nil {
	t.Fatal(err)
}
// BUG FIX: returnKeys=false, so `deleted` must be nil; the original failure
// message said "should not be empty", inverting the assertion's meaning and
// contradicting every sibling check in this test.
if deleted != nil {
	t.Fatal("the value should be empty")
}
if num != 3 {
	t.Fatalf("incorrect number of keys deleted: %d", num)
}
if resumeSpan != nil {
	t.Fatalf("wrong resume key: expected nil, found %v", resumeSpan)
}
res, err = MVCCScan(ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{})
if err != nil {
	t.Fatal(err)
}
if len(res.KVs) != 1 ||
	!bytes.Equal(res.KVs[0].Key, testKey1) ||
	!bytes.Equal(res.KVs[0].Value.RawBytes, value1.RawBytes) {
	t.Fatalf("the value should not be empty: %+v", res.KVs)
}
// Delete the remaining key below testKey2.
deleted, resumeSpan, num, _, err = MVCCDeleteRange(
	ctx, engine, localMax, testKey2, 0, hlc.Timestamp{WallTime: 2}, MVCCWriteOptions{}, false)
if err != nil {
	t.Fatal(err)
}
// BUG FIX: same inverted message as above — `deleted` is expected to be nil.
if deleted != nil {
	t.Fatal("the value should be empty")
}
if num != 1 {
	t.Fatalf("incorrect number of keys deleted: %d", num)
}
if resumeSpan != nil {
	t.Fatalf("wrong resume key: expected nil, found %v", resumeSpan)
}
// BUG FIX: the original discarded this scan's error (`res, _ =`) and then
// tested the stale `err` from the previous call; capture the real error.
res, err = MVCCScan(ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{})
if err != nil {
	t.Fatal(err)
}
if len(res.KVs) != 0 {
	t.Fatal("the value should be empty")
}
}

// TestMVCCDeleteRangeReturnKeys mirrors TestMVCCDeleteRange but with
// returnKeys=true, verifying the returned deleted-key slices as well.
func TestMVCCDeleteRangeReturnKeys(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 1}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey3, hlc.Timestamp{WallTime: 1}, value3, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey4, hlc.Timestamp{WallTime: 1}, value4, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey5, hlc.Timestamp{WallTime: 1},
value5, MVCCWriteOptions{}); err != nil {
	t.Fatal(err)
}
if _, err := MVCCPut(ctx, engine, testKey6, hlc.Timestamp{WallTime: 1}, value6, MVCCWriteOptions{}); err != nil {
	t.Fatal(err)
}
// Attempt to delete two keys.
deleted, resumeSpan, num, _, err := MVCCDeleteRange(
	ctx, engine, testKey2, testKey6, 2, hlc.Timestamp{WallTime: 2}, MVCCWriteOptions{}, true)
if err != nil {
	t.Fatal(err)
}
// returnKeys=true: the deleted keys themselves are returned, in order.
if len(deleted) != 2 {
	t.Fatal("the value should not be empty")
}
if num != 2 {
	t.Fatalf("incorrect number of keys deleted: %d", num)
}
if expected, actual := testKey2, deleted[0]; !expected.Equal(actual) {
	t.Fatalf("wrong key deleted: expected %v found %v", expected, actual)
}
if expected, actual := testKey3, deleted[1]; !expected.Equal(actual) {
	t.Fatalf("wrong key deleted: expected %v found %v", expected, actual)
}
if expected := (roachpb.Span{Key: testKey4, EndKey: testKey6}); !resumeSpan.EqualValue(expected) {
	t.Fatalf("expected = %+v, resumeSpan = %+v", expected, resumeSpan)
}
res, _ := MVCCScan(ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{})
if len(res.KVs) != 4 ||
	!bytes.Equal(res.KVs[0].Key, testKey1) ||
	!bytes.Equal(res.KVs[1].Key, testKey4) ||
	!bytes.Equal(res.KVs[2].Key, testKey5) ||
	!bytes.Equal(res.KVs[3].Key, testKey6) ||
	!bytes.Equal(res.KVs[0].Value.RawBytes, value1.RawBytes) ||
	!bytes.Equal(res.KVs[1].Value.RawBytes, value4.RawBytes) ||
	!bytes.Equal(res.KVs[2].Value.RawBytes, value5.RawBytes) ||
	!bytes.Equal(res.KVs[3].Value.RawBytes, value6.RawBytes) {
	t.Fatal("the value should not be empty")
}
// Attempt to delete no keys.
deleted, resumeSpan, num, _, err = MVCCDeleteRange(
	ctx, engine, testKey2, testKey6, -1, hlc.Timestamp{WallTime: 2}, MVCCWriteOptions{}, true)
if err != nil {
	t.Fatal(err)
}
if deleted != nil {
	t.Fatalf("the value should be empty: %s", deleted)
}
if num != 0 {
	t.Fatalf("incorrect number of keys deleted: %d", num)
}
if expected := (roachpb.Span{Key: testKey2, EndKey: testKey6}); !resumeSpan.EqualValue(expected) {
	t.Fatalf("expected = %+v, resumeSpan = %+v", expected, resumeSpan)
}
res, _ = MVCCScan(ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{})
if len(res.KVs) != 4 ||
	!bytes.Equal(res.KVs[0].Key, testKey1) ||
	!bytes.Equal(res.KVs[1].Key, testKey4) ||
	!bytes.Equal(res.KVs[2].Key, testKey5) ||
	!bytes.Equal(res.KVs[3].Key, testKey6) ||
	!bytes.Equal(res.KVs[0].Value.RawBytes, value1.RawBytes) ||
	!bytes.Equal(res.KVs[1].Value.RawBytes, value4.RawBytes) ||
	!bytes.Equal(res.KVs[2].Value.RawBytes, value5.RawBytes) ||
	!bytes.Equal(res.KVs[3].Value.RawBytes, value6.RawBytes) {
	t.Fatal("the value should not be empty")
}
// Delete everything from testKey4 on, unbounded limit.
deleted, resumeSpan, num, _, err = MVCCDeleteRange(
	ctx, engine, testKey4, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCWriteOptions{}, true)
if err != nil {
	t.Fatal(err)
}
if len(deleted) != 3 {
	t.Fatal("the value should not be empty")
}
if num != 3 {
	t.Fatalf("incorrect number of keys deleted: %d", num)
}
if expected, actual := testKey4, deleted[0]; !expected.Equal(actual) {
	t.Fatalf("wrong key deleted: expected %v found %v", expected, actual)
}
if expected, actual := testKey5, deleted[1]; !expected.Equal(actual) {
	t.Fatalf("wrong key deleted: expected %v found %v", expected, actual)
}
if expected, actual := testKey6, deleted[2]; !expected.Equal(actual) {
	t.Fatalf("wrong key deleted: expected %v found %v", expected, actual)
}
if resumeSpan != nil {
	t.Fatalf("wrong resume key: expected nil, found %v", resumeSpan)
}
res, _ = MVCCScan(ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{})
if len(res.KVs) != 1 ||
	!bytes.Equal(res.KVs[0].Key, testKey1) ||
	!bytes.Equal(res.KVs[0].Value.RawBytes, value1.RawBytes) {
	t.Fatal("the value should not be empty")
}
// Delete the last remaining key.
deleted, resumeSpan, num, _, err = MVCCDeleteRange(
	ctx, engine, localMax, testKey2, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCWriteOptions{}, true)
if err != nil {
	t.Fatal(err)
}
if len(deleted) != 1 {
	t.Fatal("the value should not be empty")
}
if num != 1 {
	t.Fatalf("incorrect number of keys deleted: %d", num)
}
if expected, actual := testKey1, deleted[0]; !expected.Equal(actual) {
	t.Fatalf("wrong key deleted: expected %v found %v", expected, actual)
}
if resumeSpan != nil {
	t.Fatalf("wrong resume key: %v", resumeSpan)
}
res, _ = MVCCScan(ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{})
if len(res.KVs) != 0 {
	t.Fatal("the value should be empty")
}
}

// TestMVCCDeleteRangeFailed verifies that a non-transactional DeleteRange
// over another transaction's intents fails, while the owning transaction
// (at a later sequence number) can delete the same span.
func TestMVCCDeleteRangeFailed(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	txn := makeTxn(*txn1, hlc.Timestamp{WallTime: 1})
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	txn.Sequence++
	if _, err := MVCCPut(ctx, engine, testKey2, txn.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn}); err != nil {
		t.Fatal(err)
	}
	txn.Sequence++
	if _, err := MVCCPut(ctx, engine, testKey3, txn.ReadTimestamp, value3, MVCCWriteOptions{Txn: txn}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey4, hlc.Timestamp{WallTime: 1}, value4, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, _, _, _, err := MVCCDeleteRange(
		ctx, engine, testKey2, testKey4, math.MaxInt64,
		hlc.Timestamp{WallTime: 1}, MVCCWriteOptions{}, false); err == nil {
		t.Fatal("expected error on uncommitted write intent")
	}
	txn.Sequence++
	if _, _, _, _, err := MVCCDeleteRange(
		ctx, engine, testKey2, testKey4, math.MaxInt64,
		txn.ReadTimestamp, MVCCWriteOptions{Txn: txn}, false); err != nil {
		t.Fatal(err)
	}
}

// TestMVCCDeleteRangeConcurrentTxn verifies that a transactional DeleteRange
// fails when it encounters another transaction's intent in the span.
func TestMVCCDeleteRangeConcurrentTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	txn1ts := makeTxn(*txn1, hlc.Timestamp{WallTime: 1})
	txn2ts := makeTxn(*txn2, hlc.Timestamp{WallTime: 2})
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, txn1ts.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn1ts}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey3, txn2ts.ReadTimestamp, value3, MVCCWriteOptions{Txn: txn2ts}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey4, hlc.Timestamp{WallTime: 1}, value4, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, _, _, _, err := MVCCDeleteRange(
		ctx, engine, testKey2, testKey4, math.MaxInt64,
		txn1ts.ReadTimestamp, MVCCWriteOptions{Txn: txn1ts}, false,
	); err == nil {
		t.Fatal("expected error on uncommitted write intent")
	}
}

// TestMVCCUncommittedDeleteRangeVisible tests that the keys in an uncommitted
// DeleteRange are visible to the same transaction at a higher epoch.
func TestMVCCUncommittedDeleteRangeVisible(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 1}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey3, hlc.Timestamp{WallTime: 1}, value3, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	// Non-transactionally delete testKey2 before the txn's DeleteRange.
	if _, _, err := MVCCDelete(
		ctx, engine, testKey2, hlc.Timestamp{WallTime: 2, Logical: 1}, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	txn := makeTxn(*txn1, hlc.Timestamp{WallTime: 2, Logical: 2})
	if _, _, _, _, err := MVCCDeleteRange(
		ctx, engine, testKey1, testKey4, math.MaxInt64,
		txn.ReadTimestamp, MVCCWriteOptions{Txn: txn}, false,
	); err != nil {
		t.Fatal(err)
	}
	// At the higher epoch, only the already-deleted testKey2 is invisible;
	// the txn's own uncommitted deletions of testKey1/testKey3 are visible.
	txn.Epoch++
	res, _ := MVCCScan(ctx, engine, testKey1, testKey4, hlc.Timestamp{WallTime: 3},
		MVCCScanOptions{Txn: txn})
	if e := 2; len(res.KVs) != e {
		t.Fatalf("e = %d, got %d", e, len(res.KVs))
	}
}

// TestMVCCDeleteRangeOldTimestamp tests a case where a delete range with an
// older timestamp happens after a delete with a newer timestamp.
func TestMVCCDeleteRangeOldTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	_, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{})
	if err != nil {
		t.Fatal(err)
	}
	_, err = MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 3}, value2, MVCCWriteOptions{})
	if err != nil {
		t.Fatal(err)
	}
	_, _, err = MVCCDelete(ctx, engine, testKey2, hlc.Timestamp{WallTime: 5}, MVCCWriteOptions{})
	if err != nil {
		t.Fatal(err)
	}
	// Delete at a time before the tombstone. Should return a WriteTooOld error.
b := engine.NewBatch()
defer b.Close()
keys, resume, keyCount, _, err := MVCCDeleteRange(
	ctx, b, testKey1, testKey4, math.MaxInt64, hlc.Timestamp{WallTime: 4}, MVCCWriteOptions{}, true)
require.Nil(t, keys)
require.Nil(t, resume)
require.Equal(t, int64(0), keyCount)
require.NotNil(t, err)
require.IsType(t, (*kvpb.WriteTooOldError)(nil), err)
// Delete at the same time as the tombstone. Should return a WriteTooOld error.
b = engine.NewBatch()
defer b.Close()
keys, resume, keyCount, _, err = MVCCDeleteRange(
	ctx, b, testKey1, testKey4, math.MaxInt64, hlc.Timestamp{WallTime: 5}, MVCCWriteOptions{}, true)
require.Nil(t, keys)
require.Nil(t, resume)
require.Equal(t, int64(0), keyCount)
require.NotNil(t, err)
require.IsType(t, (*kvpb.WriteTooOldError)(nil), err)
// Delete at a time after the tombstone. Should succeed and should not
// include the tombstone in the returned keys.
b = engine.NewBatch()
defer b.Close()
keys, resume, keyCount, _, err = MVCCDeleteRange(
	ctx, b, testKey1, testKey4, math.MaxInt64, hlc.Timestamp{WallTime: 6}, MVCCWriteOptions{}, true)
require.Equal(t, []roachpb.Key{testKey1}, keys)
require.Nil(t, resume)
require.Equal(t, int64(1), keyCount)
require.NoError(t, err)
}

// TestMVCCDeleteRangeInline verifies DeleteRange over inline (zero-timestamp)
// values: inline deletes succeed, and mixing inline and versioned keys or
// using a transaction is rejected.
func TestMVCCDeleteRangeInline(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()
	// Make five inline values (zero timestamp).
	for i, kv := range []struct {
		key   roachpb.Key
		value roachpb.Value
	}{
		{testKey1, value1},
		{testKey2, value2},
		{testKey3, value3},
		{testKey4, value4},
		{testKey5, value5},
	} {
		if _, err := MVCCPut(ctx, engine, kv.key, hlc.Timestamp{Logical: 0}, kv.value, MVCCWriteOptions{}); err != nil {
			t.Fatalf("%d: %+v", i, err)
		}
	}
	// Create one non-inline value (non-zero timestamp).
	if _, err := MVCCPut(ctx, engine, testKey6, hlc.Timestamp{WallTime: 1}, value6, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	// Attempt to delete two inline keys, should succeed.
	deleted, resumeSpan, num, _, err := MVCCDeleteRange(
		ctx, engine, testKey2, testKey6, 2, hlc.Timestamp{Logical: 0}, MVCCWriteOptions{}, true)
	if err != nil {
		t.Fatal(err)
	}
	if expected := int64(2); num != expected {
		t.Fatalf("got %d deleted keys, expected %d", num, expected)
	}
	if expected := []roachpb.Key{testKey2, testKey3}; !reflect.DeepEqual(deleted, expected) {
		t.Fatalf("got deleted values = %v, expected = %v", deleted, expected)
	}
	if expected := (roachpb.Span{Key: testKey4, EndKey: testKey6}); !resumeSpan.EqualValue(expected) {
		t.Fatalf("got resume span = %s, expected = %s", resumeSpan, expected)
	}
	// Attempt to delete inline keys at a timestamp; should fail.
	const inlineMismatchErrString = "put is inline"
	if _, _, _, _, err := MVCCDeleteRange(
		ctx, engine, testKey1, testKey6, 1, hlc.Timestamp{WallTime: 2}, MVCCWriteOptions{}, true,
	); !testutils.IsError(err, inlineMismatchErrString) {
		t.Fatalf("got error %v, expected error with text '%s'", err, inlineMismatchErrString)
	}
	// Attempt to delete non-inline key at zero timestamp; should fail.
	if _, _, _, _, err := MVCCDeleteRange(
		ctx, engine, testKey6, keyMax, 1, hlc.Timestamp{Logical: 0}, MVCCWriteOptions{}, true,
	); !testutils.IsError(err, inlineMismatchErrString) {
		t.Fatalf("got error %v, expected error with text '%s'", err, inlineMismatchErrString)
	}
	// Attempt to delete inline keys in a transaction; should fail.
	if _, _, _, _, err := MVCCDeleteRange(
		ctx, engine, testKey2, testKey6, 2, hlc.Timestamp{Logical: 0}, MVCCWriteOptions{Txn: txn1}, true,
	); !testutils.IsError(err, "writes not allowed within transactions") {
		t.Errorf("unexpected error: %+v", err)
	}
	// Verify final state of the engine.
	expectedKvs := []roachpb.KeyValue{
		{
			Key:   testKey1,
			Value: value1,
		},
		{
			Key:   testKey4,
			Value: value4,
		},
		{
			Key:   testKey5,
			Value: value5,
		},
		{
			Key:   testKey6,
			Value: value6,
		},
	}
	res, err := MVCCScan(ctx, engine, localMax, keyMax, hlc.Timestamp{WallTime: 2},
		MVCCScanOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if a, e := len(res.KVs), len(expectedKvs); a != e {
		t.Fatalf("engine scan found %d keys; expected %d", a, e)
	}
	// testKey6 is versioned; clear its timestamp so it compares equal to the
	// timestamp-less expectation.
	res.KVs[3].Value.Timestamp = hlc.Timestamp{}
	if !reflect.DeepEqual(expectedKvs, res.KVs) {
		t.Fatalf(
			"engine scan found key/values: %v; expected %v. Diff: %s",
			res.KVs,
			expectedKvs,
			pretty.Diff(res.KVs, expectedKvs),
		)
	}
}

// TestMVCCClearTimeRange sets up a small key-time matrix and verifies that
// MVCCClearTimeRange reverts various key/time windows, honors count and byte
// batch limits via resume keys, and refuses to clear across intents or locks.
func TestMVCCClearTimeRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	ts0 := hlc.Timestamp{WallTime: 0}
	ts0Content := []roachpb.KeyValue{}
	ts1 := hlc.Timestamp{WallTime: 10}
	v1 := value1
	v1.Timestamp = ts1
	ts1Content := []roachpb.KeyValue{{Key: testKey2, Value: v1}}
	ts2 := hlc.Timestamp{WallTime: 20}
	v2 := value2
	v2.Timestamp = ts2
	ts2Content := []roachpb.KeyValue{{Key: testKey2, Value: v2}, {Key: testKey5, Value: v2}}
	ts3 := hlc.Timestamp{WallTime: 30}
	v3 := value3
	v3.Timestamp = ts3
	ts3Content := []roachpb.KeyValue{
		{Key: testKey1, Value: v3}, {Key: testKey2, Value: v2}, {Key: testKey5, Value: v2},
	}
	ts4 := hlc.Timestamp{WallTime: 40}
	v4 := value4
	v4.Timestamp = ts4
	ts4Content := []roachpb.KeyValue{
		{Key: testKey1, Value: v3}, {Key: testKey2, Value: v4}, {Key: testKey5, Value: v4},
	}
	ts5 := hlc.Timestamp{WallTime: 50}
	// Set up an engine with the key-time space as follows:
	//    50 -
	//       |
	//    40 -      v4          v4
	//       |
	//    30 -  v3
	// time  |
	//    20 -      v2          v2
	//       |
	//    10 -      v1
	//       |
	//     0 -----------------------
	//          k1  k2  k3  k4  k5
	//                 keys
	eng := NewDefaultInMemForTesting()
	defer eng.Close()
	_, err := MVCCPut(ctx, eng, testKey2, ts1, value1, MVCCWriteOptions{})
	require.NoError(t, err)
	_, err = MVCCPut(ctx, eng, testKey2, ts2, value2, MVCCWriteOptions{})
	require.NoError(t, err)
	_, err = MVCCPut(ctx, eng, testKey5, ts2, value2, MVCCWriteOptions{})
	require.NoError(t, err)
	_, err = MVCCPut(ctx, eng, testKey1, ts3, value3, MVCCWriteOptions{})
	require.NoError(t, err)
	_, err = MVCCPut(ctx, eng, testKey5, ts4, value4, MVCCWriteOptions{})
	require.NoError(t, err)
	_, err = MVCCPut(ctx, eng, testKey2, ts4, value4, MVCCWriteOptions{})
	require.NoError(t, err)
	// assertKVs scans the reader at `at` and requires exactly `expected`.
	assertKVs := func(t *testing.T, reader Reader, at hlc.Timestamp, expected []roachpb.KeyValue) {
		t.Helper()
		res, err := MVCCScan(ctx, reader, localMax, keyMax, at, MVCCScanOptions{})
		require.NoError(t, err)
		require.Equal(t, expected, res.KVs)
	}
	const kb = 1024
	// resumingClear drives MVCCClearTimeRange to completion, following resume
	// keys, and returns how many calls it took (to verify batch limits).
	resumingClear := func(
		t *testing.T,
		ctx context.Context,
		rw ReadWriter,
		ms *enginepb.MVCCStats,
		key, endKey roachpb.Key,
		ts, endTs hlc.Timestamp,
		sz int64,
		byteLimit int64,
	) int {
		resume, err := MVCCClearTimeRange(ctx, rw, ms, key, endKey, ts, endTs, nil, nil, 64, sz, byteLimit, 0)
		require.NoError(t, err)
		attempts := 1
		for resume != nil {
			resume, err = MVCCClearTimeRange(ctx, rw, ms, resume, endKey, ts, endTs, nil, nil, 64, sz, byteLimit, 0)
			require.NoError(t, err)
			attempts++
		}
		return attempts
	}
	t.Run("clear > ts0", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		_, err := MVCCClearTimeRange(ctx, b, nil, localMax, keyMax, ts0, ts5, nil, nil, 64, 10, 1<<10, 0)
		require.NoError(t, err)
		assertKVs(t, b, ts0, ts0Content)
		assertKVs(t, b, ts1, ts0Content)
		assertKVs(t, b, ts5, ts0Content)
	})
	t.Run("clear > ts1 ", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		attempts := resumingClear(t, ctx, b, nil, localMax, keyMax, ts1, ts5, 10, kb)
		require.Equal(t, 1, attempts)
		assertKVs(t, b, ts1, ts1Content)
		assertKVs(t, b, ts2, ts1Content)
		assertKVs(t, b, ts5, ts1Content)
	})
	// A count limit of 1 forces a resume, so the clear takes two attempts.
	t.Run("clear > ts1 count-size batch", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		attempts := resumingClear(t, ctx, b, nil, localMax, keyMax, ts1, ts5, 1, kb)
		require.Equal(t, 2, attempts)
		assertKVs(t, b, ts1, ts1Content)
		assertKVs(t, b, ts2, ts1Content)
		assertKVs(t, b, ts5, ts1Content)
	})
	// A 1-byte limit likewise forces a resume.
	t.Run("clear > ts1 byte-size batch", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		attempts := resumingClear(t, ctx, b, nil, localMax, keyMax, ts1, ts5, 10, 1)
		require.Equal(t, 2, attempts)
		assertKVs(t, b, ts1, ts1Content)
		assertKVs(t, b, ts2, ts1Content)
		assertKVs(t, b, ts5, ts1Content)
	})
	t.Run("clear > ts2", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		attempts := resumingClear(t, ctx, b, nil, localMax, keyMax, ts2, ts5, 10, kb)
		require.Equal(t, 1, attempts)
		assertKVs(t, b, ts2, ts2Content)
		assertKVs(t, b, ts5, ts2Content)
	})
	t.Run("clear > ts3", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		resumingClear(t, ctx, b, nil, localMax, keyMax, ts3, ts5, 10, kb)
		assertKVs(t, b, ts3, ts3Content)
		assertKVs(t, b, ts5, ts3Content)
	})
	t.Run("clear > ts4 (nothing) ", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		_, err := MVCCClearTimeRange(ctx, b, nil, localMax, keyMax, ts4, ts5, nil, nil, 64, 10, kb, 0)
		require.NoError(t, err)
		assertKVs(t, b, ts4, ts4Content)
		assertKVs(t, b, ts5, ts4Content)
	})
	t.Run("clear > ts5 (nothing)", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		_, err := MVCCClearTimeRange(ctx, b, nil, localMax, keyMax, ts5, ts5.Next(), nil, nil, 64, 10, kb, 0)
		require.NoError(t, err)
		assertKVs(t, b, ts4, ts4Content)
		assertKVs(t, b, ts5, ts4Content)
	})
	t.Run("clear up to k5 to ts0", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		resumingClear(t, ctx, b, nil, testKey1, testKey5, ts0, ts5, 10, kb)
		assertKVs(t, b, ts2, []roachpb.KeyValue{{Key: testKey5, Value: v2}})
		assertKVs(t, b, ts5, []roachpb.KeyValue{{Key: testKey5, Value: v4}})
	})
	t.Run("clear > ts0 in empty span (nothing)", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		_, err := MVCCClearTimeRange(ctx, b, nil, testKey3, testKey5, ts0, ts5, nil, nil, 64, 10, kb, 0)
		require.NoError(t, err)
		assertKVs(t, b, ts2, ts2Content)
		assertKVs(t, b, ts5, ts4Content)
	})
	t.Run("clear > ts0 in empty span [k3,k5) (nothing)", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		_, err := MVCCClearTimeRange(ctx, b, nil, testKey3, testKey5, ts0, ts5, nil, nil, 64, 10, 1<<10, 0)
		require.NoError(t, err)
		assertKVs(t, b, ts2, ts2Content)
		assertKVs(t, b, ts5, ts4Content)
	})
	t.Run("clear k3 and up in ts0 > x >= ts1 (nothing)", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		_, err := MVCCClearTimeRange(ctx, b, nil, testKey3, keyMax, ts0, ts1, nil, nil, 64, 10, 1<<10, 0)
		require.NoError(t, err)
		assertKVs(t, b, ts2, ts2Content)
		assertKVs(t, b, ts5, ts4Content)
	})
	// Add an intent at k3@ts3.
	txn := roachpb.MakeTransaction("test", nil, isolation.Serializable, roachpb.NormalUserPriority, ts3,
		1, 1, 0, false /* omitInRangefeeds */)
	addIntent := func(t *testing.T, rw ReadWriter) {
		_, err := MVCCPut(ctx, rw, testKey3, ts3, value3, MVCCWriteOptions{Txn: &txn})
		require.NoError(t, err)
	}
	t.Run("clear everything hitting intent fails", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		addIntent(t, b)
		_, err := MVCCClearTimeRange(ctx, b, nil, localMax, keyMax, ts0, ts5, nil, nil, 64, 10, 1<<10, 0)
		require.EqualError(t, err, "conflicting locks on \"/db3\"")
	})
	t.Run("clear exactly hitting intent fails", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		addIntent(t, b)
		_, err := MVCCClearTimeRange(ctx, b, nil, testKey3, testKey4, ts2, ts3, nil, nil, 64, 10, 1<<10, 0)
		require.EqualError(t, err, "conflicting locks on \"/db3\"")
	})
	t.Run("clear everything above intent fails", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		addIntent(t, b)
		_, err := MVCCClearTimeRange(ctx, b, nil, localMax, keyMax, ts3, ts5, nil, nil, 64, 10, 1<<10, 0)
		require.EqualError(t, err, "conflicting locks on \"/db3\"")
	})
	t.Run("clear below intent fails", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		addIntent(t, b)
		_, err := MVCCClearTimeRange(ctx, b, nil, localMax, keyMax, ts1, ts2, nil, nil, 64, 10, 1<<10, 0)
		require.EqualError(t, err, "conflicting locks on \"/db3\"")
	})
	// Add a shared lock at k1 with a txn at ts3.
	addLock := func(t *testing.T, rw ReadWriter) {
		err := MVCCAcquireLock(ctx, rw, &txn.TxnMeta, txn.IgnoredSeqNums, lock.Shared, testKey1, nil, 0, 0, false)
		require.NoError(t, err)
	}
	t.Run("clear everything hitting lock fails", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		addLock(t, b)
		_, err := MVCCClearTimeRange(ctx, b, nil, localMax, keyMax, ts0, ts5, nil, nil, 64, 10, 1<<10, 0)
		require.EqualError(t, err, "conflicting locks on \"/db1\"")
	})
	t.Run("clear exactly hitting lock fails", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		addLock(t, b)
		_, err := MVCCClearTimeRange(ctx, b, nil, localMax, keyMax, ts2, ts3, nil, nil, 64, 10, 1<<10, 0)
		require.EqualError(t, err, "conflicting locks on \"/db1\"")
	})
	t.Run("clear everything above lock fails", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		addLock(t, b)
		_, err := MVCCClearTimeRange(ctx, b, nil, localMax, keyMax, ts3, ts5, nil, nil, 64, 10, 1<<10, 0)
		require.EqualError(t, err, "conflicting locks on \"/db1\"")
	})
	t.Run("clear below lock fails", func(t *testing.T) {
		b := eng.NewBatch()
		defer b.Close()
		addLock(t, b)
		_, err := MVCCClearTimeRange(ctx, b, nil, localMax, keyMax, ts1, ts2, nil, nil, 64, 10, 1<<10, 0)
		require.EqualError(t, err, "conflicting locks on \"/db1\"")
	})
}

// TestMVCCClearTimeRangeOnRandomData sets up mostly random KVs and then picks
// some random times to which to revert, ensuring that a MVCC-Scan at each of
// those times before reverting matches the result of an MVCC-Scan done at a
// later time post-revert.
func TestMVCCClearTimeRangeOnRandomData(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	rng, _ := randutil.NewTestRand()
	ctx := context.Background()
	e := NewDefaultInMemForTesting()
	defer e.Close()
	now := hlc.Timestamp{WallTime: 100000000}
	var ms enginepb.MVCCStats
	// Setup numKVs random kv by writing to random keys [0, keyRange) except for
	// the span [swathStart, swathEnd).
Then fill in that swath with kvs all // having the same ts, to ensure they all revert at the same time, thus // triggering the ClearRange optimization path. const numKVs = 10000 const keyRange, swathStart, swathEnd = 5000, 3500, 4000 const swathSize = swathEnd - swathStart const randTimeRange = 1000 wrote := make(map[int]int64, keyRange) for i := 0; i < numKVs-swathSize; i++ { k := rng.Intn(keyRange - swathSize) if k >= swathStart { k += swathSize } ts := int64(rng.Intn(randTimeRange)) // Ensure writes to a given key are increasing in time. if ts <= wrote[k] { ts = wrote[k] + 1 } wrote[k] = ts key := roachpb.Key(fmt.Sprintf("%05d", k)) if rand.Float64() > 0.8 { _, _, err := MVCCDelete(ctx, e, key, hlc.Timestamp{WallTime: ts}, MVCCWriteOptions{Stats: &ms}) require.NoError(t, err) } else { v := roachpb.MakeValueFromString(fmt.Sprintf("v-%d", i)) _, err := MVCCPut(ctx, e, key, hlc.Timestamp{WallTime: ts}, v, MVCCWriteOptions{Stats: &ms}) require.NoError(t, err) } } swathTime := rand.Intn(randTimeRange-100) + 100 for i := swathStart; i < swathEnd; i++ { key := roachpb.Key(fmt.Sprintf("%05d", i)) v := roachpb.MakeValueFromString(fmt.Sprintf("v-%d", i)) _, err := MVCCPut(ctx, e, key, hlc.Timestamp{WallTime: int64(swathTime)}, v, MVCCWriteOptions{Stats: &ms}) require.NoError(t, err) } // Add another swath of keys above to exercise an after-iteration range flush. for i := keyRange; i < keyRange+200; i++ { key := roachpb.Key(fmt.Sprintf("%05d", i)) v := roachpb.MakeValueFromString(fmt.Sprintf("v-%d", i)) _, err := MVCCPut(ctx, e, key, hlc.Timestamp{WallTime: int64(randTimeRange + 1)}, v, MVCCWriteOptions{Stats: &ms}) require.NoError(t, err) } ms.AgeTo(2000) // Sanity check starting stats. msComputed, err := ComputeStats(ctx, e, fs.UnknownReadCategory, localMax, keyMax, 2000) require.NoError(t, err) require.Equal(t, msComputed, ms) // Pick timestamps to which we'll revert, and sort them so we can go back // though them in order. 
The largest will still be less than randTimeRange so // the initial revert will be assured to use ClearRange. reverts := make([]int, 5) for i := range reverts { reverts[i] = rand.Intn(randTimeRange) } reverts[0] = swathTime - 1 slices.Sort(reverts) const byteLimit = 1000 const keyLimit = 100 const clearRangeThreshold = 64 keyLen := int64(len(roachpb.Key(fmt.Sprintf("%05d", 1)))) + MVCCVersionTimestampSize maxAttempts := (numKVs * keyLen) / byteLimit var attempts int64 for i := len(reverts) - 1; i >= 0; i-- { t.Run(fmt.Sprintf("revert-%d", i), func(t *testing.T) { revertTo := hlc.Timestamp{WallTime: int64(reverts[i])} // MVCC-Scan at the revert time. resBefore, err := MVCCScan(ctx, e, localMax, keyMax, revertTo, MVCCScanOptions{MaxKeys: numKVs}) require.NoError(t, err) // Revert to the revert time. startKey := localMax for len(startKey) > 0 { attempts++ batch := e.NewBatch() startKey, err = MVCCClearTimeRange(ctx, batch, &ms, startKey, keyMax, revertTo, now, nil, nil, clearRangeThreshold, keyLimit, byteLimit, 0) require.NoError(t, err) require.NoError(t, batch.Commit(false)) batch.Close() } msComputed, err := ComputeStats(ctx, e, fs.UnknownReadCategory, localMax, keyMax, 2000) require.NoError(t, err) require.Equal(t, msComputed, ms) // Scanning at "now" post-revert should yield the same result as scanning // at revert-time pre-revert. resAfter, err := MVCCScan(ctx, e, localMax, keyMax, now, MVCCScanOptions{MaxKeys: numKVs}) require.NoError(t, err) require.Equal(t, resBefore.KVs, resAfter.KVs) }) } require.LessOrEqual(t, attempts, maxAttempts) } // TestMVCCReverseScan verifies that MVCCReverseScan scans [start, // end) in descending order of keys. 
func TestMVCCReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// Fixture: k1 and k2 have two versions each; k3-k6 have one.
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 1}, value3, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 3}, value4, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey3, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey4, hlc.Timestamp{WallTime: 1}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey5, hlc.Timestamp{WallTime: 3}, value5, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey6, hlc.Timestamp{WallTime: 3}, value6, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}

	// Full reverse scan of [k2, k4) returns k3 then k2.
	res, err := MVCCScan(ctx, engine, testKey2, testKey4, hlc.Timestamp{WallTime: 1},
		MVCCScanOptions{Reverse: true})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 2 ||
		!bytes.Equal(res.KVs[0].Key, testKey3) ||
		!bytes.Equal(res.KVs[1].Key, testKey2) ||
		!bytes.Equal(res.KVs[0].Value.RawBytes, value1.RawBytes) ||
		!bytes.Equal(res.KVs[1].Value.RawBytes, value3.RawBytes) {
		t.Fatalf("unexpected value: %v", res.KVs)
	}
	if res.ResumeSpan != nil {
		t.Fatalf("resumeSpan = %+v", res.ResumeSpan)
	}

	// MaxKeys=1 stops after k3 and returns a resume span covering just k2.
	res, err = MVCCScan(ctx, engine, testKey2, testKey4, hlc.Timestamp{WallTime: 1},
		MVCCScanOptions{Reverse: true, MaxKeys: 1})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 1 ||
		!bytes.Equal(res.KVs[0].Key, testKey3) ||
		!bytes.Equal(res.KVs[0].Value.RawBytes, value1.RawBytes) {
		t.Fatalf("unexpected value: %v", res.KVs)
	}
	if expected := (roachpb.Span{Key: testKey2, EndKey: testKey2.Next()}); !res.ResumeSpan.EqualValue(expected) {
		t.Fatalf("expected = %+v, resumeSpan = %+v", expected, res.ResumeSpan)
	}

	// MaxKeys=-1 returns nothing and resumes over the whole requested span.
	res, err = MVCCScan(ctx, engine, testKey2, testKey4, hlc.Timestamp{WallTime: 1},
		MVCCScanOptions{Reverse: true, MaxKeys: -1})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 0 {
		t.Fatalf("unexpected value: %v", res.KVs)
	}
	if expected := (roachpb.Span{Key: testKey2, EndKey: testKey4}); !res.ResumeSpan.EqualValue(expected) {
		t.Fatalf("expected = %+v, resumeSpan = %+v", expected, res.ResumeSpan)
	}

	// The first key we encounter has multiple versions and we need to read the
	// latest.
	res, err = MVCCScan(ctx, engine, testKey2, testKey3, hlc.Timestamp{WallTime: 4},
		MVCCScanOptions{Reverse: true, MaxKeys: 1})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 1 ||
		!bytes.Equal(res.KVs[0].Key, testKey2) ||
		!bytes.Equal(res.KVs[0].Value.RawBytes, value4.RawBytes) {
		t.Errorf("unexpected value: %v", res.KVs)
	}

	// The first key we encounter is newer than our read timestamp and we need to
	// back up to the previous key.
	res, err = MVCCScan(ctx, engine, testKey4, testKey6, hlc.Timestamp{WallTime: 1},
		MVCCScanOptions{Reverse: true, MaxKeys: 1})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 1 ||
		!bytes.Equal(res.KVs[0].Key, testKey4) ||
		!bytes.Equal(res.KVs[0].Value.RawBytes, value2.RawBytes) {
		t.Fatalf("unexpected value: %v", res.KVs)
	}

	// Scan only the first key in the key space.
	res, err = MVCCScan(ctx, engine, testKey1, testKey1.Next(), hlc.Timestamp{WallTime: 1},
		MVCCScanOptions{Reverse: true, MaxKeys: 1})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 1 ||
		!bytes.Equal(res.KVs[0].Key, testKey1) ||
		!bytes.Equal(res.KVs[0].Value.RawBytes, value1.RawBytes) {
		t.Fatalf("unexpected value: %v", res.KVs)
	}
}

// TestMVCCReverseScanFirstKeyInFuture verifies that when MVCCReverseScan scans
// encounter a key with only future timestamps first, that it skips the key and
// continues to scan in reverse. #17825 was caused by this not working correctly.
func TestMVCCReverseScanFirstKeyInFuture(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// The value at key2 will be at a lower timestamp than the ReverseScan, but
	// the value at key3 will be at a larger timestamp. The ReverseScan should
	// see key3 and ignore it because none of its versions are at a low enough
	// timestamp to read. It should then continue scanning backwards and find a
	// value at key2.
	//
	// Before fixing #17825, the MVCC version scan on key3 would fall out of the
	// scan bounds and if it never found another valid key before reaching
	// KeyMax, would stop the ReverseScan from continuing.
	if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: 1}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey3, hlc.Timestamp{WallTime: 3}, value3, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}

	res, err := MVCCScan(ctx, engine, testKey1, testKey4, hlc.Timestamp{WallTime: 2},
		MVCCScanOptions{Reverse: true})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 1 ||
		!bytes.Equal(res.KVs[0].Key, testKey2) ||
		!bytes.Equal(res.KVs[0].Value.RawBytes, value2.RawBytes) {
		t.Errorf("unexpected value: %v", res.KVs)
	}
}

// Exposes a bug where the reverse MVCC scan can get stuck in an infinite loop
// until we OOM. It happened in the code path optimized to use `SeekForPrev()`
// after N `Prev()`s do not reach another logical key. Further, a write intent
// needed to be present on the logical key to make it conflict with our chosen
// `SeekForPrev()` target (logical key + '\0').
func TestMVCCReverseScanSeeksOverRepeatedKeys(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// 10 is the value of `kMaxItersBeforeSeek` at the time this test case was
	// written. Repeat the key enough times to make sure the `SeekForPrev()`
	// optimization will be used.
	for i := 1; i <= 10; i++ {
		if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{WallTime: int64(i)}, value2, MVCCWriteOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	// Lay an intent on top of the versions to trigger the conflict described
	// above.
	txn1ts := makeTxn(*txn1, hlc.Timestamp{WallTime: 11})
	if _, err := MVCCPut(ctx, engine, testKey2, txn1ts.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn1ts}); err != nil {
		t.Fatal(err)
	}

	res, err := MVCCScan(ctx, engine, testKey1, testKey3, hlc.Timestamp{WallTime: 1},
		MVCCScanOptions{Reverse: true})
	if err != nil {
		t.Fatal(err)
	}
	if len(res.KVs) != 1 ||
		!bytes.Equal(res.KVs[0].Key, testKey2) ||
		!bytes.Equal(res.KVs[0].Value.RawBytes, value2.RawBytes) {
		t.Fatal("unexpected scan results")
	}
}

// Exposes a bug where the reverse MVCC scan can get stuck in an infinite loop until we OOM.
//
// The bug happened in this scenario.
// (1) reverse scan is positioned at the range's smallest key and calls `prevKey()`
// (2) `prevKey()` peeks and sees newer versions of the same logical key
//     `iters_before_seek_-1` times, moving the iterator backwards each time
// (3) on the `iters_before_seek_`th peek, there are no previous keys found
//
// Then, the problem was `prevKey()` treated finding no previous key as if it had found a
// new logical key with the empty string. It would use `backwardLatestVersion()` to find
// the latest version of this empty string logical key. Due to condition (3),
// `backwardLatestVersion()` would go directly to its seeking optimization rather than
// trying to incrementally move backwards (if it had tried moving incrementally backwards,
// it would've noticed it's out of bounds). The seek optimization would then seek to "\0",
// which is the empty logical key with zero timestamp. Since we set RocksDB iterator lower
// bound to be the lower bound of the range scan, this seek actually lands back at the
// range's smallest key. It thinks it found a new key so it adds it to the result, and then
// this whole process repeats ad infinitum.
func TestMVCCReverseScanStopAtSmallestKey(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	// run writes numPuts versions of testKey1 and reverse-scans at `ts`,
	// expecting exactly one result (the scan must terminate).
	run := func(numPuts int, ts int64) {
		ctx := context.Background()
		engine := NewDefaultInMemForTesting()
		defer engine.Close()

		for i := 1; i <= numPuts; i++ {
			if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: int64(i)}, value1, MVCCWriteOptions{}); err != nil {
				t.Fatal(err)
			}
		}

		res, err := MVCCScan(ctx, engine, testKey1, testKey3, hlc.Timestamp{WallTime: ts},
			MVCCScanOptions{Reverse: true})
		if err != nil {
			t.Fatal(err)
		}
		if len(res.KVs) != 1 ||
			!bytes.Equal(res.KVs[0].Key, testKey1) ||
			!bytes.Equal(res.KVs[0].Value.RawBytes, value1.RawBytes) {
			t.Fatal("unexpected scan results")
		}
	}

	// Satisfying (2) and (3) is incredibly intricate because of how `iters_before_seek_`
	// is incremented/decremented heuristically. For example, at the time of writing, the
	// infinitely looping cases are `numPuts == 6 && ts == 2`, `numPuts == 7 && ts == 3`,
	// `numPuts == 8 && ts == 4`, `numPuts == 9 && ts == 5`, and `numPuts == 10 && ts == 6`.
	// Tying our test case to the `iters_before_seek_` setting logic seems brittle so let's
	// just brute force test a wide range of cases.
	for numPuts := 1; numPuts <= 10; numPuts++ {
		for ts := 1; ts <= 10; ts++ {
			run(numPuts, int64(ts))
		}
	}
}

// TestMVCCResolveTxn verifies that committing a transaction's intent makes the
// provisional value readable by non-transactional readers.
func TestMVCCResolveTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	if _, err := MVCCPut(ctx, engine, testKey1, txn1.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn1}); err != nil {
		t.Fatal(err)
	}

	{
		// The writing transaction can read its own intent.
		valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{Logical: 1}, MVCCGetOptions{
			Txn: txn1,
		})
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(value1.RawBytes, valueRes.Value.Value.RawBytes) {
			t.Fatalf("the value %s in get result does not match the value %s in request",
				value1.RawBytes, valueRes.Value.Value.RawBytes)
		}
	}

	// Resolve will write with txn1's timestamp which is 0,1.
	if _, _, _, _, err := MVCCResolveWriteIntent(ctx, engine, nil,
		roachpb.MakeLockUpdate(txn1Commit, roachpb.Span{Key: testKey1}),
		MVCCResolveWriteIntentOptions{}); err != nil {
		t.Fatal(err)
	}

	{
		// After commit, a non-transactional read sees the value.
		valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{Logical: 1}, MVCCGetOptions{})
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(value1.RawBytes, valueRes.Value.Value.RawBytes) {
			t.Fatalf("the value %s in get result does not match the value %s in request",
				value1.RawBytes, valueRes.Value.Value.RawBytes)
		}
	}
}

// TestMVCCResolveNewerIntent verifies that resolving a newer intent
// than the committing transaction aborts the intent.
func TestMVCCResolveNewerIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// Write first value.
	if _, err := MVCCPut(ctx, engine, testKey1, txn1Commit.WriteTimestamp, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	// Now, put down an intent which should return a write too old error
	// (but will still write the intent at txn1Commit.Timestamp+1).
	_, err := MVCCPut(ctx, engine, testKey1, txn1.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn1})
	if !errors.HasType(err, (*kvpb.WriteTooOldError)(nil)) {
		t.Fatalf("expected write too old error; got %s", err)
	}

	// Resolve will succeed but should remove the intent.
	if _, _, _, _, err := MVCCResolveWriteIntent(ctx, engine, nil,
		roachpb.MakeLockUpdate(txn1Commit, roachpb.Span{Key: testKey1}),
		MVCCResolveWriteIntentOptions{}); err != nil {
		t.Fatal(err)
	}

	valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{Logical: 2}, MVCCGetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(value1.RawBytes, valueRes.Value.Value.RawBytes) {
		t.Fatalf("expected value1 bytes; got %q", valueRes.Value.Value.RawBytes)
	}
}

// TestMVCCPaginate tests that MVCCPaginate respects the MaxKeys and
// TargetBytes limits, and returns the correct numKeys, numBytes, and
// resumeReason.
func TestMVCCPaginate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	testCases := []struct {
		maxKeys              int64
		targetBytes          int64
		allowEmpty           bool
		numKeysPerIter       int64
		numBytesPerIter      int64
		numIters             int
		expectedNumKeys      int64
		expectedNumBytes     int64
		expectedResumeReason kvpb.ResumeReason
	}{
		// MaxKeys and TargetBytes limits not reached, so do all 10 iterations.
		{
			maxKeys:              31,
			targetBytes:          51,
			allowEmpty:           false,
			numKeysPerIter:       3,
			numBytesPerIter:      5,
			numIters:             10,
			expectedNumKeys:      30,
			expectedNumBytes:     50,
			expectedResumeReason: 0,
		},
		// MaxKeys limit reached after 7 iterations.
		{
			maxKeys:              21,
			targetBytes:          51,
			allowEmpty:           false,
			numKeysPerIter:       3,
			numBytesPerIter:      5,
			numIters:             10,
			expectedNumKeys:      21,
			expectedNumBytes:     35,
			expectedResumeReason: kvpb.RESUME_KEY_LIMIT,
		},
		// MaxKeys limit reached after 10 iterations. Despite the fact we
		// finished iterating, we still return a resume reason because we check
		// the MaxKeys and TargetBytes limits before we check if we stop
		// iteration.
		{
			maxKeys:              30,
			targetBytes:          50,
			allowEmpty:           false,
			numKeysPerIter:       3,
			numBytesPerIter:      5,
			numIters:             10,
			expectedNumKeys:      30,
			expectedNumBytes:     50,
			expectedResumeReason: kvpb.RESUME_KEY_LIMIT,
		},
		// TargetBytes limit reached after 7 iterations.
		{
			maxKeys:              31,
			targetBytes:          34,
			allowEmpty:           false,
			numKeysPerIter:       3,
			numBytesPerIter:      5,
			numIters:             10,
			expectedNumKeys:      21,
			expectedNumBytes:     35,
			expectedResumeReason: kvpb.RESUME_BYTE_LIMIT,
		},
		// TargetBytes limit reached after 7 iterations, but with TargetBytes
		// limit exactly the number of bytes.
		{
			maxKeys:              31,
			targetBytes:          35,
			allowEmpty:           false,
			numKeysPerIter:       3,
			numBytesPerIter:      5,
			numIters:             10,
			expectedNumKeys:      21,
			expectedNumBytes:     35,
			expectedResumeReason: kvpb.RESUME_BYTE_LIMIT,
		},
		// TargetBytes limit reached after 7 iterations, but with AllowEmpty
		// set to true, so only 6 iterations are completed.
		{
			maxKeys:              31,
			targetBytes:          34,
			allowEmpty:           true,
			numKeysPerIter:       3,
			numBytesPerIter:      5,
			numIters:             10,
			expectedNumKeys:      18,
			expectedNumBytes:     30,
			expectedResumeReason: kvpb.RESUME_BYTE_LIMIT,
		},
	}
	for _, tc := range testCases {
		var iter int
		numKeys, numBytes, resumeReason, err := MVCCPaginate(context.Background(), tc.maxKeys, tc.targetBytes, tc.allowEmpty,
			func(maxKeys, targetBytes int64) (numKeys int64, numBytes int64, resumeReason kvpb.ResumeReason, err error) {
				if iter == tc.numIters {
					return 0, 0, 0, iterutil.StopIteration()
				}
				iter++
				if tc.allowEmpty && tc.numBytesPerIter > targetBytes {
					return 0, 0, kvpb.RESUME_BYTE_LIMIT, nil
				}
				return tc.numKeysPerIter, tc.numBytesPerIter, 0, nil
			})
		require.NoError(t, err)
		require.Equal(t, tc.expectedNumKeys, numKeys)
		require.Equal(t, tc.expectedNumBytes, numBytes)
		require.Equal(t, tc.expectedResumeReason, resumeReason)
	}
}

// TestMVCCResolveIntentTxnTimestampMismatch verifies that resolving an intent
// whose transaction record has been pushed moves the intent's provisional
// value forward to the pushed timestamp.
func TestMVCCResolveIntentTxnTimestampMismatch(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	txn := txn1.Clone()
	tsEarly := txn.WriteTimestamp
	txn.TxnMeta.WriteTimestamp.Forward(tsEarly.Add(10, 0))

	// Write an intent which has txn.WriteTimestamp > meta.timestamp.
	if _, err := MVCCPut(ctx, engine, testKey1, tsEarly, value1, MVCCWriteOptions{Txn: txn}); err != nil {
		t.Fatal(err)
	}

	// The Timestamp within is equal to that of txn.Meta even though
	// the intent sits at tsEarly. The bug was looking at the former
	// instead of the latter (and so we could also tickle it with
	// smaller timestamps in Txn).
	intent := roachpb.MakeLockUpdate(txn, roachpb.Span{Key: testKey1})
	intent.Status = roachpb.PENDING

	// A bug (see #7654) caused intents to just stay where they were instead
	// of being moved forward in the situation set up above.
	if _, _, _, _, err := MVCCResolveWriteIntent(ctx, engine, nil, intent, MVCCResolveWriteIntentOptions{}); err != nil {
		t.Fatal(err)
	}

	for i, test := range []struct {
		hlc.Timestamp
		found bool
	}{
		// Check that the intent has indeed moved to where we pushed it.
		{tsEarly, false},
		{intent.Txn.WriteTimestamp.Prev(), false},
		{intent.Txn.WriteTimestamp, true},
		{hlc.MaxTimestamp, true},
	} {
		_, err := MVCCGet(ctx, engine, testKey1, test.Timestamp, MVCCGetOptions{})
		if errors.HasType(err, (*kvpb.LockConflictError)(nil)) != test.found {
			t.Fatalf("%d: expected lock conflict error: %t, got %v", i, test.found, err)
		}
	}
}

// TestMVCCConditionalPutOldTimestamp tests a case where a conditional
// put with an older timestamp happens after a put with a newer timestamp.
// The conditional put fails with WriteTooOld errors, regardless of whether
// the condition succeeds or not.
func TestMVCCConditionalPutOldTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	_, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{})
	require.NoError(t, err)
	_, err = MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 3}, value2, MVCCWriteOptions{})
	require.NoError(t, err)

	// Check that a write too old error is thrown, regardless of whether the value
	// matches.
	for _, expVal := range []roachpb.Value{
		// Condition does not match.
		value1,
		// Condition matches.
		value2,
	} {
		// CPut at WallTime: 2, which is below the existing version at 3.
		_, err = MVCCConditionalPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, value3,
			expVal.TagAndDataBytes(), ConditionalPutWriteOptions{AllowIfDoesNotExist: CPutFailIfMissing})
		require.ErrorAs(t, err, new(*kvpb.WriteTooOldError))

		// Either way, no new value is written.
		ts := hlc.Timestamp{WallTime: 3}
		valueRes, err := MVCCGet(ctx, engine, testKey1, ts, MVCCGetOptions{})
		require.NoError(t, err)
		require.Equal(t, value2.RawBytes, valueRes.Value.Value.RawBytes)
	}
}

// TestMVCCMultiplePutOldTimestamp tests a case where multiple transactional
// Puts occur to the same key, but with older timestamps than a pre-existing
// key. The first should generate a WriteTooOldError and fail to write. The
// second should avoid the WriteTooOldError and write at the higher timestamp.
func TestMVCCMultiplePutOldTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	_, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 3}, value1, MVCCWriteOptions{})
	require.NoError(t, err)

	// Verify the first txn Put returns a write too old error and does not
	// write a new value.
	txn := makeTxn(*txn1, hlc.Timestamp{WallTime: 1})
	txn.Sequence++
	_, err = MVCCPut(ctx, engine, testKey1, txn.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn})
	var wtoErr *kvpb.WriteTooOldError
	require.ErrorAs(t, err, &wtoErr)
	// The error reports the timestamp just above the existing version.
	expTS := hlc.Timestamp{WallTime: 3, Logical: 1}
	require.Equal(t, expTS, wtoErr.ActualTimestamp)

	// Verify no value was written.
	valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.MaxTimestamp, MVCCGetOptions{Txn: txn})
	require.NoError(t, err)
	require.Equal(t, value1.RawBytes, valueRes.Value.Value.RawBytes)

	// Put again after advancing the txn's timestamp to the WriteTooOld error's
	// timestamp and verify no WriteTooOldError.
	txn.BumpReadTimestamp(wtoErr.ActualTimestamp)
	txn.Sequence++
	_, err = MVCCPut(ctx, engine, testKey1, txn.ReadTimestamp, value3, MVCCWriteOptions{Txn: txn})
	require.NoError(t, err)

	// Verify new value was actually written at (3, 1).
	valueRes, err = MVCCGet(ctx, engine, testKey1, hlc.MaxTimestamp, MVCCGetOptions{Txn: txn})
	require.NoError(t, err)
	require.Equal(t, expTS, valueRes.Value.Value.Timestamp)
	require.Equal(t, value3.RawBytes, valueRes.Value.Value.RawBytes)
}

// TestMVCCPutNegativeTimestampError verifies that a Put at a negative wall
// time is rejected with a descriptive error.
func TestMVCCPutNegativeTimestampError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	timestamp := hlc.Timestamp{WallTime: -1}
	expectedErrorString := fmt.Sprintf("cannot write to %q at timestamp %s", testKey1, timestamp)

	_, err := MVCCPut(ctx, engine, testKey1, timestamp, value1, MVCCWriteOptions{})
	require.EqualError(t, err, expectedErrorString)
}

// TestMVCCPutOldOrigTimestampNewCommitTimestamp tests a case where a
// transactional Put occurs to the same key, but with an older original
// timestamp than a pre-existing key. As always, this should result in a
// WriteTooOld error.
func TestMVCCPutOldOrigTimestampNewCommitTimestamp(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 3}, value1, MVCCWriteOptions{}) require.NoError(t, err) // Perform a transactional Put with a transaction whose read timestamp is // below the existing key's timestamp and whose write timestamp is above the // existing key's timestamp. txn := makeTxn(*txn1, hlc.Timestamp{WallTime: 1}) txn.WriteTimestamp = hlc.Timestamp{WallTime: 5} txn.Sequence++ _, err = MVCCPut(ctx, engine, testKey1, txn.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn}) // Verify that the Put returned a WriteTooOld with the ActualTime set to the // transactions provisional commit timestamp. expTS := txn.WriteTimestamp var wtoErr *kvpb.WriteTooOldError require.ErrorAs(t, err, &wtoErr) require.Equal(t, expTS, wtoErr.ActualTimestamp) // Verify no value was written. 
// Tail of the preceding test (its opening lines are above this chunk):
// the txn reads its own write back at hlc.MaxTimestamp.
valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.MaxTimestamp, MVCCGetOptions{Txn: txn})
require.NoError(t, err)
require.Equal(t, value1.RawBytes, valueRes.Value.Value.RawBytes)
}

// TestMVCCAbortTxn writes an intent, resolves it with an aborted txn
// record, and verifies that both the intent and its provisional value
// are removed from the engine entirely.
func TestMVCCAbortTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// Write an intent at txn1's read timestamp.
	if _, err := MVCCPut(
		ctx, engine, testKey1, txn1.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn1},
	); err != nil {
		t.Fatal(err)
	}
	// Abort the txn; the abort record carries a bumped write timestamp.
	txn1AbortWithTS := txn1Abort.Clone()
	txn1AbortWithTS.WriteTimestamp = hlc.Timestamp{Logical: 1}
	if _, _, _, _, err := MVCCResolveWriteIntent(ctx, engine, nil,
		roachpb.MakeLockUpdate(txn1AbortWithTS, roachpb.Span{Key: testKey1}),
		MVCCResolveWriteIntentOptions{},
	); err != nil {
		t.Fatal(err)
	}
	// The aborted value must not be visible to a later read...
	if valueRes, err := MVCCGet(
		ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{},
	); err != nil {
		t.Fatal(err)
	} else if valueRes.Value.Exists() {
		t.Fatalf("expected the value to be empty: %s", valueRes.Value.Value)
	}
	// ...and no raw engine data may remain for the key.
	require.Empty(t, mvccGetRaw(t, engine, mvccKey(testKey1)))
}

// TestMVCCAbortTxnWithPreviousVersion is like TestMVCCAbortTxn, but with
// committed versions below the intent: aborting the intent must re-expose
// the latest committed version (value2 at WallTime 1).
func TestMVCCAbortTxnWithPreviousVersion(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// Two committed versions, then an intent on top at WallTime 2.
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{Logical: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value2, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	txn1ts := makeTxn(*txn1, hlc.Timestamp{WallTime: 2})
	if _, err := MVCCPut(ctx, engine, testKey1, txn1ts.ReadTimestamp, value3, MVCCWriteOptions{Txn: txn1ts}); err != nil {
		t.Fatal(err)
	}
	// Abort the intent.
	txn1AbortWithTS := txn1Abort.Clone()
	txn1AbortWithTS.WriteTimestamp = hlc.Timestamp{WallTime: 2}
	if _, _, _, _, err := MVCCResolveWriteIntent(ctx, engine, nil,
		roachpb.MakeLockUpdate(txn1AbortWithTS, roachpb.Span{Key: testKey1}),
		MVCCResolveWriteIntentOptions{},
	); err != nil {
		t.Fatal(err)
	}
	// No intent should remain.
	if intentRes, err := MVCCGet(ctx, engine, testKey1, hlc.MaxTimestamp, MVCCGetOptions{
		Inconsistent: true,
	}); err != nil {
		t.Fatal(err)
	} else if intentRes.Intent != nil {
		t.Fatalf("expected no intent, got: %s", intentRes.Intent)
	}
	// A read above the aborted intent's timestamp sees the latest
	// committed version (value2 at WallTime 1).
	if valueRes, err := MVCCGet(
		ctx, engine, testKey1, hlc.Timestamp{WallTime: 3}, MVCCGetOptions{},
	); err != nil {
		t.Fatal(err)
	} else if expTS := (hlc.Timestamp{WallTime: 1}); valueRes.Value.Value.Timestamp != expTS {
		t.Fatalf("expected timestamp %+v == %+v", valueRes.Value.Value.Timestamp, expTS)
	} else if !bytes.Equal(value2.RawBytes, valueRes.Value.Value.RawBytes) {
		t.Fatalf("the value %q in get result does not match the value %q in request",
			valueRes.Value.Value.RawBytes, value2.RawBytes)
	}
}

// TestMVCCWriteWithDiffTimestampsAndEpochs exercises txn writes across
// increasing sequence numbers, timestamps, and epochs, then commits and
// checks visibility and the WriteTooOldError path.
func TestMVCCWriteWithDiffTimestampsAndEpochs(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// Start with epoch 1.
	txn := *txn1
	txn.Sequence++
	_, err := MVCCPut(ctx, engine, testKey1, txn.ReadTimestamp, value1, MVCCWriteOptions{Txn: &txn})
	require.NoError(t, err)
	// Now write with greater timestamp and epoch 2.
	txne2 := txn
	txne2.Sequence++
	txne2.Epoch = 2
	txne2.WriteTimestamp = hlc.Timestamp{WallTime: 1}
	_, err = MVCCPut(ctx, engine, testKey1, txne2.ReadTimestamp, value2, MVCCWriteOptions{Txn: &txne2})
	require.NoError(t, err)
	// Try a write with an earlier timestamp; this is just ignored.
	txne2.Sequence++
	txne2.WriteTimestamp = hlc.Timestamp{WallTime: 1}
	_, err = MVCCPut(ctx, engine, testKey1, txne2.ReadTimestamp, value1, MVCCWriteOptions{Txn: &txne2})
	require.NoError(t, err)
	// Try a write with an earlier epoch; ignored with error.
	_, err = MVCCPut(ctx, engine, testKey1, txn.ReadTimestamp, value1, MVCCWriteOptions{Txn: &txn})
	require.Error(t, err)
	require.Regexp(t, "put with epoch 1 came after put with epoch 2 in txn", err)
	// Try a write with different value using both later timestamp and epoch.
	txne2.Sequence++
	_, err = MVCCPut(ctx, engine, testKey1, txne2.ReadTimestamp, value3, MVCCWriteOptions{Txn: &txne2})
	require.NoError(t, err)
	// Resolve the intent.
	txne2Commit := txne2
	txne2Commit.Status = roachpb.COMMITTED
	txne2Commit.WriteTimestamp = hlc.Timestamp{WallTime: 1}
	_, _, _, _, err = MVCCResolveWriteIntent(ctx, engine, nil,
		roachpb.MakeLockUpdate(&txne2Commit, roachpb.Span{Key: testKey1}),
		MVCCResolveWriteIntentOptions{})
	require.NoError(t, err)
	expTS := txne2Commit.WriteTimestamp.Next()
	// Now try writing an earlier value without a txn--should get WriteTooOldError.
	_, err = MVCCPut(ctx, engine, testKey1, hlc.Timestamp{Logical: 1}, value4, MVCCWriteOptions{})
	var wtoErr *kvpb.WriteTooOldError
	require.ErrorAs(t, err, &wtoErr)
	require.Equal(t, expTS, wtoErr.ActualTimestamp)
	// Verify no value was written.
	valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.MaxTimestamp, MVCCGetOptions{})
	require.NoError(t, err)
	require.Equal(t, txne2Commit.WriteTimestamp, valueRes.Value.Value.Timestamp)
	require.Equal(t, value3.RawBytes, valueRes.Value.Value.RawBytes)
	// Attempt to read older timestamp; should fail.
	valueRes, err = MVCCGet(ctx, engine, testKey1, hlc.Timestamp{Logical: 0}, MVCCGetOptions{})
	require.NoError(t, err)
	require.False(t, valueRes.Value.IsPresent())
	// Read at correct timestamp.
	valueRes, err = MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{})
	require.NoError(t, err)
	require.Equal(t, txne2Commit.WriteTimestamp, valueRes.Value.Value.Timestamp)
	require.Equal(t, value3.RawBytes, valueRes.Value.Value.RawBytes)
}

// TestMVCCGetWithDiffEpochs writes a value first using epoch 1, then
// reads using epoch 2 to verify that values written during different
// transaction epochs are not visible.
func TestMVCCGetWithDiffEpochs(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// Write initial value without a txn.
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{Logical: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	// Now write using txn1, epoch 1.
	txn1ts := makeTxn(*txn1, hlc.Timestamp{WallTime: 1})
	if _, err := MVCCPut(ctx, engine, testKey1, txn1ts.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn1ts}); err != nil {
		t.Fatal(err)
	}
	// Try reading using different txns & epochs.
	testCases := []struct {
		txn      *roachpb.Transaction
		expValue *roachpb.Value
		expErr   bool
	}{
		// No transaction; should see error.
		{nil, nil, true},
		// Txn1, epoch 1; should see new value2.
		{txn1, &value2, false},
		// Txn1, epoch 2; should see original value1.
		{txn1e2, &value1, false},
		// Txn2; should see error.
		{txn2, nil, true},
	}
	for i, test := range testCases {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, MVCCGetOptions{
				Txn: test.txn,
			})
			if test.expErr {
				if err == nil {
					t.Errorf("test %d: unexpected success", i)
				} else if !errors.HasType(err, (*kvpb.LockConflictError)(nil)) {
					t.Errorf("test %d: expected lock conflict error; got %v", i, err)
				}
			} else if err != nil || !valueRes.Value.Exists() || !bytes.Equal(test.expValue.RawBytes, valueRes.Value.Value.RawBytes) {
				t.Errorf("test %d: expected value %q, err nil; got %+v, %v",
					i, test.expValue.RawBytes, valueRes.Value.Value, err)
			}
		})
	}
}

// TestMVCCGetWithDiffEpochsAndTimestamps writes a value first using
// epoch 1, then reads using epoch 2 with different timestamps to verify
// that values written during different transaction epochs are not visible.
//
// The test includes the case where the read at epoch 2 is at a *lower*
// timestamp than the intent write at epoch 1. This is not expected to
// happen commonly, but caused issues in #36089.
func TestMVCCGetWithDiffEpochsAndTimestamps(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// Write initial value without a txn at timestamp 1.
	_, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, value1, MVCCWriteOptions{})
	require.NoError(t, err)
	// Write another value without a txn at timestamp 3.
	_, err = MVCCPut(ctx, engine, testKey1, hlc.Timestamp{WallTime: 3}, value2, MVCCWriteOptions{})
	require.NoError(t, err)
	// Now write using txn1, epoch 1.
	txn1ts := makeTxn(*txn1, hlc.Timestamp{WallTime: 4})
	_, err = MVCCPut(ctx, engine, testKey1, txn1ts.ReadTimestamp, value3, MVCCWriteOptions{Txn: txn1ts})
	require.NoError(t, err)
	// Try reading using different epochs & timestamps.
	testCases := []struct {
		txn      *roachpb.Transaction
		readTS   hlc.Timestamp
		expValue *roachpb.Value
	}{
		// Epoch 1, read 1; should see new value3.
		{txn1, hlc.Timestamp{WallTime: 1}, &value3},
		// Epoch 1, read 2; should see new value3.
		{txn1, hlc.Timestamp{WallTime: 2}, &value3},
		// Epoch 1, read 3; should see new value3.
		{txn1, hlc.Timestamp{WallTime: 3}, &value3},
		// Epoch 1, read 4; should see new value3.
		{txn1, hlc.Timestamp{WallTime: 4}, &value3},
		// Epoch 1, read 5; should see new value3.
		{txn1, hlc.Timestamp{WallTime: 5}, &value3},
		// Epoch 2, read 1; should see committed value1.
		{txn1e2, hlc.Timestamp{WallTime: 1}, &value1},
		// Epoch 2, read 2; should see committed value1.
		{txn1e2, hlc.Timestamp{WallTime: 2}, &value1},
		// Epoch 2, read 3; should see committed value2.
		{txn1e2, hlc.Timestamp{WallTime: 3}, &value2},
		// Epoch 2, read 4; should see committed value2.
		{txn1e2, hlc.Timestamp{WallTime: 4}, &value2},
		// Epoch 2, read 5; should see committed value2.
		{txn1e2, hlc.Timestamp{WallTime: 5}, &value2},
	}
	for i, test := range testCases {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			valueRes, err := MVCCGet(ctx, engine, testKey1, test.readTS, MVCCGetOptions{Txn: test.txn})
			if err != nil || !valueRes.Value.Exists() || !bytes.Equal(test.expValue.RawBytes, valueRes.Value.Value.RawBytes) {
				t.Errorf("test %d: expected value %q, err nil; got %+v, %v",
					i, test.expValue.RawBytes, valueRes.Value.Value, err)
			}
		})
	}
}

// TestMVCCGetWithOldEpoch writes a value first using epoch 2, then
// reads using epoch 1 to verify that the read will fail.
func TestMVCCGetWithOldEpoch(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	if _, err := MVCCPut(ctx, engine, testKey1, txn1e2.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn1e2}); err != nil {
		t.Fatal(err)
	}
	_, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 2}, MVCCGetOptions{
		Txn: txn1,
	})
	if err == nil {
		t.Fatalf("unexpected success of get")
	}
}

// TestMVCCDeleteRangeWithSequence verifies that delete range operations at sequence
// numbers equal to or below the sequence of a previous delete range operation
// verify that they agree with the sequence history of each intent left by the
// delete range. If so, they become no-ops because writes are meant to be
// idempotent. If not, they throw errors.
func TestMVCCDeleteRangeWithSequence(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	testCases := []struct {
		name     string
		sequence enginepb.TxnSeq
		expErr   string
	}{
		{"old seq", 5, "missing an intent"},
		{"same seq", 6, ""},
		{"new seq", 7, ""},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			prefix := roachpb.Key(fmt.Sprintf("key-%d", tc.sequence))
			txn := *txn1
			// Lay down three intents at sequences 2, 3, and 4.
			for i := enginepb.TxnSeq(0); i < 3; i++ {
				key := append(prefix, []byte(strconv.Itoa(int(i)))...)
				txn.Sequence = 2 + i
				if _, err := MVCCPut(ctx, engine, key, txn.WriteTimestamp, value1, MVCCWriteOptions{Txn: &txn}); err != nil {
					t.Fatal(err)
				}
			}

			// Perform the initial DeleteRange.
			const origSeq = 6
			txn.Sequence = origSeq
			origDeleted, _, origNum, _, err := MVCCDeleteRange(ctx, engine, prefix, prefix.PrefixEnd(),
				math.MaxInt64, txn.WriteTimestamp, MVCCWriteOptions{Txn: &txn}, true)
			if err != nil {
				t.Fatal(err)
			}

			// Replay the DeleteRange at the test case's sequence number.
			txn.Sequence = tc.sequence
			deleted, _, num, _, err := MVCCDeleteRange(ctx, engine, prefix, prefix.PrefixEnd(),
				math.MaxInt64, txn.WriteTimestamp, MVCCWriteOptions{Txn: &txn}, true)
			if tc.expErr != "" && err != nil {
				if !testutils.IsError(err, tc.expErr) {
					t.Fatalf("unexpected error: %+v", err)
				}
			} else if err != nil {
				t.Fatalf("unexpected error: %+v", err)
			}

			// If at the same sequence as the initial DeleteRange, the replay
			// must be an exact no-op of the original.
			if tc.sequence == origSeq {
				if !reflect.DeepEqual(origDeleted, deleted) {
					t.Fatalf("deleted keys did not match original execution: %+v vs. %+v",
						origDeleted, deleted)
				}
				if origNum != num {
					t.Fatalf("number of keys deleted did not match original execution: %d vs. %d",
						origNum, num)
				}
			}
		})
	}
}

// TestMVCCGetWithPushedTimestamp verifies that a read for a value
// written by the transaction, but then subsequently pushed, can still
// be read by the txn at the later timestamp, even if an earlier
// timestamp is specified. This happens when a txn's intents are
// resolved by other actors; the intents shouldn't become invisible
// to pushed txn.
func TestMVCCGetWithPushedTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// Start with epoch 1.
	if _, err := MVCCPut(ctx, engine, testKey1, txn1.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn1}); err != nil {
		t.Fatal(err)
	}
	// Resolve the intent, pushing its timestamp forward.
	txn := makeTxn(*txn1, hlc.Timestamp{WallTime: 1})
	if _, _, _, _, err := MVCCResolveWriteIntent(ctx, engine, nil,
		roachpb.MakeLockUpdate(txn, roachpb.Span{Key: testKey1}),
		MVCCResolveWriteIntentOptions{}); err != nil {
		t.Fatal(err)
	}
	// Attempt to read using naive txn's previous timestamp.
	valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{Logical: 1}, MVCCGetOptions{
		Txn: txn1,
	})
	if err != nil || !valueRes.Value.Exists() || !bytes.Equal(valueRes.Value.Value.RawBytes, value1.RawBytes) {
		t.Errorf("expected value %q, err nil; got %+v, %v", value1.RawBytes, valueRes.Value.Value, err)
	}
}

// TestMVCCResolveWithDiffEpochs writes intents at two different epochs of
// the same txn and resolves both with an epoch-2 commit: the epoch-1
// intent is aborted while the epoch-2 intent commits.
func TestMVCCResolveWithDiffEpochs(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	if _, err := MVCCPut(ctx, engine, testKey1, txn1.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn1}); err != nil {
		t.Fatal(err)
	}
	if _, err := MVCCPut(ctx, engine, testKey2, txn1e2.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn1e2}); err != nil {
		t.Fatal(err)
	}
	numKeys, _, _, _, _, err := MVCCResolveWriteIntentRange(ctx, engine, nil,
		roachpb.MakeLockUpdate(txn1e2Commit, roachpb.Span{Key: testKey1, EndKey: testKey2.Next()}),
		MVCCResolveWriteIntentRangeOptions{MaxKeys: 2})
	if err != nil {
		t.Fatal(err)
	}
	if numKeys != 2 {
		t.Errorf("expected 2 rows resolved; got %d", numKeys)
	}

	// Verify key1 is empty, as resolution with epoch 2 would have
	// aborted the epoch 1 intent.
	valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{Logical: 1}, MVCCGetOptions{})
	if valueRes.Value.Exists() || err != nil {
		t.Errorf("expected value nil, err nil; got %+v, %v", valueRes.Value.Value, err)
	}

	// Key2 should be committed.
	valueRes, err = MVCCGet(ctx, engine, testKey2, hlc.Timestamp{Logical: 1}, MVCCGetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(value2.RawBytes, valueRes.Value.Value.RawBytes) {
		t.Fatalf("the value %s in get result does not match the value %s in request",
			value2.RawBytes, valueRes.Value.Value.RawBytes)
	}
}

// TestMVCCResolveWithUpdatedTimestamp verifies that committing an intent
// at a higher timestamp rewrites the value at the new commit timestamp.
func TestMVCCResolveWithUpdatedTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	if _, err := MVCCPut(ctx, engine, testKey1, txn1.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn1}); err != nil {
		t.Fatal(err)
	}
	valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{
		Txn: txn1,
	})
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(value1.RawBytes, valueRes.Value.Value.RawBytes) {
		t.Fatalf("the value %s in get result does not match the value %s in request",
			value1.RawBytes, valueRes.Value.Value.RawBytes)
	}

	// Resolve with a higher commit timestamp -- this should rewrite the
	// intent when making it permanent.
	txn := makeTxn(*txn1Commit, hlc.Timestamp{WallTime: 1})
	if _, _, _, _, err = MVCCResolveWriteIntent(ctx, engine, nil,
		roachpb.MakeLockUpdate(txn, roachpb.Span{Key: testKey1}),
		MVCCResolveWriteIntentOptions{}); err != nil {
		t.Fatal(err)
	}

	// No value below the new commit timestamp...
	valueRes, err = MVCCGet(ctx, engine, testKey1, hlc.Timestamp{Logical: 1}, MVCCGetOptions{})
	if valueRes.Value.Exists() || err != nil {
		t.Fatalf("expected both value and err to be nil: %+v, %v", valueRes.Value.Value, err)
	}
	// ...and the committed value is readable at the new timestamp.
	valueRes, err = MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{})
	if err != nil {
		t.Error(err)
	}
	if expTS := (hlc.Timestamp{WallTime: 1}); valueRes.Value.Value.Timestamp != expTS {
		t.Fatalf("expected timestamp %+v == %+v", valueRes.Value.Value.Timestamp, expTS)
	}
	if !bytes.Equal(value1.RawBytes, valueRes.Value.Value.RawBytes) {
		t.Fatalf("the value %s in get result does not match the value %s in request",
			value1.RawBytes, valueRes.Value.Value.RawBytes)
	}
}

// TestMVCCResolveWithPushedTimestamp verifies that resolving a
// still-PENDING txn at a pushed (higher) timestamp keeps the intent
// alive: a non-txn read at the pushed timestamp conflicts, while the
// txn itself can still read its own write.
func TestMVCCResolveWithPushedTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	if _, err := MVCCPut(ctx, engine, testKey1, txn1.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn1}); err != nil {
		t.Fatal(err)
	}
	valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{
		Txn: txn1,
	})
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(value1.RawBytes, valueRes.Value.Value.RawBytes) {
		t.Fatalf("the value %s in get result does not match the value %s in request",
			value1.RawBytes, valueRes.Value.Value.RawBytes)
	}

	// Resolve with a higher commit timestamp, but with still-pending transaction.
	// This represents a straightforward push (i.e. from a read/write conflict).
	txn := makeTxn(*txn1, hlc.Timestamp{WallTime: 1})
	if _, _, _, _, err = MVCCResolveWriteIntent(ctx, engine, nil,
		roachpb.MakeLockUpdate(txn, roachpb.Span{Key: testKey1}),
		MVCCResolveWriteIntentOptions{}); err != nil {
		t.Fatal(err)
	}
	// A non-txn read at the pushed timestamp hits the still-live intent.
	valueRes, err = MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{})
	if valueRes.Value.Exists() || err == nil {
		t.Fatalf("expected both value nil and err to be a LockConflictError: %+v", valueRes.Value.Value)
	}

	// Can still fetch the value using txn1.
	valueRes, err = MVCCGet(ctx, engine, testKey1, hlc.Timestamp{WallTime: 1}, MVCCGetOptions{
		Txn: txn1,
	})
	if err != nil {
		t.Error(err)
	}
	if expTS := (hlc.Timestamp{WallTime: 1}); valueRes.Value.Value.Timestamp != expTS {
		t.Fatalf("expected timestamp %+v == %+v", valueRes.Value.Value.Timestamp, expTS)
	}
	if !bytes.Equal(value1.RawBytes, valueRes.Value.Value.RawBytes) {
		t.Fatalf("the value %s in get result does not match the value %s in request",
			value1.RawBytes, valueRes.Value.Value.RawBytes)
	}
}

// TestMVCCResolveTxnNoOps verifies that resolving a non-existent key, a
// key without an intent, and an intent owned by a different txn are all
// harmless no-ops.
func TestMVCCResolveTxnNoOps(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// Resolve a non existent key; noop.
	if _, _, _, _, err := MVCCResolveWriteIntent(ctx, engine, nil,
		roachpb.MakeLockUpdate(txn1Commit, roachpb.Span{Key: testKey1}),
		MVCCResolveWriteIntentOptions{}); err != nil {
		t.Fatal(err)
	}

	// Add key and resolve despite there being no intent.
	if _, err := MVCCPut(ctx, engine, testKey1, hlc.Timestamp{Logical: 1}, value1, MVCCWriteOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, _, _, _, err := MVCCResolveWriteIntent(ctx, engine, nil,
		roachpb.MakeLockUpdate(txn2Commit, roachpb.Span{Key: testKey1}),
		MVCCResolveWriteIntentOptions{}); err != nil {
		t.Fatal(err)
	}

	// Write intent and resolve with different txn.
if _, err := MVCCPut(ctx, engine, testKey2, txn1.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn1}); err != nil { t.Fatal(err) } txn1CommitWithTS := txn2Commit.Clone() txn1CommitWithTS.WriteTimestamp = hlc.Timestamp{WallTime: 1} if _, _, _, _, err := MVCCResolveWriteIntent(ctx, engine, nil, roachpb.MakeLockUpdate(txn1CommitWithTS, roachpb.Span{Key: testKey2}), MVCCResolveWriteIntentOptions{}); err != nil { t.Fatal(err) } } func TestMVCCResolveTxnRange(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() if _, err := MVCCPut(ctx, engine, testKey1, txn1.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn1}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey2, hlc.Timestamp{Logical: 1}, value2, MVCCWriteOptions{}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey3, txn2.ReadTimestamp, value3, MVCCWriteOptions{Txn: txn2}); err != nil { t.Fatal(err) } if _, err := MVCCPut(ctx, engine, testKey4, txn1.ReadTimestamp, value4, MVCCWriteOptions{Txn: txn1}); err != nil { t.Fatal(err) } numKeys, _, resumeSpan, _, _, err := MVCCResolveWriteIntentRange(ctx, engine, nil, roachpb.MakeLockUpdate(txn1Commit, roachpb.Span{Key: testKey1, EndKey: testKey4.Next()}), MVCCResolveWriteIntentRangeOptions{}) if err != nil { t.Fatal(err) } if numKeys != 2 || resumeSpan != nil { t.Fatalf("expected all keys to process for resolution, even though 2 are noops; got %d, resume=%s", numKeys, resumeSpan) } { valueRes, err := MVCCGet(ctx, engine, testKey1, hlc.Timestamp{Logical: 1}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if !bytes.Equal(value1.RawBytes, valueRes.Value.Value.RawBytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value1.RawBytes, valueRes.Value.Value.RawBytes) } } { valueRes, err := MVCCGet(ctx, engine, testKey2, hlc.Timestamp{Logical: 1}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if 
!bytes.Equal(value2.RawBytes, valueRes.Value.Value.RawBytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value2.RawBytes, valueRes.Value.Value.RawBytes) } } { valueRes, err := MVCCGet(ctx, engine, testKey3, hlc.Timestamp{Logical: 1}, MVCCGetOptions{ Txn: txn2, }) if err != nil { t.Fatal(err) } if !bytes.Equal(value3.RawBytes, valueRes.Value.Value.RawBytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value3.RawBytes, valueRes.Value.Value.RawBytes) } } { valueRes, err := MVCCGet(ctx, engine, testKey4, hlc.Timestamp{Logical: 1}, MVCCGetOptions{}) if err != nil { t.Fatal(err) } if !bytes.Equal(value4.RawBytes, valueRes.Value.Value.RawBytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value1.RawBytes, valueRes.Value.Value.RawBytes) } } } func TestMVCCResolveTxnRangeResume(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() // Write 10 keys from txn1, 10 from txn2, and 10 with no txn, // interleaved. The length of these keys changes and is non-decreasing. // This exercises a subtle bug where separatedIntentAndVersionIter // forgot to update its intentKey, but in some cases the shared slice // for the unsafe key caused it to be inadvertently updated in a correct // way. 
	for i := 0; i < 30; i += 3 {
		key0 := roachpb.Key(fmt.Sprintf("%02d%d", i+0, i+0))
		key1 := roachpb.Key(fmt.Sprintf("%02d%d", i+1, i+1))
		key2 := roachpb.Key(fmt.Sprintf("%02d%d", i+2, i+2))
		if _, err := MVCCPut(ctx, engine, key0, txn1.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn1}); err != nil {
			t.Fatal(err)
		}
		txn2ts := makeTxn(*txn2, hlc.Timestamp{Logical: 2})
		if _, err := MVCCPut(ctx, engine, key1, txn2ts.ReadTimestamp, value2, MVCCWriteOptions{Txn: txn2ts}); err != nil {
			t.Fatal(err)
		}
		if _, err := MVCCPut(ctx, engine, key2, hlc.Timestamp{Logical: 3}, value3, MVCCWriteOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	rw := engine.NewBatch()
	defer rw.Close()

	// Resolve up to 6 intents: the keys are 000, 033, 066, 099, 1212, 1515.
	numKeys, _, resumeSpan, _, _, err := MVCCResolveWriteIntentRange(ctx, rw, nil,
		roachpb.MakeLockUpdate(txn1Commit, roachpb.Span{Key: roachpb.Key("00"), EndKey: roachpb.Key("33")}),
		MVCCResolveWriteIntentRangeOptions{MaxKeys: 6})
	if err != nil {
		t.Fatal(err)
	}
	if numKeys != 6 || resumeSpan == nil {
		t.Errorf("expected resolution for only 6 keys; got %d, resume=%s", numKeys, resumeSpan)
	}
	expResumeSpan := roachpb.Span{Key: roachpb.Key("1515").Next(), EndKey: roachpb.Key("33")}
	if !resumeSpan.Equal(expResumeSpan) {
		t.Errorf("expected resume span %s; got %s", expResumeSpan, resumeSpan)
	}
	require.NoError(t, rw.Commit(true))

	// Check that the intents are actually gone by trying to read above them
	// using txn2.
	for i := 0; i < 18; i += 3 {
		res, err := MVCCGet(ctx, engine, roachpb.Key(fmt.Sprintf("%02d%d", i, i)), txn2.ReadTimestamp,
			MVCCGetOptions{Txn: txn2})
		require.NotNil(t, res.Value)
		require.NoError(t, err)
		require.Nil(t, res.Intent)
	}
}

// This test is similar to TestMVCCResolveTxnRangeResume, and additionally has
// keys with many versions and resumes intent resolution until it completes.
func TestMVCCResolveTxnRangeResumeWithManyVersions(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	// Write 1000 keys with intents of which 100 are by the txn for which we
	// will perform intent resolution.
	lockUpdate := setupKeysWithIntent(t, engine, 5, /* numVersions */
		1 /* numFlushedVersions */, false, 10,
		false /* resolveIntentForLatestVersionWhenNonLockUpdateTxn */)
	lockUpdate.Key = makeKey(nil, 0)
	lockUpdate.EndKey = makeKey(nil, numIntentKeys)
	i := 0
	for {
		// Resolve up to 20 intents.
		numKeys, _, resumeSpan, _, _, err := MVCCResolveWriteIntentRange(ctx, engine, nil, lockUpdate,
			MVCCResolveWriteIntentRangeOptions{MaxKeys: 20})
		require.NoError(t, err)
		require.Equal(t, int64(20), numKeys)
		i++
		if resumeSpan == nil {
			break
		}
		expResumeSpan := roachpb.Span{
			Key:    makeKey(nil, (i*20-1)*10).Next(),
			EndKey: lockUpdate.EndKey,
		}
		if !resumeSpan.Equal(expResumeSpan) {
			t.Errorf("expected resume span %s; got %s", expResumeSpan, resumeSpan)
		}
		lockUpdate.Span = expResumeSpan
	}
	require.Equal(t, 5, i)
}

// generateBytes returns a random byte slice whose length is in
// [min, max), with bytes drawn from the range starting at 'a'.
func generateBytes(rng *rand.Rand, min int, max int) []byte {
	iterations := min + rng.Intn(max-min)
	result := make([]byte, 0, iterations)
	for i := 0; i < iterations; i++ {
		result = append(result, byte(rng.Float64()*float64('z'-'a')+'a'))
	}
	return result
}

// createEngWithSeparatedIntents opens a small (1 MiB) in-memory engine
// for the randomized tests below.
func createEngWithSeparatedIntents(t *testing.T) Engine {
	eng, err := Open(context.Background(), InMemory(), cluster.MakeTestingClusterSettings(),
		MaxSizeBytes(1<<20))
	require.NoError(t, err)
	return eng
}

// putState records, per key, the values written by a txn along with the
// sequence number and write timestamp used for each write (parallel
// slices indexed together).
type putState struct {
	key     roachpb.Key
	values  []roachpb.Value
	seqs    []enginepb.TxnSeq
	writeTS []hlc.Timestamp
}

// writeToEngine replays the given puts through MVCCPut as transactional
// writes, setting the txn's sequence and write timestamp per version.
func writeToEngine(
	t *testing.T, eng Engine, puts []putState, txn *roachpb.Transaction, debug bool,
) {
	ctx := context.Background()
	if debug {
		log.Dev.Infof(ctx, "writeToEngine")
	}
	for _, p := range puts {
		for i := range p.writeTS {
			txn.Sequence = p.seqs[i]
			txn.WriteTimestamp = p.writeTS[i]
			if debug {
				log.Dev.Infof(ctx, "Put: %s, seq: %d, writets: %s",
					p.key.String(), txn.Sequence, txn.WriteTimestamp.String())
			}
			_, err := MVCCPut(ctx, eng, p.key, txn.ReadTimestamp, p.values[i], MVCCWriteOptions{Txn: txn})
			require.NoError(t, err)
		}
	}
}

// checkEngineEquality iterates both engines over span and requires that
// they contain identical keys and values; with expectEmpty it also
// requires that the span contains no keys at all.
func checkEngineEquality(
	t *testing.T, span roachpb.Span, eng1 Engine, eng2 Engine, expectEmpty bool, debug bool,
) {
	ctx := context.Background()
	if debug {
		log.Dev.Infof(ctx, "checkEngineEquality")
	}
	makeIter := func(eng Engine) MVCCIterator {
		iter, err := eng.NewMVCCIterator(context.Background(), MVCCKeyAndIntentsIterKind,
			IterOptions{LowerBound: span.Key, UpperBound: span.EndKey})
		if err != nil {
			t.Fatal(err)
		}
		iter.SeekGE(MVCCKey{Key: span.Key})
		return iter
	}
	iter1, iter2 := makeIter(eng1), makeIter(eng2)
	defer iter1.Close()
	defer iter2.Close()
	count := 0
	for {
		valid1, err1 := iter1.Valid()
		valid2, err2 := iter2.Valid()
		require.NoError(t, err1)
		require.NoError(t, err2)
		if valid1 && !valid2 {
			t.Fatalf("iter2 exhausted before iter1")
		} else if !valid1 && valid2 {
			t.Fatalf("iter1 exhausted before iter2")
		}
		if !valid1 && !valid2 {
			break
		}
		count++
		if !iter1.UnsafeKey().Equal(iter2.UnsafeKey()) {
			t.Fatalf("keys not equal %s, %s", iter1.UnsafeKey().String(), iter2.UnsafeKey().String())
		}
		v1, err := iter1.UnsafeValue()
		require.NoError(t, err)
		v2, err := iter2.UnsafeValue()
		require.NoError(t, err)
		if !bytes.Equal(v1, v2) {
			t.Fatalf("key %s has different values: %x, %x", iter1.UnsafeKey().String(), v1, v2)
		}
		if debug {
			log.Dev.Infof(ctx, "key: %s", iter1.UnsafeKey().String())
		}
		iter1.Next()
		iter2.Next()
	}
	if expectEmpty && count > 0 {
		t.Fatalf("expected no keys but found %d", count)
	}
}

// TestRandomizedMVCCResolveWriteIntentRange generates random keys and values
// of different lengths, and exercises ranged intent resolution with
// randomized transaction status and ignored seqnums. Currently it compares
// the result of using the slow and fast paths for equality.
// When the slow path is removed we will need to improve the correctness
// checking in this test.
//
// TODO(sumeer): add epoch changes.
func TestRandomizedMVCCResolveWriteIntentRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	seed := *seedFlag
	debug := true
	if seed < 0 {
		seed = rand.Int63()
		debug = false
	}
	// Else, seed is being specified to debug a failure.
	fmt.Printf("seed: %d\n", seed)
	rng := rand.New(rand.NewSource(seed))
	ctx := context.Background()
	var engs [2]struct {
		eng   Engine
		stats enginepb.MVCCStats
	}
	// Engines are created with separated intents enabled.
	for i := range engs {
		engs[i].eng = createEngWithSeparatedIntents(t)
		defer engs[i].eng.Close()
	}
	var seq enginepb.TxnSeq
	var puts []putState
	timestamps := []hlc.Timestamp{{WallTime: 100}, {WallTime: 200}, {WallTime: 300}}
	keys := make(map[string]struct{})
	// 100 keys.
	for i := 0; i < 100; i++ {
		var key []byte
		for {
			// Keys are of different lengths. We've had a bug in the past that was
			// not triggered by tests that were using same length keys.
			key = generateBytes(rng, 10, 20)
			if _, ok := keys[string(key)]; !ok {
				break
			}
		}
		put := putState{
			key: key,
		}
		tsIndex := 0
		// Multiple versions of the key. The timestamps will be monotonically
		// non-decreasing due to tsIndex.
		versions := rng.Intn(3) + 1
		for j := 0; j < versions; j++ {
			val := roachpb.MakeValueFromBytes(generateBytes(rng, 20, 30))
			put.values = append(put.values, val)
			put.seqs = append(put.seqs, seq)
			seq++
			index := rng.Intn(len(timestamps))
			if index > tsIndex {
				tsIndex = index
			}
			put.writeTS = append(put.writeTS, timestamps[tsIndex])
		}
		puts = append(puts, put)
	}
	slices.SortFunc(puts, func(i, j putState) int {
		return i.key.Compare(j.key)
	})
	// Do the puts to the engines.
	for i := range engs {
		txn := *txn1
		txn.ReadTimestamp = timestamps[0]
		txn.MinTimestamp = txn.ReadTimestamp
		writeToEngine(t, engs[i].eng, puts, &txn, debug)
	}

	// Resolve intent range.
	txn := *txn1
	txnMeta := txn.TxnMeta
	txnMeta.WriteTimestamp = timestamps[len(timestamps)-1]
	txnMeta.MinTimestamp = timestamps[0]
	txnMeta.Sequence = seq
	status := []roachpb.TransactionStatus{
		roachpb.PENDING, roachpb.COMMITTED, roachpb.ABORTED}[rng.Intn(3)]
	var ignoredSeqNums []enginepb.IgnoredSeqNumRange
	// Since the number of versions per key are randomized, stepping here by the
	// constant 5 is sufficient to randomize which versions of a key get
	// ignored.
	for i := enginepb.TxnSeq(0); i < seq; i += 5 {
		ignoredSeqNums = append(ignoredSeqNums, enginepb.IgnoredSeqNumRange{Start: i, End: i})
	}
	lu := roachpb.LockUpdate{
		Span: roachpb.Span{
			Key:    puts[0].key,
			EndKey: encoding.BytesNext(puts[len(puts)-1].key),
		},
		Txn:            txnMeta,
		Status:         status,
		IgnoredSeqNums: ignoredSeqNums,
	}
	if debug {
		log.Dev.Infof(ctx, "LockUpdate: %s, %s", status.String(), lu.String())
	}
	// Resolve the same range on both engines, then compare stats.
	for i := range engs {
		func() {
			batch := engs[i].eng.NewBatch()
			defer batch.Close()
			_, _, _, _, _, err := MVCCResolveWriteIntentRange(ctx, batch, &engs[i].stats, lu,
				MVCCResolveWriteIntentRangeOptions{})
			require.NoError(t, err)
			require.NoError(t, batch.Commit(false))
		}()
	}
	require.Equal(t, engs[0].stats, engs[1].stats)
	// TODO(sumeer): mvccResolveWriteIntent has a bug when the txn is being
	// ABORTED and there are IgnoredSeqNums that are causing a partial rollback.
	// It does the partial rollback and does not actually resolve the intent.
	// This does not affect correctness since the intent resolution will get
	// retried. So we pass expectEmpty=false here, and retry the intent
	// resolution if aborted, and then check again with expectEmpty=true.
	checkEngineEquality(t, lu.Span, engs[0].eng, engs[1].eng, false, debug)
	if status == roachpb.ABORTED {
		for i := range engs {
			func() {
				batch := engs[i].eng.NewBatch()
				defer batch.Close()
				_, _, _, _, _, err := MVCCResolveWriteIntentRange(ctx, batch, &engs[i].stats, lu,
					MVCCResolveWriteIntentRangeOptions{})
				require.NoError(t, err)
				require.NoError(t, batch.Commit(false))
			}()
		}
		checkEngineEquality(t, lu.Span, engs[0].eng, engs[1].eng, true, debug)
	}
}

// TestRandomizedSavepointRollbackAndIntentResolution is a randomized test
// that tries to confirm that rolling back savepoints and then putting again
// does not cause incorrectness when doing intent resolution. This would fail
// under the bug documented in #69891.
func TestRandomizedSavepointRollbackAndIntentResolution(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	seed := *seedFlag
	debug := true
	if seed < 0 {
		seed = rand.Int63()
		debug = false
	}
	// Else, seed is being specified to debug a failure.
	fmt.Printf("seed: %d\n", seed)
	rng := rand.New(rand.NewSource(seed))
	ctx := context.Background()
	eng, err := Open(
		context.Background(), InMemory(), cluster.MakeTestingClusterSettings(),
		func(cfg *engineConfig) error {
			// Randomize LBase size to vary compaction behavior across runs.
			cfg.opts.LBaseMaxBytes = int64(100 + rng.Intn(16384))
			log.Dev.Infof(ctx, "lbase: %d", cfg.opts.LBaseMaxBytes)
			return nil
		})
	require.NoError(t, err)
	defer eng.Close()
	var seq enginepb.TxnSeq
	var puts []putState
	timestamps := []hlc.Timestamp{{WallTime: 100}, {WallTime: 200}, {WallTime: 300}}
	keys := make(map[string]struct{})
	// 100 keys, each written to twice by the txn.
	for i := 0; i < 100; i++ {
		var key []byte
		for {
			key = generateBytes(rng, 10, 20)
			if _, ok := keys[string(key)]; !ok {
				break
			}
		}
		put := putState{
			key: key,
		}
		for j := 0; j < 2; j++ {
			val := roachpb.MakeValueFromBytes(generateBytes(rng, 20, 30))
			put.values = append(put.values, val)
			put.seqs = append(put.seqs, seq)
			seq++
			put.writeTS = append(put.writeTS, timestamps[j])
		}
		puts = append(puts, put)
	}
	slices.SortFunc(puts, func(i, j putState) int {
		return i.key.Compare(j.key)
	})
	txn := *txn1
	txn.ReadTimestamp = timestamps[0]
	txn.MinTimestamp = txn.ReadTimestamp
	writeToEngine(t, eng, puts, &txn, debug)
	// The two SET calls for writing the intent are collapsed down to L6.
	require.NoError(t, eng.Flush())
	require.NoError(t, eng.Compact(ctx))

	txn.WriteTimestamp = timestamps[1]
	txn.Sequence = seq
	// Roll back everything written so far.
	ignoredSeqNums := []enginepb.IgnoredSeqNumRange{{Start: 0, End: seq - 1}}
	lu := roachpb.LockUpdate{
		Span: roachpb.Span{
			Key:    puts[0].key,
			EndKey: encoding.BytesNext(puts[len(puts)-1].key),
		},
		Txn:            txn.TxnMeta,
		Status:         roachpb.PENDING,
		IgnoredSeqNums: ignoredSeqNums,
	}
	if debug {
		log.Dev.Infof(ctx, "LockUpdate: %s", lu.String())
	}
	// All the writes are ignored, so DEL is written for the intent. These
	// should be buffered in the memtable.
	_, _, _, _, _, err = MVCCResolveWriteIntentRange(ctx, eng, nil, lu,
		MVCCResolveWriteIntentRangeOptions{})
	require.NoError(t, err)
	{
		// Nothing should remain visible in the span after the rollback.
		iter, err := eng.NewMVCCIterator(context.Background(), MVCCKeyAndIntentsIterKind,
			IterOptions{LowerBound: lu.Span.Key, UpperBound: lu.Span.EndKey})
		if err != nil {
			t.Fatal(err)
		}
		defer iter.Close()
		iter.SeekGE(MVCCKey{Key: lu.Span.Key})
		valid, err := iter.Valid()
		require.NoError(t, err)
		require.False(t, valid)
	}

	// Do another put for all these keys. These will also be in the memtable.
	for i := 0; i < 100; i++ {
		val := roachpb.MakeValueFromBytes(generateBytes(rng, 2, 3))
		puts[i].values = append(puts[i].values[:0], val)
		puts[i].seqs = append(puts[i].seqs[:0], seq)
		seq++
		puts[i].writeTS = append(puts[i].writeTS[:0], timestamps[2])
	}
	writeToEngine(t, eng, puts, &txn, debug)
	// Flush of the memtable will collapse DEL=>SET into SETWITHDEL.
	require.NoError(t, eng.Flush())

	// Commit or abort the txn, so that we eventually get
	// SET=>SETWITHDEL=>SINGLEDEL for the intents.
	txn.WriteTimestamp = timestamps[2]
	txn.Sequence = seq
	lu.Txn = txn.TxnMeta
	lu.Status = []roachpb.TransactionStatus{roachpb.COMMITTED, roachpb.ABORTED}[rng.Intn(2)]
	if debug {
		log.Dev.Infof(ctx, "LockUpdate: %s", lu.String())
	}
	_, _, _, _, _, err = MVCCResolveWriteIntentRange(ctx, eng, nil, lu,
		MVCCResolveWriteIntentRangeOptions{})
	require.NoError(t, err)
	// Compact the engine so that SINGLEDEL consumes the SETWITHDEL, becoming a
	// DEL.
	require.NoError(t, eng.Compact(ctx))
	iter, err := eng.NewMVCCIterator(context.Background(), MVCCKeyAndIntentsIterKind,
		IterOptions{LowerBound: lu.Span.Key, UpperBound: lu.Span.EndKey})
	if err != nil {
		t.Fatal(err)
	}
	defer iter.Close()
	iter.SeekGE(MVCCKey{Key: lu.Span.Key})
	if lu.Status == roachpb.COMMITTED {
		i := 0
		for {
			valid, err := iter.Valid()
			require.NoError(t, err)
			if !valid {
				break
			}
			i++
			// Expect only the committed values.
			require.Equal(t, timestamps[2], iter.UnsafeKey().Timestamp)
			iter.Next()
		}
		require.Equal(t, 100, i)
	} else {
		// ABORTED. Nothing to iterate over.
		valid, err := iter.Valid()
		require.NoError(t, err)
		// The correct behavior is !valid. But if there is a bug, the
		// intentInterleavingIter does not always expose its error immediately (in
		// this case the error would an intent without a provisional value), so we
		// step it forward once.
		if valid {
			iter.Next()
			_, err = iter.Valid()
			require.NoError(t, err)
			// Should fail on previous statement, but this whole path is incorrect,
			// so fail here.
			// NOTE(review): t is passed as an argument to Fatal here — this
			// looks like it was meant to be t.Fatal("iter is valid"); confirm.
			t.Fatal(t, "iter is valid")
		}
	}
}

// TestValidSplitKeys checks IsValidSplitKey over a sample of meta-range
// and user keys.
func TestValidSplitKeys(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	testCases := []struct {
		key   roachpb.Key
		valid bool
	}{
		{roachpb.Key("\x02"), false},
		{roachpb.Key("\x02\x00"), false},
		{roachpb.Key("\x02\xff"), false},
		{roachpb.Key("\x03"), true},
		{roachpb.Key("\x03\x00"), true},
		{roachpb.Key("\x03\xff"), true},
		{roachpb.Key("\x03\xff\xff"), false},
		{roachpb.Key("\x03\xff\xff\x88"), false},
		{roachpb.Key("\x04"), true},
		{roachpb.Key("\x05"), true},
		{roachpb.Key("a"), true},
		{roachpb.Key("\xff"), true},
		{roachpb.Key("\xff\x01"), true},
	}
	for i, test := range testCases {
		valid := IsValidSplitKey(test.key)
		if valid != test.valid {
			t.Errorf("%d: expected %q [%x] valid %t; got %t",
				i, test.key, []byte(test.key), test.valid, valid)
		}
	}
}

// TestFindSplitKey verifies that MVCCFindSplitKey chooses a key near the
// middle of the range by byte size.
func TestFindSplitKey(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	ms := &enginepb.MVCCStats{}
	// Generate a series of KeyValues, each containing targetLength
	// bytes, writing key #i to (encoded) key #i through the MVCC
	// facility. Assuming that this translates roughly into same-length
	// values after MVCC encoding, the split key should hence be chosen
	// as the middle key of the interval.
splitReservoirSize := 100 for i := 0; i < splitReservoirSize; i++ { k := fmt.Sprintf("%09d", i) v := strings.Repeat("X", 10-len(k)) val := roachpb.MakeValueFromString(v) // Write the key and value through MVCC if _, err := MVCCPut( ctx, engine, []byte(k), hlc.Timestamp{Logical: 1}, val, MVCCWriteOptions{Stats: ms}, ); err != nil { t.Fatal(err) } } testData := []struct { targetSize int64 splitInd int }{ {(ms.KeyBytes + ms.ValBytes) / 2, splitReservoirSize / 2}, {0, 0}, {math.MaxInt64, splitReservoirSize}, } for i, td := range testData { humanSplitKey, err := MVCCFindSplitKey(ctx, engine, roachpb.RKeyMin, roachpb.RKeyMax, td.targetSize) if err != nil { t.Fatal(err) } ind, err := strconv.Atoi(string(humanSplitKey)) if err != nil { t.Fatalf("%d: could not parse key %s as int: %+v", i, humanSplitKey, err) } if ind == 0 { t.Fatalf("%d: should never select first key as split key", i) } if diff := td.splitInd - ind; diff > 1 || diff < -1 { t.Fatalf("%d: wanted key #%d+-1, but got %d (diff %d)", i, td.splitInd, ind, diff) } } } // Injected via `external_helpers_test.go`. var TestingUserDescID func(offset uint32) uint32 // TestFindValidSplitKeys verifies split keys are located such that // they avoid splits through invalid key ranges. func TestFindValidSplitKeys(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) userID := TestingUserDescID(0) type testCase struct { keys []roachpb.Key rangeStart roachpb.Key // optional expSplit roachpb.Key expError bool skipTenant bool // if true, skip tenant subtests } prefixTestKeys := func(test testCase, prefix roachpb.Key) testCase { // testCase.keys oldKeys := test.keys test.keys = make([]roachpb.Key, len(oldKeys)) for i, key := range oldKeys { test.keys[i] = append(prefix, key...) } // testCase.rangeStart if test.rangeStart != nil { test.rangeStart = append(prefix, test.rangeStart...) } // testCase.expSplit if test.expSplit != nil { test.expSplit = append(prefix, test.expSplit...) 
} return test } testCases := []testCase{ // All m1 cannot be split. { keys: []roachpb.Key{ roachpb.Key("\x02"), roachpb.Key("\x02\x00"), roachpb.Key("\x02\xff"), }, expSplit: nil, expError: false, skipTenant: true, }, // Between meta1 and meta2, splits at meta2. { keys: []roachpb.Key{ roachpb.Key("\x02"), roachpb.Key("\x02\x00"), roachpb.Key("\x02\xff"), roachpb.Key("\x03"), roachpb.Key("\x03\x00"), roachpb.Key("\x03\xff"), }, expSplit: roachpb.Key("\x03"), expError: false, skipTenant: true, }, // Even lopsided, always split at meta2. { keys: []roachpb.Key{ roachpb.Key("\x02"), roachpb.Key("\x02\x00"), roachpb.Key("\x02\xff"), roachpb.Key("\x03"), }, expSplit: roachpb.Key("\x03"), expError: false, skipTenant: true, }, // Between meta2Max and metaMax, splits at metaMax. { keys: []roachpb.Key{ roachpb.Key("\x03\xff\xff"), roachpb.Key("\x03\xff\xff\x88"), roachpb.Key("\x04"), roachpb.Key("\x04\xff\xff\x88"), }, expSplit: roachpb.Key("\x04"), expError: false, skipTenant: true, }, // Even lopsided, always split at metaMax. { keys: []roachpb.Key{ roachpb.Key("\x03\xff\xff"), roachpb.Key("\x03\xff\xff\x11"), roachpb.Key("\x03\xff\xff\x88"), roachpb.Key("\x03\xff\xff\xee"), roachpb.Key("\x04"), }, expSplit: roachpb.Key("\x04"), expError: false, skipTenant: true, }, // Lopsided, truncate non-zone prefix. { keys: []roachpb.Key{ roachpb.Key("\x04zond"), roachpb.Key("\x04zone"), roachpb.Key("\x04zone\x00"), roachpb.Key("\x04zone\xff"), }, expSplit: roachpb.Key("\x04zone\x00"), expError: false, skipTenant: true, }, // Lopsided, truncate non-zone suffix. { keys: []roachpb.Key{ roachpb.Key("\x04zone"), roachpb.Key("\x04zone\x00"), roachpb.Key("\x04zone\xff"), roachpb.Key("\x04zonf"), }, expSplit: roachpb.Key("\x04zone\xff"), expError: false, skipTenant: true, }, // A Range for which MVCCSplitKey would return the start key isn't fair // game, even if the StartKey actually exists. 
There was once a bug // here which compared timestamps as well as keys and thus didn't // realize it was splitting at the initial key (due to getting confused // by the actual value having a nonzero timestamp). { keys: []roachpb.Key{ roachpb.Key("b"), }, rangeStart: roachpb.Key("a"), expSplit: nil, expError: false, }, // Similar test, but the range starts at first key. { keys: []roachpb.Key{ roachpb.Key("b"), }, rangeStart: roachpb.Key("b"), expSplit: nil, expError: false, }, // Some example table data. Make sure we don't split in the middle of a row // or return the start key of the range. { keys: []roachpb.Key{ testAddColFam(testTablePrefix(userID, "a"), 1), testAddColFam(testTablePrefix(userID, "a"), 2), testAddColFam(testTablePrefix(userID, "a"), 3), testAddColFam(testTablePrefix(userID, "a"), 4), testAddColFam(testTablePrefix(userID, "a"), 5), testAddColFam(testTablePrefix(userID, "b"), 1), testAddColFam(testTablePrefix(userID, "c"), 1), }, rangeStart: testTablePrefix(userID, "a"), expSplit: testTablePrefix(userID, "b"), expError: false, }, // More example table data. Make sure ranges at the start of a table can // be split properly - this checks that the minSplitKey logic doesn't // break for such ranges. { keys: []roachpb.Key{ testAddColFam(testTablePrefix(userID, "a"), 1), testAddColFam(testTablePrefix(userID, "b"), 1), testAddColFam(testTablePrefix(userID, "c"), 1), testAddColFam(testTablePrefix(userID, "d"), 1), }, rangeStart: keys.SystemSQLCodec.TablePrefix(userID), expSplit: testTablePrefix(userID, "c"), expError: false, }, // More example table data. Make sure ranges at the start of a table can // be split properly even in the presence of a large first row. 
{ keys: []roachpb.Key{ testAddColFam(testTablePrefix(userID, "a"), 1), testAddColFam(testTablePrefix(userID, "a"), 2), testAddColFam(testTablePrefix(userID, "a"), 3), testAddColFam(testTablePrefix(userID, "a"), 4), testAddColFam(testTablePrefix(userID, "a"), 5), testAddColFam(testTablePrefix(userID, "b"), 1), testAddColFam(testTablePrefix(userID, "c"), 1), }, rangeStart: keys.SystemSQLCodec.TablePrefix(TestingUserDescID(0)), expSplit: testTablePrefix(userID, "b"), expError: false, }, // One partition where partition key is the first column. Checks that // split logic is not confused by the special partition start key. { keys: []roachpb.Key{ testAddColFam(testTablePrefix(userID, "a", "a"), 1), testAddColFam(testTablePrefix(userID, "a", "b"), 1), testAddColFam(testTablePrefix(userID, "a", "c"), 1), testAddColFam(testTablePrefix(userID, "a", "d"), 1), }, rangeStart: testTablePrefix(userID, "a"), expSplit: testTablePrefix(userID, "a", "c"), expError: false, }, // One partition with a large first row. Checks that our logic to avoid // splitting in the middle of a row still applies. { keys: []roachpb.Key{ testAddColFam(testTablePrefix(userID, "a", "a"), 1), testAddColFam(testTablePrefix(userID, "a", "a"), 2), testAddColFam(testTablePrefix(userID, "a", "a"), 3), testAddColFam(testTablePrefix(userID, "a", "a"), 4), testAddColFam(testTablePrefix(userID, "a", "a"), 5), testAddColFam(testTablePrefix(userID, "a", "b"), 1), testAddColFam(testTablePrefix(userID, "a", "c"), 1), }, rangeStart: testTablePrefix(userID, "a"), expSplit: testTablePrefix(userID, "a", "b"), expError: false, }, } testutils.RunTrueAndFalse(t, "tenant", func(t *testing.T, tenant bool) { for i, test := range testCases { t.Run("", func(t *testing.T) { if tenant { if test.skipTenant { skip.IgnoreLint(t, "") } // Update all keys to include a tenant prefix. 
tenPrefix := keys.MakeSQLCodec(roachpb.MinTenantID).TenantPrefix() test = prefixTestKeys(test, tenPrefix) } ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() ms := &enginepb.MVCCStats{} testPopulateKeysWithVersions(ctx, t, engine, ms, test.keys) rangeStart := test.keys[0] if len(test.rangeStart) > 0 { rangeStart = test.rangeStart } rangeEnd := test.keys[len(test.keys)-1].Next() rangeStartAddr, err := keys.Addr(rangeStart) if err != nil { t.Fatal(err) } rangeEndAddr, err := keys.Addr(rangeEnd) if err != nil { t.Fatal(err) } targetSize := (ms.KeyBytes + ms.ValBytes) / 2 splitKey, err := MVCCFindSplitKey(ctx, engine, rangeStartAddr, rangeEndAddr, targetSize) if test.expError { if !testutils.IsError(err, "has no valid splits") { t.Fatalf("%d: unexpected error: %+v", i, err) } return } if err != nil { t.Fatalf("%d; unexpected error: %+v", i, err) } if !splitKey.Equal(test.expSplit) { t.Errorf("%d: expected split key %q; got %q", i, test.expSplit, splitKey) } }) } }) } // TestFindBalancedSplitKeys verifies split keys are located such that // the left and right halves are equally balanced. func TestFindBalancedSplitKeys(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) testCases := []struct { keySizes []int valSizes []int expSplit int }{ // Bigger keys on right side. { keySizes: []int{10, 100, 10, 10, 500}, valSizes: []int{1, 1, 1, 1, 1}, expSplit: 4, }, // Bigger keys on left side. { keySizes: []int{1000, 500, 500, 10, 10}, valSizes: []int{1, 1, 1, 1, 1}, expSplit: 1, }, // Bigger values on right side. { keySizes: []int{1, 1, 1, 1, 1}, valSizes: []int{10, 100, 10, 10, 500}, expSplit: 4, }, // Bigger values on left side. { keySizes: []int{1, 1, 1, 1, 1}, valSizes: []int{1000, 100, 500, 10, 10}, expSplit: 1, }, // Bigger key/values on right side. { keySizes: []int{10, 100, 10, 10, 250}, valSizes: []int{10, 100, 10, 10, 250}, expSplit: 4, }, // Bigger key/values on left side. 
{ keySizes: []int{500, 50, 250, 10, 10}, valSizes: []int{500, 50, 250, 10, 10}, expSplit: 1, }, } for i, test := range testCases { t.Run("", func(t *testing.T) { ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() ms := &enginepb.MVCCStats{} var expKey roachpb.Key for j, keySize := range test.keySizes { key := roachpb.Key(fmt.Sprintf("%d%s", j, strings.Repeat("X", keySize))) if test.expSplit == j { expKey = key } val := roachpb.MakeValueFromString(strings.Repeat("X", test.valSizes[j])) if _, err := MVCCPut( ctx, engine, key, hlc.Timestamp{Logical: 1}, val, MVCCWriteOptions{Stats: ms}, ); err != nil { t.Fatal(err) } } targetSize := (ms.KeyBytes + ms.ValBytes) / 2 splitKey, err := MVCCFindSplitKey(ctx, engine, roachpb.RKey("\x02"), roachpb.RKeyMax, targetSize) if err != nil { t.Fatalf("unexpected error: %+v", err) } if !splitKey.Equal(expKey) { t.Errorf("%d: expected split key %q; got %q", i, expKey, splitKey) } }) } } // testAddPrefix manually creates rows corresponding to the schema e.g. // CREATE TABLE t (id1 STRING, id2 STRING, ... PRIMARY KEY (id1, id2, ...)) func testAddPrefix(prefix roachpb.Key, id uint32, rowVals ...string) roachpb.Key { tableKey := append(prefix, keys.SystemSQLCodec.TablePrefix(id)...) rowKey := roachpb.Key(encoding.EncodeVarintAscending(tableKey, 1)) for _, rowVal := range rowVals { rowKey = encoding.EncodeStringAscending(rowKey, rowVal) } return rowKey } func testTablePrefix(id uint32, rowVals ...string) roachpb.Key { return testAddPrefix(nil, id, rowVals...) } func testAddColFam(rowKey roachpb.Key, colFam uint32) roachpb.Key { return keys.MakeFamilyKey(append([]byte(nil), rowKey...), colFam) } // testPopulateKeysWithVersions puts the keys into the engine provided. Each // key is added with 3 MVCC versions with a XX.. value. 
func testPopulateKeysWithVersions( ctx context.Context, t *testing.T, engine Engine, ms *enginepb.MVCCStats, keys []roachpb.Key, ) { val := roachpb.MakeValueFromString(strings.Repeat("X", 10)) for _, k := range keys { // Add three MVCC versions of every key. Splits are not allowed // between MVCC versions, so this shouldn't have any effect. for j := 1; j <= 3; j++ { ts := hlc.Timestamp{Logical: int32(j)} _, err := MVCCPut(ctx, engine, []byte(k), ts, val, MVCCWriteOptions{Stats: ms}) require.NoError(t, err) } } } // TestMVCCFirstSplitKey checks that the split key returned from // MVCCFirstSplitKey is: // (1) Within a range's bounds // (2) No less than the desired split key. // (3) Greater than the first key, or first row's keys in table ranges. // (4) Not inbetween the start and end of a row for table ranges. func TestMVCCFirstSplitKey(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) userID := TestingUserDescID(0) type splitExpect struct { desired roachpb.Key expected roachpb.Key } testCases := []struct { keys []roachpb.Key startKey, endKey roachpb.Key splits []splitExpect }{ { // No keys, no splits. keys: []roachpb.Key{}, startKey: roachpb.Key("a"), endKey: roachpb.Key("z"), splits: []splitExpect{ {desired: roachpb.Key("a"), expected: nil}, {desired: roachpb.Key("m"), expected: nil}, {desired: roachpb.Key("z"), expected: nil}, }, }, { // meta1 cannot be split. Previously, this test would cause a panic in // mvccMinSplitKey, called by MVCCFirstSplitKey. The iterator is // initialized with a global key constraint from the endKey // ("\x02\xff\xff"), but we seekGE the start key (MinKey="") which is // local because it is before LocalMax (0x02). 
keys: []roachpb.Key{ roachpb.Key("\x02"), roachpb.Key("\x02\x00"), roachpb.Key("\x02\xff"), }, startKey: keys.MinKey, endKey: keys.Meta1KeyMax, splits: []splitExpect{ {desired: keys.MinKey, expected: nil}, {desired: roachpb.Key("\x02"), expected: nil}, {desired: roachpb.Key("\x02\x00"), expected: nil}, }, }, { // All keys are outside the range, no keys to spit at so expect no // splits. keys: []roachpb.Key{ roachpb.Key("0"), roachpb.Key("c"), roachpb.Key("d"), }, startKey: roachpb.Key("a"), endKey: roachpb.Key("c"), splits: []splitExpect{ {desired: roachpb.Key("a"), expected: nil}, {desired: roachpb.Key("b"), expected: nil}, {desired: roachpb.Key("c"), expected: nil}, }, }, { // Only one key within the range, require at least two keys to split. keys: []roachpb.Key{ // (0) is outside the range [a,c) roachpb.Key("0"), roachpb.Key("a"), // (c) is outside of the range [a,c). roachpb.Key("c"), }, startKey: roachpb.Key("a"), endKey: roachpb.Key("c"), splits: []splitExpect{ {desired: roachpb.Key("a"), expected: nil}, {desired: roachpb.Key("b"), expected: nil}, {desired: roachpb.Key("c"), expected: nil}, }, }, { // Enough keys to realize a split on c. Only desiredSplitKeys <= c should // split at c. keys: []roachpb.Key{ // (0) is outside the range [a,e) roachpb.Key("0"), roachpb.Key("b"), roachpb.Key("c"), // (e) is outside of the range [a,e). roachpb.Key("e"), }, startKey: roachpb.Key("a"), endKey: roachpb.Key("e"), splits: []splitExpect{ // Should iterate to the first split key after minSpitKey which is (c). {desired: roachpb.Key("0"), expected: roachpb.Key("c")}, {desired: roachpb.Key("b"), expected: roachpb.Key("c")}, {desired: roachpb.Key("c"), expected: roachpb.Key("c")}, {desired: keys.MinKey, expected: roachpb.Key("c")}, // Desired split key is after the last key in the range (c), shouldn't // split. 
{desired: roachpb.Key("d"), expected: nil}, }, }, { keys: []roachpb.Key{ testAddColFam(testTablePrefix(userID, "a"), 1), testAddColFam(testTablePrefix(userID, "b"), 1), testAddColFam(testTablePrefix(userID, "b"), 2), testAddColFam(testTablePrefix(userID, "b"), 3), testAddColFam(testTablePrefix(userID, "d"), 1), // (e,1) is outside of the range [a,e) testAddColFam(testTablePrefix(userID, "e"), 1), }, startKey: testTablePrefix(userID, "a"), endKey: testTablePrefix(userID, "e"), splits: []splitExpect{ {desired: testAddColFam(testTablePrefix(userID, "a"), 0), expected: testTablePrefix(userID, "b")}, {desired: testAddColFam(testTablePrefix(userID, "b"), 3), expected: testTablePrefix(userID, "b")}, // The first key after the desired split key is (d,1), expect a split // at the prefix (d). {desired: testAddColFam(testTablePrefix(userID, "b"), 4), expected: testTablePrefix(userID, "d")}, // Desired split key is after the last key in the range (d,1), // shouldn't split. {desired: testAddColFam(testTablePrefix(userID, "d"), 2), expected: nil}, }, }, { // One partiton key, where the partition key is the first column (a). keys: []roachpb.Key{ testAddColFam(testTablePrefix(userID, "a", "a"), 1), testAddColFam(testTablePrefix(userID, "a", "a"), 3), testAddColFam(testTablePrefix(userID, "a", "b"), 1), testAddColFam(testTablePrefix(userID, "a", "c"), 1), // (a,d,0) is outside the range [a,(a,d)). testAddColFam(testTablePrefix(userID, "a", "d"), 0), }, startKey: testTablePrefix(userID, "a"), endKey: testTablePrefix(userID, "a", "d"), splits: []splitExpect{ {desired: testTablePrefix(userID, "a"), expected: testTablePrefix(userID, "a", "b")}, {desired: testAddColFam(testTablePrefix(userID, "a", "a"), 3), expected: testTablePrefix(userID, "a", "b")}, {desired: testAddColFam(testTablePrefix(userID, "a", "b"), 2), expected: testTablePrefix(userID, "a", "c")}, // Desired split key is after the last key in the range (a,c,1), // shouldn't split. 
{desired: testAddColFam(testTablePrefix(userID, "a", "c"), 2), expected: nil}, {desired: testTablePrefix(userID, "a", "e"), expected: nil}, }, }, } for _, tc := range testCases { t.Run(fmt.Sprintf("%v", tc.keys), func(t *testing.T) { ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() testPopulateKeysWithVersions(ctx, t, engine, &enginepb.MVCCStats{}, tc.keys) rangeStartAddr := keys.MustAddr(tc.startKey) rangeEndAddr := keys.MustAddr(tc.endKey) for _, split := range tc.splits { t.Run(fmt.Sprintf("%v", split.desired), func(t *testing.T) { desiredSplitAddr := keys.MustAddr(split.desired) splitKey, err := MVCCFirstSplitKey(ctx, engine, desiredSplitAddr, rangeStartAddr, rangeEndAddr) // NB: We don't expect errors. If no split key can be found, we // expect a nil splitKey to be returned. require.NoError(t, err) require.Equal(t, split.expected, splitKey) }) } }) } } // TestMVCCGarbageCollect writes a series of gc'able bytes and then // sends an MVCC GC request and verifies cleared values and updated // stats. func TestMVCCGarbageCollect(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() ms := &enginepb.MVCCStats{} val := []byte("value") ts1 := hlc.Timestamp{WallTime: 1e9} ts2 := hlc.Timestamp{WallTime: 2e9} ts3 := hlc.Timestamp{WallTime: 3e9} ts4 := hlc.Timestamp{WallTime: 4e9} ts5 := hlc.Timestamp{WallTime: 4e9} val1 := roachpb.MakeValueFromBytesAndTimestamp(val, ts1) val2 := roachpb.MakeValueFromBytesAndTimestamp(val, ts2) val3 := roachpb.MakeValueFromBytesAndTimestamp(val, ts3) valInline := roachpb.MakeValueFromBytesAndTimestamp(val, hlc.Timestamp{}) testData := []struct { key roachpb.Key vals []roachpb.Value isDeleted bool // is the most recent value a deletion tombstone? 
}{ {roachpb.Key("a"), []roachpb.Value{val1, val2}, false}, {roachpb.Key("a-del"), []roachpb.Value{val1, val2}, true}, {roachpb.Key("b"), []roachpb.Value{val1, val2, val3}, false}, {roachpb.Key("b-del"), []roachpb.Value{val1, val2, val3}, true}, {roachpb.Key("inline"), []roachpb.Value{valInline}, false}, {roachpb.Key("r-2"), []roachpb.Value{val1}, false}, {roachpb.Key("r-3"), []roachpb.Value{val1}, false}, {roachpb.Key("r-4"), []roachpb.Value{val1}, false}, {roachpb.Key("r-6"), []roachpb.Value{val1}, true}, {roachpb.Key("t"), []roachpb.Value{val1}, false}, } for i := 0; i < 3; i++ { for _, test := range testData { if i >= len(test.vals) { continue } for _, val := range test.vals[i : i+1] { if i == len(test.vals)-1 && test.isDeleted { if _, _, err := MVCCDelete(ctx, engine, test.key, val.Timestamp, MVCCWriteOptions{Stats: ms}); err != nil { t.Fatal(err) } continue } valCpy := *protoutil.Clone(&val).(*roachpb.Value) valCpy.Timestamp = hlc.Timestamp{} if _, err := MVCCPut(ctx, engine, test.key, val.Timestamp, valCpy, MVCCWriteOptions{Stats: ms}); err != nil { t.Fatal(err) } } } } if err := MVCCDeleteRangeUsingTombstone(ctx, engine, ms, roachpb.Key("r"), roachpb.Key("r-del").Next(), ts3, hlc.ClockTimestamp{}, nil, nil, false, 0, 0, nil); err != nil { t.Fatal(err) } if err := MVCCDeleteRangeUsingTombstone(ctx, engine, ms, roachpb.Key("t"), roachpb.Key("u").Next(), ts2, hlc.ClockTimestamp{}, nil, nil, false, 0, 0, nil); err != nil { t.Fatal(err) } if err := MVCCDeleteRangeUsingTombstone(ctx, engine, ms, roachpb.Key("t"), roachpb.Key("u").Next(), ts3, hlc.ClockTimestamp{}, nil, nil, false, 0, 0, nil); err != nil { t.Fatal(err) } if log.V(1) { log.Dev.Info(context.Background(), "Engine content before GC") kvsn, err := Scan(context.Background(), engine, localMax, keyMax, 0) if err != nil { t.Fatal(err) } for i, kv := range kvsn { log.Dev.Infof(context.Background(), "%d: %s", i, kv.Key) } } gcTime := ts5 gcKeys := []kvpb.GCRequest_GCKey{ {Key: roachpb.Key("a"), Timestamp: 
ts1}, {Key: roachpb.Key("a-del"), Timestamp: ts2}, {Key: roachpb.Key("b"), Timestamp: ts1}, {Key: roachpb.Key("b-del"), Timestamp: ts2}, {Key: roachpb.Key("inline"), Timestamp: hlc.Timestamp{}}, // Keys that don't exist, which should result in a no-op. {Key: roachpb.Key("a-bad"), Timestamp: ts2}, {Key: roachpb.Key("inline-bad"), Timestamp: hlc.Timestamp{}}, // Keys that are hidden by range key. // Non-existing keys that needs to skip gracefully without // distorting stats. (Checking that following keys doesn't affect it) {Key: roachpb.Key("r-0"), Timestamp: ts1}, {Key: roachpb.Key("r-1"), Timestamp: ts4}, // Request has a timestamp below range key, it will be handled by // logic processing range tombstones specifically. {Key: roachpb.Key("r-2"), Timestamp: ts1}, // Requests has a timestamp at or above range key, it will be handled by // logic processing synthesized metadata. {Key: roachpb.Key("r-3"), Timestamp: ts3}, {Key: roachpb.Key("r-4"), Timestamp: ts4}, // This is a non-existing key that needs to skip gracefully without // distorting stats. Checking that absence of next key is handled. {Key: roachpb.Key("r-5"), Timestamp: ts4}, // Delete key covered by range delete key. 
{Key: roachpb.Key("r-6"), Timestamp: ts4}, {Key: roachpb.Key("t"), Timestamp: ts4}, } if err := MVCCGarbageCollect( context.Background(), engine, ms, gcKeys, gcTime, ); err != nil { t.Fatal(err) } if log.V(1) { log.Dev.Info(context.Background(), "Engine content after GC") kvsn, err := Scan(context.Background(), engine, localMax, keyMax, 0) if err != nil { t.Fatal(err) } for i, kv := range kvsn { log.Dev.Infof(context.Background(), "%d: %s", i, kv.Key) } } expEncKeys := []MVCCKey{ mvccVersionKey(roachpb.Key("a"), ts2), mvccVersionKey(roachpb.Key("b"), ts3), mvccVersionKey(roachpb.Key("b"), ts2), mvccVersionKey(roachpb.Key("b-del"), ts3), } kvs, err := Scan(context.Background(), engine, localMax, keyMax, 0) if err != nil { t.Fatal(err) } if len(kvs) != len(expEncKeys) { t.Fatalf("number of kvs %d != expected %d", len(kvs), len(expEncKeys)) } for i, kv := range kvs { if !kv.Key.Equal(expEncKeys[i]) { t.Errorf("%d: expected key %q; got %q", i, expEncKeys[i], kv.Key) } } // Verify aggregated stats match computed stats after GC. for _, mvccStatsTest := range mvccStatsTests { t.Run(mvccStatsTest.name, func(t *testing.T) { expMS, err := mvccStatsTest.fn(engine, localMax, roachpb.KeyMax, gcTime.WallTime) if err != nil { t.Fatal(err) } assertEq(t, engine, "verification", ms, &expMS) }) } // Compact the engine; the ForTesting() config option will assert that all // DELSIZED tombstones were appropriately sized. require.NoError(t, engine.Compact(ctx)) } // TestMVCCGarbageCollectNonDeleted verifies that the first value for // a key cannot be GC'd if it's not deleted. 
func TestMVCCGarbageCollectNonDeleted(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	s := "string"
	ts1 := hlc.Timestamp{WallTime: 1e9}
	ts2 := hlc.Timestamp{WallTime: 2e9}
	val1 := mkVal(s, ts1)
	val2 := mkVal(s, ts2)
	// An inline value carries an empty timestamp; inline values are exempt
	// from the "latest version must be deleted" GC restriction.
	valInline := mkVal(s, hlc.Timestamp{})
	testData := []struct {
		key      roachpb.Key
		vals     []roachpb.Value // versions to write, oldest first
		expError string          // expected GC error regexp; "" means GC must succeed
	}{
		// "a" has a live (non-deleted) latest version at ts2, so a GC request
		// covering ts2 must be rejected.
		{roachpb.Key("a"), []roachpb.Value{val1, val2}, `request to GC non-deleted, latest value of "a"`},
		// An inline value may be GC'ed even though it is "live".
		{roachpb.Key("inline"), []roachpb.Value{valInline}, ""},
	}

	for _, test := range testData {
		for _, val := range test.vals {
			// Clone before clearing the timestamp: MVCCPut takes the write
			// timestamp separately and expects the value's own timestamp to
			// be empty.
			valCpy := *protoutil.Clone(&val).(*roachpb.Value)
			valCpy.Timestamp = hlc.Timestamp{}
			if _, err := MVCCPut(ctx, engine, test.key, val.Timestamp, valCpy, MVCCWriteOptions{}); err != nil {
				t.Fatal(err)
			}
		}
		// Ask GC to collect everything at or below ts2, i.e. including the
		// latest (live) version for key "a".
		keys := []kvpb.GCRequest_GCKey{
			{Key: test.key, Timestamp: ts2},
		}
		err := MVCCGarbageCollect(ctx, engine, nil, keys, ts2)
		if !testutils.IsError(err, test.expError) {
			t.Fatalf("expected error %q when garbage collecting a non-deleted live value, found %v", test.expError, err)
		}
	}

	// Compact the engine; the ForTesting() config option will assert that all
	// DELSIZED tombstones were appropriately sized.
	require.NoError(t, engine.Compact(ctx))
}

// TestMVCCGarbageCollectIntent verifies that an intent cannot be GC'd.
func TestMVCCGarbageCollectIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	bytes := []byte("value")
	ts1 := hlc.Timestamp{WallTime: 1e9}
	ts2 := hlc.Timestamp{WallTime: 2e9}
	key := roachpb.Key("a")
	// Write a committed version at ts1 ...
	{
		val1 := roachpb.MakeValueFromBytes(bytes)
		if _, err := MVCCPut(ctx, engine, key, ts1, val1, MVCCWriteOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	// ... then a transactional delete at ts2, leaving an unresolved intent as
	// the newest "version" of the key.
	txn := &roachpb.Transaction{
		TxnMeta:       enginepb.TxnMeta{ID: uuid.MakeV4(), WriteTimestamp: ts2},
		ReadTimestamp: ts2,
	}
	if _, _, err := MVCCDelete(ctx, engine, key, txn.ReadTimestamp, MVCCWriteOptions{Txn: txn}); err != nil {
		t.Fatal(err)
	}
	keys := []kvpb.GCRequest_GCKey{
		{Key: key, Timestamp: ts2},
	}
	// GC must refuse to collect a key whose newest version is an intent.
	if err := MVCCGarbageCollect(ctx, engine, nil, keys, ts2); err == nil {
		t.Fatal("expected error garbage collecting an intent")
	}

	// Compact the engine; the ForTesting() config option will assert that all
	// DELSIZED tombstones were appropriately sized.
	require.NoError(t, engine.Compact(ctx))
}

// TestMVCCGarbageCollectPanicsWithMixOfLocalAndGlobalKeys verifies that
// MVCCGarbageCollect panics when presented with a mix of local and global
// keys.
func TestMVCCGarbageCollectPanicsWithMixOfLocalAndGlobalKeys(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	require.Panics(t, func() {
		ts := hlc.Timestamp{WallTime: 1e9}
		k := roachpb.Key("a")
		// One global key plus one range-local key in the same batch; mixing
		// the two keyspaces is a programming error and must panic rather
		// than return an error.
		keys := []kvpb.GCRequest_GCKey{
			{Key: k, Timestamp: ts},
			{Key: keys.RangeDescriptorKey(roachpb.RKey(k))},
		}
		if err := MVCCGarbageCollect(ctx, engine, nil, keys, ts); err != nil {
			panic(err)
		}
	})
}

// readWriterReturningSeekLTTrackingIterator wraps a ReadWriter so that tests
// can count how many times SeekLT is called on the iterators it hands out.
type readWriterReturningSeekLTTrackingIterator struct { it seekLTTrackingIterator ReadWriter } // NewMVCCIterator injects a seekLTTrackingIterator over the engine's real iterator. func (rw *readWriterReturningSeekLTTrackingIterator) NewMVCCIterator( ctx context.Context, iterKind MVCCIterKind, opts IterOptions, ) (MVCCIterator, error) { var err error rw.it.MVCCIterator, err = rw.ReadWriter.NewMVCCIterator(ctx, iterKind, opts) return &rw.it, err } // seekLTTrackingIterator is used to determine the number of times seekLT is // called. type seekLTTrackingIterator struct { seekLTCalled int MVCCIterator } func (it *seekLTTrackingIterator) SeekLT(k MVCCKey) { it.seekLTCalled++ it.MVCCIterator.SeekLT(k) } // TestMVCCGarbageCollectUsesSeekLTAppropriately ensures that the garbage // collection only utilizes SeekLT if there are enough undeleted versions. func TestMVCCGarbageCollectUsesSeekLTAppropriately(t *testing.T) { defer leaktest.AfterTest(t)() type testCaseKey struct { key string timestamps []int gcTimestamp int expSeekLT bool } type testCase struct { name string keys []testCaseKey } bytes := []byte("value") toHLC := func(seconds int) hlc.Timestamp { return hlc.Timestamp{WallTime: (time.Duration(seconds) * time.Second).Nanoseconds()} } runTestCase := func(t *testing.T, tc testCase, engine Engine) { ctx := context.Background() ms := &enginepb.MVCCStats{} for _, key := range tc.keys { for _, seconds := range key.timestamps { val := roachpb.MakeValueFromBytes(bytes) ts := toHLC(seconds) if _, err := MVCCPut(ctx, engine, roachpb.Key(key.key), ts, val, MVCCWriteOptions{Stats: ms}); err != nil { t.Fatal(err) } } } var keys []kvpb.GCRequest_GCKey var expectedSeekLTs int for _, key := range tc.keys { keys = append(keys, kvpb.GCRequest_GCKey{ Key: roachpb.Key(key.key), Timestamp: toHLC(key.gcTimestamp), }) if key.expSeekLT { expectedSeekLTs++ } } batch := engine.NewBatch() defer batch.Close() rw := readWriterReturningSeekLTTrackingIterator{ReadWriter: batch} require.NoError(t, 
MVCCGarbageCollect(ctx, &rw, ms, keys, toHLC(10))) require.Equal(t, expectedSeekLTs, rw.it.seekLTCalled) } cases := []testCase{ { name: "basic", keys: []testCaseKey{ { key: "a", timestamps: []int{1, 2}, gcTimestamp: 1, }, { key: "b", timestamps: []int{1, 2, 3}, gcTimestamp: 1, }, { key: "c", timestamps: []int{1, 2, 3, 4}, gcTimestamp: 1, }, { key: "d", timestamps: []int{1, 2, 3, 4, 5}, gcTimestamp: 1, expSeekLT: true, }, { key: "e", timestamps: []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, gcTimestamp: 1, expSeekLT: true, }, { key: "f", timestamps: []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, gcTimestamp: 6, expSeekLT: false, }, }, }, { name: "SeekLT to the end", keys: []testCaseKey{ { key: "ee", timestamps: []int{2, 3, 4, 5, 6, 7, 8, 9}, gcTimestamp: 1, expSeekLT: true, }, }, }, { name: "Next to the end", keys: []testCaseKey{ { key: "eee", timestamps: []int{8, 9}, gcTimestamp: 1, expSeekLT: false, }, }, }, { name: "Next to the next key", keys: []testCaseKey{ { key: "eeee", timestamps: []int{8, 9}, gcTimestamp: 1, expSeekLT: false, }, { key: "eeeee", timestamps: []int{8, 9}, gcTimestamp: 1, expSeekLT: false, }, }, }, { name: "Next to the end on the first version", keys: []testCaseKey{ { key: "h", timestamps: []int{9}, gcTimestamp: 1, expSeekLT: false, }, }, }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { engine := NewDefaultInMemForTesting() defer engine.Close() runTestCase(t, tc, engine) // Compact the engine; the ForTesting() config option will assert // that all DELSIZED tombstones were appropriately sized. 
require.NoError(t, engine.Compact(context.Background())) }) } } type rangeTestDataItem struct { point MVCCKeyValue txn *roachpb.Transaction rangeTombstone MVCCRangeKey } type rangeTestData []rangeTestDataItem func (d rangeTestData) populateEngine( t *testing.T, engine ReadWriter, ms *enginepb.MVCCStats, ) hlc.Timestamp { ctx := context.Background() var ts hlc.Timestamp for _, v := range d { if v.rangeTombstone.Timestamp.IsEmpty() { if v.point.Value != nil { _, err := MVCCPut(ctx, engine, v.point.Key.Key, v.point.Key.Timestamp, roachpb.MakeValueFromBytes(v.point.Value), MVCCWriteOptions{Txn: v.txn, Stats: ms}) require.NoError(t, err, "failed to insert test value into engine (%s)", v.point.Key) } else { _, _, err := MVCCDelete(ctx, engine, v.point.Key.Key, v.point.Key.Timestamp, MVCCWriteOptions{Txn: v.txn, Stats: ms}) require.NoError(t, err, "failed to insert tombstone value into engine (%s)", v.point.Key.String()) } ts = v.point.Key.Timestamp } else { rw := mvccRangeKeyEncodedTimestampReadWriter{engine} require.NoError(t, MVCCDeleteRangeUsingTombstone(ctx, rw, ms, v.rangeTombstone.StartKey, v.rangeTombstone.EndKey, v.rangeTombstone.Timestamp, hlc.ClockTimestamp{}, nil, nil, false, 0, 0, nil), "failed to insert range tombstone into engine (%s)", v.rangeTombstone.String()) ts = v.rangeTombstone.Timestamp } } return ts } // mvccRangeKeyEncodedTimestampReadWriter wraps a ReadWriter, overriding // PutMVCCRangeKey so that if MVCCRangeKey.EncodedTimestampSuffix is non-empty, // the resulting range key uses the encoded suffix verbatim. This is a bit of a // subtle and convoluted test construction, but it allows testing of range keys // with the synthetic bit without needing to bring back knowledge of the // synthetic bit to the various MVCC key encoding and decoding routines. // // Note that all production ReadWriter implementations only use // EncodedTimestampSuffix in their ClearMVCCRangeKey implementations, not in // their PutMVCCRangeKey implementations. 
// TODO(jackson): Remove this when we've guaranteed that all range keys have
// timestamps without the synthetic bit.
type mvccRangeKeyEncodedTimestampReadWriter struct {
	ReadWriter
}

// PutMVCCRangeKey writes rk. When rk.EncodedTimestampSuffix is empty the
// write is delegated to the wrapped ReadWriter's PutMVCCRangeKey; otherwise
// the pre-encoded suffix is written verbatim via PutEngineRangeKey, letting
// tests construct range keys whose timestamp encoding (e.g. carrying the
// synthetic bit) differs from what the standard encoding routines emit.
func (w mvccRangeKeyEncodedTimestampReadWriter) PutMVCCRangeKey(
	rk MVCCRangeKey, v MVCCValue,
) error {
	if len(rk.EncodedTimestampSuffix) == 0 {
		return w.ReadWriter.PutMVCCRangeKey(rk, v)
	}
	valueRaw, err := EncodeMVCCValue(v)
	if err != nil {
		return errors.Wrapf(err, "failed to encode MVCC value for range key %s", rk)
	} else if err := rk.Validate(); err != nil {
		return err
	}
	return w.ReadWriter.PutEngineRangeKey(
		rk.StartKey, rk.EndKey, rk.EncodedTimestampSuffix, valueRaw)
}

// pt creates a point update for key with default value ("testval").
func pt(key roachpb.Key, ts hlc.Timestamp) rangeTestDataItem {
	val := roachpb.MakeValueFromString("testval").RawBytes
	return rangeTestDataItem{point: MVCCKeyValue{Key: mvccVersionKey(key, ts), Value: val}}
}

// inlineValue constant is used with pt function for readability of created
// inline values (it is the zero hlc.Timestamp).
var inlineValue hlc.Timestamp

// tb creates a point tombstone (a point entry with no value).
func tb(key roachpb.Key, ts hlc.Timestamp) rangeTestDataItem {
	return rangeTestDataItem{point: MVCCKeyValue{Key: mvccVersionKey(key, ts)}}
}

// txn wraps point update and adds transaction to it for intent creation. The
// transaction's write/read timestamps are taken from the point's own
// timestamp, with a fresh random ID and a fixed anchor key.
func txn(d rangeTestDataItem) rangeTestDataItem {
	ts := d.point.Key.Timestamp
	d.txn = &roachpb.Transaction{
		Status:                 roachpb.PENDING,
		ReadTimestamp:          ts,
		GlobalUncertaintyLimit: ts.Next().Next(),
	}
	d.txn.ID = uuid.MakeV4()
	d.txn.WriteTimestamp = ts
	d.txn.Key = roachpb.Key([]byte{0, 1})
	return d
}

// rng creates range tombstone update.
func rng(start, end roachpb.Key, ts hlc.Timestamp) rangeTestDataItem { return rangeTestDataItem{rangeTombstone: MVCCRangeKey{StartKey: start, EndKey: end, Timestamp: ts}} } func TestMVCCGarbageCollectRanges(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() mkKey := func(k string) roachpb.Key { return append(keys.SystemSQLCodec.TablePrefix(42), k...) } rangeStart := mkKey("") rangeEnd := rangeStart.PrefixEnd() // Note we use keys of different lengths so that stats accounting errors // would not obviously cancel out if right and left bounds are used // incorrectly. keyA := mkKey("a") keyB := mkKey("bb") keyC := mkKey("ccc") keyD := mkKey("dddd") keyE := mkKey("eeeee") keyF := mkKey("ffffff") mkTs := func(wallTimeSec int64) hlc.Timestamp { return hlc.Timestamp{WallTime: time.Second.Nanoseconds() * wallTimeSec} } ts1 := mkTs(1) ts2 := mkTs(2) ts3 := mkTs(3) ts4 := mkTs(4) tsMax := mkTs(9) testData := []struct { name string // Note that range test data should be in ascending order (valid writes). before rangeTestData request []kvpb.GCRequest_GCRangeKey // Note that expectations should be in timestamp descending order // (forward iteration). after []MVCCRangeKey // Optional start and end range for tests that want to restrict default // key range. 
rangeStart roachpb.Key rangeEnd roachpb.Key }{ { name: "signle range", before: rangeTestData{ rng(keyA, keyD, ts3), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts3}, }, after: []MVCCRangeKey{}, }, { name: "multiple contiguous fragments", before: rangeTestData{ rng(keyA, keyD, ts2), rng(keyB, keyC, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts4, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts4)}, }, }, { name: "multiple non-contiguous fragments", before: rangeTestData{ rng(keyA, keyB, ts2), rng(keyC, keyD, ts2), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{}, }, { name: "multiple non-overlapping fragments", before: rangeTestData{ rng(keyA, keyB, ts2), rng(keyC, keyD, ts2), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyB, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, {StartKey: keyC, EndKey: keyD, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, }, }, { name: "overlapping [A--[B--B]--A]", before: rangeTestData{ rng(keyB, keyC, ts2), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{}, }, { name: "overlapping [A--[B--A]--B]", before: rangeTestData{ rng(keyB, keyD, ts2), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyC, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyC, EndKey: keyD, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, }, }, { name: "overlapping [B--[A--B]--A]", before: rangeTestData{ rng(keyA, keyC, ts2), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyD, Timestamp: ts2}, }, after: 
[]MVCCRangeKey{ {StartKey: keyA, EndKey: keyB, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, }, }, { name: "overlapping [B--[A--A]--B]", before: rangeTestData{ rng(keyA, keyD, ts2), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyB, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, {StartKey: keyC, EndKey: keyD, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, }, }, { name: "overlapping [[AB--A]--B]", before: rangeTestData{ rng(keyA, keyD, ts2), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyB, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyB, EndKey: keyD, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, }, }, { name: "overlapping [[AB--B]--A]", before: rangeTestData{ rng(keyA, keyB, ts2), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{}, }, { name: "overlapping [B--[A--AB]]", before: rangeTestData{ rng(keyA, keyD, ts2), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyB, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, }, }, { name: "overlapping [A--[B--AB]]", before: rangeTestData{ rng(keyB, keyD, ts2), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{}, }, { name: "overlapping [B--[A--AB]] point before", before: rangeTestData{ rng(keyB, keyD, ts2), pt(keyA, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyC, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, }, }, { name: "overlapping [B--[A--AB]] 
point at range start", before: rangeTestData{ rng(keyA, keyD, ts2), pt(keyA, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyB, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, }, }, { name: "overlapping [B--[A--AB]] point between", before: rangeTestData{ rng(keyA, keyD, ts2), pt(keyB, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyC, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyC, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, }, }, { name: "overlapping [B--[A--AB]] point at gc start", before: rangeTestData{ rng(keyA, keyD, ts2), pt(keyB, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyB, Timestamp: ts2, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts2)}, }, }, { name: "overlapping [A--[B--AB]] point before", before: rangeTestData{ rng(keyC, keyD, ts2), pt(keyA, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{}, }, { name: "overlapping [A--[B--AB]] point at gc start", before: rangeTestData{ rng(keyB, keyD, ts2), pt(keyA, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{}, }, { name: "overlapping [A--[B--AB]] point between", before: rangeTestData{ rng(keyC, keyD, ts2), pt(keyB, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{}, }, { name: "overlapping [A--[B--AB]] point at range start", before: rangeTestData{ rng(keyB, keyD, ts2), pt(keyB, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{}, }, { name: "range under intent", before: 
rangeTestData{ rng(keyA, keyD, ts2), txn(pt(keyA, ts4)), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{}, }, { name: "stacked range fragments", before: rangeTestData{ rng(keyB, keyC, ts2), rng(keyA, keyD, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts4}, }, after: []MVCCRangeKey{}, }, { name: "old value before range", before: rangeTestData{ pt(keyA, ts2), rng(keyB, keyC, ts3), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts3}, }, after: []MVCCRangeKey{}, }, { name: "old value at range end", before: rangeTestData{ pt(keyC, ts2), rng(keyB, keyC, ts3), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts3}, }, after: []MVCCRangeKey{}, }, { name: "range partially overlap gc request", before: rangeTestData{ rng(keyA, keyD, ts1), rng(keyA, keyD, ts3), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts1}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyB, Timestamp: ts3, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts3)}, {StartKey: keyA, EndKey: keyB, Timestamp: ts1, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts1)}, {StartKey: keyB, EndKey: keyC, Timestamp: ts3, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts3)}, {StartKey: keyC, EndKey: keyD, Timestamp: ts3, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts3)}, {StartKey: keyC, EndKey: keyD, Timestamp: ts1, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts1)}, }, }, { name: "range merges sides", before: rangeTestData{ rng(keyB, keyC, ts1), rng(keyA, keyD, ts3), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts1}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts3, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts3)}, }, }, { name: "range 
merges next", before: rangeTestData{ rng(keyB, keyC, ts1), rng(keyA, keyC, ts3), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts1}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyC, Timestamp: ts3, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts3)}, }, }, { name: "range merges previous", before: rangeTestData{ rng(keyA, keyB, ts1), rng(keyA, keyD, ts3), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyB, Timestamp: ts1}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts3, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts3)}, }, }, { name: "range merges chain", before: rangeTestData{ rng(keyB, keyC, ts1), rng(keyD, keyE, ts2), rng(keyA, keyF, ts3), rng(keyA, keyF, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts1}, {StartKey: keyD, EndKey: keyE, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyF, Timestamp: ts4, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts4)}, {StartKey: keyA, EndKey: keyF, Timestamp: ts3, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts3)}, }, }, { name: "range merges sequential", before: rangeTestData{ rng(keyC, keyD, ts1), rng(keyB, keyD, ts2), rng(keyA, keyE, ts3), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts2}, {StartKey: keyC, EndKey: keyD, Timestamp: ts2}, }, after: []MVCCRangeKey{ {StartKey: keyA, EndKey: keyE, Timestamp: ts3, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts3)}, }, }, { name: "don't merge outside range", before: rangeTestData{ rng(keyB, keyC, ts1), // Tombstone spanning multiple ranges. rng(keyA, keyD, ts4), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyB, EndKey: keyC, Timestamp: ts1}, }, after: []MVCCRangeKey{ // We only iterate data within range, so range keys would be // truncated. 
{StartKey: keyB, EndKey: keyC, Timestamp: ts4, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffix(ts4)}, }, rangeStart: keyB, rangeEnd: keyC, }, { name: "synthetic bit range keys", before: rangeTestData{ rangeTestDataItem{rangeTombstone: MVCCRangeKey{ StartKey: keyA, EndKey: keyC, Timestamp: ts4, EncodedTimestampSuffix: mvccencoding.EncodeMVCCTimestampSuffixWithSyntheticBitForTesting(ts4), }}, }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyC, Timestamp: ts4}, }, after: []MVCCRangeKey{}, }, } for _, d := range testData { t.Run(d.name, func(t *testing.T) { engine := NewDefaultInMemForTesting() defer engine.Close() // Populate range descriptor defaults. if len(d.rangeStart) == 0 { d.rangeStart = rangeStart } if len(d.rangeEnd) == 0 { d.rangeEnd = rangeEnd } var ms enginepb.MVCCStats d.before.populateEngine(t, engine, &ms) rangeKeys := rangesFromRequests(rangeStart, rangeEnd, d.request) require.NoError(t, MVCCGarbageCollectRangeKeys(ctx, engine, &ms, rangeKeys), "failed to run mvcc range tombstone garbage collect") it, err := engine.NewMVCCIterator(context.Background(), MVCCKeyIterKind, IterOptions{ KeyTypes: IterKeyTypeRangesOnly, LowerBound: d.rangeStart, UpperBound: d.rangeEnd, }) if err != nil { t.Fatal(err) } defer it.Close() it.SeekGE(MVCCKey{Key: d.rangeStart}) expectIndex := 0 for ; ; it.Next() { ok, err := it.Valid() require.NoError(t, err, "failed to iterate engine") if !ok { break } for rk := range it.RangeKeys().All() { require.Less(t, expectIndex, len(d.after), "not enough expectations; at unexpected range: %s", rk) require.EqualValues(t, d.after[expectIndex], rk, "range key is not equal") expectIndex++ } } require.Equal(t, len(d.after), expectIndex, "not all range tombstone expectations were consumed") ms.AgeTo(tsMax.WallTime) expMs, err := ComputeStats(ctx, engine, fs.UnknownReadCategory, d.rangeStart, d.rangeEnd, tsMax.WallTime) require.NoError(t, err, "failed to compute stats for range") require.EqualValues(t, expMs, 
ms, "computed range stats vs gc'd") // Compact the engine; the ForTesting() config option will assert // that all DELSIZED tombstones were appropriately sized. require.NoError(t, engine.Compact(ctx)) }) } } func rangesFromRequests( rangeStart, rangeEnd roachpb.Key, rangeKeys []kvpb.GCRequest_GCRangeKey, ) []CollectableGCRangeKey { collectableKeys := make([]CollectableGCRangeKey, len(rangeKeys)) for i, rk := range rangeKeys { leftPeekBound := rk.StartKey.Prevish(roachpb.PrevishKeyLength) if leftPeekBound.Compare(rangeStart) <= 0 { leftPeekBound = rangeStart } rightPeekBound := rk.EndKey.Next() if rightPeekBound.Compare(rangeEnd) >= 0 { rightPeekBound = rangeEnd } collectableKeys[i] = CollectableGCRangeKey{ MVCCRangeKey: MVCCRangeKey{ StartKey: rk.StartKey, EndKey: rk.EndKey, Timestamp: rk.Timestamp, }, LatchSpan: roachpb.Span{Key: leftPeekBound, EndKey: rightPeekBound}, } } return collectableKeys } func TestMVCCGarbageCollectRangesFailures(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) mkKey := func(k string) roachpb.Key { return append(keys.SystemSQLCodec.TablePrefix(42), k...) 
} rangeStart := mkKey("") rangeEnd := rangeStart.PrefixEnd() keyA := mkKey("a") keyB := mkKey("b") keyC := mkKey("c") keyD := mkKey("d") mkTs := func(wallTimeSec int64) hlc.Timestamp { return hlc.Timestamp{WallTime: time.Second.Nanoseconds() * wallTimeSec} } ts1 := mkTs(1) ts2 := mkTs(2) ts3 := mkTs(3) ts4 := mkTs(4) ts5 := mkTs(5) ts6 := mkTs(6) ts7 := mkTs(7) ts8 := mkTs(8) testData := []struct { name string before rangeTestData request []kvpb.GCRequest_GCRangeKey error string }{ { name: "request overlap", before: rangeTestData{ rng(keyA, keyD, ts3), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyC, Timestamp: ts3}, {StartKey: keyB, EndKey: keyD, Timestamp: ts3}, }, error: "range keys in gc request should be non-overlapping", }, { name: "delete range above value", before: rangeTestData{ pt(keyB, ts2), rng(keyA, keyD, ts3), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts3}, }, error: "attempt to delete range tombstone .* hiding key at .*", }, { // Note that this test is a bit contrived as we can't put intent // under the range tombstone, but we test that if you try to delete // tombstone above intents even if it doesn't exist, we would reject // the attempt as it is an indication of inconsistency. // This might be relaxed to ignore any points which are not covered. 
name: "delete range above intent", before: rangeTestData{ rng(keyA, keyD, ts2), txn(pt(keyB, ts3)), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts4}, }, error: "attempt to delete range tombstone .* hiding key at .*", }, { name: "delete range above tail of long history", before: rangeTestData{ pt(keyB, ts1), rng(keyA, keyD, ts2), pt(keyB, ts3), pt(keyB, ts4), pt(keyB, ts5), pt(keyB, ts6), pt(keyB, ts7), pt(keyB, ts8), }, request: []kvpb.GCRequest_GCRangeKey{ {StartKey: keyA, EndKey: keyD, Timestamp: ts2}, }, error: "attempt to delete range tombstone .* hiding key at .*", }, } ctx := context.Background() for _, d := range testData { t.Run(d.name, func(t *testing.T) { engine := NewDefaultInMemForTesting() defer engine.Close() d.before.populateEngine(t, engine, nil) rangeKeys := rangesFromRequests(rangeStart, rangeEnd, d.request) err := MVCCGarbageCollectRangeKeys(ctx, engine, nil, rangeKeys) require.Errorf(t, err, "expected error '%s' but found none", d.error) require.True(t, testutils.IsError(err, d.error), "expected error '%s' found '%s'", d.error, err) }) } } // TestMVCCGarbageCollectClearRange checks that basic GCClearRange functionality // works. Fine grained tests cases are tested in mvcc_histories_test // 'gc_clear_range'. This test could be used when debugging any issues found. func TestMVCCGarbageCollectClearRange(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() mkKey := func(k string) roachpb.Key { return append(keys.SystemSQLCodec.TablePrefix(42), k...) } rangeStart := mkKey("") rangeEnd := rangeStart.PrefixEnd() // Note we use keys of different lengths so that stats accounting errors // would not obviously cancel out if right and left bounds are used // incorrectly. 
keyA := mkKey("a") keyB := mkKey("bb") keyD := mkKey("dddd") mkTs := func(wallTimeSec int64) hlc.Timestamp { return hlc.Timestamp{WallTime: time.Second.Nanoseconds() * wallTimeSec} } ts2 := mkTs(2) ts4 := mkTs(4) tsGC := mkTs(5) tsMax := mkTs(9) mkGCReq := func(start roachpb.Key, end roachpb.Key) kvpb.GCRequest_GCClearRange { return kvpb.GCRequest_GCClearRange{ StartKey: start, EndKey: end, } } before := rangeTestData{ pt(keyB, ts2), rng(keyA, keyD, ts4), } request := mkGCReq(keyA, keyD) engine := NewDefaultInMemForTesting() defer engine.Close() var ms, diff enginepb.MVCCStats before.populateEngine(t, engine, &ms) require.NoError(t, MVCCGarbageCollectWholeRange(ctx, engine, &diff, request.StartKey, request.EndKey, tsGC, ms), "failed to run mvcc range tombstone garbage collect") ms.Add(diff) rks := scanRangeKeys(t, engine) require.Empty(t, rks) ks := scanPointKeys(t, engine) require.Empty(t, ks) ms.AgeTo(tsMax.WallTime) it, err := engine.NewMVCCIterator(context.Background(), MVCCKeyAndIntentsIterKind, IterOptions{ KeyTypes: IterKeyTypePointsAndRanges, LowerBound: rangeStart, UpperBound: rangeEnd, }) if err != nil { t.Fatal(err) } defer it.Close() expMs, err := ComputeStatsForIter(it, tsMax.WallTime) require.NoError(t, err, "failed to compute stats for range") require.EqualValues(t, expMs, ms, "computed range stats vs gc'd") } func TestMVCCGarbageCollectClearRangeInlinedValue(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() mkKey := func(k string) roachpb.Key { return append(keys.SystemSQLCodec.TablePrefix(42), k...) } // Note we use keys of different lengths so that stats accounting errors // would not obviously cancel out if right and left bounds are used // incorrectly. 
keyA := mkKey("a") keyB := mkKey("b") keyD := mkKey("dddd") mkTs := func(wallTimeSec int64) hlc.Timestamp { return hlc.Timestamp{WallTime: time.Second.Nanoseconds() * wallTimeSec} } tsGC := mkTs(5) mkGCReq := func(start roachpb.Key, end roachpb.Key) kvpb.GCRequest_GCClearRange { return kvpb.GCRequest_GCClearRange{ StartKey: start, EndKey: end, } } before := rangeTestData{ pt(keyB, inlineValue), } request := mkGCReq(keyA, keyD) expectedError := `found key not covered by range tombstone /Table/42/"b"/0,0` engine := NewDefaultInMemForTesting() defer engine.Close() var ms, diff enginepb.MVCCStats before.populateEngine(t, engine, &ms) // We are forcing stats to be estimates to bypass quick liveness check // that will prevent actual data checks if there's some live data. ms.ContainsEstimates = 1 err := MVCCGarbageCollectWholeRange(ctx, engine, &diff, request.StartKey, request.EndKey, tsGC, ms) ms.Add(diff) require.Errorf(t, err, "expected error '%s' but found none", expectedError) require.True(t, testutils.IsError(err, expectedError), "expected error '%s' found '%s'", expectedError, err) // Compact the engine; the ForTesting() config option will assert that all // DELSIZED tombstones were appropriately sized. require.NoError(t, engine.Compact(ctx)) } func TestMVCCGarbageCollectClearPointsInRange(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() mkKey := func(k string) roachpb.Key { return append(keys.SystemSQLCodec.TablePrefix(42), k...) } rangeStart := mkKey("") rangeEnd := rangeStart.PrefixEnd() // Note we use keys of different lengths so that stats accounting errors // would not obviously cancel out if right and left bounds are used // incorrectly. 
keyA := mkKey("a") keyB := mkKey("bb") keyC := mkKey("ccc") keyD := mkKey("dddd") mkTs := func(wallTimeSec int64) hlc.Timestamp { return hlc.Timestamp{WallTime: time.Second.Nanoseconds() * wallTimeSec} } ts1 := mkTs(1) ts2 := mkTs(2) ts3 := mkTs(3) ts4 := mkTs(4) tsMax := mkTs(9) engine := NewDefaultInMemForTesting() defer engine.Close() var ms enginepb.MVCCStats rangeTestData{ pt(keyB, ts1), pt(keyB, ts2), tb(keyB, ts3), tb(keyB, ts4), pt(keyC, ts1), tb(keyC, ts2), pt(keyC, ts3), tb(keyC, ts4), }.populateEngine(t, engine, &ms) require.NoError(t, MVCCGarbageCollectPointsWithClearRange(ctx, engine, &ms, keyA, keyD, hlc.Timestamp{}, tsMax), "failed to run mvcc range tombstone garbage collect") expected := NewDefaultInMemForTesting() defer expected.Close() var expMs enginepb.MVCCStats rks := scanRangeKeys(t, engine) expRks := scanRangeKeys(t, expected) require.EqualValues(t, expRks, rks) ks := scanPointKeys(t, engine) expKs := scanPointKeys(t, expected) require.EqualValues(t, expKs, ks) ms.AgeTo(tsMax.WallTime) expMs, err := ComputeStats(ctx, engine, fs.UnknownReadCategory, rangeStart, rangeEnd, tsMax.WallTime) require.NoError(t, err, "failed to compute stats for range") require.EqualValues(t, expMs, ms, "computed range stats vs gc'd") // Compact the engine; the ForTesting() config option will assert that all // DELSIZED tombstones were appropriately sized. require.NoError(t, engine.Compact(ctx)) } func TestMVCCGarbageCollectClearRangeFailure(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() mkKey := func(k string) roachpb.Key { return append(keys.SystemSQLCodec.TablePrefix(42), k...) } // Note we use keys of different lengths so that stats accounting errors // would not obviously cancel out if right and left bounds are used // incorrectly. 
keyA := mkKey("a") keyD := mkKey("dddd") mkTs := func(wallTimeSec int64) hlc.Timestamp { return hlc.Timestamp{WallTime: time.Second.Nanoseconds() * wallTimeSec} } engine := NewDefaultInMemForTesting() defer engine.Close() var ms enginepb.MVCCStats rangeTestData{ pt(keyA, mkTs(1)), }.populateEngine(t, engine, &ms) err := MVCCGarbageCollectPointsWithClearRange(ctx, engine, &ms, keyA, keyD, mkTs(1), mkTs(5)) errMsg := `attempt to GC data /Table/42/"a"/1.000000000,0 still visible at GC threshold 5.000000000,0 with clear range` require.Errorf(t, err, "expected error '%s' but found none", errMsg) require.True(t, testutils.IsError(err, errMsg), "expected error '%s' found '%s'", errMsg, err) } // TestResolveIntentWithLowerEpoch verifies that trying to resolve // an intent at an epoch that is lower than the epoch of the intent // leaves the intent untouched. func TestResolveIntentWithLowerEpoch(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() // Lay down an intent with a high epoch. if _, err := MVCCPut(ctx, engine, testKey1, txn1e2.ReadTimestamp, value1, MVCCWriteOptions{Txn: txn1e2}); err != nil { t.Fatal(err) } // Resolve the intent with a low epoch. if _, _, _, _, err := MVCCResolveWriteIntent(ctx, engine, nil, roachpb.MakeLockUpdate(txn1, roachpb.Span{Key: testKey1}), MVCCResolveWriteIntentOptions{}); err != nil { t.Fatal(err) } // Check that the intent was not cleared. intentRes, err := MVCCGet(ctx, engine, testKey1, hlc.MaxTimestamp, MVCCGetOptions{ Inconsistent: true, }) if err != nil { t.Fatal(err) } if intentRes.Intent == nil { t.Fatal("intent should not be cleared by resolve intent request with lower epoch") } } // TestTimeSeriesMVCCStats ensures that merge operations // result in an expected increase in timeseries data. 
func TestTimeSeriesMVCCStats(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() var ms = enginepb.MVCCStats{} // Perform a sequence of merges on the same key // and record the MVCC stats for it. if err := MVCCMerge(ctx, engine, &ms, testKey1, hlc.Timestamp{Logical: 1}, tsvalue1); err != nil { t.Fatal(err) } firstMS := ms if err := MVCCMerge(ctx, engine, &ms, testKey1, hlc.Timestamp{Logical: 1}, tsvalue1); err != nil { t.Fatal(err) } secondMS := ms // Ensure timeseries metrics increase as expected. expectedMS := firstMS expectedMS.LiveBytes += int64(len(tsvalue1.RawBytes)) expectedMS.ValBytes += int64(len(tsvalue1.RawBytes)) if secondMS.LiveBytes != expectedMS.LiveBytes { t.Fatalf("second merged LiveBytes value %v differed from expected LiveBytes value %v", secondMS.LiveBytes, expectedMS.LiveBytes) } if secondMS.ValBytes != expectedMS.ValBytes { t.Fatalf("second merged ValBytes value %v differed from expected ValBytes value %v", secondMS.LiveBytes, expectedMS.LiveBytes) } } // TestMVCCTimeSeriesPartialMerge ensures that "partial merges" of merged time // series data does not result in a different final result than a "full merge". func TestMVCCTimeSeriesPartialMerge(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() engine := NewDefaultInMemForTesting() defer engine.Close() // Perform the same sequence of merges on two different keys. For // one of them, insert some compactions which cause partial merges // to be run and affect the results. 
vals := make([]*roachpb.Value, 2) for i, k := range []roachpb.Key{testKey1, testKey2} { if err := MVCCMerge(ctx, engine, nil, k, hlc.Timestamp{Logical: 1}, tsvalue1); err != nil { t.Fatal(err) } if err := MVCCMerge(ctx, engine, nil, k, hlc.Timestamp{Logical: 2}, tsvalue2); err != nil { t.Fatal(err) } if i == 1 { if err := engine.Compact(ctx); err != nil { t.Fatal(err) } } if err := MVCCMerge(ctx, engine, nil, k, hlc.Timestamp{Logical: 2}, tsvalue2); err != nil { t.Fatal(err) } if err := MVCCMerge(ctx, engine, nil, k, hlc.Timestamp{Logical: 1}, tsvalue1); err != nil { t.Fatal(err) } if i == 1 { if err := engine.Compact(ctx); err != nil { t.Fatal(err) } } if valueRes, err := MVCCGet(ctx, engine, k, hlc.Timestamp{}, MVCCGetOptions{}); err != nil { t.Fatal(err) } else { vals[i] = valueRes.Value.ToPointer() } } if first, second := vals[0], vals[1]; !reflect.DeepEqual(first, second) { var firstTS, secondTS roachpb.InternalTimeSeriesData if err := first.GetProto(&firstTS); err != nil { t.Fatal(err) } if err := second.GetProto(&secondTS); err != nil { t.Fatal(err) } t.Fatalf("partially merged value %v differed from expected merged value %v", secondTS, firstTS) } } func TestWillOverflow(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) testCases := []struct { a, b int64 overflow bool // will a+b over- or underflow? }{ {0, 0, false}, {math.MaxInt64, 0, false}, {math.MaxInt64, 1, true}, {math.MaxInt64, math.MinInt64, false}, {math.MinInt64, 0, false}, {math.MinInt64, -1, true}, {math.MinInt64, math.MinInt64, true}, } for i, c := range testCases { if willOverflow(c.a, c.b) != c.overflow || willOverflow(c.b, c.a) != c.overflow { t.Errorf("%d: overflow recognition error", i) } } } // TestMVCCExportToSSTExhaustedAtStart is a regression test for a bug // in which mis-handling of resume spans would cause MVCCExportToSST // to return an empty resume key in cases where the resource limiters // caused an early return of a resume span. 
//
// NB: That this test treats the result of MVCCExportToSST _without_
// CPU rate limiting as the truth. Bugs that affect all exports will
// not be caught by this test.
func TestMVCCExportToSSTExhaustedAtStart(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	st := cluster.MakeTestingClusterSettings()

	// Key/time bounds shared by all subtests; exportAllQuery spans the
	// entire generated data set.
	var (
		minKey         = int64(0)
		maxKey         = int64(1000)
		minTimestamp   = hlc.Timestamp{WallTime: 100000}
		maxTimestamp   = hlc.Timestamp{WallTime: 200000}
		exportAllQuery = queryLimits{
			minKey:       minKey,
			maxKey:       maxKey,
			minTimestamp: minTimestamp,
			maxTimestamp: maxTimestamp,
			latest:       false,
		}
	)

	// When ExportRequest is interrupted by the CPU limiter, the currently
	// buffered range key stack will have its EndKey truncated to the resume
	// key. To account for this, we write all of the range keys back into a
	// store and then export them out again without interruption.
	canonicalizeRangeKeys := func(in []MVCCRangeKeyStack) []MVCCRangeKeyStack {
		if len(in) == 0 {
			return in
		}

		engine := createTestPebbleEngine()
		defer engine.Close()
		for _, keyStack := range in {
			for _, version := range keyStack.Versions {
				require.NoError(t,
					engine.PutRawMVCCRangeKey(keyStack.AsRangeKey(version), []byte{}))
			}
		}
		require.NoError(t, engine.Flush())
		// The re-export must contain range keys only.
		keys, rKeys := exportAllData(t, engine, exportAllQuery)
		require.Equal(t, 0, len(keys))
		return rKeys
	}

	// assertExportEqualWithOptions repeatedly calls MVCCExportToSST from the
	// returned resume key until exhaustion and asserts that the concatenation
	// of the chunks matches the expected (uninterrupted) export.
	assertExportEqualWithOptions := func(t *testing.T, ctx context.Context, engine Engine,
		expectedKeys []MVCCKey,
		expectedRangeKeys []MVCCRangeKeyStack,
		initialOpts MVCCExportOptions) {
		keysIndex := 0
		rKeysBuf := []MVCCRangeKeyStack{}
		startKey := initialOpts.StartKey
		for len(startKey.Key) > 0 {
			var sstFile objstorage.MemObj
			opts := initialOpts
			opts.StartKey = startKey
			_, resumeInfo, err := MVCCExportToSST(ctx, st, engine, opts, &sstFile)
			require.NoError(t, err)
			keys, rangeKeys := sstToKeys(t, sstFile.Data())
			require.LessOrEqual(t, len(keys), len(expectedKeys)-keysIndex, "remaining test key data")
			for _, key := range keys {
				require.True(t,
					key.Equal(expectedKeys[keysIndex]), "returned key is not equal")
				keysIndex++
			}
			rKeysBuf = append(rKeysBuf, rangeKeys...)
			startKey = resumeInfo.ResumeKey
		}
		require.Equal(t, len(expectedKeys), keysIndex, "not all expected keys were consumed")

		// Range key stacks may have been truncated at resume points; rewrite
		// and re-export them before comparing.
		actualRangeKeys := canonicalizeRangeKeys(rKeysBuf)
		require.Equal(t, len(expectedRangeKeys), len(actualRangeKeys))
		for i, actual := range actualRangeKeys {
			expected := expectedRangeKeys[i]
			require.True(t, actual.Equal(expected), "range key mismatch %v != %v", actual, expected)
		}
	}

	t.Run("elastic CPU limit exhausted", func(t *testing.T) {
		engine := createTestPebbleEngine()
		defer engine.Close()
		limits := dataLimits{
			minKey:          minKey,
			maxKey:          maxKey,
			minTimestamp:    minTimestamp,
			maxTimestamp:    maxTimestamp,
			tombstoneChance: 0.01,
		}
		generateData(t, engine, limits, (limits.maxKey-limits.minKey)*10)
		keys, rKeys := exportAllData(t, engine, exportAllQuery)
		// Our ElasticCPUWorkHandle will fail on the very first call. As a result,
		// the very first return from MVCCExportToSST will actually contain no
		// data but _should_ return a resume key.
		firstCall := true
		ctx := admission.ContextWithElasticCPUWorkHandle(context.Background(),
			admission.TestingNewElasticCPUHandleWithCallback(func() (bool, time.Duration) {
				if firstCall {
					firstCall = false
					return true, 0
				}
				return false, 0
			}))
		assertExportEqualWithOptions(t, ctx, engine, keys, rKeys, MVCCExportOptions{
			StartKey:           MVCCKey{Key: testKey(limits.minKey), Timestamp: limits.minTimestamp},
			EndKey:             testKey(limits.maxKey),
			StartTS:            limits.minTimestamp,
			EndTS:              limits.maxTimestamp,
			ExportAllRevisions: true,
		})
		// The limiter callback must have fired at least once.
		require.False(t, firstCall)
	})
	t.Run("elastic CPU limit always exhausted", func(t *testing.T) {
		engine := createTestPebbleEngine()
		defer engine.Close()
		limits := dataLimits{
			minKey:          minKey,
			maxKey:          maxKey,
			minTimestamp:    minTimestamp,
			maxTimestamp:    maxTimestamp,
			tombstoneChance: 0.01,
		}
		generateData(t, engine, limits, (limits.maxKey-limits.minKey)*10)
		keys, rKeys := exportAllData(t, engine, exportAllQuery)
		// Our ElasticCPUWorkHandle will always fail. But, we
		// should still make progress, one key at a time.
		ctx := admission.ContextWithElasticCPUWorkHandle(context.Background(),
			admission.TestingNewElasticCPUHandleWithCallback(func() (bool, time.Duration) {
				return true, 0
			}))
		assertExportEqualWithOptions(t, ctx, engine, keys, rKeys, MVCCExportOptions{
			StartKey:           MVCCKey{Key: testKey(limits.minKey), Timestamp: limits.minTimestamp},
			EndKey:             testKey(limits.maxKey),
			StartTS:            limits.minTimestamp,
			EndTS:              limits.maxTimestamp,
			ExportAllRevisions: true,
		})
	})
	t.Run("elastic CPU limit always exhausted with range keys", func(t *testing.T) {
		engine := createTestPebbleEngine()
		defer engine.Close()
		limits := dataLimits{
			minKey:             minKey,
			maxKey:             maxKey,
			minTimestamp:       minTimestamp,
			maxTimestamp:       maxTimestamp,
			tombstoneChance:    0.50,
			useRangeTombstones: true,
		}
		// Adding many range keys makes this test much slower,
		// so we use 2*keyRange rather than 10*keyRange here.
		generateData(t, engine, limits, (limits.maxKey-limits.minKey)*2)
		keys, rKeys := exportAllData(t, engine, exportAllQuery)
		// Our ElasticCPUWorkHandle will always fail. But, we
		// should still make progress, one key at a time.
		ctx := admission.ContextWithElasticCPUWorkHandle(context.Background(),
			admission.TestingNewElasticCPUHandleWithCallback(func() (bool, time.Duration) {
				return true, 0
			}))
		assertExportEqualWithOptions(t, ctx, engine, keys, rKeys, MVCCExportOptions{
			StartKey:           MVCCKey{Key: testKey(limits.minKey), Timestamp: limits.minTimestamp},
			EndKey:             testKey(limits.maxKey),
			StartTS:            limits.minTimestamp,
			EndTS:              limits.maxTimestamp,
			ExportAllRevisions: true,
			StopMidKey:         true,
		})
	})
	t.Run("elastic CPU limit exhausted respects StopMidKey", func(t *testing.T) {
		engine := createTestPebbleEngine()
		defer engine.Close()
		// Construct a data set that contains 6
		// revisions of the same key.
		//
		// We expect that MVCCExportToSST with
		// ExportAllRevisions set to true but with
		// StopMidKey set to false to always return
		// all or none of this key.
		revisionCount := 6
		rng := rand.New(rand.NewSource(timeutil.Now().Unix()))
		key := testKey(6)
		start := minTimestamp.Add(1, 0)
		// nextKey returns the i-th revision's versioned key; nextValue a
		// random 256-byte value.
		nextKey := func(i int64) MVCCKey {
			return MVCCKey{Key: key, Timestamp: start.Add(i, 0)}
		}
		nextValue := func() MVCCValue {
			return MVCCValue{Value: roachpb.MakeValueFromBytes(randutil.RandBytes(rng, 256))}
		}
		for i := 0; i < revisionCount; i++ {
			require.NoError(t, engine.PutMVCC(nextKey(int64(i)), nextValue()), "write data to test storage")
		}
		require.NoError(t, engine.Flush(), "Flush engine data")

		var sstFile objstorage.MemObj
		opts := MVCCExportOptions{
			StartKey:           MVCCKey{Key: testKey(minKey), Timestamp: minTimestamp},
			EndKey:             testKey(maxKey),
			StartTS:            minTimestamp,
			EndTS:              maxTimestamp,
			ExportAllRevisions: true,
			StopMidKey:         false,
		}
		// Create an ElasticCPUWorkHandler that will
		// simulate a resource constraint failure
		// after some number of loop iterations.
		ctx := context.Background()
		callsBeforeFailure := 2
		ctx = admission.ContextWithElasticCPUWorkHandle(ctx,
			admission.TestingNewElasticCPUHandleWithCallback(func() (bool, time.Duration) {
				if callsBeforeFailure > 0 {
					callsBeforeFailure--
					return false, 0
				}
				return true, 0
			}))
		// With StopMidKey=false, we expect 6
		// revisions or 0 revisions.
		_, _, err := MVCCExportToSST(ctx, st, engine, opts, &sstFile)
		require.NoError(t, err)
		chunk, _ := sstToKeys(t, sstFile.Data())
		require.Equal(t, 6, len(chunk))

		// With StopMidKey=true, we can stop in the
		// middle of iteration.
		callsBeforeFailure = 2
		sstFile = objstorage.MemObj{}
		opts.StopMidKey = true
		_, _, err = MVCCExportToSST(ctx, st, engine, opts, &sstFile)
		require.NoError(t, err)
		chunk, _ = sstToKeys(t, sstFile.Data())
		// We expect 3 here rather than 2 because the
		// first iteration never calls the handler.
		require.Equal(t, 3, len(chunk))
	})
}

// queryLimits describes the key and time bounds of an export query.
type queryLimits struct {
	minKey       int64
	maxKey       int64
	minTimestamp hlc.Timestamp
	maxTimestamp hlc.Timestamp
	latest       bool // if set, export only the latest revision of each key
}

// testKey returns a fixed-width roachpb.Key for the given id.
func testKey(id int64) roachpb.Key {
	return []byte(fmt.Sprintf("key-%08d", id))
}

// dataLimits describes the shape of randomly generated test data.
type dataLimits struct {
	minKey             int64
	maxKey             int64
	minTimestamp       hlc.Timestamp
	maxTimestamp       hlc.Timestamp
	tombstoneChance    float64 // probability a generated entry is a tombstone
	useRangeTombstones bool
}

// exportAllData exports everything matching limits in a single uninterrupted
// call and returns the decoded point and range keys.
func exportAllData(
	t *testing.T, engine Engine, limits queryLimits,
) ([]MVCCKey, []MVCCRangeKeyStack) {
	st := cluster.MakeTestingClusterSettings()
	var sstFile objstorage.MemObj
	_, _, err := MVCCExportToSST(context.Background(), st, engine, MVCCExportOptions{
		StartKey:           MVCCKey{Key: testKey(limits.minKey), Timestamp: limits.minTimestamp},
		EndKey:             testKey(limits.maxKey),
		StartTS:            limits.minTimestamp,
		EndTS:              limits.maxTimestamp,
		ExportAllRevisions: !limits.latest,
	}, &sstFile)
	require.NoError(t, err, "Failed to export expected data")
	return sstToKeys(t, sstFile.Data())
}

// sstToKeys decodes an exported SST into its point keys and (cloned) range
// key stacks.
func sstToKeys(t *testing.T, data []byte) ([]MVCCKey, []MVCCRangeKeyStack) {
	var results []MVCCKey
	var rangeKeyRes []MVCCRangeKeyStack
	it, err := NewMemSSTIterator(data,
		false, IterOptions{
			KeyTypes:   pebble.IterKeyTypePointsAndRanges,
			LowerBound: keys.MinKey,
			UpperBound: keys.MaxKey,
		})
	require.NoError(t, err, "Failed to read exported data")
	defer it.Close()
	for it.SeekGE(MVCCKey{Key: []byte{}}); ; {
		ok, err := it.Valid()
		require.NoError(t, err, "Failed to advance iterator while preparing data")
		if !ok {
			break
		}
		if it.RangeKeyChanged() {
			hasPoint, hasRange := it.HasPointAndRange()
			if hasRange {
				// Clone the stack so it survives iterator advancement.
				rangeKeyRes = append(rangeKeyRes, it.RangeKeys().Clone())
			}
			if !hasPoint {
				it.Next()
				continue
			}
		}
		results = append(results, MVCCKey{
			// Copy the unsafe key bytes before advancing the iterator.
			Key:       append(roachpb.Key(nil), it.UnsafeKey().Key...),
			Timestamp: it.UnsafeKey().Timestamp,
		})
		it.Next()
	}
	return results, rangeKeyRes
}

// generateData writes totalEntries random entries (point values, point
// tombstones, or range tombstones per limits) and flushes the engine.
//
// NOTE(review): keyID and timestamp are drawn from the global rand source,
// not from rng — the seed above only affects the generated value bytes.
func generateData(t *testing.T, engine Engine, limits dataLimits, totalEntries int64) {
	rng := rand.New(rand.NewSource(timeutil.Now().Unix()))
	for i := int64(0); i < totalEntries; i++ {
		keyID := limits.minKey + rand.Int63n(limits.maxKey-limits.minKey)
		key := testKey(keyID)
		timestamp := limits.minTimestamp.Add(rand.Int63n(limits.maxTimestamp.WallTime-limits.minTimestamp.WallTime), 0)
		size := 256
		if rng.Float64() < limits.tombstoneChance {
			size = 0
		}
		if limits.useRangeTombstones && size == 0 {
			require.NoError(t, engine.PutRawMVCCRangeKey(MVCCRangeKey{
				StartKey: key, EndKey: testKey(keyID + 2), Timestamp: timestamp}, []byte{}),
				"write data to test storage")
		} else {
			value := MVCCValue{Value: roachpb.MakeValueFromBytes(randutil.RandBytes(rng, size))}
			require.NoError(t, engine.PutMVCC(MVCCKey{Key: key, Timestamp: timestamp}, value),
				"Write data to test storage")
		}
	}
	require.NoError(t, engine.Flush(), "Flush engine data")
}

func TestMVCCExportToSSTFailureIntentBatching(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// Test function uses a fixed time and key range to produce SST.
	// Use varying inserted keys for values and intents to putting them in and out of ranges.
checkReportedErrors := func(data []testValue, expectedIntentIndices []int, targetBytes uint64) func(*testing.T) { return func(t *testing.T) { ctx := context.Background() st := cluster.MakeTestingClusterSettings() engine := createTestPebbleEngine() defer engine.Close() require.NoError(t, fillInData(ctx, engine, data)) ss := kvpb.ScanStats{} _, _, err := MVCCExportToSST(ctx, st, engine, MVCCExportOptions{ StartKey: MVCCKey{Key: key(10)}, EndKey: key(20000), StartTS: ts(999), EndTS: ts(2000), ExportAllRevisions: true, TargetSize: 0, MaxSize: 0, MaxLockConflicts: uint64(MaxConflictsPerLockConflictError.Default()), TargetLockConflictBytes: targetBytes, StopMidKey: false, ScanStats: &ss, }, &objstorage.MemObj{}) if len(expectedIntentIndices) == 0 { require.NoError(t, err) } else { require.Error(t, err) e := (*kvpb.LockConflictError)(nil) if !errors.As(err, &e) { require.Fail(t, "Expected LockConflictError, got %T", err) } require.Equal(t, len(expectedIntentIndices), len(e.Locks)) for i, dataIdx := range expectedIntentIndices { require.Equal(t, data[dataIdx].txn.ID, e.Locks[i].Txn.ID) } } // Check some stats to ensure they are being populated. require.Less(t, uint64(MaxConflictsPerLockConflictError.Default()), ss.NumInterfaceSteps) require.Equal(t, uint64(1), ss.NumScans) } } // Export range is fixed to k:["00010", "10000"), ts:(999, 2000] for all tests. 
	// Interleave committed values (even slots) with intents (odd slots); the
	// odd indices are the conflicts we expect reported.
	testDataCount := int(MaxConflictsPerLockConflictError.Default() + 1)
	testData := make([]testValue, testDataCount*2)
	expectedErrors := make([]int, testDataCount)
	for i := 0; i < testDataCount; i++ {
		testData[i*2] = value(key(i*2+11), "value", ts(1000))
		testData[i*2+1] = intent(key(i*2+12), "intent", ts(1001))
		expectedErrors[i] = i*2 + 1
	}
	t.Run("Receive no more than limit intents", checkReportedErrors(testData, expectedErrors[:MaxConflictsPerLockConflictError.Default()], 0))
	t.Run("Byte target checking", checkReportedErrors(testData, expectedErrors[:5000], uint64(TargetBytesPerLockConflictError.Default())))
}

// TestMVCCExportToSSTSplitMidKey verifies that split mid key in exports will
// omit resume timestamps where they are unnecessary e.g. when we split at the
// new key. In this case we can safely use the SST as is without the need to
// merge with the remaining versions of the key.
func TestMVCCExportToSSTSplitMidKey(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()

	engine := createTestPebbleEngine()
	defer engine.Close()

	const keyValueSize = 11

	var testData = []testValue{
		value(key(1), "value1", ts(1000)),
		value(key(2), "value2", ts(1000)),
		value(key(2), "value3", ts(2000)),
		value(key(3), "value4", ts(2000)),
	}
	require.NoError(t, fillInData(ctx, engine, testData))

	for _, test := range []struct {
		exportAll    bool
		stopMidKey   bool
		useMaxSize   bool
		resumeCount  int
		resumeWithTs int
	}{
		{false, false, false, 3, 0},
		{true, false, false, 3, 0},
		{false, true, false, 3, 0},
		// No resume timestamps since we fall under max size criteria
		{true, true, false, 3, 0},
		{true, true, true, 4, 1},
	} {
		t.Run(
			fmt.Sprintf("exportAll=%t,stopMidKey=%t,useMaxSize=%t",
				test.exportAll, test.stopMidKey, test.useMaxSize),
			func(t *testing.T) {
				resumeKey := MVCCKey{Key: key(1)}
				resumeWithTs := 0
				resumeCount := 0
				var maxSize uint64 = 0
				if test.useMaxSize {
					maxSize = keyValueSize * 2
				}
				// Export in chunks, following resume keys until exhaustion.
				for !resumeKey.Equal(MVCCKey{}) {
					_,
						resumeInfo, err := MVCCExportToSST(
						ctx, st, engine, MVCCExportOptions{
							StartKey:           resumeKey,
							EndKey:             key(3).Next(),
							StartTS:            hlc.Timestamp{},
							EndTS:              hlc.Timestamp{WallTime: 9999},
							ExportAllRevisions: test.exportAll,
							TargetSize:         1,
							MaxSize:            maxSize,
							StopMidKey:         test.stopMidKey,
						}, &objstorage.MemObj{})
					require.NoError(t, err)
					resumeKey = resumeInfo.ResumeKey
					if !resumeKey.Timestamp.IsEmpty() {
						resumeWithTs++
					}
					resumeCount++
				}
				require.Equal(t, test.resumeCount, resumeCount)
				require.Equal(t, test.resumeWithTs, resumeWithTs)
			})
	}
}

// TestMVCCExportToSSTSErrorsOnLargeKV verifies that MVCCExportToSST errors on a
// single kv that is larger than max size.
func TestMVCCExportToSSTSErrorsOnLargeKV(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()

	engine := createTestPebbleEngine()
	defer engine.Close()

	var testData = []testValue{value(key(1), "value1", ts(1000))}
	require.NoError(t, fillInData(ctx, engine, testData))
	summary, _, err := MVCCExportToSST(
		ctx, st, engine, MVCCExportOptions{
			StartKey:           MVCCKey{Key: key(1)},
			EndKey:             key(3).Next(),
			StartTS:            hlc.Timestamp{},
			EndTS:              hlc.Timestamp{WallTime: 9999},
			ExportAllRevisions: false,
			TargetSize:         1,
			MaxSize:            1,
			StopMidKey:         true,
		}, &objstorage.MemObj{})
	// Nothing may be exported and the error must be ExceedMaxSizeError.
	require.Equal(t, int64(0), summary.DataSize)
	expectedErr := &ExceedMaxSizeError{}
	require.ErrorAs(t, err, &expectedErr)
}

func TestMVCCExportDeadlineExceeded(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cancelFunc := context.WithCancel(context.Background())
	st := cluster.MakeTestingClusterSettings()

	engine := createTestPebbleEngine()
	defer engine.Close()

	var testData = []testValue{value(key(1), "value1", ts(1000))}
	require.NoError(t, fillInData(ctx, engine, testData))
	ss := kvpb.ScanStats{}
	// Cancel the context before issuing the export.
	cancelFunc()
	_, _, err := MVCCExportToSST(
		ctx, st, engine, MVCCExportOptions{
			StartKey:           MVCCKey{Key: key(1)},
			EndKey:             key(3).Next(),
			StartTS:            hlc.Timestamp{},
			EndTS:              hlc.Timestamp{WallTime: 9999},
			ExportAllRevisions:
			false,
			TargetSize: 1,
			ScanStats:  &ss,
		}, &objstorage.MemObj{})
	// Stats are not completely deterministic, so we assert lower bounds.
	require.LessOrEqual(t, uint64(1), ss.NumInterfaceSeeks)
	require.LessOrEqual(t, uint64(1), ss.NumInterfaceSteps)
	require.LessOrEqual(t, uint64(1), ss.PointCount)
	require.LessOrEqual(t, uint64(10), ss.KeyBytes)
	require.LessOrEqual(t, uint64(10), ss.ValueBytes)
	require.Equal(t, uint64(1), ss.NumScans)
	require.NoError(t, err)
}

// TestMVCCExportFingerprint verifies that MVCCExportFingerprint correctly
// fingerprints point keys in a given key and time interval, and returns the
// range keys in a pebble SST.
//
// This test uses a `fingerprintOracle` to verify that the fingerprint generated
// by `MVCCExportFingerprint` is what we would get if we iterated over an SST
// with all keys and computed our own fingerprint.
func TestMVCCExportFingerprint(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()

	// fingerprint exports with opts and returns the fingerprint, the bytes of
	// the range-key SST (empty when no range keys were found), the bulk op
	// summary, and the resume key.
	fingerprint := func(opts MVCCExportOptions, engine Engine) (uint64, []byte, kvpb.BulkOpSummary, MVCCKey) {
		var dest objstorage.MemObj
		var err error
		res, resumeInfo, fingerprint, hasRangeKeys, err := MVCCExportFingerprint(
			ctx, st, engine, opts, &dest)
		require.NoError(t, err)
		if !hasRangeKeys {
			dest = objstorage.MemObj{}
		}
		return fingerprint, dest.Data(), res, resumeInfo.ResumeKey
	}

	// verifyFingerprintAgainstOracle uses the `fingerprintOracle` to compute a
	// fingerprint over the same key and time interval, and ensure our fingerprint
	// and range keys match up with that generated by the oracle.
	verifyFingerprintAgainstOracle := func(
		actualFingerprint uint64,
		actualRangekeys []MVCCRangeKeyStack,
		opts MVCCExportOptions,
		engine Engine) {
		oracle := makeFingerprintOracle(st, engine, opts)
		expectedFingerprint, expectedRangeKeys := oracle.getFingerprintAndRangeKeys(ctx, t)
		require.Equal(t, expectedFingerprint, actualFingerprint)
		require.Equal(t, expectedRangeKeys, actualRangekeys)
	}

	engine := createTestPebbleEngine()
	defer engine.Close()

	kvSize := int64(16)
	rangeKeySize := int64(10)

	// Insert some point keys.
	//
	// 2000        value3  value4
	//
	// 1000 value1 value2
	//
	//      1      2       3
	var testData = []testValue{
		value(key(1), "value1", ts(1000)),
		value(key(2), "value2", ts(1000)),
		value(key(2), "value3", ts(2000)),
		value(key(3), "value4", ts(2000)),
	}
	require.NoError(t, fillInData(ctx, engine, testData))

	// Insert range keys.
	//
	// 3000 [--- r2 ---)
	//
	// 2000        value3  value4  [--- r1 ---)
	//
	// 1000 value1 value2
	//
	//      1      2       3       4     5
	require.NoError(t, engine.PutRawMVCCRangeKey(MVCCRangeKey{
		StartKey:  key(4),
		EndKey:    key(5),
		Timestamp: ts(2000),
	}, []byte{}))
	require.NoError(t, engine.PutRawMVCCRangeKey(MVCCRangeKey{
		StartKey:  key(1),
		EndKey:    key(2),
		Timestamp: ts(3000),
	}, []byte{}))

	testutils.RunTrueAndFalse(t, "allRevisions", func(t *testing.T, allRevisions bool) {
		t.Run("no-key-or-ts-bounds", func(t *testing.T) {
			opts := MVCCExportOptions{
				StartKey:           MVCCKey{Key: key(1)},
				EndKey:             keys.MaxKey,
				StartTS:            hlc.Timestamp{},
				EndTS:              hlc.Timestamp{WallTime: 9999},
				ExportAllRevisions: allRevisions,
			}
			fingerprint, rangeKeySST, summary, resumeKey := fingerprint(opts, engine)
			require.Empty(t, resumeKey)
			rangeKeys := getRangeKeys(t, rangeKeySST)
			if allRevisions {
				require.Equal(t, kvSize*4+rangeKeySize*2, summary.DataSize)
				require.Equal(t, 2, len(rangeKeys))
			} else {
				require.Equal(t, kvSize*2, summary.DataSize)
				// StartTime is empty so we don't read rangekeys when not exporting all
				// revisions.
				require.Empty(t, rangeKeys)
			}
			verifyFingerprintAgainstOracle(fingerprint, rangeKeys, opts, engine)
		})

		t.Run("key-bounds", func(t *testing.T) {
			opts := MVCCExportOptions{
				StartKey:           MVCCKey{Key: key(1)},
				EndKey:             key(2).Next(),
				StartTS:            hlc.Timestamp{},
				EndTS:              hlc.Timestamp{WallTime: 9999},
				ExportAllRevisions: allRevisions,
			}
			fingerprint, rangeKeySST, summary, resumeKey := fingerprint(opts, engine)
			require.Empty(t, resumeKey)
			rangeKeys := getRangeKeys(t, rangeKeySST)
			if allRevisions {
				require.Equal(t, kvSize*3+rangeKeySize, summary.DataSize)
				require.Equal(t, 1, len(rangeKeys))
			} else {
				// Rangekey masks the point key 1@1000, so we only see 2@2000.
				require.Equal(t, kvSize*1, summary.DataSize)
				// StartTime is empty, so we don't read rangekeys when not exporting all
				// revisions.
				require.Empty(t, rangeKeys)
			}
			verifyFingerprintAgainstOracle(fingerprint, getRangeKeys(t, rangeKeySST), opts, engine)
		})

		t.Run("outside-point-key-bounds", func(t *testing.T) {
			// Key span past all point keys: only range key r1 can appear.
			opts := MVCCExportOptions{
				StartKey:           MVCCKey{Key: key(3).Next()},
				EndKey:             keys.MaxKey,
				StartTS:            hlc.Timestamp{},
				EndTS:              hlc.Timestamp{WallTime: 9999},
				ExportAllRevisions: allRevisions,
			}
			fingerprint, rangeKeySST, summary, resumeKey := fingerprint(opts, engine)
			require.Empty(t, resumeKey)
			rangeKeys := getRangeKeys(t, rangeKeySST)
			require.Equal(t, uint64(0), fingerprint)
			if allRevisions {
				require.Equal(t, rangeKeySize, summary.DataSize)
				require.Len(t, rangeKeys, 1)
			} else {
				require.Equal(t, int64(0), summary.DataSize)
				require.Empty(t, rangeKeys)
			}
			verifyFingerprintAgainstOracle(fingerprint, getRangeKeys(t, rangeKeySST), opts, engine)
		})

		t.Run("time-bounds", func(t *testing.T) {
			// Time window containing only ts=1000: value1 and value2, no
			// range keys.
			opts := MVCCExportOptions{
				StartKey:           MVCCKey{Key: key(1)},
				EndKey:             keys.MaxKey,
				StartTS:            ts(1000).Prev(),
				EndTS:              ts(1000),
				ExportAllRevisions: allRevisions,
			}
			fingerprint, rangeKeySST, summary, resumeKey := fingerprint(opts, engine)
			require.Empty(t, resumeKey)
			rangeKeys := getRangeKeys(t, rangeKeySST)
			require.Empty(t, rangeKeys)
			require.Equal(t,
				kvSize*2, summary.DataSize)
			verifyFingerprintAgainstOracle(fingerprint, getRangeKeys(t, rangeKeySST), opts, engine)
		})

		t.Run("outside-point-key-time-bounds", func(t *testing.T) {
			// Time window (2000, 3000]: only range key r2, no point keys, so
			// the fingerprint must be zero.
			opts := MVCCExportOptions{
				StartKey:           MVCCKey{Key: key(1)},
				EndKey:             keys.MaxKey,
				StartTS:            ts(2000),
				EndTS:              ts(3000),
				ExportAllRevisions: allRevisions,
			}
			fingerprint, rangeKeySST, summary, resumeKey := fingerprint(opts, engine)
			require.Empty(t, resumeKey)
			rangeKeys := getRangeKeys(t, rangeKeySST)
			require.Equal(t, rangeKeySize, summary.DataSize)
			require.Len(t, rangeKeys, 1)
			require.Equal(t, uint64(0), fingerprint)
			verifyFingerprintAgainstOracle(fingerprint, getRangeKeys(t, rangeKeySST), opts, engine)
		})

		t.Run("assert-hash-is-per-kv", func(t *testing.T) {
			// Fingerprint point keys 1 and 2.
			opts := MVCCExportOptions{
				StartKey:           MVCCKey{Key: key(1)},
				EndKey:             key(2).Next(),
				StartTS:            hlc.Timestamp{},
				EndTS:              hlc.Timestamp{WallTime: 9999},
				ExportAllRevisions: allRevisions,
			}
			fingerprint1, _, summary, resumeKey := fingerprint(opts, engine)
			require.Empty(t, resumeKey)
			if allRevisions {
				require.Equal(t, 3*kvSize+rangeKeySize, summary.DataSize)
			} else {
				// Rangekey masking means we only see 2@2000.
				require.Equal(t, kvSize, summary.DataSize)
			}

			// Fingerprint point key 3.
			opts = MVCCExportOptions{
				StartKey:           MVCCKey{Key: key(3)},
				EndKey:             keys.MaxKey,
				StartTS:            hlc.Timestamp{},
				EndTS:              hlc.Timestamp{WallTime: 9999},
				ExportAllRevisions: allRevisions,
			}
			fingerprint2, _, summary2, resumeKey2 := fingerprint(opts, engine)
			require.Empty(t, resumeKey2)
			if allRevisions {
				require.Equal(t, kvSize+rangeKeySize, summary2.DataSize)
			} else {
				require.Equal(t, kvSize, summary2.DataSize)
			}

			// Fingerprint point keys 1 to 3.
			opts = MVCCExportOptions{
				StartKey:           MVCCKey{Key: key(1)},
				EndKey:             keys.MaxKey,
				StartTS:            hlc.Timestamp{},
				EndTS:              hlc.Timestamp{WallTime: 9999},
				ExportAllRevisions: allRevisions,
			}
			fingerprint3, _, summary3, resumeKey3 := fingerprint(opts, engine)
			require.Empty(t, resumeKey3)
			if allRevisions {
				require.Equal(t, 4*kvSize+2*rangeKeySize, summary3.DataSize)
			} else {
				require.Equal(t, 2*kvSize, summary3.DataSize)
			}

			// Verify that fp3 = fp1 ^ fp2
			require.Equal(t, fingerprint3, fingerprint1^fingerprint2)
		})
	})
}

// fingerprintOracle independently recomputes an export fingerprint for
// comparison against MVCCExportFingerprint.
type fingerprintOracle struct {
	st     *cluster.Settings
	engine Engine
	opts   *MVCCExportOptions
}

// makeFingerprintOracle returns a fingerprintOracle that can be used to check
// the correctness of a fingerprint for point keys.
func makeFingerprintOracle(
	st *cluster.Settings, engine Engine, opts MVCCExportOptions,
) *fingerprintOracle {
	return &fingerprintOracle{
		opts:   &opts,
		engine: engine,
		st:     st,
	}
}

// getFingerprintAndRangeKeys can be used to generate the fingerprint of point
// keys in an interval determined by the supplied `MVCCExportOptions`. This
// fingerprint is generated by exporting the point and range keys to a pebble
// SST using `MVCCExportToSST` and then maintaining a XOR aggregate of the hash
// of every point key in the SST. Range keys are not fingerprinted but instead
// returned as is to the caller.
func (f *fingerprintOracle) getFingerprintAndRangeKeys(
	ctx context.Context, t *testing.T,
) (uint64, []MVCCRangeKeyStack) {
	t.Helper()
	// Export everything in the configured span to an in-memory SST, then
	// fingerprint its point keys and collect its range keys separately.
	var dest objstorage.MemObj
	_, _, err := MVCCExportToSST(ctx, f.st, f.engine, *f.opts, &dest)
	require.NoError(t, err)
	return f.fingerprintPointKeys(t, dest.Data()), getRangeKeys(t, dest.Data())
}

// fingerprintPointKeys hashes every point key in the given SST and returns the
// XOR aggregate of the per-KV FNV-64 hashes. A key with an empty timestamp is
// hashed as key+value; a versioned key additionally folds in its encoded MVCC
// timestamp. XOR makes the aggregate independent of iteration order.
func (f *fingerprintOracle) fingerprintPointKeys(t *testing.T, dataSST []byte) uint64 {
	t.Helper()
	hasher := fnv.New64()
	var xorAgg uint64
	iterOpts := IterOptions{
		KeyTypes:   IterKeyTypePointsOnly,
		LowerBound: keys.LocalMax,
		UpperBound: keys.MaxKey,
	}
	iter, err := NewMemSSTIterator(dataSST, false, iterOpts)
	if err != nil {
		t.Fatal(err)
	}
	defer iter.Close()
	for iter.SeekGE(MVCCKey{Key: keys.MinKey}); ; iter.Next() {
		if valid, err := iter.Valid(); !valid || err != nil {
			if err != nil {
				t.Fatal(err)
			}
			break
		}
		k := iter.UnsafeKey()
		if k.Timestamp.IsEmpty() {
			// Unversioned key: hash key bytes then value bytes.
			_, err := hasher.Write(k.Key)
			require.NoError(t, err)
			v, err := iter.UnsafeValue()
			require.NoError(t, err)
			_, err = hasher.Write(v)
			require.NoError(t, err)
		} else {
			// Versioned key: hash key bytes, the encoded MVCC timestamp,
			// and finally the value bytes.
			_, err := hasher.Write(k.Key)
			require.NoError(t, err)
			tsLen := mvccencoding.EncodedMVCCTimestampLength(k.Timestamp)
			require.NotZero(t, tsLen)
			timestampBuf := make([]byte, tsLen)
			mvccencoding.EncodeMVCCTimestampToBufSized(timestampBuf, k.Timestamp)
			_, err = hasher.Write(timestampBuf)
			require.NoError(t, err)
			v, err := iter.UnsafeValue()
			require.NoError(t, err)
			_, err = hasher.Write(v)
			require.NoError(t, err)
		}
		// Fold this KV's hash into the aggregate and reset for the next KV.
		xorAgg = xorAgg ^ hasher.Sum64()
		hasher.Reset()
	}
	return xorAgg
}

// getRangeKeys returns every range-key stack present in the given SST, cloned
// so the stacks remain valid after the iterator is closed. An empty SST yields
// an empty (non-nil) slice.
func getRangeKeys(t *testing.T, dataSST []byte) []MVCCRangeKeyStack {
	t.Helper()
	if len(dataSST) == 0 {
		return []MVCCRangeKeyStack{}
	}
	iterOpts := IterOptions{
		KeyTypes:   IterKeyTypeRangesOnly,
		LowerBound: keys.LocalMax,
		UpperBound: keys.MaxKey,
	}
	iter, err := NewMemSSTIterator(dataSST, false, iterOpts)
	require.NoError(t, err)
	defer iter.Close()
	allRangeKeys := make([]MVCCRangeKeyStack, 0)
	for iter.SeekGE(MVCCKey{Key: keys.MinKey}); ; iter.Next() {
		if ok, err := iter.Valid(); err != nil {
			t.Fatal(err)
		} else if !ok {
			break
		}
		rangeKeys := iter.RangeKeys()
		// Clone: RangeKeys() returns memory owned by the iterator.
		allRangeKeys = append(allRangeKeys, rangeKeys.Clone())
	}
	return allRangeKeys
}

// mvccGetRaw fetches a raw MVCC value, for use in tests. Fails the test on
// iterator errors; a missing key yields nil.
func mvccGetRaw(t *testing.T, r Reader, key MVCCKey) []byte {
	value, err := mvccGetRawWithError(t, r, key)
	require.NoError(t, err)
	return value
}

// mvccGetRawWithError is mvccGetRaw but surfaces the lookup error to the
// caller instead of asserting on it. Returns (nil, nil) when the key is absent.
func mvccGetRawWithError(t *testing.T, r Reader, key MVCCKey) ([]byte, error) {
	iter, err := r.NewMVCCIterator(context.Background(), MVCCKeyAndIntentsIterKind, IterOptions{Prefix: true})
	if err != nil {
		t.Fatal(err)
	}
	defer iter.Close()
	iter.SeekGE(key)
	if ok, err := iter.Valid(); err != nil || !ok {
		return nil, err
	}
	return iter.Value()
}

// TestMVCCLookupRangeKeyValue exercises MVCCLookupRangeKeyValue against three
// overlapping MVCC range keys (see the diagram below) and records the result
// of each lookup in a datadriven/echotest golden file.
func TestMVCCLookupRangeKeyValue(t *testing.T) {
	defer leaktest.AfterTest(t)()
	eng := createTestPebbleEngine()
	defer eng.Close()

	const diagram = `
# a b c d
# t=4000 [-----v1----)
# t=2000 [-v2-)[-----v3----)
#
`
	// Each value is distinguished by its LocalTimestamp so the golden output
	// can identify which fragment a lookup resolved to.
	v1 := MVCCValue{
		MVCCValueHeader: enginepb.MVCCValueHeader{
			LocalTimestamp: hlc.ClockTimestamp{WallTime: 1},
		},
	}
	v2 := MVCCValue{
		MVCCValueHeader: enginepb.MVCCValueHeader{
			LocalTimestamp: hlc.ClockTimestamp{WallTime: 2},
		},
	}
	v3 := MVCCValue{
		MVCCValueHeader: enginepb.MVCCValueHeader{
			LocalTimestamp: hlc.ClockTimestamp{WallTime: 3},
		},
	}

	t2000 := hlc.Timestamp{WallTime: 2000}
	t4000 := hlc.Timestamp{WallTime: 4000}
	a, b, c, d := roachpb.Key("a"), roachpb.Key("b"), roachpb.Key("c"), roachpb.Key("d")

	// Lay down the three range keys from the diagram.
	require.NoError(t, eng.PutMVCCRangeKey(MVCCRangeKey{
		StartKey: a, EndKey: c, Timestamp: t4000,
	}, v1))
	require.NoError(t, eng.PutMVCCRangeKey(MVCCRangeKey{
		StartKey: a, EndKey: b, Timestamp: t2000,
	}, v2))
	require.NoError(t, eng.PutMVCCRangeKey(MVCCRangeKey{
		StartKey: b, EndKey: d, Timestamp: t2000,
	}, v3))

	var buf bytes.Buffer
	fmt.Fprintln(&buf, strings.TrimSpace(diagram))

	for _, tc := range []struct {
		name  string
		k, ek roachpb.Key
		ts    hlc.Timestamp
	}{
		{
			// Look up the exact rangedel.
			name: "ac-valid-full",
			k:    a,
			ek:   c,
			ts:   t4000,
		},
		{
			// Look up inside of the rangedel.
			name: "ac-valid-partial",
			k:    a.Next(),
			ek:   b,
			ts:   t4000,
		},
		{
			// Correct bounds, but incorrect timestamp,
			// will see part of ab and bd which are not compatible and error out.
			name: "ac-incompatible-fragments",
			k:    a,
			ek:   c,
			ts:   t2000,
		},
		{
			// Correct bounds, but timestamp too early.
			// Won't see anything and error out.
			name: "ac-ts-too-early",
			k:    a,
			ek:   b,
			ts:   t2000,
		},
		{
			// See ac but with a gap. Start key before rangedel starts. Errors out.
			name: "ac-invalid-pre",
			k:    roachpb.KeyMin,
			ek:   c,
			ts:   t4000,
		},
		{
			// Sees ac but with a gap. End key after rangedel end. Errors out.
			name: "ac-invalid-post",
			k:    a,
			ek:   d,
			ts:   t4000,
		},
		// Sees cd but wants it longer. Errors.
		{
			name: "cd-invalid-post",
			k:    c,
			ek:   roachpb.Key("f"),
			ts:   t2000,
		},
	} {
		fmt.Fprintf(&buf, "# %s\n", tc.name)
		fmt.Fprintf(&buf, "lookup([%s,%s) @ %d) = ", tc.k, tc.ek, tc.ts.WallTime)
		valBytes, err := MVCCLookupRangeKeyValue(context.Background(), eng, tc.k, tc.ek, tc.ts)
		if err != nil {
			fmt.Fprintln(&buf, err)
		} else {
			v, err := DecodeMVCCValue(valBytes)
			if err != nil {
				fmt.Fprintln(&buf, err)
			} else {
				// Report which value was found via its LocalTimestamp tag.
				fmt.Fprintf(&buf, "v%d\n", v.MVCCValueHeader.LocalTimestamp.WallTime)
			}
		}
	}
	path := datapathutils.TestDataPath(t, t.Name())
	echotest.Require(t, buf.String(), path)
}

// TestMVCCGetForKnownTimestampWithNoIntent verifies reads at an exactly-known
// timestamp, covering writes that live in a batch vs. the engine, reads above
// and below the write timestamp, and (per the case names) transactional writes
// whose intents are expected not to be observed by this read path.
func TestMVCCGetForKnownTimestampWithNoIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	for _, tc := range []struct {
		name         string
		key          roachpb.Key
		writeTs      hlc.Timestamp
		writeToBatch bool // write into a batch instead of directly to the engine
		writeWithTxn bool // write as a transactional intent
		val          string
		readTs       hlc.Timestamp
		errStr       string // expected error substring; empty means success
	}{
		{
			name:         "found-in-batch",
			key:          roachpb.Key("a"),
			writeTs:      hlc.Timestamp{WallTime: 5},
			writeToBatch: true,
			val:          "a-val",
			readTs:       hlc.Timestamp{WallTime: 5},
		},
		{
			name:         "found-in-engine",
			key:          roachpb.Key("b"),
			writeTs:      hlc.Timestamp{WallTime: 5},
			writeToBatch: false,
			val:          "b-val",
			readTs:       hlc.Timestamp{WallTime: 5},
		},
		{
			name:         "higher-timestamp-in-batch",
			key:          roachpb.Key("c"),
			writeTs:      hlc.Timestamp{WallTime: 5},
			writeToBatch: true,
			val:          "c-val",
			readTs:       hlc.Timestamp{WallTime: 4},
			errStr:       "value missing for key",
		},
		{
			name:         "higher-timestamp-in-engine",
			key:          roachpb.Key("d"),
			writeTs:      hlc.Timestamp{WallTime: 5},
			writeToBatch: false,
			val:          "d-val",
			readTs:       hlc.Timestamp{WallTime: 4},
			errStr:       "value missing for key",
		},
		{
			name:         "intent-in-batch-not-seen",
			key:          roachpb.Key("e"),
			writeTs:      hlc.Timestamp{WallTime: 5},
			writeToBatch: true,
			writeWithTxn: true,
			val:          "e-val",
			readTs:       hlc.Timestamp{WallTime: 5},
		},
		{
			name:         "intent-in-engine-not-seen",
			key:          roachpb.Key("f"),
			writeTs:      hlc.Timestamp{WallTime: 5},
			writeToBatch: false,
			writeWithTxn: true,
			val:          "f-val",
			readTs:       hlc.Timestamp{WallTime: 5},
		},
		{
			name:         "lower-timestamp-in-batch",
			key:          roachpb.Key("g"),
			writeTs:      hlc.Timestamp{WallTime: 5},
			writeToBatch: true,
			val:          "g-val",
			readTs:       hlc.Timestamp{WallTime: 6},
			errStr:       "expected timestamp 0.000000006,0 and found 0.000000005,0",
		},
		{
			name:         "lower-timestamp-in-engine",
			key:          roachpb.Key("h"),
			writeTs:      hlc.Timestamp{WallTime: 5},
			writeToBatch: false,
			val:          "h-val",
			readTs:       hlc.Timestamp{WallTime: 6},
			errStr:       "expected timestamp 0.000000006,0 and found 0.000000005,0",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			var rw ReadWriter
			rw = engine
			var batch Batch
			if tc.writeToBatch {
				batch = engine.NewBatch()
				defer batch.Close()
				rw = batch
			}
			val := roachpb.MakeValueFromString(tc.val)
			var txn *roachpb.Transaction
			if tc.writeWithTxn {
				txn = makeTxn(*txn1, tc.writeTs)
			}
			_, err := MVCCPut(ctx, rw, tc.key, tc.writeTs, val, MVCCWriteOptions{Txn: txn})
			require.NoError(t, err)
			// The read always goes through a batch; create an empty one if the
			// write went straight to the engine.
			if batch == nil {
				batch = engine.NewBatch()
				defer batch.Close()
			}
			v, _, err := MVCCGetForKnownTimestampWithNoIntent(
				ctx, batch, tc.key, tc.readTs, tc.writeToBatch)
			if len(tc.errStr) == 0 {
				b, err := v.GetBytes()
				require.NoError(t, err)
				require.Equal(t, tc.val, string(b))
				if tc.writeToBatch {
					// Also read without the batch-only optimization.
					v, _, err = MVCCGetForKnownTimestampWithNoIntent(
						ctx, batch, tc.key, tc.readTs, false)
					require.NoError(t, err)
					b, err = v.GetBytes()
					require.NoError(t, err)
					require.Equal(t, tc.val, string(b))
				}
			} else {
				require.ErrorContains(t, err, tc.errStr)
			}
		})
	}
}

// TestApproximateLockTableSize checks that the size estimate for a lock
// acquisition is an upper bound on the actual LockBytes accounted by
// MVCCAcquireLock.
func TestApproximateLockTableSize(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	engine := NewDefaultInMemForTesting()
	defer engine.Close()

	acq := roachpb.LockAcquisition{
		Span: roachpb.Span{
			Key: roachpb.Key("f"),
		},
		Txn:        txn1.TxnMeta,
		Durability: lock.Replicated,
		Strength:   lock.Exclusive,
	}
	batch := engine.NewBatch()
	defer batch.Close()
	stats := &enginepb.MVCCStats{}
	require.NoError(t, MVCCAcquireLock(ctx, batch,
		&acq.Txn, acq.IgnoredSeqNums, acq.Strength, acq.Key, stats, 0, 0, false,
	))
	require.GreaterOrEqual(t, ApproximateLockTableSize(&acq), stats.LockBytes)
}

// BenchmarkApproximateLockTableSize measures the cost of the size estimate
// itself; the running total prevents the call from being optimized away.
func BenchmarkApproximateLockTableSize(b *testing.B) {
	defer leaktest.AfterTest(b)()
	defer log.Scope(b).Close(b)

	acq := roachpb.LockAcquisition{
		Span: roachpb.Span{
			Key: roachpb.Key("f"),
		},
		Txn:        txn1.TxnMeta,
		Durability: lock.Replicated,
		Strength:   lock.Exclusive,
	}

	total := int64(0)
	for i := 0; i < b.N; i++ {
		total += ApproximateLockTableSize(&acq)
	}
	b.Logf("total: %d", total)
}
go
github
https://github.com/cockroachdb/cockroach
pkg/storage/mvcc_test.go
#!/usr/bin/env python # -*- coding: utf-8 -*- from time import sleep class MenuItem: def __init__(self, name, action): self.name = name self.action = action def get_text(self): return self.name def get_action(self): return self.action def set_lcd(self, lcd): self.lcd = lcd class Menu(MenuItem): item_prefix = "\x7e" def __init__(self, lcd, name): self.lcd = lcd self.name = name self.items = [] def clear_all_items(self): self.items = [] def set_item_prefix(self, prefix): self.item_prefix = prefix def add_item(self, item): self.items.append(item) def _open_menu(self): lcd = self.lcd items = self.items lcd.clear() lcd.cursor() lcd.blink() curpos = 0 item_count = len(items) while True: if curpos % 2 == 0: firstitem = items[curpos] if curpos < (item_count - 1): seconditem = items[curpos + 1] else: seconditem = MenuItem("", lambda: None) else: firstitem = items[curpos - 1] seconditem = items[curpos] lcd.clear() lcd.message(self.item_prefix) lcd.message(firstitem.get_text()) lcd.message("\n") lcd.message(self.item_prefix) lcd.message(seconditem.get_text()) lcd.setCursor(len(self.item_prefix) - 1, curpos % 2) nothingHappened = 0 btnWait = True while btnWait: if nothingHappened == 100: lcd.backlight(lcd.OFF) print ("Turning backlight off!") if lcd.buttonPressed(lcd.DOWN): lcd.backlight(lcd.ON) nothingHappened = 0 curpos = (curpos + 1) % item_count btnWait = False sleep(0.1) if lcd.buttonPressed(lcd.UP): lcd.backlight(lcd.ON) nothingHappened = 0 curpos = (curpos - 1) % item_count btnWait = False sleep(0.1) if lcd.buttonPressed(lcd.SELECT): lcd.backlight(lcd.ON) nothingHappened = 0 btnWait = False items[curpos].get_action()() sleep(0.1) nothingHappened = nothingHappened + 1 def get_action(self): return self._open_menu
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.

from __future__ import absolute_import, division, print_function

import logging

from flask import request
from flask_security import current_user
from elasticsearch import RequestError
from elasticsearch_dsl.query import Q

from invenio_search.api import DefaultFilter, RecordsSearch
from invenio_search import current_search_client as es

from inspirehep.modules.records.permissions import (
    all_restricted_collections,
    user_collections
)

from .query_factory import inspire_query_factory

logger = logging.getLogger(__name__)

# Callable that parses an INSPIRE-syntax query string into an ES DSL query.
IQ = inspire_query_factory()


class SearchMixin(object):
    """Mixin that adds helper functions to ElasticSearch DSL classes."""

    def query_from_iq(self, query_string):
        """Initialize ES DSL object using INSPIRE query parser.

        :param query_string: Query string as a user would input in INSPIRE's
            search box.
        :type query_string: string
        :returns: Elasticsearch DSL search class
        """
        return self.query(IQ(query_string, self))

    def get_source(self, uuid, **kwargs):
        """Get source from a given uuid.

        This function mimics the behaviour from the low level ES library
        get_source function.

        :param uuid: uuid of document to be retrieved.
        :type uuid: UUID
        :returns: dict
        """
        return es.get_source(
            index=self.Meta.index,
            doc_type=self.Meta.doc_types,
            id=uuid,
            **kwargs
        )

    def mget(self, uuids, **kwargs):
        """Get source from a list of uuids.

        :param uuids: uuids of documents to be retrieved.
        :type uuids: list of strings representing uuids
        :returns: list of JSON documents (empty on a malformed request)
        """
        results = []

        try:
            documents = es.mget(
                index=self.Meta.index,
                doc_type=self.Meta.doc_types,
                body={'ids': uuids},
                **kwargs
            )
            results = [document['_source'] for document in documents['docs']]
        except RequestError:
            # NOTE(review): a bad request is silently swallowed and an empty
            # list returned -- presumably deliberate best-effort behaviour,
            # but it hides malformed uuid lists; confirm before relying on it.
            pass

        return results


def inspire_filter():
    """Filter applied to all queries.

    Matches the collection requested via the ``cc`` parameter (default
    ``Literature``) and excludes every restricted collection the current
    user may not see. Superusers exclude nothing.

    NOTE(review): the ``for`` loop below reuses the name ``collection``,
    shadowing the requested-collection variable; harmless here because the
    base query is built first, but worth renaming.
    NOTE(review): when called outside a request context (``request`` falsy)
    this implicitly returns None -- confirm DefaultFilter tolerates that.
    """
    if request:
        collection = request.values.get('cc', 'Literature')
        user_roles = [r.name for r in current_user.roles]
        if 'superuser' in user_roles:
            # Superusers may see all restricted collections, so nothing is
            # subtracted from the exclusion set below.
            user_coll = all_restricted_collections
        else:
            user_coll = user_collections
        query = Q('match', _collections=collection)
        # Negate every restricted collection the user has no access to.
        for collection in list(all_restricted_collections - user_coll):
            query = query & ~Q('match', _collections=collection)
        return query


class LiteratureSearch(RecordsSearch, SearchMixin):
    """Elasticsearch-dsl specialized class to search in Literature database."""

    class Meta:
        index = 'records-hep'
        doc_types = 'hep'
        # Only Literature applies the per-user collection restrictions.
        default_filter = DefaultFilter(inspire_filter)

    def default_fields(self):
        """What fields to use when no keyword is specified."""
        return ['_all']


class AuthorsSearch(RecordsSearch, SearchMixin):
    """Elasticsearch-dsl specialized class to search in Authors database."""

    class Meta:
        index = 'records-authors'
        doc_types = 'authors'

    def default_fields(self):
        """What fields to use when no keyword is specified."""
        return ['_all']


class DataSearch(RecordsSearch, SearchMixin):
    """Elasticsearch-dsl specialized class to search in Data database."""

    class Meta:
        index = 'records-data'
        doc_types = 'data'

    def default_fields(self):
        """What fields to use when no keyword is specified."""
        return ['_all']


class ConferencesSearch(RecordsSearch, SearchMixin):
    """Elasticsearch-dsl specialized class to search in Conferences database."""

    class Meta:
        index = 'records-conferences'
        doc_types = 'conferences'

    def default_fields(self):
        """What fields to use when no keyword is specified."""
        return ['_all']


class JobsSearch(RecordsSearch, SearchMixin):
    """Elasticsearch-dsl specialized class to search in Jobs database."""

    class Meta:
        index = 'records-jobs'
        doc_types = 'jobs'

    def default_fields(self):
        """What fields to use when no keyword is specified."""
        return ['_all']


class InstitutionsSearch(RecordsSearch, SearchMixin):
    """Elasticsearch-dsl specialized class to search in Institutions database."""

    class Meta:
        index = 'records-institutions'
        doc_types = 'institutions'

    def default_fields(self):
        """What fields to use when no keyword is specified."""
        return ['_all']


class ExperimentsSearch(RecordsSearch, SearchMixin):
    """Elasticsearch-dsl specialized class to search in Experiments database."""

    class Meta:
        index = 'records-experiments'
        doc_types = 'experiments'

    def default_fields(self):
        """What fields to use when no keyword is specified."""
        return ['_all']


class JournalsSearch(RecordsSearch, SearchMixin):
    """Elasticsearch-dsl specialized class to search in Journals database."""

    class Meta:
        index = 'records-journals'
        doc_types = 'journals'

    def default_fields(self):
        """What fields to use when no keyword is specified."""
        return ['_all']
unknown
codeparrot/codeparrot-clean
class Waveband(dict):
    """Base class to define a waveband.

    A Waveband is a dict (kept empty by this module) that carries the IVOA
    UCD word identifying its spectral region in the ``ucd`` attribute.

    The constructor is generalized to accept the UCD word directly, so each
    subclass is a one-liner; constructing a bare ``Waveband()`` still yields
    ``ucd is None``, exactly as before.
    """

    def __init__(self, ucd=None):
        """Initialize the waveband.

        :param ucd: IVOA UCD word for this waveband (e.g. ``'em.radio'``),
            or None for the generic base class.
        """
        super(Waveband, self).__init__()
        self.ucd = ucd


class Radio(Waveband):
    """Radio waveband (UCD ``em.radio``)."""

    def __init__(self):
        super(Radio, self).__init__('em.radio')


class Millimeter(Waveband):
    """Millimeter waveband (UCD ``em.mm``)."""

    def __init__(self):
        super(Millimeter, self).__init__('em.mm')


class Infrared(Waveband):
    """Infrared waveband (UCD ``em.IR``)."""

    def __init__(self):
        super(Infrared, self).__init__('em.IR')


class Optical(Waveband):
    """Optical waveband (UCD ``em.opt``)."""

    def __init__(self):
        super(Optical, self).__init__('em.opt')


class Ultraviolet(Waveband):
    """Ultraviolet waveband (UCD ``em.UV``)."""

    def __init__(self):
        super(Ultraviolet, self).__init__('em.UV')


class Xray(Waveband):
    """X-ray waveband (UCD ``em.X-ray``)."""

    def __init__(self):
        super(Xray, self).__init__('em.X-ray')


class Gammaray(Waveband):
    """Gamma-ray waveband (UCD ``em.gamma``)."""

    def __init__(self):
        super(Gammaray, self).__init__('em.gamma')


# Wavebands available to search for catalogue data
# (for convenience I relate the UCD words used)
# For UCDs, take a look at http://www.ivoa.net/documents/latest/UCDlist.html
#
WAVEBANDS = {'radio': Radio(),
             'millimeter': Millimeter(),
             'infrared': Infrared(),
             'optical': Optical(),
             'uv': Ultraviolet(),
             'xray': Xray(),
             'gammaray': Gammaray()}

# Supported VO service protocols (Simple Cone Search / Simple Spectral
# Access); values are placeholders filled in elsewhere.
SERVICES = {'scs': None,
            'ssa': None}
unknown
codeparrot/codeparrot-clean
import pytest

from tests.support.asserts import assert_error, assert_success
from tests.support.inline import inline


def is_element_enabled(session, element_id):
    """Issue the WebDriver "Is Element Enabled" command for the given id."""
    endpoint = "session/{session_id}/element/{element_id}/enabled".format(
        session_id=session.session_id, element_id=element_id
    )
    return session.transport.send("GET", endpoint)


def test_no_browsing_context(session, closed_window):
    # Once the window is gone, the command must fail with "no such window".
    assert_error(is_element_enabled(session, "foo"), "no such window")


def test_element_stale(session):
    # An element reference from before a reload is reported as stale.
    session.url = inline("<input>")
    stale = session.find.css("input", all=False)
    session.refresh()
    assert_error(is_element_enabled(session, stale.id), "stale element reference")


@pytest.mark.parametrize("tag", ["button", "input", "select", "textarea"])
def test_form_control_disabled(session, tag):
    # A form control carrying the disabled attribute is not enabled.
    session.url = inline("<{} disabled/>".format(tag))
    control = session.find.css(tag, all=False)
    assert_success(is_element_enabled(session, control.id), False)


@pytest.mark.parametrize("tag", ["button", "input", "select", "textarea"])
def test_form_control_enabled(session, tag):
    # Without the attribute the control is enabled.
    session.url = inline("<{}/>".format(tag))
    control = session.find.css(tag, all=False)
    assert_success(is_element_enabled(session, control.id), True)


@pytest.mark.parametrize("tag", ["button", "input", "select", "textarea"])
def test_fieldset_disabled_descendant(session, tag):
    # A disabled fieldset disables its descendant controls.
    session.url = inline("<fieldset disabled><{}/></fieldset>".format(tag))
    control = session.find.css(tag, all=False)
    assert_success(is_element_enabled(session, control.id), False)


@pytest.mark.parametrize("tag", ["button", "input", "select", "textarea"])
def test_fieldset_enabled_descendant(session, tag):
    # An enabled fieldset leaves descendants enabled.
    session.url = inline("<fieldset><{}/></fieldset>".format(tag))
    control = session.find.css(tag, all=False)
    assert_success(is_element_enabled(session, control.id), True)


@pytest.mark.parametrize("tag", ["button", "input", "select", "textarea"])
def test_fieldset_disabled_descendant_legend(session, tag):
    # Controls inside the first <legend> escape the fieldset's disabled state.
    session.url = inline("<fieldset disabled><legend><{}/></legend></fieldset>".format(tag))
    control = session.find.css(tag, all=False)
    assert_success(is_element_enabled(session, control.id), True)


@pytest.mark.parametrize("tag", ["button", "input", "select", "textarea"])
def test_fieldset_enabled_descendant_legend(session, tag):
    # Legend descendants of an enabled fieldset are enabled too.
    session.url = inline("<fieldset><legend><{}/></legend></fieldset>".format(tag))
    control = session.find.css(tag, all=False)
    assert_success(is_element_enabled(session, control.id), True)


@pytest.mark.parametrize("tag", ["button", "input", "select", "textarea"])
def test_xhtml_form_control_disabled(session, tag):
    # XHTML spells the boolean attribute as disabled="disabled".
    session.url = inline("""<{} disabled="disabled"/>""".format(tag), doctype="xhtml")
    control = session.find.css(tag, all=False)
    assert_success(is_element_enabled(session, control.id), False)


@pytest.mark.parametrize("tag", ["button", "input", "select", "textarea"])
def test_xhtml_form_control_enabled(session, tag):
    session.url = inline("""<{}/>""".format(tag), doctype="xhtml")
    control = session.find.css(tag, all=False)
    assert_success(is_element_enabled(session, control.id), True)


def test_xml_always_not_enabled(session):
    # Elements in plain XML documents are never considered enabled.
    session.url = inline("""<note></note>""", doctype="xml")
    node = session.find.css("note", all=False)
    assert_success(is_element_enabled(session, node.id), False)
unknown
codeparrot/codeparrot-clean
/*
 * Copyright 2014-2022 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
 */

package io.ktor.tests.auth

import io.ktor.client.request.*
import io.ktor.http.*
import io.ktor.serialization.jackson.*
import io.ktor.server.auth.*
import io.ktor.server.plugins.contentnegotiation.*
import io.ktor.server.response.*
import io.ktor.server.routing.*
import io.ktor.server.testing.*
import kotlin.test.*

/**
 * Tests for the Authentication plugin interacting with other installed plugins.
 */
class AuthWithPlugins {
    /**
     * Posts a JSON body (no form fields) to a form-authenticated route while
     * ContentNegotiation with Jackson is installed, and verifies that the
     * failed form authentication triggers the configured challenge: a
     * 302 Found redirect to "/unauthorized".
     */
    @Test
    fun testFormAuthWithJackson() = testApplication {
        install(ContentNegotiation) {
            jackson()
        }
        install(Authentication) {
            form {
                // On failed authentication, redirect to this URL.
                challenge("/unauthorized")
                // Toy validator: accept credentials where name == password.
                validate { credentials ->
                    if (credentials.name == credentials.password) {
                        UserIdPrincipal(credentials.name)
                    } else {
                        null
                    }
                }
            }
        }
        routing {
            get("/unauthorized") {
                call.respond(HttpStatusCode.Unauthorized, "Unauthorized")
            }
            authenticate {
                post("/test") {
                    call.respondText("OK")
                }
            }
        }

        // A JSON body carries no form credentials, so authentication fails.
        val response = client.post("/test") {
            header(HttpHeaders.ContentType, ContentType.Application.Json)
            setBody("{}")
        }
        // Expect the challenge redirect rather than a content-negotiation error.
        assertEquals(HttpStatusCode.Found, response.status)
        val location = response.headers[HttpHeaders.Location] ?: fail("Location header is missing")
        assertEquals("/unauthorized", location)
    }
}
kotlin
github
https://github.com/ktorio/ktor
ktor-server/ktor-server-plugins/ktor-server-auth/jvm/test/io/ktor/tests/auth/AuthWithPlugins.kt
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import traceback
import os
import shutil
import subprocess
import select
import fcntl

import ansible.constants as C

from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase


class Connection(ConnectionBase):
    ''' Local based connections

    Runs commands on the control machine itself via subprocess and "copies"
    files with plain filesystem operations; there is no remote transport.
    '''

    @property
    def transport(self):
        ''' used to identify this connection object '''
        return 'local'

    def _connect(self, port=None):
        ''' connect to the local host; nothing to do here '''
        if not self._connected:
            self._display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user, host=self._play_context.remote_addr))
            self._connected = True
        return self

    def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True):
        ''' run a command on the local host

        Returns a 4-tuple (returncode, '', stdout, stderr); the second
        element is an unused placeholder kept for the ConnectionBase
        calling convention.
        '''
        super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable)

        self._display.debug("in local.exec_command()")
        # Pipelined module data over stdin is not supported by this plugin.
        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
        executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None

        self._display.vvv("{0} EXEC {1}".format(self._play_context.remote_addr, cmd))
        # FIXME: cwd= needs to be set to the basedir of the playbook
        self._display.debug("opening command with Popen()")
        p = subprocess.Popen(
            cmd,
            # A plain string is run through the shell; a list is exec'd directly.
            shell=isinstance(cmd, basestring),
            executable=executable,
            #cwd=...
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        self._display.debug("done running command with Popen()")

        if self._play_context.prompt and self._play_context.become_pass and sudoable:
            # Privilege escalation expects a password prompt: flip the child's
            # pipes to non-blocking so we can poll for the prompt with select().
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
            become_output = ''
            # Accumulate output until we either see escalation succeed or see
            # the password prompt itself.
            while not self.check_become_success(become_output) and not self.check_password_prompt(become_output):

                rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self._play_context.timeout)
                if p.stdout in rfd:
                    chunk = p.stdout.read()
                elif p.stderr in rfd:
                    chunk = p.stderr.read()
                else:
                    # select() timed out with no prompt: give up.
                    stdout, stderr = p.communicate()
                    raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + become_output)
                if not chunk:
                    # EOF before the prompt appeared.
                    stdout, stderr = p.communicate()
                    raise AnsibleError('privilege output closed while waiting for password prompt:\n' + become_output)
                become_output += chunk
            if not self.check_become_success(become_output):
                # We saw the prompt (not success): feed the password.
                p.stdin.write(self._play_context.become_pass + '\n')
            # Restore blocking mode before the final communicate().
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        self._display.debug("getting output with communicate()")
        stdout, stderr = p.communicate()
        self._display.debug("done communicating")

        self._display.debug("done with local.exec_command()")
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to local '''
        super(Connection, self).put_file(in_path, out_path)

        self._display.vvv("{0} PUT {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
        if not os.path.exists(in_path):
            raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            # shutil.Error here means source and destination are the same file.
            raise AnsibleError("failed to copy: {0} and {1} are the same".format(in_path, out_path))
        except IOError as e:
            raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, e))

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from local to local -- for copatibility '''
        super(Connection, self).fetch_file(in_path, out_path)

        self._display.vvv("{0} FETCH {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
        # On a local connection, fetching is just copying in the other direction.
        self.put_file(in_path, out_path)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        self._connected = False
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import logging
import argparse
import sys
from cairis.core.Borg import Borg
import cairis.core.BorgFactory
from cairis.core.dba import resetUser,accounts
from cairis.mio.ModelExport import exportPackage
from cairis.bin.cimport import package_import

__author__ = 'Shamal Faily'

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('reset_user')


def resetDatabase(cairisRoot, rPasswd, dbHost, dbPort, user, isReload):
    """Reset one CAIRIS user account, optionally preserving its data.

    :param cairisRoot: root directory of the CAIRIS installation.
    :param rPasswd: database root password.
    :param dbHost: database host name.
    :param dbPort: database port.
    :param user: email address identifying the user to reset.
    :param isReload: '1' to export the user's default database before the
        reset and re-import it afterwards; any other value drops the data.
    """
    cairis.core.BorgFactory.initialise(user=user, db='default')
    b = Borg()
    packageFile = '/tmp/' + user + '.cairis'
    if (isReload == '1'):
        logger.info('Exporting ' + user + ' default database')
        exportPackage(packageFile, b.dbProxy)
    logger.info('Resetting ' + user)
    resetUser(cairisRoot, rPasswd, dbHost, dbPort, user)
    # Re-initialise against the freshly reset account before re-importing.
    cairis.core.BorgFactory.initialise(user=user, db='default')
    if (isReload == '1'):
        logger.info('Re-importing ' + user + ' default database')
        # Use a context manager so the package file handle is always closed
        # (the original left the file object open).
        with open(packageFile, 'rb') as pkgFile:
            pkgStr = pkgFile.read()
        package_import(pkgStr)


def main():
    """Parse command line arguments and reset one user or every account."""
    parser = argparse.ArgumentParser(description='Computer Aided Integration of Requirements and Information Security - Reset CAIRIS user')
    parser.add_argument('user', help='Email address or all for all users')
    parser.add_argument('--reload', dest='isReload', help='If 1 is set, reload the contents of the default database', default='0')
    args = parser.parse_args()

    cairis.core.BorgFactory.dInitialise()
    b = Borg()
    if (args.user != 'all'):
        resetDatabase(b.cairisRoot, b.rPasswd, b.dbHost, b.dbPort, args.user, args.isReload)
    else:
        for email in accounts(b.cairisRoot, b.dbHost, b.dbPort):
            resetDatabase(b.cairisRoot, b.rPasswd, b.dbHost, b.dbPort, email, args.isReload)


if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        print('Fatal reset_cairis_user error: ' + str(e))
        sys.exit(-1)
unknown
codeparrot/codeparrot-clean
from __future__ import unicode_literals import datetime import re from itertools import chain from django.conf import settings from django.db import models from django.db.migrations import operations from django.db.migrations.migration import Migration from django.db.migrations.operations.models import AlterModelOptions from django.db.migrations.optimizer import MigrationOptimizer from django.db.migrations.questioner import MigrationQuestioner from django.utils import six from .topological_sort import stable_topological_sort class MigrationAutodetector(object): """ Takes a pair of ProjectStates, and compares them to see what the first would need doing to make it match the second (the second usually being the project's current state). Note that this naturally operates on entire projects at a time, as it's likely that changes interact (for example, you can't add a ForeignKey without having a migration to add the table it depends on first). A user interface may offer single-app usage if it wishes, with the caveat that it may not always be possible. """ def __init__(self, from_state, to_state, questioner=None): self.from_state = from_state self.to_state = to_state self.questioner = questioner or MigrationQuestioner() def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None): """ Main entry point to produce a list of appliable changes. Takes a graph to base names on and an optional set of apps to try and restrict to (restriction is not guaranteed) """ changes = self._detect_changes(convert_apps, graph) changes = self.arrange_for_graph(changes, graph, migration_name) if trim_to_apps: changes = self._trim_to_apps(changes, trim_to_apps) return changes def deep_deconstruct(self, obj): """ Recursive deconstruction for a field and its arguments. Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly. 
""" if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return { key: self.deep_deconstruct(value) for key, value in obj.items() } elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, 'deconstruct'): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], { key: self.deep_deconstruct(value) for key, value in kwargs.items() }, ) else: return obj def only_relation_agnostic_fields(self, fields): """ Return a definition of the fields that ignores field names and what related fields actually relate to. Used for detecting renames (as, of course, the related fields change during renames) """ fields_def = [] for name, field in sorted(fields): deconstruction = self.deep_deconstruct(field) if field.remote_field and field.remote_field.model: del deconstruction[2]['to'] fields_def.append(deconstruction) return fields_def def _detect_changes(self, convert_apps=None, graph=None): """ Returns a dict of migration plans which will achieve the change from from_state to to_state. The dict has app labels as keys and a list of migrations as values. The resulting migrations aren't specially named, but the names do matter for dependencies inside the set. convert_apps is the list of apps to convert to use migrations (i.e. to make initial migrations for, in the usual case) graph is an optional argument that, if provided, can help improve dependency generation and avoid potential circular dependencies. """ # The first phase is generating all the operations for each app # and gathering them into a big per-app list. 
# We'll then go through that list later and order it and split # into migrations to resolve dependencies caused by M2Ms and FKs. self.generated_operations = {} # Prepare some old/new state and model lists, separating # proxy models and ignoring unmigrated apps. self.old_apps = self.from_state.concrete_apps self.new_apps = self.to_state.apps self.old_model_keys = [] self.old_proxy_keys = [] self.old_unmanaged_keys = [] self.new_model_keys = [] self.new_proxy_keys = [] self.new_unmanaged_keys = [] for al, mn in sorted(self.from_state.models.keys()): model = self.old_apps.get_model(al, mn) if not model._meta.managed: self.old_unmanaged_keys.append((al, mn)) elif al not in self.from_state.real_apps: if model._meta.proxy: self.old_proxy_keys.append((al, mn)) else: self.old_model_keys.append((al, mn)) for al, mn in sorted(self.to_state.models.keys()): model = self.new_apps.get_model(al, mn) if not model._meta.managed: self.new_unmanaged_keys.append((al, mn)) elif ( al not in self.from_state.real_apps or (convert_apps and al in convert_apps) ): if model._meta.proxy: self.new_proxy_keys.append((al, mn)) else: self.new_model_keys.append((al, mn)) # Renames have to come first self.generate_renamed_models() # Prepare field lists, and prepare a list of the fields that used # through models in the old state so we can make dependencies # from the through model deletion to the field that uses it. 
self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys) self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys) self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys) self.through_users = {} self.old_field_keys = set() self.new_field_keys = set() for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields) self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields) # Through model map generation for app_label, model_name in sorted(self.old_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] for field_name, field in old_model_state.fields: old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name) if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and not old_field.remote_field.through._meta.auto_created): through_key = ( old_field.remote_field.through._meta.app_label, old_field.remote_field.through._meta.model_name, ) self.through_users[through_key] = (app_label, old_model_name, field_name) # Generate non-rename model operations self.generate_deleted_models() self.generate_created_models() self.generate_deleted_proxies() self.generate_created_proxies() self.generate_altered_options() self.generate_altered_managers() # Generate field operations self.generate_renamed_fields() self.generate_removed_fields() self.generate_added_fields() self.generate_altered_fields() self.generate_altered_unique_together() self.generate_altered_index_together() self.generate_altered_db_table() 
self.generate_altered_order_with_respect_to() # Now, reordering to make things possible. The order we have already # isn't bad, but we need to pull a few things around so FKs work nicely # inside the same app for app_label, ops in sorted(self.generated_operations.items()): # construct a dependency graph for intra-app dependencies dependency_graph = {op: set() for op in ops} for op in ops: for dep in op._auto_deps: if dep[0] == app_label: for op2 in ops: if self.check_dependency(op2, dep): dependency_graph[op].add(op2) # we use a stable sort for deterministic tests & general behavior self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph) # Now, we need to chop the lists of operations up into migrations with # dependencies on each other. # We do this by stepping up an app's list of operations until we # find one that has an outgoing dependency that isn't in another app's # migration yet (hasn't been chopped off its list). We then chop off the # operations before it into a migration and move onto the next app. # If we loop back around without doing anything, there's a circular # dependency (which _should_ be impossible as the operations are all # split at this point so they can't depend and be depended on) self.migrations = {} num_ops = sum(len(x) for x in self.generated_operations.values()) chop_mode = False while num_ops: # On every iteration, we step through all the apps and see if there # is a completed set of operations. # If we find that a subset of the operations are complete we can # try to chop it off from the rest and continue, but we only # do this if we've already been through the list once before # without any chopping and nothing has changed. 
for app_label in sorted(self.generated_operations.keys()): chopped = [] dependencies = set() for operation in list(self.generated_operations[app_label]): deps_satisfied = True operation_dependencies = set() for dep in operation._auto_deps: is_swappable_dep = False if dep[0] == "__setting__": # We need to temporarily resolve the swappable dependency to prevent # circular references. While keeping the dependency checks on the # resolved model we still add the swappable dependencies. # See #23322 resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.') original_dep = dep dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3]) is_swappable_dep = True if dep[0] != app_label and dep[0] != "__setting__": # External app dependency. See if it's not yet # satisfied. for other_operation in self.generated_operations.get(dep[0], []): if self.check_dependency(other_operation, dep): deps_satisfied = False break if not deps_satisfied: break else: if is_swappable_dep: operation_dependencies.add((original_dep[0], original_dep[1])) elif dep[0] in self.migrations: operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name)) else: # If we can't find the other app, we add a first/last dependency, # but only if we've already been through once and checked everything if chop_mode: # If the app already exists, we add a dependency on the last migration, # as we don't know which migration contains the target field. # If it's not yet migrated or has no migrations, we use __first__ if graph and graph.leaf_nodes(dep[0]): operation_dependencies.add(graph.leaf_nodes(dep[0])[0]) else: operation_dependencies.add((dep[0], "__first__")) else: deps_satisfied = False if deps_satisfied: chopped.append(operation) dependencies.update(operation_dependencies) self.generated_operations[app_label] = self.generated_operations[app_label][1:] else: break # Make a migration! 
Well, only if there's stuff to put in it if dependencies or chopped: if not self.generated_operations[app_label] or chop_mode: subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []}) instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label) instance.dependencies = list(dependencies) instance.operations = chopped self.migrations.setdefault(app_label, []).append(instance) chop_mode = False else: self.generated_operations[app_label] = chopped + self.generated_operations[app_label] new_num_ops = sum(len(x) for x in self.generated_operations.values()) if new_num_ops == num_ops: if not chop_mode: chop_mode = True else: raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations) num_ops = new_num_ops # OK, add in internal dependencies among the migrations for app_label, migrations in self.migrations.items(): for m1, m2 in zip(migrations, migrations[1:]): m2.dependencies.append((app_label, m1.name)) # De-dupe dependencies for app_label, migrations in self.migrations.items(): for migration in migrations: migration.dependencies = list(set(migration.dependencies)) # Optimize migrations for app_label, migrations in self.migrations.items(): for migration in migrations: migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label) return self.migrations def check_dependency(self, operation, dependency): """ Returns ``True`` if the given operation depends on the given dependency, ``False`` otherwise. 
""" # Created model if dependency[2] is None and dependency[3] is True: return ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() ) # Created field elif dependency[2] is not None and dependency[3] is True: return ( ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() and any(dependency[2] == x for x, y in operation.fields) ) or ( isinstance(operation, operations.AddField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) ) # Removed field elif dependency[2] is not None and dependency[3] is False: return ( isinstance(operation, operations.RemoveField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # Removed model elif dependency[2] is None and dependency[3] is False: return ( isinstance(operation, operations.DeleteModel) and operation.name_lower == dependency[1].lower() ) # Field being altered elif dependency[2] is not None and dependency[3] == "alter": return ( isinstance(operation, operations.AlterField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # order_with_respect_to being unset for a field elif dependency[2] is not None and dependency[3] == "order_wrt_unset": return ( isinstance(operation, operations.AlterOrderWithRespectTo) and operation.name_lower == dependency[1].lower() and (operation.order_with_respect_to or "").lower() != dependency[2].lower() ) # Field is removed and part of an index/unique_together elif dependency[2] is not None and dependency[3] == "foo_together_change": return ( isinstance(operation, (operations.AlterUniqueTogether, operations.AlterIndexTogether)) and operation.name_lower == dependency[1].lower() ) # Unknown dependency. Raise an error. 
else: raise ValueError("Can't handle dependency %r" % (dependency, )) def add_operation(self, app_label, operation, dependencies=None, beginning=False): # Dependencies are (app_label, model_name, field_name, create/delete as True/False) operation._auto_deps = dependencies or [] if beginning: self.generated_operations.setdefault(app_label, []).insert(0, operation) else: self.generated_operations.setdefault(app_label, []).append(operation) def swappable_first_key(self, item): """ Sorting key function that places potential swappable models first in lists of created models (only real way to solve #22783) """ try: model = self.new_apps.get_model(item[0], item[1]) base_names = [base.__name__ for base in model.__bases__] string_version = "%s.%s" % (item[0], item[1]) if ( model._meta.swappable or "AbstractUser" in base_names or "AbstractBaseUser" in base_names or settings.AUTH_USER_MODEL.lower() == string_version.lower() ): return ("___" + item[0], "___" + item[1]) except LookupError: pass return item def generate_renamed_models(self): """ Finds any renamed models, and generates the operations for them, and removes the old entry from the model lists. Must be run before other model-level generation. 
""" self.renamed_models = {} self.renamed_models_rel = {} added_models = set(self.new_model_keys) - set(self.old_model_keys) for app_label, model_name in sorted(added_models): model_state = self.to_state.models[app_label, model_name] model_fields_def = self.only_relation_agnostic_fields(model_state.fields) removed_models = set(self.old_model_keys) - set(self.new_model_keys) for rem_app_label, rem_model_name in removed_models: if rem_app_label == app_label: rem_model_state = self.from_state.models[rem_app_label, rem_model_name] rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields) if model_fields_def == rem_model_fields_def: if self.questioner.ask_rename_model(rem_model_state, model_state): self.add_operation( app_label, operations.RenameModel( old_name=rem_model_state.name, new_name=model_state.name, ) ) self.renamed_models[app_label, model_name] = rem_model_name self.renamed_models_rel['%s.%s' % (rem_model_state.app_label, rem_model_state.name)] = '%s.%s' % (model_state.app_label, model_state.name) self.old_model_keys.remove((rem_app_label, rem_model_name)) self.old_model_keys.append((app_label, model_name)) break def generate_created_models(self): """ Find all new models (both managed and unmanaged) and make create operations for them as well as separate operations to create any foreign key or M2M relationships (we'll optimize these back in later if we can). We also defer any model options that refer to collections of fields that might be deferred (e.g. unique_together, index_together). 
""" old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys) added_models = set(self.new_model_keys) - old_keys added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys all_added_models = chain( sorted(added_models, key=self.swappable_first_key, reverse=True), sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True) ) for app_label, model_name in all_added_models: model_state = self.to_state.models[app_label, model_name] model_opts = self.new_apps.get_model(app_label, model_name)._meta # Gather related fields related_fields = {} primary_key_rel = None for field in model_opts.local_fields: if field.remote_field: if field.remote_field.model: if field.primary_key: primary_key_rel = field.remote_field.model elif not field.remote_field.parent_link: related_fields[field.name] = field # through will be none on M2Ms on swapped-out models; # we can treat lack of through as auto_created=True, though. if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: related_fields[field.name] = field for field in model_opts.local_many_to_many: if field.remote_field.model: related_fields[field.name] = field if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: related_fields[field.name] = field # Are there unique/index_together to defer? unique_together = model_state.options.pop('unique_together', None) index_together = model_state.options.pop('index_together', None) order_with_respect_to = model_state.options.pop('order_with_respect_to', None) # Depend on the deletion of any possible proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, six.string_types) and "." 
in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Depend on the other end of the primary key if it's a relation if primary_key_rel: dependencies.append(( primary_key_rel._meta.app_label, primary_key_rel._meta.object_name, None, True )) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[d for d in model_state.fields if d[0] not in related_fields], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), dependencies=dependencies, beginning=True, ) # Don't add operations which modify the database for unmanaged models if not model_opts.managed: continue # Generate operations for each related field for name, field in sorted(related_fields.items()): # Account for FKs to swappable models swappable_setting = getattr(field, 'swappable_setting', None) if swappable_setting is not None: dep_app_label = "__setting__" dep_object_name = swappable_setting else: dep_app_label = field.remote_field.model._meta.app_label dep_object_name = field.remote_field.model._meta.object_name dependencies = [(dep_app_label, dep_object_name, None, True)] if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: dependencies.append(( field.remote_field.through._meta.app_label, field.remote_field.through._meta.object_name, None, True )) # Depend on our own model being created dependencies.append((app_label, model_name, None, True)) # Make operation self.add_operation( app_label, operations.AddField( model_name=model_name, name=name, field=field, ), dependencies=list(set(dependencies)), ) # Generate other opns related_dependencies = [ (app_label, model_name, name, True) for name, field in sorted(related_fields.items()) ] related_dependencies.append((app_label, model_name, None, True)) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, 
unique_together=unique_together, ), dependencies=related_dependencies ) if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=index_together, ), dependencies=related_dependencies ) if order_with_respect_to: self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=order_with_respect_to, ), dependencies=[ (app_label, model_name, order_with_respect_to, True), (app_label, model_name, None, True), ] ) def generate_created_proxies(self): """ Makes CreateModel statements for proxy models. We use the same statements as that way there's less code duplication, but of course for proxy models we can skip all that pointless field stuff and just chuck out an operation. """ added = set(self.new_proxy_keys) - set(self.old_proxy_keys) for app_label, model_name in sorted(added): model_state = self.to_state.models[app_label, model_name] assert model_state.options.get("proxy") # Depend on the deletion of any possible non-proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, six.string_types) and "." in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), # Depend on the deletion of any possible non-proxy version of us dependencies=dependencies, ) def generate_deleted_models(self): """ Find all deleted models (managed and unmanaged) and make delete operations for them as well as separate operations to delete any foreign key or M2M relationships (we'll optimize these back in later if we can). We also bring forward removal of any model options that refer to collections of fields - the inverse of generate_created_models(). 
""" new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys) deleted_models = set(self.old_model_keys) - new_keys deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models)) for app_label, model_name in all_deleted_models: model_state = self.from_state.models[app_label, model_name] model = self.old_apps.get_model(app_label, model_name) if not model._meta.managed: # Skip here, no need to handle fields for unmanaged models continue # Gather related fields related_fields = {} for field in model._meta.local_fields: if field.remote_field: if field.remote_field.model: related_fields[field.name] = field # through will be none on M2Ms on swapped-out models; # we can treat lack of through as auto_created=True, though. if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: related_fields[field.name] = field for field in model._meta.local_many_to_many: if field.remote_field.model: related_fields[field.name] = field if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: related_fields[field.name] = field # Generate option removal first unique_together = model_state.options.pop('unique_together', None) index_together = model_state.options.pop('index_together', None) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=None, ) ) if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=None, ) ) # Then remove each related field for name, field in sorted(related_fields.items()): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=name, ) ) # Finally, remove the model. 
# This depends on both the removal/alteration of all incoming fields # and the removal of all its own related fields, and if it's # a through model the field that references it. dependencies = [] for related_object in model._meta.related_objects: related_object_app_label = related_object.related_model._meta.app_label object_name = related_object.related_model._meta.object_name field_name = related_object.field.name dependencies.append((related_object_app_label, object_name, field_name, False)) if not related_object.many_to_many: dependencies.append((related_object_app_label, object_name, field_name, "alter")) for name, field in sorted(related_fields.items()): dependencies.append((app_label, model_name, name, False)) # We're referenced in another field's through= through_user = self.through_users.get((app_label, model_state.name_lower)) if through_user: dependencies.append((through_user[0], through_user[1], through_user[2], False)) # Finally, make the operation, deduping any dependencies self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), dependencies=list(set(dependencies)), ) def generate_deleted_proxies(self): """ Makes DeleteModel statements for proxy models. """ deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys) for app_label, model_name in sorted(deleted): model_state = self.from_state.models[app_label, model_name] assert model_state.options.get("proxy") self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), ) def generate_renamed_fields(self): """ Works out renamed fields """ self.renamed_fields = {} for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) # Scan to see if this is actually a rename! 
field_dec = self.deep_deconstruct(field) for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys): if rem_app_label == app_label and rem_model_name == model_name: old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name)) if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]: old_rel_to = old_field_dec[2]['to'] if old_rel_to in self.renamed_models_rel: old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to] if old_field_dec == field_dec: if self.questioner.ask_rename(model_name, rem_field_name, field_name, field): self.add_operation( app_label, operations.RenameField( model_name=model_name, old_name=rem_field_name, new_name=field_name, ) ) self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name)) self.old_field_keys.add((app_label, model_name, field_name)) self.renamed_fields[app_label, model_name, field_name] = rem_field_name break def generate_added_fields(self): """ Fields that have been added """ for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys): self._generate_added_field(app_label, model_name, field_name) def _generate_added_field(self, app_label, model_name, field_name): field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) # Fields that are foreignkeys/m2ms depend on stuff dependencies = [] if field.remote_field and field.remote_field.model: # Account for FKs to swappable models swappable_setting = getattr(field, 'swappable_setting', None) if swappable_setting is not None: dep_app_label = "__setting__" dep_object_name = swappable_setting else: dep_app_label = field.remote_field.model._meta.app_label dep_object_name = field.remote_field.model._meta.object_name dependencies = [(dep_app_label, dep_object_name, None, True)] if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: dependencies.append(( 
field.remote_field.through._meta.app_label, field.remote_field.through._meta.object_name, None, True, )) # You can't just add NOT NULL fields with no default or fields # which don't allow empty strings as default. preserve_default = True if (not field.null and not field.has_default() and not isinstance(field, models.ManyToManyField) and not (field.blank and field.empty_strings_allowed)): field = field.clone() field.default = self.questioner.ask_not_null_addition(field_name, model_name) preserve_default = False self.add_operation( app_label, operations.AddField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ), dependencies=dependencies, ) def generate_removed_fields(self): """ Fields that have been removed. """ for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys): self._generate_removed_field(app_label, model_name, field_name) def _generate_removed_field(self, app_label, model_name, field_name): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=field_name, ), # We might need to depend on the removal of an # order_with_respect_to or index/unique_together operation; # this is safely ignored if there isn't one dependencies=[ (app_label, model_name, field_name, "order_wrt_unset"), (app_label, model_name, field_name, "foo_together_change"), ], ) def generate_altered_fields(self): """ Fields that have been altered. """ for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)): # Did the field change? 
old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name) old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name) new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) # Implement any model renames on relations; these are handled by RenameModel # so we need to exclude them from the comparison if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None): rename_key = ( new_field.remote_field.model._meta.app_label, new_field.remote_field.model._meta.model_name, ) if rename_key in self.renamed_models: new_field.remote_field.model = old_field.remote_field.model old_field_dec = self.deep_deconstruct(old_field) new_field_dec = self.deep_deconstruct(new_field) if old_field_dec != new_field_dec: both_m2m = ( isinstance(old_field, models.ManyToManyField) and isinstance(new_field, models.ManyToManyField) ) neither_m2m = ( not isinstance(old_field, models.ManyToManyField) and not isinstance(new_field, models.ManyToManyField) ) if both_m2m or neither_m2m: # Either both fields are m2m or neither is preserve_default = True if (old_field.null and not new_field.null and not new_field.has_default() and not isinstance(new_field, models.ManyToManyField)): field = new_field.clone() new_default = self.questioner.ask_not_null_alteration(field_name, model_name) if new_default is not models.NOT_PROVIDED: field.default = new_default preserve_default = False else: field = new_field self.add_operation( app_label, operations.AlterField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ) ) else: # We cannot alter between m2m and concrete fields self._generate_removed_field(app_label, model_name, field_name) self._generate_added_field(app_label, model_name, field_name) def _generate_altered_foo_together(self, operation): option_name = 
operation.option_name for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] # We run the old version through the field renames to account for those old_value = old_model_state.options.get(option_name) or set() if old_value: old_value = { tuple( self.renamed_fields.get((app_label, model_name, n), n) for n in unique ) for unique in old_value } new_value = new_model_state.options.get(option_name) or set() if new_value: new_value = set(new_value) if old_value != new_value: self.add_operation( app_label, operation( name=model_name, **{option_name: new_value} ) ) def generate_altered_unique_together(self): self._generate_altered_foo_together(operations.AlterUniqueTogether) def generate_altered_index_together(self): self._generate_altered_foo_together(operations.AlterIndexTogether) def generate_altered_db_table(self): models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_db_table_name = old_model_state.options.get('db_table') new_db_table_name = new_model_state.options.get('db_table') if old_db_table_name != new_db_table_name: self.add_operation( app_label, operations.AlterModelTable( name=model_name, table=new_db_table_name, ) ) def generate_altered_options(self): """ Works out if any non-schema-affecting options have changed and makes an operation to represent them in state changes (in case Python code in migrations needs them) """ models_to_check = self.kept_model_keys.union( self.kept_proxy_keys ).union( self.kept_unmanaged_keys ).union( # unmanaged 
converted to managed set(self.old_unmanaged_keys).intersection(self.new_model_keys) ).union( # managed converted to unmanaged set(self.old_model_keys).intersection(self.new_unmanaged_keys) ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_options = dict( option for option in old_model_state.options.items() if option[0] in AlterModelOptions.ALTER_OPTION_KEYS ) new_options = dict( option for option in new_model_state.options.items() if option[0] in AlterModelOptions.ALTER_OPTION_KEYS ) if old_options != new_options: self.add_operation( app_label, operations.AlterModelOptions( name=model_name, options=new_options, ) ) def generate_altered_order_with_respect_to(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] if (old_model_state.options.get("order_with_respect_to") != new_model_state.options.get("order_with_respect_to")): # Make sure it comes second if we're adding # (removal dependency is part of RemoveField) dependencies = [] if new_model_state.options.get("order_with_respect_to"): dependencies.append(( app_label, model_name, new_model_state.options["order_with_respect_to"], True, )) # Actually generate the operation self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=new_model_state.options.get('order_with_respect_to'), ), dependencies=dependencies, ) def generate_altered_managers(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, 
old_model_name] new_model_state = self.to_state.models[app_label, model_name] if old_model_state.managers != new_model_state.managers: self.add_operation( app_label, operations.AlterModelManagers( name=model_name, managers=new_model_state.managers, ) ) def arrange_for_graph(self, changes, graph, migration_name=None): """ Takes in a result from changes() and a MigrationGraph, and fixes the names and dependencies of the changes so they extend the graph from the leaf nodes for each app. """ leaves = graph.leaf_nodes() name_map = {} for app_label, migrations in list(changes.items()): if not migrations: continue # Find the app label's current leaf node app_leaf = None for leaf in leaves: if leaf[0] == app_label: app_leaf = leaf break # Do they want an initial migration for this app? if app_leaf is None and not self.questioner.ask_initial(app_label): # They don't. for migration in migrations: name_map[(app_label, migration.name)] = (app_label, "__first__") del changes[app_label] continue # Work out the next number in the sequence if app_leaf is None: next_number = 1 else: next_number = (self.parse_number(app_leaf[1]) or 0) + 1 # Name each migration for i, migration in enumerate(migrations): if i == 0 and app_leaf: migration.dependencies.append(app_leaf) if i == 0 and not app_leaf: new_name = "0001_%s" % migration_name if migration_name else "0001_initial" else: new_name = "%04i_%s" % ( next_number, migration_name or self.suggest_name(migration.operations)[:100], ) name_map[(app_label, migration.name)] = (app_label, new_name) next_number += 1 migration.name = new_name # Now fix dependencies for app_label, migrations in changes.items(): for migration in migrations: migration.dependencies = [name_map.get(d, d) for d in migration.dependencies] return changes def _trim_to_apps(self, changes, app_labels): """ Takes changes from arrange_for_graph and set of app labels and returns a modified set of changes which trims out as many migrations that are not in app_labels as 
possible. Note that some other migrations may still be present, as they may be required dependencies. """ # Gather other app dependencies in a first pass app_dependencies = {} for app_label, migrations in changes.items(): for migration in migrations: for dep_app_label, name in migration.dependencies: app_dependencies.setdefault(app_label, set()).add(dep_app_label) required_apps = set(app_labels) # Keep resolving till there's no change old_required_apps = None while old_required_apps != required_apps: old_required_apps = set(required_apps) for app_label in list(required_apps): required_apps.update(app_dependencies.get(app_label, set())) # Remove all migrations that aren't needed for app_label in list(changes.keys()): if app_label not in required_apps: del changes[app_label] return changes @classmethod def suggest_name(cls, ops): """ Given a set of operations, suggests a name for the migration they might represent. Names are not guaranteed to be unique, but we put some effort in to the fallback name to avoid VCS conflicts if we can. """ if len(ops) == 1: if isinstance(ops[0], operations.CreateModel): return ops[0].name_lower elif isinstance(ops[0], operations.DeleteModel): return "delete_%s" % ops[0].name_lower elif isinstance(ops[0], operations.AddField): return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower) elif isinstance(ops[0], operations.RemoveField): return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower) elif len(ops) > 1: if all(isinstance(o, operations.CreateModel) for o in ops): return "_".join(sorted(o.name_lower for o in ops)) return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M") @classmethod def parse_number(cls, name): """ Given a migration name, tries to extract a number from the beginning of it. If no number found, returns None. """ if re.match(r"^\d+_", name): return int(name.split("_")[0]) return None
unknown
codeparrot/codeparrot-clean
""" Wrapper class that takes a list of template loaders as an argument and attempts to load templates from them in order, caching the result. """ import hashlib from django.template.base import TemplateDoesNotExist from django.template.loader import BaseLoader, get_template_from_string, find_template_loader, make_origin from django.utils.encoding import force_bytes class Loader(BaseLoader): is_usable = True def __init__(self, loaders): self.template_cache = {} self._loaders = loaders self._cached_loaders = [] @property def loaders(self): # Resolve loaders on demand to avoid circular imports if not self._cached_loaders: # Set self._cached_loaders atomically. Otherwise, another thread # could see an incomplete list. See #17303. cached_loaders = [] for loader in self._loaders: cached_loaders.append(find_template_loader(loader)) self._cached_loaders = cached_loaders return self._cached_loaders def find_template(self, name, dirs=None): for loader in self.loaders: try: template, display_name = loader(name, dirs) return (template, make_origin(display_name, loader, name, dirs)) except TemplateDoesNotExist: pass raise TemplateDoesNotExist(name) def load_template(self, template_name, template_dirs=None): key = template_name if template_dirs: # If template directories were specified, use a hash to differentiate key = '-'.join([template_name, hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()]) if key not in self.template_cache: template, origin = self.find_template(template_name, template_dirs) if not hasattr(template, 'render'): try: template = get_template_from_string(template, origin, template_name) except TemplateDoesNotExist: # If compiling the template we found raises TemplateDoesNotExist, # back off to returning the source and display name for the template # we were asked to load. This allows for correct identification (later) # of the actual template that does not exist. 
return template, origin self.template_cache[key] = template return self.template_cache[key], None def reset(self): "Empty the template cache." self.template_cache.clear()
unknown
codeparrot/codeparrot-clean
from future import standard_library
standard_library.install_aliases()
from builtins import str
import logging
from queue import Queue

import mesos.interface
from mesos.interface import mesos_pb2
import mesos.native

from airflow import configuration
from airflow.executors.base_executor import BaseExecutor
from airflow.settings import Session
from airflow.utils.state import State
from airflow.exceptions import AirflowException

# Fallback framework name when [mesos] FRAMEWORK_NAME is not configured.
DEFAULT_FRAMEWORK_NAME = 'Airflow'
# Prefix for the Connection row used to persist the Mesos framework ID.
FRAMEWORK_CONNID_PREFIX = 'mesos_framework_'


def get_framework_name():
    """Return the configured [mesos] FRAMEWORK_NAME, or the default."""
    if not configuration.get('mesos', 'FRAMEWORK_NAME'):
        return DEFAULT_FRAMEWORK_NAME
    return configuration.get('mesos', 'FRAMEWORK_NAME')


# AirflowMesosScheduler, implements Mesos Scheduler interface
# To schedule airflow jobs on mesos
class AirflowMesosScheduler(mesos.interface.Scheduler):
    """
    Airflow Mesos scheduler implements mesos scheduler interface
    to schedule airflow tasks on mesos.
    Basically, it schedules a command like
    'airflow run <dag_id> <task_instance_id> <start_date> --local -p=<pickle>'
    to run on a mesos slave.
    """

    def __init__(self, task_queue, result_queue, task_cpu=1, task_mem=256):
        # task_queue yields (key, command) pairs to launch; result_queue
        # receives (key, State) pairs consumed by MesosExecutor.sync().
        self.task_queue = task_queue
        self.result_queue = result_queue
        # Per-task resource requests (cpus / MB of memory).
        self.task_cpu = task_cpu
        self.task_mem = task_mem
        # Monotonically increasing mesos task id.
        self.task_counter = 0
        # Maps mesos task id (as str) -> airflow task key.
        self.task_key_map = {}

    def registered(self, driver, frameworkId, masterInfo):
        """Persist the framework ID so a restarted scheduler can fail over
        to its still-running tasks (only when checkpointing is enabled)."""
        logging.info("AirflowScheduler registered to mesos with framework ID %s", frameworkId.value)

        if configuration.getboolean('mesos', 'CHECKPOINT') and configuration.get('mesos', 'FAILOVER_TIMEOUT'):
            # Import here to work around a circular import error
            from airflow.models import Connection

            # Update the Framework ID in the database.
            # NOTE(review): Session is a SQLAlchemy scoped_session, so
            # Session.query(...) / Session.remove() on the class are proxied
            # to the same thread-local session as the `session` instance
            # created below — confirm this matches airflow.settings.
            session = Session()
            conn_id = FRAMEWORK_CONNID_PREFIX + get_framework_name()
            connection = Session.query(Connection).filter_by(conn_id=conn_id).first()
            if connection is None:
                connection = Connection(conn_id=conn_id, conn_type='mesos_framework-id',
                                        extra=frameworkId.value)
            else:
                connection.extra = frameworkId.value

            session.add(connection)
            session.commit()
            Session.remove()

    def reregistered(self, driver, masterInfo):
        logging.info("AirflowScheduler re-registered to mesos")

    def disconnected(self, driver):
        logging.info("AirflowScheduler disconnected from mesos")

    def offerRescinded(self, driver, offerId):
        logging.info("AirflowScheduler offer %s rescinded", str(offerId))

    def frameworkMessage(self, driver, executorId, slaveId, message):
        logging.info("AirflowScheduler received framework message %s", message)

    def executorLost(self, driver, executorId, slaveId, status):
        logging.warning("AirflowScheduler executor %s lost", str(executorId))

    def slaveLost(self, driver, slaveId):
        logging.warning("AirflowScheduler slave %s lost", str(slaveId))

    def error(self, driver, message):
        # A driver abort is unrecoverable; surface it as an executor failure.
        logging.error("AirflowScheduler driver aborted %s", message)
        raise AirflowException("AirflowScheduler driver aborted %s" % message)

    def resourceOffers(self, driver, offers):
        """Greedily pack queued airflow tasks into each resource offer."""
        for offer in offers:
            tasks = []
            offerCpus = 0
            offerMem = 0
            # Sum the scalar resources contained in this offer.
            for resource in offer.resources:
                if resource.name == "cpus":
                    offerCpus += resource.scalar.value
                elif resource.name == "mem":
                    offerMem += resource.scalar.value

            logging.info("Received offer %s with cpus: %s and mem: %s", offer.id.value, offerCpus, offerMem)

            remainingCpus = offerCpus
            remainingMem = offerMem

            # Launch queued tasks while the offer can still fit one more.
            while (not self.task_queue.empty()) and \
                    remainingCpus >= self.task_cpu and \
                    remainingMem >= self.task_mem:
                key, cmd = self.task_queue.get()
                tid = self.task_counter
                self.task_counter += 1
                # Remember the mapping so statusUpdate() can report back.
                self.task_key_map[str(tid)] = key

                logging.info("Launching task %d using offer %s", tid, offer.id.value)

                task = mesos_pb2.TaskInfo()
                task.task_id.value = str(tid)
                task.slave_id.value = offer.slave_id.value
                task.name = "AirflowTask %d" % tid

                cpus = task.resources.add()
                cpus.name = "cpus"
                cpus.type = mesos_pb2.Value.SCALAR
                cpus.scalar.value = self.task_cpu

                mem = task.resources.add()
                mem.name = "mem"
                mem.type = mesos_pb2.Value.SCALAR
                mem.scalar.value = self.task_mem

                # The airflow command line is run through a shell on the slave.
                command = mesos_pb2.CommandInfo()
                command.shell = True
                command.value = cmd
                task.command.MergeFrom(command)

                tasks.append(task)

                remainingCpus -= self.task_cpu
                remainingMem -= self.task_mem

            driver.launchTasks(offer.id, tasks)

    def statusUpdate(self, driver, update):
        """Translate terminal mesos task states into airflow result states."""
        logging.info("Task %s is in state %s, data %s",
                     update.task_id.value, mesos_pb2.TaskState.Name(update.state), str(update.data))

        try:
            key = self.task_key_map[update.task_id.value]
        except KeyError:
            # The map may not contain an item if the framework re-registered after a failover.
            # Discard these tasks.
            logging.warn("Unrecognised task key %s" % update.task_id.value)
            return

        if update.state == mesos_pb2.TASK_FINISHED:
            self.result_queue.put((key, State.SUCCESS))
            self.task_queue.task_done()

        if update.state == mesos_pb2.TASK_LOST or \
           update.state == mesos_pb2.TASK_KILLED or \
           update.state == mesos_pb2.TASK_FAILED:
            self.result_queue.put((key, State.FAILED))
            self.task_queue.task_done()


class MesosExecutor(BaseExecutor):
    """
    MesosExecutor allows distributing the execution of task
    instances to multiple mesos workers.

    Apache Mesos is a distributed systems kernel which abstracts
    CPU, memory, storage, and other compute resources away from
    machines (physical or virtual), enabling fault-tolerant and
    elastic distributed systems to easily be built and run
    effectively. See http://mesos.apache.org/
    """

    def start(self):
        """Build the framework description and start the scheduler driver."""
        self.task_queue = Queue()
        self.result_queue = Queue()
        framework = mesos_pb2.FrameworkInfo()
        framework.user = ''

        if not configuration.get('mesos', 'MASTER'):
            logging.error("Expecting mesos master URL for mesos executor")
            raise AirflowException("mesos.master not provided for mesos executor")

        master = configuration.get('mesos', 'MASTER')

        framework.name = get_framework_name()

        # Per-task resource requests, defaulting when unset.
        if not configuration.get('mesos', 'TASK_CPU'):
            task_cpu = 1
        else:
            task_cpu = configuration.getint('mesos', 'TASK_CPU')

        if not configuration.get('mesos', 'TASK_MEMORY'):
            task_memory = 256
        else:
            task_memory = configuration.getint('mesos', 'TASK_MEMORY')

        if configuration.getboolean('mesos', 'CHECKPOINT'):
            framework.checkpoint = True

            if configuration.get('mesos', 'FAILOVER_TIMEOUT'):
                # Import here to work around a circular import error
                from airflow.models import Connection

                # Query the database to get the ID of the Mesos Framework, if available.
                conn_id = FRAMEWORK_CONNID_PREFIX + framework.name
                session = Session()
                connection = session.query(Connection).filter_by(conn_id=conn_id).first()
                if connection is not None:
                    # Set the Framework ID to let the scheduler reconnect with running tasks.
                    framework.id.value = connection.extra

                framework.failover_timeout = configuration.getint('mesos', 'FAILOVER_TIMEOUT')
        else:
            framework.checkpoint = False

        logging.info('MesosFramework master : %s, name : %s, cpu : %s, mem : %s, checkpoint : %s',
                     master, framework.name, str(task_cpu), str(task_memory), str(framework.checkpoint))

        implicit_acknowledgements = 1

        if configuration.getboolean('mesos', 'AUTHENTICATE'):
            if not configuration.get('mesos', 'DEFAULT_PRINCIPAL'):
                logging.error("Expecting authentication principal in the environment")
                raise AirflowException("mesos.default_principal not provided in authenticated mode")
            if not configuration.get('mesos', 'DEFAULT_SECRET'):
                logging.error("Expecting authentication secret in the environment")
                raise AirflowException("mesos.default_secret not provided in authenticated mode")

            credential = mesos_pb2.Credential()
            credential.principal = configuration.get('mesos', 'DEFAULT_PRINCIPAL')
            credential.secret = configuration.get('mesos', 'DEFAULT_SECRET')

            framework.principal = credential.principal

            driver = mesos.native.MesosSchedulerDriver(
                AirflowMesosScheduler(self.task_queue, self.result_queue, task_cpu, task_memory),
                framework,
                master,
                implicit_acknowledgements,
                credential)
        else:
            framework.principal = 'Airflow'
            driver = mesos.native.MesosSchedulerDriver(
                AirflowMesosScheduler(self.task_queue, self.result_queue, task_cpu, task_memory),
                framework,
                master,
                implicit_acknowledgements)

        self.mesos_driver = driver
        self.mesos_driver.start()

    def execute_async(self, key, command, queue=None):
        # Hand the command to the scheduler; `queue` is unused by this executor.
        self.task_queue.put((key, command))

    def sync(self):
        """Drain completed results and propagate their states to airflow."""
        while not self.result_queue.empty():
            results = self.result_queue.get()
            self.change_state(*results)

    def end(self):
        # Wait for all queued tasks to be acknowledged, then stop the driver.
        self.task_queue.join()
        self.mesos_driver.stop()
unknown
codeparrot/codeparrot-clean
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Debugging ## Debug dei problemi di rete multi-GPU Quando addestri o fai inferenza con `DistributedDataParallel` e GPU multiple, se si verificano problemi di intercomunicazione tra processi e/o nodi, puoi utilizzare il seguente script per diagnosticare i problemi della rete. ```bash wget https://raw.githubusercontent.com/huggingface/transformers/main/scripts/distributed/torch-distributed-gpu-test.py ``` Per esempio per testare come 2 GPU interagiscono fai: ```bash python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py ``` Se entrambi i processi sono in grado di comunicare tra loro e di allocare la memoria della GPU, ciascuno di essi stamperà lo stato OK. Per più GPU o nodi adatta gli argumenti nello script. All'interno dello script di diagnostica troverai molti altri dettagli e anche una guida per eseguirlo in ambiente SLURM. Un livello di debug superiore è aggiungere la variabile d'ambiente `NCCL_DEBUG=INFO` come di seguito: ```bash NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py ``` In questo modo si scaricano molte informazioni di debug relative a NCCL, che puoi cercare online in caso di problemi. 
Oppure, se non hai la sicurezza di come interpretare l'output, puoi condividere il file di log in una Issue. ## Rilevamento di Underflow e Overflow <Tip> Questa funzionalità al momento è disponibile solo per PyTorch. </Tip> <Tip> Per addestramento multi-GPU richiede DDP (`torch.distributed.launch`). </Tip> <Tip> Questa funzionalità può essere usata con modelli basati su `nn.Module`. </Tip> Se inizi a ottenere `loss=NaN` o il modello presenta qualche altro comportamento anomalo a causa di valori `inf` o `nan` in attivazioni o nei pesi, è necessario scoprire dove si verifica il primo underflow o overflow e cosa lo ha determinato. Fortunatamente è possibile farlo facilmente attivando un modulo speciale che effettuerà il rilevamento automaticamente. Se stai usando [`Trainer`], hai bisogno di aggiungere solo: ```bash --debug underflow_overflow ``` ai normali argomenti della riga di comando, o passa `debug="underflow_overflow"` quando viene creato l'oggetto [`TrainingArguments`]. Se stai usando il tuo ciclo di allenamento o un altro trainer, puoi ottenere lo stesso risultato con: ```python from .debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model) ``` [`~debug_utils.DebugUnderflowOverflow`] inserisce dei ganci nel modello che dopo ogni chiamata testeranno le variabili di ingresso e di uscita e anche i pesi del modulo corrispondente. Non appena viene rilevato `inf` o o `nan` in almeno un elemento delle attivazioni o dei pesi, il programma lo notifica e stampa un rapporto come il seguente (questo è stato rilevato con `google/mt5-small` sotto fp16 mixed precision): ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata encoder.block.1.layer.1.DenseReluDense.dropout Dropout 0.00e+00 2.57e+02 input[0] 0.00e+00 2.85e+02 output [...] 
encoder.block.2.layer.0 T5LayerSelfAttention 6.78e-04 3.15e+03 input[0] 2.65e-04 3.42e+03 output[0] None output[1] 2.25e-01 1.00e+04 output[2] encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.dropout Dropout 0.00e+00 8.76e+03 input[0] 0.00e+00 9.74e+03 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` L'output di esempio è stato tagliato al centro per brevità. La seconda colonna mostra il valore dell'elemento più grande in assoluto,così se osserviamo da vicino gli ultimi istanti, input e output sono nel range di `1e4`. Questo addestramento è stato eseguito con una mixed precision fp16 e l'ultimo passo usciva fuori (sotto `fp16` il valore più grande prima di `inf` è `64e3`). Per evitare overflows sotto `fp16` le attivazionioni devono rimanere molto al di sotto di `1e4`, perché `1e4 * 1e4 = 1e8` quindi qualsiasi moltiplicazione di matrice con grandi attivazioni porterà a una condizione di overflow numerico. All'inizio della traccia è possibile scoprire a quale lotto si è verificato il problema (questo `Detected inf/nan during batch_number=0` significa che il problema si è verificato nel primo lotto). Ogni frame segnalato inizia dichiarando la voce completamente qualificata per il modulo corrispondente per il quale il frame è stato segnalato. 
Se osserviamo il seguente frame: ``` encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output ``` Questo, `encoder.block.2.layer.1.layer_norm` indica che si tratta di un layer norm nel primo layer, del secondo blocco dell'encoder. E le chiamata specifica di `forward` è `T5LayerNorm`. Osserviamo gli ultimi frame del report: ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata [...] encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` L'ultimo frame report per la funzione `Dropout.forward` con la prima voce per l'unico input e la seconda per l'unico output. Si può notare che è stato richiamato da un attibuto `dropout` dentro la classe `DenseReluDense`. Si può notare che ciò è avvenuto durante il primo strato, del 2° blocco, durante il primissimo lotto. Infine, gli elementi di input più grandi in assoluto sono stati `6.27e+04` e l'equivalente per l'output era `inf`. Puoi vedere qui, che `T5DenseGatedGeluDense.forward` risulta in output activations, il cui valore massimo assoluto era circa 62,7K, che è molto vicino al limite massimo di 64K di fp16. Nel prossimo frame abbiamo `Dropout` che rinormalizza i pesi, dopo aver azzerato alcuni elementi, il che spinge il valore massimo assoluto a più di 64K e si verifica un overflow.(`inf`). 
Come puoi notare, è nei frames precedenti che occorre esaminare quando i numeri iniziano a diventare molto grandi per i valori fp16.

Confrontiamo il report al codice `models/t5/modeling_t5.py`:

```python
class T5DenseGatedGeluDense(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.gelu_act = ACT2FN["gelu_new"]

    def forward(self, hidden_states):
        hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
```

Ora è facile vedere la chiamata `dropout`, e tutte le chiamate precedenti.

Poiché il rilevamento avviene in un avanzamento (forward hook in eng.), i rapporti vengono creati immediatamente dopo ogni rientro da `forward` (forward returns in eng.).

Tornando al rapporto completo, per agire e risolvere il problema, dobbiamo andare qualche frame più in alto, dove i numeri hanno iniziato a salire, e probabilmente passare alla modalità `fp32`, in modo che i numeri non trabocchino quando vengono moltiplicati o sommati. Naturalmente, potrebbero esserci altre soluzioni.
Per esempio, potremmo spegnere temporaneamente `amp` se è abilitato, successivamente spostare `forward` in un helper wrapper, come:

```python
def _forward(self, hidden_states):
    hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
    hidden_linear = self.wi_1(hidden_states)
    hidden_states = hidden_gelu * hidden_linear
    hidden_states = self.dropout(hidden_states)
    hidden_states = self.wo(hidden_states)
    return hidden_states


import torch


def forward(self, hidden_states):
    if torch.is_autocast_enabled():
        with torch.cuda.amp.autocast(enabled=False):
            return self._forward(hidden_states)
    else:
        return self._forward(hidden_states)
```

Poiché il rilevatore automatico riporta solo gli ingressi e le uscite di fotogrammi completi, una volta che si sa dove cercare, si può analizzare anche le fasi intermedie di una specifica funzione `forward`. In alcuni casi puoi usare la funzione di supporto `detect_overflow` per indirizzare il rilevatore dove preferisci, ad esempio:

```python
from debug_utils import detect_overflow


class T5LayerFF(nn.Module):
    [...]

    def forward(self, hidden_states):
        forwarded_states = self.layer_norm(hidden_states)
        detect_overflow(forwarded_states, "after layer_norm")
        forwarded_states = self.DenseReluDense(forwarded_states)
        detect_overflow(forwarded_states, "after DenseReluDense")
        return hidden_states + self.dropout(forwarded_states)
```

Si può vedere che abbiamo aggiunto 2 di questi e ora teniamo traccia se `inf` o `nan` per `forwarded_states` è stato rilevato da qualche parte.

In realtà, il rilevatore li riporta già, perché ciascuna delle chiamate nell'esempio precedente è un `nn.Module`, ma diciamo che se avessimo dei calcoli diretti locali, questo è il modo in cui lo faremmo.
Inoltre, se si istanzia il debugger nel proprio codice, è possibile modificare il numero di fotogrammi stampati rispetto a predefinito, ad esempio.: ```python from .debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100) ``` ### Tracciamento della mistura assoluta del lotto specifico e del valore massimo La stessa classe di debug può essere utilizzata per il tracciamento per-batch con la funzione di rilevamento di underflow/overflow disattivata. Supponiamo di voler osservare i valori minimi e massimi assoluti per tutti gli ingredienti di ogni chiamata `forward` di un dato lotto. lotto, e che lo si voglia fare solo per i lotti 1 e 3. Si istanzia questa classe come: ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3]) ``` Ora i batch completi 1 e 3 saranno tracciati utilizzando lo stesso formato del rilevatore di underflow/overflow. I batches sono 0-indexed. Questo è utile se si sa che il programma inizia a comportarsi male dopo un certo numero di batch, in modo da poter avanzare velocemente fino a quell'area. direttamente a quell'area. Ecco un esempio di output troncato per questa configurazione: ``` *** Starting batch number=1 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.47e+04 input[0] 5.36e-05 7.92e+02 output [...] decoder.dropout Dropout 1.60e-07 2.27e+01 input[0] 0.00e+00 2.52e+01 output decoder T5Stack not a tensor output lm_head Linear 1.01e-06 7.92e+02 weight 0.00e+00 1.11e+00 input[0] 6.06e-02 8.39e+01 output T5ForConditionalGeneration not a tensor output *** Starting batch number=3 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.78e+04 input[0] 5.36e-05 7.92e+02 output [...] ``` Qui verrà scaricato un numero enorme di fotogrammi, tanti quanti sono le chiamate in avanti nel modello, quindi può essere o non essere quello che volete, ma a volte può essere più utile usarlo di un classico debugger. 
Per esempio, se il problema inizia a verificarsi a partire dal lotto numero 150, è possibile scaricare le tracce dei lotti 149 e 150 e confrontare i punti in cui i numeri hanno iniziato a divergere.

È inoltre possibile specificare il numero di batch dopo il quale interrompere l'addestramento, con:

```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
```
unknown
github
https://github.com/huggingface/transformers
docs/source/it/debugging.md
/**
 * Copyright IBM Corp. 2016, 2025
 * SPDX-License-Identifier: BUSL-1.1
 */

/* eslint-env node */
'use strict';

// Ember CLI build pipeline configuration for the Vault UI.
const EmberApp = require('ember-cli/lib/broccoli/ember-app');
const config = require('./config/environment')();
const environment = EmberApp.env();
const isProd = environment === 'production';
const isTest = environment === 'test';
// const isCI = !!process.env.CI;

// Addon/build options keyed by addon name or ember-cli option name.
const appConfig = {
  'ember-service-worker': {
    serviceWorkerScope: config.serviceWorkerScope,
    skipWaitingOnMessage: true,
  },
  babel: {
    plugins: [require.resolve('ember-concurrency/async-arrow-task-transform')],
  },
  fingerprint: {
    exclude: ['images/'],
  },
  assetLoader: {
    // Prefix lazily-loaded assets with the app's root URL (no trailing slash).
    generateURI: function (filePath) {
      return `${config.rootURL.replace(/\/$/, '')}${filePath}`;
    },
  },
  'ember-cli-babel': {
    enableTypeScriptTransform: true,
    throwUnlessParallelizable: true,
  },
  // Lint/test scaffolding only outside production builds.
  hinting: isTest,
  tests: !isProd,
  sourcemaps: {
    enabled: !isProd,
  },
  sassOptions: {
    sourceMap: false,
    onlyIncluded: true,
    quietDeps: true, // silences deprecation warnings from dependencies
    precision: 4,
    includePaths: [
      './node_modules/@hashicorp/design-system-components/dist/styles',
      './node_modules/@hashicorp/design-system-tokens/dist/products/css',
      './node_modules/ember-basic-dropdown/',
      './node_modules/ember-power-select/',
      './node_modules/@hashicorp-internal/vault-reporting/dist/styles',
    ],
  },
  minifyCSS: {
    options: {
      advanced: false,
    },
  },
  autoprefixer: {
    enabled: isTest || isProd,
    grid: true,
    browsers: ['defaults'],
  },
  autoImport: {
    // Disallow eval-based module loading in the bundler output.
    forbidEval: true,
  },
  'ember-test-selectors': {
    strip: isProd,
  },
  'ember-composable-helpers': {
    except: ['array'],
  },
  'ember-cli-deprecation-workflow': {
    enabled: true,
  },
};

module.exports = function (defaults) {
  const app = new EmberApp(defaults, appConfig);

  // Vendor scripts and styles bundled outside the module graph.
  app.import('node_modules/jsonlint/lib/jsonlint.js');
  app.import('node_modules/text-encoder-lite/text-encoder-lite.js');
  app.import('vendor/jsondiffpatch.umd.js');
  app.import('vendor/htmlformatter.umd.js');
  app.import('node_modules/jsondiffpatch/lib/formatters/styles/html.css');
  app.import('app/styles/bulma/bulma-radio-checkbox.css');
  app.import(
    'node_modules/@hashicorp/design-system-components/dist/styles/@hashicorp/design-system-components.css'
  );

  return app.toTree();
};
javascript
github
https://github.com/hashicorp/vault
ui/ember-cli-build.js
# -*- coding: utf-8 -*- """ Python evaluation of blaze AIR. """ from __future__ import absolute_import, division, print_function from pykit.ir import interp import blaze # Use numpy for now until dynd supports reshape import numpy as np #------------------------------------------------------------------------ # Interpreter #------------------------------------------------------------------------ def interpret(func, env, args, **kwds): args = [np.array(arg) for arg in args] env = {'interp.handlers' : handlers} result = interp.run(func, env, None, args=args) return blaze.array(result) #------------------------------------------------------------------------ # Handlers #------------------------------------------------------------------------ def op_pykernel(interp, func, args): return func(*args) def op_convert(interp, arg): op = interp.op dshape = op.type # Broadcasting out_shape = arg.shape in_shape = dshape.shape for i in range(len(out_shape), len(in_shape)): out_shape = (1,) + out_shape # Reshape with the output shape, since it may have broadcasting dimensions arg = arg.reshape(out_shape) # Dtype conversion in_dtype = dshape.measure.to_numpy_dtype() if arg.dtype != in_dtype: arg = arg.astype(in_dtype) return arg def op_ret(interp, arg): interp.pc = -1 return arg handlers = { 'pykernel': op_pykernel, 'kernel': op_pykernel, 'convert': op_convert, 'ret': op_ret, }
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008 """ Nasm processing """ import os import TaskGen, Task from TaskGen import taskgen, before, extension nasm_str = '${NASM} ${NASM_FLAGS} ${NASM_INCLUDES} ${SRC} -o ${TGT}' EXT_NASM = ['.s', '.S', '.asm', '.ASM', '.spp', '.SPP'] @taskgen @before('apply_link') def apply_nasm_vars(self): # flags if hasattr(self, 'nasm_flags'): for flag in self.to_list(self.nasm_flags): self.env.append_value('NASM_FLAGS', flag) # includes - well, if we suppose it works with c processing if hasattr(self, 'includes'): for inc in self.to_list(self.includes): node = self.path.find_dir(inc) if not node: raise ValueError("cannot find the dir" + inc) self.env.append_value('NASM_INCLUDES', '-I %s' % node.srcpath(self.env)) self.env.append_value('NASM_INCLUDES', '-I %s' % node.bldpath(self.env)) @extension(EXT_NASM) def nasm_file(self, node): try: obj_ext = self.obj_ext except AttributeError: obj_ext = '_%d.o' % self.idx task = self.create_task('nasm') task.inputs = [node] task.outputs = [node.change_ext(obj_ext)] self.compiled_tasks.append(task) self.meths.append('apply_nasm_vars') # create our action here Task.simple_task_type('nasm', nasm_str, color='BLUE', ext_out='.o') def detect(conf): nasm = conf.find_program('nasm', var='NASM') if not nasm: nasm = conf.find_program('yasm', var='NASM') if not nasm: conf.fatal('could not find nasm (or yasm), install it or set PATH env var')
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python import sys import pickle sys.path.append("../tools/") from feature_format import featureFormat, targetFeatureSplit from tester import dump_classifier_and_data ### Task 1: Select what features you'll use. ### features_list is a list of strings, each of which is a feature name. ### The first feature must be "poi". features_list = ['poi','salary'] # You will need to use more features ### Load the dictionary containing the dataset with open("final_project_dataset.pkl", "r") as data_file: data_dict = pickle.load(data_file) ### Task 2: Remove outliers ### Task 3: Create new feature(s) ### Store to my_dataset for easy export below. my_dataset = data_dict ### Extract features and labels from dataset for local testing data = featureFormat(my_dataset, features_list, sort_keys = True) labels, features = targetFeatureSplit(data) ### Task 4: Try a varity of classifiers ### Please name your classifier clf for easy export below. ### Note that if you want to do PCA or other multi-stage operations, ### you'll need to use Pipelines. For more info: ### http://scikit-learn.org/stable/modules/pipeline.html # Provided to give you a starting point. Try a variety of classifiers. from sklearn.naive_bayes import GaussianNB clf = GaussianNB() ### Task 5: Tune your classifier to achieve better than .3 precision and recall ### using our testing script. Check the tester.py script in the final project ### folder for details on the evaluation method, especially the test_classifier ### function. Because of the small size of the dataset, the script uses ### stratified shuffle split cross validation. For more info: ### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html # Example starting point. Try investigating other evaluation techniques! 
from sklearn.cross_validation import train_test_split features_train, features_test, labels_train, labels_test = \ train_test_split(features, labels, test_size=0.3, random_state=42) ### Task 6: Dump your classifier, dataset, and features_list so anyone can ### check your results. You do not need to change anything below, but make sure ### that the version of poi_id.py that you submit can be run on its own and ### generates the necessary .pkl files for validating your results. dump_classifier_and_data(clf, my_dataset, features_list)
unknown
codeparrot/codeparrot-clean
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc.protocolPB; import java.io.IOException; import java.util.Collection; import java.util.List; import org.apache.hadoop.ipc.GenericRefreshProtocol; import org.apache.hadoop.ipc.RefreshResponse; import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshRequestProto; import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseProto; import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseCollectionProto; import org.apache.hadoop.thirdparty.protobuf.RpcController; import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class GenericRefreshProtocolServerSideTranslatorPB implements GenericRefreshProtocolPB { private final GenericRefreshProtocol impl; public GenericRefreshProtocolServerSideTranslatorPB( GenericRefreshProtocol impl) { this.impl = impl; } @Override public GenericRefreshResponseCollectionProto refresh( RpcController controller, GenericRefreshRequestProto request) throws ServiceException { try { List<String> argList = request.getArgsList(); String[] args = argList.toArray(new String[argList.size()]); if (!request.hasIdentifier()) { throw new ServiceException("Request must 
contain identifier"); } Collection<RefreshResponse> results = impl.refresh(request.getIdentifier(), args); return pack(results); } catch (IOException e) { throw new ServiceException(e); } } // Convert a collection of RefreshResponse objects to a // RefreshResponseCollection proto private GenericRefreshResponseCollectionProto pack( Collection<RefreshResponse> responses) { GenericRefreshResponseCollectionProto.Builder b = GenericRefreshResponseCollectionProto.newBuilder(); for (RefreshResponse response : responses) { GenericRefreshResponseProto.Builder respBuilder = GenericRefreshResponseProto.newBuilder(); respBuilder.setExitStatus(response.getReturnCode()); respBuilder.setUserMessage(response.getMessage()); respBuilder.setSenderName(response.getSenderName()); // Add to collection b.addResponses(respBuilder); } return b.build(); } }
java
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java
//===--- TBDGenRequests.cpp - Requests for TBD Generation ----------------===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2020 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #include "swift/AST/TBDGenRequests.h" #include "swift/AST/ASTContext.h" #include "swift/AST/Evaluator.h" #include "swift/AST/FileUnit.h" #include "swift/AST/Module.h" #include "swift/ClangImporter/ClangImporter.h" #include "swift/IRGen/TBDGen.h" #include "swift/Subsystems.h" #include "clang/Basic/TargetInfo.h" #include "llvm/TextAPI/InterfaceFile.h" #include "APIGen.h" using namespace swift; namespace swift { // Implement the TBDGen type zone (zone 14). #define SWIFT_TYPEID_ZONE TBDGen #define SWIFT_TYPEID_HEADER "swift/AST/TBDGenTypeIDZone.def" #include "swift/Basic/ImplementTypeIDZone.h" #undef SWIFT_TYPEID_ZONE #undef SWIFT_TYPEID_HEADER } // end namespace swift //----------------------------------------------------------------------------// // GenerateTBDRequest computation. 
//----------------------------------------------------------------------------// FileUnit *TBDGenDescriptor::getSingleFile() const { return Input.dyn_cast<FileUnit *>(); } ModuleDecl *TBDGenDescriptor::getParentModule() const { if (auto *module = Input.dyn_cast<ModuleDecl *>()) return module; return cast<FileUnit *>(Input)->getParentModule(); } const StringRef TBDGenDescriptor::getDataLayoutString() const { auto &ctx = getParentModule()->getASTContext(); auto *clang = static_cast<ClangImporter *>(ctx.getClangModuleLoader()); return llvm::StringRef(clang->getTargetInfo().getDataLayoutString()); } const llvm::Triple &TBDGenDescriptor::getTarget() const { return getParentModule()->getASTContext().LangOpts.Target; } bool TBDGenDescriptor::operator==(const TBDGenDescriptor &other) const { return Input == other.Input && Opts == other.Opts; } llvm::hash_code swift::hash_value(const TBDGenDescriptor &desc) { return llvm::hash_combine(desc.getFileOrModule(), desc.getOptions()); } void swift::simple_display(llvm::raw_ostream &out, const TBDGenDescriptor &desc) { out << "Generate TBD for "; if (auto *module = desc.getFileOrModule().dyn_cast<ModuleDecl *>()) { out << "module "; simple_display(out, module); } else { out << "file "; simple_display(out, cast<FileUnit *>(desc.getFileOrModule())); } } SourceLoc swift::extractNearestSourceLoc(const TBDGenDescriptor &desc) { return extractNearestSourceLoc(desc.getFileOrModule()); } // Define request evaluation functions for each of the TBDGen requests. static AbstractRequestFunction *tbdGenRequestFunctions[] = { #define SWIFT_REQUEST(Zone, Name, Sig, Caching, LocOptions) \ reinterpret_cast<AbstractRequestFunction *>(&Name::evaluateRequest), #include "swift/AST/TBDGenTypeIDZone.def" #undef SWIFT_REQUEST }; void swift::registerTBDGenRequestFunctions(Evaluator &evaluator) { evaluator.registerRequestFunctions(Zone::TBDGen, tbdGenRequestFunctions); }
cpp
github
https://github.com/apple/swift
lib/IRGen/TBDGenRequests.cpp
import os import threading import time import warnings from django.apps import apps from django.core.signals import setting_changed from django.db import connections, router from django.db.utils import ConnectionRouter from django.dispatch import Signal, receiver from django.utils import timezone from django.utils.functional import empty template_rendered = Signal(providing_args=["template", "context"]) # Most setting_changed receivers are supposed to be added below, # except for cases where the receiver is related to a contrib app. # Settings that may not work well when using 'override_settings' (#19031) COMPLEX_OVERRIDE_SETTINGS = {'DATABASES'} @receiver(setting_changed) def clear_cache_handlers(**kwargs): if kwargs['setting'] == 'CACHES': from django.core.cache import caches caches._caches = threading.local() @receiver(setting_changed) def update_installed_apps(**kwargs): if kwargs['setting'] == 'INSTALLED_APPS': # Rebuild any AppDirectoriesFinder instance. from django.contrib.staticfiles.finders import get_finder get_finder.cache_clear() # Rebuild management commands cache from django.core.management import get_commands get_commands.cache_clear() # Rebuild get_app_template_dirs cache. from django.template.utils import get_app_template_dirs get_app_template_dirs.cache_clear() # Rebuild translations cache. 
from django.utils.translation import trans_real trans_real._translations = {} @receiver(setting_changed) def update_connections_time_zone(**kwargs): if kwargs['setting'] == 'TIME_ZONE': # Reset process time zone if hasattr(time, 'tzset'): if kwargs['value']: os.environ['TZ'] = kwargs['value'] else: os.environ.pop('TZ', None) time.tzset() # Reset local time zone cache timezone.get_default_timezone.cache_clear() # Reset the database connections' time zone if kwargs['setting'] in {'TIME_ZONE', 'USE_TZ'}: for conn in connections.all(): try: del conn.timezone except AttributeError: pass try: del conn.timezone_name except AttributeError: pass conn.ensure_timezone() @receiver(setting_changed) def clear_routers_cache(**kwargs): if kwargs['setting'] == 'DATABASE_ROUTERS': router.routers = ConnectionRouter().routers @receiver(setting_changed) def reset_template_engines(**kwargs): if kwargs['setting'] in { 'TEMPLATES', 'DEBUG', 'FILE_CHARSET', 'INSTALLED_APPS', }: from django.template import engines try: del engines.templates except AttributeError: pass engines._templates = None engines._engines = {} from django.template.engine import Engine Engine.get_default.cache_clear() @receiver(setting_changed) def clear_serializers_cache(**kwargs): if kwargs['setting'] == 'SERIALIZATION_MODULES': from django.core import serializers serializers._serializers = {} @receiver(setting_changed) def language_changed(**kwargs): if kwargs['setting'] in {'LANGUAGES', 'LANGUAGE_CODE', 'LOCALE_PATHS'}: from django.utils.translation import trans_real trans_real._default = None trans_real._active = threading.local() if kwargs['setting'] in {'LANGUAGES', 'LOCALE_PATHS'}: from django.utils.translation import trans_real trans_real._translations = {} trans_real.check_for_language.cache_clear() @receiver(setting_changed) def file_storage_changed(**kwargs): if kwargs['setting'] == 'DEFAULT_FILE_STORAGE': from django.core.files.storage import default_storage default_storage._wrapped = empty 
@receiver(setting_changed) def complex_setting_changed(**kwargs): if kwargs['enter'] and kwargs['setting'] in COMPLEX_OVERRIDE_SETTINGS: # Considering the current implementation of the signals framework, # stacklevel=5 shows the line containing the override_settings call. warnings.warn("Overriding setting %s can lead to unexpected behavior." % kwargs['setting'], stacklevel=5) @receiver(setting_changed) def root_urlconf_changed(**kwargs): if kwargs['setting'] == 'ROOT_URLCONF': from django.urls import clear_url_caches, set_urlconf clear_url_caches() set_urlconf(None) @receiver(setting_changed) def static_storage_changed(**kwargs): if kwargs['setting'] in { 'STATICFILES_STORAGE', 'STATIC_ROOT', 'STATIC_URL', }: from django.contrib.staticfiles.storage import staticfiles_storage staticfiles_storage._wrapped = empty @receiver(setting_changed) def static_finders_changed(**kwargs): if kwargs['setting'] in { 'STATICFILES_DIRS', 'STATIC_ROOT', }: from django.contrib.staticfiles.finders import get_finder get_finder.cache_clear() @receiver(setting_changed) def auth_password_validators_changed(**kwargs): if kwargs['setting'] == 'AUTH_PASSWORD_VALIDATORS': from django.contrib.auth.password_validation import get_default_password_validators get_default_password_validators.cache_clear() @receiver(setting_changed) def user_model_swapped(**kwargs): if kwargs['setting'] == 'AUTH_USER_MODEL': apps.clear_cache()
unknown
codeparrot/codeparrot-clean
/* * Copyright 2012-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.configurationsample.generic; import java.util.HashMap; import java.util.Map; /** * A base properties class with generics. * * @param <A> name type * @param <B> mapping key type * @param <C> mapping value type * @author Stephane Nicoll */ public class AbstractGenericProperties<A, B, C> { /** * Generic name. */ private A name; /** * Generic mappings. */ private final Map<B, C> mappings = new HashMap<>(); public A getName() { return this.name; } public void setName(A name) { this.name = name; } public Map<B, C> getMappings() { return this.mappings; } }
java
github
https://github.com/spring-projects/spring-boot
configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationsample/generic/AbstractGenericProperties.java
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. unites = { 0: '', 1:'un', 2:'deux', 3:'trois', 4:'quatre', 5:'cinq', 6:'six', 7:'sept', 8:'huit', 9:'neuf', 10:'dix', 11:'onze', 12:'douze', 13:'treize', 14:'quatorze', 15:'quinze', 16:'seize', 21:'vingt et un', 31:'trente et un', 41:'quarante et un', 51:'cinquante et un', 61:'soixante et un', 71:'septante et un', 91:'nonante et un', 80:'quatre-vingts' } dizaine = { 1: 'dix', 2:'vingt', 3:'trente',4:'quarante', 5:'cinquante', 6:'soixante', 7:'septante', 8:'quatre-vingt', 9:'nonante' } centaine = { 0:'', 1: 'cent', 2:'deux cent', 3:'trois cent',4:'quatre cent', 5:'cinq cent', 6:'six cent', 7:'sept cent', 8:'huit cent', 9:'neuf cent' } mille = { 0:'', 1:'mille' } def _100_to_text(chiffre): if chiffre in unites: return unites[chiffre] else: if chiffre%10>0: return dizaine[chiffre / 10]+'-'+unites[chiffre % 10] else: return dizaine[chiffre / 10] def _1000_to_text(chiffre): d = _100_to_text(chiffre % 100) d2 = chiffre/100 if d2>0 and d: return centaine[d2]+' '+d elif d2>1 and not d: return centaine[d2]+'s' else: return centaine[d2] or d def _10000_to_text(chiffre): if chiffre==0: return 'zero' part1 = _1000_to_text(chiffre % 1000) part2 = mille.get(chiffre / 1000, _1000_to_text(chiffre / 1000)+' mille') if part2 and part1: part1 = ' '+part1 return part2+part1 def int_to_text(i): return _10000_to_text(i) if __name__=='__main__': for i in range(1,999999,139): print int_to_text(i)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- #------------------------------------------------------------ # streamondemand - XBMC Plugin # Canal para sports-main # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/ #------------------------------------------------------------ import urlparse,urllib2,urllib,re import os, sys from core import logger from core import config from core import scrapertools from core.item import Item from servers import servertools from channels.channels_sports import sportstools as SPT __channel__ = "sports-main" __title__ = "Deportes" __language__ = "ES" def isGeneric(): return True def mainlist(item): logger.info("[sports-main.py] mainlist") itemlist = [] itemlist.append( Item(channel="sports-lshunter", action="mainlist", title="LSHunter.tv", url="http://www.drakulastream.eu" )) itemlist.append( Item(channel="sports-rojadirecta", action="mainlist", title="RojaDirecta.me", url="http://www.rojadirecta.me" )) #itemlist.append( Item(channel="sports-firstrow", action="mainlist", title="FirstRowSports.eu", url="http://www.ifeed2all.eu/type/football.html" )) itemlist.append( Item(channel="sports-lshunter", action="myteam", title=SPT.MIEQUIPO+" en LSHunter.tv", url="http://www.drakulastream.eu" )) itemlist.append( Item(channel="sports-rojadirecta", action="myteam", title=SPT.MIEQUIPO+" en RojaDirecta.me", url="http://www.rojadirecta.me" )) #itemlist.append( Item(channel="sports-firstrow", action="myteam", title=SPT.MIEQUIPO+" en FirstRowSports.eu", url="http://www.ifeed2all.eu/type/football.html" )) itemlist.append( Item(channel="configuracion", action="mainlist" , title="Configurar mi equipo : [COLOR=green][B]"+SPT.MIEQUIPO+'[/B][/COLOR]' )) return itemlist # BUSCAR ENLACES A VIDEOS : # ========================= def corregir_url(url): if url == 'http://tuttosportweb.com': return 'http://tuttosportweb.com/update/ch1.php' aux = scrapertools.find_single_match (url, "tuttosportweb.com/([\w]+.php)") if aux != '': return 'http://tuttosportweb.com/update/%s' % aux 
url = url.replace("kasimirotv.net/canal", "kasimirotv.net/player") return url def play(item): logger.info("[sports-main.py] play") itemlist = [] item.url = corregir_url(item.url) data = scrapertools.cachePage(item.url,headers=SPT.DEFAULT_HEADERS) headers = SPT.DEFAULT_HEADERS[:] headers.append(["Referer",item.url]) url = buscar_url_valida(data, headers) if url == '': # Buscar si hay algun iframe que pudiera contener el video (width>=500 y height>=300 !?) patron = '<iframe([^>]+)' matches = re.compile(patron,re.DOTALL | re.IGNORECASE).findall(data) for match in matches: #logger.info("iframe match "+match) # marginheight="0" marginwidth="0" style="width: 700px; height: 450px" width="650" height="80" w = re.findall ('[^n]width\s*[:=][^\d]*(\d+)', match, re.DOTALL | re.IGNORECASE) h = re.findall ('[^n]height\s*[:=][^\d]*(\d+)', match, re.DOTALL | re.IGNORECASE) if int(w[0]) >= 500 and int(h[0]) >= 300: url2 = scrapertools.find_single_match (match, 'src\s*=\s*["\']([^"\']+)') logger.info("buscando en iframe "+url2) headers = SPT.DEFAULT_HEADERS[:] headers.append(["Referer",item.url]) try: data = scrapertools.cachePage(url2,headers=headers) except: continue if data == '': continue headers = SPT.DEFAULT_HEADERS[:] headers.append(["Referer",url2]) url = buscar_url_valida(data, headers) if url != '': break if url != '': itemlist.append( Item(channel=__channel__, title=item.title , url=url, server='directo')) else: logger.info("NO DETECTADO SERVIDOR") return itemlist def buscar_url_valida(data, headers): logger.info("[sports-main.py] buscar_url_valida") if (SPT.DEBUG): logger.info("data="+data) # unescape de posible código javascript "oculto" patronjs = "unescape\s*\(\s*['\"]([^'\"]+)" matches = re.compile(patronjs,re.DOTALL).findall(data) for ofuscado in matches: data = data.replace(ofuscado, urllib.unquote(ofuscado)) #if (SPT.DEBUG): logger.info("datanoofus="+data) # Ejecuta find_url_play en cada servidor hasta encontrar una url for serverid in SPT.SPORTS_SERVERS: try: 
servers_module = __import__("servers_sports."+serverid) server_module = getattr(servers_module,serverid) url = server_module.find_url_play(data, headers) if url != '': return url except ImportError: logger.info("No existe conector para "+serverid) except: logger.info("Error en el conector "+serverid) import traceback,sys from pprint import pprint exc_type, exc_value, exc_tb = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_tb) for line in lines: line_splits = line.split("\n") for line_split in line_splits: logger.error(line_split) return '' # no encontrada
unknown
codeparrot/codeparrot-clean
# ================================================================================================== # Copyright 2015 Twitter, Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================================================================================== from datetime import datetime from zktraffic.base.network import BadPacket from zktraffic.base.util import read_long, read_number, read_string class PeerState(object): LOOKING = 0 FOLLOWING = 1 LEADING = 2 OBSERVING = 3 STATES = [LOOKING, FOLLOWING, LEADING, OBSERVING] NAMES = [ "looking", "following", "leading", "observing", ] @classmethod def invalid(cls, state): return state not in cls.STATES @classmethod def to_str(cls, state): return "" if cls.invalid(state) else cls.NAMES[state] class Message(object): PROTO_VER = -65536 OLD_LEN = 28 WITH_VERSION_LEN = 36 __slots__ = () @classmethod def from_payload(cls, data, src, dst, timestamp): if len(data) < 16: raise BadPacket("Too small") proto, offset = read_long(data, 0) if proto == cls.PROTO_VER: server_id, offset = read_long(data, offset) election_addr, _ = read_string(data, offset) return Initial(timestamp, src, dst, server_id, election_addr) if len(data) >= cls.OLD_LEN: state, offset = read_number(data, 0) if PeerState.invalid(state): raise BadPacket("Invalid state: %d" % state) leader, offset = read_long(data, offset) zxid, offset 
= read_long(data, offset) election_epoch, offset = read_long(data, offset) peer_epoch, offset = read_long(data, offset) if len(data) > cls.OLD_LEN else (-1, offset) version = 0 config = "" if len(data) > cls.WITH_VERSION_LEN: version, offset = read_number(data, offset) if version == 2: config, _ = read_string(data, offset) return Notification( timestamp, src, dst, state, leader, zxid, election_epoch, peer_epoch, version, config ) raise BadPacket("Unknown unknown") @property def timestr(self): return datetime.fromtimestamp(self.timestamp).strftime("%H:%M:%S:%f") class Initial(Message): __slots__ = ("timestamp", "src", "dst", "server_id", "election_addr") def __init__(self, timestamp, src, dst, server_id, election_addr): self.timestamp = timestamp self.src = src self.dst = dst self.server_id = server_id self.election_addr = election_addr def __str__(self): return "%s(\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s\n)\n" % ( "Initial", " " * 5 + "timestamp", self.timestr, " " * 5 + "src", self.src, " " * 5 + "dst", self.dst, " " * 5 + "server_id", self.server_id, " " * 5 + "election_addr", self.election_addr ) class Notification(Message): __slots__ = ( "timestamp", "src", "dst", "state", "leader", "zxid", "election_epoch", "peer_epoch", "version", "config" ) def __init__(self, timestamp, src, dst, state, leader, zxid, election_epoch, peer_epoch, version, config): self.timestamp = timestamp self.src = src self.dst = dst self.state = state self.leader = leader self.zxid = zxid self.election_epoch = election_epoch self.peer_epoch = peer_epoch self.version = version self.config = config @property def state_literal(self): return PeerState.to_str(self.state) def __str__(self): config = [" " * 10 + cline for cline in self.config.split("\n")] return "%s(\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=\n%s\n)\n" % ( "Notification", " " * 5 + "timestamp", self.timestr, " " * 5 + "src", self.src, " " * 5 + "dst", self.dst, " " * 5 + "state", 
self.state_literal, " " * 5 + "leader", self.leader, " " * 5 + "zxid", self.zxid, " " * 5 + "election_epoch", self.election_epoch, " " * 5 + "peer_epoch", self.peer_epoch, " " * 5 + "version", self.version, " " * 5 + "config", "\n".join(config), )
unknown
codeparrot/codeparrot-clean
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import struct from . import packet_base from . import ethernet class Packet(object): """A packet decoder/encoder class. An instance is used to either decode or encode a single packet. *data* is a bytearray to describe a raw datagram to decode. When decoding, a Packet object is iteratable. Iterated values are protocol (ethernet, ipv4, ...) headers and the payload. Protocol headers are instances of subclass of packet_base.PacketBase. The payload is a bytearray. They are iterated in on-wire order. *data* should be omitted when encoding a packet. """ def __init__(self, data=None, protocols=None, parse_cls=ethernet.ethernet): super(Packet, self).__init__() self.data = data if protocols is None: self.protocols = [] else: self.protocols = protocols if self.data: self._parser(parse_cls) def _parser(self, cls): rest_data = self.data while cls: try: proto, cls, rest_data = cls.parser(rest_data) except struct.error: break if proto: self.protocols.append(proto) if rest_data: self.protocols.append(rest_data) def serialize(self): """Encode a packet and store the resulted bytearray in self.data. This method is legal only when encoding a packet. 
""" self.data = bytearray() r = self.protocols[::-1] for i, p in enumerate(r): if isinstance(p, packet_base.PacketBase): if i == len(r) - 1: prev = None else: prev = r[i + 1] data = p.serialize(self.data, prev) else: data = str(p) self.data = data + self.data def add_protocol(self, proto): """Register a protocol *proto* for this packet. This method is legal only when encoding a packet. When encoding a packet, register a protocol (ethernet, ipv4, ...) header to add to this packet. Protocol headers should be registered in on-wire order before calling self.serialize. """ self.protocols.append(proto) def get_protocols(self, protocol): """Returns a list of protocols that matches to the specified protocol. """ if isinstance(protocol, packet_base.PacketBase): protocol = protocol.__class__ assert issubclass(protocol, packet_base.PacketBase) return [p for p in self.protocols if isinstance(p, protocol)] def get_protocol(self, protocol): """Returns the firstly found protocol that matches to the specified protocol. 
""" result = self.get_protocols(protocol) if len(result) > 0: return result[0] return None def __div__(self, trailer): self.add_protocol(trailer) return self def __truediv__(self, trailer): return self.__div__(trailer) def __iter__(self): return iter(self.protocols) def __getitem__(self, idx): return self.protocols[idx] def __setitem__(self, idx, item): self.protocols[idx] = item def __delitem__(self, idx): del self.protocols[idx] def __len__(self): return len(self.protocols) def __contains__(self, protocol): if (inspect.isclass(protocol) and issubclass(protocol, packet_base.PacketBase)): return protocol in [p.__class__ for p in self.protocols] return protocol in self.protocols def __str__(self): return ', '.join(repr(protocol) for protocol in self.protocols) __repr__ = __str__ # note: str(list) uses __repr__ for elements # XXX: Hack for preventing recursive import def _PacketBase__div__(self, trailer): pkt = Packet() pkt.add_protocol(self) pkt.add_protocol(trailer) return pkt packet_base.PacketBase.__div__ = _PacketBase__div__ packet_base.PacketBase.__truediv__ = _PacketBase__div__
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python import psycopg2 conn = psycopg2.connect(database="hpc", user="hpc", password="123456", host="localhost", port="5432") print "Open database successfully" cursor = conn.cursor() data_list = [(2, '2003.txt'), (3, '2004.txt'), (4, '2005.txt'), (5, '2006.txt'), (6, '2007.txt'), (7, '2008.txt'), (8, '2009.txt'), (9, '2010.txt'), (10, '2011.txt'), (11, '2012.txt')] for nth, name in data_list: file = open(name) while True: rank = file.readline() if not rank: break manufacturer = file.readline().strip() computer = file.readline().strip() site = file.readline().strip() year = file.readline().strip() segment = file.readline().strip() total_cores = file.readline().strip() rmax = file.readline().strip() reference = file.readline().strip() rpeak = file.readline().strip() efficiency = file.readline().strip() #print "INSERT INTO rank_toponehundred (nth, rank, manufacturer, computer, site, year, segment, total_cores, rmax, reference, rpeak, efficiency) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);" % (nth, rank, manufacturer, computer, site, year, segment, total_cores, rmax, reference, rpeak, efficiency) cursor.execute("INSERT INTO rank_toponehundred (nth, rank, manufacturer, computer, site, year, segment, total_cores, rmax, reference, rpeak, efficiency) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);", (nth, rank, manufacturer, computer, site, year, segment, total_cores, rmax, reference, rpeak, efficiency)) conn.commit() conn.close()
unknown
codeparrot/codeparrot-clean
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.compute.providers import Provider from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver __all__ = [ 'AuroraComputeNodeDriver' ] class AuroraComputeNodeDriver(CloudStackNodeDriver): type = Provider.AURORACOMPUTE name = 'PCextreme AuroraCompute' website = 'https://www.pcextreme.nl/en/aurora/compute' host = 'cloud.pcextreme.nl' path = '/api'
unknown
codeparrot/codeparrot-clean
"""Tests for Incremental PCA.""" import itertools import warnings import numpy as np import pytest from numpy.testing import assert_allclose, assert_array_equal from sklearn import datasets from sklearn.decomposition import PCA, IncrementalPCA from sklearn.utils._testing import ( assert_allclose_dense_sparse, assert_almost_equal, assert_array_almost_equal, ) from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS iris = datasets.load_iris() def test_incremental_pca(): # Incremental PCA on dense arrays. X = iris.data batch_size = X.shape[0] // 3 ipca = IncrementalPCA(n_components=2, batch_size=batch_size) pca = PCA(n_components=2) pca.fit_transform(X) X_transformed = ipca.fit_transform(X) assert X_transformed.shape == (X.shape[0], 2) np.testing.assert_allclose( ipca.explained_variance_ratio_.sum(), pca.explained_variance_ratio_.sum(), rtol=1e-3, ) for n_components in [1, 2, X.shape[1]]: ipca = IncrementalPCA(n_components, batch_size=batch_size) ipca.fit(X) cov = ipca.get_covariance() precision = ipca.get_precision() np.testing.assert_allclose( np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-13 ) @pytest.mark.parametrize( "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + LIL_CONTAINERS ) def test_incremental_pca_sparse(sparse_container): # Incremental PCA on sparse arrays. 
X = iris.data pca = PCA(n_components=2) pca.fit_transform(X) X_sparse = sparse_container(X) batch_size = X_sparse.shape[0] // 3 ipca = IncrementalPCA(n_components=2, batch_size=batch_size) X_transformed = ipca.fit_transform(X_sparse) assert X_transformed.shape == (X_sparse.shape[0], 2) np.testing.assert_allclose( ipca.explained_variance_ratio_.sum(), pca.explained_variance_ratio_.sum(), rtol=1e-3, ) for n_components in [1, 2, X.shape[1]]: ipca = IncrementalPCA(n_components, batch_size=batch_size) ipca.fit(X_sparse) cov = ipca.get_covariance() precision = ipca.get_precision() np.testing.assert_allclose( np.dot(cov, precision), np.eye(X_sparse.shape[1]), atol=1e-13 ) with pytest.raises( TypeError, match=( "IncrementalPCA.partial_fit does not support " "sparse input. Either convert data to dense " "or use IncrementalPCA.fit to do so in batches." ), ): ipca.partial_fit(X_sparse) def test_incremental_pca_check_projection(global_random_seed): # Test that the projection of data is correct. rng = np.random.RandomState(global_random_seed) n, p = 100, 3 X = rng.randn(n, p) * 0.1 X[:10] += np.array([3, 4, 5]) Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5]) # Get the reconstruction of the generated data X # Note that Xt has the same "components" as X, just separated # This is what we want to ensure is recreated correctly Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt) # Normalize Yt /= np.sqrt((Yt**2).sum()) # Make sure that the first element of Yt is ~1, this means # the reconstruction worked as expected assert_almost_equal(np.abs(Yt[0][0]), 1.0, 1) def test_incremental_pca_inverse(global_random_seed): # Test that the projection of data can be inverted. 
rng = np.random.RandomState(global_random_seed) n, p = 50, 3 X = rng.randn(n, p) # spherical data X[:, 1] *= 0.00001 # make middle component relatively small X += [5, 4, 3] # make a large mean # same check that we can find the original data from the transformed # signal (since the data is almost of rank n_components) ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X) Y = ipca.transform(X) Y_inverse = ipca.inverse_transform(Y) assert_almost_equal(X, Y_inverse, decimal=3) def test_incremental_pca_validation(): # Test that n_components is <= n_features. X = np.array([[0, 1, 0], [1, 0, 0]]) n_samples, n_features = X.shape n_components = 4 with pytest.raises( ValueError, match=( "n_components={} invalid" " for n_features={}, need more rows than" " columns for IncrementalPCA" " processing".format(n_components, n_features) ), ): IncrementalPCA(n_components, batch_size=10).fit(X) # Test that n_components is also <= n_samples in first call to partial fit. n_components = 3 with pytest.raises( ValueError, match=( f"n_components={n_components} must be less or equal to the batch " f"number of samples {n_samples} for the first partial_fit call." 
), ): IncrementalPCA(n_components=n_components).partial_fit(X) def test_n_samples_equal_n_components(): # Ensures no warning is raised when n_samples==n_components # Non-regression test for gh-19050 ipca = IncrementalPCA(n_components=5) with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) ipca.partial_fit(np.random.randn(5, 7)) with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) ipca.fit(np.random.randn(5, 7)) def test_n_components_none(): # Ensures that n_components == None is handled correctly rng = np.random.RandomState(1999) for n_samples, n_features in [(50, 10), (10, 50)]: X = rng.rand(n_samples, n_features) ipca = IncrementalPCA(n_components=None) # First partial_fit call, ipca.n_components_ is inferred from # min(X.shape) ipca.partial_fit(X) assert ipca.n_components_ == min(X.shape) # Second partial_fit call, ipca.n_components_ is inferred from # ipca.components_ computed from the first partial_fit call ipca.partial_fit(X) assert ipca.n_components_ == ipca.components_.shape[0] def test_incremental_pca_set_params(): # Test that components_ sign is stable over batch sizes. rng = np.random.RandomState(1999) n_samples = 100 n_features = 20 X = rng.randn(n_samples, n_features) X2 = rng.randn(n_samples, n_features) X3 = rng.randn(n_samples, n_features) ipca = IncrementalPCA(n_components=20) ipca.fit(X) # Decreasing number of components ipca.set_params(n_components=10) with pytest.raises(ValueError): ipca.partial_fit(X2) # Increasing number of components ipca.set_params(n_components=15) with pytest.raises(ValueError): ipca.partial_fit(X3) # Returning to original setting ipca.set_params(n_components=20) ipca.partial_fit(X) def test_incremental_pca_num_features_change(): # Test that changing n_components will raise an error. 
rng = np.random.RandomState(1999) n_samples = 100 X = rng.randn(n_samples, 20) X2 = rng.randn(n_samples, 50) ipca = IncrementalPCA(n_components=None) ipca.fit(X) with pytest.raises(ValueError): ipca.partial_fit(X2) def test_incremental_pca_batch_signs(global_random_seed): # Test that components_ sign is stable over batch sizes. rng = np.random.RandomState(global_random_seed) n_samples = 100 n_features = 3 X = rng.randn(n_samples, n_features) all_components = [] batch_sizes = np.arange(10, 20) for batch_size in batch_sizes: ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) all_components.append(ipca.components_) for i, j in itertools.pairwise(all_components): assert_almost_equal(np.sign(i), np.sign(j), decimal=6) def test_incremental_pca_partial_fit_small_batch(): # Test that there is no minimum batch size after the first partial_fit # Non-regression test rng = np.random.RandomState(1999) n, p = 50, 3 X = rng.randn(n, p) # spherical data X[:, 1] *= 0.00001 # make middle component relatively small X += [5, 4, 3] # make a large mean n_components = p pipca = IncrementalPCA(n_components=n_components) pipca.partial_fit(X[:n_components]) for idx in range(n_components, n): pipca.partial_fit(X[idx : idx + 1]) pca = PCA(n_components=n_components) pca.fit(X) assert_allclose(pca.components_, pipca.components_, atol=1e-3) def test_incremental_pca_batch_values(global_random_seed): # Test that components_ values are stable over batch sizes. 
rng = np.random.RandomState(global_random_seed) n_samples = 100 n_features = 3 X = rng.randn(n_samples, n_features) all_components = [] batch_sizes = np.arange(20, 40, 3) for batch_size in batch_sizes: ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) all_components.append(ipca.components_) for i, j in itertools.pairwise(all_components): assert_almost_equal(i, j, decimal=1) def test_incremental_pca_batch_rank(): # Test sample size in each batch is always larger or equal to n_components rng = np.random.RandomState(1999) n_samples = 100 n_features = 20 X = rng.randn(n_samples, n_features) all_components = [] batch_sizes = np.arange(20, 90, 3) for batch_size in batch_sizes: ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X) all_components.append(ipca.components_) for components_i, components_j in itertools.pairwise(all_components): assert_allclose_dense_sparse(components_i, components_j) def test_incremental_pca_partial_fit(global_random_seed): # Test that fit and partial_fit get equivalent results. rng = np.random.RandomState(global_random_seed) n, p = 50, 3 X = rng.randn(n, p) # spherical data X[:, 1] *= 0.00001 # make middle component relatively small X += [5, 4, 3] # make a large mean # same check that we can find the original data from the transformed # signal (since the data is almost of rank n_components) batch_size = 10 ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X) pipca = IncrementalPCA(n_components=2, batch_size=batch_size) # Add one to make sure endpoint is included batch_itr = np.arange(0, n + 1, batch_size) for i, j in itertools.pairwise(batch_itr): pipca.partial_fit(X[i:j, :]) assert_almost_equal(ipca.components_, pipca.components_, decimal=3) def test_incremental_pca_against_pca_iris(): # Test that IncrementalPCA and PCA are approximate (to a sign flip). 
X = iris.data Y_pca = PCA(n_components=2).fit_transform(X) Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X) assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1) def test_incremental_pca_against_pca_random_data(global_random_seed): # Test that IncrementalPCA and PCA are approximate (to a sign flip). rng = np.random.RandomState(global_random_seed) n_samples = 100 n_features = 3 X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features) Y_pca = PCA(n_components=3).fit_transform(X) Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X) assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1) def test_explained_variances(): # Test that PCA and IncrementalPCA calculations match X = datasets.make_low_rank_matrix( 1000, 100, tail_strength=0.0, effective_rank=10, random_state=1999 ) prec = 3 n_samples, n_features = X.shape for nc in [None, 99]: pca = PCA(n_components=nc).fit(X) ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X) assert_almost_equal( pca.explained_variance_, ipca.explained_variance_, decimal=prec ) assert_almost_equal( pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec ) assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec) def test_singular_values(global_random_seed): # Check that the IncrementalPCA output has the correct singular values rng = np.random.RandomState(global_random_seed) n_samples = 1000 n_features = 100 X = datasets.make_low_rank_matrix( n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng ) pca = PCA(n_components=10, svd_solver="full", random_state=rng).fit(X) ipca = IncrementalPCA(n_components=10, batch_size=150).fit(X) assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2) # Compare to the Frobenius norm X_pca = pca.transform(X) X_ipca = ipca.transform(X) assert_array_almost_equal( np.sum(pca.singular_values_**2.0), np.linalg.norm(X_pca, "fro") ** 2.0, 12 ) assert_array_almost_equal( 
np.sum(ipca.singular_values_**2.0), np.linalg.norm(X_ipca, "fro") ** 2.0, 2 ) # Compare to the 2-norms of the score vectors assert_array_almost_equal( pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), 12 ) assert_array_almost_equal( ipca.singular_values_, np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2 ) # Set the singular values and see what we get back rng = np.random.RandomState(global_random_seed) n_samples = 100 n_features = 110 X = datasets.make_low_rank_matrix( n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng ) pca = PCA(n_components=3, svd_solver="full", random_state=rng) ipca = IncrementalPCA(n_components=3, batch_size=100) X_pca = pca.fit_transform(X) X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0)) X_pca[:, 0] *= 3.142 X_pca[:, 1] *= 2.718 X_hat = np.dot(X_pca, pca.components_) pca.fit(X_hat) ipca.fit(X_hat) assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14) assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14) def test_whitening(global_random_seed): # Test that PCA and IncrementalPCA transforms match to sign flip. X = datasets.make_low_rank_matrix( 1000, 10, tail_strength=0.0, effective_rank=2, random_state=global_random_seed ) atol = 1e-3 for nc in [None, 9]: pca = PCA(whiten=True, n_components=nc).fit(X) ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X) # Since the data is rank deficient, some components are pure noise. We # should not expect those dimensions to carry any signal and their # values might be arbitrarily changed by implementation details of the # internal SVD solver. We therefore filter them out before comparison. stable_mask = pca.explained_variance_ratio_ > 1e-12 Xt_pca = pca.transform(X) Xt_ipca = ipca.transform(X) assert_allclose( np.abs(Xt_pca)[:, stable_mask], np.abs(Xt_ipca)[:, stable_mask], atol=atol, ) # The noisy dimensions are in the null space of the inverse transform, # so they are not influencing the reconstruction. 
We therefore don't # need to apply the mask here. Xinv_ipca = ipca.inverse_transform(Xt_ipca) Xinv_pca = pca.inverse_transform(Xt_pca) assert_allclose(X, Xinv_ipca, atol=atol) assert_allclose(X, Xinv_pca, atol=atol) assert_allclose(Xinv_pca, Xinv_ipca, atol=atol) def test_incremental_pca_partial_fit_float_division(): # Test to ensure float division is used in all versions of Python # (non-regression test for issue #9489) rng = np.random.RandomState(0) A = rng.randn(5, 3) + 2 B = rng.randn(7, 3) + 5 pca = IncrementalPCA(n_components=2) pca.partial_fit(A) # Set n_samples_seen_ to be a floating point number instead of an int pca.n_samples_seen_ = float(pca.n_samples_seen_) pca.partial_fit(B) singular_vals_float_samples_seen = pca.singular_values_ pca2 = IncrementalPCA(n_components=2) pca2.partial_fit(A) pca2.partial_fit(B) singular_vals_int_samples_seen = pca2.singular_values_ np.testing.assert_allclose( singular_vals_float_samples_seen, singular_vals_int_samples_seen ) def test_incremental_pca_fit_overflow_error(): # Test for overflow error on Windows OS # (non-regression test for issue #17693) rng = np.random.RandomState(0) A = rng.rand(500000, 2) ipca = IncrementalPCA(n_components=2, batch_size=10000) ipca.fit(A) pca = PCA(n_components=2) pca.fit(A) np.testing.assert_allclose(ipca.singular_values_, pca.singular_values_) def test_incremental_pca_feature_names_out(): """Check feature names out for IncrementalPCA.""" ipca = IncrementalPCA(n_components=2).fit(iris.data) names = ipca.get_feature_names_out() assert_array_equal([f"incrementalpca{i}" for i in range(2)], names)
python
github
https://github.com/scikit-learn/scikit-learn
sklearn/decomposition/tests/test_incremental_pca.py
# NOTE(review): this is Django ~1.2-era code — 'copycompat' and the
# get_query_set (pre-get_queryset) naming are long since removed upstream.
from django.utils import copycompat as copy
from django.conf import settings
from django.db import router
from django.db.models.query import QuerySet, EmptyQuerySet, insert_query, RawQuerySet
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist


def ensure_default_manager(sender, **kwargs):
    """
    Ensures that a Model subclass contains a default manager and sets the
    _default_manager attribute on the class. Also sets up the _base_manager
    points to a plain Manager instance (which could be the same as
    _default_manager if it's not a subclass of Manager).
    """
    cls = sender
    if cls._meta.abstract:
        # Abstract models never get concrete managers attached.
        return
    if not getattr(cls, '_default_manager', None):
        # Create the default manager, if needed.
        try:
            # A field literally named 'objects' would collide with the
            # auto-added default manager — force the user to pick a name.
            cls._meta.get_field('objects')
            raise ValueError("Model %s must specify a custom Manager, because it has a field named 'objects'" % cls.__name__)
        except FieldDoesNotExist:
            pass
        cls.add_to_class('objects', Manager())
        cls._base_manager = cls.objects
    elif not getattr(cls, '_base_manager', None):
        default_mgr = cls._default_manager.__class__
        if (default_mgr is Manager or
                getattr(default_mgr, "use_for_related_fields", False)):
            cls._base_manager = cls._default_manager
        else:
            # Default manager isn't a plain Manager class, or a suitable
            # replacement, so we walk up the base class hierarchy until we
            # hit something appropriate.
            for base_class in default_mgr.mro()[1:]:
                if (base_class is Manager or
                        getattr(base_class, "use_for_related_fields", False)):
                    cls.add_to_class('_base_manager', base_class())
                    return
            raise AssertionError("Should never get here. Please report a bug, including your model and model manager setup.")

signals.class_prepared.connect(ensure_default_manager)


class Manager(object):
    # Tracks each time a Manager instance is created. Used to retain order.
    creation_counter = 0

    def __init__(self):
        super(Manager, self).__init__()
        self._set_creation_counter()
        self.model = None
        self._inherited = False
        self._db = None

    def contribute_to_class(self, model, name):
        """Attach this manager to *model* under attribute *name*."""
        # TODO: Use weakref because of possible memory leak / circular reference.
        self.model = model
        setattr(model, name, ManagerDescriptor(self))
        # The earliest-created manager on the model wins as default.
        if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
            model._default_manager = self
        if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
            model._meta.abstract_managers.append((self.creation_counter, name, self))
        else:
            model._meta.concrete_managers.append((self.creation_counter, name, self))

    def _set_creation_counter(self):
        """
        Sets the creation counter value for this instance and increments the
        class-level copy.
        """
        self.creation_counter = Manager.creation_counter
        Manager.creation_counter += 1

    def _copy_to_model(self, model):
        """
        Makes a copy of the manager and assigns it to 'model', which should be
        a child of the existing model (used when inheriting a manager from an
        abstract base class).
        """
        assert issubclass(model, self.model)
        mgr = copy.copy(self)
        mgr._set_creation_counter()
        mgr.model = model
        mgr._inherited = True
        return mgr

    def db_manager(self, using):
        # Shallow copy bound to an explicit database alias.
        obj = copy.copy(self)
        obj._db = using
        return obj

    @property
    def db(self):
        # Explicit alias wins; otherwise ask the router per-read.
        return self._db or router.db_for_read(self.model)

    #######################
    # PROXIES TO QUERYSET #
    #######################

    def get_empty_query_set(self):
        return EmptyQuerySet(self.model, using=self._db)

    def get_query_set(self):
        """Returns a new QuerySet object.  Subclasses can override this method
        to easily customize the behavior of the Manager.
        """
        return QuerySet(self.model, using=self._db)

    def none(self):
        return self.get_empty_query_set()

    def all(self):
        return self.get_query_set()

    def count(self):
        return self.get_query_set().count()

    def dates(self, *args, **kwargs):
        return self.get_query_set().dates(*args, **kwargs)

    def distinct(self, *args, **kwargs):
        return self.get_query_set().distinct(*args, **kwargs)

    def extra(self, *args, **kwargs):
        return self.get_query_set().extra(*args, **kwargs)

    def get(self, *args, **kwargs):
        return self.get_query_set().get(*args, **kwargs)

    def get_or_create(self, **kwargs):
        return self.get_query_set().get_or_create(**kwargs)

    def create(self, **kwargs):
        return self.get_query_set().create(**kwargs)

    def filter(self, *args, **kwargs):
        return self.get_query_set().filter(*args, **kwargs)

    def aggregate(self, *args, **kwargs):
        return self.get_query_set().aggregate(*args, **kwargs)

    def annotate(self, *args, **kwargs):
        return self.get_query_set().annotate(*args, **kwargs)

    def complex_filter(self, *args, **kwargs):
        return self.get_query_set().complex_filter(*args, **kwargs)

    def exclude(self, *args, **kwargs):
        return self.get_query_set().exclude(*args, **kwargs)

    def in_bulk(self, *args, **kwargs):
        return self.get_query_set().in_bulk(*args, **kwargs)

    def iterator(self, *args, **kwargs):
        return self.get_query_set().iterator(*args, **kwargs)

    def latest(self, *args, **kwargs):
        return self.get_query_set().latest(*args, **kwargs)

    def order_by(self, *args, **kwargs):
        return self.get_query_set().order_by(*args, **kwargs)

    def select_related(self, *args, **kwargs):
        return self.get_query_set().select_related(*args, **kwargs)

    def prefetch_related(self, *args, **kwargs):
        return self.get_query_set().prefetch_related(*args, **kwargs)

    def values(self, *args, **kwargs):
        return self.get_query_set().values(*args, **kwargs)

    def values_list(self, *args, **kwargs):
        return self.get_query_set().values_list(*args, **kwargs)

    def update(self, *args, **kwargs):
        return self.get_query_set().update(*args, **kwargs)

    def reverse(self, *args, **kwargs):
        return self.get_query_set().reverse(*args, **kwargs)

    def defer(self, *args, **kwargs):
        return self.get_query_set().defer(*args, **kwargs)

    def only(self, *args, **kwargs):
        return self.get_query_set().only(*args, **kwargs)

    def using(self, *args, **kwargs):
        return self.get_query_set().using(*args, **kwargs)

    def exists(self, *args, **kwargs):
        return self.get_query_set().exists(*args, **kwargs)

    def _insert(self, values, **kwargs):
        return insert_query(self.model, values, **kwargs)

    def _update(self, values, **kwargs):
        return self.get_query_set()._update(values, **kwargs)

    def raw(self, raw_query, params=None, *args, **kwargs):
        return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)


class ManagerDescriptor(object):
    # This class ensures managers aren't accessible via model instances.
    # For example, Poll.objects works, but poll_obj.objects raises AttributeError.
    def __init__(self, manager):
        self.manager = manager

    def __get__(self, instance, type=None):
        # NOTE(review): 'instance != None' should idiomatically be
        # 'instance is not None', and the 'type' parameter shadows the
        # builtin — kept verbatim here to avoid touching framework behavior.
        if instance != None:
            raise AttributeError("Manager isn't accessible via %s instances" % type.__name__)
        return self.manager


class EmptyManager(Manager):
    # A Manager whose queries always yield nothing.
    def get_query_set(self):
        return self.get_empty_query_set()
unknown
codeparrot/codeparrot-clean
#-----------------------------------------------------------------------------
# class used to store graded responses to CAPA questions
#
# Used by responsetypes and capa_problem


class CorrectMap(object):
    """
    Stores map between answer_id and response evaluation result for each
    question in a capa problem.

    The response evaluation result for each answer_id includes
    (correctness, npoints, msg, hint, hintmode).

    - correctness : either 'correct' or 'incorrect'
    - npoints     : None, or integer specifying number of points awarded for
                    this answer_id
    - msg         : string (may have HTML) giving extra message response
                    (displayed below textline or textbox)
    - hint        : string (may have HTML) giving optional hint
                    (displayed below textline or textbox, above msg)
    - hintmode    : one of (None, 'on_request', 'always') criteria for
                    displaying hint
    - queuestate  : Dict {key:'', time:''} where key is a secret string, and
                    time is a string dump of a DateTime object in the format
                    '%Y%m%d%H%M%S'. Is None when not queued

    Behaves as a dict.
    """
    def __init__(self, *args, **kwargs):
        # start with empty dict; bound methods expose dict-style iteration
        self.cmap = dict()
        self.items = self.cmap.items
        self.keys = self.cmap.keys
        self.overall_message = ""
        self.set(*args, **kwargs)

    def __getitem__(self, *args, **kwargs):
        return self.cmap.__getitem__(*args, **kwargs)

    def __iter__(self):
        return self.cmap.__iter__()

    # See the documentation for 'set_dict' for the use of kwargs
    def set(
        self,
        answer_id=None,
        correctness=None,
        npoints=None,
        msg='',
        hint='',
        hintmode=None,
        queuestate=None,
        **kwargs
    ):
        """Record (or overwrite) the evaluation result for *answer_id*.

        A call without answer_id is a no-op, which makes the bare
        CorrectMap() constructor work.  Unknown keyword arguments are
        silently ignored (see set_dict migration notes).
        """
        if answer_id is not None:
            self.cmap[answer_id] = {
                'correctness': correctness,
                'npoints': npoints,
                'msg': msg,
                'hint': hint,
                'hintmode': hintmode,
                'queuestate': queuestate,
            }

    def __repr__(self):
        return repr(self.cmap)

    def get_dict(self):
        """
        return dict version of self
        """
        return self.cmap

    def set_dict(self, correct_map):
        """
        Set internal dict of CorrectMap to provided correct_map dict

        correct_map is saved by LMS as a plaintext JSON dump of the correctmap
        dict. This means that when the definition of CorrectMap (e.g. its
        properties) are altered, an existing correct_map dict will not coincide
        with the newest CorrectMap format as defined by self.set.

        For graceful migration, feed the contents of each correct map to
        self.set, rather than making a direct copy of the given correct_map
        dict. This way, the common keys between the incoming correct_map dict
        and the new CorrectMap instance will be written, while mismatched keys
        will be gracefully ignored.

        Special migration case:
            If correct_map is a one-level dict, then convert it to the new
            dict of dicts format.
        """
        # empty current dict
        self.__init__()

        # create new dict entries.  next(iter(...)) works on both Python 2
        # lists and Python 3 dict views (the original values()[0] indexing
        # raised TypeError on Python 3).
        if correct_map and not isinstance(next(iter(correct_map.values())), dict):
            # special migration from the old one-level format
            for k in correct_map:
                self.set(k, correctness=correct_map[k])
        else:
            for k in correct_map:
                self.set(k, **correct_map[k])

    def is_correct(self, answer_id):
        """Return True/False for a known answer_id, None if unknown."""
        if answer_id in self.cmap:
            return self.cmap[answer_id]['correctness'] in ['correct', 'partially-correct']
        return None

    def is_queued(self, answer_id):
        return answer_id in self.cmap and self.cmap[answer_id]['queuestate'] is not None

    def is_right_queuekey(self, answer_id, test_key):
        return self.is_queued(answer_id) and self.cmap[answer_id]['queuestate']['key'] == test_key

    def get_queuetime_str(self, answer_id):
        # NOTE: raises KeyError for an unknown answer_id (pre-existing
        # contract, kept as-is).
        if self.cmap[answer_id]['queuestate']:
            return self.cmap[answer_id]['queuestate']['time']
        else:
            return None

    def get_npoints(self, answer_id):
        """Return the number of points for an answer, used for partial credit."""
        npoints = self.get_property(answer_id, 'npoints')
        if npoints is not None:
            return npoints
        elif self.is_correct(answer_id):
            return 1
        # if not correct and no points have been assigned, return 0
        return 0

    def set_property(self, answer_id, property, value):
        if answer_id in self.cmap:
            self.cmap[answer_id][property] = value
        else:
            self.cmap[answer_id] = {property: value}

    def get_property(self, answer_id, property, default=None):
        """Return one stored property for answer_id, or *default*."""
        if answer_id in self.cmap:
            # Fixed: the lookup expression had been severed from the return
            # statement, so known answer_ids always yielded None.
            return self.cmap[answer_id].get(property, default)
        return default

    def get_correctness(self, answer_id):
        return self.get_property(answer_id, 'correctness')

    def get_msg(self, answer_id):
        return self.get_property(answer_id, 'msg', '')

    def get_hint(self, answer_id):
        return self.get_property(answer_id, 'hint', '')

    def get_hintmode(self, answer_id):
        return self.get_property(answer_id, 'hintmode', None)

    def set_hint_and_mode(self, answer_id, hint, hintmode):
        """
        - hint     : (string) HTML text for hint
        - hintmode : (string) mode for hint display ('always' or 'on_request')
        """
        self.set_property(answer_id, 'hint', hint)
        self.set_property(answer_id, 'hintmode', hintmode)

    def update(self, other_cmap):
        """
        Update this CorrectMap with the contents of another CorrectMap
        """
        if not isinstance(other_cmap, CorrectMap):
            raise Exception('CorrectMap.update called with invalid argument %s' % other_cmap)
        self.cmap.update(other_cmap.get_dict())
        self.set_overall_message(other_cmap.get_overall_message())

    def set_overall_message(self, message_str):
        """
        Set a message that applies to the question as a whole,
        rather than to individual inputs.  Falsy values become "".
        """
        self.overall_message = str(message_str) if message_str else ""

    def get_overall_message(self):
        """
        Retrieve a message that applies to the question as a whole.
        If no message is available, returns the empty string
        """
        return self.overall_message
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-04-08 16:07 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('UserManagement', '0036_staff_staffprofile'), ] operations = [ migrations.RenameField( model_name='expertprofile', old_name='team', new_name='coachedTeam', ), migrations.AddField( model_name='student', name='university', field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='UserManagement.Team'), ), migrations.AlterField( model_name='attendent', name='role', field=models.CharField(choices=[('negotiator', 'Negotiator'), ('mediator', 'Mediator'), ('assessor', 'Assessor'), ('staff', 'Staff Member'), ('session_management', 'Session Management'), ('expert_feedback', 'Expert Feedback')], max_length=35), ), ]
unknown
codeparrot/codeparrot-clean
import locale import pytest from pandas._config import detect_console_encoding class MockEncoding: """ Used to add a side effect when accessing the 'encoding' property. If the side effect is a str in nature, the value will be returned. Otherwise, the side effect should be an exception that will be raised. """ def __init__(self, encoding) -> None: super().__init__() self.val = encoding @property def encoding(self): return self.raise_or_return(self.val) @staticmethod def raise_or_return(val): if isinstance(val, str): return val else: raise val @pytest.mark.parametrize("empty,filled", [["stdin", "stdout"], ["stdout", "stdin"]]) def test_detect_console_encoding_from_stdout_stdin(monkeypatch, empty, filled): # Ensures that when sys.stdout.encoding or sys.stdin.encoding is used when # they have values filled. # GH 21552 with monkeypatch.context() as context: context.setattr(f"sys.{empty}", MockEncoding("")) context.setattr(f"sys.{filled}", MockEncoding(filled)) assert detect_console_encoding() == filled @pytest.mark.parametrize("encoding", [AttributeError, OSError, "ascii"]) def test_detect_console_encoding_fallback_to_locale(monkeypatch, encoding): # GH 21552 with monkeypatch.context() as context: context.setattr("locale.getpreferredencoding", lambda: "foo") context.setattr("sys.stdout", MockEncoding(encoding)) assert detect_console_encoding() == "foo" @pytest.mark.parametrize( "std,locale", [ ["ascii", "ascii"], ["ascii", locale.Error], [AttributeError, "ascii"], [AttributeError, locale.Error], [OSError, "ascii"], [OSError, locale.Error], ], ) def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale): # When both the stdout/stdin encoding and locale preferred encoding checks # fail (or return 'ascii', we should default to the sys default encoding. 
# GH 21552 with monkeypatch.context() as context: context.setattr( "locale.getpreferredencoding", lambda: MockEncoding.raise_or_return(locale) ) context.setattr("sys.stdout", MockEncoding(std)) context.setattr("sys.getdefaultencoding", lambda: "sysDefaultEncoding") assert detect_console_encoding() == "sysDefaultEncoding"
python
github
https://github.com/pandas-dev/pandas
pandas/tests/io/formats/test_console.py
""" Created on 30 Jun 2017 @author: Bruno Beloff (bruno.beloff@southcoastscience.com) """ import optparse # -------------------------------------------------------------------------------------------------------------------- class CmdVerbose(object): """unix command line handler""" def __init__(self): """ Constructor """ self.__parser = optparse.OptionParser(usage="%prog [-v]", version="%prog 1.0") # optional... self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False, help="report narrative to stderr") self.__opts, self.__args = self.__parser.parse_args() # ---------------------------------------------------------------------------------------------------------------- @property def verbose(self): return self.__opts.verbose # ---------------------------------------------------------------------------------------------------------------- def __str__(self, *args, **kwargs): return "CmdVerbose:{verbose:%s}" % self.verbose
unknown
codeparrot/codeparrot-clean
// Copyright 2023 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package e2e import ( "bytes" "context" "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/json" "encoding/pem" "fmt" "math/big" "net" "os" "strings" "testing" "time" "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" "go.etcd.io/etcd/client/pkg/v3/transport" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/pkg/v3/stringutil" "go.etcd.io/etcd/tests/v3/framework/e2e" "go.etcd.io/etcd/tests/v3/framework/integration" ) func newClient(t *testing.T, entpoints []string, cfg e2e.ClientConfig) *clientv3.Client { tlscfg, err := tlsInfo(t, cfg) if err != nil { t.Fatal(err) } ccfg := clientv3.Config{ Endpoints: entpoints, DialTimeout: 5 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, //nolint:staticcheck // TODO: remove for a supported version } if tlscfg != nil { ccfg.TLS, err = tlscfg.ClientConfig() if err != nil { t.Fatal(err) } } c, err := clientv3.New(ccfg) if err != nil { t.Fatal(err) } t.Cleanup(func() { c.Close() }) return c } // tlsInfo follows the Client-to-server communication in https://etcd.io/docs/v3.6/op-guide/security/#basic-setup func tlsInfo(tb testing.TB, cfg e2e.ClientConfig) (*transport.TLSInfo, error) { switch cfg.ConnectionType { case e2e.ClientNonTLS, e2e.ClientTLSAndNonTLS: return nil, nil case e2e.ClientTLS: if cfg.AutoTLS { tls, err := transport.SelfCert(zap.NewNop(), tb.TempDir(), []string{"localhost"}, 1) if 
err != nil { return nil, fmt.Errorf("failed to generate cert: %w", err) } return &tls, nil } return &integration.TestTLSInfo, nil default: return nil, fmt.Errorf("config %v not supported", cfg) } } func fillEtcdWithData(ctx context.Context, c *clientv3.Client, dbSize int) error { g := errgroup.Group{} concurrency := 10 keyCount := 100 keysPerRoutine := keyCount / concurrency valueSize := dbSize / keyCount for i := 0; i < concurrency; i++ { i := i g.Go(func() error { for j := 0; j < keysPerRoutine; j++ { _, err := c.Put(ctx, fmt.Sprintf("%d", i*keysPerRoutine+j), stringutil.RandString(uint(valueSize))) if err != nil { return err } } return nil }) } return g.Wait() } func curl(endpoint string, method string, curlReq e2e.CURLReq, connType e2e.ClientConnType) (string, error) { args := e2e.CURLPrefixArgs(endpoint, e2e.ClientConfig{ConnectionType: connType}, false, method, curlReq) lines, err := e2e.RunUtilCompletion(args, nil) if err != nil { return "", err } return strings.Join(lines, "\n"), nil } func runCommandAndReadJSONOutput(args []string) (map[string]any, error) { lines, err := e2e.RunUtilCompletion(args, nil) if err != nil { return nil, err } var resp map[string]any err = json.Unmarshal([]byte(strings.Join(lines, "\n")), &resp) if err != nil { return nil, err } return resp, nil } func getMemberIDByName(ctx context.Context, c *e2e.EtcdctlV3, name string) (id uint64, found bool, err error) { resp, err := c.MemberList(ctx, false) if err != nil { return 0, false, err } for _, member := range resp.Members { if name == member.Name { return member.ID, true, nil } } return 0, false, nil } func generateCertsForIPs(tempDir string, ips []net.IP) (caFile string, certFiles []string, keyFiles []string, err error) { ca := &x509.Certificate{ SerialNumber: big.NewInt(1001), Subject: pkix.Name{ Organization: []string{"etcd"}, OrganizationalUnit: []string{"etcd Security"}, Locality: []string{"San Francisco"}, Province: []string{"California"}, Country: []string{"USA"}, }, 
NotBefore: time.Now(), NotAfter: time.Now().AddDate(0, 0, 1), IsCA: true, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, } caKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { return "", nil, nil, err } caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caKey.PublicKey, caKey) if err != nil { return "", nil, nil, err } caFile, _, err = saveCertToFile(tempDir, caBytes, nil) if err != nil { return "", nil, nil, err } for i, ip := range ips { cert := &x509.Certificate{ SerialNumber: big.NewInt(1001 + int64(i)), Subject: pkix.Name{ Organization: []string{"etcd"}, OrganizationalUnit: []string{"etcd Security"}, Locality: []string{"San Francisco"}, Province: []string{"California"}, Country: []string{"USA"}, }, IPAddresses: []net.IP{ip}, NotBefore: time.Now(), NotAfter: time.Now().AddDate(0, 0, 1), SubjectKeyId: []byte{1, 2, 3, 4, 5}, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, KeyUsage: x509.KeyUsageDigitalSignature, } certKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { return "", nil, nil, err } certBytes, err := x509.CreateCertificate(rand.Reader, cert, ca, &certKey.PublicKey, caKey) if err != nil { return "", nil, nil, err } certFile, keyFile, err := saveCertToFile(tempDir, certBytes, certKey) if err != nil { return "", nil, nil, err } certFiles = append(certFiles, certFile) keyFiles = append(keyFiles, keyFile) } return caFile, certFiles, keyFiles, nil } func saveCertToFile(tempDir string, certBytes []byte, key *rsa.PrivateKey) (certFile string, keyFile string, err error) { certPEM := new(bytes.Buffer) pem.Encode(certPEM, &pem.Block{ Type: "CERTIFICATE", Bytes: certBytes, }) cf, err := os.CreateTemp(tempDir, "*.crt") if err != nil { return "", "", err } defer cf.Close() if _, err := cf.Write(certPEM.Bytes()); err != nil { return "", "", err } if key != 
nil { certKeyPEM := new(bytes.Buffer) pem.Encode(certKeyPEM, &pem.Block{ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key), }) kf, err := os.CreateTemp(tempDir, "*.key.insecure") if err != nil { return "", "", err } defer kf.Close() if _, err := kf.Write(certKeyPEM.Bytes()); err != nil { return "", "", err } return cf.Name(), kf.Name(), nil } return cf.Name(), "", nil } func getLocalIP() (string, error) { conn, err := net.Dial("udp", "8.8.8.8:80") if err != nil { return "", err } defer conn.Close() localAddress := conn.LocalAddr().(*net.UDPAddr) return localAddress.IP.String(), nil }
go
github
https://github.com/etcd-io/etcd
tests/e2e/utils.go
__source__ = 'https://leetcode.com/problems/maximum-width-ramp/' # Time: O() # Space: O() # # Description: Leetcode # 962. Maximum Width Ramp # # Given an array A of integers, a ramp is a tuple (i, j) for which i < j and A[i] <= A[j]. # The width of such a ramp is j - i. # # Find the maximum width of a ramp in A. If one doesn't exist, return 0. # # Example 1: # # Input: [6,0,8,2,1,5] # Output: 4 # Explanation: # The maximum width ramp is achieved at (i, j) = (1, 5): A[1] = 0 and A[5] = 5. # # Example 2: # # Input: [9,8,1,0,1,9,4,0,4,1] # Output: 7 # Explanation: # The maximum width ramp is achieved at (i, j) = (2, 9): A[2] = 1 and A[9] = 1. # # Note: # # 2 <= A.length <= 50000 # 0 <= A[i] <= 50000 # import unittest # 260ms 40.40% class Solution(object): def maxWidthRamp(self, A): """ :type A: List[int] :rtype: int """ ans = 0 m = float('inf') for i in sorted(range(len(A)), key = A.__getitem__): ans = max(ans, i - m) m = min(m, i) return ans class TestMethods(unittest.TestCase): def test_Local(self): self.assertEqual(1, 1) if __name__ == '__main__': unittest.main() Java = ''' # Thought: https://leetcode.com/problems/maximum-width-ramp/solution/ # Approach 1: Sort Complexity Analysis Time Complexity: O(NLogN), where N is the length of A. Space Complexity: O(N), depending on the implementation of the sorting function. # 160ms 40.70% class Solution { public int maxWidthRamp(int[] A) { int N = A.length; Integer[] B = new Integer[N]; for (int i = 0; i < N; ++i) B[i] = i; Arrays.sort(B, (i, j) -> ((Integer) A[i]).compareTo(A[j])); int ans = 0; int m = N; for (int i: B) { ans = Math.max(ans, i - m); m = Math.min(m, i); } return ans; } } Approach 2: Binary Search Candidates Complexity Analysis Time Complexity: O(NlogN), where N is the length of A. 
Space Complexity: O(N) # 38ms 73.26% import java.awt.Point; class Solution { public int maxWidthRamp(int[] A) { int N = A.length; int ans = 0; List<Point> candidates = new ArrayList(); candidates.add(new Point(A[N-1], N-1)); // candidates: i's decreasing, by increasing value of A[i] for (int i = N-2; i >= 0; --i) { // Find largest j in candidates with A[j] >= A[i] int lo = 0, hi = candidates.size(); while (lo < hi) { int mi = lo + (hi - lo) / 2; if (candidates.get(mi).x < A[i]) lo = mi + 1; else hi = mi; } if (lo < candidates.size()) { int j = candidates.get(lo).y; ans = Math.max(ans, j - i); } else { candidates.add(new Point(A[i], i)); } } return ans; } } # 12ms 94.64% class Solution { public int maxWidthRamp(int[] A) { int[] s = new int[A.length]; int ptr = 0; int res = 0, n = A.length; for (int i = 0; i < n; ++i){ if (ptr == 0 || A[s[ptr-1]] > A[i]) s[ptr++] = i; } for (int i = n - 1; i > res; --i) { while (ptr != 0 && A[s[ptr-1]] <= A[i]) { res = Math.max(res, i - s[--ptr]); } } return res; } } '''
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """This tool creates an html visualization of a TensorFlow Lite graph. Example usage: python visualize.py foo.tflite foo.html """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import sys from tensorflow.python.platform import resource_loader # Schema to use for flatbuffers _SCHEMA = "third_party/tensorflow/contrib/lite/schema/schema.fbs" # TODO(angerson): fix later when rules are simplified.. _SCHEMA = resource_loader.get_path_to_datafile("../schema/schema.fbs") _BINARY = resource_loader.get_path_to_datafile("../../../../flatbuffers/flatc") # Account for different package positioning internal vs. external. 
if not os.path.exists(_BINARY): _BINARY = resource_loader.get_path_to_datafile( "../../../../../flatbuffers/flatc") if not os.path.exists(_SCHEMA): raise RuntimeError("Sorry, schema file cannot be found at %r" % _SCHEMA) if not os.path.exists(_BINARY): raise RuntimeError("Sorry, flatc is not available at %r" % _BINARY) # A CSS description for making the visualizer _CSS = """ <html> <head> <style> body {font-family: sans-serif; background-color: #ffaa00;} table {background-color: #eeccaa;} th {background-color: black; color: white;} h1 { background-color: ffaa00; padding:5px; color: black; } div { border-radius: 5px; background-color: #ffeecc; padding:5px; margin:5px; } .tooltip {color: blue;} .tooltip .tooltipcontent { visibility: hidden; color: black; background-color: yellow; padding: 5px; border-radius: 4px; position: absolute; z-index: 1; } .tooltip:hover .tooltipcontent { visibility: visible; } .edges line { stroke: #333333; } .nodes text { color: black; pointer-events: none; font-family: sans-serif; font-size: 11px; } </style> <script src="https://d3js.org/d3.v4.min.js"></script> </head> <body> """ _D3_HTML_TEMPLATE = """ <script> // Build graph data var graph = %s; var svg = d3.select("#subgraph%d"); var width = svg.attr("width"); var height = svg.attr("height"); var color = d3.scaleOrdinal(d3.schemeCategory20); var simulation = d3.forceSimulation() .force("link", d3.forceLink().id(function(d) {return d.id;})) .force("charge", d3.forceManyBody()) .force("center", d3.forceCenter(0.5 * width, 0.5 * height)); function buildGraph() { var edge = svg.append("g").attr("class", "edges").selectAll("line") .data(graph.edges).enter().append("line") // Make the node group var node = svg.selectAll(".nodes") .data(graph.nodes) .enter().append("g") .attr("class", "nodes") .call(d3.drag() .on("start", function(d) { if(!d3.event.active) simulation.alphaTarget(1.0).restart(); d.fx = d.x;d.fy = d.y; }) .on("drag", function(d) { d.fx = d3.event.x; d.fy = d3.event.y; }) 
.on("end", function(d) { if (!d3.event.active) simulation.alphaTarget(0); d.fx = d.fy = null; })); // Within the group, draw a circle for the node position and text // on the side. node.append("circle") .attr("r", "5px") .attr("fill", function(d) { return color(d.group); }) node.append("text") .attr("dx", 8).attr("dy", 5).text(function(d) { return d.name; }); // Setup force parameters and update position callback simulation.nodes(graph.nodes).on("tick", forceSimulationUpdated); simulation.force("link").links(graph.edges); function forceSimulationUpdated() { // Update edges. edge.attr("x1", function(d) {return d.source.x;}) .attr("y1", function(d) {return d.source.y;}) .attr("x2", function(d) {return d.target.x;}) .attr("y2", function(d) {return d.target.y;}); // Update node positions node.attr("transform", function(d) { return "translate(" + d.x + "," + d.y + ")"; }); } } buildGraph() </script> """ class OpCodeMapper(object): """Maps an opcode index to an op name.""" def __init__(self, data): self.code_to_name = {} for idx, d in enumerate(data["operator_codes"]): self.code_to_name[idx] = d["builtin_code"] def __call__(self, x): if x not in self.code_to_name: s = "<UNKNOWN>" else: s = self.code_to_name[x] return "%s (opcode=%d)" % (s, x) class DataSizeMapper(object): """For buffers, report the number of bytes.""" def __call__(self, x): if x is not None: return "%d bytes" % len(x) else: return "--" class TensorMapper(object): """Maps a list of tensor indices to a tooltip hoverable indicator of more.""" def __init__(self, subgraph_data): self.data = subgraph_data def __call__(self, x): html = "" html += "<span class='tooltip'><span class='tooltipcontent'>" for i in x: tensor = self.data["tensors"][i] html += str(i) + " " html += tensor["name"] + " " html += str(tensor["type"]) + " " html += repr(tensor["shape"]) + "<br>" html += "</span>" html += repr(x) html += "</span>" return html def GenerateGraph(subgraph_idx, g, opcode_mapper): """Produces the HTML required to 
have a d3 visualization of the dag.""" def TensorName(idx): return "t%d" % idx def OpName(idx): return "o%d" % idx edges = [] nodes = [] first = {} pixel_mult = 50 # TODO(aselle): multiplier for initial placement for op_index, op in enumerate(g["operators"]): for tensor_input_position, tensor_index in enumerate(op["inputs"]): if tensor_index not in first: first[tensor_index] = ( op_index * pixel_mult, tensor_input_position * pixel_mult - pixel_mult / 2) edges.append({ "source": TensorName(tensor_index), "target": OpName(op_index) }) for tensor_index in op["outputs"]: edges.append({ "target": TensorName(tensor_index), "source": OpName(op_index) }) nodes.append({ "id": OpName(op_index), "name": opcode_mapper(op["opcode_index"]), "group": 2, "x": pixel_mult, "y": op_index * pixel_mult }) for tensor_index, tensor in enumerate(g["tensors"]): initial_y = ( first[tensor_index] if tensor_index in first else len(g["operators"])) nodes.append({ "id": TensorName(tensor_index), "name": "%s (%d)" % (tensor["name"], tensor_index), "group": 1, "x": 2, "y": initial_y }) graph_str = json.dumps({"nodes": nodes, "edges": edges}) html = _D3_HTML_TEMPLATE % (graph_str, subgraph_idx) return html def GenerateTableHtml(items, keys_to_print, display_index=True): """Given a list of object values and keys to print, make an HTML table. Args: items: Items to print an array of dicts. keys_to_print: (key, display_fn). `key` is a key in the object. i.e. items[0][key] should exist. display_fn is the mapping function on display. i.e. the displayed html cell will have the string returned by `mapping_fn(items[0][key])`. display_index: add a column which is the index of each row in `items`. Returns: An html table. 
""" html = "" # Print the list of items html += "<table><tr>\n" html += "<tr>\n" if display_index: html += "<th>index</th>" for h, mapper in keys_to_print: html += "<th>%s</th>" % h html += "</tr>\n" for idx, tensor in enumerate(items): html += "<tr>\n" if display_index: html += "<td>%d</td>" % idx # print tensor.keys() for h, mapper in keys_to_print: val = tensor[h] if h in tensor else None val = val if mapper is None else mapper(val) html += "<td>%s</td>\n" % val html += "</tr>\n" html += "</table>\n" return html def CreateHtmlFile(tflite_input, html_output): """Given a tflite model in `tflite_input` file, produce html description.""" # Convert the model into a JSON flatbuffer using flatc (build if doesn't # exist. if not os.path.exists(tflite_input): raise RuntimeError("Invalid filename %r" % tflite_input) if tflite_input.endswith(".tflite") or tflite_input.endswith(".bin"): # Run convert cmd = ( _BINARY + " -t " "--strict-json --defaults-json -o /tmp {schema} -- {input}".format( input=tflite_input, schema=_SCHEMA)) print(cmd) os.system(cmd) real_output = ("/tmp/" + os.path.splitext( os.path.split(tflite_input)[-1])[0] + ".json") data = json.load(open(real_output)) elif tflite_input.endswith(".json"): data = json.load(open(tflite_input)) else: raise RuntimeError("Input file was not .tflite or .json") html = "" html += _CSS html += "<h1>TensorFlow Lite Model</h2>" data["filename"] = tflite_input # Avoid special case toplevel_stuff = [("filename", None), ("version", None), ("description", None)] html += "<table>\n" for key, mapping in toplevel_stuff: if not mapping: mapping = lambda x: x html += "<tr><th>%s</th><td>%s</td></tr>\n" % (key, mapping(data.get(key))) html += "</table>\n" # Spec on what keys to display buffer_keys_to_display = [("data", DataSizeMapper())] operator_keys_to_display = [("builtin_code", None)] for subgraph_idx, g in enumerate(data["subgraphs"]): # Subgraph local specs on what to display html += "<div class='subgraph'>" tensor_mapper = 
TensorMapper(g) opcode_mapper = OpCodeMapper(data) op_keys_to_display = [("inputs", tensor_mapper), ("outputs", tensor_mapper), ("builtin_options", None), ("opcode_index", opcode_mapper)] tensor_keys_to_display = [("name", None), ("type", None), ("shape", None), ("buffer", None), ("quantization", None)] html += "<h2>Subgraph %d</h2>\n" % subgraph_idx # Inputs and outputs. html += "<h3>Inputs/Outputs</h3>\n" html += GenerateTableHtml( [{ "inputs": g["inputs"], "outputs": g["outputs"] }], [("inputs", tensor_mapper), ("outputs", tensor_mapper)], display_index=False) # Print the tensors. html += "<h3>Tensors</h3>\n" html += GenerateTableHtml(g["tensors"], tensor_keys_to_display) # Print the ops. html += "<h3>Ops</h3>\n" html += GenerateTableHtml(g["operators"], op_keys_to_display) # Visual graph. html += "<svg id='subgraph%d' width='960' height='1600'></svg>\n" % ( subgraph_idx,) html += GenerateGraph(subgraph_idx, g, opcode_mapper) html += "</div>" # Buffers have no data, but maybe in the future they will html += "<h2>Buffers</h2>\n" html += GenerateTableHtml(data["buffers"], buffer_keys_to_display) # Operator codes html += "<h2>Operator Codes</h2>\n" html += GenerateTableHtml(data["operator_codes"], operator_keys_to_display) html += "</body></html>\n" open(html_output, "w").write(html) def main(argv): try: tflite_input = argv[1] html_output = argv[2] except IndexError: print("Usage: %s <input tflite> <output html>" % (argv[0])) else: CreateHtmlFile(tflite_input, html_output) if __name__ == "__main__": main(sys.argv)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4 ############################################################################### # OpenLP - Open Source Lyrics Projection # # --------------------------------------------------------------------------- # # Copyright (c) 2008-2013 Raoul Snyman # # Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan # # Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, # # Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. # # Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, # # Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, # # Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, # # Frode Woldsund, Martin Zibricky, Patrick Zimmermann # # --------------------------------------------------------------------------- # # This program is free software; you can redistribute it and/or modify it # # under the terms of the GNU General Public License as published by the Free # # Software Foundation; version 2 of the License. # # # # This program is distributed in the hope that it will be useful, but WITHOUT # # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # # more details. 
# # # # You should have received a copy of the GNU General Public License along # # with this program; if not, write to the Free Software Foundation, Inc., 59 # # Temple Place, Suite 330, Boston, MA 02111-1307 USA # ############################################################################### """ The bible import functions for OpenLP """ import logging import os from PyQt4 import QtCore, QtGui from openlp.core.lib import Settings, UiStrings, translate from openlp.core.lib.db import delete_database from openlp.core.lib.ui import critical_error_message_box from openlp.core.ui.wizard import OpenLPWizard, WizardStrings from openlp.core.utils import AppLocation, locale_compare from openlp.plugins.bibles.lib.manager import BibleFormat from openlp.plugins.bibles.lib.db import BiblesResourcesDB, clean_filename log = logging.getLogger(__name__) class WebDownload(object): """ Provides an enumeration for the web bible types available to OpenLP. """ Unknown = -1 Crosswalk = 0 BibleGateway = 1 Bibleserver = 2 Names = [u'Crosswalk', u'BibleGateway', u'Bibleserver'] class BibleImportForm(OpenLPWizard): """ This is the Bible Import Wizard, which allows easy importing of Bibles into OpenLP from other formats like OSIS, CSV and OpenSong. """ log.info(u'BibleImportForm loaded') def __init__(self, parent, manager, bibleplugin): """ Instantiate the wizard, and run any extra setup we need to. ``parent`` The QWidget-derived parent of the wizard. ``manager`` The Bible manager. ``bibleplugin`` The Bible plugin. """ self.manager = manager self.web_bible_list = {} OpenLPWizard.__init__(self, parent, bibleplugin, u'bibleImportWizard', u':/wizards/wizard_importbible.bmp') def setupUi(self, image): """ Set up the UI for the bible wizard. """ OpenLPWizard.setupUi(self, image) QtCore.QObject.connect(self.formatComboBox,QtCore.SIGNAL(u'currentIndexChanged(int)'), self.onCurrentIndexChanged) def onCurrentIndexChanged(self, index): """ Called when the format combo box's index changed. 
We have to check if the import is available and accordingly to disable or enable the next button. """ self.selectStack.setCurrentIndex(index) next_button = self.button(QtGui.QWizard.NextButton) next_button.setEnabled(BibleFormat.get_availability(index)) def customInit(self): """ Perform any custom initialisation for bible importing. """ if BibleFormat.get_availability(BibleFormat.OpenLP1): self.openlp1DisabledLabel.hide() else: self.openlp1FileLabel.hide() self.openlp1FileEdit.hide() self.openlp1BrowseButton.hide() self.manager.set_process_dialog(self) self.loadWebBibles() self.restart() self.selectStack.setCurrentIndex(0) def customSignals(self): """ Set up the signals used in the bible importer. """ QtCore.QObject.connect(self.webSourceComboBox, QtCore.SIGNAL(u'currentIndexChanged(int)'), self.onWebSourceComboBoxIndexChanged) QtCore.QObject.connect(self.osisBrowseButton, QtCore.SIGNAL(u'clicked()'), self.onOsisBrowseButtonClicked) QtCore.QObject.connect(self.csvBooksButton, QtCore.SIGNAL(u'clicked()'), self.onCsvBooksBrowseButtonClicked) QtCore.QObject.connect(self.csvVersesButton, QtCore.SIGNAL(u'clicked()'), self.onCsvVersesBrowseButtonClicked) QtCore.QObject.connect(self.openSongBrowseButton, QtCore.SIGNAL(u'clicked()'), self.onOpenSongBrowseButtonClicked) QtCore.QObject.connect(self.openlp1BrowseButton, QtCore.SIGNAL(u'clicked()'), self.onOpenlp1BrowseButtonClicked) def addCustomPages(self): """ Add the bible import specific wizard pages. 
""" # Select Page self.selectPage = QtGui.QWizardPage() self.selectPage.setObjectName(u'SelectPage') self.selectPageLayout = QtGui.QVBoxLayout(self.selectPage) self.selectPageLayout.setObjectName(u'SelectPageLayout') self.formatLayout = QtGui.QFormLayout() self.formatLayout.setObjectName(u'FormatLayout') self.formatLabel = QtGui.QLabel(self.selectPage) self.formatLabel.setObjectName(u'FormatLabel') self.formatComboBox = QtGui.QComboBox(self.selectPage) self.formatComboBox.addItems([u'', u'', u'', u'', u'']) self.formatComboBox.setObjectName(u'FormatComboBox') self.formatLayout.addRow(self.formatLabel, self.formatComboBox) self.spacer = QtGui.QSpacerItem(10, 0, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum) self.formatLayout.setItem(1, QtGui.QFormLayout.LabelRole, self.spacer) self.selectPageLayout.addLayout(self.formatLayout) self.selectStack = QtGui.QStackedLayout() self.selectStack.setObjectName(u'SelectStack') self.osisWidget = QtGui.QWidget(self.selectPage) self.osisWidget.setObjectName(u'OsisWidget') self.osisLayout = QtGui.QFormLayout(self.osisWidget) self.osisLayout.setMargin(0) self.osisLayout.setObjectName(u'OsisLayout') self.osisFileLabel = QtGui.QLabel(self.osisWidget) self.osisFileLabel.setObjectName(u'OsisFileLabel') self.osisFileLayout = QtGui.QHBoxLayout() self.osisFileLayout.setObjectName(u'OsisFileLayout') self.osisFileEdit = QtGui.QLineEdit(self.osisWidget) self.osisFileEdit.setObjectName(u'OsisFileEdit') self.osisFileLayout.addWidget(self.osisFileEdit) self.osisBrowseButton = QtGui.QToolButton(self.osisWidget) self.osisBrowseButton.setIcon(self.openIcon) self.osisBrowseButton.setObjectName(u'OsisBrowseButton') self.osisFileLayout.addWidget(self.osisBrowseButton) self.osisLayout.addRow(self.osisFileLabel, self.osisFileLayout) self.osisLayout.setItem(1, QtGui.QFormLayout.LabelRole, self.spacer) self.selectStack.addWidget(self.osisWidget) self.csvWidget = QtGui.QWidget(self.selectPage) self.csvWidget.setObjectName(u'CsvWidget') self.csvLayout = 
QtGui.QFormLayout(self.csvWidget) self.csvLayout.setMargin(0) self.csvLayout.setObjectName(u'CsvLayout') self.csvBooksLabel = QtGui.QLabel(self.csvWidget) self.csvBooksLabel.setObjectName(u'CsvBooksLabel') self.csvBooksLayout = QtGui.QHBoxLayout() self.csvBooksLayout.setObjectName(u'CsvBooksLayout') self.csvBooksEdit = QtGui.QLineEdit(self.csvWidget) self.csvBooksEdit.setObjectName(u'CsvBooksEdit') self.csvBooksLayout.addWidget(self.csvBooksEdit) self.csvBooksButton = QtGui.QToolButton(self.csvWidget) self.csvBooksButton.setIcon(self.openIcon) self.csvBooksButton.setObjectName(u'CsvBooksButton') self.csvBooksLayout.addWidget(self.csvBooksButton) self.csvLayout.addRow(self.csvBooksLabel, self.csvBooksLayout) self.csvVersesLabel = QtGui.QLabel(self.csvWidget) self.csvVersesLabel.setObjectName(u'CsvVersesLabel') self.csvVersesLayout = QtGui.QHBoxLayout() self.csvVersesLayout.setObjectName(u'CsvVersesLayout') self.csvVersesEdit = QtGui.QLineEdit(self.csvWidget) self.csvVersesEdit.setObjectName(u'CsvVersesEdit') self.csvVersesLayout.addWidget(self.csvVersesEdit) self.csvVersesButton = QtGui.QToolButton(self.csvWidget) self.csvVersesButton.setIcon(self.openIcon) self.csvVersesButton.setObjectName(u'CsvVersesButton') self.csvVersesLayout.addWidget(self.csvVersesButton) self.csvLayout.addRow(self.csvVersesLabel, self.csvVersesLayout) self.csvLayout.setItem(3, QtGui.QFormLayout.LabelRole, self.spacer) self.selectStack.addWidget(self.csvWidget) self.openSongWidget = QtGui.QWidget(self.selectPage) self.openSongWidget.setObjectName(u'OpenSongWidget') self.openSongLayout = QtGui.QFormLayout(self.openSongWidget) self.openSongLayout.setMargin(0) self.openSongLayout.setObjectName(u'OpenSongLayout') self.openSongFileLabel = QtGui.QLabel(self.openSongWidget) self.openSongFileLabel.setObjectName(u'OpenSongFileLabel') self.openSongFileLayout = QtGui.QHBoxLayout() self.openSongFileLayout.setObjectName(u'OpenSongFileLayout') self.openSongFileEdit = QtGui.QLineEdit(self.openSongWidget) 
self.openSongFileEdit.setObjectName(u'OpenSongFileEdit') self.openSongFileLayout.addWidget(self.openSongFileEdit) self.openSongBrowseButton = QtGui.QToolButton(self.openSongWidget) self.openSongBrowseButton.setIcon(self.openIcon) self.openSongBrowseButton.setObjectName(u'OpenSongBrowseButton') self.openSongFileLayout.addWidget(self.openSongBrowseButton) self.openSongLayout.addRow(self.openSongFileLabel, self.openSongFileLayout) self.openSongLayout.setItem(1, QtGui.QFormLayout.LabelRole, self.spacer) self.selectStack.addWidget(self.openSongWidget) self.webTabWidget = QtGui.QTabWidget(self.selectPage) self.webTabWidget.setObjectName(u'WebTabWidget') self.webBibleTab = QtGui.QWidget() self.webBibleTab.setObjectName(u'WebBibleTab') self.webBibleLayout = QtGui.QFormLayout(self.webBibleTab) self.webBibleLayout.setObjectName(u'WebBibleLayout') self.webSourceLabel = QtGui.QLabel(self.webBibleTab) self.webSourceLabel.setObjectName(u'WebSourceLabel') self.webBibleLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.webSourceLabel) self.webSourceComboBox = QtGui.QComboBox(self.webBibleTab) self.webSourceComboBox.setObjectName(u'WebSourceComboBox') self.webSourceComboBox.addItems([u'', u'', u'']) self.webBibleLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.webSourceComboBox) self.webTranslationLabel = QtGui.QLabel(self.webBibleTab) self.webTranslationLabel.setObjectName(u'webTranslationLabel') self.webBibleLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.webTranslationLabel) self.webTranslationComboBox = QtGui.QComboBox(self.webBibleTab) self.webTranslationComboBox.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents) self.webTranslationComboBox.setObjectName(u'WebTranslationComboBox') self.webBibleLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.webTranslationComboBox) self.webTabWidget.addTab(self.webBibleTab, u'') self.webProxyTab = QtGui.QWidget() self.webProxyTab.setObjectName(u'WebProxyTab') self.webProxyLayout = QtGui.QFormLayout(self.webProxyTab) 
self.webProxyLayout.setObjectName(u'WebProxyLayout') self.webServerLabel = QtGui.QLabel(self.webProxyTab) self.webServerLabel.setObjectName(u'WebServerLabel') self.webProxyLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.webServerLabel) self.webServerEdit = QtGui.QLineEdit(self.webProxyTab) self.webServerEdit.setObjectName(u'WebServerEdit') self.webProxyLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.webServerEdit) self.webUserLabel = QtGui.QLabel(self.webProxyTab) self.webUserLabel.setObjectName(u'WebUserLabel') self.webProxyLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.webUserLabel) self.webUserEdit = QtGui.QLineEdit(self.webProxyTab) self.webUserEdit.setObjectName(u'WebUserEdit') self.webProxyLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.webUserEdit) self.webPasswordLabel = QtGui.QLabel(self.webProxyTab) self.webPasswordLabel.setObjectName(u'WebPasswordLabel') self.webProxyLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.webPasswordLabel) self.webPasswordEdit = QtGui.QLineEdit(self.webProxyTab) self.webPasswordEdit.setObjectName(u'WebPasswordEdit') self.webProxyLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.webPasswordEdit) self.webTabWidget.addTab(self.webProxyTab, u'') self.selectStack.addWidget(self.webTabWidget) self.openlp1Widget = QtGui.QWidget(self.selectPage) self.openlp1Widget.setObjectName(u'Openlp1Widget') self.openlp1Layout = QtGui.QFormLayout(self.openlp1Widget) self.openlp1Layout.setMargin(0) self.openlp1Layout.setObjectName(u'Openlp1Layout') self.openlp1FileLabel = QtGui.QLabel(self.openlp1Widget) self.openlp1FileLabel.setObjectName(u'Openlp1FileLabel') self.openlp1FileLayout = QtGui.QHBoxLayout() self.openlp1FileLayout.setObjectName(u'Openlp1FileLayout') self.openlp1FileEdit = QtGui.QLineEdit(self.openlp1Widget) self.openlp1FileEdit.setObjectName(u'Openlp1FileEdit') self.openlp1FileLayout.addWidget(self.openlp1FileEdit) self.openlp1BrowseButton = QtGui.QToolButton(self.openlp1Widget) 
self.openlp1BrowseButton.setIcon(self.openIcon) self.openlp1BrowseButton.setObjectName(u'Openlp1BrowseButton') self.openlp1FileLayout.addWidget(self.openlp1BrowseButton) self.openlp1Layout.addRow(self.openlp1FileLabel, self.openlp1FileLayout) self.openlp1DisabledLabel = QtGui.QLabel(self.openlp1Widget) self.openlp1DisabledLabel.setWordWrap(True) self.openlp1DisabledLabel.setObjectName(u'Openlp1DisabledLabel') self.openlp1Layout.addRow(self.openlp1DisabledLabel) self.openlp1Layout.setItem(1, QtGui.QFormLayout.LabelRole, self.spacer) self.selectStack.addWidget(self.openlp1Widget) self.selectPageLayout.addLayout(self.selectStack) self.addPage(self.selectPage) # License Page self.licenseDetailsPage = QtGui.QWizardPage() self.licenseDetailsPage.setObjectName(u'LicenseDetailsPage') self.licenseDetailsLayout = QtGui.QFormLayout(self.licenseDetailsPage) self.licenseDetailsLayout.setObjectName(u'LicenseDetailsLayout') self.versionNameLabel = QtGui.QLabel(self.licenseDetailsPage) self.versionNameLabel.setObjectName(u'VersionNameLabel') self.licenseDetailsLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.versionNameLabel) self.versionNameEdit = QtGui.QLineEdit(self.licenseDetailsPage) self.versionNameEdit.setObjectName(u'VersionNameEdit') self.licenseDetailsLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.versionNameEdit) self.copyrightLabel = QtGui.QLabel(self.licenseDetailsPage) self.copyrightLabel.setObjectName(u'CopyrightLabel') self.licenseDetailsLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.copyrightLabel) self.copyrightEdit = QtGui.QLineEdit(self.licenseDetailsPage) self.copyrightEdit.setObjectName(u'CopyrightEdit') self.licenseDetailsLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.copyrightEdit) self.permissionsLabel = QtGui.QLabel(self.licenseDetailsPage) self.permissionsLabel.setObjectName(u'PermissionsLabel') self.licenseDetailsLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.permissionsLabel) self.permissionsEdit = 
QtGui.QLineEdit(self.licenseDetailsPage) self.permissionsEdit.setObjectName(u'PermissionsEdit') self.licenseDetailsLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.permissionsEdit) self.addPage(self.licenseDetailsPage) def retranslateUi(self): """ Allow for localisation of the bible import wizard. """ self.setWindowTitle(translate('BiblesPlugin.ImportWizardForm', 'Bible Import Wizard')) self.titleLabel.setText(WizardStrings.HeaderStyle % translate('OpenLP.Ui', 'Welcome to the Bible Import Wizard')) self.informationLabel.setText( translate('BiblesPlugin.ImportWizardForm', 'This wizard will help you to import Bibles from a variety of ' 'formats. Click the next button below to start the process by ' 'selecting a format to import from.')) self.selectPage.setTitle(WizardStrings.ImportSelect) self.selectPage.setSubTitle(WizardStrings.ImportSelectLong) self.formatLabel.setText(WizardStrings.FormatLabel) self.formatComboBox.setItemText(BibleFormat.OSIS, WizardStrings.OSIS) self.formatComboBox.setItemText(BibleFormat.CSV, WizardStrings.CSV) self.formatComboBox.setItemText(BibleFormat.OpenSong, WizardStrings.OS) self.formatComboBox.setItemText(BibleFormat.WebDownload, translate('BiblesPlugin.ImportWizardForm', 'Web Download')) self.formatComboBox.setItemText(BibleFormat.OpenLP1, UiStrings().OLPV1) self.openlp1FileLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Bible file:')) self.osisFileLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Bible file:')) self.csvBooksLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Books file:')) self.csvVersesLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Verses file:')) self.openSongFileLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Bible file:')) self.webSourceLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Location:')) self.webSourceComboBox.setItemText(WebDownload.Crosswalk, translate('BiblesPlugin.ImportWizardForm', 'Crosswalk')) 
self.webSourceComboBox.setItemText(WebDownload.BibleGateway, translate('BiblesPlugin.ImportWizardForm', 'BibleGateway')) self.webSourceComboBox.setItemText(WebDownload.Bibleserver, translate('BiblesPlugin.ImportWizardForm', 'Bibleserver')) self.webTranslationLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Bible:')) self.webTabWidget.setTabText(self.webTabWidget.indexOf(self.webBibleTab), translate('BiblesPlugin.ImportWizardForm', 'Download Options')) self.webServerLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Server:')) self.webUserLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Username:')) self.webPasswordLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Password:')) self.webTabWidget.setTabText(self.webTabWidget.indexOf(self.webProxyTab), translate('BiblesPlugin.ImportWizardForm', 'Proxy Server (Optional)')) self.licenseDetailsPage.setTitle( translate('BiblesPlugin.ImportWizardForm', 'License Details')) self.licenseDetailsPage.setSubTitle(translate('BiblesPlugin.ImportWizardForm', 'Set up the Bible\'s license details.')) self.versionNameLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Version name:')) self.copyrightLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Copyright:')) self.permissionsLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Permissions:')) self.progressPage.setTitle(WizardStrings.Importing) self.progressPage.setSubTitle(translate('BiblesPlugin.ImportWizardForm', 'Please wait while your Bible is imported.')) self.progressLabel.setText(WizardStrings.Ready) self.progressBar.setFormat(u'%p%') self.openlp1DisabledLabel.setText(WizardStrings.NoSqlite) # Align all QFormLayouts towards each other. 
labelWidth = max(self.formatLabel.minimumSizeHint().width(), self.osisFileLabel.minimumSizeHint().width(), self.csvBooksLabel.minimumSizeHint().width(), self.csvVersesLabel.minimumSizeHint().width(), self.openSongFileLabel.minimumSizeHint().width(), self.openlp1FileLabel.minimumSizeHint().width()) self.spacer.changeSize(labelWidth, 0, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed) def validateCurrentPage(self): """ Validate the current page before moving on to the next page. """ if self.currentPage() == self.welcomePage: return True elif self.currentPage() == self.selectPage: if self.field(u'source_format') == BibleFormat.OSIS: if not self.field(u'osis_location'): critical_error_message_box(UiStrings().NFSs, WizardStrings.YouSpecifyFile % WizardStrings.OSIS) self.osisFileEdit.setFocus() return False elif self.field(u'source_format') == BibleFormat.CSV: if not self.field(u'csv_booksfile'): critical_error_message_box(UiStrings().NFSs, translate('BiblesPlugin.ImportWizardForm', 'You need to specify a file with books of the Bible to use in the import.')) self.csvBooksEdit.setFocus() return False elif not self.field(u'csv_versefile'): critical_error_message_box(UiStrings().NFSs, translate('BiblesPlugin.ImportWizardForm', 'You need to specify a file of Bible verses to import.')) self.csvVersesEdit.setFocus() return False elif self.field(u'source_format') == BibleFormat.OpenSong: if not self.field(u'opensong_file'): critical_error_message_box(UiStrings().NFSs, WizardStrings.YouSpecifyFile % WizardStrings.OS) self.openSongFileEdit.setFocus() return False elif self.field(u'source_format') == BibleFormat.WebDownload: self.versionNameEdit.setText(self.webTranslationComboBox.currentText()) return True elif self.field(u'source_format') == BibleFormat.OpenLP1: if not self.field(u'openlp1_location'): critical_error_message_box(UiStrings().NFSs, WizardStrings.YouSpecifyFile % UiStrings().OLPV1) self.openlp1FileEdit.setFocus() return False return True elif self.currentPage() == 
self.licenseDetailsPage: license_version = self.field(u'license_version') license_copyright = self.field(u'license_copyright') path = AppLocation.get_section_data_path(u'bibles') if not license_version: critical_error_message_box(UiStrings().EmptyField, translate('BiblesPlugin.ImportWizardForm', 'You need to specify a version name for your Bible.')) self.versionNameEdit.setFocus() return False elif not license_copyright: critical_error_message_box(UiStrings().EmptyField, translate('BiblesPlugin.ImportWizardForm', 'You need to set a copyright for your Bible. ' 'Bibles in the Public Domain need to be marked as such.')) self.copyrightEdit.setFocus() return False elif self.manager.exists(license_version): critical_error_message_box(translate('BiblesPlugin.ImportWizardForm', 'Bible Exists'), translate('BiblesPlugin.ImportWizardForm', 'This Bible already exists. Please import a different Bible or first delete the existing one.')) self.versionNameEdit.setFocus() return False elif os.path.exists(os.path.join(path, clean_filename( license_version))): critical_error_message_box( translate('BiblesPlugin.ImportWizardForm', 'Bible Exists'), translate('BiblesPlugin.ImportWizardForm', 'This Bible already exists. Please import ' 'a different Bible or first delete the existing one.')) self.versionNameEdit.setFocus() return False return True if self.currentPage() == self.progressPage: return True def onWebSourceComboBoxIndexChanged(self, index): """ Setup the list of Bibles when you select a different source on the web download page. ``index`` The index of the combo box. """ self.webTranslationComboBox.clear() bibles = self.web_bible_list[index].keys() bibles.sort(cmp=locale_compare) self.webTranslationComboBox.addItems(bibles) def onOsisBrowseButtonClicked(self): """ Show the file open dialog for the OSIS file. 
""" self.getFileName(WizardStrings.OpenTypeFile % WizardStrings.OSIS, self.osisFileEdit, u'last directory import') def onCsvBooksBrowseButtonClicked(self): """ Show the file open dialog for the books CSV file. """ self.getFileName(WizardStrings.OpenTypeFile % WizardStrings.CSV, self.csvBooksEdit, u'last directory import', u'%s (*.csv)' % translate('BiblesPlugin.ImportWizardForm', 'CSV File')) def onCsvVersesBrowseButtonClicked(self): """ Show the file open dialog for the verses CSV file. """ self.getFileName(WizardStrings.OpenTypeFile % WizardStrings.CSV, self.csvVersesEdit, u'last directory import', u'%s (*.csv)' % translate('BiblesPlugin.ImportWizardForm', 'CSV File')) def onOpenSongBrowseButtonClicked(self): """ Show the file open dialog for the OpenSong file. """ self.getFileName(WizardStrings.OpenTypeFile % WizardStrings.OS, self.openSongFileEdit, u'last directory import') def onOpenlp1BrowseButtonClicked(self): """ Show the file open dialog for the openlp.org 1.x file. """ self.getFileName(WizardStrings.OpenTypeFile % UiStrings().OLPV1, self.openlp1FileEdit, u'last directory import', u'%s (*.bible)' % translate('BiblesPlugin.ImportWizardForm', 'openlp.org 1.x Bible Files')) def registerFields(self): """ Register the bible import wizard fields. 
""" self.selectPage.registerField(u'source_format', self.formatComboBox) self.selectPage.registerField(u'osis_location', self.osisFileEdit) self.selectPage.registerField(u'csv_booksfile', self.csvBooksEdit) self.selectPage.registerField(u'csv_versefile', self.csvVersesEdit) self.selectPage.registerField(u'opensong_file', self.openSongFileEdit) self.selectPage.registerField(u'web_location', self.webSourceComboBox) self.selectPage.registerField(u'web_biblename', self.webTranslationComboBox) self.selectPage.registerField(u'proxy_server', self.webServerEdit) self.selectPage.registerField(u'proxy_username', self.webUserEdit) self.selectPage.registerField(u'proxy_password', self.webPasswordEdit) self.selectPage.registerField(u'openlp1_location', self.openlp1FileEdit) self.licenseDetailsPage.registerField(u'license_version', self.versionNameEdit) self.licenseDetailsPage.registerField(u'license_copyright', self.copyrightEdit) self.licenseDetailsPage.registerField(u'license_permissions', self.permissionsEdit) def setDefaults(self): """ Set default values for the wizard pages. 
""" settings = Settings() settings.beginGroup(self.plugin.settingsSection) self.restart() self.finishButton.setVisible(False) self.cancelButton.setVisible(True) self.setField(u'source_format', 0) self.setField(u'osis_location', '') self.setField(u'csv_booksfile', '') self.setField(u'csv_versefile', '') self.setField(u'opensong_file', '') self.setField(u'web_location', WebDownload.Crosswalk) self.setField(u'web_biblename', self.webTranslationComboBox.currentIndex()) self.setField(u'proxy_server', settings.value(u'proxy address')) self.setField(u'proxy_username', settings.value(u'proxy username')) self.setField(u'proxy_password', settings.value(u'proxy password')) self.setField(u'openlp1_location', '') self.setField(u'license_version', self.versionNameEdit.text()) self.setField(u'license_copyright', self.copyrightEdit.text()) self.setField(u'license_permissions', self.permissionsEdit.text()) self.onWebSourceComboBoxIndexChanged(WebDownload.Crosswalk) settings.endGroup() def loadWebBibles(self): """ Load the lists of Crosswalk, BibleGateway and Bibleserver bibles. """ # Load Crosswalk Bibles. self.loadBibleResource(WebDownload.Crosswalk) # Load BibleGateway Bibles. self.loadBibleResource(WebDownload.BibleGateway) # Load and Bibleserver Bibles. self.loadBibleResource(WebDownload.Bibleserver) def loadBibleResource(self, download_type): """ Loads a web bible from bible_resources.sqlite. ``download_type`` The WebDownload type e.g. bibleserver. """ self.web_bible_list[download_type] = {} bibles = BiblesResourcesDB.get_webbibles(WebDownload.Names[download_type]) for bible in bibles: version = bible[u'name'] name = bible[u'abbreviation'] self.web_bible_list[download_type][version] = name.strip() def preWizard(self): """ Prepare the UI for the import. 
""" OpenLPWizard.preWizard(self) bible_type = self.field(u'source_format') if bible_type == BibleFormat.WebDownload: self.progressLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Registering Bible...')) else: self.progressLabel.setText(WizardStrings.StartingImport) self.application.process_events() def performWizard(self): """ Perform the actual import. """ bible_type = self.field(u'source_format') license_version = self.field(u'license_version') license_copyright = self.field(u'license_copyright') license_permissions = self.field(u'license_permissions') importer = None if bible_type == BibleFormat.OSIS: # Import an OSIS bible. importer = self.manager.import_bible(BibleFormat.OSIS, name=license_version, filename=self.field(u'osis_location') ) elif bible_type == BibleFormat.CSV: # Import a CSV bible. importer = self.manager.import_bible(BibleFormat.CSV, name=license_version, booksfile=self.field(u'csv_booksfile'), versefile=self.field(u'csv_versefile') ) elif bible_type == BibleFormat.OpenSong: # Import an OpenSong bible. importer = self.manager.import_bible(BibleFormat.OpenSong, name=license_version, filename=self.field(u'opensong_file') ) elif bible_type == BibleFormat.WebDownload: # Import a bible from the web. self.progressBar.setMaximum(1) download_location = self.field(u'web_location') bible_version = self.webTranslationComboBox.currentText() bible = self.web_bible_list[download_location][bible_version] importer = self.manager.import_bible( BibleFormat.WebDownload, name=license_version, download_source=WebDownload.Names[download_location], download_name=bible, proxy_server=self.field(u'proxy_server'), proxy_username=self.field(u'proxy_username'), proxy_password=self.field(u'proxy_password') ) elif bible_type == BibleFormat.OpenLP1: # Import an openlp.org 1.x bible. 
importer = self.manager.import_bible(BibleFormat.OpenLP1, name=license_version, filename=self.field(u'openlp1_location') ) if importer.do_import(license_version): self.manager.save_meta_data(license_version, license_version, license_copyright, license_permissions) self.manager.reload_bibles() if bible_type == BibleFormat.WebDownload: self.progressLabel.setText( translate('BiblesPlugin.ImportWizardForm', 'Registered Bible. Please note, that verses will be ' 'downloaded on\ndemand and thus an internet connection is required.')) else: self.progressLabel.setText(WizardStrings.FinishedImport) else: self.progressLabel.setText(translate('BiblesPlugin.ImportWizardForm', 'Your Bible import failed.')) del self.manager.db_cache[importer.name] delete_database(self.plugin.settingsSection, importer.file)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that LD_DYLIB_INSTALL_NAME and DYLIB_INSTALL_NAME_BASE are handled correctly. """ import TestGyp import re import subprocess import sys if sys.platform == 'darwin': test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode']) CHDIR = 'rpath' test.run_gyp('test.gyp', chdir=CHDIR) test.build('test.gyp', test.ALL, chdir=CHDIR) def GetRpaths(p): p = test.built_file_path(p, chdir=CHDIR) r = re.compile(r'cmd LC_RPATH.*?path (.*?) \(offset \d+\)', re.DOTALL) proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE) o = proc.communicate()[0] assert not proc.returncode return r.findall(o) if (GetRpaths('libdefault_rpath.dylib') != []): test.fail_test() if (GetRpaths('libexplicit_rpath.dylib') != ['@executable_path/.']): test.fail_test() if (GetRpaths('libexplicit_rpaths_escaped.dylib') != ['First rpath', 'Second rpath']): test.fail_test() if (GetRpaths('My Framework.framework/My Framework') != ['@loader_path/.']): test.fail_test() if (GetRpaths('executable') != ['@executable_path/.']): test.fail_test() test.pass_test()
unknown
codeparrot/codeparrot-clean
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeepSpeed [DeepSpeed](https://www.deepspeed.ai/) is designed to optimize distributed training for large models with data, model, pipeline, and even a combination of all three [parallelism](./perf_train_gpu_many) strategies to provide better memory efficiency and faster training speeds. This is achieved with the [Zero Redundancy Optimizer (ZeRO)](https://hf.co/papers/1910.02054) which consists of three stages. | ZeRO stage | description | |---|---| | 1 | partition optimizer states | | 2 | partition optimizer and gradient states | | 3 | partition optimizer, gradient, and parameters | Each stage progressively saves more memory, allowing really large models to fit and train on a single GPU. All ZeRO stages, offloading optimizer memory and computations from the GPU to the CPU are integrated with [`Trainer`]. Provide a config file or one of the example templates to [`Trainer`] to enable DeepSpeed features. This guide walks you through setting up a DeepSpeed config file, how to enable its features in [`Trainer`], and deploy for training. Install DeepSpeed from either PyPI or Transformers. 
For more detailed installation instructions, refer to the DeepSpeed [installation](https://www.deepspeed.ai/tutorials/advanced-install/) or GitHUB [README](https://github.com/microsoft/deepspeed#installation). <hfoptions id="installation"> <hfoption id="PyPI"> ```bash pip install deepspeed ``` </hfoption> <hfoption id="Transformers"> ```bash pip install transformers[deepspeed] ``` </hfoption> </hfoptions> > [!WARNING] > Refer to the [DeepSpeed CUDA installation](./debugging#deepspeed-cuda-issues) if you're having trouble with your installation. While DeepSpeed has a pip installable package, it is highly recommended to [install it from source](https://www.deepspeed.ai/tutorials/advanced-install/#install-deepspeed-from-source) to ensure it matches your hardware and to support certain features which aren't available in the PyPI distribution. DeepSpeed provides a tool for estimating the required CPU and GPU memory for the parameters, optimizer and gradient states. You'll also to need to reserve some memory for the CUDA kernels and activations. Run the command below to check the memory requirements for [bigscience/T0_3B](https://huggingface.co/docs/transformers/main/en/bigscience/T0_3B) on a single GPU. ```bash $ python -c 'from transformers import AutoModel; \ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ model = AutoModel.from_pretrained("bigscience/T0_3B"); \ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1)' [...] Estimated memory needed for params, optim states and gradients for a: HW: Setup with 1 node, 1 GPU per node. SW: Model with 2783M total params, 65M largest layer params. 
per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.37GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=1 15.56GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=0 ``` > [!TIP] > If you have enough GPU memory, disable CPU and NVMe offload to speed everything up. ## Choosing a ZeRO stage Consider the table below to help you choose the appropriate ZeRO stage for training because there is a trade-off between training speed and memory usage. The table orders the ZeRO stages from fastest to slowest and from least memory usage to most. | fastest | least memory usage | |---|---| | ZeRO-1 | ZeRO-3 + offload | | ZeRO-2 | ZeRO-3 | | ZeRO-2 + offload | ZeRO-2 + offload | | ZeRO-3 | ZeRO-2 | | ZeRO-3 + offload | ZeRO-1 | Decide the type of performance you're optimizing for, speed or memory, and then work backwards to discover the best ZeRO stage for your use case. For example, if you're optimizing for speed, start with the fastest ZeRO stage and if you run out of memory, try the next stage which is slower but more memory efficient. ## Config file Once you've decided on a ZeRO stage, set up a config file to enable DeepSpeed with [`Trainer`]. The config file contains all the parameters for how to configure and set up your training. When the training script is executed, DeepSpeed logs the configuration from [`Trainer`] to the console so you can see exactly what's being used. > [!TIP] > Find a complete list of DeepSpeed configuration options on the [DeepSpeed Configuration JSON](https://www.deepspeed.ai/docs/config-json/) reference. 
There are also practical examples of various DeepSpeed configuration examples in the [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) main [DeepSpeed](https://github.com/microsoft/DeepSpeed) repository. Run the command below to quickly find specific examples. > > ```bash > git clone https://github.com/microsoft/DeepSpeedExamples > cd DeepSpeedExamples > find . -name '*json' > # find examples with the Lamb optimizer > grep -i Lamb $(find . -name '*json') > ``` The config file is passed as a path to a JSON file if you're training from the command line interface or as a nested dict object if you're using [`Trainer`] in a notebook. <hfoptions id="pass-config"> <hfoption id="path to file"> ```py TrainingArguments( deepspeed="path/to/deepspeed_config.json", ..., ) ``` </hfoption> <hfoption id="nested dict"> ```py ds_config_dict = dict(scheduler=scheduler_params, optimizer=optimizer_params) args = TrainingArguments( deepspeed=ds_config_dict, ..., ) trainer = Trainer( model, args, ..., ) ``` </hfoption> </hfoptions> ### DeepSpeed versus Trainer parameters There are three types of config parameters. 1. Some config parameters are shared by DeepSpeed and [`Trainer`] making it difficult to identify errors when there are conflicting definitions. In this case, configure these parameters from the [`Trainer`] command line arguments. 1. Some config parameters are automatically derived from the model configuration and don't need to be manually configured. [`Trainer`] uses the config value `auto` to set the most correct or efficient option. You could define these parameters explicitly, but you must take care to ensure the [`Trainer`] and DeepSpeed config parameters match. Mismatches may cause training to fail in very difficult to detect ways. 1. Some config parameters are specific to DeepSpeed and should be manually set based on your training requirements. There are two ways to modify the config parameters. 
> [!TIP] > Some values, such as `scheduler.params.total_num_steps`, are calculated by [`Trainer`] during training. 1. Create or load a DeepSpeed config to use as the main config. 1. Create a [`TrainingArguments`] object based on the DeepSpeed config values. ### ZeRO stage Each ZeRO stage config is defined in `zero_optimization`. For a more detailed explanation of each parameter, refer to the [DeepSpeed Configuration JSON](https://www.deepspeed.ai/docs/config-json/) reference. These parameters must be set up with DeepSpeed because [`Trainer`] doesn't provide equivalent command line arguments. > [!WARNING] > DeepSpeed doesn't validate parameter names and any typos will fallback on the parameters default setting. Observe the DeepSpeed engine startup log messages to see what values are being used. <hfoptions id="zero-config"> <hfoption id="ZeRO-1"> ZeRO-1 shards the optimizer states across GPUs and you can expect a small speed up. ```yml { "zero_optimization": { "stage": 1 } } ``` </hfoption> <hfoption id="ZeRO-2"> ZeRO-2 shards the optimizer and gradient states across GPUs. This stage is primarily used for training since its features are not relevant to inference. Some important parameters to configure for better performance include the following. * `offload_optimizer` should be enabled to reduce GPU memory usage. * `overlap_comm` when set to `true` uses more GPU memory in exchange for lower allreduce latency. This feature uses 4.5x the `allgather_bucket_size` and `reduce_bucket_size` values. In this example, they're set to `5e8` which means it requires 9GB of GPU memory. If your GPU memory is 8GB or less, you should reduce `overlap_comm` to lower the memory requirements and prevent an out-of-memory (OOM) error. * `allgather_bucket_size` and `reduce_bucket_size` trade-off available GPU memory for communication speed. The smaller their values, the slower communication is and the more GPU memory is available. 
You can balance, for example, whether a bigger batch size is more important than a slightly slower training time. * `round_robin_gradients` is available in DeepSpeed 0.4.4 for CPU offloading. It parallelizes gradient copying to CPU memory among ranks by fine-grained gradient partitioning. Performance benefit grows with gradient accumulation steps (more copying between optimizer steps) or GPU count (increased parallelism). ```yml { "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 5e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 5e8, "contiguous_gradients": true, "round_robin_gradients": true } } ``` </hfoption> <hfoption id="ZeRO-3"> ZeRO-3 shards the optimizer and gradient states, and parameters across GPUs. Unlike ZeRO-2, ZeRO-3 can also be used for inference in addition to training because it loads large models onto multiple GPUs. Some important parameters to configure include the following. * `device: "cpu"` can help if you're running out of GPU memory and if you have free CPU memory available. This offloads model parameters to the CPU. * `pin_memory: true` can improve throughput, but less memory becomes available for other processes because the pinned memory is reserved for the specific process that requested it and it's typically accessed much faster than normal CPU memory. * `stage3_max_live_parameters` is the upper limit on how many full parameters to keep on the GPU at any given time. Reduce this value if you encounter an OOM error. * `stage3_max_reuse_distance` is a value for determining when a parameter is used again in the future, and it helps decide whether to throw the parameter away or to keep it. If the parameter is going to be reused (if the value is less than `stage3_max_reuse_distance`), then it is kept to reduce communication overhead. 
This is helpful when activation checkpointing is enabled and you want to keep the parameter in the forward recompute until the backward pass. Reduce this value if you encounter an OOM error. * `stage3_gather_16bit_weights_on_model_save` consolidates fp16 weights when a model is saved. For large models and multiple GPUs, this is expensive in terms of memory and speed. You should enable it if you're planning on resuming training. * `sub_group_size` controls which parameters are updated during the optimizer step. Parameters are grouped into buckets of `sub_group_size` and each bucket is updated one at a time. When used with NVMe offload, `sub_group_size` determines when model states are moved in and out of CPU memory during the optimization step. This prevents running out of CPU memory for extremely large models. `sub_group_size` can be left to its default value if you aren't using NVMe offload, but you may want to change it if you: 1. Run into an OOM error during the optimization step. In this case, reduce `sub_group_size` to reduce memory usage of the temporary buffers. 2. The optimization step is taking a really long time. In this case, increase `sub_group_size` to improve bandwidth utilization as a result of increased data buffers. * `reduce_bucket_size`, `stage3_prefetch_bucket_size`, and `stage3_param_persistence_threshold` are dependent on a models hidden size. It is recommended to set these values to `auto` and allow [`Trainer`] to automatically assign the values. 
```yml { "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true } } ``` ### Initialize large models With ZeRO-3, use the [deepspeed.zero.Init](https://deepspeed.readthedocs.io/en/latest/zero3.html#deepspeed.zero.Init) context manager to initialize a model faster. ```py from transformers import T5ForConditionalGeneration, T5Config import deepspeed with deepspeed.zero.Init(): config = T5Config.from_pretrained("google-t5/t5-small") model = T5ForConditionalGeneration(config) ``` The DeepSped config file needs to have `is_deepspeed_zero3_enabled: true` setup in [`TrainingArguments`] and it needs a ZeRO configuration enabled. The [`TrainingArguments`] object must be created **before** calling [`~PreTrainedModel.from_pretrained`]. > [!TIP] > You'll need ZeRO-3 when the fp16 weights don't fit on a single GPU. But if you're able to load the fp16 weights, set `dtype=torch.float16` in [`~PreTrainedModel.from_pretrained`]. ```py from transformers import AutoModel, Trainer, TrainingArguments training_args = TrainingArguments(..., deepspeed=ds_config) model = AutoModel.from_pretrained("google-t5/t5-small") trainer = Trainer(model=model, args=training_args, ...) ``` When there are multiple GPUs, no single GPU has all the parameters unless it's the parameters of the currently executing layer. To access all parameters from all the layers at once, such as loading pretrained model weights in [`~PreTrainedModel.from_pretrained`], one layer is loaded at a time and immediately partitioned to all GPUs. 
For very large models, it isn't possible to load the weights onto one GPU and then distribute them across the other GPUs due to memory limitations. If you encounter a model parameter weight where `tensor([1.])` or the parameter size is 1 instead of a larger multidimensional shape, it means the parameter is partitioned and this is a ZeRO-3 placeholder. ```py tensor([1.0], device="cuda:0", dtype=torch.float16, requires_grad=True) ``` > [!TIP] > For more information about initializing large models with ZeRO-3 and accessing the parameters, take a look at the [Constructing Massive Models](https://deepspeed.readthedocs.io/en/latest/zero3.html#constructing-massive-models) and [Gathering Parameters](https://deepspeed.readthedocs.io/en/latest/zero3.html#gathering-parameters) guides. </hfoption> </hfoptions> ### NVMe [ZeRO-Infinity](https://hf.co/papers/2104.07857) offloads model states to the CPU and/or NVMe to save even more memory. Smart partitioning and tiling algorithms allow each GPU to send and receive very small amounts of data during offloading such that a modern NVMe can fit an even larger total memory pool than is available to your training process. ZeRO-Infinity requires ZeRO-3. Depending on the CPU and NVMe memory available, you can offload both the [optimizer states](https://www.deepspeed.ai/docs/config-json/#optimizer-offloading) and [parameters](https://www.deepspeed.ai/docs/config-json/#parameter-offloading), just one of them, or none of them. Make sure the `nvme_path` points to a NVMe device, because while it still works with a regular hard drive or solid state drive, it'll be significantly slower. With a modern NVMe, you can expect peak transfer speeds of ~3.5GB/s for read operations and ~3GB/s for write operations. Consider running a [benchmark](https://github.com/microsoft/DeepSpeed/issues/998) on your training setup to determine the optimal `aio` configuration. 
The example ZeRO-3 and ZeRO-Infinity config below sets most of the parameter values to `auto`, but you can also manually configure these values.
With this approach, you can train models with sequence lengths up to 500K tokens on a single H100 GPU, 3.7M on a single node, or 15M tokens on just four nodes with Llama-8B. The implementation described here enables one component of the full ALST system. For additional optimizations like TiledMLP and activation checkpoint offloading, refer to the [DeepSpeed ALST tutorial](https://www.deepspeed.ai/tutorials/ulysses-alst-sequence-parallelism/). > [!TIP] > For more detailed information about sequence parallelism, see the Accelerate [Sequence Parallelism](https://huggingface.co/docs/accelerate/concept_guides/sequence_parallelism) guide. To enable ALST/Ulysses sequence parallelism with [`Trainer`], configure `parallelism_config` in [`TrainingArguments`]. Sequence parallelism is configured via Accelerate's `ParallelismConfig` and requires an Accelerate version higher than 1.12.0. ```py from accelerate.utils import ParallelismConfig, DeepSpeedSequenceParallelConfig # Example: 4 GPUs with sp_size=4, dp_replicate_size=1 (no data parallelism) # Ensure total_size = dp_replicate_size * dp_shard_size * sp_size = 1 * 1 * 4 = 4 GPUs parallelism_config = ParallelismConfig( sp_backend="deepspeed", sp_size=4, # Number of GPUs to split sequence across dp_replicate_size=1, # Explicit: no data parallelism sp_handler=DeepSpeedSequenceParallelConfig( sp_seq_length_is_variable=True, sp_attn_implementation="sdpa", ), ) training_args = TrainingArguments( ..., deepspeed="path/to/deepspeed_config.json", parallelism_config=parallelism_config, ) ``` You can also configure sequence parallelism using an Accelerate config file. 
```yaml distributed_type: DEEPSPEED deepspeed_config: deepspeed_config_file: path/to/ds_config.json machine_rank: 0 num_machines: 1 num_processes: 4 # Total number of processes parallelism_config: parallelism_config_sp_size: 4 # Sequence parallel size parallelism_config_dp_replicate_size: 1 # Must be: dp_replicate_size * dp_shard_size * sp_size = num_processes parallelism_config_sp_backend: deepspeed parallelism_config_sp_seq_length_is_variable: true parallelism_config_sp_attn_implementation: sdpa ``` Important configuration parameters include the following. * `sp_backend` must be set to `"deepspeed"` to use ALST/Ulysses sequence parallelism. * `sp_size` is the degree of sequence parallelism. For example, `sp_size=4` means 4 GPUs will process a single sequence in parallel. You need at least 2 GPUs to enable sequence parallelism. **Data feeding**: Each rank receives a unique data stream from the DataLoader (like DP). **Batch size calculation**: The effective `dp_world_size = world_size / sp_size`. So with 4 GPUs and `sp_size=4`, each of the 4 ranks gets different samples from the DataLoader, but `dp_world_size=1` for total batch size calculations * `sp_seq_length_is_variable` determines how sequence lengths are handled. When set to `True` (recommended), the implementation adapts to varying sequence lengths between batches. When `False`, all sequences must be padded to a fixed length specified by `sp_seq_length`. * `sp_attn_implementation` specifies the attention implementation to use. Supported values are `"sdpa"`, `"flash_attention_2"`, or `"flash_attention_3"`. Flash Attention is recommended for best performance, especially with multiple samples in a batch, because SDPA may incorrectly attend across sample boundaries. > [!WARNING] > Sequence parallelism requires your model to use one of the supported attention implementations (`sdpa`, `flash_attention_2`, or `flash_attention_3`). 
The `eager` attention implementation is not supported because it doesn't properly handle `position_ids`. When using sequence parallelism, ensure your sequences are properly padded. Use `pad_to_multiple_of` in your data collator to ensure sequences are divisible by `sp_size`. For example, with `sp_size=4`, set `pad_to_multiple_of=4` or higher. ```py from transformers import DataCollatorForLanguageModeling data_collator = DataCollatorForLanguageModeling( tokenizer=tokenizer, mlm=False, pad_to_multiple_of=4, # Ensure sequences are divisible by sp_size ) ``` When using `sp_size` with multiple GPUs, you **must** explicitly set `dp_replicate_size` or `dp_shard_size` to ensure `total_size = dp_replicate_size * dp_shard_size * sp_size` equals your total number of GPUs. For example, with 8 GPUs and `sp_size=4`, you must set `dp_replicate_size=2` (since 2 × 1 × 4 = 8): ```py parallelism_config = ParallelismConfig( sp_backend="deepspeed", sp_size=4, dp_replicate_size=2, sp_handler=DeepSpeedSequenceParallelConfig( sp_seq_length_is_variable=True, sp_attn_implementation="flash_attention_2", ), ) ``` [`Trainer`] automatically handles the special requirements for sequence parallelism including: * Adapting the data loader via DeepSpeed's [`UlyssesSPDataLoaderAdapter`](https://github.com/deepspeedai/DeepSpeed/blob/master/deepspeed/runtime/sequence_parallel/ulysses_sp.py) to shard sequences across GPUs. **Important**: Unlike Tensor Parallelism (where all ranks must receive identical data), each rank with SP receives a unique data stream from the DataLoader (similar to DP). The adapter handles distributing sequence chunks across SP ranks internally, so your DataLoader should continue feeding different samples to each rank. 
* Generating `position_ids` when not provided * Creating `shift_labels` for causal language modeling * Aggregating loss across sequence parallel ranks with proper masking for `-100` labels You can launch training with sequence parallelism using the `accelerate launch` command. ```bash accelerate launch --config_file alst_config.yaml your_training_script.py \ --output_dir output_dir \ --per_device_train_batch_size 1 \ --gradient_accumulation_steps 1 ``` ## Training features DeepSpeed supports many training features that can be configured in the config file. This section describes some of the most important features. ### Gradient checkpointing Gradient checkpointing saves memory by only storing *some* of the intermediate activations instead of storing *all* of them. It is useful for fitting larger models on the GPU without running out of memory or to increase the batch size for better performance. Training speed is slower though. * For a Transformers model, set `model.gradient_checkpointing_enable()` or add `--gradient_checkpointing` in the [`TrainingArguments`]. * For a non-Transformers model, use the DeepSpeed [Activation Checkpointing API](https://deepspeed.readthedocs.io/en/latest/activation-checkpointing.html). Replacing Transformers modeling code and [torch.utils.checkpoint](https://pytorch.org/docs/stable/checkpoint.html) with the DeepSpeed API gives you more flexibility because you can offload the forward activations to the CPU memory instead of recalculating them. ### Batch size The batch size can be automatically configured or manually set. When you choose the `"auto"` option, [`Trainer`] sets `train_micro_batch_size_per_gpu` and `train_batch_size` to the value of `world_size * per_device_train_batch_size * gradient_accumulation_steps`. ```json { "train_micro_batch_size_per_gpu": "auto", "train_batch_size": "auto" } ``` ### Communication data type A separate data type is used for communication collectives like reduction, gathering and scattering operations. 
All gather and scatter operations are performed in the same data type the data is in. For example, if you're training in bf16, the data is also gathered in bf16 because gathering is a non-lossy operation. Reduce operations are lossy, for example, when gradients are averaged across multiple GPUs. When the communication is done in fp16 or bf16, it's more likely to be lossy because adding multiple numbers in low precision isn't exact. This is especially the case with bf16 which has a lower precision than fp16. For this reason, fp16 is the default for reduction operations because the loss is minimal when averaging gradients. Choose the communication data type by setting the `communication_data_type` parameter in the config file. For example, choosing fp32 adds a small amount of overhead but ensures the reduction operation is accumulated in fp32 and when it is ready, it's downcasted to whichever half-precision data type you're training in. ```json { "communication_data_type": "fp32" } ``` ### Gradient accumulation Gradient accumulation accumulates gradients over several mini-batches of data before updating parameters. It stores less gradients and enables training with a larger *effective batch size*. Training speed is slower though, but it's useful for overcoming memory constraints. Gradient accumulation can be automatically configured or manually set. When you choose the `"auto"` option, [`Trainer`] sets it to the value of `gradient_accumulation_steps`. ```json { "gradient_accumulation_steps": "auto" } ``` ### Gradient clipping Gradient clipping is useful for preventing exploding gradients which can lead to instability during training. It sets a maximum threshold value and rescales the gradients if their norm exceeds the threshold. Gradient clipping can be automatically configured or manually set. When you choose the `"auto"` option, [`Trainer`] sets it to the value of `max_grad_norm`. 
```json { "gradient_clipping": "auto" } ``` ### Mixed precision training Mixed precision accelerates training speed by performing some calculations in half-precision, but it also maintains some calculations in full-precision to preserve accuracy. DeepSpeed supports fp32, fp16, and bf16 data types. <hfoptions id="precision"> <hfoption id="fp32"> Train in fp32 if a model wasn't pretrained in mixed precision because it may cause underflow or overflow errors. Disable fp16, the default, in this case. ```json { "fp16": { "enabled": false } } ``` For Ampere GPUs and PyTorch 1.7+, the more efficient [tf32](https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices) mode is automatically enabled for some operations but the results are still in fp32. Configure it in [`Trainer`] by setting `--tf32` to enable it, and `--tf32 0` or `--no_tf32` to disable it. </hfoption> <hfoption id="fp16"> To configure fp16 mixed precision, set up the config as shown below with `"auto"` or your own values. [`Trainer`] automatically enables or disables fp16 based on the value of `fp16` or `fp16_full_eval`, and the rest of the config can be set by you. fp16 is enabled from the command line when the following arguments are passed: `--fp16` or `--fp16_full_eval` also. ```json { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 } } ``` For additional DeepSpeed fp16 training options, take a look at the [FP16 Training Options](https://www.deepspeed.ai/docs/config-json/#fp16-training-options) reference. </hfoption> <hfoption id="bf16"> > [!TIP] > bf16 requires DeepSpeed 0.6.0. bf16 has the same dynamic range as fp32, and doesn't require loss scaling unlike fp16. However, if you use [gradient accumulation](#gradient-accumulation) with bf16, gradients are accumulated in bf16 which may not be desirable because the lower precision can lead to lossy accumulation. 
bf16 can be set up in the config file or enabled from the command line when the following arguments are passed: `--bf16` or `--bf16_full_eval`. ```json { "bf16": { "enabled": "auto" } } ``` </hfoption> </hfoptions> ### Optimizer and scheduler DeepSpeed and Transformers optimizers and schedulers can be mixed and matched if `offload_optimizer` isn't enabled. When `offload_optimizer` is enabled, use a non-DeepSpeed optimizer (except for LAMB) as long as it has it a CPU and GPU implementation. Set the optimizer and scheduler parameters for the config file from the command line to avoid hard to find errors. For example, if the learning rate is set to a different value in another place, you can override it from the command line. <hfoptions id="opt-sched"> <hfoption id="optimizer"> DeepSpeed offers several [optimizers](https://www.deepspeed.ai/docs/config-json/#optimizer-parameters) (Adam, AdamW, OneBitAdam, and LAMB) but you can also import other optimizers from PyTorch. If you don't configure the optimizer in the config, [`Trainer`] automatically selects AdamW and either uses the supplied values or the default values for the following parameters from the command line: `lr`, `adam_beta1`, `adam_beta2`, `adam_epsilon`, `weight_decay`. You can set the parameters to `"auto"` or manually input your own values. ```json { "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } } } ``` Use an unsupported optimizer by adding the following to the top level configuration. ```json { "zero_allow_untested_optimizer": true } ``` From DeepSpeed 0.8.3+, if you want to use offload, you'll also need to add the following to the top level configuration because offload works best with DeepSpeed's CPU Adam optimizer. 
```json { "zero_force_ds_cpu_optimizer": false } ``` </hfoption> <hfoption id="scheduler"> DeepSpeed supports the LRRangeTest, OneCycle, WarmupLR and WarmupDecayLR learning rate [schedulers](https://www.deepspeed.ai/docs/config-json/#scheduler-parameters). Transformers and DeepSpeed provide two of the same schedulers: * WarmupLR is the same as `--lr_scheduler_type constant_with_warmup` in Transformers. * WarmupDecayLR is the same as `--lr_scheduler_type linear` in Transformers (this is the default scheduler used in Transformers). If you don't configure the scheduler in the config file, [`Trainer`] automatically selects WarmupDecayLR and either uses the supplied values or the default values for the following parameters from the command line: `warmup_min_lr`, `warmup_max_lr`, `warmup_num_steps`, `total_num_steps` (automatically calculated during run time if `max_steps` is not provided). You can set the parameters to `"auto"` or manually input your own values. ```json { "scheduler": { "type": "WarmupDecayLR", "params": { "total_num_steps": "auto", "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } } } ``` </hfoption> </hfoptions> ### Universal checkpointing [Universal Checkpointing](https://www.deepspeed.ai/tutorials/universal-checkpointing) saves and loads model, optimizer and training scheduler states across different model architectures, parallelism techniques, and training configurations. By saving them in a Universal format, it enables easier model training continuation and fine-tuning. Resume training with a Universal checkpoint by setting `load_universal` to `true` in the config file. ```json { "checkpoint": { "load_universal": true } } ``` ## Deploy DeepSpeed can be deployed with its native launcher, [torchrun](https://pytorch.org/docs/stable/elastic/run.html) or [Accelerate](https://huggingface.co/docs/accelerate/basic_tutorials/launch#using-accelerate-launch). 
Add the `--deepspeed ds_config.json` argument to [`Trainer`] in the command line. It is recommended to use DeepSpeeds [add_config_arguments](https://deepspeed.readthedocs.io/en/latest/initialize.html#argument-parsing) utility to add any other command line arguments to your code. <hfoptions id="deploy"> <hfoption id="multi-GPU"> To deploy DeepSpeed on multiple GPUs, add `--num_gpus`. You don't need to add `--num_gpus` if you're planning on using all available GPUs. ```bash deepspeed --num_gpus=2 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro ``` </hfoption> <hfoption id="single-GPU"> DeepSpeed is still useful with just one GPU because you can: 1. Offload some computations and memory to the CPU to make more GPU resources available to your model to use a larger batch size or fit a very large model that normally won't fit. 2. Minimize memory fragmentation with its smart GPU memory management system which also allows you to fit bigger models and data batches. To deploy DeepSpeed on a single GPU, add `--num_gpus`. You don't need to add `--num_gpus` if you only have one GPU because DeepSpeed deploys all GPUs it can see on a given node. > [!TIP] > Set the `allgather_bucket_size` and `reduce_bucket_size` values to 2e8 in the [ZeRO-2](#zero-configuration) configuration file to get better performance on a single GPU. 
```bash deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro ``` </hfoption> </hfoptions> ### Multi-node A multi-node setup consists of multiple nodes, where each node has one of more GPUs running a workload. DeepSpeed expects a shared storage system, but if this is not the case, you need to adjust the config file to include a [checkpoint](https://www.deepspeed.ai/docs/config-json/#checkpoint-options) to allow loading without access to a shared filesystem. ```json { "checkpoint": { "use_node_local_storage": true } } ``` You could also use the `--save_on_each_node` parameter in [`TrainingArguments`] to automatically add the above `checkpoint` to your config. The examples below for the torchrun and DeepSpeed launcher shows how to deploy two nodes with eight GPUs each. Access the first node with `ssh hostname1` and the second node with `ssh hostname2`. Both nodes must be able to communicate with each other locally over ssh without a password. <hfoptions id="multinode"> <hfoption id="torchrun"> With [torchrun](https://pytorch.org/docs/stable/elastic/run.html), ssh to each node and run the following command on both of them. The launcher waits until both nodes are synchronized before launching the training. ```bash torchrun --nproc_per_node=8 --nnode=2 --node_rank=0 --master_addr=hostname1 \ --master_port=9901 your_program.py <normal cl args> --deepspeed ds_config.json ``` </hfoption> <hfoption id="DeepSpeed"> Create a `hostfile` for the DeepSpeed launcher. ```bash hostname1 slots=8 hostname2 slots=8 ``` The DeepSpeed launcher automatically launches the command on both nodes at once with the command below. 
```bash deepspeed --num_gpus 8 --num_nodes 2 --hostfile hostfile --master_addr hostname1 --master_port=9901 \ your_program.py <normal cl args> --deepspeed ds_config.json ``` Check out the [Resource Configuration (multi-node)](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) guide for more details about configuring multi-node compute resources. </hfoption> </hfoptions> ### Slurm [Slurm](https://slurm.schedmd.com/documentation.html) is a cluster management and job scheduling system. An example Slurm script is shown below. ```bash #SBATCH --job-name=test-nodes # name #SBATCH --nodes=2 # nodes #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! #SBATCH --cpus-per-task=10 # number of cores per tasks #SBATCH --gres=gpu:8 # number of gpus #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) #SBATCH --output=%x-%j.out # output file name export GPUS_PER_NODE=8 export MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) export MASTER_PORT=9901 srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ your_program.py <normal cl args> --deepspeed ds_config.json' ``` Launch training simultaneously on all nodes with the command below. ```bash sbatch launch.slurm ``` ### Jupyter Notebook To use DeepSpeed in a Jupyter Notebook, you need to emulate a distributed environment because the launcher doesn't support deployment from a notebook. This is only supported for one GPU. To use multiple GPUs, you must use a multi-process environment, which means you have to use the DeepSpeed launcher which can't be emulated as shown here. 
```py # emulate a launcher in the notebook import os os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "9994" # modify if RuntimeError: Address already in use os.environ["RANK"] = "0" os.environ["LOCAL_RANK"] = "0" os.environ["WORLD_SIZE"] = "1" training_args = TrainingArguments(..., deepspeed="ds_config_zero3.json") trainer = Trainer(...) trainer.train() ``` Create a config file on the fly in the notebook in the current directory with a dedicated cell. ```py %%bash cat <<'EOT' > ds_config_zero3.json { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } EOT ``` If the training script is in a file and not a notebook cell, launch DeepSpeed from the shell in the notebook cell. ```py !git clone https://github.com/huggingface/transformers !cd transformers; deepspeed examples/pytorch/translation/run_translation.py ... ``` Another option is to use `%%bash` to run the shell program without emulating the distributed environment. 
However, you won't be able to view the logs until training is complete. ```py %%bash git clone https://github.com/huggingface/transformers cd transformers deepspeed examples/pytorch/translation/run_translation.py ... ``` ## Save model weights DeepSpeed stores the main fp32 weights in custom checkpoint optimizer files (`global_step*/*optim_states.pt`) which are saved under the normal checkpoint. ### fp16 ZeRO-2 saves the model weights in fp16. To save the weights in fp16 for ZeRO-3, set `"stage3_gather_16bit_weights_on_model_save": true` in the config file, because the weights are distributed across multiple GPUs. If you don't, [`Trainer`] won't save the weights in fp16 and won't create a `pytorch_model.bin` file. This is because DeepSpeed's state_dict contains a placeholder instead of the real weights, so you won't be able to load it. ```json { "zero_optimization": { "stage": 3, "stage3_gather_16bit_weights_on_model_save": true } } ``` ### fp32 Unless you have a lot of free CPU memory, fp32 weights shouldn't be saved during training because it can require a lot of memory. It is usually best to save the fp32 weights offline after training is complete. <hfoptions id="save"> <hfoption id="offline"> DeepSpeed provides a [zero_to_fp32.py](https://github.com/microsoft/DeepSpeed/blob/91829476a8fd4d0d9268c03c1d56795d20a51c12/deepspeed/utils/zero_to_fp32.py#L14) script at the top-level checkpoint folder for extracting weights at any point. This is a standalone script and you don't need a config file or [`Trainer`]. For example, if your checkpoint folder looks like the one shown below, then you can run the following command to create and consolidate the fp32 weights from multiple GPUs into a single `pytorch_model.bin` file. The script automatically discovers the subfolder `global_step1` which contains the checkpoint. 
```bash $ ls -l output_dir/checkpoint-1/ -rw-rw-r-- 1 stas stas 1.4K Mar 27 20:42 config.json drwxrwxr-x 2 stas stas 4.0K Mar 25 19:52 global_step1/ -rw-rw-r-- 1 stas stas 12 Mar 27 13:16 latest -rw-rw-r-- 1 stas stas 827K Mar 27 20:42 optimizer.pt -rw-rw-r-- 1 stas stas 231M Mar 27 20:42 pytorch_model.bin -rw-rw-r-- 1 stas stas 623 Mar 27 20:42 scheduler.pt -rw-rw-r-- 1 stas stas 1.8K Mar 27 20:42 special_tokens_map.json -rw-rw-r-- 1 stas stas 774K Mar 27 20:42 spiece.model -rw-rw-r-- 1 stas stas 1.9K Mar 27 20:42 tokenizer_config.json -rw-rw-r-- 1 stas stas 339 Mar 27 20:42 trainer_state.json -rw-rw-r-- 1 stas stas 2.3K Mar 27 20:42 training_args.bin -rwxrw-r-- 1 stas stas 5.5K Mar 27 13:16 zero_to_fp32.py* ``` > [!TIP] > Run `python zero_to_fp32.py -h` for more usage details. The script requires 2x the general RAM of the final fp32 weights. ```bash python zero_to_fp32.py . pytorch_model.bin ``` </hfoption> <hfoption id="online"> Adding the `--load_best_model_at_end` parameter in [`TrainingArguments`] tracks the best checkpoint so you can finish training first and save the final model explicitly. Reload the model as shown below. > [!WARNING] > Once [load_state_dict_from_zero_checkpoint](https://deepspeed.readthedocs.io/en/stable/model-checkpointing.html#deepspeed.utils.zero_to_fp32.load_state_dict_from_zero_checkpoint) is run, the model is no longer usable in DeepSpeed in the context of the same application. You'll need to reinitialize the DeepSpeed engine because `model.load_state_dict(state_dict)` removes all the DeepSpeed magic from it. Only use this function once training is complete. 
```py from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint checkpoint_dir = os.path.join(trainer.args.output_dir, "checkpoint-final") trainer.deepspeed.save_checkpoint(checkpoint_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) ``` You must have saved at least one checkpoint to load the latest checkpoint as shown in the example below. ```py from transformers.trainer_utils import get_last_checkpoint from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint checkpoint_dir = get_last_checkpoint(trainer.args.output_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) ``` Use `load_state_dict` to extract and load the state_dict of the fp32 weights. ```py from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) model = model.cpu() model.load_state_dict(state_dict) ``` </hfoption> </hfoptions> ## Non-Trainer integration DeepSpeed also works with Transformers without [`Trainer`]. The [`~integrations.HfDeepSpeedConfig`] is responsible for gathering ZeRO-3 parameters and partitioning a model across multiple GPUs when [`~PreTrainedModel.from_pretrained`] is called. You must instantiate [`~integrations.HfDeepSpeedConfig`] before loading a model to efficiently deploy ZeRO-3. <hfoptions id="models"> <hfoption id="pretrained model"> ```py from transformers.integrations import HfDeepSpeedConfig from transformers import AutoModel import deepspeed # DeepSpeed config object or path to the file ds_config = {...} # must run before instantiating the model to detect ZeRO-3 dschf = HfDeepSpeedConfig(ds_config) # keep this object alive model = AutoModel.from_pretrained("openai-community/gpt2") engine = deepspeed.initialize(model=model, config_params=ds_config, ...) 
``` </hfoption> <hfoption id="non-pretrained model"> [`~integrations.HfDeepSpeedConfig`] is not required for ZeRO-1 or ZeRO-2. ```py from transformers.integrations import HfDeepSpeedConfig from transformers import AutoModel, AutoConfig import deepspeed # DeepSpeed config object or path to the file ds_config = {...} # must run before instantiating the model to detect zero 3 dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # randomly initialize model weights config = AutoConfig.from_pretrained("openai-community/gpt2") model = AutoModel.from_config(config) engine = deepspeed.initialize(model=model, config_params=ds_config, ...) ``` </hfoption> </hfoptions> ## Troubleshoot One of the first things to check when you encounter an error is whether DeepSpeed is the cause (because often it isn't). Retry your setup without DeepSpeed, and if the error persists, report the issue. If the issue is unrelated to the Transformers integration, please open the issue on the DeepSpeed [repository](https://github.com/microsoft/DeepSpeed). For issues related to the Transformers integration, please provide the following information. * The full DeepSpeed config file. * The command line arguments for [`Trainer`] or the [`TrainingArguments`] if you're scripting the [`Trainer`] setup yourself (don't dump the entire [`TrainingArguments`] which contains many irrelevant entries). * The outputs of the following commands. ```bash python -c 'import torch; print(f"torch: {torch.__version__}")' python -c 'import transformers; print(f"transformers: {transformers.__version__}")' python -c 'import deepspeed; print(f"deepspeed: {deepspeed.__version__}")' ``` * A link to a Google Colab notebook to reproduce the issue. * A standard or non-custom dataset or an existing example to reproduce the issue. The following sections provide a guide for resolving two of the most common issues. 
### Process killed at startup

When the DeepSpeed process is killed during launch without a traceback, that usually means the program tried to allocate more CPU memory than is available on your system. Or the process may have tried to allocate more CPU memory than allowed, leading the OS kernel to terminate the process. In this case, check whether your config file has either `offload_optimizer`, `offload_param`, or both configured to offload to the CPU.

If you have NVMe and ZeRO-3 set up, experiment with offloading to the NVMe ([estimate](https://deepspeed.readthedocs.io/en/latest/memory.html) the memory requirements of a model first) instead.

### NaN loss

NaN loss often occurs when a model is pretrained in bf16 and you try to use it with fp16 (especially relevant to TPU trained models). To resolve this, use fp32 or bf16 if your hardware (TPUs, Ampere GPUs or newer) supports it.

It is also possible that fp16 is causing overflow. For example, if your config file looks like the one below, you may see the following overflow errors in the logs.

```json
{
  "fp16": {
    "enabled": "auto",
    "loss_scale": 0,
    "loss_scale_window": 1000,
    "initial_scale_power": 16,
    "hysteresis": 2,
    "min_loss_scale": 1
  }
}
```

The `OVERFLOW!` error below is a result of the DeepSpeed loss scaler unable to find a scaling coefficient to overcome the loss overflow. Try a higher `initial_scale_power` value in this case (32 usually works).

```bash
0%|                                                                                                                             | 0/189 [00:00<?, ?it/s]
[deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 262144, reducing to 262144
1%|▌
| 1/189 [00:00<01:26, 2.17it/s]
[deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 262144, reducing to 131072.0
1%|█▏
[...]
[deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1
14%|████████████████▌ | 27/189 [00:14<01:13, 2.21it/s]
[deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1
15%|█████████████████▏ | 28/189 [00:14<01:13, 2.18it/s]
[deepscale] OVERFLOW!
Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1 15%|█████████████████▊ | 29/189 [00:15<01:13, 2.18it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1 [...] ``` ## Resources DeepSpeed is a powerful technology for scaling large model training. To learn more about DeepSpeed, take a look at their [blog posts](https://www.microsoft.com/en-us/research/search/?q=deepspeed), [documentation](https://www.deepspeed.ai/getting-started/), and [GitHub](https://github.com/microsoft/deepspeed). The papers below provide additional details about ZeRO. * [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://hf.co/papers/1910.02054) * [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://hf.co/papers/2101.06840) * [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://hf.co/papers/2104.07857)
unknown
github
https://github.com/huggingface/transformers
docs/source/en/deepspeed.md
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Initializes a DfaClient without using yaml-cached credentials.

While our LoadFromStorage method provides a useful shortcut to instantiate a
client if you regularly use just one set of credentials, production
applications may need to swap out users. This example shows you how to create
an OAuth 2.0 client and a DfaClient without relying on a yaml file.
"""

__author__ = 'Joseph DiLallo'

from googleads import dfa
from googleads import oauth2

# OAuth 2.0 credential information. In a real application, you'd probably be
# pulling these values from a credential storage.
CLIENT_ID = 'INSERT_CLIENT_ID_HERE'
CLIENT_SECRET = 'INSERT_CLIENT_SECRET_HERE'
REFRESH_TOKEN = 'INSERT_REFRESH_TOKEN_HERE'

# DFA API information.
USER_PROFILE_NAME = 'INSERT_USER_PROFILE_NAME_HERE'
APPLICATION_NAME = 'INSERT_APPLICATION_NAME_HERE'


def main(client_id, client_secret, refresh_token, user_profile_name,
         application_name):
  """Lists campaigns using explicitly constructed OAuth 2.0 credentials.

  Builds the OAuth 2.0 client and the DfaClient directly from the raw
  credential values instead of loading them from a yaml file, then prints
  every campaign found for the given user profile.
  """
  # Construct the refresh-token client straight from the credential values.
  token_client = oauth2.GoogleRefreshTokenClient(
      client_id, client_secret, refresh_token)
  client = dfa.DfaClient(user_profile_name, token_client, application_name)

  service = client.GetService(
      'campaign', server='https://advertisersapitest.doubleclick.net')
  # An empty criteria dict fetches all campaigns visible to the profile.
  response = service.getCampaignsByCriteria({})

  records = response['records']
  if records:
    for record in records:
      print ('Campaign with name \'%s\' and ID \'%s\' was found.'
             % (record['name'], record['id']))


if __name__ == '__main__':
  main(CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN, USER_PROFILE_NAME,
       APPLICATION_NAME)
unknown
codeparrot/codeparrot-clean
# Copyright 2013 dotCloud inc.

#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at

#        http://www.apache.org/licenses/LICENSE-2.0

#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

import socket

import six
import requests.adapters

# http.client was renamed between Python 2 and 3; alias to the py2 name.
if six.PY3:
    import http.client as httplib
else:
    import httplib

# urllib3 may be vendored inside requests or installed standalone.
try:
    import requests.packages.urllib3 as urllib3
except ImportError:
    import urllib3

RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class UnixHTTPConnection(httplib.HTTPConnection, object):
    """HTTP connection that talks over an AF_UNIX socket instead of TCP."""

    def __init__(self, base_url, unix_socket, timeout=60):
        # 'localhost' is a placeholder host; the real endpoint is the socket.
        httplib.HTTPConnection.__init__(self, 'localhost', timeout=timeout)
        self.base_url = base_url
        self.unix_socket = unix_socket
        self.timeout = timeout

    def connect(self):
        # Swap the default TCP connect for a Unix-domain connect.
        unix_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        unix_sock.settimeout(self.timeout)
        unix_sock.connect(self.unix_socket)
        self.sock = unix_sock


class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Connection pool whose connections all target one Unix socket path."""

    def __init__(self, base_url, socket_path, timeout=60):
        urllib3.connectionpool.HTTPConnectionPool.__init__(
            self, 'localhost', timeout=timeout
        )
        self.base_url = base_url
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self):
        return UnixHTTPConnection(
            self.base_url, self.socket_path, self.timeout
        )


class UnixAdapter(requests.adapters.HTTPAdapter):
    """Requests transport adapter for ``http+unix://`` URLs."""

    def __init__(self, socket_url, timeout=60):
        # Drop the scheme prefix and make sure the path is absolute.
        socket_path = socket_url.replace('http+unix://', '')
        if not socket_path.startswith('/'):
            socket_path = '/' + socket_path
        self.socket_path = socket_path
        self.timeout = timeout
        # Cache up to ten pools; evicted pools get closed by dispose_func.
        self.pools = RecentlyUsedContainer(
            10, dispose_func=lambda p: p.close())
        super(UnixAdapter, self).__init__()

    def get_connection(self, url, proxies=None):
        # Serialize pool creation so concurrent callers share one pool.
        with self.pools.lock:
            cached = self.pools.get(url)
            if cached:
                return cached
            fresh = UnixHTTPConnectionPool(
                url, self.socket_path, self.timeout)
            self.pools[url] = fresh
        return fresh

    def close(self):
        self.pools.clear()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python
#
# Copyright (C) 2009 Tan Swee Heng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains extensions to Atom objects used with Google Finance."""

__author__ = 'thesweeheng@gmail.com'

import atom
import gdata

# XML namespaces used by the Google Finance portfolio/position/transaction
# feeds: gd: is the common Google Data namespace, gf: is Finance-specific.
GD_NAMESPACE = 'http://schemas.google.com/g/2005'
GF_NAMESPACE = 'http://schemas.google.com/finance/2007'


class Money(atom.AtomBase):
  """The <gd:money> element."""
  _tag = 'money'
  _namespace = GD_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['amount'] = 'amount'
  _attributes['currencyCode'] = 'currency_code'

  def __init__(self, amount=None, currency_code=None, **kwargs):
    self.amount = amount
    self.currency_code = currency_code
    atom.AtomBase.__init__(self, **kwargs)

  def __str__(self):
    return "%s %s" % (self.amount, self.currency_code)


def MoneyFromString(xml_string):
  return atom.CreateClassFromXMLString(Money, xml_string)


class _Monies(atom.AtomBase):
  """An element containing multiple <gd:money> in multiple currencies."""
  _namespace = GF_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _children['{%s}money' % GD_NAMESPACE] = ('money', [Money])

  def __init__(self, money=None, **kwargs):
    self.money = money or []
    atom.AtomBase.__init__(self, **kwargs)

  def __str__(self):
    return " / ".join("%s" % i for i in self.money)


class CostBasis(_Monies):
  """The <gf:costBasis> element."""
  _tag = 'costBasis'


def CostBasisFromString(xml_string):
  return atom.CreateClassFromXMLString(CostBasis, xml_string)


class DaysGain(_Monies):
  """The <gf:daysGain> element."""
  _tag = 'daysGain'


def DaysGainFromString(xml_string):
  return atom.CreateClassFromXMLString(DaysGain, xml_string)


class Gain(_Monies):
  """The <gf:gain> element."""
  _tag = 'gain'


def GainFromString(xml_string):
  return atom.CreateClassFromXMLString(Gain, xml_string)


class MarketValue(_Monies):
  """The <gf:marketValue> element."""
  # Fixed: a stray duplicate assignment (_tag = 'gain', copy-pasted from the
  # Gain class above) used to precede this one. It was dead code — only the
  # final 'marketValue' value ever took effect — so it has been removed.
  _tag = 'marketValue'


def MarketValueFromString(xml_string):
  return atom.CreateClassFromXMLString(MarketValue, xml_string)


class Commission(_Monies):
  """The <gf:commission> element."""
  _tag = 'commission'


def CommissionFromString(xml_string):
  return atom.CreateClassFromXMLString(Commission, xml_string)


class Price(_Monies):
  """The <gf:price> element."""
  _tag = 'price'


def PriceFromString(xml_string):
  return atom.CreateClassFromXMLString(Price, xml_string)


class Symbol(atom.AtomBase):
  """The <gf:symbol> element."""
  _tag = 'symbol'
  _namespace = GF_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['fullName'] = 'full_name'
  _attributes['exchange'] = 'exchange'
  _attributes['symbol'] = 'symbol'

  def __init__(self, full_name=None, exchange=None, symbol=None, **kwargs):
    self.full_name = full_name
    self.exchange = exchange
    self.symbol = symbol
    atom.AtomBase.__init__(self, **kwargs)

  def __str__(self):
    return "%s:%s (%s)" % (self.exchange, self.symbol, self.full_name)


def SymbolFromString(xml_string):
  return atom.CreateClassFromXMLString(Symbol, xml_string)


class TransactionData(atom.AtomBase):
  """The <gf:transactionData> element."""
  _tag = 'transactionData'
  _namespace = GF_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['type'] = 'type'
  _attributes['date'] = 'date'
  _attributes['shares'] = 'shares'
  _attributes['notes'] = 'notes'
  _children = atom.AtomBase._children.copy()
  _children['{%s}commission' % GF_NAMESPACE] = ('commission', Commission)
  _children['{%s}price' % GF_NAMESPACE] = ('price', Price)

  def __init__(self, type=None, date=None, shares=None, notes=None,
               commission=None, price=None, **kwargs):
    self.type = type
    self.date = date
    self.shares = shares
    self.notes = notes
    self.commission = commission
    self.price = price
    atom.AtomBase.__init__(self, **kwargs)


def TransactionDataFromString(xml_string):
  return atom.CreateClassFromXMLString(TransactionData, xml_string)


class TransactionEntry(gdata.GDataEntry):
  """An entry of the transaction feed.

  A TransactionEntry contains TransactionData such as the transaction
  type (Buy, Sell, Sell Short, or Buy to Cover), the number of units,
  the date, the price, any commission, and any notes.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _children['{%s}transactionData' % GF_NAMESPACE] = (
      'transaction_data', TransactionData)

  def __init__(self, transaction_data=None, **kwargs):
    self.transaction_data = transaction_data
    gdata.GDataEntry.__init__(self, **kwargs)

  def transaction_id(self):
    # The atom ID is a URL; the transaction ID is its last path segment.
    return self.id.text.split("/")[-1]

  transaction_id = property(transaction_id, doc='The transaction ID.')


def TransactionEntryFromString(xml_string):
  return atom.CreateClassFromXMLString(TransactionEntry, xml_string)


class TransactionFeed(gdata.GDataFeed):
  """A feed that lists all of the transactions that have been recorded for
  a particular position.

  A transaction is a collection of information about an instance of
  buying or selling a particular security. The TransactionFeed lists all
  of the transactions that have been recorded for a particular position
  as a list of TransactionEntries.
  """
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [TransactionEntry])


def TransactionFeedFromString(xml_string):
  return atom.CreateClassFromXMLString(TransactionFeed, xml_string)


class TransactionFeedLink(atom.AtomBase):
  """Link to TransactionFeed embedded in PositionEntry.

  If a PositionFeed is queried with transactions='true', TransactionFeeds
  are inlined in the returned PositionEntries. These TransactionFeeds are
  accessible via TransactionFeedLink's feed attribute.
  """
  _tag = 'feedLink'
  _namespace = GD_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['href'] = 'href'
  _children = atom.AtomBase._children.copy()
  _children['{%s}feed' % atom.ATOM_NAMESPACE] = (
      'feed', TransactionFeed)

  def __init__(self, href=None, feed=None, **kwargs):
    self.href = href
    self.feed = feed
    atom.AtomBase.__init__(self, **kwargs)


class PositionData(atom.AtomBase):
  """The <gf:positionData> element."""
  _tag = 'positionData'
  _namespace = GF_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['gainPercentage'] = 'gain_percentage'
  _attributes['return1w'] = 'return1w'
  _attributes['return4w'] = 'return4w'
  _attributes['return3m'] = 'return3m'
  _attributes['returnYTD'] = 'returnYTD'
  _attributes['return1y'] = 'return1y'
  _attributes['return3y'] = 'return3y'
  _attributes['return5y'] = 'return5y'
  _attributes['returnOverall'] = 'return_overall'
  _attributes['shares'] = 'shares'
  _children = atom.AtomBase._children.copy()
  _children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis)
  _children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain)
  _children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain)
  _children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue)

  def __init__(self, gain_percentage=None, return1w=None, return4w=None,
               return3m=None, returnYTD=None, return1y=None, return3y=None,
               return5y=None, return_overall=None, shares=None,
               cost_basis=None, days_gain=None, gain=None, market_value=None,
               **kwargs):
    self.gain_percentage = gain_percentage
    self.return1w = return1w
    self.return4w = return4w
    self.return3m = return3m
    self.returnYTD = returnYTD
    self.return1y = return1y
    self.return3y = return3y
    self.return5y = return5y
    self.return_overall = return_overall
    self.shares = shares
    self.cost_basis = cost_basis
    self.days_gain = days_gain
    self.gain = gain
    self.market_value = market_value
    atom.AtomBase.__init__(self, **kwargs)


def PositionDataFromString(xml_string):
  return atom.CreateClassFromXMLString(PositionData, xml_string)


class PositionEntry(gdata.GDataEntry):
  """An entry of the position feed.

  A PositionEntry contains the ticker exchange and Symbol for a stock,
  mutual fund, or other security, along with PositionData such as the
  number of units of that security that the user holds, and performance
  statistics.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _children['{%s}positionData' % GF_NAMESPACE] = (
      'position_data', PositionData)
  _children['{%s}symbol' % GF_NAMESPACE] = ('symbol', Symbol)
  _children['{%s}feedLink' % GD_NAMESPACE] = (
      'feed_link', TransactionFeedLink)

  def __init__(self, position_data=None, symbol=None, feed_link=None,
               **kwargs):
    self.position_data = position_data
    self.symbol = symbol
    self.feed_link = feed_link
    gdata.GDataEntry.__init__(self, **kwargs)

  def position_title(self):
    return self.title.text

  position_title = property(position_title,
      doc='The position title as a string (i.e. position.title.text).')

  def ticker_id(self):
    # The atom ID is a URL; the ticker ID is its last path segment.
    return self.id.text.split("/")[-1]

  ticker_id = property(ticker_id, doc='The position TICKER ID.')

  def transactions(self):
    return self.feed_link.feed.entry if self.feed_link.feed else None

  transactions = property(transactions, doc="""
      Inlined TransactionEntries are returned if PositionFeed is queried
      with transactions='true'.""")


def PositionEntryFromString(xml_string):
  return atom.CreateClassFromXMLString(PositionEntry, xml_string)


class PositionFeed(gdata.GDataFeed):
  """A feed that lists all of the positions in a particular portfolio.

  A position is a collection of information about a security that the
  user holds. The PositionFeed lists all of the positions in a particular
  portfolio as a list of PositionEntries.
  """
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PositionEntry])


def PositionFeedFromString(xml_string):
  return atom.CreateClassFromXMLString(PositionFeed, xml_string)


class PositionFeedLink(atom.AtomBase):
  """Link to PositionFeed embedded in PortfolioEntry.

  If a PortfolioFeed is queried with positions='true', the PositionFeeds
  are inlined in the returned PortfolioEntries. These PositionFeeds are
  accessible via PositionFeedLink's feed attribute.
  """
  _tag = 'feedLink'
  _namespace = GD_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['href'] = 'href'
  _children = atom.AtomBase._children.copy()
  _children['{%s}feed' % atom.ATOM_NAMESPACE] = (
      'feed', PositionFeed)

  def __init__(self, href=None, feed=None, **kwargs):
    self.href = href
    self.feed = feed
    atom.AtomBase.__init__(self, **kwargs)


class PortfolioData(atom.AtomBase):
  """The <gf:portfolioData> element."""
  _tag = 'portfolioData'
  _namespace = GF_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['currencyCode'] = 'currency_code'
  _attributes['gainPercentage'] = 'gain_percentage'
  _attributes['return1w'] = 'return1w'
  _attributes['return4w'] = 'return4w'
  _attributes['return3m'] = 'return3m'
  _attributes['returnYTD'] = 'returnYTD'
  _attributes['return1y'] = 'return1y'
  _attributes['return3y'] = 'return3y'
  _attributes['return5y'] = 'return5y'
  _attributes['returnOverall'] = 'return_overall'
  _children = atom.AtomBase._children.copy()
  _children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis)
  _children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain)
  _children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain)
  _children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue)

  def __init__(self, currency_code=None, gain_percentage=None, return1w=None,
               return4w=None, return3m=None, returnYTD=None, return1y=None,
               return3y=None, return5y=None, return_overall=None,
               cost_basis=None, days_gain=None, gain=None, market_value=None,
               **kwargs):
    self.currency_code = currency_code
    self.gain_percentage = gain_percentage
    self.return1w = return1w
    self.return4w = return4w
    self.return3m = return3m
    self.returnYTD = returnYTD
    self.return1y = return1y
    self.return3y = return3y
    self.return5y = return5y
    self.return_overall = return_overall
    self.cost_basis = cost_basis
    self.days_gain = days_gain
    self.gain = gain
    self.market_value = market_value
    atom.AtomBase.__init__(self, **kwargs)


def PortfolioDataFromString(xml_string):
  return atom.CreateClassFromXMLString(PortfolioData, xml_string)


class PortfolioEntry(gdata.GDataEntry):
  """An entry of the PortfolioFeed.

  A PortfolioEntry contains the portfolio's title along with PortfolioData
  such as currency, total market value, and overall performance statistics.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _children['{%s}portfolioData' % GF_NAMESPACE] = (
      'portfolio_data', PortfolioData)
  _children['{%s}feedLink' % GD_NAMESPACE] = (
      'feed_link', PositionFeedLink)

  def __init__(self, portfolio_data=None, feed_link=None, **kwargs):
    self.portfolio_data = portfolio_data
    self.feed_link = feed_link
    gdata.GDataEntry.__init__(self, **kwargs)

  def portfolio_title(self):
    return self.title.text

  def set_portfolio_title(self, portfolio_title):
    self.title = atom.Title(text=portfolio_title, title_type='text')

  portfolio_title = property(portfolio_title, set_portfolio_title,
      doc='The portfolio title as a string (i.e. portfolio.title.text).')

  def portfolio_id(self):
    # The atom ID is a URL; the portfolio ID is its last path segment.
    return self.id.text.split("/")[-1]

  portfolio_id = property(portfolio_id,
      doc='The portfolio ID. Do not confuse with portfolio.id.')

  def positions(self):
    return self.feed_link.feed.entry if self.feed_link.feed else None

  positions = property(positions, doc="""
      Inlined PositionEntries are returned if PortfolioFeed was queried
      with positions='true'.""")


def PortfolioEntryFromString(xml_string):
  return atom.CreateClassFromXMLString(PortfolioEntry, xml_string)


class PortfolioFeed(gdata.GDataFeed):
  """A feed that lists all of the user's portfolios.

  A portfolio is a collection of positions that the user holds in various
  securities, plus metadata. The PortfolioFeed lists all of the user's
  portfolios as a list of PortfolioEntries.
  """
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PortfolioEntry])


def PortfolioFeedFromString(xml_string):
  return atom.CreateClassFromXMLString(PortfolioFeed, xml_string)
unknown
codeparrot/codeparrot-clean
package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1" ) // JSONSchema descriptions help the enrichment suggest API to generate enrichment configurations. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type AlertEnrichment struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec AlertEnrichmentSpec `json:"spec,omitempty"` } func (AlertEnrichment) OpenAPIModelName() string { return OpenAPIPrefix + "AlertEnrichment" } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type AlertEnrichmentList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []AlertEnrichment `json:"items,omitempty"` } func (AlertEnrichmentList) OpenAPIModelName() string { return OpenAPIPrefix + "AlertEnrichmentList" } // AlertEnrichmentSpec specifies an alert enrichment pipeline. type AlertEnrichmentSpec struct { // Title of the alert enrichment. // +kubebuilder:validation:Required Title string `json:"title" yaml:"title" jsonschema:"description=Title of the alert enrichment"` // Description of the alert enrichment. Description string `json:"description,omitempty" yaml:"description,omitempty" jsonschema:"description=Human‑readable description"` // Alert rules for which to run the enrichment for. // If not set, the enrichment runs for all alert rules. // +listType=set AlertRuleUIDs []string `json:"alertRuleUids,omitempty" yaml:"alertRuleUids,omitempty" jsonschema:"description=UIDs of alert rules this enrichment applies to (empty = all)"` // LabelMatchers optionally restricts when this enrichment runs. LabelMatchers []Matcher `json:"labelMatchers,omitempty" yaml:"labelMatchers,omitempty" jsonschema:"description=Label matchers that must be satisfied by the alert for this enrichment to run"` // AnnotationMatchers optionally restricts when this enrichment runs. 
AnnotationMatchers []Matcher `json:"annotationMatchers,omitempty" yaml:"annotationMatchers,omitempty" jsonschema:"description=Annotation matchers that must be satisfied by the alert for this enrichment to run"` // Receivers optionally restricts the enrichment to one or more receiver names. // If not set, the enrichment runs for alerts coming from all receivers. // +listType=set Receivers []string `json:"receivers,omitempty" yaml:"receivers,omitempty" jsonschema:"description=Alertmanager receiver names to match (empty = all)"` // Steps of the enrichment pipeline. Steps []Step `json:"steps" yaml:"steps" jsonschema:"description=Ordered list of enricher steps"` } func (AlertEnrichmentSpec) OpenAPIModelName() string { return OpenAPIPrefix + "AlertEnrichmentSpec" } // Type of comparison performed by the matcher. This mimics Alertmanager matchers. // +enum type StepType string // Defines values for MatchType. const ( StepTypeEnricher StepType = "enricher" StepTypeConditional StepType = "conditional" ) // Step represent an invocation of a single enricher. type Step struct { Type StepType `json:"type" yaml:"type" jsonschema:"description=Step kind: 'enricher' or 'conditional'"` // Timeout is the maximum about of time this specific enrichment is allowed to take. Timeout metav1.Duration `json:"timeout" yaml:"timeout" jsonschema:"description=Maximum execution duration for this step, for example '5s'"` // Enricher specifies what enricher to run and it's configuration. Enricher *EnricherConfig `json:"enricher,omitempty" yaml:"enricher,omitempty" jsonschema:"description=Enricher configuration"` // Conditional allows branching to specifies what enricher to run and it's configuration. 
Conditional *Conditional `json:"conditional,omitempty" yaml:"conditional,omitempty" jsonschema:"description=Conditional enricher configuration that branches based on the condition"` } func (Step) OpenAPIModelName() string { return OpenAPIPrefix + "Step" } type Conditional struct { // If is the condition to evaluate. If Condition `json:"if" yaml:"if" jsonschema:"description=Condition to evaluate before running the enrichment steps"` // Then is the enrichment steps to perform if all the conditions above are true. Then []Step `json:"then" yaml:"then" jsonschema:"description=Steps executed when the condition is true"` // Else is the enrichment steps to perform otherwise. Else []Step `json:"else,omitempty" yaml:"else,omitempty" jsonschema:"description=Steps executed when the condition is false"` } func (Conditional) OpenAPIModelName() string { return OpenAPIPrefix + "Conditional" } type Condition struct { // LabelMatchers optionally specifies the condition to require matching label values. LabelMatchers []Matcher `json:"labelMatchers,omitempty" yaml:"labelMatchers,omitempty" jsonschema:"description=Label matchers that must be satisfied"` // AnnotationMatchers optionally restricts when the per-alert enrichments are run. AnnotationMatchers []Matcher `json:"annotationMatchers,omitempty" yaml:"annotationMatchers,omitempty" jsonschema:"description=Annotation matchers that must be satisfied"` // DataSourceQuery is a data source query to run. If the query returns a non-zero value, // then the condition is taken to be true. DataSourceQuery *RawDataSourceQuery `json:"dataSourceQuery,omitempty" yaml:"dataSourceQuery,omitempty" jsonschema:"description=Data source query to run to evaluate the condition"` } func (Condition) OpenAPIModelName() string { return OpenAPIPrefix + "Condition" } // Matcher is used to match label (or annotation) values. 
type Matcher struct { Type MatchType `json:"type" yaml:"type" jsonschema:"description=Comparison operator ('=', '!=', '=~', '!~')"` Name string `json:"name" yaml:"name" jsonschema:"description=Label/annotation key"` Value string `json:"value" yaml:"value" jsonschema:"description=Value or regex pattern to match"` } func (Matcher) OpenAPIModelName() string { return OpenAPIPrefix + "Matcher" } // Type of comparison performed by the matcher. This mimics Alertmanager matchers. // +enum type MatchType string // Defines values for MatchType. const ( MatchTypeEqual MatchType = "=" MatchTypeNotEqual MatchType = "!=" MatchTypeRegexp MatchType = "=~" MatchNotRegexp MatchType = "!~" ) // Type of enricher // +enum type EnricherType string // Defines values for EnricherType. const ( EnricherTypeAssign EnricherType = "assign" EnricherTypeExternal EnricherType = "external" EnricherTypeDataSourceQuery EnricherType = "dsquery" EnricherTypeSift EnricherType = "sift" EnricherTypeAsserts EnricherType = "asserts" EnricherTypeExplain EnricherType = "explain" EnricherTypeLoop EnricherType = "loop" EnricherTypeAssistant EnricherType = "assistant" ) // EnricherConfig is a discriminated union of enricher configurations. 
type EnricherConfig struct {
	// Type is the union discriminator; exactly one of the pointer fields
	// below should be populated, matching this value.
	// NOTE(review): the jsonschema description omits 'assistant' even though
	// EnricherTypeAssistant exists — confirm and update the tag.
	Type EnricherType `json:"type" yaml:"type" jsonschema:"description=Enricher type ('assign', 'external', 'dsquery', 'sift', 'asserts', 'explain', 'loop')"`

	Assign     *AssignEnricher     `json:"assign,omitempty" yaml:"assign,omitempty" jsonschema:"description=Assign enricher settings"`
	External   *ExternalEnricher   `json:"external,omitempty" yaml:"external,omitempty" jsonschema:"description=External HTTP enricher settings"`
	DataSource *DataSourceEnricher `json:"dataSource,omitempty" yaml:"dataSource,omitempty" jsonschema:"description=Data source query enricher settings"`
	Sift       *SiftEnricher       `json:"sift,omitempty" yaml:"sift,omitempty" jsonschema:"description=Sift enricher settings"`
	Asserts    *AssertsEnricher    `json:"asserts,omitempty" yaml:"asserts,omitempty" jsonschema:"description=Asserts enricher settings"`
	Explain    *ExplainEnricher    `json:"explain,omitempty" yaml:"explain,omitempty" jsonschema:"description=Explain enricher settings"`
	Loop       *LoopEnricher       `json:"loop,omitempty" yaml:"loop,omitempty" jsonschema:"description=Loop enricher settings"`
	Assistant  *AssistantEnricher  `json:"assistant,omitempty" yaml:"assistant,omitempty" jsonschema:"description=Assistant enricher settings"`
}

func (EnricherConfig) OpenAPIModelName() string { return OpenAPIPrefix + "EnricherConfig" }

// AssignEnricher configures an enricher which assigns annotations.
type AssignEnricher struct {
	// Annotations to change and values to set them to.
	// +listType=map
	// +listMapKey=name
	Annotations []Assignment `json:"annotations" yaml:"annotations" jsonschema:"description=Annotations to set on the alert"`
}

func (AssignEnricher) OpenAPIModelName() string { return OpenAPIPrefix + "AssignEnricher" }

type Assignment struct {
	// Name of the annotation to assign.
	Name string `json:"name" yaml:"name" jsonschema:"description=Annotation key"`
	// Value to assign to the annotation. Can use Go template format, with access to
	// annotations and labels via e.g. {{$annotations.x}}
	Value string `json:"value" yaml:"value" jsonschema:"description=Template value to apply, for example '{{ $labels.instance }} is down'"`
}

func (Assignment) OpenAPIModelName() string { return OpenAPIPrefix + "Assignment" }

// ExternalEnricher configures an enricher which calls an external service.
type ExternalEnricher struct {
	// URL of the external HTTP service to call out to.
	URL string `json:"url" yaml:"url" jsonschema:"description=HTTP endpoint to call for enrichment"`
}

func (ExternalEnricher) OpenAPIModelName() string { return OpenAPIPrefix + "ExternalEnricher" }

// Type of data source query
// +enum
type DataSourceQueryType string

// Defines values for DataSourceQueryType.
const (
	DataSourceQueryTypeRaw  DataSourceQueryType = "raw"
	DataSourceQueryTypeLogs DataSourceQueryType = "logs"
)

// DataSourceEnricher configures an enricher which calls an external service.
// Type discriminates between the Raw and Logs query forms.
type DataSourceEnricher struct {
	Type DataSourceQueryType  `json:"type" yaml:"type" jsonschema:"description=Data source query type ('raw', 'logs')"`
	Raw  *RawDataSourceQuery  `json:"raw,omitempty" yaml:"raw,omitempty" jsonschema:"description=Raw query definition"`
	Logs *LogsDataSourceQuery `json:"logs,omitempty" yaml:"logs,omitempty" jsonschema:"description=Logs query definition"`
}

func (DataSourceEnricher) OpenAPIModelName() string { return OpenAPIPrefix + "DataSourceEnricher" }

// RawDataSourceQuery allows defining the entire query request
type RawDataSourceQuery struct {
	// The data source request to perform.
	Request common.Unstructured `json:"request,omitempty" yaml:"request,omitempty" jsonschema:"description=Grafana data source request payload"`
	// The RefID of the response to use. Not required if only a single query is given.
	RefID string `json:"refId,omitempty" yaml:"refId,omitempty" jsonschema:"description=RefID of the response to use, needed if multiple queries are given"`
}

func (RawDataSourceQuery) OpenAPIModelName() string { return OpenAPIPrefix + "RawDataSourceQuery" }

// LogsDataSourceQuery is a simplified method of describing a logs query,
// typically those that return data frames with a "Line" field.
type LogsDataSourceQuery struct {
	// The datasource plugin type
	DataSourceType string `json:"dataSourceType" yaml:"dataSourceType" jsonschema:"description=Data source plugin type (e.g. 'prometheus', 'loki')"`
	// Datasource UID
	DataSourceUID string `json:"dataSourceUid,omitempty" yaml:"dataSourceUid,omitempty" jsonschema:"description=UID of the data source to query"`
	// The logs query to run.
	Expr string `json:"expr" yaml:"expr" jsonschema:"description=Log query expression"`
	// Number of log lines to add to the alert. Defaults to 3.
	MaxLines int `json:"maxLines,omitempty" yaml:"maxLines,omitempty" jsonschema:"description=Maximum number of log lines to include, defaults to 3"`
}

func (LogsDataSourceQuery) OpenAPIModelName() string { return OpenAPIPrefix + "LogsDataSourceQuery" }

// SiftEnricher configures an enricher which calls into Sift.
type SiftEnricher struct {
	// In the future, there may be configuration options.
}

func (SiftEnricher) OpenAPIModelName() string { return OpenAPIPrefix + "SiftEnricher" }

// AssertsEnricher configures an enricher which calls into Asserts.
type AssertsEnricher struct {
	// In the future, there may be configuration options.
}

func (AssertsEnricher) OpenAPIModelName() string { return OpenAPIPrefix + "AssertsEnricher" }

// ExplainEnricher uses LLM to generate explanations for alerts.
type ExplainEnricher struct {
	// Annotation is the annotation name the generated explanation is written to.
	Annotation string `json:"annotation" yaml:"annotation" jsonschema:"description=Annotation name to set the explanation in, by default '__enriched_ai_explanation'"`
}

func (ExplainEnricher) OpenAPIModelName() string { return OpenAPIPrefix + "ExplainEnricher" }

// LoopEnricher configures an enricher which calls into Loop.
type LoopEnricher struct {
	// In the future, there may be configuration options.
}

func (LoopEnricher) OpenAPIModelName() string { return OpenAPIPrefix + "LoopEnricher" }

// AssistantEnricher configures an enricher which calls into Assistant.
type AssistantEnricher struct {
	// In the future, there may be configuration options.
}

func (AssistantEnricher) OpenAPIModelName() string { return OpenAPIPrefix + "AssistantEnricher" }
go
github
https://github.com/grafana/grafana
apps/alerting/alertenrichment/pkg/apis/alertenrichment/v1beta1/types.go
# -*- Mode:Python -*-
##########################################################################
#                                                                        #
# This file is part of AVANGO.                                           #
#                                                                        #
# Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der       #
# angewandten Forschung (FhG), Munich, Germany.                          #
#                                                                        #
# AVANGO is free software: you can redistribute it and/or modify         #
# it under the terms of the GNU Lesser General Public License as         #
# published by the Free Software Foundation, version 3.                  #
#                                                                        #
# AVANGO is distributed in the hope that it will be useful,              #
# but WITHOUT ANY WARRANTY; without even the implied warranty of         #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           #
# GNU General Public License for more details.                           #
#                                                                        #
# You should have received a copy of the GNU Lesser General Public       #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
#                                                                        #
##########################################################################

import math
import avango.osg
import avango.inspector
import avango.display
import sys

argv = avango.display.init(sys.argv)
view = avango.display.make_view()
view.EnableTrackball.value = True

###############################################################################
# light sources -- the shader's NumLights uniform below is derived from them


def make_light(light_num, position):
    """Return a (Light, LightSource) pair sharing the scene's light colors.

    light_num -- OpenGL light index (maps to gl_LightSource[light_num])
    position  -- homogeneous light position as avango.osg.Vec4
    """
    light = avango.osg.nodes.Light(
        LightNum=light_num,
        Ambient=avango.osg.Vec4(0.2, 0.2, 0.2, 0.2),
        Diffuse=avango.osg.Vec4(0.7, 0.7, 0.7, 1.0),
        Specular=avango.osg.Vec4(1.0, 1.0, 1.0, 1.0),
        Position=position)
    return light, avango.osg.nodes.LightSource(Light=light)


light0, lightsource0 = make_light(0, avango.osg.Vec4(3.0, 3.0, 3.0, 1.0))
light1, lightsource1 = make_light(1, avango.osg.Vec4(-3.0, 3.0, 3.0, 1.0))
light2, lightsource2 = make_light(2, avango.osg.Vec4(0.0, -3.0, 3.0, 1.0))
light3, lightsource3 = make_light(3, avango.osg.Vec4(0.0, 0.0, -3.0, 1.0))
lights = [light0, light1, light2, light3]

###############################################################################
# an example shader program

vshaderfile = "vshader.vert"
fshaderfile = "fshader.frag"

vshader = avango.osg.nodes.Shader(Name="VertexShader",
                                  Type=avango.osg.shadertype.VERTEX,
                                  FileName=vshaderfile)
fshader = avango.osg.nodes.Shader(Name="FragmentShader",
                                  Type=avango.osg.shadertype.FRAGMENT,
                                  FileName=fshaderfile)
prog = avango.osg.nodes.Program(ShaderList=[vshader, fshader])
ss = avango.osg.nodes.StateSet(Program=prog)

# Important: Values have to be specified first !!!
# NumLights is computed from the light list so shader and scene stay in sync.
ss.Uniforms.value = [avango.osg.nodes.Uniform(
    Values=[float(len(lights))],
    Type=avango.osg.uniformtype.FLOAT,
    UniformName="NumLights")]

###############################################################################
# some example models


def make_material_uniforms(ambient, diffuse):
    """Return the per-model material uniforms consumed by the shader.

    The specular color (white) and shininess (256) are shared by all models;
    only ambient and diffuse differ per model.
    """
    return [
        avango.osg.nodes.Uniform(Values=ambient,
                                 Type=avango.osg.uniformtype.FLOAT_VEC4,
                                 UniformName="ambient"),
        avango.osg.nodes.Uniform(Values=diffuse,
                                 Type=avango.osg.uniformtype.FLOAT_VEC4,
                                 UniformName="diffuse"),
        avango.osg.nodes.Uniform(Values=[1.0, 1.0, 1.0, 1.0],
                                 Type=avango.osg.uniformtype.FLOAT_VEC4,
                                 UniformName="specular"),
        avango.osg.nodes.Uniform(Values=[256.0],
                                 Type=avango.osg.uniformtype.FLOAT,
                                 UniformName="shininess"),
    ]


# A red sphere on the left and a green sphere on the right.
model1 = avango.osg.nodes.Sphere(StateSet=avango.osg.nodes.StateSet(
    Uniforms=make_material_uniforms([0.2, 0.0, 0.0, 1.0],
                                    [0.7, 0.0, 0.0, 1.0])))
model1.Matrix.value = avango.osg.make_trans_mat(-5.0, 0.0, 0.0) * \
    avango.osg.make_scale_mat(0.1, 0.1, 0.1)

model2 = avango.osg.nodes.Sphere(StateSet=avango.osg.nodes.StateSet(
    Uniforms=make_material_uniforms([0.0, 0.2, 0.0, 1.0],
                                    [0.0, 0.7, 0.0, 1.0])))
model2.Matrix.value = avango.osg.make_trans_mat(2.0, 0.0, 0.0) * \
    avango.osg.make_scale_mat(0.2, 0.2, 0.2)

###############################################################################
# build the scenegraph: the model group is nested under the chain of
# light sources so every light affects the models

modelGroup = avango.osg.nodes.Group(StateSet=ss)
modelGroup.Children.value.append(model1)
modelGroup.Children.value.append(model2)

node = modelGroup
for lightsource in [lightsource3, lightsource2, lightsource1, lightsource0]:
    lightsource.Children.value = [node]
    node = lightsource

root = avango.osg.nodes.Group()
root.Children.value = [lightsource0]

###############################################################################
# visualize the light sources as wireframe spheres

wirestate = avango.osg.nodes.StateSet(
    WireframeMode=1,
    Program=avango.osg.nodes.Program(ShaderList=[
        avango.osg.nodes.Shader(
            Name="VertexShader",
            Type=avango.osg.shadertype.VERTEX,
            ShaderSource="void main() { gl_Position = ftransform(); }"),
        # Fixed: this shader was previously mislabeled Name="VertexShader"
        # although its type is FRAGMENT.
        avango.osg.nodes.Shader(
            Name="FragmentShader",
            Type=avango.osg.shadertype.FRAGMENT,
            ShaderSource="void main() { gl_FragColor = gl_FrontMaterial.diffuse; }"),
    ]))

lightcolor = avango.osg.Vec4(1.0, 0.8, 0.0, 1.0)
for light in lights:
    root.Children.value.append(avango.osg.nodes.Sphere(
        StateSet=wirestate,
        Color=lightcolor,
        Matrix=avango.osg.make_scale_mat(0.1, 0.1, 0.1) *
        avango.osg.make_trans_mat(light.Position.value.x,
                                  light.Position.value.y,
                                  light.Position.value.z)))

###############################################################################
# run the example

view.Root.value = root
avango.display.run()
unknown
codeparrot/codeparrot-clean
#define VQSORT_ONLY_STATIC 1 #include "hwy/highway.h" #include "hwy/contrib/sort/vqsort-inl.h" #include "highway_qsort.hpp" #include "quicksort.hpp" namespace np::highway::qsort_simd { template <typename T> void NPY_CPU_DISPATCH_CURFX(QSort)(T *arr, npy_intp size) { #if VQSORT_ENABLED using THwy = std::conditional_t<std::is_same_v<T, Half>, hwy::float16_t, T>; hwy::HWY_NAMESPACE::VQSortStatic(reinterpret_cast<THwy*>(arr), size, hwy::SortAscending()); #else sort::Quick(arr, size); #endif } #if !HWY_HAVE_FLOAT16 template <> void NPY_CPU_DISPATCH_CURFX(QSort)<Half>(Half *arr, npy_intp size) { sort::Quick(arr, size); } #endif // !HWY_HAVE_FLOAT16 template void NPY_CPU_DISPATCH_CURFX(QSort)<int16_t>(int16_t*, npy_intp); template void NPY_CPU_DISPATCH_CURFX(QSort)<uint16_t>(uint16_t*, npy_intp); #if HWY_HAVE_FLOAT16 template void NPY_CPU_DISPATCH_CURFX(QSort)<Half>(Half*, npy_intp); #endif } // np::highway::qsort_simd
cpp
github
https://github.com/numpy/numpy
numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp
/* * Copyright (C) 2009 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.common.util.concurrent; import static com.google.common.util.concurrent.InterruptionUtil.repeatedlyInterruptTestThread; import static com.google.common.util.concurrent.Uninterruptibles.getUninterruptibly; import static java.util.concurrent.Executors.newSingleThreadExecutor; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.MINUTES; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.Assert.assertThrows; import com.google.common.testing.TearDown; import com.google.common.testing.TearDownStack; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeoutException; import junit.framework.TestCase; import org.jspecify.annotations.NullUnmarked; // TODO(cpovirk): Should this be merged into UninterruptiblesTest? 
/**
 * Unit test for {@link Uninterruptibles#getUninterruptibly}
 *
 * @author Kevin Bourrillion
 * @author Chris Povirk
 */
@NullUnmarked
public class UninterruptibleFutureTest extends TestCase {
  private SleepingRunnable sleeper;
  private Future<Boolean> delayedFuture;

  private final TearDownStack tearDownStack = new TearDownStack();

  @Override
  protected void setUp() {
    // A single-threaded executor runs the sleeper; it is shut down after each
    // test via the tear-down stack.
    ExecutorService executor = newSingleThreadExecutor();
    tearDownStack.addTearDown(
        new TearDown() {
          @Override
          public void tearDown() {
            executor.shutdownNow();
          }
        });
    sleeper = new SleepingRunnable(1000);
    delayedFuture = executor.submit(sleeper, true);
    // Clear any interrupt a test may have leaked so it cannot affect the next test.
    tearDownStack.addTearDown(
        new TearDown() {
          @Override
          public void tearDown() {
            Thread.interrupted();
          }
        });
  }

  @Override
  protected void tearDown() {
    tearDownStack.runTearDown();
  }

  /**
   * This first test doesn't test anything in Uninterruptibles, just demonstrates some normal
   * behavior of futures so that you can contrast the next test with it.
   */
  public void testRegularFutureInterrupted() throws ExecutionException {
    /*
     * Here's the order of events that we want.
     *
     * 1. The client thread begins to block on a get() call to a future.
     * 2. The client thread is interrupted sometime before the result would be
     * available.
     * 3. We expect the client's get() to throw an InterruptedException.
     * 4. We expect the client thread's interrupt state to be false.
     * 5. The client thread again makes a blocking call to get().
     * 6. Now the result becomes available.
     * 7. We expect get() to return this result.
     * 8. We expect the test thread's interrupt state to be false.
     */
    InterruptionUtil.requestInterruptIn(200, MILLISECONDS);

    assertFalse(Thread.interrupted());
    try {
      delayedFuture.get(20000, MILLISECONDS);
      fail("expected to be interrupted");
    } catch (InterruptedException expected) {
    } catch (TimeoutException e) {
      throw new RuntimeException(e);
    }

    // we were interrupted, but it's been cleared now
    assertFalse(Thread.interrupted());

    assertFalse(sleeper.completed);
    try {
      assertTrue(delayedFuture.get());
    } catch (InterruptedException e) {
      throw new RuntimeException(e);
    }
    assertTrue(sleeper.completed);
  }

  public void testMakeUninterruptible_timeoutPreservedThroughInterruption()
      throws ExecutionException {

    repeatedlyInterruptTestThread(tearDownStack);

    assertThrows(
        TimeoutException.class, () -> getUninterruptibly(delayedFuture, 500, MILLISECONDS));
    assertTrue(Thread.interrupted()); // clears the interrupt state, too

    assertFalse(sleeper.completed);
    assertTrue(getUninterruptibly(delayedFuture));

    assertTrue(Thread.interrupted()); // clears the interrupt state, too
    assertTrue(sleeper.completed);
  }

  // Runnable that sleeps for a fixed duration and records completion; it is
  // never expected to be interrupted itself (that would be a test bug).
  private static class SleepingRunnable implements Runnable {
    final int millis;
    volatile boolean completed;

    SleepingRunnable(int millis) {
      this.millis = millis;
    }

    @Override
    public void run() {
      try {
        Thread.sleep(millis);
      } catch (InterruptedException wontHappen) {
        throw new AssertionError();
      }
      completed = true;
    }
  }

  public void testMakeUninterruptible_untimed_uninterrupted() throws Exception {
    runUntimedInterruptsTest(0);
  }

  public void testMakeUninterruptible_untimed_interrupted() throws Exception {
    runUntimedInterruptsTest(1);
  }

  public void testMakeUninterruptible_untimed_multiplyInterrupted() throws Exception {
    runUntimedInterruptsTest(38);
  }

  public void testMakeUninterruptible_timed_uninterrupted() throws Exception {
    runTimedInterruptsTest(0);
  }

  public void testMakeUninterruptible_timed_interrupted() throws Exception {
    runTimedInterruptsTest(1);
  }

  public void testMakeUninterruptible_timed_multiplyInterrupted() throws Exception {
    runTimedInterruptsTest(38);
  }

  private static void runUntimedInterruptsTest(int times)
      throws InterruptedException, ExecutionException, TimeoutException {
    SettableFuture<String> future = SettableFuture.create();
    FutureTask<Boolean> interruptReporter = untimedInterruptReporter(future, false);

    runNInterruptsTest(times, future, interruptReporter);
  }

  private static void runTimedInterruptsTest(int times)
      throws InterruptedException, ExecutionException, TimeoutException {
    SettableFuture<String> future = SettableFuture.create();
    FutureTask<Boolean> interruptReporter = timedInterruptReporter(future);

    runNInterruptsTest(times, future, interruptReporter);
  }

  // Interrupts the waiting thread `times` times while it blocks on the
  // future, then completes the future; the reporter returns whether the
  // thread's interrupt state survived the wait.
  private static void runNInterruptsTest(
      int times, SettableFuture<String> future, FutureTask<Boolean> interruptReporter)
      throws InterruptedException, ExecutionException, TimeoutException {
    Thread waitingThread = new Thread(interruptReporter);
    waitingThread.start();
    for (int i = 0; i < times; i++) {
      waitingThread.interrupt();
    }

    future.set(RESULT);

    assertEquals(times > 0, (boolean) interruptReporter.get(20, SECONDS));
  }

  /**
   * Confirms that the test code triggers {@link InterruptedException} in a standard {@link Future}.
   */
  public void testMakeUninterruptible_plainFutureSanityCheck() throws Exception {
    SettableFuture<String> future = SettableFuture.create();
    FutureTask<Boolean> wasInterrupted = untimedInterruptReporter(future, true);

    Thread waitingThread = new Thread(wasInterrupted);
    waitingThread.start();
    waitingThread.interrupt();

    ExecutionException expected =
        assertThrows(ExecutionException.class, () -> wasInterrupted.get());
    assertTrue(expected.getCause().toString(), expected.getCause() instanceof InterruptedException);
  }

  public void testMakeUninterruptible_timedGetZeroTimeoutAttempted()
      throws TimeoutException, ExecutionException {
    SettableFuture<String> future = SettableFuture.create();
    future.set(RESULT);

    /*
     * getUninterruptibly should call the timed get method once with a
     * wait of 0 seconds (and it should succeed, since the result is already
     * available).
     */
    assertEquals(RESULT, getUninterruptibly(future, 0, SECONDS));
  }

  public void testMakeUninterruptible_timedGetNegativeTimeoutAttempted()
      throws TimeoutException, ExecutionException {
    SettableFuture<String> future = SettableFuture.create();
    future.set(RESULT);

    /*
     * The getUninterruptibly should call the timed get method once with a
     * wait of -1 seconds (and it should succeed, since the result is already
     * available).
     */
    assertEquals(RESULT, getUninterruptibly(future, -1, SECONDS));
  }

  // Task that waits for the future (uninterruptibly unless allowInterruption)
  // and reports whether its thread ended up interrupted.
  private static FutureTask<Boolean> untimedInterruptReporter(
      Future<?> future, boolean allowInterruption) {
    return new FutureTask<>(
        new Callable<Boolean>() {
          @Override
          public Boolean call() throws Exception {
            Object actual;
            if (allowInterruption) {
              actual = future.get();
            } else {
              actual = getUninterruptibly(future);
            }
            assertEquals(RESULT, actual);
            return Thread.interrupted();
          }
        });
  }

  private static FutureTask<Boolean> timedInterruptReporter(Future<?> future) {
    return new FutureTask<>(
        new Callable<Boolean>() {
          @Override
          public Boolean call() throws Exception {
            assertEquals(RESULT, getUninterruptibly(future, 10, MINUTES));
            return Thread.interrupted();
          }
        });
  }

  private static final String RESULT = "result";
}
java
github
https://github.com/google/guava
android/guava-tests/test/com/google/common/util/concurrent/UninterruptibleFutureTest.java
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    clean_html,
    float_or_none,
    int_or_none,
    try_get,
)


class HitRecordIE(InfoExtractor):
    """Extractor for hitrecord.org record pages."""

    _VALID_URL = r'https?://(?:www\.)?hitrecord\.org/records/(?P<id>\d+)'
    _TEST = {
        'url': 'https://hitrecord.org/records/2954362',
        'md5': 'fe1cdc2023bce0bbb95c39c57426aa71',
        'info_dict': {
            'id': '2954362',
            'ext': 'mp4',
            'title': 'A Very Different World (HITRECORD x ACLU)',
            'description': 'md5:e62defaffab5075a5277736bead95a3d',
            'duration': 139.327,
            'timestamp': 1471557582,
            'upload_date': '20160818',
            'uploader': 'Zuzi.C12',
            'uploader_id': '362811',
            'view_count': int,
            'like_count': int,
            'comment_count': int,
            'tags': list,
        }
    }

    def _real_extract(self, url):
        record_id = self._match_id(url)
        record = self._download_json(
            'https://hitrecord.org/api/web/records/%s' % record_id, record_id)

        # Required fields -- a missing title or mp4 URL should fail loudly.
        title = record['title']
        mp4_url = record['source_url']['mp4_url']

        # Collect tag texts, skipping malformed or non-string entries.
        tags = None
        raw_tags = try_get(record, lambda x: x['tags'], list)
        if raw_tags:
            tags = []
            for tag in raw_tags:
                if not isinstance(tag, dict):
                    continue
                text = tag.get('text')
                if text and isinstance(text, compat_str):
                    tags.append(text)

        return {
            'id': record_id,
            'url': mp4_url,
            'title': title,
            'description': clean_html(record.get('body')),
            'duration': float_or_none(record.get('duration'), 1000),
            'timestamp': int_or_none(record.get('created_at_i')),
            'uploader': try_get(
                record, lambda x: x['user']['username'], compat_str),
            'uploader_id': try_get(
                record, lambda x: compat_str(x['user']['id'])),
            'view_count': int_or_none(record.get('total_views_count')),
            'like_count': int_or_none(record.get('hearts_count')),
            'comment_count': int_or_none(record.get('comments_count')),
            'tags': tags,
        }
unknown
codeparrot/codeparrot-clean
/*------------------------------------------------------------------------- * * user.c * Commands for manipulating roles (formerly called users). * * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/commands/user.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/genam.h" #include "access/htup_details.h" #include "access/table.h" #include "access/xact.h" #include "catalog/binary_upgrade.h" #include "catalog/catalog.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/objectaccess.h" #include "catalog/pg_auth_members.h" #include "catalog/pg_authid.h" #include "catalog/pg_database.h" #include "catalog/pg_db_role_setting.h" #include "commands/comment.h" #include "commands/dbcommands.h" #include "commands/defrem.h" #include "commands/seclabel.h" #include "commands/user.h" #include "libpq/crypt.h" #include "miscadmin.h" #include "port/pg_bitutils.h" #include "storage/lmgr.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/catcache.h" #include "utils/fmgroids.h" #include "utils/syscache.h" #include "utils/varlena.h" /* * Removing a role grant - or the admin option on it - might recurse to * dependent grants. We use these values to reason about what would need to * be done in such cases. * * RRG_NOOP indicates a grant that would not need to be altered by the * operation. * * RRG_REMOVE_ADMIN_OPTION indicates a grant that would need to have * admin_option set to false by the operation. * * Similarly, RRG_REMOVE_INHERIT_OPTION and RRG_REMOVE_SET_OPTION indicate * grants that would need to have the corresponding options set to false. * * RRG_DELETE_GRANT indicates a grant that would need to be removed entirely * by the operation. 
*/ typedef enum { RRG_NOOP, RRG_REMOVE_ADMIN_OPTION, RRG_REMOVE_INHERIT_OPTION, RRG_REMOVE_SET_OPTION, RRG_DELETE_GRANT, } RevokeRoleGrantAction; /* Potentially set by pg_upgrade_support functions */ Oid binary_upgrade_next_pg_authid_oid = InvalidOid; typedef struct { unsigned specified; bool admin; bool inherit; bool set; } GrantRoleOptions; #define GRANT_ROLE_SPECIFIED_ADMIN 0x0001 #define GRANT_ROLE_SPECIFIED_INHERIT 0x0002 #define GRANT_ROLE_SPECIFIED_SET 0x0004 /* GUC parameters */ int Password_encryption = PASSWORD_TYPE_SCRAM_SHA_256; char *createrole_self_grant = ""; static bool createrole_self_grant_enabled = false; static GrantRoleOptions createrole_self_grant_options; /* Hook to check passwords in CreateRole() and AlterRole() */ check_password_hook_type check_password_hook = NULL; static void AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid, List *memberSpecs, List *memberIds, Oid grantorId, GrantRoleOptions *popt); static void DelRoleMems(Oid currentUserId, const char *rolename, Oid roleid, List *memberSpecs, List *memberIds, Oid grantorId, GrantRoleOptions *popt, DropBehavior behavior); static void check_role_membership_authorization(Oid currentUserId, Oid roleid, bool is_grant); static Oid check_role_grantor(Oid currentUserId, Oid roleid, Oid grantorId, bool is_grant); static RevokeRoleGrantAction *initialize_revoke_actions(CatCList *memlist); static bool plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, Oid member, Oid grantor, GrantRoleOptions *popt, DropBehavior behavior); static void plan_member_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, Oid member); static void plan_recursive_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, int index, bool revoke_admin_option_only, DropBehavior behavior); static void InitGrantRoleOptions(GrantRoleOptions *popt); /* Check if current user has createrole privileges */ static bool have_createrole_privilege(void) { return has_createrole_privilege(GetUserId()); 
}

/*
 * CREATE ROLE
 *
 * Create a new role as described by a CreateRoleStmt: parse the option
 * list, enforce permission checks, insert the new pg_authid tuple, and set
 * up any requested role memberships.
 *
 * pstate is used only for error position reporting.  Returns the OID of
 * the newly created role.
 */
Oid
CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
{
    Relation pg_authid_rel;
    TupleDesc pg_authid_dsc;
    HeapTuple tuple;
    Datum new_record[Natts_pg_authid] = {0};
    bool new_record_nulls[Natts_pg_authid] = {0};
    Oid currentUserId = GetUserId();
    Oid roleid;
    ListCell *item;
    ListCell *option;
    char *password = NULL;      /* user password */
    bool issuper = false;       /* Make the user a superuser? */
    bool inherit = true;        /* Auto inherit privileges? */
    bool createrole = false;    /* Can this user create roles? */
    bool createdb = false;      /* Can the user create databases? */
    bool canlogin = false;      /* Can this user login? */
    bool isreplication = false; /* Is this a replication role? */
    bool bypassrls = false;     /* Is this a row security enabled role? */
    int connlimit = -1;         /* maximum connections allowed */
    List *addroleto = NIL;      /* roles to make this a member of */
    List *rolemembers = NIL;    /* roles to be members of this role */
    List *adminmembers = NIL;   /* roles to be admins of this role */
    char *validUntil = NULL;    /* time the login is valid until */
    Datum validUntil_datum;     /* same, as timestamptz Datum */
    bool validUntil_null;
    /* One DefElem pointer per recognized option, for duplicate detection */
    DefElem *dpassword = NULL;
    DefElem *dissuper = NULL;
    DefElem *dinherit = NULL;
    DefElem *dcreaterole = NULL;
    DefElem *dcreatedb = NULL;
    DefElem *dcanlogin = NULL;
    DefElem *disreplication = NULL;
    DefElem *dconnlimit = NULL;
    DefElem *daddroleto = NULL;
    DefElem *drolemembers = NULL;
    DefElem *dadminmembers = NULL;
    DefElem *dvalidUntil = NULL;
    DefElem *dbypassRLS = NULL;
    GrantRoleOptions popt;

    /* The defaults can vary depending on the original statement type */
    switch (stmt->stmt_type)
    {
        case ROLESTMT_ROLE:
            break;
        case ROLESTMT_USER:
            canlogin = true;
            /* may eventually want inherit to default to false here */
            break;
        case ROLESTMT_GROUP:
            break;
    }

    /* Extract options from the statement node tree */
    foreach(option, stmt->options)
    {
        DefElem *defel = (DefElem *) lfirst(option);

        if (strcmp(defel->defname, "password") == 0)
        {
            if (dpassword)
                errorConflictingDefElem(defel, pstate);
            dpassword = defel;
        }
        else if (strcmp(defel->defname, "sysid") == 0)
        {
            /* SYSID is accepted but ignored, for historical compatibility */
            ereport(NOTICE,
                    (errmsg("SYSID can no longer be specified")));
        }
        else if (strcmp(defel->defname, "superuser") == 0)
        {
            if (dissuper)
                errorConflictingDefElem(defel, pstate);
            dissuper = defel;
        }
        else if (strcmp(defel->defname, "inherit") == 0)
        {
            if (dinherit)
                errorConflictingDefElem(defel, pstate);
            dinherit = defel;
        }
        else if (strcmp(defel->defname, "createrole") == 0)
        {
            if (dcreaterole)
                errorConflictingDefElem(defel, pstate);
            dcreaterole = defel;
        }
        else if (strcmp(defel->defname, "createdb") == 0)
        {
            if (dcreatedb)
                errorConflictingDefElem(defel, pstate);
            dcreatedb = defel;
        }
        else if (strcmp(defel->defname, "canlogin") == 0)
        {
            if (dcanlogin)
                errorConflictingDefElem(defel, pstate);
            dcanlogin = defel;
        }
        else if (strcmp(defel->defname, "isreplication") == 0)
        {
            if (disreplication)
                errorConflictingDefElem(defel, pstate);
            disreplication = defel;
        }
        else if (strcmp(defel->defname, "connectionlimit") == 0)
        {
            if (dconnlimit)
                errorConflictingDefElem(defel, pstate);
            dconnlimit = defel;
        }
        else if (strcmp(defel->defname, "addroleto") == 0)
        {
            if (daddroleto)
                errorConflictingDefElem(defel, pstate);
            daddroleto = defel;
        }
        else if (strcmp(defel->defname, "rolemembers") == 0)
        {
            if (drolemembers)
                errorConflictingDefElem(defel, pstate);
            drolemembers = defel;
        }
        else if (strcmp(defel->defname, "adminmembers") == 0)
        {
            if (dadminmembers)
                errorConflictingDefElem(defel, pstate);
            dadminmembers = defel;
        }
        else if (strcmp(defel->defname, "validUntil") == 0)
        {
            if (dvalidUntil)
                errorConflictingDefElem(defel, pstate);
            dvalidUntil = defel;
        }
        else if (strcmp(defel->defname, "bypassrls") == 0)
        {
            if (dbypassRLS)
                errorConflictingDefElem(defel, pstate);
            dbypassRLS = defel;
        }
        else
            elog(ERROR, "option \"%s\" not recognized",
                 defel->defname);
    }

    /* Transfer option values into local variables */
    if (dpassword && dpassword->arg)
        password = strVal(dpassword->arg);
    if (dissuper)
        issuper = boolVal(dissuper->arg);
    if (dinherit)
        inherit = boolVal(dinherit->arg);
    if (dcreaterole)
        createrole = boolVal(dcreaterole->arg);
    if (dcreatedb)
        createdb = boolVal(dcreatedb->arg);
    if (dcanlogin)
        canlogin = boolVal(dcanlogin->arg);
    if (disreplication)
        isreplication = boolVal(disreplication->arg);
    if (dconnlimit)
    {
        connlimit = intVal(dconnlimit->arg);
        /* -1 means "no limit"; anything smaller is invalid */
        if (connlimit < -1)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                     errmsg("invalid connection limit: %d", connlimit)));
    }
    if (daddroleto)
        addroleto = (List *) daddroleto->arg;
    if (drolemembers)
        rolemembers = (List *) drolemembers->arg;
    if (dadminmembers)
        adminmembers = (List *) dadminmembers->arg;
    if (dvalidUntil)
        validUntil = strVal(dvalidUntil->arg);
    if (dbypassRLS)
        bypassrls = boolVal(dbypassRLS->arg);

    /* Check some permissions first */
    if (!superuser_arg(currentUserId))
    {
        if (!has_createrole_privilege(currentUserId))
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to create role"),
                     errdetail("Only roles with the %s attribute may create roles.",
                               "CREATEROLE")));
        if (issuper)
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to create role"),
                     errdetail("Only roles with the %s attribute may create roles with the %s attribute.",
                               "SUPERUSER", "SUPERUSER")));
        if (createdb && !have_createdb_privilege())
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to create role"),
                     errdetail("Only roles with the %s attribute may create roles with the %s attribute.",
                               "CREATEDB", "CREATEDB")));
        if (isreplication && !has_rolreplication(currentUserId))
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to create role"),
                     errdetail("Only roles with the %s attribute may create roles with the %s attribute.",
                               "REPLICATION", "REPLICATION")));
        if (bypassrls && !has_bypassrls_privilege(currentUserId))
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to create role"),
                     errdetail("Only roles with the %s attribute may create roles with the %s attribute.",
                               "BYPASSRLS", "BYPASSRLS")));
    }

    /*
     * Check that the user is not trying to create a role in the reserved
     * "pg_" namespace.
     */
    if (IsReservedName(stmt->role))
        ereport(ERROR,
                (errcode(ERRCODE_RESERVED_NAME),
                 errmsg("role name \"%s\" is reserved",
                        stmt->role),
                 errdetail("Role names starting with \"pg_\" are reserved.")));

    /*
     * If built with appropriate switch, whine when regression-testing
     * conventions for role names are violated.
     */
#ifdef ENFORCE_REGRESSION_TEST_NAME_RESTRICTIONS
    if (strncmp(stmt->role, "regress_", 8) != 0)
        elog(WARNING, "roles created by regression test cases should have names starting with \"regress_\"");
#endif

    /*
     * Check the pg_authid relation to be certain the role doesn't already
     * exist.
     */
    pg_authid_rel = table_open(AuthIdRelationId, RowExclusiveLock);
    pg_authid_dsc = RelationGetDescr(pg_authid_rel);

    if (OidIsValid(get_role_oid(stmt->role, true)))
        ereport(ERROR,
                (errcode(ERRCODE_DUPLICATE_OBJECT),
                 errmsg("role \"%s\" already exists",
                        stmt->role)));

    /* Convert validuntil to internal form */
    if (validUntil)
    {
        validUntil_datum = DirectFunctionCall3(timestamptz_in,
                                               CStringGetDatum(validUntil),
                                               ObjectIdGetDatum(InvalidOid),
                                               Int32GetDatum(-1));
        validUntil_null = false;
    }
    else
    {
        validUntil_datum = (Datum) 0;
        validUntil_null = true;
    }

    /*
     * Call the password checking hook if there is one defined
     */
    if (check_password_hook && password)
        (*check_password_hook) (stmt->role,
                                password,
                                get_password_type(password),
                                validUntil_datum,
                                validUntil_null);

    /*
     * Build a tuple to insert
     */
    new_record[Anum_pg_authid_rolname - 1] =
        DirectFunctionCall1(namein, CStringGetDatum(stmt->role));
    new_record[Anum_pg_authid_rolsuper - 1] = BoolGetDatum(issuper);
    new_record[Anum_pg_authid_rolinherit - 1] = BoolGetDatum(inherit);
    new_record[Anum_pg_authid_rolcreaterole - 1] = BoolGetDatum(createrole);
    new_record[Anum_pg_authid_rolcreatedb - 1] = BoolGetDatum(createdb);
    new_record[Anum_pg_authid_rolcanlogin - 1] = BoolGetDatum(canlogin);
    new_record[Anum_pg_authid_rolreplication - 1] = BoolGetDatum(isreplication);
    new_record[Anum_pg_authid_rolconnlimit - 1] = Int32GetDatum(connlimit);

    if (password)
    {
        char *shadow_pass;
        const char *logdetail = NULL;

        /*
         * Don't allow an empty password. Libpq treats an empty password the
         * same as no password at all, and won't even try to authenticate. But
         * other clients might, so allowing it would be confusing. By clearing
         * the password when an empty string is specified, the account is
         * consistently locked for all clients.
         *
         * Note that this only covers passwords stored in the database itself.
         * There are also checks in the authentication code, to forbid an
         * empty password from being used with authentication methods that
         * fetch the password from an external system, like LDAP or PAM.
         */
        if (password[0] == '\0' ||
            plain_crypt_verify(stmt->role, password, "", &logdetail) == STATUS_OK)
        {
            ereport(NOTICE,
                    (errmsg("empty string is not a valid password, clearing password")));
            new_record_nulls[Anum_pg_authid_rolpassword - 1] = true;
        }
        else
        {
            /* Encrypt the password to the requested format. */
            shadow_pass = encrypt_password(Password_encryption, stmt->role,
                                           password);
            new_record[Anum_pg_authid_rolpassword - 1] =
                CStringGetTextDatum(shadow_pass);
        }
    }
    else
        new_record_nulls[Anum_pg_authid_rolpassword - 1] = true;

    new_record[Anum_pg_authid_rolvaliduntil - 1] = validUntil_datum;
    new_record_nulls[Anum_pg_authid_rolvaliduntil - 1] = validUntil_null;

    new_record[Anum_pg_authid_rolbypassrls - 1] = BoolGetDatum(bypassrls);

    /*
     * pg_largeobject_metadata contains pg_authid.oid's, so we use the
     * binary-upgrade override.
     */
    if (IsBinaryUpgrade)
    {
        if (!OidIsValid(binary_upgrade_next_pg_authid_oid))
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                     errmsg("pg_authid OID value not set when in binary upgrade mode")));

        roleid = binary_upgrade_next_pg_authid_oid;
        binary_upgrade_next_pg_authid_oid = InvalidOid;
    }
    else
    {
        roleid = GetNewOidWithIndex(pg_authid_rel, AuthIdOidIndexId,
                                    Anum_pg_authid_oid);
    }

    new_record[Anum_pg_authid_oid - 1] = ObjectIdGetDatum(roleid);

    tuple = heap_form_tuple(pg_authid_dsc, new_record, new_record_nulls);

    /*
     * Insert new record in the pg_authid table
     */
    CatalogTupleInsert(pg_authid_rel, tuple);

    /*
     * Advance command counter so we can see new record; else tests in
     * AddRoleMems may fail.
     */
    if (addroleto || adminmembers || rolemembers)
        CommandCounterIncrement();

    /* Default grant. */
    InitGrantRoleOptions(&popt);

    /*
     * Add the new role to the specified existing roles.
     */
    if (addroleto)
    {
        RoleSpec *thisrole = makeNode(RoleSpec);
        List *thisrole_list = list_make1(thisrole);
        List *thisrole_oidlist = list_make1_oid(roleid);

        thisrole->roletype = ROLESPEC_CSTRING;
        thisrole->rolename = stmt->role;
        thisrole->location = -1;

        foreach(item, addroleto)
        {
            RoleSpec *oldrole = lfirst(item);
            HeapTuple oldroletup = get_rolespec_tuple(oldrole);
            Form_pg_authid oldroleform = (Form_pg_authid) GETSTRUCT(oldroletup);
            Oid oldroleid = oldroleform->oid;
            char *oldrolename = NameStr(oldroleform->rolname);

            /* can only add this role to roles for which you have rights */
            check_role_membership_authorization(currentUserId, oldroleid,
                                                true);
            AddRoleMems(currentUserId, oldrolename, oldroleid,
                        thisrole_list, thisrole_oidlist,
                        InvalidOid, &popt);

            ReleaseSysCache(oldroletup);
        }
    }

    /*
     * If the current user isn't a superuser, make them an admin of the new
     * role so that they can administer the new object they just created.
     * Superusers will be able to do that anyway.
     *
     * The grantor of record for this implicit grant is the bootstrap
     * superuser, which means that the CREATEROLE user cannot revoke the
     * grant. They can however grant the created role back to themselves with
     * different options, since they enjoy ADMIN OPTION on it.
     */
    if (!superuser())
    {
        RoleSpec *current_role = makeNode(RoleSpec);
        GrantRoleOptions poptself;
        List *memberSpecs;
        List *memberIds = list_make1_oid(currentUserId);

        current_role->roletype = ROLESPEC_CURRENT_ROLE;
        current_role->location = -1;
        memberSpecs = list_make1(current_role);

        poptself.specified = GRANT_ROLE_SPECIFIED_ADMIN
            | GRANT_ROLE_SPECIFIED_INHERIT
            | GRANT_ROLE_SPECIFIED_SET;
        poptself.admin = true;
        poptself.inherit = false;
        poptself.set = false;

        AddRoleMems(BOOTSTRAP_SUPERUSERID, stmt->role, roleid,
                    memberSpecs, memberIds,
                    BOOTSTRAP_SUPERUSERID, &poptself);

        /*
         * We must make the implicit grant visible to the code below, else the
         * additional grants will fail.
         */
        CommandCounterIncrement();

        /*
         * Because of the implicit grant above, a CREATEROLE user who creates
         * a role has the ability to grant that role back to themselves with
         * the INHERIT or SET options, if they wish to inherit the role's
         * privileges or be able to SET ROLE to it. The createrole_self_grant
         * GUC can be used to make this happen automatically. This has no
         * security implications since the same user is able to make the same
         * grant using an explicit GRANT statement; it's just convenient.
         */
        if (createrole_self_grant_enabled)
            AddRoleMems(currentUserId, stmt->role, roleid,
                        memberSpecs, memberIds,
                        currentUserId, &createrole_self_grant_options);
    }

    /*
     * Add the specified members to this new role. adminmembers get the admin
     * option, rolemembers don't.
     *
     * NB: No permissions check is required here. If you have enough rights to
     * create a role, you can add any members you like.
     */
    AddRoleMems(currentUserId, stmt->role, roleid,
                rolemembers, roleSpecsToIds(rolemembers),
                InvalidOid, &popt);
    popt.specified |= GRANT_ROLE_SPECIFIED_ADMIN;
    popt.admin = true;
    AddRoleMems(currentUserId, stmt->role, roleid,
                adminmembers, roleSpecsToIds(adminmembers),
                InvalidOid, &popt);

    /* Post creation hook for new role */
    InvokeObjectPostCreateHook(AuthIdRelationId, roleid, 0);

    /*
     * Close pg_authid, but keep lock till commit.
     */
    table_close(pg_authid_rel, NoLock);

    return roleid;
}

/*
 * ALTER ROLE
 *
 * Note: the rolemembers option accepted here is intended to support the
 * backwards-compatible ALTER GROUP syntax.  Although it will work to say
 * "ALTER ROLE role ROLE rolenames", we don't document it.
 */
Oid
AlterRole(ParseState *pstate, AlterRoleStmt *stmt)
{
    /* Replacement values/flags for heap_modify_tuple over pg_authid */
    Datum new_record[Natts_pg_authid] = {0};
    bool new_record_nulls[Natts_pg_authid] = {0};
    bool new_record_repl[Natts_pg_authid] = {0};
    Relation pg_authid_rel;
    TupleDesc pg_authid_dsc;
    HeapTuple tuple,
              new_tuple;
    Form_pg_authid authform;
    ListCell *option;
    char *rolename;
    char *password = NULL;      /* user password */
    int connlimit = -1;         /* maximum connections allowed */
    char *validUntil = NULL;    /* time the login is valid until */
    Datum validUntil_datum;     /* same, as timestamptz Datum */
    bool validUntil_null;
    DefElem *dpassword = NULL;
    DefElem *dissuper = NULL;
    DefElem *dinherit = NULL;
    DefElem *dcreaterole = NULL;
    DefElem *dcreatedb = NULL;
    DefElem *dcanlogin = NULL;
    DefElem *disreplication = NULL;
    DefElem *dconnlimit = NULL;
    DefElem *drolemembers = NULL;
    DefElem *dvalidUntil = NULL;
    DefElem *dbypassRLS = NULL;
    Oid roleid;
    Oid currentUserId = GetUserId();
    GrantRoleOptions popt;

    check_rolespec_name(stmt->role,
                        _("Cannot alter reserved roles."));

    /* Extract options from the statement node tree */
    foreach(option, stmt->options)
    {
        DefElem *defel = (DefElem *) lfirst(option);

        if (strcmp(defel->defname, "password") == 0)
        {
            if (dpassword)
                errorConflictingDefElem(defel, pstate);
            dpassword = defel;
        }
        else if
(strcmp(defel->defname, "superuser") == 0)
        {
            if (dissuper)
                errorConflictingDefElem(defel, pstate);
            dissuper = defel;
        }
        else if (strcmp(defel->defname, "inherit") == 0)
        {
            if (dinherit)
                errorConflictingDefElem(defel, pstate);
            dinherit = defel;
        }
        else if (strcmp(defel->defname, "createrole") == 0)
        {
            if (dcreaterole)
                errorConflictingDefElem(defel, pstate);
            dcreaterole = defel;
        }
        else if (strcmp(defel->defname, "createdb") == 0)
        {
            if (dcreatedb)
                errorConflictingDefElem(defel, pstate);
            dcreatedb = defel;
        }
        else if (strcmp(defel->defname, "canlogin") == 0)
        {
            if (dcanlogin)
                errorConflictingDefElem(defel, pstate);
            dcanlogin = defel;
        }
        else if (strcmp(defel->defname, "isreplication") == 0)
        {
            if (disreplication)
                errorConflictingDefElem(defel, pstate);
            disreplication = defel;
        }
        else if (strcmp(defel->defname, "connectionlimit") == 0)
        {
            if (dconnlimit)
                errorConflictingDefElem(defel, pstate);
            dconnlimit = defel;
        }
        /* rolemembers is only accepted when an add/drop action was given */
        else if (strcmp(defel->defname, "rolemembers") == 0 &&
                 stmt->action != 0)
        {
            if (drolemembers)
                errorConflictingDefElem(defel, pstate);
            drolemembers = defel;
        }
        else if (strcmp(defel->defname, "validUntil") == 0)
        {
            if (dvalidUntil)
                errorConflictingDefElem(defel, pstate);
            dvalidUntil = defel;
        }
        else if (strcmp(defel->defname, "bypassrls") == 0)
        {
            if (dbypassRLS)
                errorConflictingDefElem(defel, pstate);
            dbypassRLS = defel;
        }
        else
            elog(ERROR, "option \"%s\" not recognized",
                 defel->defname);
    }

    if (dpassword && dpassword->arg)
        password = strVal(dpassword->arg);
    if (dconnlimit)
    {
        connlimit = intVal(dconnlimit->arg);
        /* -1 means "no limit"; anything smaller is invalid */
        if (connlimit < -1)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                     errmsg("invalid connection limit: %d", connlimit)));
    }
    if (dvalidUntil)
        validUntil = strVal(dvalidUntil->arg);

    /*
     * Scan the pg_authid relation to be certain the user exists.
     */
    pg_authid_rel = table_open(AuthIdRelationId, RowExclusiveLock);
    pg_authid_dsc = RelationGetDescr(pg_authid_rel);

    tuple = get_rolespec_tuple(stmt->role);
    authform = (Form_pg_authid) GETSTRUCT(tuple);
    rolename = pstrdup(NameStr(authform->rolname));
    roleid = authform->oid;

    /* To mess with a superuser in any way you gotta be superuser. */
    if (!superuser() && authform->rolsuper)
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("permission denied to alter role"),
                 errdetail("Only roles with the %s attribute may alter roles with the %s attribute.",
                           "SUPERUSER", "SUPERUSER")));
    if (!superuser() && dissuper)
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("permission denied to alter role"),
                 errdetail("Only roles with the %s attribute may change the %s attribute.",
                           "SUPERUSER", "SUPERUSER")));

    /*
     * Most changes to a role require that you both have CREATEROLE privileges
     * and also ADMIN OPTION on the role.
     */
    if (!have_createrole_privilege() ||
        !is_admin_of_role(GetUserId(), roleid))
    {
        /* things an unprivileged user certainly can't do */
        if (dinherit || dcreaterole || dcreatedb || dcanlogin || dconnlimit ||
            dvalidUntil || disreplication || dbypassRLS)
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to alter role"),
                     errdetail("Only roles with the %s attribute and the %s option on role \"%s\" may alter this role.",
                               "CREATEROLE", "ADMIN", rolename)));

        /* an unprivileged user can change their own password */
        if (dpassword && roleid != currentUserId)
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to alter role"),
                     errdetail("To change another role's password, the current user must have the %s attribute and the %s option on the role.",
                               "CREATEROLE", "ADMIN")));
    }
    else if (!superuser())
    {
        /*
         * Even if you have both CREATEROLE and ADMIN OPTION on a role, you
         * can only change the CREATEDB, REPLICATION, or BYPASSRLS attributes
         * if they are set for your own role (or you are the superuser).
         */
        if (dcreatedb && !have_createdb_privilege())
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to alter role"),
                     errdetail("Only roles with the %s attribute may change the %s attribute.",
                               "CREATEDB", "CREATEDB")));
        if (disreplication && !has_rolreplication(currentUserId))
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to alter role"),
                     errdetail("Only roles with the %s attribute may change the %s attribute.",
                               "REPLICATION", "REPLICATION")));
        if (dbypassRLS && !has_bypassrls_privilege(currentUserId))
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to alter role"),
                     errdetail("Only roles with the %s attribute may change the %s attribute.",
                               "BYPASSRLS", "BYPASSRLS")));
    }

    /* To add or drop members, you need ADMIN OPTION. */
    if (drolemembers && !is_admin_of_role(currentUserId, roleid))
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("permission denied to alter role"),
                 errdetail("Only roles with the %s option on role \"%s\" may add or drop members.",
                           "ADMIN", rolename)));

    /* Convert validuntil to internal form */
    if (dvalidUntil)
    {
        validUntil_datum = DirectFunctionCall3(timestamptz_in,
                                               CStringGetDatum(validUntil),
                                               ObjectIdGetDatum(InvalidOid),
                                               Int32GetDatum(-1));
        validUntil_null = false;
    }
    else
    {
        /* fetch existing setting in case hook needs it */
        validUntil_datum = SysCacheGetAttr(AUTHNAME, tuple,
                                           Anum_pg_authid_rolvaliduntil,
                                           &validUntil_null);
    }

    /*
     * Call the password checking hook if there is one defined
     */
    if (check_password_hook && password)
        (*check_password_hook) (rolename,
                                password,
                                get_password_type(password),
                                validUntil_datum,
                                validUntil_null);

    /*
     * Build an updated tuple, perusing the information just obtained
     */

    /*
     * issuper/createrole/etc
     */
    if (dissuper)
    {
        bool should_be_super = boolVal(dissuper->arg);

        if (!should_be_super && roleid == BOOTSTRAP_SUPERUSERID)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("permission denied to alter role"),
                     errdetail("The bootstrap superuser must have the %s attribute.",
                               "SUPERUSER")));

        new_record[Anum_pg_authid_rolsuper - 1] = BoolGetDatum(should_be_super);
        new_record_repl[Anum_pg_authid_rolsuper - 1] = true;
    }

    if (dinherit)
    {
        new_record[Anum_pg_authid_rolinherit - 1] = BoolGetDatum(boolVal(dinherit->arg));
        new_record_repl[Anum_pg_authid_rolinherit - 1] = true;
    }

    if (dcreaterole)
    {
        new_record[Anum_pg_authid_rolcreaterole - 1] = BoolGetDatum(boolVal(dcreaterole->arg));
        new_record_repl[Anum_pg_authid_rolcreaterole - 1] = true;
    }

    if (dcreatedb)
    {
        new_record[Anum_pg_authid_rolcreatedb - 1] = BoolGetDatum(boolVal(dcreatedb->arg));
        new_record_repl[Anum_pg_authid_rolcreatedb - 1] = true;
    }

    if (dcanlogin)
    {
        new_record[Anum_pg_authid_rolcanlogin - 1] = BoolGetDatum(boolVal(dcanlogin->arg));
        new_record_repl[Anum_pg_authid_rolcanlogin - 1] = true;
    }

    if (disreplication)
    {
        new_record[Anum_pg_authid_rolreplication - 1] = BoolGetDatum(boolVal(disreplication->arg));
        new_record_repl[Anum_pg_authid_rolreplication - 1] = true;
    }

    if (dconnlimit)
    {
        new_record[Anum_pg_authid_rolconnlimit - 1] = Int32GetDatum(connlimit);
        new_record_repl[Anum_pg_authid_rolconnlimit - 1] = true;
    }

    /* password */
    if (password)
    {
        char *shadow_pass;
        const char *logdetail = NULL;

        /* Like in CREATE USER, don't allow an empty password. */
        if (password[0] == '\0' ||
            plain_crypt_verify(rolename, password, "", &logdetail) == STATUS_OK)
        {
            ereport(NOTICE,
                    (errmsg("empty string is not a valid password, clearing password")));
            new_record_nulls[Anum_pg_authid_rolpassword - 1] = true;
        }
        else
        {
            /* Encrypt the password to the requested format. */
            shadow_pass = encrypt_password(Password_encryption, rolename,
                                           password);
            new_record[Anum_pg_authid_rolpassword - 1] =
                CStringGetTextDatum(shadow_pass);
        }
        new_record_repl[Anum_pg_authid_rolpassword - 1] = true;
    }

    /* unset password */
    if (dpassword && dpassword->arg == NULL)
    {
        new_record_repl[Anum_pg_authid_rolpassword - 1] = true;
        new_record_nulls[Anum_pg_authid_rolpassword - 1] = true;
    }

    /* valid until */
    new_record[Anum_pg_authid_rolvaliduntil - 1] = validUntil_datum;
    new_record_nulls[Anum_pg_authid_rolvaliduntil - 1] = validUntil_null;
    new_record_repl[Anum_pg_authid_rolvaliduntil - 1] = true;

    if (dbypassRLS)
    {
        new_record[Anum_pg_authid_rolbypassrls - 1] = BoolGetDatum(boolVal(dbypassRLS->arg));
        new_record_repl[Anum_pg_authid_rolbypassrls - 1] = true;
    }

    new_tuple = heap_modify_tuple(tuple, pg_authid_dsc, new_record,
                                  new_record_nulls, new_record_repl);
    CatalogTupleUpdate(pg_authid_rel, &tuple->t_self, new_tuple);

    InvokeObjectPostAlterHook(AuthIdRelationId, roleid, 0);

    ReleaseSysCache(tuple);
    heap_freetuple(new_tuple);

    InitGrantRoleOptions(&popt);

    /*
     * Advance command counter so we can see new record; else tests in
     * AddRoleMems may fail.
     */
    if (drolemembers)
    {
        List *rolemembers = (List *) drolemembers->arg;

        CommandCounterIncrement();

        if (stmt->action == +1) /* add members to role */
            AddRoleMems(currentUserId, rolename, roleid,
                        rolemembers, roleSpecsToIds(rolemembers),
                        InvalidOid, &popt);
        else if (stmt->action == -1)    /* drop members from role */
            DelRoleMems(currentUserId, rolename, roleid,
                        rolemembers, roleSpecsToIds(rolemembers),
                        InvalidOid, &popt, DROP_RESTRICT);
    }

    /*
     * Close pg_authid, but keep lock till commit.
     */
    table_close(pg_authid_rel, NoLock);

    return roleid;
}

/*
 * ALTER ROLE ...
SET
 *
 * Apply a per-role (and optionally per-database) configuration setting.
 * Either role or database may be omitted; permission requirements differ
 * for each combination.  Returns the role OID, or InvalidOid when no role
 * was specified.
 */
Oid
AlterRoleSet(AlterRoleSetStmt *stmt)
{
    HeapTuple roletuple;
    Form_pg_authid roleform;
    Oid databaseid = InvalidOid;
    Oid roleid = InvalidOid;

    if (stmt->role)
    {
        check_rolespec_name(stmt->role,
                            _("Cannot alter reserved roles."));

        roletuple = get_rolespec_tuple(stmt->role);
        roleform = (Form_pg_authid) GETSTRUCT(roletuple);
        roleid = roleform->oid;

        /*
         * Obtain a lock on the role and make sure it didn't go away in the
         * meantime.
         */
        shdepLockAndCheckObject(AuthIdRelationId, roleid);

        /*
         * To mess with a superuser you gotta be superuser; otherwise you need
         * CREATEROLE plus admin option on the target role; unless you're just
         * trying to change your own settings
         */
        if (roleform->rolsuper)
        {
            if (!superuser())
                ereport(ERROR,
                        (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                         errmsg("permission denied to alter role"),
                         errdetail("Only roles with the %s attribute may alter roles with the %s attribute.",
                                   "SUPERUSER", "SUPERUSER")));
        }
        else
        {
            if ((!have_createrole_privilege() ||
                 !is_admin_of_role(GetUserId(), roleid))
                && roleid != GetUserId())
                ereport(ERROR,
                        (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                         errmsg("permission denied to alter role"),
                         errdetail("Only roles with the %s attribute and the %s option on role \"%s\" may alter this role.",
                                   "CREATEROLE", "ADMIN", NameStr(roleform->rolname))));
        }

        ReleaseSysCache(roletuple);
    }

    /* look up and lock the database, if specified */
    if (stmt->database != NULL)
    {
        databaseid = get_database_oid(stmt->database, false);
        shdepLockAndCheckObject(DatabaseRelationId, databaseid);

        if (!stmt->role)
        {
            /*
             * If no role is specified, then this is effectively the same as
             * ALTER DATABASE ... SET, so use the same permission check.
             */
            if (!object_ownercheck(DatabaseRelationId, databaseid, GetUserId()))
                aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE,
                               stmt->database);
        }
    }

    if (!stmt->role && !stmt->database)
    {
        /* Must be superuser to alter settings globally. */
        if (!superuser())
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to alter setting"),
                     errdetail("Only roles with the %s attribute may alter settings globally.",
                               "SUPERUSER")));
    }

    AlterSetting(databaseid, roleid, stmt->setstmt);

    return roleid;
}

/*
 * DROP ROLE
 *
 * Drop one or more roles, after verifying permissions and that no shared
 * objects still depend on them.
 */
void
DropRole(DropRoleStmt *stmt)
{
    Relation pg_authid_rel,
             pg_auth_members_rel;
    ListCell *item;
    List *role_oids = NIL;

    if (!have_createrole_privilege())
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("permission denied to drop role"),
                 errdetail("Only roles with the %s attribute and the %s option on the target roles may drop roles.",
                           "CREATEROLE", "ADMIN")));

    /*
     * Scan the pg_authid relation to find the Oid of the role(s) to be
     * deleted and perform preliminary permissions and sanity checks.
     */
    pg_authid_rel = table_open(AuthIdRelationId, RowExclusiveLock);
    pg_auth_members_rel = table_open(AuthMemRelationId, RowExclusiveLock);

    foreach(item, stmt->roles)
    {
        RoleSpec *rolspec = lfirst(item);
        char *role;
        HeapTuple tuple,
                  tmp_tuple;
        Form_pg_authid roleform;
        ScanKeyData scankey;
        SysScanDesc sscan;
        Oid roleid;

        if (rolspec->roletype != ROLESPEC_CSTRING)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                     errmsg("cannot use special role specifier in DROP ROLE")));
        role = rolspec->rolename;

        tuple = SearchSysCache1(AUTHNAME, PointerGetDatum(role));
        if (!HeapTupleIsValid(tuple))
        {
            if (!stmt->missing_ok)
            {
                ereport(ERROR,
                        (errcode(ERRCODE_UNDEFINED_OBJECT),
                         errmsg("role \"%s\" does not exist", role)));
            }
            else
            {
                ereport(NOTICE,
                        (errmsg("role \"%s\" does not exist, skipping",
                                role)));
            }

            continue;
        }

        roleform = (Form_pg_authid) GETSTRUCT(tuple);
        roleid = roleform->oid;

        /* Refuse to drop any identity the current session is running as */
        if (roleid == GetUserId())
            ereport(ERROR,
                    (errcode(ERRCODE_OBJECT_IN_USE),
                     errmsg("current user cannot be dropped")));
        if (roleid == GetOuterUserId())
            ereport(ERROR,
                    (errcode(ERRCODE_OBJECT_IN_USE),
                     errmsg("current user cannot be dropped")));
        if (roleid == GetSessionUserId())
            ereport(ERROR,
                    (errcode(ERRCODE_OBJECT_IN_USE),
                     errmsg("session user cannot be dropped")));

        /*
         * For safety's sake, we allow createrole holders to drop ordinary
         * roles but not superuser roles, and only if they also have ADMIN
         * OPTION.
         */
        if (roleform->rolsuper && !superuser())
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to drop role"),
                     errdetail("Only roles with the %s attribute may drop roles with the %s attribute.",
                               "SUPERUSER", "SUPERUSER")));
        if (!is_admin_of_role(GetUserId(), roleid))
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to drop role"),
                     errdetail("Only roles with the %s attribute and the %s option on role \"%s\" may drop this role.",
                               "CREATEROLE", "ADMIN", NameStr(roleform->rolname))));

        /* DROP hook for the role being removed */
        InvokeObjectDropHook(AuthIdRelationId, roleid, 0);

        /* Don't leak the syscache tuple */
        ReleaseSysCache(tuple);

        /*
         * Lock the role, so nobody can add dependencies to her while we drop
         * her.  We keep the lock until the end of transaction.
         */
        LockSharedObject(AuthIdRelationId, roleid, 0, AccessExclusiveLock);

        /*
         * If there is a pg_auth_members entry that has one of the roles to be
         * dropped as the roleid or member, it should be silently removed, but
         * if there is a pg_auth_members entry that has one of the roles to be
         * dropped as the grantor, the operation should fail.
         *
         * It's possible, however, that a single pg_auth_members entry could
         * fall into multiple categories - e.g. the user could do "GRANT foo
         * TO bar GRANTED BY baz" and then "DROP ROLE baz, bar".  We want such
         * an operation to succeed regardless of the order in which the
         * to-be-dropped roles are passed to DROP ROLE.
         *
         * To make that work, we remove all pg_auth_members entries that can
         * be silently removed in this loop, and then below we'll make a
         * second pass over the list of roles to be removed and check for any
         * remaining dependencies.
         */
        /* First scan: entries where this role is the granted role */
        ScanKeyInit(&scankey,
                    Anum_pg_auth_members_roleid,
                    BTEqualStrategyNumber, F_OIDEQ,
                    ObjectIdGetDatum(roleid));

        sscan = systable_beginscan(pg_auth_members_rel, AuthMemRoleMemIndexId,
                                   true, NULL, 1, &scankey);

        while (HeapTupleIsValid(tmp_tuple = systable_getnext(sscan)))
        {
            Form_pg_auth_members authmem_form;

            authmem_form = (Form_pg_auth_members) GETSTRUCT(tmp_tuple);
            deleteSharedDependencyRecordsFor(AuthMemRelationId,
                                             authmem_form->oid, 0);
            CatalogTupleDelete(pg_auth_members_rel, &tmp_tuple->t_self);
        }

        systable_endscan(sscan);

        /* Second scan: entries where this role is the member */
        ScanKeyInit(&scankey,
                    Anum_pg_auth_members_member,
                    BTEqualStrategyNumber, F_OIDEQ,
                    ObjectIdGetDatum(roleid));

        sscan = systable_beginscan(pg_auth_members_rel, AuthMemMemRoleIndexId,
                                   true, NULL, 1, &scankey);

        while (HeapTupleIsValid(tmp_tuple = systable_getnext(sscan)))
        {
            Form_pg_auth_members authmem_form;

            authmem_form = (Form_pg_auth_members) GETSTRUCT(tmp_tuple);
            deleteSharedDependencyRecordsFor(AuthMemRelationId,
                                             authmem_form->oid, 0);
            CatalogTupleDelete(pg_auth_members_rel, &tmp_tuple->t_self);
        }

        systable_endscan(sscan);

        /*
         * Advance command counter so that later iterations of this loop will
         * see the changes already made.  This is essential if, for example,
         * we are trying to drop both a role and one of its direct members ---
         * we'll get an error if we try to delete the linking pg_auth_members
         * tuple twice.  (We do not need a CCI between the two delete loops
         * above, because it's not allowed for a role to directly contain
         * itself.)
         */
        CommandCounterIncrement();

        /* Looks tentatively OK, add it to the list if not there yet. */
        role_oids = list_append_unique_oid(role_oids, roleid);
    }

    /*
     * Second pass over the roles to be removed.
     */
    foreach(item, role_oids)
    {
        Oid roleid = lfirst_oid(item);
        HeapTuple tuple;
        Form_pg_authid roleform;
        char *detail;
        char *detail_log;

        /*
         * Re-find the pg_authid tuple.
         *
         * Since we've taken a lock on the role OID, it shouldn't be possible
         * for the tuple to have been deleted -- or for that matter updated --
         * unless the user is manually modifying the system catalogs.
         */
        tuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleid));
        if (!HeapTupleIsValid(tuple))
            elog(ERROR, "could not find tuple for role %u", roleid);
        roleform = (Form_pg_authid) GETSTRUCT(tuple);

        /*
         * Check for pg_shdepend entries depending on this role.
         *
         * This needs to happen after we've completed removing any
         * pg_auth_members entries that can be removed silently, in order to
         * avoid spurious failures.  See notes above for more details.
         */
        if (checkSharedDependencies(AuthIdRelationId, roleid,
                                    &detail, &detail_log))
            ereport(ERROR,
                    (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
                     errmsg("role \"%s\" cannot be dropped because some objects depend on it",
                            NameStr(roleform->rolname)),
                     errdetail_internal("%s", detail),
                     errdetail_log("%s", detail_log)));

        /*
         * Remove the role from the pg_authid table
         */
        CatalogTupleDelete(pg_authid_rel, &tuple->t_self);

        ReleaseSysCache(tuple);

        /*
         * Remove any comments or security labels on this role.
         */
        DeleteSharedComments(roleid, AuthIdRelationId);
        DeleteSharedSecurityLabel(roleid, AuthIdRelationId);

        /*
         * Remove settings for this role.
         */
        DropSetting(InvalidOid, roleid);
    }

    /*
     * Now we can clean up; but keep locks until commit.
     */
    table_close(pg_auth_members_rel, NoLock);
    table_close(pg_authid_rel, NoLock);
}

/*
 * Rename role
 *
 * Rename the role named oldname to newname, after permission and
 * reserved-name checks.  An MD5 password is cleared because its salt is the
 * role name.  Returns the ObjectAddress of the renamed role.
 */
ObjectAddress
RenameRole(const char *oldname, const char *newname)
{
    HeapTuple oldtuple,
              newtuple;
    TupleDesc dsc;
    Relation rel;
    Datum datum;
    bool isnull;
    Datum repl_val[Natts_pg_authid];
    bool repl_null[Natts_pg_authid];
    bool repl_repl[Natts_pg_authid];
    int i;
    Oid roleid;
    ObjectAddress address;
    Form_pg_authid authform;

    rel = table_open(AuthIdRelationId, RowExclusiveLock);
    dsc = RelationGetDescr(rel);

    oldtuple = SearchSysCache1(AUTHNAME, CStringGetDatum(oldname));
    if (!HeapTupleIsValid(oldtuple))
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_OBJECT),
                 errmsg("role \"%s\" does not exist", oldname)));

    /*
     * XXX Client applications probably store the session user somewhere, so
     * renaming it could cause confusion.  On the other hand, there may not be
     * an actual problem besides a little confusion, so think about this and
     * decide.  Same for SET ROLE ... we don't restrict renaming the current
     * effective userid, though.
     */
    authform = (Form_pg_authid) GETSTRUCT(oldtuple);
    roleid = authform->oid;

    if (roleid == GetSessionUserId())
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("session user cannot be renamed")));
    if (roleid == GetOuterUserId())
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("current user cannot be renamed")));

    /*
     * Check that the user is not trying to rename a system role and not
     * trying to rename a role into the reserved "pg_" namespace.
     */
    if (IsReservedName(NameStr(authform->rolname)))
        ereport(ERROR,
                (errcode(ERRCODE_RESERVED_NAME),
                 errmsg("role name \"%s\" is reserved",
                        NameStr(authform->rolname)),
                 errdetail("Role names starting with \"pg_\" are reserved.")));

    if (IsReservedName(newname))
        ereport(ERROR,
                (errcode(ERRCODE_RESERVED_NAME),
                 errmsg("role name \"%s\" is reserved",
                        newname),
                 errdetail("Role names starting with \"pg_\" are reserved.")));

    /*
     * If built with appropriate switch, whine when regression-testing
     * conventions for role names are violated.
     */
#ifdef ENFORCE_REGRESSION_TEST_NAME_RESTRICTIONS
    if (strncmp(newname, "regress_", 8) != 0)
        elog(WARNING, "roles created by regression test cases should have names starting with \"regress_\"");
#endif

    /* make sure the new name doesn't exist */
    if (SearchSysCacheExists1(AUTHNAME, CStringGetDatum(newname)))
        ereport(ERROR,
                (errcode(ERRCODE_DUPLICATE_OBJECT),
                 errmsg("role \"%s\" already exists", newname)));

    /*
     * Only superusers can mess with superusers. Otherwise, a user with
     * CREATEROLE can rename a role for which they have ADMIN OPTION.
     */
    if (authform->rolsuper)
    {
        if (!superuser())
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to rename role"),
                     errdetail("Only roles with the %s attribute may rename roles with the %s attribute.",
                               "SUPERUSER", "SUPERUSER")));
    }
    else
    {
        if (!have_createrole_privilege() ||
            !is_admin_of_role(GetUserId(), roleid))
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                     errmsg("permission denied to rename role"),
                     errdetail("Only roles with the %s attribute and the %s option on role \"%s\" may rename this role.",
                               "CREATEROLE", "ADMIN", NameStr(authform->rolname))));
    }

    /* OK, construct the modified tuple */
    for (i = 0; i < Natts_pg_authid; i++)
        repl_repl[i] = false;

    repl_repl[Anum_pg_authid_rolname - 1] = true;
    repl_val[Anum_pg_authid_rolname - 1] =
        DirectFunctionCall1(namein, CStringGetDatum(newname));
    repl_null[Anum_pg_authid_rolname - 1] = false;

    datum = heap_getattr(oldtuple, Anum_pg_authid_rolpassword, dsc, &isnull);

    if (!isnull && get_password_type(TextDatumGetCString(datum)) == PASSWORD_TYPE_MD5)
    {
        /* MD5 uses the username as salt, so just clear it on a rename */
        repl_repl[Anum_pg_authid_rolpassword - 1] = true;
        repl_null[Anum_pg_authid_rolpassword - 1] = true;

        ereport(NOTICE,
                (errmsg("MD5 password cleared because of role rename")));
    }

    newtuple = heap_modify_tuple(oldtuple, dsc, repl_val, repl_null, repl_repl);
    CatalogTupleUpdate(rel, &oldtuple->t_self, newtuple);

    InvokeObjectPostAlterHook(AuthIdRelationId, roleid, 0);

    ObjectAddressSet(address, AuthIdRelationId, roleid);

    ReleaseSysCache(oldtuple);

    /*
     * Close pg_authid, but keep lock till commit.
     */
    table_close(rel, NoLock);

    return address;
}

/*
 * GrantRoleStmt
 *
 * Grant/Revoke roles to/from roles
 */
void
GrantRole(ParseState *pstate, GrantRoleStmt *stmt)
{
    Relation pg_authid_rel;
    Oid grantor;
    List *grantee_ids;
    ListCell *item;
    GrantRoleOptions popt;
    Oid currentUserId = GetUserId();

    /* Parse options list. */
    InitGrantRoleOptions(&popt);
    foreach(item, stmt->opt)
    {
        DefElem *opt = (DefElem *) lfirst(item);
        char *optval = defGetString(opt);

        if (strcmp(opt->defname, "admin") == 0)
        {
            popt.specified |= GRANT_ROLE_SPECIFIED_ADMIN;

            if (parse_bool(optval, &popt.admin))
                continue;
        }
        else if (strcmp(opt->defname, "inherit") == 0)
        {
            popt.specified |= GRANT_ROLE_SPECIFIED_INHERIT;
            if (parse_bool(optval, &popt.inherit))
                continue;
        }
        else if (strcmp(opt->defname, "set") == 0)
        {
            popt.specified |= GRANT_ROLE_SPECIFIED_SET;
            if (parse_bool(optval, &popt.set))
                continue;
        }
        else
            ereport(ERROR,
                    errcode(ERRCODE_SYNTAX_ERROR),
                    errmsg("unrecognized role option \"%s\"", opt->defname),
                    parser_errposition(pstate, opt->location));

        /* Reached only when parse_bool failed for a recognized option */
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("unrecognized value for role option \"%s\": \"%s\"",
                        opt->defname, optval),
                 parser_errposition(pstate, opt->location)));
    }

    /* Lookup OID of grantor, if specified. */
    if (stmt->grantor)
        grantor = get_rolespec_oid(stmt->grantor, false);
    else
        grantor = InvalidOid;

    grantee_ids = roleSpecsToIds(stmt->grantee_roles);

    /* AccessShareLock is enough since we aren't modifying pg_authid */
    pg_authid_rel = table_open(AuthIdRelationId, AccessShareLock);

    /*
     * Step through all of the granted roles and add, update, or remove
     * entries in pg_auth_members as appropriate. If stmt->is_grant is true,
     * we are adding new grants or, if they already exist, updating options on
     * those grants. If stmt->is_grant is false, we are revoking grants or
     * removing options from them.
*/ foreach(item, stmt->granted_roles) { AccessPriv *priv = (AccessPriv *) lfirst(item); char *rolename = priv->priv_name; Oid roleid; /* Must reject priv(columns) and ALL PRIVILEGES(columns) */ if (rolename == NULL || priv->cols != NIL) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("column names cannot be included in GRANT/REVOKE ROLE"))); roleid = get_role_oid(rolename, false); check_role_membership_authorization(currentUserId, roleid, stmt->is_grant); if (stmt->is_grant) AddRoleMems(currentUserId, rolename, roleid, stmt->grantee_roles, grantee_ids, grantor, &popt); else DelRoleMems(currentUserId, rolename, roleid, stmt->grantee_roles, grantee_ids, grantor, &popt, stmt->behavior); } /* * Close pg_authid, but keep lock till commit. */ table_close(pg_authid_rel, NoLock); } /* * DropOwnedObjects * * Drop the objects owned by a given list of roles. */ void DropOwnedObjects(DropOwnedStmt *stmt) { List *role_ids = roleSpecsToIds(stmt->roles); ListCell *cell; /* Check privileges */ foreach(cell, role_ids) { Oid roleid = lfirst_oid(cell); if (!has_privs_of_role(GetUserId(), roleid)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to drop objects"), errdetail("Only roles with privileges of role \"%s\" may drop objects owned by it.", GetUserNameFromId(roleid, false)))); } /* Ok, do it */ shdepDropOwned(role_ids, stmt->behavior); } /* * ReassignOwnedObjects * * Give the objects owned by a given list of roles away to another user. 
 */
void
ReassignOwnedObjects(ReassignOwnedStmt *stmt)
{
	List	   *role_ids = roleSpecsToIds(stmt->roles);
	ListCell   *cell;
	Oid			newrole;

	/* Check privileges */
	foreach(cell, role_ids)
	{
		Oid			roleid = lfirst_oid(cell);

		/* Caller must hold the privileges of every role being reassigned. */
		if (!has_privs_of_role(GetUserId(), roleid))
			ereport(ERROR,
					(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
					 errmsg("permission denied to reassign objects"),
					 errdetail("Only roles with privileges of role \"%s\" may reassign objects owned by it.",
							   GetUserNameFromId(roleid, false))));
	}

	/* Must have privileges on the receiving side too */
	newrole = get_rolespec_oid(stmt->newrole, false);

	if (!has_privs_of_role(GetUserId(), newrole))
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("permission denied to reassign objects"),
				 errdetail("Only roles with privileges of role \"%s\" may reassign objects to it.",
						   GetUserNameFromId(newrole, false))));

	/* Ok, do it: the heavy lifting lives in the shared-dependency code. */
	shdepReassignOwned(role_ids, newrole);
}

/*
 * roleSpecsToIds
 *
 * Given a list of RoleSpecs, generate a list of role OIDs in the same order.
 *
 * ROLESPEC_PUBLIC is not allowed.
*/ List * roleSpecsToIds(List *memberNames) { List *result = NIL; ListCell *l; foreach(l, memberNames) { RoleSpec *rolespec = lfirst_node(RoleSpec, l); Oid roleid; roleid = get_rolespec_oid(rolespec, false); result = lappend_oid(result, roleid); } return result; } /* * AddRoleMems -- Add given members to the specified role * * currentUserId: OID of role performing the operation * rolename: name of role to add to (used only for error messages) * roleid: OID of role to add to * memberSpecs: list of RoleSpec of roles to add (used only for error messages) * memberIds: OIDs of roles to add * grantorId: OID that should be recorded as having granted the membership * (InvalidOid if not set explicitly) * popt: information about grant options */ static void AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid, List *memberSpecs, List *memberIds, Oid grantorId, GrantRoleOptions *popt) { Relation pg_authmem_rel; TupleDesc pg_authmem_dsc; ListCell *specitem; ListCell *iditem; Assert(list_length(memberSpecs) == list_length(memberIds)); /* Validate grantor (and resolve implicit grantor if not specified). */ grantorId = check_role_grantor(currentUserId, roleid, grantorId, true); pg_authmem_rel = table_open(AuthMemRelationId, RowExclusiveLock); pg_authmem_dsc = RelationGetDescr(pg_authmem_rel); /* * Only allow changes to this role by one backend at a time, so that we * can check integrity constraints like the lack of circular ADMIN OPTION * grants without fear of race conditions. */ LockSharedObject(AuthIdRelationId, roleid, 0, ShareUpdateExclusiveLock); /* Preliminary sanity checks. */ forboth(specitem, memberSpecs, iditem, memberIds) { RoleSpec *memberRole = lfirst_node(RoleSpec, specitem); Oid memberid = lfirst_oid(iditem); /* * pg_database_owner is never a role member. Lifting this restriction * would require a policy decision about membership loops. 
One could * prevent loops, which would include making "ALTER DATABASE x OWNER * TO proposed_datdba" fail if is_member_of_role(pg_database_owner, * proposed_datdba). Hence, gaining a membership could reduce what a * role could do. Alternately, one could allow these memberships to * complete loops. A role could then have actual WITH ADMIN OPTION on * itself, prompting a decision about is_admin_of_role() treatment of * the case. * * Lifting this restriction also has policy implications for ownership * of shared objects (databases and tablespaces). We allow such * ownership, but we might find cause to ban it in the future. * Designing such a ban would more troublesome if the design had to * address pg_database_owner being a member of role FOO that owns a * shared object. (The effect of such ownership is that any owner of * another database can act as the owner of affected shared objects.) */ if (memberid == ROLE_PG_DATABASE_OWNER) ereport(ERROR, errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("role \"%s\" cannot be a member of any role", get_rolespec_name(memberRole))); /* * Refuse creation of membership loops, including the trivial case * where a role is made a member of itself. We do this by checking to * see if the target role is already a member of the proposed member * role. We have to ignore possible superuserness, however, else we * could never grant membership in a superuser-privileged role. */ if (is_member_of_role_nosuper(roleid, memberid)) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("role \"%s\" is a member of role \"%s\"", rolename, get_rolespec_name(memberRole)))); } /* * Disallow attempts to grant ADMIN OPTION back to a user who granted it * to you, similar to what check_circularity does for ACLs. We want the * chains of grants to remain acyclic, so that it's always possible to use * REVOKE .. CASCADE to clean up all grants that depend on the one being * revoked. 
* * NB: This check might look redundant with the check for membership loops * above, but it isn't. That's checking for role-member loop (e.g. A is a * member of B and B is a member of A) while this is checking for a * member-grantor loop (e.g. A gave ADMIN OPTION on X to B and now B, who * has no other source of ADMIN OPTION on X, tries to give ADMIN OPTION on * X back to A). */ if (popt->admin && grantorId != BOOTSTRAP_SUPERUSERID) { CatCList *memlist; RevokeRoleGrantAction *actions; int i; /* Get the list of members for this role. */ memlist = SearchSysCacheList1(AUTHMEMROLEMEM, ObjectIdGetDatum(roleid)); /* * Figure out what would happen if we removed all existing grants to * every role to which we've been asked to make a new grant. */ actions = initialize_revoke_actions(memlist); foreach(iditem, memberIds) { Oid memberid = lfirst_oid(iditem); if (memberid == BOOTSTRAP_SUPERUSERID) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("%s option cannot be granted back to your own grantor", "ADMIN"))); plan_member_revoke(memlist, actions, memberid); } /* * If the result would be that the grantor role would no longer have * the ability to perform the grant, then the proposed grant would * create a circularity. */ for (i = 0; i < memlist->n_members; ++i) { HeapTuple authmem_tuple; Form_pg_auth_members authmem_form; authmem_tuple = &memlist->members[i]->tuple; authmem_form = (Form_pg_auth_members) GETSTRUCT(authmem_tuple); if (actions[i] == RRG_NOOP && authmem_form->member == grantorId && authmem_form->admin_option) break; } if (i >= memlist->n_members) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("%s option cannot be granted back to your own grantor", "ADMIN"))); ReleaseSysCacheList(memlist); } /* Now perform the catalog updates. 
*/ forboth(specitem, memberSpecs, iditem, memberIds) { RoleSpec *memberRole = lfirst_node(RoleSpec, specitem); Oid memberid = lfirst_oid(iditem); HeapTuple authmem_tuple; HeapTuple tuple; Datum new_record[Natts_pg_auth_members] = {0}; bool new_record_nulls[Natts_pg_auth_members] = {0}; bool new_record_repl[Natts_pg_auth_members] = {0}; /* Common initialization for possible insert or update */ new_record[Anum_pg_auth_members_roleid - 1] = ObjectIdGetDatum(roleid); new_record[Anum_pg_auth_members_member - 1] = ObjectIdGetDatum(memberid); new_record[Anum_pg_auth_members_grantor - 1] = ObjectIdGetDatum(grantorId); /* Find any existing tuple */ authmem_tuple = SearchSysCache3(AUTHMEMROLEMEM, ObjectIdGetDatum(roleid), ObjectIdGetDatum(memberid), ObjectIdGetDatum(grantorId)); /* * If we found a tuple, update it with new option values, unless there * are no changes, in which case issue a WARNING. * * If we didn't find a tuple, just insert one. */ if (HeapTupleIsValid(authmem_tuple)) { Form_pg_auth_members authmem_form; bool at_least_one_change = false; authmem_form = (Form_pg_auth_members) GETSTRUCT(authmem_tuple); if ((popt->specified & GRANT_ROLE_SPECIFIED_ADMIN) != 0 && authmem_form->admin_option != popt->admin) { new_record[Anum_pg_auth_members_admin_option - 1] = BoolGetDatum(popt->admin); new_record_repl[Anum_pg_auth_members_admin_option - 1] = true; at_least_one_change = true; } if ((popt->specified & GRANT_ROLE_SPECIFIED_INHERIT) != 0 && authmem_form->inherit_option != popt->inherit) { new_record[Anum_pg_auth_members_inherit_option - 1] = BoolGetDatum(popt->inherit); new_record_repl[Anum_pg_auth_members_inherit_option - 1] = true; at_least_one_change = true; } if ((popt->specified & GRANT_ROLE_SPECIFIED_SET) != 0 && authmem_form->set_option != popt->set) { new_record[Anum_pg_auth_members_set_option - 1] = BoolGetDatum(popt->set); new_record_repl[Anum_pg_auth_members_set_option - 1] = true; at_least_one_change = true; } if (!at_least_one_change) { ereport(NOTICE, 
(errmsg("role \"%s\" has already been granted membership in role \"%s\" by role \"%s\"", get_rolespec_name(memberRole), rolename, GetUserNameFromId(grantorId, false)))); ReleaseSysCache(authmem_tuple); continue; } tuple = heap_modify_tuple(authmem_tuple, pg_authmem_dsc, new_record, new_record_nulls, new_record_repl); CatalogTupleUpdate(pg_authmem_rel, &tuple->t_self, tuple); ReleaseSysCache(authmem_tuple); } else { Oid objectId; Oid *newmembers = palloc_object(Oid); /* * The values for these options can be taken directly from 'popt'. * Either they were specified, or the defaults as set by * InitGrantRoleOptions are correct. */ new_record[Anum_pg_auth_members_admin_option - 1] = BoolGetDatum(popt->admin); new_record[Anum_pg_auth_members_set_option - 1] = BoolGetDatum(popt->set); /* * If the user specified a value for the inherit option, use * whatever was specified. Otherwise, set the default value based * on the role-level property. */ if ((popt->specified & GRANT_ROLE_SPECIFIED_INHERIT) != 0) new_record[Anum_pg_auth_members_inherit_option - 1] = BoolGetDatum(popt->inherit); else { HeapTuple mrtup; Form_pg_authid mrform; mrtup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(memberid)); if (!HeapTupleIsValid(mrtup)) elog(ERROR, "cache lookup failed for role %u", memberid); mrform = (Form_pg_authid) GETSTRUCT(mrtup); new_record[Anum_pg_auth_members_inherit_option - 1] = BoolGetDatum(mrform->rolinherit); ReleaseSysCache(mrtup); } /* get an OID for the new row and insert it */ objectId = GetNewOidWithIndex(pg_authmem_rel, AuthMemOidIndexId, Anum_pg_auth_members_oid); new_record[Anum_pg_auth_members_oid - 1] = ObjectIdGetDatum(objectId); tuple = heap_form_tuple(pg_authmem_dsc, new_record, new_record_nulls); CatalogTupleInsert(pg_authmem_rel, tuple); /* updateAclDependencies wants to pfree array inputs */ newmembers[0] = grantorId; updateAclDependencies(AuthMemRelationId, objectId, 0, InvalidOid, 0, NULL, 1, newmembers); } /* CCI after each change, in case there are 
duplicates in list */ CommandCounterIncrement(); } /* * Close pg_authmem, but keep lock till commit. */ table_close(pg_authmem_rel, NoLock); } /* * DelRoleMems -- Remove given members from the specified role * * rolename: name of role to del from (used only for error messages) * roleid: OID of role to del from * memberSpecs: list of RoleSpec of roles to del (used only for error messages) * memberIds: OIDs of roles to del * grantorId: who is revoking the membership * popt: information about grant options * behavior: RESTRICT or CASCADE behavior for recursive removal */ static void DelRoleMems(Oid currentUserId, const char *rolename, Oid roleid, List *memberSpecs, List *memberIds, Oid grantorId, GrantRoleOptions *popt, DropBehavior behavior) { Relation pg_authmem_rel; TupleDesc pg_authmem_dsc; ListCell *specitem; ListCell *iditem; CatCList *memlist; RevokeRoleGrantAction *actions; int i; Assert(list_length(memberSpecs) == list_length(memberIds)); /* Validate grantor (and resolve implicit grantor if not specified). */ grantorId = check_role_grantor(currentUserId, roleid, grantorId, false); pg_authmem_rel = table_open(AuthMemRelationId, RowExclusiveLock); pg_authmem_dsc = RelationGetDescr(pg_authmem_rel); /* * Only allow changes to this role by one backend at a time, so that we * can check for things like dependent privileges without fear of race * conditions. */ LockSharedObject(AuthIdRelationId, roleid, 0, ShareUpdateExclusiveLock); memlist = SearchSysCacheList1(AUTHMEMROLEMEM, ObjectIdGetDatum(roleid)); actions = initialize_revoke_actions(memlist); /* * We may need to recurse to dependent privileges if DROP_CASCADE was * specified, or refuse to perform the operation if dependent privileges * exist and DROP_RESTRICT was specified. plan_single_revoke() will figure * out what to do with each catalog tuple. 
*/ forboth(specitem, memberSpecs, iditem, memberIds) { RoleSpec *memberRole = lfirst(specitem); Oid memberid = lfirst_oid(iditem); if (!plan_single_revoke(memlist, actions, memberid, grantorId, popt, behavior)) { ereport(WARNING, (errmsg("role \"%s\" has not been granted membership in role \"%s\" by role \"%s\"", get_rolespec_name(memberRole), rolename, GetUserNameFromId(grantorId, false)))); continue; } } /* * We now know what to do with each catalog tuple: it should either be * left alone, deleted, or just have the admin_option flag cleared. * Perform the appropriate action in each case. */ for (i = 0; i < memlist->n_members; ++i) { HeapTuple authmem_tuple; Form_pg_auth_members authmem_form; if (actions[i] == RRG_NOOP) continue; authmem_tuple = &memlist->members[i]->tuple; authmem_form = (Form_pg_auth_members) GETSTRUCT(authmem_tuple); if (actions[i] == RRG_DELETE_GRANT) { /* * Remove the entry altogether, after first removing its * dependencies */ deleteSharedDependencyRecordsFor(AuthMemRelationId, authmem_form->oid, 0); CatalogTupleDelete(pg_authmem_rel, &authmem_tuple->t_self); } else { /* Just turn off the specified option */ HeapTuple tuple; Datum new_record[Natts_pg_auth_members] = {0}; bool new_record_nulls[Natts_pg_auth_members] = {0}; bool new_record_repl[Natts_pg_auth_members] = {0}; /* Build a tuple to update with */ if (actions[i] == RRG_REMOVE_ADMIN_OPTION) { new_record[Anum_pg_auth_members_admin_option - 1] = BoolGetDatum(false); new_record_repl[Anum_pg_auth_members_admin_option - 1] = true; } else if (actions[i] == RRG_REMOVE_INHERIT_OPTION) { new_record[Anum_pg_auth_members_inherit_option - 1] = BoolGetDatum(false); new_record_repl[Anum_pg_auth_members_inherit_option - 1] = true; } else if (actions[i] == RRG_REMOVE_SET_OPTION) { new_record[Anum_pg_auth_members_set_option - 1] = BoolGetDatum(false); new_record_repl[Anum_pg_auth_members_set_option - 1] = true; } else elog(ERROR, "unknown role revoke action"); tuple = heap_modify_tuple(authmem_tuple, 
pg_authmem_dsc, new_record, new_record_nulls, new_record_repl); CatalogTupleUpdate(pg_authmem_rel, &tuple->t_self, tuple); } } ReleaseSysCacheList(memlist); /* * Close pg_authmem, but keep lock till commit. */ table_close(pg_authmem_rel, NoLock); } /* * Check that currentUserId has permission to modify the membership list for * roleid. Throw an error if not. */ static void check_role_membership_authorization(Oid currentUserId, Oid roleid, bool is_grant) { /* * The charter of pg_database_owner is to have exactly one, implicit, * situation-dependent member. There's no technical need for this * restriction. (One could lift it and take the further step of making * object_ownercheck(DatabaseRelationId, ...) equivalent to * has_privs_of_role(roleid, ROLE_PG_DATABASE_OWNER), in which case * explicit, situation-independent members could act as the owner of any * database.) */ if (is_grant && roleid == ROLE_PG_DATABASE_OWNER) ereport(ERROR, errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("role \"%s\" cannot have explicit members", GetUserNameFromId(roleid, false))); /* To mess with a superuser role, you gotta be superuser. */ if (superuser_arg(roleid)) { if (!superuser_arg(currentUserId)) { if (is_grant) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to grant role \"%s\"", GetUserNameFromId(roleid, false)), errdetail("Only roles with the %s attribute may grant roles with the %s attribute.", "SUPERUSER", "SUPERUSER"))); else ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to revoke role \"%s\"", GetUserNameFromId(roleid, false)), errdetail("Only roles with the %s attribute may revoke roles with the %s attribute.", "SUPERUSER", "SUPERUSER"))); } } else { /* * Otherwise, must have admin option on the role to be changed. 
*/ if (!is_admin_of_role(currentUserId, roleid)) { if (is_grant) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to grant role \"%s\"", GetUserNameFromId(roleid, false)), errdetail("Only roles with the %s option on role \"%s\" may grant this role.", "ADMIN", GetUserNameFromId(roleid, false)))); else ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to revoke role \"%s\"", GetUserNameFromId(roleid, false)), errdetail("Only roles with the %s option on role \"%s\" may revoke this role.", "ADMIN", GetUserNameFromId(roleid, false)))); } } } /* * Sanity-check, or infer, the grantor for a GRANT or REVOKE statement * targeting a role. * * The grantor must always be either a role with ADMIN OPTION on the role in * which membership is being granted, or the bootstrap superuser. This is * similar to the restriction enforced by select_best_grantor, except that * roles don't have owners, so we regard the bootstrap superuser as the * implicit owner. * * If the grantor was not explicitly specified by the user, grantorId should * be passed as InvalidOid, and this function will infer the user to be * recorded as the grantor. In many cases, this will be the current user, but * things get more complicated when the current user doesn't possess ADMIN * OPTION on the role but rather relies on having SUPERUSER privileges, or * on inheriting the privileges of a role which does have ADMIN OPTION. See * below for details. * * If the grantor was specified by the user, then it must be a user that * can legally be recorded as the grantor, as per the rule stated above. * This is an integrity constraint, not a permissions check, and thus even * superusers are subject to this restriction. However, there is also a * permissions check: to specify a role as the grantor, the current user * must possess the privileges of that role. Superusers will always pass * this check, but for non-superusers it may lead to an error. 
* * The return value is the OID to be regarded as the grantor when executing * the operation. */ static Oid check_role_grantor(Oid currentUserId, Oid roleid, Oid grantorId, bool is_grant) { /* If the grantor ID was not specified, pick one to use. */ if (!OidIsValid(grantorId)) { /* * Grants where the grantor is recorded as the bootstrap superuser do * not depend on any other existing grants, so always default to this * interpretation when possible. */ if (superuser_arg(currentUserId)) return BOOTSTRAP_SUPERUSERID; /* * Otherwise, the grantor must either have ADMIN OPTION on the role or * inherit the privileges of a role which does. In the former case, * record the grantor as the current user; in the latter, pick one of * the roles that is "most directly" inherited by the current role * (i.e. fewest "hops"). * * (We shouldn't fail to find a best grantor, because we've already * established that the current user has permission to perform the * operation.) */ grantorId = select_best_admin(currentUserId, roleid); if (!OidIsValid(grantorId)) elog(ERROR, "no possible grantors"); return grantorId; } /* * If an explicit grantor is specified, it must be a role whose privileges * the current user possesses. * * It should also be a role that has ADMIN OPTION on the target role, but * we check this condition only in case of GRANT. For REVOKE, no matching * grant should exist anyway, but if it somehow does, let the user get rid * of it. 
*/ if (is_grant) { if (!has_privs_of_role(currentUserId, grantorId)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to grant privileges as role \"%s\"", GetUserNameFromId(grantorId, false)), errdetail("Only roles with privileges of role \"%s\" may grant privileges as this role.", GetUserNameFromId(grantorId, false)))); if (grantorId != BOOTSTRAP_SUPERUSERID && select_best_admin(grantorId, roleid) != grantorId) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to grant privileges as role \"%s\"", GetUserNameFromId(grantorId, false)), errdetail("The grantor must have the %s option on role \"%s\".", "ADMIN", GetUserNameFromId(roleid, false)))); } else { if (!has_privs_of_role(currentUserId, grantorId)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to revoke privileges granted by role \"%s\"", GetUserNameFromId(grantorId, false)), errdetail("Only roles with privileges of role \"%s\" may revoke privileges granted by this role.", GetUserNameFromId(grantorId, false)))); } /* * If a grantor was specified explicitly, always attribute the grant to * that role (unless we error out above). */ return grantorId; } /* * Initialize an array of RevokeRoleGrantAction objects. * * 'memlist' should be a list of all grants for the target role. * * This constructs an array indicating that no actions are to be performed; * that is, every element is initially RRG_NOOP. */ static RevokeRoleGrantAction * initialize_revoke_actions(CatCList *memlist) { RevokeRoleGrantAction *result; int i; if (memlist->n_members == 0) return NULL; result = palloc_array(RevokeRoleGrantAction, memlist->n_members); for (i = 0; i < memlist->n_members; i++) result[i] = RRG_NOOP; return result; } /* * Figure out what we would need to do in order to revoke a grant, or just the * admin option on a grant, given that there might be dependent privileges. 
* * 'memlist' should be a list of all grants for the target role. * * Whatever actions prove to be necessary will be signalled by updating * 'actions'. * * If behavior is DROP_RESTRICT, an error will occur if there are dependent * role membership grants; if DROP_CASCADE, those grants will be scheduled * for deletion. * * The return value is true if the matching grant was found in the list, * and false if not. */ static bool plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, Oid member, Oid grantor, GrantRoleOptions *popt, DropBehavior behavior) { int i; /* * If popt.specified == 0, we're revoking the grant entirely; otherwise, * we expect just one bit to be set, and we're revoking the corresponding * option. As of this writing, there's no syntax that would allow for an * attempt to revoke multiple options at once, and the logic below * wouldn't work properly if such syntax were added, so assert that our * caller isn't trying to do that. */ Assert(pg_popcount32(popt->specified) <= 1); for (i = 0; i < memlist->n_members; ++i) { HeapTuple authmem_tuple; Form_pg_auth_members authmem_form; authmem_tuple = &memlist->members[i]->tuple; authmem_form = (Form_pg_auth_members) GETSTRUCT(authmem_tuple); if (authmem_form->member == member && authmem_form->grantor == grantor) { if ((popt->specified & GRANT_ROLE_SPECIFIED_INHERIT) != 0) { /* * Revoking the INHERIT option doesn't change anything for * dependent privileges, so we don't need to recurse. */ actions[i] = RRG_REMOVE_INHERIT_OPTION; } else if ((popt->specified & GRANT_ROLE_SPECIFIED_SET) != 0) { /* Here too, no need to recurse. */ actions[i] = RRG_REMOVE_SET_OPTION; } else { bool revoke_admin_option_only; /* * Revoking the grant entirely, or ADMIN option on a grant, * implicates dependent privileges, so we may need to recurse. 
*/ revoke_admin_option_only = (popt->specified & GRANT_ROLE_SPECIFIED_ADMIN) != 0; plan_recursive_revoke(memlist, actions, i, revoke_admin_option_only, behavior); } return true; } } return false; } /* * Figure out what we would need to do in order to revoke all grants to * a given member, given that there might be dependent privileges. * * 'memlist' should be a list of all grants for the target role. * * Whatever actions prove to be necessary will be signalled by updating * 'actions'. */ static void plan_member_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, Oid member) { int i; for (i = 0; i < memlist->n_members; ++i) { HeapTuple authmem_tuple; Form_pg_auth_members authmem_form; authmem_tuple = &memlist->members[i]->tuple; authmem_form = (Form_pg_auth_members) GETSTRUCT(authmem_tuple); if (authmem_form->member == member) plan_recursive_revoke(memlist, actions, i, false, DROP_CASCADE); } } /* * Workhorse for figuring out recursive revocation of role grants. * * This is similar to what recursive_revoke() does for ACLs. */ static void plan_recursive_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, int index, bool revoke_admin_option_only, DropBehavior behavior) { bool would_still_have_admin_option = false; HeapTuple authmem_tuple; Form_pg_auth_members authmem_form; int i; /* If it's already been done, we can just return. */ if (actions[index] == RRG_DELETE_GRANT) return; if (actions[index] == RRG_REMOVE_ADMIN_OPTION && revoke_admin_option_only) return; /* Locate tuple data. */ authmem_tuple = &memlist->members[index]->tuple; authmem_form = (Form_pg_auth_members) GETSTRUCT(authmem_tuple); /* * If the existing tuple does not have admin_option set, then we do not * need to recurse. If we're just supposed to clear that bit we don't need * to do anything at all; if we're supposed to remove the grant, we need * to do something, but only to the tuple, and not any others. 
*/ if (!revoke_admin_option_only) { actions[index] = RRG_DELETE_GRANT; if (!authmem_form->admin_option) return; } else { if (!authmem_form->admin_option) return; actions[index] = RRG_REMOVE_ADMIN_OPTION; } /* Determine whether the member would still have ADMIN OPTION. */ for (i = 0; i < memlist->n_members; ++i) { HeapTuple am_cascade_tuple; Form_pg_auth_members am_cascade_form; am_cascade_tuple = &memlist->members[i]->tuple; am_cascade_form = (Form_pg_auth_members) GETSTRUCT(am_cascade_tuple); if (am_cascade_form->member == authmem_form->member && am_cascade_form->admin_option && actions[i] == RRG_NOOP) { would_still_have_admin_option = true; break; } } /* If the member would still have ADMIN OPTION, we need not recurse. */ if (would_still_have_admin_option) return; /* * Recurse to grants that are not yet slated for deletion which have this * member as the grantor. */ for (i = 0; i < memlist->n_members; ++i) { HeapTuple am_cascade_tuple; Form_pg_auth_members am_cascade_form; am_cascade_tuple = &memlist->members[i]->tuple; am_cascade_form = (Form_pg_auth_members) GETSTRUCT(am_cascade_tuple); if (am_cascade_form->grantor == authmem_form->member && actions[i] != RRG_DELETE_GRANT) { if (behavior == DROP_RESTRICT) ereport(ERROR, (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), errmsg("dependent privileges exist"), errhint("Use CASCADE to revoke them too."))); plan_recursive_revoke(memlist, actions, i, false, behavior); } } } /* * Initialize a GrantRoleOptions object with default values. 
 */
static void
InitGrantRoleOptions(GrantRoleOptions *popt)
{
	/*
	 * No options are explicitly specified yet; defaults are ADMIN off,
	 * INHERIT off, SET on.  NOTE(review): the inherit default here appears
	 * to be a placeholder — AddRoleMems substitutes the member role's
	 * rolinherit property when INHERIT was not explicitly specified.
	 */
	popt->specified = 0;
	popt->admin = false;
	popt->inherit = false;
	popt->set = true;
}

/*
 * GUC check_hook for createrole_self_grant
 *
 * Accepts a comma-separated list of the key words SET and INHERIT, folds it
 * into a bitmask of GRANT_ROLE_SPECIFIED_* flags, and stashes the mask in
 * *extra for the assign hook.  Returns false (rejecting the setting) on any
 * syntax error or unrecognized key word.
 */
bool
check_createrole_self_grant(char **newval, void **extra, GucSource source)
{
	char	   *rawstring;
	List	   *elemlist;
	ListCell   *l;
	unsigned	options = 0;
	unsigned   *result;

	/* Need a modifiable copy of string */
	rawstring = pstrdup(*newval);

	if (!SplitIdentifierString(rawstring, ',', &elemlist))
	{
		/* syntax error in list */
		GUC_check_errdetail("List syntax is invalid.");
		pfree(rawstring);
		list_free(elemlist);
		return false;
	}

	foreach(l, elemlist)
	{
		char	   *tok = (char *) lfirst(l);

		/* Key words are matched case-insensitively. */
		if (pg_strcasecmp(tok, "SET") == 0)
			options |= GRANT_ROLE_SPECIFIED_SET;
		else if (pg_strcasecmp(tok, "INHERIT") == 0)
			options |= GRANT_ROLE_SPECIFIED_INHERIT;
		else
		{
			GUC_check_errdetail("Unrecognized key word: \"%s\".", tok);
			pfree(rawstring);
			list_free(elemlist);
			return false;
		}
	}

	pfree(rawstring);
	list_free(elemlist);

	/* GUC "extra" storage must come from guc_malloc, not palloc. */
	result = (unsigned *) guc_malloc(LOG, sizeof(unsigned));
	if (!result)
		return false;
	*result = options;
	*extra = result;

	return true;
}

/*
 * GUC assign_hook for createrole_self_grant
 *
 * Installs the bitmask computed by the check hook into the module-level
 * self-grant state.  All three option bits are marked "specified" so that
 * the grant code uses exactly these values rather than any defaults.
 */
void
assign_createrole_self_grant(const char *newval, void *extra)
{
	unsigned	options = *(unsigned *) extra;

	/* An empty list disables the self-grant behavior entirely. */
	createrole_self_grant_enabled = (options != 0);
	createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN
		| GRANT_ROLE_SPECIFIED_INHERIT
		| GRANT_ROLE_SPECIFIED_SET;
	createrole_self_grant_options.admin = false;
	createrole_self_grant_options.inherit =
		(options & GRANT_ROLE_SPECIFIED_INHERIT) != 0;
	createrole_self_grant_options.set =
		(options & GRANT_ROLE_SPECIFIED_SET) != 0;
}
c
github
https://github.com/postgres/postgres
src/backend/commands/user.c
# frozen_string_literal: true class CustomReader include ActiveModel::Validations def initialize(data = {}) @data = data end def []=(key, value) @data[key] = value end def read_attribute_for_validation(key) @data[key] end end
ruby
github
https://github.com/rails/rails
activemodel/test/models/custom_reader.rb
def get_product(val) : res = 1 for ele in val: res *= ele return res def find_k_product(test_list, K): res = get_product([sub[K] for sub in test_list]) return (res)
unknown
mbpp
This error code indicates a mismatch between the lifetimes appearing in the function signature (i.e., the parameter types and the return type) and the data-flow found in the function body. Erroneous code example: ```compile_fail,E0621 fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 { // error: explicit lifetime // required in the type of // `y` if x > y { x } else { y } } ``` In the code above, the function is returning data borrowed from either `x` or `y`, but the `'a` annotation indicates that it is returning data only from `x`. To fix the error, the signature and the body must be made to match. Typically, this is done by updating the function signature. So, in this case, we change the type of `y` to `&'a i32`, like so: ``` fn foo<'a>(x: &'a i32, y: &'a i32) -> &'a i32 { if x > y { x } else { y } } ``` Now the signature indicates that the function data borrowed from either `x` or `y`. Alternatively, you could change the body to not return data from `y`: ``` fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 { x } ```
unknown
github
https://github.com/rust-lang/rust
compiler/rustc_error_codes/src/error_codes/E0621.md
""" ============================================================= Online Latent Dirichlet Allocation with variational inference ============================================================= This implementation is modified from Matthew D. Hoffman's onlineldavb code Link: http://matthewdhoffman.com/code/onlineldavb.tar """ # Author: Chyi-Kwei Yau # Author: Matthew D. Hoffman (original onlineldavb implementation) import numpy as np import scipy.sparse as sp from scipy.special import gammaln import warnings from ..base import BaseEstimator, TransformerMixin from ..utils import (check_random_state, check_array, gen_batches, gen_even_slices, _get_n_jobs) from ..utils.validation import check_non_negative from ..utils.extmath import logsumexp from ..externals.joblib import Parallel, delayed from ..externals.six.moves import xrange from ..exceptions import NotFittedError from ._online_lda import (mean_change, _dirichlet_expectation_1d, _dirichlet_expectation_2d) EPS = np.finfo(np.float).eps def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior, max_iters, mean_change_tol, cal_sstats, random_state): """E-step: update document-topic distribution. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Document word matrix. exp_topic_word_distr : dense matrix, shape=(n_topics, n_features) Exponential value of expection of log topic word distribution. In the literature, this is `exp(E[log(beta)])`. doc_topic_prior : float Prior of document topic distribution `theta`. max_iters : int Max number of iterations for updating document topic distribution in the E-step. mean_change_tol : float Stopping tolerance for updating document topic distribution in E-setp. cal_sstats : boolean Parameter that indicate to calculate sufficient statistics or not. Set `cal_sstats` to `True` when we need to run M-step. random_state : RandomState instance or None Parameter that indicate how to initialize document topic distribution. 
Set `random_state` to None will initialize document topic distribution to a constant number. Returns ------- (doc_topic_distr, suff_stats) : `doc_topic_distr` is unnormalized topic distribution for each document. In the literature, this is `gamma`. we can calculate `E[log(theta)]` from it. `suff_stats` is expected sufficient statistics for the M-step. When `cal_sstats == False`, this will be None. """ is_sparse_x = sp.issparse(X) n_samples, n_features = X.shape n_topics = exp_topic_word_distr.shape[0] if random_state: doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics)) else: doc_topic_distr = np.ones((n_samples, n_topics)) # In the literature, this is `exp(E[log(theta)])` exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr)) # diff on `component_` (only calculate it when `cal_diff` is True) suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None if is_sparse_x: X_data = X.data X_indices = X.indices X_indptr = X.indptr for idx_d in xrange(n_samples): if is_sparse_x: ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]] cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]] else: ids = np.nonzero(X[idx_d, :])[0] cnts = X[idx_d, ids] doc_topic_d = doc_topic_distr[idx_d, :] # The next one is a copy, since the inner loop overwrites it. exp_doc_topic_d = exp_doc_topic[idx_d, :].copy() exp_topic_word_d = exp_topic_word_distr[:, ids] # Iterate between `doc_topic_d` and `norm_phi` until convergence for _ in xrange(0, max_iters): last_d = doc_topic_d # The optimal phi_{dwk} is proportional to # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]). norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS doc_topic_d = (exp_doc_topic_d * np.dot(cnts / norm_phi, exp_topic_word_d.T)) # Note: adds doc_topic_prior to doc_topic_d, in-place. 
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior, exp_doc_topic_d) if mean_change(last_d, doc_topic_d) < mean_change_tol: break doc_topic_distr[idx_d, :] = doc_topic_d # Contribution of document d to the expected sufficient # statistics for the M step. if cal_sstats: norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi) return (doc_topic_distr, suff_stats) class LatentDirichletAllocation(BaseEstimator, TransformerMixin): """Latent Dirichlet Allocation with online variational Bayes algorithm .. versionadded:: 0.17 Read more in the :ref:`User Guide <LatentDirichletAllocation>`. Parameters ---------- n_topics : int, optional (default=10) Number of topics. doc_topic_prior : float, optional (default=None) Prior of document topic distribution `theta`. If the value is None, defaults to `1 / n_topics`. In the literature, this is called `alpha`. topic_word_prior : float, optional (default=None) Prior of topic word distribution `beta`. If the value is None, defaults to `1 / n_topics`. In the literature, this is called `eta`. learning_method : 'batch' | 'online', default='online' Method used to update `_component`. Only used in `fit` method. In general, if the data size is large, the online update will be much faster than the batch update. The default learning method is going to be changed to 'batch' in the 0.20 release. Valid options:: 'batch': Batch variational Bayes method. Use all training data in each EM update. Old `components_` will be overwritten in each iteration. 'online': Online variational Bayes method. In each EM update, use mini-batch of training data to update the ``components_`` variable incrementally. The learning rate is controlled by the ``learning_decay`` and the ``learning_offset`` parameters. learning_decay : float, optional (default=0.7) It is a parameter that control learning rate in the online learning method. 
The value should be set between (0.5, 1.0] to guarantee asymptotic convergence. When the value is 0.0 and batch_size is ``n_samples``, the update method is same as batch learning. In the literature, this is called kappa. learning_offset : float, optional (default=10.) A (positive) parameter that downweights early iterations in online learning. It should be greater than 1.0. In the literature, this is called tau_0. max_iter : integer, optional (default=10) The maximum number of iterations. total_samples : int, optional (default=1e6) Total number of documents. Only used in the `partial_fit` method. batch_size : int, optional (default=128) Number of documents to use in each EM iteration. Only used in online learning. evaluate_every : int optional (default=0) How often to evaluate perplexity. Only used in `fit` method. set it to 0 or negative number to not evalute perplexity in training at all. Evaluating perplexity can help you check convergence in training process, but it will also increase total training time. Evaluating perplexity in every iteration might increase training time up to two-fold. perp_tol : float, optional (default=1e-1) Perplexity tolerance in batch learning. Only used when ``evaluate_every`` is greater than 0. mean_change_tol : float, optional (default=1e-3) Stopping tolerance for updating document topic distribution in E-step. max_doc_update_iter : int (default=100) Max number of iterations for updating document topic distribution in the E-step. n_jobs : int, optional (default=1) The number of jobs to use in the E-step. If -1, all CPUs are used. For ``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used. verbose : int, optional (default=0) Verbosity level. random_state : int or RandomState instance or None, optional (default=None) Pseudo-random number generator seed control. Attributes ---------- components_ : array, [n_topics, n_features] Topic word distribution. ``components_[i, j]`` represents word j in topic `i`. 
n_batch_iter_ : int Number of iterations of the EM step. n_iter_ : int Number of passes over the dataset. References ---------- [1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman, David M. Blei, Francis Bach, 2010 [2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei, Chong Wang, John Paisley, 2013 [3] Matthew D. Hoffman's onlineldavb code. Link: http://matthewdhoffman.com//code/onlineldavb.tar """ def __init__(self, n_topics=10, doc_topic_prior=None, topic_word_prior=None, learning_method=None, learning_decay=.7, learning_offset=10., max_iter=10, batch_size=128, evaluate_every=-1, total_samples=1e6, perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100, n_jobs=1, verbose=0, random_state=None): self.n_topics = n_topics self.doc_topic_prior = doc_topic_prior self.topic_word_prior = topic_word_prior self.learning_method = learning_method self.learning_decay = learning_decay self.learning_offset = learning_offset self.max_iter = max_iter self.batch_size = batch_size self.evaluate_every = evaluate_every self.total_samples = total_samples self.perp_tol = perp_tol self.mean_change_tol = mean_change_tol self.max_doc_update_iter = max_doc_update_iter self.n_jobs = n_jobs self.verbose = verbose self.random_state = random_state def _check_params(self): """Check model parameters.""" if self.n_topics <= 0: raise ValueError("Invalid 'n_topics' parameter: %r" % self.n_topics) if self.total_samples <= 0: raise ValueError("Invalid 'total_samples' parameter: %r" % self.total_samples) if self.learning_offset < 0: raise ValueError("Invalid 'learning_offset' parameter: %r" % self.learning_offset) if self.learning_method not in ("batch", "online", None): raise ValueError("Invalid 'learning_method' parameter: %r" % self.learning_method) def _init_latent_vars(self, n_features): """Initialize latent variables.""" self.random_state_ = check_random_state(self.random_state) self.n_batch_iter_ = 1 self.n_iter_ = 0 if self.doc_topic_prior is 
None: self.doc_topic_prior_ = 1. / self.n_topics else: self.doc_topic_prior_ = self.doc_topic_prior if self.topic_word_prior is None: self.topic_word_prior_ = 1. / self.n_topics else: self.topic_word_prior_ = self.topic_word_prior init_gamma = 100. init_var = 1. / init_gamma # In the literature, this is called `lambda` self.components_ = self.random_state_.gamma( init_gamma, init_var, (self.n_topics, n_features)) # In the literature, this is `exp(E[log(beta)])` self.exp_dirichlet_component_ = np.exp( _dirichlet_expectation_2d(self.components_)) def _e_step(self, X, cal_sstats, random_init, parallel=None): """E-step in EM update. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Document word matrix. cal_sstats : boolean Parameter that indicate whether to calculate sufficient statistics or not. Set ``cal_sstats`` to True when we need to run M-step. random_init : boolean Parameter that indicate whether to initialize document topic distribution randomly in the E-step. Set it to True in training steps. parallel : joblib.Parallel (optional) Pre-initialized instance of joblib.Parallel. Returns ------- (doc_topic_distr, suff_stats) : `doc_topic_distr` is unnormalized topic distribution for each document. In the literature, this is called `gamma`. `suff_stats` is expected sufficient statistics for the M-step. When `cal_sstats == False`, it will be None. """ # Run e-step in parallel random_state = self.random_state_ if random_init else None # TODO: make Parallel._effective_n_jobs public instead? 
n_jobs = _get_n_jobs(self.n_jobs) if parallel is None: parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) results = parallel( delayed(_update_doc_distribution)(X[idx_slice, :], self.exp_dirichlet_component_, self.doc_topic_prior_, self.max_doc_update_iter, self.mean_change_tol, cal_sstats, random_state) for idx_slice in gen_even_slices(X.shape[0], n_jobs)) # merge result doc_topics, sstats_list = zip(*results) doc_topic_distr = np.vstack(doc_topics) if cal_sstats: # This step finishes computing the sufficient statistics for the # M-step. suff_stats = np.zeros(self.components_.shape) for sstats in sstats_list: suff_stats += sstats suff_stats *= self.exp_dirichlet_component_ else: suff_stats = None return (doc_topic_distr, suff_stats) def _em_step(self, X, total_samples, batch_update, parallel=None): """EM update for 1 iteration. update `_component` by batch VB or online VB. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Document word matrix. total_samples : integer Total umber of documents. It is only used when batch_update is `False`. batch_update : boolean Parameter that controls updating method. `True` for batch learning, `False` for online learning. parallel : joblib.Parallel Pre-initialized instance of joblib.Parallel Returns ------- doc_topic_distr : array, shape=(n_samples, n_topics) Unnormalized document topic distribution. 
""" # E-step _, suff_stats = self._e_step(X, cal_sstats=True, random_init=True, parallel=parallel) # M-step if batch_update: self.components_ = self.topic_word_prior_ + suff_stats else: # online update # In the literature, the weight is `rho` weight = np.power(self.learning_offset + self.n_batch_iter_, -self.learning_decay) doc_ratio = float(total_samples) / X.shape[0] self.components_ *= (1 - weight) self.components_ += (weight * (self.topic_word_prior_ + doc_ratio * suff_stats)) # update `component_` related variables self.exp_dirichlet_component_ = np.exp( _dirichlet_expectation_2d(self.components_)) self.n_batch_iter_ += 1 return def _check_non_neg_array(self, X, whom): """check X format check X format and make sure no negative value in X. Parameters ---------- X : array-like or sparse matrix """ X = check_array(X, accept_sparse='csr') check_non_negative(X, whom) return X def partial_fit(self, X, y=None): """Online VB with Mini-Batch update. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Document word matrix. Returns ------- self """ self._check_params() X = self._check_non_neg_array(X, "LatentDirichletAllocation.partial_fit") n_samples, n_features = X.shape batch_size = self.batch_size # initialize parameters or check if not hasattr(self, 'components_'): self._init_latent_vars(n_features) if n_features != self.components_.shape[1]: raise ValueError( "The provided data has %d dimensions while " "the model was trained with feature size %d." % (n_features, self.components_.shape[1])) n_jobs = _get_n_jobs(self.n_jobs) with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel: for idx_slice in gen_batches(n_samples, batch_size): self._em_step(X[idx_slice, :], total_samples=self.total_samples, batch_update=False, parallel=parallel) return self def fit(self, X, y=None): """Learn model for the data X with variational Bayes method. When `learning_method` is 'online', use mini-batch update. 
Otherwise, use batch update. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Document word matrix. Returns ------- self """ self._check_params() X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit") n_samples, n_features = X.shape max_iter = self.max_iter evaluate_every = self.evaluate_every learning_method = self.learning_method if learning_method == None: warnings.warn("The default value for 'learning_method' will be " "changed from 'online' to 'batch' in the release 0.20. " "This warning was introduced in 0.18.", DeprecationWarning) learning_method = 'online' batch_size = self.batch_size # initialize parameters self._init_latent_vars(n_features) # change to perplexity later last_bound = None n_jobs = _get_n_jobs(self.n_jobs) with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel: for i in xrange(max_iter): if learning_method == 'online': for idx_slice in gen_batches(n_samples, batch_size): self._em_step(X[idx_slice, :], total_samples=n_samples, batch_update=False, parallel=parallel) else: # batch update self._em_step(X, total_samples=n_samples, batch_update=True, parallel=parallel) # check perplexity if evaluate_every > 0 and (i + 1) % evaluate_every == 0: doc_topics_distr, _ = self._e_step(X, cal_sstats=False, random_init=False, parallel=parallel) bound = self.perplexity(X, doc_topics_distr, sub_sampling=False) if self.verbose: print('iteration: %d, perplexity: %.4f' % (i + 1, bound)) if last_bound and abs(last_bound - bound) < self.perp_tol: break last_bound = bound self.n_iter_ += 1 return self def transform(self, X): """Transform data X according to the fitted model. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Document word matrix. Returns ------- doc_topic_distr : shape=(n_samples, n_topics) Document topic distribution for X. """ if not hasattr(self, 'components_'): raise NotFittedError("no 'components_' attribute in model." 
" Please fit model first.") # make sure feature size is the same in fitted model and in X X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform") n_samples, n_features = X.shape if n_features != self.components_.shape[1]: raise ValueError( "The provided data has %d dimensions while " "the model was trained with feature size %d." % (n_features, self.components_.shape[1])) doc_topic_distr, _ = self._e_step(X, cal_sstats=False, random_init=False) # normalize doc_topic_distr doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis] return doc_topic_distr def _approx_bound(self, X, doc_topic_distr, sub_sampling): """Estimate the variational bound. Estimate the variational bound over "all documents" using only the documents passed in as X. Since log-likelihood of each word cannot be computed directly, we use this bound to estimate it. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Document word matrix. doc_topic_distr : array, shape=(n_samples, n_topics) Document topic distribution. In the literature, this is called gamma. sub_sampling : boolean, optional, (default=False) Compensate for subsampling of documents. It is used in calculate bound in online learning. 
Returns ------- score : float """ def _loglikelihood(prior, distr, dirichlet_distr, size): # calculate log-likelihood score = np.sum((prior - distr) * dirichlet_distr) score += np.sum(gammaln(distr) - gammaln(prior)) score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1))) return score is_sparse_x = sp.issparse(X) n_samples, n_topics = doc_topic_distr.shape n_features = self.components_.shape[1] score = 0 dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr) dirichlet_component_ = _dirichlet_expectation_2d(self.components_) doc_topic_prior = self.doc_topic_prior_ topic_word_prior = self.topic_word_prior_ if is_sparse_x: X_data = X.data X_indices = X.indices X_indptr = X.indptr # E[log p(docs | theta, beta)] for idx_d in xrange(0, n_samples): if is_sparse_x: ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]] cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]] else: ids = np.nonzero(X[idx_d, :])[0] cnts = X[idx_d, ids] temp = (dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids]) norm_phi = logsumexp(temp) score += np.dot(cnts, norm_phi) # compute E[log p(theta | alpha) - log q(theta | gamma)] score += _loglikelihood(doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_topics) # Compensate for the subsampling of the population of documents if sub_sampling: doc_ratio = float(self.total_samples) / n_samples score *= doc_ratio # E[log p(beta | eta) - log q (beta | lambda)] score += _loglikelihood(topic_word_prior, self.components_, dirichlet_component_, n_features) return score def score(self, X, y=None): """Calculate approximate log-likelihood as score. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Document word matrix. Returns ------- score : float Use approximate bound as score. 
""" X = self._check_non_neg_array(X, "LatentDirichletAllocation.score") doc_topic_distr = self.transform(X) score = self._approx_bound(X, doc_topic_distr, sub_sampling=False) return score def perplexity(self, X, doc_topic_distr=None, sub_sampling=False): """Calculate approximate perplexity for data X. Perplexity is defined as exp(-1. * log-likelihood per word) Parameters ---------- X : array-like or sparse matrix, [n_samples, n_features] Document word matrix. doc_topic_distr : None or array, shape=(n_samples, n_topics) Document topic distribution. If it is None, it will be generated by applying transform on X. Returns ------- score : float Perplexity score. """ if not hasattr(self, 'components_'): raise NotFittedError("no 'components_' attribute in model." " Please fit model first.") X = self._check_non_neg_array(X, "LatentDirichletAllocation.perplexity") if doc_topic_distr is None: doc_topic_distr = self.transform(X) else: n_samples, n_topics = doc_topic_distr.shape if n_samples != X.shape[0]: raise ValueError("Number of samples in X and doc_topic_distr" " do not match.") if n_topics != self.n_topics: raise ValueError("Number of topics does not match.") current_samples = X.shape[0] bound = self._approx_bound(X, doc_topic_distr, sub_sampling) if sub_sampling: word_cnt = X.sum() * (float(self.total_samples) / current_samples) else: word_cnt = X.sum() perword_bound = bound / word_cnt return np.exp(-1.0 * perword_bound)
unknown
codeparrot/codeparrot-clean
--- applies_to: stack: serverless: navigation_title: "Date" mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html --- # Date field type [date] JSON doesn’t have a date data type, so dates in Elasticsearch can either be: * strings containing formatted dates, e.g. `"2015-01-01"` or `"2015/01/01 12:10:30"`. * a number representing *milliseconds-since-the-epoch*. * a number representing *seconds-since-the-epoch* ([configuration](#date-epoch-seconds)). Internally, dates are converted to UTC (if the time-zone is specified) and stored as a long number representing milliseconds-since-the-epoch. ::::{note} Use the [date_nanos](/reference/elasticsearch/mapping-reference/date_nanos.md) field type if a nanosecond resolution is expected. :::: Queries on dates are internally converted to range queries on this long representation, and the result of aggregations and stored fields is converted back to a string depending on the date format that is associated with the field. ::::{note} Dates will always be rendered as strings, even if they were initially supplied as a long in the JSON document. :::: Date formats can be customised, but if no `format` is specified then it uses the default: ```js "strict_date_optional_time||epoch_millis" ``` % NOTCONSOLE This means that it will accept dates with optional timestamps, which conform to the formats supported by [`strict_date_optional_time`](/reference/elasticsearch/mapping-reference/mapping-date-format.md#strict-date-time) or milliseconds-since-the-epoch. For instance: $$$date-example$$$ ```console PUT my-index-000001 { "mappings": { "properties": { "date": { "type": "date" <1> } } } } PUT my-index-000001/_doc/1 { "date": "2015-01-01" } <2> PUT my-index-000001/_doc/2 { "date": "2015-01-01T12:10:30Z" } <3> PUT my-index-000001/_doc/3 { "date": 1420070400001 } <4> GET my-index-000001/_search { "sort": { "date": "asc"} <5> } ``` 1. The `date` field uses the default `format`. 2. 
This document uses a plain date. 3. This document includes a time. 4. This document uses milliseconds-since-the-epoch. 5. Note that the `sort` values that are returned are all in milliseconds-since-the-epoch. ::::{warning} Dates will accept numbers with a decimal point like `{"date": 1618249875.123456}` but there are some cases ([#70085]({{es-issue}}70085)) where we’ll lose precision on those dates so they should be avoided. :::: ## Multiple date formats [multiple-date-formats] Multiple formats can be specified by separating them with `||` as a separator. Each format will be tried in turn until a matching format is found. The first format will be used to convert the *milliseconds-since-the-epoch* value back into a string. $$$date-format-example$$$ ```console PUT my-index-000001 { "mappings": { "properties": { "date": { "type": "date", "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" } } } } ``` ## Parameters for `date` fields [date-params] The following parameters are accepted by `date` fields: [`doc_values`](/reference/elasticsearch/mapping-reference/doc-values.md) : Should the field be stored on disk in a column-stride fashion, so that it can later be used for sorting, aggregations, or scripting? Accepts `true` (default) or `false`. [`format`](/reference/elasticsearch/mapping-reference/mapping-date-format.md) : The date format(s) that can be parsed. Defaults to `strict_date_optional_time||epoch_millis`. `locale` : The locale to use when parsing dates since months do not have the same names and/or abbreviations in all languages. The default is ENGLISH. [`ignore_malformed`](/reference/elasticsearch/mapping-reference/ignore-malformed.md) : If `true`, malformed numbers are ignored. If `false` (default), malformed numbers throw an exception and reject the whole document. Note that this cannot be set if the `script` parameter is used. [`index`](/reference/elasticsearch/mapping-reference/mapping-index.md) : Should the field be quickly searchable? 
Accepts `true` (default) and `false`. Date fields that only have [`doc_values`](/reference/elasticsearch/mapping-reference/doc-values.md) enabled can also be queried, albeit slower. [`null_value`](/reference/elasticsearch/mapping-reference/null-value.md) : Accepts a date value in one of the configured `format’s as the field which is substituted for any explicit `null` values. Defaults to `null`, which means the field is treated as missing. Note that this cannot be set of the `script` parameter is used. `on_script_error` : Defines what to do if the script defined by the `script` parameter throws an error at indexing time. Accepts `fail` (default), which will cause the entire document to be rejected, and `continue`, which will register the field in the document’s [`_ignored`](/reference/elasticsearch/mapping-reference/mapping-ignored-field.md) metadata field and continue indexing. This parameter can only be set if the `script` field is also set. `script` : If this parameter is set, then the field will index values generated by this script, rather than reading the values directly from the source. If a value is set for this field on the input document, then the document will be rejected with an error. Scripts are in the same format as their [runtime equivalent](docs-content://manage-data/data-store/mapping/map-runtime-field.md), and should emit long-valued timestamps. [`store`](/reference/elasticsearch/mapping-reference/mapping-store.md) : Whether the field value should be stored and retrievable separately from the [`_source`](/reference/elasticsearch/mapping-reference/mapping-source-field.md) field. Accepts `true` or `false` (default). [`meta`](/reference/elasticsearch/mapping-reference/mapping-field-meta.md) : Metadata about the field. 
## Epoch seconds [date-epoch-seconds] If you need to send dates as *seconds-since-the-epoch* then make sure the `format` lists `epoch_second`: $$$date-epoch-seconds-example$$$ ```console PUT my-index-000001 { "mappings": { "properties": { "date": { "type": "date", "format": "strict_date_optional_time||epoch_second" } } } } PUT my-index-000001/_doc/example?refresh { "date": 1618321898 } POST my-index-000001/_search { "fields": [ {"field": "date"}], "_source": false } ``` % TEST[s/_search/_search?filter_path=hits.hits/] Which will reply with a date like: ```console-result { "hits": { "hits": [ { "_id": "example", "_index": "my-index-000001", "_score": 1.0, "fields": { "date": ["2021-04-13T13:51:38.000Z"] } } ] } } ``` ## Synthetic `_source` [date-synthetic-source] Synthetic source may sort `date` field values. For example: $$$synthetic-source-date-example$$$ ```console PUT idx { "settings": { "index": { "mapping": { "source": { "mode": "synthetic" } } } }, "mappings": { "properties": { "date": { "type": "date" } } } } PUT idx/_doc/1 { "date": ["2015-01-01T12:10:30Z", "2014-01-01T12:10:30Z"] } ``` % TEST[s/$/\nGET idx\/_doc\/1?filter_path=_source\n/] Will become: ```console-result { "date": ["2014-01-01T12:10:30.000Z", "2015-01-01T12:10:30.000Z"] } ``` % TEST[s/^/{"_source":/ s/\n$/}/]
unknown
github
https://github.com/elastic/elasticsearch
docs/reference/elasticsearch/mapping-reference/date.md
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from six.moves.urllib import parse as urlparse from st2common.util import isotime from st2common.util.jsonify import json_encode from st2common.exceptions import auth as exceptions from st2common import log as logging from st2common.util.auth import validate_token from st2common.constants.auth import QUERY_PARAM_ATTRIBUTE_NAME LOG = logging.getLogger(__name__) # HTTP header name format (i.e. 'X-Auth-Token') # WSGI environment variable name format (ex. 
'HTTP_X_AUTH_TOKEN') HEADERS = ['HTTP_X_AUTH_TOKEN_EXPIRY', 'HTTP_X_USER_NAME'] class AuthMiddleware(object): """WSGI middleware to handle authentication""" def __init__(self, app): self.app = app def __call__(self, environ, start_response): try: self._remove_auth_headers(environ) token = self._validate_token(environ) self._add_auth_headers(environ, token) except exceptions.TokenNotProvidedError: LOG.exception('Token is not provided.') return self._abort_unauthorized(environ, start_response) except exceptions.TokenNotFoundError: LOG.exception('Token is not found.') return self._abort_unauthorized(environ, start_response) except exceptions.TokenExpiredError: LOG.exception('Token has expired.') return self._abort_unauthorized(environ, start_response) except Exception: LOG.exception('Unexpected exception.') return self._abort_other_errors(environ, start_response) else: return self.app(environ, start_response) def _abort_other_errors(self, environ, start_response): body = json_encode({ 'faultstring': 'Internal Server Error' }) headers = [('Content-Type', 'application/json')] start_response('500 INTERNAL SERVER ERROR', headers) return [body] def _abort_unauthorized(self, environ, start_response): body = json_encode({ 'faultstring': 'Unauthorized' }) headers = [('Content-Type', 'application/json')] start_response('401 UNAUTHORIZED', headers) return [body] def _remove_auth_headers(self, env): """Remove middleware generated auth headers to prevent user from supplying them.""" headers_found = [k for k in HEADERS if k in env] for header in headers_found: del env[header] def _validate_token(self, env): """Validate token""" query_string = env.get('QUERY_STRING', '') query_params = dict(urlparse.parse_qsl(query_string)) # Note: This is a WSGI environment variable name token_in_headers = env.get('HTTP_X_AUTH_TOKEN', None) token_in_query_params = query_params.get(QUERY_PARAM_ATTRIBUTE_NAME, None) return validate_token(token_in_headers=token_in_headers, 
token_in_query_params=token_in_query_params) def _add_auth_headers(self, env, token): """Write authenticated user data to headers Build headers that represent authenticated user: * HTTP_X_AUTH_TOKEN_EXPIRY: Token expiration datetime * HTTP_X_USER_NAME: Name of confirmed user """ env['HTTP_X_AUTH_TOKEN_EXPIRY'] = isotime.format(token.expiry) env['HTTP_X_USER_NAME'] = str(token.user)
unknown
codeparrot/codeparrot-clean
/////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// /////////////////////////////////////////////////////////////////////////// #ifndef INCLUDED_IMF_COMPRESSION_ATTRIBUTE_H #define INCLUDED_IMF_COMPRESSION_ATTRIBUTE_H //----------------------------------------------------------------------------- // // class CompressionAttribute // //----------------------------------------------------------------------------- #include "ImfAttribute.h" #include "ImfCompression.h" OPENEXR_IMF_INTERNAL_NAMESPACE_HEADER_ENTER typedef TypedAttribute<OPENEXR_IMF_INTERNAL_NAMESPACE::Compression> CompressionAttribute; template <> IMF_EXPORT const char *CompressionAttribute::staticTypeName (); template <> IMF_EXPORT void CompressionAttribute::writeValueTo (OPENEXR_IMF_INTERNAL_NAMESPACE::OStream &, int) const; template <> IMF_EXPORT void CompressionAttribute::readValueFrom (OPENEXR_IMF_INTERNAL_NAMESPACE::IStream &, int, int); OPENEXR_IMF_INTERNAL_NAMESPACE_HEADER_EXIT #endif
c
github
https://github.com/opencv/opencv
3rdparty/openexr/IlmImf/ImfCompressionAttribute.h
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.connect.connector.policy; import org.apache.kafka.common.Configurable; import org.apache.kafka.common.config.ConfigValue; import java.util.List; /** * An interface for enforcing a policy on overriding of Kafka client configs via the connector configs. * <p> * Common use cases are ability to provide principal per connector, <code>sasl.jaas.config</code> * and/or enforcing that the producer/consumer configurations for optimizations are within acceptable ranges. * <p>Kafka Connect discovers implementations of this interface using the Java {@link java.util.ServiceLoader} mechanism. * To support this, implementations of this interface should also contain a service provider configuration file in * {@code META-INF/services/org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy}. * <p> * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the policy to register metrics. * The following tags are automatically added to all metrics registered: <code>config</code> set to * <code>connector.client.config.override.policy</code>, and <code>class</code> set to the * ConnectorClientConfigOverridePolicy class name. 
*/ public interface ConnectorClientConfigOverridePolicy extends Configurable, AutoCloseable { /** * Workers will invoke this before configuring per-connector Kafka admin, producer, and consumer client instances * to validate if all the overridden client configurations are allowed per the policy implementation. * This would also be invoked during the validation of connector configs via the REST API. * <p> * If there are any policy violations, the connector will not be started. * * @param connectorClientConfigRequest an instance of {@link ConnectorClientConfigRequest} that provides the configs * to be overridden and its context; never {@code null} * @return list of {@link ConfigValue} instances that describe each client configuration in the request and includes an {@link ConfigValue#errorMessages() error} if the configuration is not allowed by the policy; never null */ List<ConfigValue> validate(ConnectorClientConfigRequest connectorClientConfigRequest); }
java
github
https://github.com/apache/kafka
connect/api/src/main/java/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.java
// SPDX-License-Identifier: GPL-2.0-or-later /* * Cryptographic API. * * HMAC: Keyed-Hashing for Message Authentication (RFC2104). * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * * The HMAC implementation is derived from USAGI. * Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI */ #include <crypto/hmac.h> #include <crypto/internal/hash.h> #include <linux/err.h> #include <linux/fips.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> struct hmac_ctx { struct crypto_shash *hash; /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */ u8 pads[]; }; struct ahash_hmac_ctx { struct crypto_ahash *hash; /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */ u8 pads[]; }; static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { int bs = crypto_shash_blocksize(parent); int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); struct hmac_ctx *tctx = crypto_shash_ctx(parent); struct crypto_shash *hash = tctx->hash; u8 *ipad = &tctx->pads[0]; u8 *opad = &tctx->pads[ss]; SHASH_DESC_ON_STACK(shash, hash); int err, i; if (fips_enabled && (keylen < 112 / 8)) return -EINVAL; shash->tfm = hash; if (keylen > bs) { int err; err = crypto_shash_digest(shash, inkey, keylen, ipad); if (err) return err; keylen = ds; } else memcpy(ipad, inkey, keylen); memset(ipad + keylen, 0, bs - keylen); memcpy(opad, ipad, bs); for (i = 0; i < bs; i++) { ipad[i] ^= HMAC_IPAD_VALUE; opad[i] ^= HMAC_OPAD_VALUE; } err = crypto_shash_init(shash) ?: crypto_shash_update(shash, ipad, bs) ?: crypto_shash_export(shash, ipad) ?: crypto_shash_init(shash) ?: crypto_shash_update(shash, opad, bs) ?: crypto_shash_export(shash, opad); shash_desc_zero(shash); return err; } static int hmac_export(struct shash_desc *pdesc, void *out) { struct shash_desc *desc = shash_desc_ctx(pdesc); return 
crypto_shash_export(desc, out); } static int hmac_import(struct shash_desc *pdesc, const void *in) { struct shash_desc *desc = shash_desc_ctx(pdesc); const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); desc->tfm = tctx->hash; return crypto_shash_import(desc, in); } static int hmac_export_core(struct shash_desc *pdesc, void *out) { struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_export_core(desc, out); } static int hmac_import_core(struct shash_desc *pdesc, const void *in) { const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); struct shash_desc *desc = shash_desc_ctx(pdesc); desc->tfm = tctx->hash; return crypto_shash_import_core(desc, in); } static int hmac_init(struct shash_desc *pdesc) { const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); return hmac_import(pdesc, &tctx->pads[0]); } static int hmac_update(struct shash_desc *pdesc, const u8 *data, unsigned int nbytes) { struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_update(desc, data, nbytes); } static int hmac_finup(struct shash_desc *pdesc, const u8 *data, unsigned int nbytes, u8 *out) { struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); const struct hmac_ctx *tctx = crypto_shash_ctx(parent); const u8 *opad = &tctx->pads[ss]; struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_finup(desc, data, nbytes, out) ?: crypto_shash_import(desc, opad) ?: crypto_shash_finup(desc, out, ds, out); } static int hmac_init_tfm(struct crypto_shash *parent) { struct crypto_shash *hash; struct shash_instance *inst = shash_alg_instance(parent); struct crypto_shash_spawn *spawn = shash_instance_ctx(inst); struct hmac_ctx *tctx = crypto_shash_ctx(parent); hash = crypto_spawn_shash(spawn); if (IS_ERR(hash)) return PTR_ERR(hash); tctx->hash = hash; return 0; } static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src) { struct hmac_ctx *sctx = 
crypto_shash_ctx(src); struct hmac_ctx *dctx = crypto_shash_ctx(dst); struct crypto_shash *hash; hash = crypto_clone_shash(sctx->hash); if (IS_ERR(hash)) return PTR_ERR(hash); dctx->hash = hash; return 0; } static void hmac_exit_tfm(struct crypto_shash *parent) { struct hmac_ctx *tctx = crypto_shash_ctx(parent); crypto_free_shash(tctx->hash); } static int __hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb, u32 mask) { struct shash_instance *inst; struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; int err; int ds; int ss; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; spawn = shash_instance_ctx(inst); mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; err = crypto_grab_shash(spawn, shash_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; salg = crypto_spawn_shash_alg(spawn); alg = &salg->base; /* The underlying hash algorithm must not require a key */ err = -EINVAL; if (crypto_shash_alg_needs_key(salg)) goto err_free_inst; ds = salg->digestsize; ss = salg->statesize; if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) goto err_free_inst; err = crypto_inst_setname(shash_crypto_instance(inst), "hmac", "hmac-shash", alg); if (err) goto err_free_inst; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + (ss * 2); inst->alg.digestsize = ds; inst->alg.statesize = ss; inst->alg.descsize = sizeof(struct shash_desc) + salg->descsize; inst->alg.init = hmac_init; inst->alg.update = hmac_update; inst->alg.finup = hmac_finup; inst->alg.export = hmac_export; inst->alg.import = hmac_import; inst->alg.export_core = hmac_export_core; inst->alg.import_core = hmac_import_core; inst->alg.setkey = hmac_setkey; inst->alg.init_tfm = hmac_init_tfm; inst->alg.clone_tfm = hmac_clone_tfm; inst->alg.exit_tfm = hmac_exit_tfm; inst->free = shash_free_singlespawn_instance; 
err = shash_register_instance(tmpl, inst); if (err) { err_free_inst: shash_free_singlespawn_instance(inst); } return err; } static int hmac_setkey_ahash(struct crypto_ahash *parent, const u8 *inkey, unsigned int keylen) { struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash); int ds = crypto_ahash_digestsize(parent); int bs = crypto_ahash_blocksize(parent); int ss = crypto_ahash_statesize(parent); HASH_REQUEST_ON_STACK(req, fb); u8 *opad = &tctx->pads[ss]; u8 *ipad = &tctx->pads[0]; int err, i; if (fips_enabled && (keylen < 112 / 8)) return -EINVAL; ahash_request_set_callback(req, 0, NULL, NULL); if (keylen > bs) { ahash_request_set_virt(req, inkey, ipad, keylen); err = crypto_ahash_digest(req); if (err) goto out_zero_req; keylen = ds; } else memcpy(ipad, inkey, keylen); memset(ipad + keylen, 0, bs - keylen); memcpy(opad, ipad, bs); for (i = 0; i < bs; i++) { ipad[i] ^= HMAC_IPAD_VALUE; opad[i] ^= HMAC_OPAD_VALUE; } ahash_request_set_virt(req, ipad, NULL, bs); err = crypto_ahash_init(req) ?: crypto_ahash_update(req) ?: crypto_ahash_export(req, ipad); ahash_request_set_virt(req, opad, NULL, bs); err = err ?: crypto_ahash_init(req) ?: crypto_ahash_update(req) ?: crypto_ahash_export(req, opad); out_zero_req: HASH_REQUEST_ZERO(req); return err; } static int hmac_export_ahash(struct ahash_request *preq, void *out) { return crypto_ahash_export(ahash_request_ctx(preq), out); } static int hmac_import_ahash(struct ahash_request *preq, const void *in) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); struct ahash_request *req = ahash_request_ctx(preq); ahash_request_set_tfm(req, tctx->hash); return crypto_ahash_import(req, in); } static int hmac_export_core_ahash(struct ahash_request *preq, void *out) { return crypto_ahash_export_core(ahash_request_ctx(preq), out); } static int hmac_import_core_ahash(struct ahash_request *preq, const void *in) { struct 
crypto_ahash *tfm = crypto_ahash_reqtfm(preq); struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); struct ahash_request *req = ahash_request_ctx(preq); ahash_request_set_tfm(req, tctx->hash); return crypto_ahash_import_core(req, in); } static int hmac_init_ahash(struct ahash_request *preq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); return hmac_import_ahash(preq, &tctx->pads[0]); } static int hmac_update_ahash(struct ahash_request *preq) { struct ahash_request *req = ahash_request_ctx(preq); ahash_request_set_callback(req, ahash_request_flags(preq), preq->base.complete, preq->base.data); if (ahash_request_isvirt(preq)) ahash_request_set_virt(req, preq->svirt, NULL, preq->nbytes); else ahash_request_set_crypt(req, preq->src, NULL, preq->nbytes); return crypto_ahash_update(req); } static int hmac_finup_finish(struct ahash_request *preq, unsigned int mask) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); struct ahash_request *req = ahash_request_ctx(preq); struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); int ds = crypto_ahash_digestsize(tfm); int ss = crypto_ahash_statesize(tfm); const u8 *opad = &tctx->pads[ss]; ahash_request_set_callback(req, ahash_request_flags(preq) & ~mask, preq->base.complete, preq->base.data); ahash_request_set_virt(req, preq->result, preq->result, ds); return crypto_ahash_import(req, opad) ?: crypto_ahash_finup(req); } static void hmac_finup_done(void *data, int err) { struct ahash_request *preq = data; if (err) goto out; err = hmac_finup_finish(preq, CRYPTO_TFM_REQ_MAY_SLEEP); if (err == -EINPROGRESS || err == -EBUSY) return; out: ahash_request_complete(preq, err); } static int hmac_finup_ahash(struct ahash_request *preq) { struct ahash_request *req = ahash_request_ctx(preq); ahash_request_set_callback(req, ahash_request_flags(preq), hmac_finup_done, preq); if (ahash_request_isvirt(preq)) ahash_request_set_virt(req, preq->svirt, preq->result, preq->nbytes); else 
ahash_request_set_crypt(req, preq->src, preq->result, preq->nbytes); return crypto_ahash_finup(req) ?: hmac_finup_finish(preq, 0); } static int hmac_digest_ahash(struct ahash_request *preq) { return hmac_init_ahash(preq) ?: hmac_finup_ahash(preq); } static int hmac_init_ahash_tfm(struct crypto_ahash *parent) { struct ahash_instance *inst = ahash_alg_instance(parent); struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); struct crypto_ahash *hash; hash = crypto_spawn_ahash(ahash_instance_ctx(inst)); if (IS_ERR(hash)) return PTR_ERR(hash); if (crypto_ahash_reqsize(parent) < sizeof(struct ahash_request) + crypto_ahash_reqsize(hash)) return -EINVAL; tctx->hash = hash; return 0; } static int hmac_clone_ahash_tfm(struct crypto_ahash *dst, struct crypto_ahash *src) { struct ahash_hmac_ctx *sctx = crypto_ahash_ctx(src); struct ahash_hmac_ctx *dctx = crypto_ahash_ctx(dst); struct crypto_ahash *hash; hash = crypto_clone_ahash(sctx->hash); if (IS_ERR(hash)) return PTR_ERR(hash); dctx->hash = hash; return 0; } static void hmac_exit_ahash_tfm(struct crypto_ahash *parent) { struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); crypto_free_ahash(tctx->hash); } static int hmac_create_ahash(struct crypto_template *tmpl, struct rtattr **tb, u32 mask) { struct crypto_ahash_spawn *spawn; struct ahash_instance *inst; struct crypto_alg *alg; struct hash_alg_common *halg; int ds, ss, err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; spawn = ahash_instance_ctx(inst); mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; err = crypto_grab_ahash(spawn, ahash_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; halg = crypto_spawn_ahash_alg(spawn); alg = &halg->base; /* The underlying hash algorithm must not require a key */ err = -EINVAL; if (crypto_hash_alg_needs_key(halg)) goto err_free_inst; ds = halg->digestsize; ss = halg->statesize; if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) goto err_free_inst; err = 
crypto_inst_setname(ahash_crypto_instance(inst), tmpl->name, alg); if (err) goto err_free_inst; inst->alg.halg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS; inst->alg.halg.base.cra_flags |= CRYPTO_ALG_REQ_VIRT; inst->alg.halg.base.cra_priority = alg->cra_priority + 100; inst->alg.halg.base.cra_blocksize = alg->cra_blocksize; inst->alg.halg.base.cra_ctxsize = sizeof(struct ahash_hmac_ctx) + (ss * 2); inst->alg.halg.base.cra_reqsize = sizeof(struct ahash_request) + alg->cra_reqsize; inst->alg.halg.digestsize = ds; inst->alg.halg.statesize = ss; inst->alg.init = hmac_init_ahash; inst->alg.update = hmac_update_ahash; inst->alg.finup = hmac_finup_ahash; inst->alg.digest = hmac_digest_ahash; inst->alg.export = hmac_export_ahash; inst->alg.import = hmac_import_ahash; inst->alg.export_core = hmac_export_core_ahash; inst->alg.import_core = hmac_import_core_ahash; inst->alg.setkey = hmac_setkey_ahash; inst->alg.init_tfm = hmac_init_ahash_tfm; inst->alg.clone_tfm = hmac_clone_ahash_tfm; inst->alg.exit_tfm = hmac_exit_ahash_tfm; inst->free = ahash_free_singlespawn_instance; err = ahash_register_instance(tmpl, inst); if (err) { err_free_inst: ahash_free_singlespawn_instance(inst); } return err; } static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; u32 mask; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return PTR_ERR(algt); mask = crypto_algt_inherited_mask(algt); if (!((algt->type ^ CRYPTO_ALG_TYPE_AHASH) & algt->mask & CRYPTO_ALG_TYPE_MASK)) return hmac_create_ahash(tmpl, tb, mask); if ((algt->type ^ CRYPTO_ALG_TYPE_SHASH) & algt->mask & CRYPTO_ALG_TYPE_MASK) return -EINVAL; return __hmac_create_shash(tmpl, tb, mask); } static int hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb) { u32 mask; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err == -EINVAL ? 
-ENOENT : err; return __hmac_create_shash(tmpl, tb, mask); } static struct crypto_template hmac_tmpls[] = { { .name = "hmac", .create = hmac_create, .module = THIS_MODULE, }, { .name = "hmac-shash", .create = hmac_create_shash, .module = THIS_MODULE, }, }; static int __init hmac_module_init(void) { return crypto_register_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls)); } static void __exit hmac_module_exit(void) { crypto_unregister_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls)); } module_init(hmac_module_init); module_exit(hmac_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("HMAC hash algorithm"); MODULE_ALIAS_CRYPTO("hmac");
c
github
https://github.com/torvalds/linux
crypto/hmac.c
// Copyright IBM Corp. 2016, 2025 // SPDX-License-Identifier: BUSL-1.1 package pki import ( "fmt" "github.com/hashicorp/vault/builtin/logical/pki/observe" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) func pathAcmeAuthorization(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { return patternAcmeAuthorization(b, baseUrl+"/authorization/"+framework.MatchAllRegex("auth_id"), opts) } func addFieldsForACMEAuthorization(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { fields["auth_id"] = &framework.FieldSchema{ Type: framework.TypeString, Description: "ACME authorization identifier value", Required: true, } return fields } func patternAcmeAuthorization(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { fields := map[string]*framework.FieldSchema{} addFieldsForACMEPath(fields, pattern) addFieldsForACMERequest(fields) addFieldsForACMEAuthorization(fields) return &framework.Path{ Pattern: pattern, Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.acmeAccountRequiredWrapper(opts, b.acmeAuthorizationHandler), ForwardPerformanceSecondary: false, ForwardPerformanceStandby: true, }, }, HelpSynopsis: pathAcmeHelpSync, HelpDescription: pathAcmeHelpDesc, } } func (b *backend) acmeAuthorizationHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { authId := fields.Get("auth_id").(string) authz, err := b.GetAcmeState().LoadAuthorization(acmeCtx, userCtx, authId) if err != nil { return nil, fmt.Errorf("failed to load authorization: %w", err) } var status string rawStatus, haveStatus := data["status"] if haveStatus { var ok bool status, ok = rawStatus.(string) if !ok { return nil, fmt.Errorf("bad type (%T) for value 'status': %w", rawStatus, ErrMalformed) } } 
b.pkiObserver.RecordPKIObservation(acmeCtx, r, observe.ObservationTypePKIAcmeAuthorization, observe.NewAdditionalPKIMetadata("auth_id", authId), observe.NewAdditionalPKIMetadata("status", status), ) if len(data) == 0 { return b.acmeAuthorizationFetchHandler(acmeCtx, r, fields, userCtx, data, authz) } if haveStatus && status == "deactivated" { return b.acmeAuthorizationDeactivateHandler(acmeCtx, r, fields, userCtx, data, authz) } return nil, ErrMalformed } func (b *backend) acmeAuthorizationFetchHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization) (*logical.Response, error) { return &logical.Response{ Data: authz.NetworkMarshal(acmeCtx), }, nil } func (b *backend) acmeAuthorizationDeactivateHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization) (*logical.Response, error) { if authz.Status != ACMEAuthorizationPending && authz.Status != ACMEAuthorizationValid { return nil, fmt.Errorf("unable to deactivate authorization in '%v' status: %w", authz.Status, ErrMalformed) } authz.Status = ACMEAuthorizationDeactivated for _, challenge := range authz.Challenges { challenge.Status = ACMEChallengeInvalid } if err := b.GetAcmeState().SaveAuthorization(acmeCtx, authz); err != nil { return nil, fmt.Errorf("error saving deactivated authorization: %w", err) } return &logical.Response{ Data: authz.NetworkMarshal(acmeCtx), }, nil }
go
github
https://github.com/hashicorp/vault
builtin/logical/pki/path_acme_authorizations.go
def main(request, response): import simplejson as json f = file('config.json') source = f.read() s = json.JSONDecoder().decode(source) url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1]) response.headers.set("Content-Security-Policy", "default-src 'self'; script-src 'self' 'unsafe-inline'") response.headers.set("X-Content-Security-Policy", "default-src 'self'; script-src 'self' 'unsafe-inline'") response.headers.set("X-WebKit-CSP", "default-src 'self'; script-src 'self' 'unsafe-inline'") return """<!DOCTYPE html> <!-- Copyright (c) 2013 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of works must retain the original copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the original copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this work without specific prior written permission. THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Authors: Hao, Yunfei <yunfeix.hao@intel.com> --> <html> <head> <title>CSP Test: csp_default-src_self-style</title> <link rel="author" title="Intel" href="http://www.intel.com/"/> <link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/> <meta name="flags" content=""/> <meta name="assert" content="default-src 'self' 'unsafe-inline'"/> <meta charset="utf-8"/> <script src="../resources/testharness.js"></script> <script src="../resources/testharnessreport.js"></script> <link rel="stylesheet" type="text/css" href='""" + url1 + """/csp/support/w3c/canvas-index.css'/> <link rel="stylesheet" type="text/css" href="support/blue-100x100.css"/> <style> #test-green { background-color: green; } </style> </head> <body> <div id="log"></div> <div id="test-blue"></div> <div id="test-green"></div> <h3>ext-css:""" + url1 + """/tests/csp/support/w3c/canvas-index.css</h3> <script> test(function() { var div = document.querySelector("h3"); var fix = getComputedStyle(div)["display"]; assert_not_equals(fix, "inline", "style setted incorrectly"); }, document.title + "_blocked"); test(function() { var div = document.querySelector("#test-blue"); var fix = getComputedStyle(div)["backgroundColor"]; assert_equals(fix, "rgb(0, 0, 255)", "style setted incorrectly"); }, document.title + "_allowed"); test(function() { var div = document.querySelector("#test-green"); var fix = getComputedStyle(div)["backgroundColor"]; assert_not_equals(fix, "rgb(0, 128, 0)", "style setted incorrectly"); }, document.title + "_blocked_inline"); </script> </body> </html> """
unknown
codeparrot/codeparrot-clean
//===--- Headers.cpp - Include headers ---------------------------*- C++-*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "Headers.h" #include "Preamble.h" #include "SourceCode.h" #include "support/Logger.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/SourceManager.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Lex/DirectoryLookup.h" #include "clang/Lex/HeaderSearch.h" #include "clang/Lex/PPCallbacks.h" #include "clang/Lex/Preprocessor.h" #include "clang/Tooling/Inclusions/HeaderAnalysis.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Path.h" #include <cstring> #include <optional> #include <string> namespace clang { namespace clangd { class IncludeStructure::RecordHeaders : public PPCallbacks { public: RecordHeaders(const CompilerInstance &CI, IncludeStructure *Out) : SM(CI.getSourceManager()), Out(Out) {} // Record existing #includes - both written and resolved paths. Only #includes // in the main file are collected. void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok, llvm::StringRef FileName, bool IsAngled, CharSourceRange /*FilenameRange*/, OptionalFileEntryRef File, llvm::StringRef /*SearchPath*/, llvm::StringRef /*RelativePath*/, const clang::Module * /*SuggestedModule*/, bool /*ModuleImported*/, SrcMgr::CharacteristicKind FileKind) override { auto MainFID = SM.getMainFileID(); // If an include is part of the preamble patch, translate #line directives. if (InBuiltinFile) HashLoc = translatePreamblePatchLocation(HashLoc, SM); // Record main-file inclusions (including those mapped from the preamble // patch). 
if (isInsideMainFile(HashLoc, SM)) { Out->MainFileIncludes.emplace_back(); auto &Inc = Out->MainFileIncludes.back(); Inc.Written = (IsAngled ? "<" + FileName + ">" : "\"" + FileName + "\"").str(); Inc.Resolved = std::string( File ? getCanonicalPath(*File, SM.getFileManager()).value_or("") : ""); Inc.HashOffset = SM.getFileOffset(HashLoc); Inc.HashLine = SM.getLineNumber(SM.getFileID(HashLoc), Inc.HashOffset) - 1; Inc.FileKind = FileKind; Inc.Directive = IncludeTok.getIdentifierInfo()->getPPKeywordID(); if (File) { IncludeStructure::HeaderID HID = Out->getOrCreateID(*File); Inc.HeaderID = static_cast<unsigned>(HID); if (IsAngled) if (auto StdlibHeader = tooling::stdlib::Header::named(Inc.Written)) { auto &IDs = Out->StdlibHeaders[*StdlibHeader]; // Few physical files for one stdlib header name, linear scan is ok. if (!llvm::is_contained(IDs, HID)) IDs.push_back(HID); } } Out->MainFileIncludesBySpelling[Inc.Written].push_back( Out->MainFileIncludes.size() - 1); } // Record include graph (not just for main-file includes) if (File) { auto IncludingFileEntry = SM.getFileEntryRefForID(SM.getFileID(HashLoc)); if (!IncludingFileEntry) { assert(SM.getBufferName(HashLoc).starts_with("<") && "Expected #include location to be a file or <built-in>"); // Treat as if included from the main file. 
IncludingFileEntry = SM.getFileEntryRefForID(MainFID); } auto IncludingID = Out->getOrCreateID(*IncludingFileEntry), IncludedID = Out->getOrCreateID(*File); Out->IncludeChildren[IncludingID].push_back(IncludedID); } } void FileChanged(SourceLocation Loc, FileChangeReason Reason, SrcMgr::CharacteristicKind FileType, FileID PrevFID) override { switch (Reason) { case PPCallbacks::EnterFile: ++Level; if (BuiltinFile.isInvalid() && SM.isWrittenInBuiltinFile(Loc)) { BuiltinFile = SM.getFileID(Loc); InBuiltinFile = true; } break; case PPCallbacks::ExitFile: { --Level; if (PrevFID == BuiltinFile) InBuiltinFile = false; break; } case PPCallbacks::RenameFile: case PPCallbacks::SystemHeaderPragma: break; } } private: // Keeps track of include depth for the current file. It's 1 for main file. int Level = 0; bool inMainFile() const { return Level == 1; } const SourceManager &SM; // Set after entering the <built-in> file. FileID BuiltinFile; // Indicates whether <built-in> file is part of include stack. 
bool InBuiltinFile = false; IncludeStructure *Out; }; bool isLiteralInclude(llvm::StringRef Include) { return Include.starts_with("<") || Include.starts_with("\""); } bool HeaderFile::valid() const { return (Verbatim && isLiteralInclude(File)) || (!Verbatim && llvm::sys::path::is_absolute(File)); } llvm::Expected<HeaderFile> toHeaderFile(llvm::StringRef Header, llvm::StringRef HintPath) { if (isLiteralInclude(Header)) return HeaderFile{Header.str(), /*Verbatim=*/true}; auto U = URI::parse(Header); if (!U) return U.takeError(); auto IncludePath = URI::includeSpelling(*U); if (!IncludePath) return IncludePath.takeError(); if (!IncludePath->empty()) return HeaderFile{std::move(*IncludePath), /*Verbatim=*/true}; auto Resolved = URI::resolve(*U, HintPath); if (!Resolved) return Resolved.takeError(); return HeaderFile{std::move(*Resolved), /*Verbatim=*/false}; } llvm::SmallVector<SymbolInclude, 1> getRankedIncludes(const Symbol &Sym) { auto Includes = Sym.IncludeHeaders; // Sort in descending order by reference count and header length. llvm::sort(Includes, [](const Symbol::IncludeHeaderWithReferences &LHS, const Symbol::IncludeHeaderWithReferences &RHS) { if (LHS.References == RHS.References) return LHS.IncludeHeader.size() < RHS.IncludeHeader.size(); return LHS.References > RHS.References; }); llvm::SmallVector<SymbolInclude, 1> Headers; for (const auto &Include : Includes) Headers.push_back({Include.IncludeHeader, Include.supportedDirectives()}); return Headers; } void IncludeStructure::collect(const CompilerInstance &CI) { auto &SM = CI.getSourceManager(); MainFileEntry = SM.getFileEntryForID(SM.getMainFileID()); auto Collector = std::make_unique<RecordHeaders>(CI, this); CI.getPreprocessor().addPPCallbacks(std::move(Collector)); // If we're reusing a preamble, don't repopulate SearchPathsCanonical. // The entries will be the same, but canonicalizing to find out is expensive! 
if (SearchPathsCanonical.empty()) { for (const auto &Dir : CI.getPreprocessor().getHeaderSearchInfo().search_dir_range()) { if (Dir.getLookupType() == DirectoryLookup::LT_NormalDir) SearchPathsCanonical.emplace_back( SM.getFileManager().getCanonicalName(*Dir.getDirRef())); } } } std::optional<IncludeStructure::HeaderID> IncludeStructure::getID(const FileEntry *Entry) const { // HeaderID of the main file is always 0; if (Entry == MainFileEntry) { return static_cast<IncludeStructure::HeaderID>(0u); } auto It = UIDToIndex.find(Entry->getUniqueID()); if (It == UIDToIndex.end()) return std::nullopt; return It->second; } IncludeStructure::HeaderID IncludeStructure::getOrCreateID(FileEntryRef Entry) { // Main file's FileEntry was not known at IncludeStructure creation time. if (&Entry.getFileEntry() == MainFileEntry) { if (RealPathNames.front().empty()) RealPathNames.front() = MainFileEntry->tryGetRealPathName().str(); return MainFileID; } auto R = UIDToIndex.try_emplace( Entry.getUniqueID(), static_cast<IncludeStructure::HeaderID>(RealPathNames.size())); if (R.second) RealPathNames.emplace_back(); IncludeStructure::HeaderID Result = R.first->getSecond(); std::string &RealPathName = RealPathNames[static_cast<unsigned>(Result)]; if (RealPathName.empty()) RealPathName = Entry.getFileEntry().tryGetRealPathName().str(); return Result; } llvm::DenseMap<IncludeStructure::HeaderID, unsigned> IncludeStructure::includeDepth(HeaderID Root) const { // Include depth 0 is the main file only. llvm::DenseMap<HeaderID, unsigned> Result; assert(static_cast<unsigned>(Root) < RealPathNames.size()); Result[Root] = 0; std::vector<IncludeStructure::HeaderID> CurrentLevel; CurrentLevel.push_back(Root); llvm::DenseSet<IncludeStructure::HeaderID> Seen; Seen.insert(Root); // Each round of BFS traversal finds the next depth level. 
std::vector<IncludeStructure::HeaderID> PreviousLevel; for (unsigned Level = 1; !CurrentLevel.empty(); ++Level) { PreviousLevel.clear(); PreviousLevel.swap(CurrentLevel); for (const auto &Parent : PreviousLevel) { for (const auto &Child : IncludeChildren.lookup(Parent)) { if (Seen.insert(Child).second) { CurrentLevel.push_back(Child); Result[Child] = Level; } } } } return Result; } llvm::SmallVector<const Inclusion *> IncludeStructure::mainFileIncludesWithSpelling(llvm::StringRef Spelling) const { llvm::SmallVector<const Inclusion *> Includes; for (auto Idx : MainFileIncludesBySpelling.lookup(Spelling)) Includes.push_back(&MainFileIncludes[Idx]); return Includes; } void IncludeInserter::addExisting(const Inclusion &Inc) { IncludedHeaders.insert(Inc.Written); if (!Inc.Resolved.empty()) IncludedHeaders.insert(Inc.Resolved); } /// FIXME(ioeric): we might not want to insert an absolute include path if the /// path is not shortened. bool IncludeInserter::shouldInsertInclude( PathRef DeclaringHeader, const HeaderFile &InsertedHeader) const { assert(InsertedHeader.valid()); if (!HeaderSearchInfo && !InsertedHeader.Verbatim) return false; if (FileName == DeclaringHeader || FileName == InsertedHeader.File) return false; auto Included = [&](llvm::StringRef Header) { return IncludedHeaders.contains(Header); }; return !Included(DeclaringHeader) && !Included(InsertedHeader.File); } std::optional<std::string> IncludeInserter::calculateIncludePath(const HeaderFile &InsertedHeader, llvm::StringRef IncludingFile) const { assert(InsertedHeader.valid()); if (InsertedHeader.Verbatim) return InsertedHeader.File; bool IsAngledByDefault = false; std::string Suggested; if (HeaderSearchInfo) { Suggested = HeaderSearchInfo->suggestPathToFileForDiagnostics( InsertedHeader.File, BuildDir, IncludingFile, &IsAngledByDefault); } else { // Calculate include relative to including file only. 
StringRef IncludingDir = llvm::sys::path::parent_path(IncludingFile); SmallString<256> RelFile(InsertedHeader.File); // Replacing with "" leaves "/RelFile" if IncludingDir doesn't end in "/". llvm::sys::path::replace_path_prefix(RelFile, IncludingDir, "./"); Suggested = llvm::sys::path::convert_to_slash( llvm::sys::path::remove_leading_dotslash(RelFile)); } // FIXME: should we allow (some limited number of) "../header.h"? if (llvm::sys::path::is_absolute(Suggested)) return std::nullopt; auto HeaderPath = llvm::sys::path::convert_to_slash(InsertedHeader.File); bool IsAngled = false; for (auto &Filter : AngledHeaders) { if (Filter(HeaderPath)) { IsAngled = true; break; } } bool IsQuoted = false; for (auto &Filter : QuotedHeaders) { if (Filter(HeaderPath)) { IsQuoted = true; break; } } // No filters apply, or both filters apply (a bug), use system default. if (IsAngled == IsQuoted) { // Probably a bug in the config regex. if (IsAngled && IsQuoted) { elog("Header '{0}' matches both quoted and angled regexes, default will " "be used.", HeaderPath); } IsAngled = IsAngledByDefault; } if (IsAngled) Suggested = "<" + Suggested + ">"; else // if (IsQuoted) Suggested = "\"" + Suggested + "\""; return Suggested; } std::optional<TextEdit> IncludeInserter::insert(llvm::StringRef VerbatimHeader, tooling::IncludeDirective Directive) const { std::optional<TextEdit> Edit; if (auto Insertion = Inserter.insert(VerbatimHeader.trim("\"<>"), VerbatimHeader.starts_with("<"), Directive)) Edit = replacementToEdit(Code, *Insertion); return Edit; } llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Inclusion &Inc) { return OS << Inc.Written << " = " << (!Inc.Resolved.empty() ? 
Inc.Resolved : "[unresolved]") << " at line" << Inc.HashLine; } bool operator==(const Inclusion &LHS, const Inclusion &RHS) { return std::tie(LHS.Directive, LHS.FileKind, LHS.HashOffset, LHS.HashLine, LHS.Resolved, LHS.Written) == std::tie(RHS.Directive, RHS.FileKind, RHS.HashOffset, RHS.HashLine, RHS.Resolved, RHS.Written); } } // namespace clangd } // namespace clang
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/clangd/Headers.cpp
# -*- coding: utf-8 -*- """ /*************************************************************************** Name : Versioning plugin for DB Manager Description : Set up versioning support for a table Date : Mar 12, 2012 copyright : (C) 2012 by Giuseppe Sucameli email : brush.tyler@gmail.com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ from qgis.PyQt.QtCore import Qt from qgis.PyQt.QtWidgets import QAction, QApplication from qgis.PyQt.QtGui import QIcon # The load function is called when the "db" database or either one of its # children db objects (table o schema) is selected by the user. # @param db is the selected database # @param mainwindow is the DBManager mainwindow def load(db, mainwindow): # add the action to the DBManager menu action = QAction(QIcon(), QApplication.translate("DBManagerPlugin", "&Versioning"), db) mainwindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Table"), run) # The run function is called once the user clicks on the action TopoViewer # (look above at the load function) from the DBManager menu/toolbar. # @param item is the selected db item (either db, schema or table) # @param action is the clicked action on the DBManager menu/toolbar # @param mainwindow is the DBManager mainwindow def run(item, action, mainwindow): from .dlg_versioning import DlgVersioning dlg = DlgVersioning(item, mainwindow) QApplication.restoreOverrideCursor() try: dlg.exec_() finally: QApplication.setOverrideCursor(Qt.WaitCursor)
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe import _ from frappe.model.document import Document from frappe.utils import flt class UnableToSelectBatchError(frappe.ValidationError): pass class Batch(Document): def autoname(self): '''Generate random ID for batch if not specified''' if not self.batch_id: if frappe.db.get_value('Item', self.item, 'create_new_batch'): temp = None while not temp: temp = frappe.generate_hash()[:7].upper() if frappe.db.exists('Batch', temp): temp = None self.batch_id = temp else: frappe.throw(_('Batch ID is mandatory'), frappe.MandatoryError) self.name = self.batch_id def onload(self): self.image = frappe.db.get_value('Item', self.item, 'image') def validate(self): self.item_has_batch_enabled() def item_has_batch_enabled(self): if frappe.db.get_value("Item",self.item,"has_batch_no") == 0: frappe.throw(_("The selected item cannot have Batch")) @frappe.whitelist() def get_batch_qty(batch_no=None, warehouse=None, item_code=None): '''Returns batch actual qty if warehouse is passed, or returns dict of qty by warehouse if warehouse is None The user must pass either batch_no or batch_no + warehouse or item_code + warehouse :param batch_no: Optional - give qty for this batch no :param warehouse: Optional - give qty for this warehouse :param item_code: Optional - give qty for this item''' frappe.has_permission('Batch', throw=True) out = 0 if batch_no and warehouse: out = float(frappe.db.sql("""select sum(actual_qty) from `tabStock Ledger Entry` where warehouse=%s and batch_no=%s""", (warehouse, batch_no))[0][0] or 0) if batch_no and not warehouse: out = frappe.db.sql('''select warehouse, sum(actual_qty) as qty from `tabStock Ledger Entry` where batch_no=%s group by warehouse''', batch_no, as_dict=1) if not batch_no and item_code and warehouse: out = frappe.db.sql('''select batch_no, sum(actual_qty) as 
qty from `tabStock Ledger Entry` where item_code = %s and warehouse=%s group by batch_no''', (item_code, warehouse), as_dict=1) return out @frappe.whitelist() def get_batches_by_oldest(item_code, warehouse): '''Returns the oldest batch and qty for the given item_code and warehouse''' batches = get_batch_qty(item_code = item_code, warehouse = warehouse) batches_dates = [[batch, frappe.get_value('Batch', batch.batch_no, 'expiry_date')] for batch in batches] batches_dates.sort(key=lambda tup: tup[1]) return batches_dates @frappe.whitelist() def split_batch(batch_no, item_code, warehouse, qty, new_batch_id = None): '''Split the batch into a new batch''' batch = frappe.get_doc(dict(doctype='Batch', item=item_code, batch_id=new_batch_id)).insert() stock_entry = frappe.get_doc(dict( doctype='Stock Entry', purpose='Repack', items=[ dict( item_code = item_code, qty = float(qty or 0), s_warehouse = warehouse, batch_no = batch_no ), dict( item_code = item_code, qty = float(qty or 0), t_warehouse = warehouse, batch_no = batch.name ), ] )) stock_entry.insert() stock_entry.submit() return batch.name def set_batch_nos(doc, warehouse_field, throw = False): '''Automatically select `batch_no` for outgoing items in item table''' for d in doc.items: has_batch_no = frappe.db.get_value('Item', d.item_code, 'has_batch_no') warehouse = d.get(warehouse_field, None) if has_batch_no and warehouse and d.qty > 0: if not d.batch_no: d.batch_no = get_batch_no(d.item_code, warehouse, d.qty, throw) else: batch_qty = get_batch_qty(batch_no=d.batch_no, warehouse=warehouse) if flt(batch_qty) < flt(d.qty): frappe.throw(_("Row #{0}: The batch {1} has only {2} qty. 
Please select another batch which has {3} qty available or split the row into multiple rows, to deliver/issue from multiple batches").format(d.idx, d.batch_no, batch_qty, d.qty)) def get_batch_no(item_code, warehouse, qty, throw=False): '''get the smallest batch with for the given item_code, warehouse and qty''' batch_no = None batches = get_batch_qty(item_code = item_code, warehouse = warehouse) if batches: batches = sorted(batches, lambda a, b: 1 if a.qty > b.qty else -1) for b in batches: if b.qty >= qty: batch_no = b.batch_no # found! break if not batch_no: frappe.msgprint(_('Please select a Batch for Item {0}. Unable to find a single batch that fulfills this requirement').format(frappe.bold(item_code))) if throw: raise UnableToSelectBatchError return batch_no
unknown
codeparrot/codeparrot-clean
__author__ = 'michael' import threading class Player(): def __init__(self, game_type, ruleset, name): self.game_type = game_type self.ruleset = ruleset self.name = name def info(self, infoview): pass def move(self, moveview): pass def reset(self): pass def choose_helper(prompt, opt, mapper): for i in enumerate(opt): print("(%i) %s" % (i[0], mapper(i[1]))) print("================================================================") w = input("%s (0-%i):" % (prompt, len(opt) - 1)) return opt[int(w)] class ConsolePlayer(Player): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.visual = self.game_type.get_visual('text') def info(self, infoview): print("[%s] Spielinformationen:" % self.name) print("================================================================") print(self.visual.visualize_view(infoview)) print("================================================================") def move(self, moveview): self.info(moveview) print("Mögliche Züge:") return choose_helper("[%s] Bitte wählen Sie ihren Zug" % (self.name), list(moveview.get_options()), self.visual.visualize_option) class RandomPlayer(Player): def move(self, moveview): return moveview.get_options().sample() class GTKPlayer(Player): shared_visual=None def __init__(self,*args,**kwargs): super().__init__(*args,**kwargs) c=kwargs.get("visual") if c: self.visual=c else: if not GTKPlayer.shared_visual: GTKPlayer.shared_visual=self.game_type.get_visual("gtk") self.visual=GTKPlayer.shared_visual def move(self, moveview): return self.visual.wait_for_move(moveview)
unknown
codeparrot/codeparrot-clean
/* * SHA1 routine optimized to do word accesses rather than byte accesses, * and to avoid unnecessary copies into the context array. * * This was initially based on the Mozilla SHA1 implementation, although * none of the original Mozilla code remains. */ typedef struct { unsigned long long size; unsigned int H[5]; unsigned int W[16]; } blk_SHA_CTX; void blk_SHA1_Init(blk_SHA_CTX *ctx); void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *dataIn, size_t len); void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx); #ifndef platform_SHA_CTX #define platform_SHA_CTX blk_SHA_CTX #define platform_SHA1_Init blk_SHA1_Init #define platform_SHA1_Update blk_SHA1_Update #define platform_SHA1_Final blk_SHA1_Final #endif
c
github
https://github.com/git/git
block-sha1/sha1.h
""" Doctest example from the official Python documentation. https://docs.python.org/3/library/doctest.html """ def factorial(n): """Return the factorial of n, an exact integer >= 0. >>> [factorial(n) for n in range(6)] [1, 1, 2, 6, 24, 120] >>> factorial(30) # doctest: +ELLIPSIS 265252859812191058636308480000000... >>> factorial(-1) Traceback (most recent call last): ... ValueError: n must be >= 0 Factorials of floats are OK, but the float must be an exact integer: >>> factorial(30.1) Traceback (most recent call last): ... ValueError: n must be exact integer >>> factorial(30.0) # doctest: +ELLIPSIS 265252859812191058636308480000000... It must also not be ridiculously large: >>> factorial(1e100) Traceback (most recent call last): ... OverflowError: n too large """ import math if not n >= 0: raise ValueError("n must be >= 0") if math.floor(n) != n: raise ValueError("n must be exact integer") if n + 1 == n: # catch a value like 1e300 raise OverflowError("n too large") result = 1 factor = 2 while factor <= n: result *= factor factor += 1 return result
unknown
codeparrot/codeparrot-clean
/* Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // +k8s:deepcopy-gen=package // Package resource contains the latest (or "internal") version of the // Kubernetes resource API objects. package resource
go
github
https://github.com/kubernetes/kubernetes
pkg/apis/resource/doc.go
# DllReference [DllPlugin documentation](https://webpack.js.org/plugins/dll-plugin) This is the _reference_ bundle (with the manifests) for [dll user example](https://github.com/webpack/webpack/tree/main/examples/dll-user) # webpack.config.js ```javascript "use strict"; const path = require("path"); const webpack = require("../../"); /** @type {import("webpack").Configuration} */ const config = { // mode: "development" || "production", resolve: { extensions: [".js", ".jsx"] }, entry: { alpha: ["./alpha", "./a", "module"], beta: ["./beta", "./b", "./c"] }, output: { path: path.join(__dirname, "dist"), filename: "MyDll.[name].js", library: "[name]_[fullhash]" }, plugins: [ new webpack.DllPlugin({ path: path.join(__dirname, "dist", "[name]-manifest.json"), name: "[name]_[fullhash]" }) ] }; module.exports = config; ``` # dist/MyDll.alpha.js ```javascript var alpha_ca803ba9b1c1f84bc50c; /******/ (() => { // webpackBootstrap /******/ var __webpack_modules__ = ([ /* 0 */ /*!*****************!*\ !*** dll alpha ***! \*****************/ /*! unknown exports (runtime-defined) */ /*! runtime requirements: __webpack_require__, module */ /***/ ((module, __unused_webpack_exports, __webpack_require__) => { module.exports = __webpack_require__; /***/ }), /* 1 */ /*!******************!*\ !*** ./alpha.js ***! \******************/ /*! unknown exports (runtime-defined) */ /*! runtime requirements: module */ /*! CommonJS bailout: module.exports is used directly at 1:0-14 */ /***/ ((module) => { module.exports = "alpha"; /***/ }), /* 2 */ /*!**************!*\ !*** ./a.js ***! \**************/ /*! unknown exports (runtime-defined) */ /*! runtime requirements: module */ /*! CommonJS bailout: module.exports is used directly at 1:0-14 */ /***/ ((module) => { module.exports = "a"; /***/ }), /* 3 */ /*!*********************************!*\ !*** ../node_modules/module.js ***! \*********************************/ /*! unknown exports (runtime-defined) */ /*! runtime requirements: module */ /*! 
CommonJS bailout: module.exports is used directly at 1:0-14 */ /***/ ((module) => { module.exports = "module"; /***/ }) /******/ ]); ``` <details><summary><code>/* webpack runtime code */</code></summary> ``` js /************************************************************************/ /******/ // The module cache /******/ var __webpack_module_cache__ = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ // Check if module is in cache /******/ var cachedModule = __webpack_module_cache__[moduleId]; /******/ if (cachedModule !== undefined) { /******/ return cachedModule.exports; /******/ } /******/ // Check if module exists (development only) /******/ if (__webpack_modules__[moduleId] === undefined) { /******/ var e = new Error("Cannot find module '" + moduleId + "'"); /******/ e.code = 'MODULE_NOT_FOUND'; /******/ throw e; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = __webpack_module_cache__[moduleId] = { /******/ // no module.id needed /******/ // no module.loaded needed /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__); /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /************************************************************************/ ``` </details> ``` js /******/ /******/ // startup /******/ // Load entry module and return exports /******/ // This entry module doesn't tell about it's top-level declarations so it can't be inlined /******/ var __webpack_exports__ = __webpack_require__(0); /******/ alpha_ca803ba9b1c1f84bc50c = __webpack_exports__; /******/ /******/ })() ; ``` # dist/alpha-manifest.json ```javascript 
{"name":"alpha_ca803ba9b1c1f84bc50c","content":{"./alpha.js":{"id":1,"buildMeta":{"treatAsCommonJs":true}},"./a.js":{"id":2,"buildMeta":{"treatAsCommonJs":true}},"../node_modules/module.js":{"id":3,"buildMeta":{"treatAsCommonJs":true}}}} ``` # Info ## Unoptimized ``` asset MyDll.alpha.js 2.84 KiB [emitted] (name: alpha) asset MyDll.beta.js 2.81 KiB [emitted] (name: beta) chunk (runtime: alpha) MyDll.alpha.js (alpha) 85 bytes [entry] [rendered] > alpha dependent modules 73 bytes [dependent] 3 modules dll alpha 12 bytes [built] [code generated] [used exports unknown] dll entry used as library export chunk (runtime: beta) MyDll.beta.js (beta) 81 bytes [entry] [rendered] > beta dependent modules 69 bytes [dependent] 3 modules dll beta 12 bytes [built] [code generated] [used exports unknown] dll entry used as library export webpack X.X.X compiled successfully ``` ## Production mode ``` asset MyDll.alpha.js 307 bytes [emitted] [minimized] (name: alpha) asset MyDll.beta.js 301 bytes [emitted] [minimized] (name: beta) chunk (runtime: beta) MyDll.beta.js (beta) 81 bytes [entry] [rendered] > beta dependent modules 69 bytes [dependent] 3 modules dll beta 12 bytes [built] [code generated] dll entry used as library export chunk (runtime: alpha) MyDll.alpha.js (alpha) 85 bytes [entry] [rendered] > alpha dependent modules 73 bytes [dependent] 3 modules dll alpha 12 bytes [built] [code generated] dll entry used as library export webpack X.X.X compiled successfully ```
unknown
github
https://github.com/webpack/webpack
examples/dll/README.md
// Copyright IBM Corp. 2016, 2025 // SPDX-License-Identifier: BUSL-1.1 package transit import ( "context" "fmt" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) const keysConfigPath = "config/keys" type keysConfig struct { DisableUpsert bool `json:"disable_upsert"` } var defaultKeysConfig = keysConfig{ DisableUpsert: false, } func (b *backend) pathConfigKeys() *framework.Path { return &framework.Path{ Pattern: "config/keys", DisplayAttrs: &framework.DisplayAttributes{ OperationPrefix: operationPrefixTransit, }, Fields: map[string]*framework.FieldSchema{ "disable_upsert": { Type: framework.TypeBool, Description: `Whether to allow automatic upserting (creation) of keys on the encrypt endpoint.`, }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigKeysWrite, DisplayAttrs: &framework.DisplayAttributes{ OperationVerb: "configure", OperationSuffix: "keys", }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigKeysRead, DisplayAttrs: &framework.DisplayAttributes{ OperationSuffix: "keys-configuration", }, }, }, HelpSynopsis: pathConfigKeysHelpSyn, HelpDescription: pathConfigKeysHelpDesc, } } func (b *backend) readConfigKeys(ctx context.Context, req *logical.Request) (*keysConfig, error) { entry, err := req.Storage.Get(ctx, keysConfigPath) if err != nil { return nil, fmt.Errorf("failed to fetch keys configuration: %w", err) } var cfg keysConfig if entry == nil { cfg = defaultKeysConfig return &cfg, nil } if err := entry.DecodeJSON(&cfg); err != nil { return nil, fmt.Errorf("failed to decode keys configuration: %w", err) } return &cfg, nil } func (b *backend) writeConfigKeys(ctx context.Context, req *logical.Request, cfg *keysConfig) error { entry, err := logical.StorageEntryJSON(keysConfigPath, cfg) if err != nil { return fmt.Errorf("failed to marshal keys configuration: %w", err) } return req.Storage.Put(ctx, entry) } func 
respondConfigKeys(cfg *keysConfig) *logical.Response { return &logical.Response{ Data: map[string]interface{}{ "disable_upsert": cfg.DisableUpsert, }, } } func (b *backend) pathConfigKeysWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { upsert := d.Get("disable_upsert").(bool) cfg, err := b.readConfigKeys(ctx, req) if err != nil { return nil, err } modified := false if cfg.DisableUpsert != upsert { cfg.DisableUpsert = upsert modified = true } if modified { if err := b.writeConfigKeys(ctx, req, cfg); err != nil { return nil, err } } return respondConfigKeys(cfg), nil } func (b *backend) pathConfigKeysRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { cfg, err := b.readConfigKeys(ctx, req) if err != nil { return nil, err } return respondConfigKeys(cfg), nil } const pathConfigKeysHelpSyn = `Configuration common across all keys` const pathConfigKeysHelpDesc = ` This path is used to configure common functionality across all keys. Currently, this supports limiting the ability to automatically create new keys when an unknown key is used for encryption (upsert). `
go
github
https://github.com/hashicorp/vault
builtin/logical/transit/path_config_keys.go
''' SASSIE Copyright (C) 2011 Joseph E. Curtis This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see http://www.gnu.org/licenses/gpl-3.0.html for details. ''' # System imports from distutils.core import * from distutils import sysconfig # Third-party modules - we depend on numpy for everything import numpy from numpy.distutils.core import Extension, setup # Obtain the numpy include directory. This logic works across numpy versions. try: numpy_include = numpy.get_include() except AttributeError: numpy_include = numpy.get_numpy_include() # simple extension module pairs = Extension(name="pairs",sources=['./pairs.f'], include_dirs = [numpy_include], ) # NumyTypemapTests setup setup( name = "PAIRS", description = "Module sets up overlap array", author = "Joseph E. Curtis", version = "0.1", ext_modules = [pairs] )
unknown
codeparrot/codeparrot-clean
from sympy.mpmath import * def test_matrix_basic(): A1 = matrix(3) for i in range(3): A1[i,i] = 1 assert A1 == eye(3) assert A1 == matrix(A1) A2 = matrix(3, 2) assert not A2._matrix__data A3 = matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) assert list(A3) == list(range(1, 10)) A3[1,1] = 0 assert not (1, 1) in A3._matrix__data A4 = matrix([[1, 2, 3], [4, 5, 6]]) A5 = matrix([[6, -1], [3, 2], [0, -3]]) assert A4 * A5 == matrix([[12, -6], [39, -12]]) assert A1 * A3 == A3 * A1 == A3 try: A2 * A2 assert False except ValueError: pass l = [[10, 20, 30], [40, 0, 60], [70, 80, 90]] A6 = matrix(l) assert A6.tolist() == l assert A6 == eval(repr(A6)) A6 = matrix(A6, force_type=float) assert A6 == eval(repr(A6)) assert A6*1j == eval(repr(A6*1j)) assert A3 * 10 == 10 * A3 == A6 assert A2.rows == 3 assert A2.cols == 2 A3.rows = 2 A3.cols = 2 assert len(A3._matrix__data) == 3 assert A4 + A4 == 2*A4 try: A4 + A2 except ValueError: pass assert sum(A1 - A1) == 0 A7 = matrix([[1, 2], [3, 4], [5, 6], [7, 8]]) x = matrix([10, -10]) assert A7*x == matrix([-10, -10, -10, -10]) A8 = ones(5) assert sum((A8 + 1) - (2 - zeros(5))) == 0 assert (1 + ones(4)) / 2 - 1 == zeros(4) assert eye(3)**10 == eye(3) try: A7**2 assert False except ValueError: pass A9 = randmatrix(3) A10 = matrix(A9) A9[0,0] = -100 assert A9 != A10 assert nstr(A9) def test_matrix_slices(): A = matrix([ [1, 2, 3], [4, 5 ,6], [7, 8 ,9]]) V = matrix([1,2,3,4,5]) # Get slice assert A[:,:] == A assert A[:,1] == matrix([[2],[5],[8]]) assert A[2,:] == matrix([[7, 8 ,9]]) assert A[1:3,1:3] == matrix([[5,6],[8,9]]) assert V[2:4] == matrix([3,4]) try: A6 = A[:,1:6] assert False except IndexError: pass # Assign slice with matrix A1 = matrix(3) A1[:,:] = A assert A1[:,:] == matrix([[1, 2, 3], [4, 5 ,6], [7, 8 ,9]]) A1[0,:] = matrix([[10, 11, 12]]) assert A1 == matrix([ [10, 11, 12], [4, 5 ,6], [7, 8 ,9]]) A1[:,2] = matrix([[13], [14], [15]]) assert A1 == matrix([ [10, 11, 13], [4, 5 ,14], [7, 8 ,15]]) A1[:2,:2] = matrix([[16, 17], [18 , 
19]]) assert A1 == matrix([ [16, 17, 13], [18, 19 ,14], [7, 8 ,15]]) V[1:3] = 10 assert V == matrix([1,10,10,4,5]) try: A1[2,:] = A[:,1] assert False except ValueError: pass try: A1[2,1:20] = A[:,:] assert False except IndexError: pass # Assign slice with scalar A1[:,2] = 10 assert A1 == matrix([ [16, 17, 10], [18, 19 ,10], [7, 8 ,10]]) A1[:,:] = 40 for x in A1: assert x == 40 def test_matrix_power(): A = matrix([[1, 2], [3, 4]]) assert A**2 == A*A assert A**3 == A*A*A assert A**-1 == inverse(A) assert A**-2 == inverse(A*A) def test_matrix_transform(): A = matrix([[1, 2], [3, 4], [5, 6]]) assert A.T == A.transpose() == matrix([[1, 3, 5], [2, 4, 6]]) swap_row(A, 1, 2) assert A == matrix([[1, 2], [5, 6], [3, 4]]) l = [1, 2] swap_row(l, 0, 1) assert l == [2, 1] assert extend(eye(3), [1,2,3]) == matrix([[1,0,0,1],[0,1,0,2],[0,0,1,3]]) def test_matrix_conjugate(): A = matrix([[1 + j, 0], [2, j]]) assert A.conjugate() == matrix([[mpc(1, -1), 0], [2, mpc(0, -1)]]) assert A.transpose_conj() == A.H == matrix([[mpc(1, -1), 2], [0, mpc(0, -1)]]) def test_matrix_creation(): assert diag([1, 2, 3]) == matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) A1 = ones(2, 3) assert A1.rows == 2 and A1.cols == 3 for a in A1: assert a == 1 A2 = zeros(3, 2) assert A2.rows == 3 and A2.cols == 2 for a in A2: assert a == 0 assert randmatrix(10) != randmatrix(10) one = mpf(1) assert hilbert(3) == matrix([[one, one/2, one/3], [one/2, one/3, one/4], [one/3, one/4, one/5]]) def test_norms(): # matrix norms A = matrix([[1, -2], [-3, -1], [2, 1]]) assert mnorm(A,1) == 6 assert mnorm(A,inf) == 4 assert mnorm(A,'F') == sqrt(20) # vector norms assert norm(-3) == 3 x = [1, -2, 7, -12] assert norm(x, 1) == 22 assert round(norm(x, 2), 10) == 14.0712472795 assert round(norm(x, 10), 10) == 12.0054633727 assert norm(x, inf) == 12 def test_vector(): x = matrix([0, 1, 2, 3, 4]) assert x == matrix([[0], [1], [2], [3], [4]]) assert x[3] == 3 assert len(x._matrix__data) == 4 assert list(x) == list(range(5)) x[0] = -10 
x[4] = 0 assert x[0] == -10 assert len(x) == len(x.T) == 5 assert x.T*x == matrix([[114]]) def test_matrix_copy(): A = ones(6) B = A.copy() assert A == B B[0,0] = 0 assert A != B def test_matrix_numpy(): from sympy.external import import_module numpy = import_module('numpy') if not numpy: return l = [[1, 2], [3, 4], [5, 6]] a = numpy.matrix(l) assert matrix(l) == matrix(a)
unknown
codeparrot/codeparrot-clean
//---------------------------------------------------------------------------// // Copyright (c) 2013 Kyle Lutz <kyle.r.lutz@gmail.com> // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // // See http://boostorg.github.com/compute for more information. //---------------------------------------------------------------------------// #ifndef BOOST_COMPUTE_KERNEL_HPP #define BOOST_COMPUTE_KERNEL_HPP #include <string> #include <boost/assert.hpp> #include <boost/utility/enable_if.hpp> #include <boost/optional.hpp> #include <boost/compute/cl_ext.hpp> // cl_khr_subgroups #include <boost/compute/config.hpp> #include <boost/compute/exception.hpp> #include <boost/compute/program.hpp> #include <boost/compute/platform.hpp> #include <boost/compute/type_traits/is_fundamental.hpp> #include <boost/compute/detail/diagnostic.hpp> #include <boost/compute/detail/get_object_info.hpp> #include <boost/compute/detail/assert_cl_success.hpp> namespace boost { namespace compute { namespace detail { template<class T> struct set_kernel_arg; } // end detail namespace /// \class kernel /// \brief A compute kernel. /// /// \see command_queue, program class kernel { public: /// Creates a null kernel object. kernel() : m_kernel(0) { } /// Creates a new kernel object for \p kernel. If \p retain is /// \c true, the reference count for \p kernel will be incremented. explicit kernel(cl_kernel kernel, bool retain = true) : m_kernel(kernel) { if(m_kernel && retain){ clRetainKernel(m_kernel); } } /// Creates a new kernel object with \p name from \p program. kernel(const program &program, const std::string &name) { cl_int error = 0; m_kernel = clCreateKernel(program.get(), name.c_str(), &error); if(!m_kernel){ BOOST_THROW_EXCEPTION(opencl_error(error)); } } /// Creates a new kernel object as a copy of \p other. 
kernel(const kernel &other) : m_kernel(other.m_kernel) { if(m_kernel){ clRetainKernel(m_kernel); } } /// Copies the kernel object from \p other to \c *this. kernel& operator=(const kernel &other) { if(this != &other){ if(m_kernel){ clReleaseKernel(m_kernel); } m_kernel = other.m_kernel; if(m_kernel){ clRetainKernel(m_kernel); } } return *this; } #ifndef BOOST_COMPUTE_NO_RVALUE_REFERENCES /// Move-constructs a new kernel object from \p other. kernel(kernel&& other) BOOST_NOEXCEPT : m_kernel(other.m_kernel) { other.m_kernel = 0; } /// Move-assigns the kernel from \p other to \c *this. kernel& operator=(kernel&& other) BOOST_NOEXCEPT { if(m_kernel){ clReleaseKernel(m_kernel); } m_kernel = other.m_kernel; other.m_kernel = 0; return *this; } #endif // BOOST_COMPUTE_NO_RVALUE_REFERENCES /// Destroys the kernel object. ~kernel() { if(m_kernel){ BOOST_COMPUTE_ASSERT_CL_SUCCESS( clReleaseKernel(m_kernel) ); } } #if defined(BOOST_COMPUTE_CL_VERSION_2_1) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED) /// Creates a new kernel object based on a shallow copy of /// the undelying OpenCL kernel object. /// /// \opencl_version_warning{2,1} /// /// \see_opencl21_ref{clCloneKernel} kernel clone() { cl_int ret = 0; cl_kernel k = clCloneKernel(m_kernel, &ret); return kernel(k, false); } #endif // BOOST_COMPUTE_CL_VERSION_2_1 /// Returns a reference to the underlying OpenCL kernel object. cl_kernel& get() const { return const_cast<cl_kernel &>(m_kernel); } /// Returns the function name for the kernel. std::string name() const { return get_info<std::string>(CL_KERNEL_FUNCTION_NAME); } /// Returns the number of arguments for the kernel. size_t arity() const { return get_info<cl_uint>(CL_KERNEL_NUM_ARGS); } /// Returns the program for the kernel. program get_program() const { return program(get_info<cl_program>(CL_KERNEL_PROGRAM)); } /// Returns the context for the kernel. 
context get_context() const { return context(get_info<cl_context>(CL_KERNEL_CONTEXT)); } /// Returns information about the kernel. /// /// \see_opencl_ref{clGetKernelInfo} template<class T> T get_info(cl_kernel_info info) const { return detail::get_object_info<T>(clGetKernelInfo, m_kernel, info); } /// \overload template<int Enum> typename detail::get_object_info_type<kernel, Enum>::type get_info() const; #if defined(BOOST_COMPUTE_CL_VERSION_1_2) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED) /// Returns information about the argument at \p index. /// /// For example, to get the name of the first argument: /// \code /// std::string arg = kernel.get_arg_info<std::string>(0, CL_KERNEL_ARG_NAME); /// \endcode /// /// Note, this function requires that the program be compiled with the /// \c "-cl-kernel-arg-info" flag. For example: /// \code /// program.build("-cl-kernel-arg-info"); /// \endcode /// /// \opencl_version_warning{1,2} /// /// \see_opencl_ref{clGetKernelArgInfo} template<class T> T get_arg_info(size_t index, cl_kernel_arg_info info) const { return detail::get_object_info<T>( clGetKernelArgInfo, m_kernel, info, static_cast<cl_uint>(index) ); } /// \overload template<int Enum> typename detail::get_object_info_type<kernel, Enum>::type get_arg_info(size_t index) const; #endif // BOOST_COMPUTE_CL_VERSION_1_2 /// Returns work-group information for the kernel with \p device. /// /// \see_opencl_ref{clGetKernelWorkGroupInfo} template<class T> T get_work_group_info(const device &device, cl_kernel_work_group_info info) const { return detail::get_object_info<T>(clGetKernelWorkGroupInfo, m_kernel, info, device.id()); } #if defined(BOOST_COMPUTE_CL_VERSION_2_1) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED) /// Returns sub-group information for the kernel with \p device. Returns a null /// optional if \p device is not 2.1 device, or is not 2.0 device with support /// for cl_khr_subgroups extension. 
/// /// \opencl_version_warning{2,1} /// \see_opencl21_ref{clGetKernelSubGroupInfo} /// \see_opencl2_ref{clGetKernelSubGroupInfoKHR} template<class T> boost::optional<T> get_sub_group_info(const device &device, cl_kernel_sub_group_info info, const size_t input_size, const void * input) const { if(device.check_version(2, 1)) { return detail::get_object_info<T>( clGetKernelSubGroupInfo, m_kernel, info, device.id(), input_size, input ); } else if(!device.check_version(2, 0) || !device.supports_extension("cl_khr_subgroups")) { return boost::optional<T>(); } // Only CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE and CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE // are supported in cl_khr_subgroups extension for 2.0 devices. else if(info != CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE && info != CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE) { return boost::optional<T>(); } BOOST_COMPUTE_DISABLE_DEPRECATED_DECLARATIONS(); clGetKernelSubGroupInfoKHR_fn clGetKernelSubGroupInfoKHR_fptr = reinterpret_cast<clGetKernelSubGroupInfoKHR_fn>( reinterpret_cast<size_t>( device.platform().get_extension_function_address("clGetKernelSubGroupInfoKHR") ) ); BOOST_COMPUTE_ENABLE_DEPRECATED_DECLARATIONS(); return detail::get_object_info<T>( clGetKernelSubGroupInfoKHR_fptr, m_kernel, info, device.id(), input_size, input ); } /// \overload template<class T> boost::optional<T> get_sub_group_info(const device &device, cl_kernel_sub_group_info info) const { return get_sub_group_info<T>(device, info, 0, 0); } /// \overload template<class T> boost::optional<T> get_sub_group_info(const device &device, cl_kernel_sub_group_info info, const size_t input) const { return get_sub_group_info<T>(device, info, sizeof(size_t), &input); } #endif // BOOST_COMPUTE_CL_VERSION_2_1 #if defined(BOOST_COMPUTE_CL_VERSION_2_0) && !defined(BOOST_COMPUTE_CL_VERSION_2_1) /// Returns sub-group information for the kernel with \p device. Returns a null /// optional if cl_khr_subgroups extension is not supported by \p device. 
/// /// \opencl_version_warning{2,0} /// \see_opencl2_ref{clGetKernelSubGroupInfoKHR} template<class T> boost::optional<T> get_sub_group_info(const device &device, cl_kernel_sub_group_info info, const size_t input_size, const void * input) const { if(!device.check_version(2, 0) || !device.supports_extension("cl_khr_subgroups")) { return boost::optional<T>(); } BOOST_COMPUTE_DISABLE_DEPRECATED_DECLARATIONS(); clGetKernelSubGroupInfoKHR_fn clGetKernelSubGroupInfoKHR_fptr = reinterpret_cast<clGetKernelSubGroupInfoKHR_fn>( reinterpret_cast<size_t>( device.platform().get_extension_function_address("clGetKernelSubGroupInfoKHR") ) ); BOOST_COMPUTE_ENABLE_DEPRECATED_DECLARATIONS(); return detail::get_object_info<T>( clGetKernelSubGroupInfoKHR_fptr, m_kernel, info, device.id(), input_size, input ); } #endif // defined(BOOST_COMPUTE_CL_VERSION_2_0) && !defined(BOOST_COMPUTE_CL_VERSION_2_1) #if defined(BOOST_COMPUTE_CL_VERSION_2_0) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED) /// \overload template<class T> boost::optional<T> get_sub_group_info(const device &device, cl_kernel_sub_group_info info, const std::vector<size_t> input) const { BOOST_ASSERT(input.size() > 0); return get_sub_group_info<T>(device, info, input.size() * sizeof(size_t), &input[0]); } #endif // BOOST_COMPUTE_CL_VERSION_2_0 /// Sets the argument at \p index to \p value with \p size. /// /// \see_opencl_ref{clSetKernelArg} void set_arg(size_t index, size_t size, const void *value) { BOOST_ASSERT(index < arity()); cl_int ret = clSetKernelArg(m_kernel, static_cast<cl_uint>(index), size, value); if(ret != CL_SUCCESS){ BOOST_THROW_EXCEPTION(opencl_error(ret)); } } /// Sets the argument at \p index to \p value. /// /// For built-in types (e.g. \c float, \c int4_), this is equivalent to /// calling set_arg(index, sizeof(type), &value). /// /// Additionally, this method is specialized for device memory objects /// such as buffer and image2d. 
This allows for them to be passed directly /// without having to extract their underlying cl_mem object. /// /// This method is also specialized for device container types such as /// vector<T> and array<T, N>. This allows for them to be passed directly /// as kernel arguments without having to extract their underlying buffer. /// /// For setting local memory arguments (e.g. "__local float *buf"), the /// local_buffer<T> class may be used: /// \code /// // set argument to a local buffer with storage for 32 float's /// kernel.set_arg(0, local_buffer<float>(32)); /// \endcode /// /// For setting NULL to global and constant memory arguments (C++11): /// \code /// kernel.set_arg(0, nullptr); /// \endcode template<class T> void set_arg(size_t index, const T &value) { // if you get a compilation error pointing here it means you // attempted to set a kernel argument from an invalid type. detail::set_kernel_arg<T>()(*this, index, value); } #ifndef BOOST_NO_CXX11_NULLPTR /// \overload void set_arg(size_t index, std::nullptr_t nul) { set_arg(index, sizeof(cl_mem), NULL); } #endif // BOOST_NO_CXX11_NULLPTR /// \internal_ void set_arg(size_t index, const cl_mem mem) { set_arg(index, sizeof(cl_mem), static_cast<const void *>(&mem)); } /// \internal_ void set_arg(size_t index, const cl_sampler sampler) { set_arg(index, sizeof(cl_sampler), static_cast<const void *>(&sampler)); } /// \internal_ void set_arg_svm_ptr(size_t index, void* ptr) { #ifdef BOOST_COMPUTE_CL_VERSION_2_0 cl_int ret = clSetKernelArgSVMPointer(m_kernel, static_cast<cl_uint>(index), ptr); if(ret != CL_SUCCESS){ BOOST_THROW_EXCEPTION(opencl_error(ret)); } #else (void) index; (void) ptr; BOOST_THROW_EXCEPTION(opencl_error(CL_INVALID_ARG_VALUE)); #endif } #ifndef BOOST_COMPUTE_NO_VARIADIC_TEMPLATES /// Sets the arguments for the kernel to \p args. template<class... T> void set_args(T&&... 
args) { BOOST_ASSERT(sizeof...(T) <= arity()); _set_args<0>(args...); } #endif // BOOST_COMPUTE_NO_VARIADIC_TEMPLATES #if defined(BOOST_COMPUTE_CL_VERSION_2_0) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED) /// Sets additional execution information for the kernel. /// /// \opencl_version_warning{2,0} /// /// \see_opencl2_ref{clSetKernelExecInfo} void set_exec_info(cl_kernel_exec_info info, size_t size, const void *value) { cl_int ret = clSetKernelExecInfo(m_kernel, info, size, value); if(ret != CL_SUCCESS){ BOOST_THROW_EXCEPTION(opencl_error(ret)); } } #endif // BOOST_COMPUTE_CL_VERSION_2_0 /// Returns \c true if the kernel is the same at \p other. bool operator==(const kernel &other) const { return m_kernel == other.m_kernel; } /// Returns \c true if the kernel is different from \p other. bool operator!=(const kernel &other) const { return m_kernel != other.m_kernel; } /// \internal_ operator cl_kernel() const { return m_kernel; } /// \internal_ static kernel create_with_source(const std::string &source, const std::string &name, const context &context) { return program::build_with_source(source, context).create_kernel(name); } private: #ifndef BOOST_COMPUTE_NO_VARIADIC_TEMPLATES /// \internal_ template<size_t N> void _set_args() { } /// \internal_ template<size_t N, class T, class... Args> void _set_args(T&& arg, Args&&... 
rest) { set_arg(N, arg); _set_args<N+1>(rest...); } #endif // BOOST_COMPUTE_NO_VARIADIC_TEMPLATES private: cl_kernel m_kernel; }; inline kernel program::create_kernel(const std::string &name) const { return kernel(*this, name); } /// \internal_ define get_info() specializations for kernel BOOST_COMPUTE_DETAIL_DEFINE_GET_INFO_SPECIALIZATIONS(kernel, ((std::string, CL_KERNEL_FUNCTION_NAME)) ((cl_uint, CL_KERNEL_NUM_ARGS)) ((cl_uint, CL_KERNEL_REFERENCE_COUNT)) ((cl_context, CL_KERNEL_CONTEXT)) ((cl_program, CL_KERNEL_PROGRAM)) ) #ifdef BOOST_COMPUTE_CL_VERSION_1_2 BOOST_COMPUTE_DETAIL_DEFINE_GET_INFO_SPECIALIZATIONS(kernel, ((std::string, CL_KERNEL_ATTRIBUTES)) ) #endif // BOOST_COMPUTE_CL_VERSION_1_2 /// \internal_ define get_arg_info() specializations for kernel #ifdef BOOST_COMPUTE_CL_VERSION_1_2 #define BOOST_COMPUTE_DETAIL_DEFINE_KERNEL_GET_ARG_INFO_SPECIALIZATION(result_type, value) \ namespace detail { \ template<> struct get_object_info_type<kernel, value> { typedef result_type type; }; \ } \ template<> inline result_type kernel::get_arg_info<value>(size_t index) const { \ return get_arg_info<result_type>(index, value); \ } BOOST_COMPUTE_DETAIL_DEFINE_KERNEL_GET_ARG_INFO_SPECIALIZATION(cl_kernel_arg_address_qualifier, CL_KERNEL_ARG_ADDRESS_QUALIFIER) BOOST_COMPUTE_DETAIL_DEFINE_KERNEL_GET_ARG_INFO_SPECIALIZATION(cl_kernel_arg_access_qualifier, CL_KERNEL_ARG_ACCESS_QUALIFIER) BOOST_COMPUTE_DETAIL_DEFINE_KERNEL_GET_ARG_INFO_SPECIALIZATION(std::string, CL_KERNEL_ARG_TYPE_NAME) BOOST_COMPUTE_DETAIL_DEFINE_KERNEL_GET_ARG_INFO_SPECIALIZATION(cl_kernel_arg_type_qualifier, CL_KERNEL_ARG_TYPE_QUALIFIER) BOOST_COMPUTE_DETAIL_DEFINE_KERNEL_GET_ARG_INFO_SPECIALIZATION(std::string, CL_KERNEL_ARG_NAME) #endif // BOOST_COMPUTE_CL_VERSION_1_2 namespace detail { // set_kernel_arg implementation for built-in types template<class T> struct set_kernel_arg { typename boost::enable_if<is_fundamental<T> >::type operator()(kernel &kernel_, size_t index, const T &value) { 
kernel_.set_arg(index, sizeof(T), &value); } }; // set_kernel_arg specialization for char (different from built-in cl_char) template<> struct set_kernel_arg<char> { void operator()(kernel &kernel_, size_t index, const char c) { kernel_.set_arg(index, sizeof(char), &c); } }; } // end detail namespace } // end namespace compute } // end namespace boost #endif // BOOST_COMPUTE_KERNEL_HPP
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/compute/kernel.hpp
''' Boneh-Franklin Identity Based Encryption | From: "D. Boneh, M. Franklin Identity-Based Encryption from the Weil Pairing", Section 4.2. | Published in: Crypto 2003 | Available from: http://.../bfibe.pdf | Notes: This is the IBE . * type: encryption (identity-based) * setting: bilinear groups (asymmetric) :Authors: J. Ayo Akinyele :Date: 2/2011 ''' from charm.toolbox.pairinggroup import ZR,G1,G2,pair from charm.core.math.integer import randomBits,integer,bitsize from charm.toolbox.hash_module import Hash,int2Bytes,integer from charm.toolbox.IBEnc import IBEnc debug = False class IBE_BonehFranklin(IBEnc): """ >>> from charm.toolbox.pairinggroup import PairingGroup >>> group = PairingGroup('MNT224', secparam=1024) >>> ibe = IBE_BonehFranklin(group) >>> (master_public_key, master_secret_key) = ibe.setup() >>> ID = 'user@email.com' >>> private_key = ibe.extract(master_secret_key, ID) >>> msg = b"hello world!!!!!" >>> cipher_text = ibe.encrypt(master_public_key, ID, msg) >>> ibe.decrypt(master_public_key, private_key, cipher_text) b'hello world!!!!!' 
""" def __init__(self, groupObj): IBEnc.__init__(self) global group,h group = groupObj h = Hash(group) def setup(self): s, P = group.random(ZR), group.random(G2) P2 = s * P # choose H1, H2 hash functions pk = { 'P':P, 'P2':P2 } sk = { 's':s } if(debug): print("Public parameters...") group.debug(pk) print("Secret parameters...") group.debug(sk) return (pk, sk) def extract(self, sk, ID): d_ID = sk['s'] * group.hash(ID, G1) k = { 'id':d_ID, 'IDstr':ID } if(debug): print("Key for id => '%s'" % ID) group.debug(k) return k def encrypt(self, pk, ID, M): # check length to make sure it is within n bits Q_id = group.hash(ID, G1) #standard g_id = pair(Q_id, pk['P2']) #choose sig = {0,1}^n where n is # bits sig = integer(randomBits(group.secparam)) r = h.hashToZr(sig, M) enc_M = self.encodeToZn(M) if bitsize(enc_M) / 8 <= group.messageSize(): C = { 'U':r * pk['P'], 'V':sig ^ h.hashToZn(g_id ** r) , 'W':enc_M ^ h.hashToZn(sig) } else: print("Message cannot be encoded.") return None if(debug): print('\nEncrypt...') print('r => %s' % r) print('sig => %s' % sig) print("V' =>", g_id ** r) print('enc_M => %s' % enc_M) group.debug(C) return C def decrypt(self, pk, sk, ct): U, V, W = ct['U'], ct['V'], ct['W'] sig = V ^ h.hashToZn(pair(sk['id'], U)) dec_M = W ^ h.hashToZn(sig) M = self.decodeFromZn(dec_M) r = h.hashToZr(sig, M) if(debug): print('\nDecrypt....') print('V =>', V) print("V' =>", pair(sk['id'], U)) print('sig => %s' % sig) print('r => %s' % r) if U == r * pk['P']: if debug: print("Successful Decryption!!!") return M if debug: print("Decryption Failed!!!") return None def encodeToZn(self, message): assert type(message) == bytes, "Input must be of type bytes" return integer(message) def decodeFromZn(self, element): if type(element) == integer: msg = int2Bytes(element) return msg return None
unknown
codeparrot/codeparrot-clean
import uuid import kombu import lymph from lymph.events.kombu import KombuEventSystem from lymph.discovery.static import StaticServiceRegistryHub from lymph.testing import LymphIntegrationTestCase, AsyncTestsMixin class TestInterface(lymph.Interface): def __init__(self, *args, **kwargs): super(TestInterface, self).__init__(*args, **kwargs) self.collected_events = [] @lymph.event('foo') def on_foo(self, event): self.collected_events.append(event) @lymph.event('retryable_foo', retry=2) def on_retryable_foo(self, event): self.collected_events.append(event) raise Exception() @lymph.event('foo_broadcast', broadcast=True) def on_foo_broadcast(self, event): self.collected_events.append(event) class TestEventBroadcastInterface(lymph.Interface): def __init__(self, *args, **kwargs): super(TestEventBroadcastInterface, self).__init__(*args, **kwargs) self.collected_events = [] @lymph.event('foo_broadcast', broadcast=True) def on_foo_broadcast(self, event): self.collected_events.append(event) class KombuIntegrationTest(LymphIntegrationTestCase, AsyncTestsMixin): use_zookeeper = False def setUp(self): super(KombuIntegrationTest, self).setUp() self.exchange_name = 'test-%s' % uuid.uuid4() self.discovery_hub = StaticServiceRegistryHub() self.the_container, self.the_interface = self.create_container(TestInterface, 'test') self.the_container_broadcast, self.the_interface_broadcast = self.create_container(TestEventBroadcastInterface, 'test') self.lymph_client = self.create_client() def tearDown(self): super(KombuIntegrationTest, self).tearDown() connection = self.get_kombu_connection() exchange = kombu.Exchange(self.exchange_name) exchange(connection).delete() waiting_exchange = kombu.Exchange(self.the_container.events.waiting_exchange.name) waiting_exchange(connection).delete() retry_exchange = kombu.Exchange(self.the_container.events.retry_exchange.name) retry_exchange(connection).delete() for q in ('test-on_foo', 'test-on_retryable_foo'): self.delete_queue(q) def 
delete_queue(self, name): connection = self.get_kombu_connection() queue = kombu.Queue(name) queue(connection).delete() def get_kombu_connection(self): return kombu.Connection(transport='amqp', host='127.0.0.1') def create_event_system(self, **kwargs): return KombuEventSystem(self.get_kombu_connection(), self.exchange_name) def create_registry(self, **kwargs): return self.discovery_hub.create_registry(**kwargs) def received_check(self, n): def check(): return len(self.the_interface.collected_events) == n return check def received_broadcast_check(self, n): def check(): return (len(self.the_interface.collected_events) + len(self.the_interface_broadcast.collected_events)) == n return check def test_emit(self): self.lymph_client.emit('foo', {}) self.assert_eventually_true(self.received_check(1), timeout=10) self.assertEqual(self.the_interface.collected_events[0].evt_type, 'foo') def test_delayed_emit(self): self.lymph_client.emit('foo', {}, delay=.5) self.addCleanup(self.delete_queue, 'foo-wait_500') self.assert_temporarily_true(self.received_check(0), timeout=.2) self.assert_eventually_true(self.received_check(1), timeout=10) self.assertEqual(self.the_interface.collected_events[0].evt_type, 'foo') def test_broadcast_event(self): self.lymph_client.emit('foo_broadcast', {}) self.assert_eventually_true(self.received_broadcast_check(2), timeout=10) self.assertEqual(self.the_interface.collected_events[0].evt_type, 'foo_broadcast') self.assertEqual(self.the_interface_broadcast.collected_events[0].evt_type, 'foo_broadcast') def test_retryable_event(self): self.lymph_client.emit('retryable_foo', {}) self.assert_eventually_true(self.received_check(3), timeout=10)
unknown
codeparrot/codeparrot-clean
/* * Copyright 2002-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.cache.jcache.config; import org.jspecify.annotations.Nullable; import org.springframework.cache.annotation.CachingConfigurer; import org.springframework.cache.interceptor.CacheResolver; /** * Extension of {@link CachingConfigurer} for the JSR-107 implementation. * * <p>To be implemented by classes annotated with * {@link org.springframework.cache.annotation.EnableCaching} that wish * or need to specify explicitly how exception caches are resolved for * annotation-driven cache management. * * <p>See {@link org.springframework.cache.annotation.EnableCaching} for * general examples and context; see {@link #exceptionCacheResolver()} for * detailed instructions. * * @author Stephane Nicoll * @since 4.1 * @see CachingConfigurer * @see org.springframework.cache.annotation.EnableCaching */ public interface JCacheConfigurer extends CachingConfigurer { /** * Return the {@link CacheResolver} bean to use to resolve exception caches for * annotation-driven cache management. Implementations must explicitly declare * {@link org.springframework.context.annotation.Bean @Bean}, for example, * <pre class="code"> * &#064;Configuration * &#064;EnableCaching * public class AppConfig implements JCacheConfigurer { * &#064;Bean // important! * &#064;Override * public CacheResolver exceptionCacheResolver() { * // configure and return CacheResolver instance * } * // ... 
* } * </pre> * See {@link org.springframework.cache.annotation.EnableCaching} for more complete examples. */ default @Nullable CacheResolver exceptionCacheResolver() { return null; } }
java
github
https://github.com/spring-projects/spring-framework
spring-context-support/src/main/java/org/springframework/cache/jcache/config/JCacheConfigurer.java
# frozen_string_literal: true require "active_support/core_ext/hash/except" require "active_support/core_ext/hash/slice" require "active_record/relation/merger" module ActiveRecord module SpawnMethods def spawn # :nodoc: already_in_scope?(model.scope_registry) ? model.all : clone end # Merges in the conditions from <tt>other</tt>, if <tt>other</tt> is an ActiveRecord::Relation. # Returns an array representing the intersection of the resulting records with <tt>other</tt>, if <tt>other</tt> is an array. # # Post.where(published: true).joins(:comments).merge( Comment.where(spam: false) ) # # Performs a single join query with both where conditions. # # recent_posts = Post.order('created_at DESC').first(5) # Post.where(published: true).merge(recent_posts) # # Returns the intersection of all published posts with the 5 most recently created posts. # # (This is just an example. You'd probably want to do this with a single query!) # # Procs will be evaluated by merge: # # Post.where(published: true).merge(-> { joins(:comments) }) # # => Post.where(published: true).joins(:comments) # # This is mainly intended for sharing common conditions between multiple associations. # # For conditions that exist in both relations, those from <tt>other</tt> will take precedence. # To find the intersection of two relations, use QueryMethods#and. def merge(other, *rest) if other.is_a?(Array) records & other elsif other spawn.merge!(other, *rest) else raise ArgumentError, "invalid argument: #{other.inspect}." end end def merge!(other, *rest) # :nodoc: if other.is_a?(Hash) Relation::HashMerger.new(self, other).merge elsif other.is_a?(Relation) Relation::Merger.new(self, other).merge elsif other.respond_to?(:to_proc) instance_exec(&other) else raise ArgumentError, "#{other.inspect} is not an ActiveRecord::Relation" end end # Removes the condition(s) specified in +skips+ from the query. 
# # Post.order('id asc').except(:order) # removes the order condition # Post.where('id > 10').order('id asc').except(:where) # removes the where condition but keeps the order def except(*skips) relation_with values.except(*skips) end # Keeps only the condition(s) specified in +onlies+ in the query, removing all others. # # Post.order('id asc').only(:where) # keeps only the where condition, removes the order # Post.order('id asc').only(:where, :order) # keeps only the where and order conditions def only(*onlies) relation_with values.slice(*onlies) end private def relation_with(values) result = spawn result.instance_variable_set(:@values, values) result end end end
ruby
github
https://github.com/rails/rails
activerecord/lib/active_record/relation/spawn_methods.rb
/* Copyright 2021 - 2025 R. Thomas * Copyright 2021 - 2025 Quarkslab * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "LIEF/Visitor.hpp" #include "LIEF/PE/signature/attributes/PKCS9AtSequenceNumber.hpp" namespace LIEF { namespace PE { void PKCS9AtSequenceNumber::accept(Visitor& visitor) const { visitor.visit(*this); } std::string PKCS9AtSequenceNumber::print() const { return std::to_string(number()); } } }
cpp
github
https://github.com/nodejs/node
deps/LIEF/src/PE/signature/attributes/PKCS9AtSequenceNumber.cpp
""" Python 'utf-16' Codec Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """ import codecs, sys ### Codec APIs encode = codecs.utf_16_encode def decode(input, errors='strict'): return codecs.utf_16_decode(input, errors, True) class IncrementalEncoder(codecs.IncrementalEncoder): def __init__(self, errors='strict'): codecs.IncrementalEncoder.__init__(self, errors) self.encoder = None def encode(self, input, final=False): if self.encoder is None: result = codecs.utf_16_encode(input, self.errors)[0] if sys.byteorder == 'little': self.encoder = codecs.utf_16_le_encode else: self.encoder = codecs.utf_16_be_encode return result return self.encoder(input, self.errors)[0] def reset(self): codecs.IncrementalEncoder.reset(self) self.encoder = None def getstate(self): # state info we return to the caller: # 0: stream is in natural order for this platform # 2: endianness hasn't been determined yet # (we're never writing in unnatural order) return (2 if self.encoder is None else 0) def setstate(self, state): if state: self.encoder = None else: if sys.byteorder == 'little': self.encoder = codecs.utf_16_le_encode else: self.encoder = codecs.utf_16_be_encode class IncrementalDecoder(codecs.BufferedIncrementalDecoder): def __init__(self, errors='strict'): codecs.BufferedIncrementalDecoder.__init__(self, errors) self.decoder = None def _buffer_decode(self, input, errors, final): if self.decoder is None: (output, consumed, byteorder) = \ codecs.utf_16_ex_decode(input, errors, 0, final) if byteorder == -1: self.decoder = codecs.utf_16_le_decode elif byteorder == 1: self.decoder = codecs.utf_16_be_decode elif consumed >= 2: raise UnicodeError("UTF-16 stream does not start with BOM") return (output, consumed) return self.decoder(input, self.errors, final) def reset(self): codecs.BufferedIncrementalDecoder.reset(self) self.decoder = None class StreamWriter(codecs.StreamWriter): def __init__(self, stream, errors='strict'): 
codecs.StreamWriter.__init__(self, stream, errors) self.encoder = None def reset(self): codecs.StreamWriter.reset(self) self.encoder = None def encode(self, input, errors='strict'): if self.encoder is None: result = codecs.utf_16_encode(input, errors) if sys.byteorder == 'little': self.encoder = codecs.utf_16_le_encode else: self.encoder = codecs.utf_16_be_encode return result else: return self.encoder(input, errors) class StreamReader(codecs.StreamReader): def reset(self): codecs.StreamReader.reset(self) try: del self.decode except AttributeError: pass def decode(self, input, errors='strict'): (object, consumed, byteorder) = \ codecs.utf_16_ex_decode(input, errors, 0, False) if byteorder == -1: self.decode = codecs.utf_16_le_decode elif byteorder == 1: self.decode = codecs.utf_16_be_decode elif consumed>=2: raise UnicodeError,"UTF-16 stream does not start with BOM" return (object, consumed) ### encodings module API def getregentry(): return codecs.CodecInfo( name='utf-16', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
unknown
codeparrot/codeparrot-clean
% This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it. **Description** Simplifies the input geometry by applying the Douglas-Peucker algorithm with a specified tolerance. Vertices that fall within the tolerance distance from the simplified shape are removed. Note that the resulting geometry may be invalid, even if the original input was valid.
unknown
github
https://github.com/elastic/elasticsearch
docs/reference/query-languages/esql/_snippets/functions/description/st_simplify.md
import {
    codeFixAll,
    createCodeFixAction,
    registerCodeFix,
} from "../_namespaces/ts.codefix.js";
import {
    cast,
    Diagnostics,
    factory,
    getTokenAtPosition,
    isIdentifier,
    isPropertySignature,
    isTypeLiteralNode,
    SourceFile,
    textChanges,
    TypeLiteralNode,
    TypeNode,
} from "../_namespaces/ts.js";

// Codefix: when a type name is used as a property key inside a type literal
// (triggering the "only refers to a type" diagnostic), rewrite the literal
// `{ K: T }` into the mapped type `{ [P in K]: T }`.
const fixId = "convertLiteralTypeToMappedType";
const errorCodes = [Diagnostics._0_only_refers_to_a_type_but_is_being_used_as_a_value_here_Did_you_mean_to_use_1_in_0.code];

registerCodeFix({
    errorCodes,
    getCodeActions: function getCodeActionsToConvertLiteralTypeToMappedType(context) {
        const { sourceFile, span } = context;
        const info = getInfo(sourceFile, span.start);
        if (!info) {
            return undefined;
        }
        const { name, constraint } = info;
        const changes = textChanges.ChangeTracker.with(context, t => doChange(t, sourceFile, info));
        return [createCodeFixAction(fixId, changes, [Diagnostics.Convert_0_to_1_in_0, constraint, name], fixId, Diagnostics.Convert_all_type_literals_to_mapped_type)];
    },
    fixIds: [fixId],
    getAllCodeActions: context =>
        codeFixAll(context, errorCodes, (changes, diag) => {
            const info = getInfo(diag.file, diag.start);
            if (info) {
                doChange(changes, diag.file, info);
            }
        }),
});

// Everything doChange needs to build the replacement mapped type.
interface Info {
    // The `{ ... }` type literal that gets replaced wholesale.
    container: TypeLiteralNode;
    // The offending property's declared type, reused as the mapped value type.
    typeNode: TypeNode | undefined;
    // The identifier used after `in` as the constraint.
    constraint: string;
    // Fresh type-parameter name for the mapped type.
    name: string;
}

// Extracts the property signature at *pos* and derives the pieces of the
// mapped type; returns undefined when the token is not an identifier.
function getInfo(sourceFile: SourceFile, pos: number): Info | undefined {
    const token = getTokenAtPosition(sourceFile, pos);
    if (isIdentifier(token)) {
        const propertySignature = cast(token.parent.parent, isPropertySignature);
        const propertyName = token.getText(sourceFile);
        return {
            container: cast(propertySignature.parent, isTypeLiteralNode),
            typeNode: propertySignature.type,
            constraint: propertyName,
            // Pick a type-parameter name that cannot collide with the constraint.
            name: propertyName === "K" ? "P" : "K",
        };
    }
    return undefined;
}

// Replaces the whole type literal with `{ [name in constraint]: typeNode }`.
function doChange(changes: textChanges.ChangeTracker, sourceFile: SourceFile, { container, typeNode, constraint, name }: Info): void {
    changes.replaceNode(
        sourceFile,
        container,
        factory.createMappedTypeNode(
            /*readonlyToken*/ undefined,
            factory.createTypeParameterDeclaration(/*modifiers*/ undefined, name, factory.createTypeReferenceNode(constraint)),
            /*nameType*/ undefined,
            /*questionToken*/ undefined,
            typeNode,
            /*members*/ undefined,
        ),
    );
}
typescript
github
https://github.com/microsoft/TypeScript
src/services/codefixes/convertLiteralTypeToMappedType.ts
"""Test zha lock.""" from unittest.mock import patch import zigpy.zcl.clusters.closures as closures import zigpy.zcl.clusters.general as general import zigpy.zcl.foundation as zcl_f from homeassistant.components.lock import DOMAIN from homeassistant.const import STATE_LOCKED, STATE_UNAVAILABLE, STATE_UNLOCKED from .common import ( async_enable_traffic, async_init_zigpy_device, find_entity_id, make_attribute, make_zcl_header, ) from tests.common import mock_coro LOCK_DOOR = 0 UNLOCK_DOOR = 1 async def test_lock(hass, config_entry, zha_gateway): """Test zha lock platform.""" # create zigpy device zigpy_device = await async_init_zigpy_device( hass, [closures.DoorLock.cluster_id, general.Basic.cluster_id], [], None, zha_gateway, ) # load up lock domain await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN) await hass.async_block_till_done() cluster = zigpy_device.endpoints.get(1).door_lock zha_device = zha_gateway.get_device(zigpy_device.ieee) entity_id = await find_entity_id(DOMAIN, zha_device, hass) assert entity_id is not None # test that the lock was created and that it is unavailable assert hass.states.get(entity_id).state == STATE_UNAVAILABLE # allow traffic to flow through the gateway and device await async_enable_traffic(hass, zha_gateway, [zha_device]) # test that the state has changed from unavailable to unlocked assert hass.states.get(entity_id).state == STATE_UNLOCKED # set state to locked attr = make_attribute(0, 1) hdr = make_zcl_header(zcl_f.Command.Report_Attributes) cluster.handle_message(hdr, [[attr]]) await hass.async_block_till_done() assert hass.states.get(entity_id).state == STATE_LOCKED # set state to unlocked attr.value.value = 2 cluster.handle_message(hdr, [[attr]]) await hass.async_block_till_done() assert hass.states.get(entity_id).state == STATE_UNLOCKED # lock from HA await async_lock(hass, cluster, entity_id) # unlock from HA await async_unlock(hass, cluster, entity_id) async def async_lock(hass, cluster, entity_id): 
"""Test lock functionality from hass.""" with patch( "zigpy.zcl.Cluster.request", return_value=mock_coro([zcl_f.Status.SUCCESS]) ): # lock via UI await hass.services.async_call( DOMAIN, "lock", {"entity_id": entity_id}, blocking=True ) assert cluster.request.call_count == 1 assert cluster.request.call_args[0][0] is False assert cluster.request.call_args[0][1] == LOCK_DOOR async def async_unlock(hass, cluster, entity_id): """Test lock functionality from hass.""" with patch( "zigpy.zcl.Cluster.request", return_value=mock_coro([zcl_f.Status.SUCCESS]) ): # lock via UI await hass.services.async_call( DOMAIN, "unlock", {"entity_id": entity_id}, blocking=True ) assert cluster.request.call_count == 1 assert cluster.request.call_args[0][0] is False assert cluster.request.call_args[0][1] == UNLOCK_DOOR
unknown
codeparrot/codeparrot-clean
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # class ModuleDocFragment(object): # Ansible Tower documentation fragment DOCUMENTATION = ''' options: tower_host: description: - URL to your Tower instance. required: False default: null tower_username: description: - Username for your Tower instance. required: False default: null tower_password: description: - Password for your Tower instance. required: False default: null tower_verify_ssl: description: - Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: False default: True tower_config_file: description: - Path to the Tower config file. See notes. required: False default: null requirements: - "python >= 2.6" - "ansible-tower-cli >= 3.0.2" notes: - If no I(config_file) is provided we will attempt to use the tower-cli library defaults to find your Tower host information. - I(config_file) should contain Tower configuration in the following format host=hostname username=username password=password '''
unknown
codeparrot/codeparrot-clean
# -*- mode:python -*- # Copyright (c) 2009 The University of Edinburgh # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Timothy M. Jones from m5.SimObject import SimObject from m5.params import * class PowerTLB(SimObject): type = 'PowerTLB' cxx_class = 'PowerISA::TLB' cxx_header = 'arch/power/tlb.hh' size = Param.Int(64, "TLB size")
unknown
codeparrot/codeparrot-clean
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Add ``fail_fast`` column to dag table. Revision ID: 69ddce9a7247 Revises: 5cc8117e9285 Create Date: 2025-10-16 03:22:59.016272 """ from __future__ import annotations import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = "69ddce9a7247" down_revision = "5cc8117e9285" branch_labels = None depends_on = None airflow_version = "3.2.0" def upgrade(): """Add fail_fast column to dag table.""" op.add_column("dag", sa.Column("fail_fast", sa.Boolean(), nullable=False, server_default="0")) def downgrade(): """Drop fail_fast column in dag table.""" op.drop_column("dag", "fail_fast")
python
github
https://github.com/apache/airflow
airflow-core/src/airflow/migrations/versions/0090_3_2_0_add_fail_fast_to_dag_table.py
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import nibabel as nib

DEBUG = False

# Makes a ton of assumptions about the XML data.
# Totally customize for MRCAP data.  Needs to be checked with
# anything that's not the original 109 data files on braingraph1


class ROIData:
    """Class to read ROI data derived from MRCAP."""

    def __init__(self, filename):
        """Load the ROI volume from *filename* (any format nibabel can read)."""
        self.data = nib.load(filename).get_data()
        # Fix: was a Python 2 print statement (SyntaxError under Python 3).
        print("Data shape: ", self.data.shape)

    def get(self, index):
        """
        Returns the ROI associated with a voxel.  Either returns 0 if out
        of the data space or returns ROI from 1 to 35 or 101 to 135.
        Caller must translate so that the weirdness is not hidden inside
        this function.

        index -- 3-element sequence of voxel coordinates.
        """
        # RBTODO experiment with -1 on index
        if index[0] >= self.data.shape[0] or \
           index[1] >= self.data.shape[1] or \
           index[2] >= self.data.shape[2]:
            if DEBUG:
                print("[Debug]: Fiber at index", index, "not in roi")
            return 0
        else:
            return self.data[index[0], index[1], index[2]]
unknown
codeparrot/codeparrot-clean
"""Build a Java project into a jar (Windows-oriented helper script).

Fixes applied: Python 2 print statements converted to the print()
function, and javac/jar paths built from components via os.path.join
instead of embedding literal backslashes in string literals.
"""
import os
import subprocess
import sys
import shutil

# parse command line
if len(sys.argv) < 5:
    print("usage: " + sys.argv[0] + " <x86|x64> <BinDir> <SourceDir> <Name> [NeededJarFiles] [MainClass]")
    exit(1)

platform_string = ""
if sys.argv[1] == "" or sys.argv[1] == "x86":
    platform_string = "Win32"
elif sys.argv[1] == "x64":
    platform_string = "x64"
else:
    print('First argument must be "x86", "x64" or empty (x86)')
    exit(1)

bin_dir = os.path.abspath(sys.argv[2])
source_dir = os.path.abspath(sys.argv[3])
proj_name = sys.argv[4]
int_dir = os.path.join(bin_dir, "Intermediate", platform_string + "-Release", proj_name)

needed_jar_files = ""
main_class = ""
if len(sys.argv) > 5:
    needed_jar_files = sys.argv[5]
if len(sys.argv) > 6:
    main_class = sys.argv[6]

RELEASE_DIR = os.path.abspath(os.path.join(bin_dir, platform_string + "-Release"))
DEBUG_DIR = os.path.abspath(os.path.join(bin_dir, platform_string + "-Debug"))
JAR_FILE = os.path.join(RELEASE_DIR, proj_name + '.jar')
BATCH_FILE = os.path.join(RELEASE_DIR, proj_name + '.bat')

# make sure JAVA_HOME is set
JAVA_HOME = os.path.expandvars("$JAVA_HOME")
if JAVA_HOME == "":
    print("JAVA_HOME is not set!")
    exit(1)

CLASS_PATH = os.path.expandvars("$CLASSPATH")
TEMP_BUILD_DIR = int_dir

# create bin dir if needed
if not os.path.exists(RELEASE_DIR):
    os.makedirs(RELEASE_DIR)
if not os.path.exists(DEBUG_DIR):
    os.makedirs(DEBUG_DIR)
if not os.path.exists(TEMP_BUILD_DIR):
    os.makedirs(TEMP_BUILD_DIR)

# build
# Fix: 'bin\javac.exe' relied on '\j' not being a recognized escape
# sequence; join path components explicitly instead.
cmd = [os.path.join(JAVA_HOME, 'bin', 'javac.exe')]
if needed_jar_files != "":
    # add class path
    cp = ''
    needed_list = needed_jar_files.split(';')
    for needed in needed_list:
        cp += os.path.join(RELEASE_DIR, needed) + ';'
    cp += CLASS_PATH + ';'
    cmd.append('-cp')
    cmd.append(cp)
cmd.append('-d')
cmd.append(TEMP_BUILD_DIR)
cmd.append('-Xlint:unchecked')
cmd.append(os.path.join(source_dir, '*.java'))
subprocess.check_call(cmd)

# create JAR file
cmd = [os.path.join(JAVA_HOME, 'bin', 'jar.exe')]
need_manifest = main_class != "" or needed_jar_files != ""
if need_manifest:
    cmd.append('-cfm')
    # add manifest
    TEMP_MANIFEST_FILE = os.path.join(int_dir, "Manifest.txt")
    manifest_file = open(TEMP_MANIFEST_FILE, 'w')
    if needed_jar_files != "":
        manifest_file.write("Class-Path:")
        needed_list = needed_jar_files.split(';')
        for needed in needed_list:
            manifest_file.write(" " + needed)
        manifest_file.write('\n')
    if main_class != "":
        manifest_file.write("Main-Class: " + main_class + "\n")
    manifest_file.close()
else:
    cmd.append('-cf')
cmd.append(JAR_FILE)
if need_manifest:
    cmd.append(TEMP_MANIFEST_FILE)
cmd.append('-C')
cmd.append(TEMP_BUILD_DIR)
cmd.append('.')
subprocess.check_call(cmd)

# copy jar to Bin/Debug
shutil.copy(JAR_FILE, DEBUG_DIR)

# create batch file (by default, windows does not open a console when double-clicking jar files)
if main_class != "":
    print("Creating batch file...")
    batch = open(BATCH_FILE, 'w')
    batch.write('java -Xmx768m -jar ' + proj_name + '.jar\n')
    batch.close()
    # copy batch to Bin/Debug (batch file only exists when main_class is set)
    shutil.copy(BATCH_FILE, DEBUG_DIR)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.

"""
High level operations on subusers.
"""

#external imports
import sys
#internal imports
import subuserlib.classes.user,subuserlib.resolve,subuserlib.classes.subuser,subuserlib.verify,subuserlib.update


def add(user, subuserName, imageSourceIdentifier):
    """Register a new subuser backed by the given image source.

    Exits the process if the subuser already exists or the image source
    cannot be resolved; rolls the registry back if the image build fails.
    """
    if subuserName in user.getRegistry().getSubusers():
        sys.exit("A subuser named "+subuserName+" already exists.")
    user.getRegistry().logChange("Adding subuser "+subuserName+" "+imageSourceIdentifier)
    try:
        try:
            imageSource = subuserlib.resolve.resolveImageSource(user, imageSourceIdentifier)
        except KeyError:
            sys.exit("Could not add subuser.  The image source "+imageSourceIdentifier+" does not exist.")
        user.getRegistry().getSubusers()[subuserName] = subuserlib.classes.subuser.Subuser(user, subuserName, imageSource, None, False, False)
        subuserlib.verify.verify(user)
        user.getRegistry().commit()
    except subuserlib.classes.dockerDaemon.ImageBuildException as e:
        print("Adding subuser failed.")
        print(str(e))
        # Roll back the registry to the last committed state.
        subuserlib.update.checkoutNoCommit(user, "HEAD")


def remove(user, subuserNames):
    """Remove the named subusers from the registry.

    Leaves home directories and images in place, logging hints on how to
    clean them up manually.
    """
    didSomething = False
    for subuserName in subuserNames:
        if subuserName in user.getRegistry().getSubusers():
            user.getRegistry().logChange("Removing subuser "+str(subuserName))
            try:
                subuserHome = user.getRegistry().getSubusers()[subuserName].getHomeDirOnHost()
                if subuserHome:
                    # Fix: also corrected the "issule" typo in this user-facing hint.
                    user.getRegistry().logChange(" If you wish to remove the subusers home directory, issue the command $ rm -r "+subuserHome)
            except Exception:
                # Fix: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; the home-dir hint stays
                # best-effort but only for ordinary errors now.
                pass
            user.getRegistry().logChange(" If you wish to remove the subusers image, issue the command $ subuser remove-old-images")
            del user.getRegistry().getSubusers()[subuserName]
            didSomething = True
        else:
            print("Cannot remove: subuser "+subuserName+" does not exist.")
    if didSomething:
        subuserlib.verify.verify(user)
        user.getRegistry().commit()


def setExecutableShortcutInstalled(user, subuserName, installed):
    """Install or remove the executable shortcut for a subuser and commit."""
    if installed:
        user.getRegistry().logChange("Creating shortcut for subuser "+subuserName)
    else:
        user.getRegistry().logChange("Removing shortcut for subuser "+subuserName)
    user.getRegistry().getSubusers()[subuserName].setExecutableShortcutInstalled(installed)
    subuserlib.verify.verify(user)
    user.getRegistry().commit()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Fix: replaced the Python 2-only cStringIO module with io.StringIO so the
# test runs under Python 3; all other behavior is unchanged.
import io
import logging
import os
import sys
import unittest

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)

from procfs import ProcMaps


class ProcMapsTest(unittest.TestCase):
    """Checks ProcMaps parsing and filtering of /proc/<pid>/maps content."""

    # One fixed maps dump covering anonymous, file-backed, shared and
    # special ([heap]/[vsyscall]) regions.
    _TEST_PROCMAPS = '\n'.join([
        '00000000-00001000 r--p 00000000 fc:00 0',
        '0080b000-0080c000 r-xp 0020b000 fc:00 2231329'
        '            /usr/bin/some',
        '0080c000-0080f000 ---p 0020c000 fc:00 2231329'
        '            /usr/bin/some',
        '0100a000-0100c000 r-xp 0120a000 fc:00 22381'
        '            /usr/bin/chrome',
        '0100c000-0100f000 ---p 0120c000 fc:00 22381'
        '            /usr/bin/chrome',
        '0237d000-02a9b000 rw-p 00000000 00:00 0'
        '            [heap]',
        '7fb920e6d000-7fb920e85000 r-xp 00000000 fc:00 263482'
        '            /lib/x86_64-linux-gnu/libpthread-2.15.so',
        '7fb920e85000-7fb921084000 ---p 00018000 fc:00 263482'
        '            /lib/x86_64-linux-gnu/libpthread-2.15.so',
        '7fb9225f4000-7fb922654000 rw-s 00000000 00:04 19660808'
        '            /SYSV00000000 (deleted)',
        'ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0'
        '            [vsyscall]',
    ])

    # Expected parse of each line above, as positional tuples:
    # (begin, end, r, w, x, private, offset, major, minor, inode, name).
    _EXPECTED = [
        (0x0, 0x1000, 'r', '-', '-', 'p', 0x0, 'fc', '00', 0, ''),
        (0x80b000, 0x80c000, 'r', '-', 'x', 'p', 0x20b000,
         'fc', '00', 2231329, '/usr/bin/some'),
        (0x80c000, 0x80f000, '-', '-', '-', 'p', 0x20c000,
         'fc', '00', 2231329, '/usr/bin/some'),
        (0x100a000, 0x100c000, 'r', '-', 'x', 'p', 0x120a000,
         'fc', '00', 22381, '/usr/bin/chrome'),
        (0x100c000, 0x100f000, '-', '-', '-', 'p', 0x120c000,
         'fc', '00', 22381, '/usr/bin/chrome'),
        (0x237d000, 0x2a9b000, 'r', 'w', '-', 'p', 0x0,
         '00', '00', 0, '[heap]'),
        (0x7fb920e6d000, 0x7fb920e85000, 'r', '-', 'x', 'p', 0x0,
         'fc', '00', 263482, '/lib/x86_64-linux-gnu/libpthread-2.15.so'),
        (0x7fb920e85000, 0x7fb921084000, '-', '-', '-', 'p', 0x18000,
         'fc', '00', 263482, '/lib/x86_64-linux-gnu/libpthread-2.15.so'),
        (0x7fb9225f4000, 0x7fb922654000, 'r', 'w', '-', 's', 0x0,
         '00', '04', 19660808, '/SYSV00000000 (deleted)'),
        (0xffffffffff600000, 0xffffffffff601000, 'r', '-', 'x', 'p', 0x0,
         '00', '00', 0, '[vsyscall]'),
    ]

    @staticmethod
    def _expected_as_dict(index):
        """Return _EXPECTED[index] reshaped into ProcMaps' as_dict() form."""
        return {
            'begin': ProcMapsTest._EXPECTED[index][0],
            'end': ProcMapsTest._EXPECTED[index][1],
            'readable': ProcMapsTest._EXPECTED[index][2],
            'writable': ProcMapsTest._EXPECTED[index][3],
            'executable': ProcMapsTest._EXPECTED[index][4],
            'private': ProcMapsTest._EXPECTED[index][5],
            'offset': ProcMapsTest._EXPECTED[index][6],
            'major': ProcMapsTest._EXPECTED[index][7],
            'minor': ProcMapsTest._EXPECTED[index][8],
            'inode': ProcMapsTest._EXPECTED[index][9],
            'name': ProcMapsTest._EXPECTED[index][10],
        }

    def test_load(self):
        """Every line parses into the expected dict, in order."""
        maps = ProcMaps.load_file(io.StringIO(self._TEST_PROCMAPS))
        for index, entry in enumerate(maps):
            self.assertEqual(entry.as_dict(), self._expected_as_dict(index))

    def test_constants(self):
        """The `constants` filter selects the read-only / non-writable entries."""
        maps = ProcMaps.load_file(io.StringIO(self._TEST_PROCMAPS))
        selected = [0, 2, 4, 7]
        for index, entry in enumerate(maps.iter(ProcMaps.constants)):
            self.assertEqual(entry.as_dict(),
                             self._expected_as_dict(selected[index]))

    def test_executable(self):
        """The `executable` filter selects the x-permission entries."""
        maps = ProcMaps.load_file(io.StringIO(self._TEST_PROCMAPS))
        selected = [1, 3, 6, 9]
        for index, entry in enumerate(maps.iter(ProcMaps.executable)):
            self.assertEqual(entry.as_dict(),
                             self._expected_as_dict(selected[index]))

    def test_executable_and_constants(self):
        """The combined filter is the union of the two selections above."""
        maps = ProcMaps.load_file(io.StringIO(self._TEST_PROCMAPS))
        selected = [0, 1, 2, 3, 4, 6, 7, 9]
        for index, entry in enumerate(maps.iter(ProcMaps.executable_and_constants)):
            self.assertEqual(entry.as_dict(),
                             self._expected_as_dict(selected[index]))


if __name__ == '__main__':
    logging.basicConfig(
        level=logging.DEBUG if '-v' in sys.argv else logging.ERROR,
        format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
    unittest.main()
unknown
codeparrot/codeparrot-clean
# ###################################################################### # Copyright (c) 2014, Brookhaven Science Associates, Brookhaven # # National Laboratory. All rights reserved. # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # # # # * Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # # # * Redistributions in binary form must reproduce the above copyright # # notice this list of conditions and the following disclaimer in # # the documentation and/or other materials provided with the # # distribution. # # # # * Neither the name of the Brookhaven Science Associates, Brookhaven # # National Laboratory nor the names of its contributors may be used # # to endorse or promote products derived from this software without # # specific prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING # # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. 
# ########################################################################

__author__ = 'Li Li'

import six
import h5py
import numpy as np
import os
from collections import OrderedDict

from atom.api import Atom, Str, observe, Typed, Dict, List, Int, Enum

import logging
logger = logging.getLogger(__name__)


class FileIOModel(Atom):
    """
    This class focuses on file input and output.

    Attributes
    ----------
    working_directory : str
    file_names : list
        list of loaded files
    data_file : str
    file_path : str
    data : array
        Experiment data.
    load_status : str
        Description of file loading status
    data_dict : dict
        Dict has filename as key and group data as value.
    """
    working_directory = Str()
    data_file = Str()
    file_names = List()
    file_path = Str()
    data = Typed(np.ndarray)
    load_status = Str()
    data_dict = Dict()
    img_dict = Dict()
    img_dict_flat = Dict()

    # NOTE(review): class-level OrderedDict is shared across all instances
    # of FileIOModel — confirm a single instance is ever created.
    data_sets = OrderedDict()

    def __init__(self, working_directory=None, data_file=None, *args, **kwargs):
        # Default to the user's home directory when none is given.
        if working_directory is None:
            working_directory = os.path.expanduser('~')

        # Suppress atom change notifications while seeding initial values.
        with self.suppress_notifications():
            self.working_directory = working_directory
            self.data_file = data_file

    @observe('file_names')
    def update_more_data(self, change):
        """Reload data_sets whenever the list of file names changes."""
        self.data_sets.clear()
        self.file_names.sort()
        logger.info('Loaded files : {}'.format(self.file_names))

        #detID = 'det1'
        detID = 'detector'

        for fname in self.file_names:
            try:
                self.file_path = os.path.join(self.working_directory, fname)
                # NOTE(review): opened read-write ('r+') and never closed;
                # the h5py group kept in data_dict holds the file open.
                f = h5py.File(self.file_path, 'r+')
                #data = f['MAPS']
                #data = f['xrfmap']
                data = f['entry/instrument']

                exp_data = np.asarray(data[detID]['data'])
                logger.info('File : {} with total counts {}'.format(fname,
                                                                    np.sum(exp_data)))
                #exp_data = np.reshape(exp_data, [2, 4, 4096])
                # dict has filename as key and group data as value
                self.data_dict.update({fname: data})
                DS = DataSelection(filename=fname,
                                   raw_data=exp_data)
                self.data_sets.update({fname: DS})

                # get roi sum data
                #roi_result = get_roi_sum(data[detID]['roi_name'].value,
                #                         data[detID]['roi_limits'].value,
                #                         data[detID]['counts'])
                #self.img_dict_flat.update({fname.split('.')[0]+'_roi': roi_result})

                # read fitting results
                if 'xrf_fit' in data[detID]:
                    fit_result = get_fit_data(data[detID]['xrf_fit_name'].value,
                                              data[detID]['xrf_fit'].value)
                    self.img_dict_flat.update({fname.split('.')[0]+'_fit': fit_result})
            except ValueError:
                # Skip files whose layout does not match the expected schema.
                continue

    def get_roi_data(self):
        """
        Get roi sum data from data_dict.
        """
        # for k, v in six.iteritems(self.data_dict):
        #     roi_dict = {d[0]: d[1] for d in zip(v['channel_names'], v['XRF_roi'])}
        #     self.img_dict.update({str(k): {'roi_sum': roi_dict}})
        #
        #     self.img_dict_flat.update({str(k).split('.')[0]+'_roi_sum': roi_dict})
        pass


def get_roi_sum(namelist, data_range, data):
    """Sum counts over each [low, high) channel window, keyed by ROI name.

    Spaces in names are replaced with underscores in the returned keys.
    """
    data_temp = dict()
    for i in range(len(namelist)):
        lowv = data_range[i, 0]
        highv = data_range[i, 1]
        data_sum = np.sum(data[:, :, lowv: highv], axis=2)
        data_temp.update({namelist[i].replace(' ', '_'): data_sum})
    return data_temp


def get_fit_data(namelist, data):
    """
    Read fit data from h5 file. This is to be moved to filestore part.

    Parameters
    ---------
    namelist : list
        list of str for element lines
    data : array
        3D array of fitting results
    """
    data_temp = dict()
    for i in range(len(namelist)):
        data_temp.update({namelist[i]: data[i, :, :]})
    return data_temp
    #self.img_dict_flat.update({fname.split('.')[0]: data_temp})


# Plot-mode labels backing DataSelection.plot_choice / plot_index.
plot_as = ['Sum', 'Point', 'Roi']


class DataSelection(Atom):
    """
    Holds one loaded data set plus the user's current plotting selection.

    Attributes
    ----------
    filename : str
    plot_choice : enum
        methods ot plot
    point1 : str
        starting position
    point2 : str
        ending position
    raw_data : array
        experiment 3D data
    data : array
    plot_index : int
    """
    filename = Str()
    plot_choice = Enum(*plot_as)
    point1 = Str('0, 0')
    point2 = Str('0, 0')
    #roi = List()
    raw_data = Typed(np.ndarray)
    data = Typed(np.ndarray)
    plot_index = Int(0)

    fit_name = Str()
    fit_data = Typed(np.ndarray)

    @observe('plot_index', 'point1', 'point2')
    def _update_roi(self, change):
        # plot_index selects the mode: 0 = none, 1 = summed spectrum,
        # 2 = single point, otherwise a point1..point2 region.
        if self.plot_index == 0:
            return
        elif self.plot_index == 1:
            self.data = self.get_sum()
        elif self.plot_index == 2:
            SC = SpectrumCalculator(self.raw_data, pos1=self.point1)
            self.data = SC.get_spectrum()
        else:
            SC = SpectrumCalculator(self.raw_data,
                                    pos1=self.point1,
                                    pos2=self.point2)
            self.data = SC.get_spectrum()

    def get_sum(self):
        """Return the spectrum summed over the whole raw_data volume."""
        SC = SpectrumCalculator(self.raw_data)
        return SC.get_spectrum()


class SpectrumCalculator(object):
    """
    Calculate summed spectrum according to starting and ending positions.
Attributes ---------- data : array 3D array of experiment data pos1 : str starting position pos2 : str ending position """ def __init__(self, data, pos1=None, pos2=None): self.data = data if pos1: self.pos1 = self._parse_pos(pos1) else: self.pos1 = None if pos2: self.pos2 = self._parse_pos(pos2) else: self.pos2 = None def _parse_pos(self, pos): if isinstance(pos, list): return pos return [int(v) for v in pos.split(',')] def get_spectrum(self): if not self.pos1 and not self.pos2: return np.sum(self.data, axis=(0, 1)) elif self.pos1 and not self.pos2: print('shape: {}'.format(self.data.shape)) print('pos1: {}'.format(self.pos1)) return self.data[self.pos1[0], self.pos1[1], :] #return self.data[:, self.pos1[0], self.pos1[1]] else: return np.sum(self.data[self.pos1[0]:self.pos2[0], self.pos1[1]:self.pos2[1], :], axis=(0, 1)) #return np.sum(self.data[:, self.pos1[0]:self.pos2[0], self.pos1[1]:self.pos2[1]], # axis=(1, 2))
unknown
codeparrot/codeparrot-clean
{ "private": true, "workspaces": [ "packages/*" ], "scripts": { "dev": "yarn --cwd packages/web-app dev", "build": "yarn --cwd packages/web-app build", "start": "yarn --cwd packages/web-app start" } }
json
github
https://github.com/vercel/next.js
examples/with-stencil/package.json
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A Python blobstore API used by app developers. Contains methods uses to interface with Blobstore API. Defines db.Key-like class representing a blob-key. Contains API part that forward to apiproxy. """ import datetime import time from google.appengine.api import apiproxy_stub_map from google.appengine.api import datastore from google.appengine.api import datastore_errors from google.appengine.api import datastore_types from google.appengine.api import api_base_pb from google.appengine.api.blobstore import blobstore_service_pb from google.appengine.runtime import apiproxy_errors __all__ = ['BLOB_INFO_KIND', 'BLOB_KEY_HEADER', 'BLOB_MIGRATION_KIND', 'BLOB_RANGE_HEADER', 'MAX_BLOB_FETCH_SIZE', 'UPLOAD_INFO_CREATION_HEADER', 'CLOUD_STORAGE_OBJECT_HEADER', 'GS_PREFIX', 'BlobFetchSizeTooLargeError', 'BlobKey', 'BlobNotFoundError', 'DataIndexOutOfRangeError', 'PermissionDeniedError', 'Error', 'InternalError', 'create_rpc', 'create_upload_url', 'create_upload_url_async', 'delete', 'delete_async', 'fetch_data', 'fetch_data_async', 'create_gs_key', 'create_gs_key_async', ] BlobKey = datastore_types.BlobKey BLOB_INFO_KIND = '__BlobInfo__' BLOB_KEY_HEADER = 'X-AppEngine-BlobKey' BLOB_MIGRATION_KIND = '__BlobMigration__' BLOB_RANGE_HEADER = 'X-AppEngine-BlobRange' MAX_BLOB_FETCH_SIZE = (1 << 20) - (1 << 15) GS_PREFIX = '/gs/' UPLOAD_INFO_CREATION_HEADER = 'X-AppEngine-Upload-Creation' 
CLOUD_STORAGE_OBJECT_HEADER = 'X-AppEngine-Cloud-Storage-Object' _BASE_CREATION_HEADER_FORMAT = '%Y-%m-%d %H:%M:%S' class Error(Exception): """Base blobstore error type.""" class InternalError(Error): """Raised when an internal error occurs within API.""" class BlobNotFoundError(Error): """Raised when attempting to access blob data for non-existant blob.""" class DataIndexOutOfRangeError(Error): """Raised when attempting to access indexes out of range in wrong order.""" class BlobFetchSizeTooLargeError(Error): """Raised when attempting to fetch too large a block from a blob.""" class _CreationFormatError(Error): """Raised when attempting to parse bad creation date format.""" class PermissionDeniedError(Error): """Raised when permissions are lacking for a requested operation.""" def _ToBlobstoreError(error): """Translate an application error to a datastore Error, if possible. Args: error: An ApplicationError to translate. """ error_map = { blobstore_service_pb.BlobstoreServiceError.INTERNAL_ERROR: InternalError, blobstore_service_pb.BlobstoreServiceError.BLOB_NOT_FOUND: BlobNotFoundError, blobstore_service_pb.BlobstoreServiceError.DATA_INDEX_OUT_OF_RANGE: DataIndexOutOfRangeError, blobstore_service_pb.BlobstoreServiceError.BLOB_FETCH_SIZE_TOO_LARGE: BlobFetchSizeTooLargeError, blobstore_service_pb.BlobstoreServiceError.PERMISSION_DENIED: PermissionDeniedError, } desired_exc = error_map.get(error.application_error) return desired_exc(error.error_detail) if desired_exc else error def _format_creation(stamp): """Format an upload creation timestamp with milliseconds. This method is necessary to format a timestamp with microseconds on Python versions before 2.6. Cannot simply convert datetime objects to str because the microseconds are stripped from the format when set to 0. The upload creation date format will always have microseconds padded out to 6 places. Args: stamp: datetime.datetime object to format. 
Returns: Formatted datetime as Python 2.6 format '%Y-%m-%d %H:%M:%S.%f'. """ return '%s.%06d' % (stamp.strftime(_BASE_CREATION_HEADER_FORMAT), stamp.microsecond) def _parse_creation(creation_string, field_name): """Parses upload creation string from header format. Parse creation date of the format: YYYY-mm-dd HH:MM:SS.ffffff Y: Year m: Month (01-12) d: Day (01-31) H: Hour (00-24) M: Minute (00-59) S: Second (00-59) f: Microsecond Args: creation_string: String creation date format. Returns: datetime object parsed from creation_string. Raises: _CreationFormatError when the creation string is formatted incorrectly. """ split_creation_string = creation_string.split('.', 1) if len(split_creation_string) != 2: raise _CreationFormatError( 'Could not parse creation %s in field %s.' % (creation_string, field_name)) timestamp_string, microsecond = split_creation_string try: timestamp = time.strptime(timestamp_string, _BASE_CREATION_HEADER_FORMAT) microsecond = int(microsecond) except ValueError: raise _CreationFormatError('Could not parse creation %s in field %s.' % (creation_string, field_name)) return datetime.datetime(*timestamp[:6] + tuple([microsecond])) def create_rpc(deadline=None, callback=None): """Creates an RPC object for use with the blobstore API. Args: deadline: Optional deadline in seconds for the operation; the default is a system-specific deadline (typically 5 seconds). callback: Optional callable to invoke on completion. Returns: An apiproxy_stub_map.UserRPC object specialized for this service. 
""" return apiproxy_stub_map.UserRPC('blobstore', deadline, callback) def _make_async_call(rpc, method, request, response, get_result_hook, user_data): if rpc is None: rpc = create_rpc() rpc.make_call(method, request, response, get_result_hook, user_data) return rpc def _get_result_hook(rpc): try: rpc.check_success() except apiproxy_errors.ApplicationError, err: raise _ToBlobstoreError(err) hook = rpc.user_data return hook(rpc) def create_upload_url(success_path, max_bytes_per_blob=None, max_bytes_total=None, rpc=None, gs_bucket_name=None): """Create upload URL for POST form. Args: success_path: Path within application to call when POST is successful and upload is complete. max_bytes_per_blob: The maximum size in bytes that any one blob in the upload can be or None for no maximum size. max_bytes_total: The maximum size in bytes that the aggregate sizes of all of the blobs in the upload can be or None for no maximum size. rpc: Optional UserRPC object. gs_bucket_name: The Google Storage bucket name that the blobs should be uploaded to. The application's service account must have the correct permissions to write to this bucket. The bucket name may be of the foramt 'bucket/path/', in which case the included path will be prepended to the uploaded object name. Returns: The upload URL. Raises: TypeError: If max_bytes_per_blob or max_bytes_total are not integral types. ValueError: If max_bytes_per_blob or max_bytes_total are not positive values. """ rpc = create_upload_url_async(success_path, max_bytes_per_blob=max_bytes_per_blob, max_bytes_total=max_bytes_total, rpc=rpc, gs_bucket_name=gs_bucket_name) return rpc.get_result() def create_upload_url_async(success_path, max_bytes_per_blob=None, max_bytes_total=None, rpc=None, gs_bucket_name=None): """Create upload URL for POST form -- async version. Args: success_path: Path within application to call when POST is successful and upload is complete. 
max_bytes_per_blob: The maximum size in bytes that any one blob in the upload can be or None for no maximum size. max_bytes_total: The maximum size in bytes that the aggregate sizes of all of the blobs in the upload can be or None for no maximum size. rpc: Optional UserRPC object. gs_bucket_name: The Google Storage bucket name that the blobs should be uploaded to. The application's service account must have the correct permissions to write to this bucket. The bucket name may be of the foramt 'bucket/path/', in which case the included path will be prepended to the uploaded object name. Returns: A UserRPC whose result will be the upload URL. Raises: TypeError: If max_bytes_per_blob or max_bytes_total are not integral types. ValueError: If max_bytes_per_blob or max_bytes_total are not positive values. """ request = blobstore_service_pb.CreateUploadURLRequest() response = blobstore_service_pb.CreateUploadURLResponse() request.set_success_path(success_path) if max_bytes_per_blob is not None: if not isinstance(max_bytes_per_blob, (int, long)): raise TypeError('max_bytes_per_blob must be integer.') if max_bytes_per_blob < 1: raise ValueError('max_bytes_per_blob must be positive.') request.set_max_upload_size_per_blob_bytes(max_bytes_per_blob) if max_bytes_total is not None: if not isinstance(max_bytes_total, (int, long)): raise TypeError('max_bytes_total must be integer.') if max_bytes_total < 1: raise ValueError('max_bytes_total must be positive.') request.set_max_upload_size_bytes(max_bytes_total) if (request.has_max_upload_size_bytes() and request.has_max_upload_size_per_blob_bytes()): if (request.max_upload_size_bytes() < request.max_upload_size_per_blob_bytes()): raise ValueError('max_bytes_total can not be less' ' than max_upload_size_per_blob_bytes') if gs_bucket_name is not None: if not isinstance(gs_bucket_name, basestring): raise TypeError('gs_bucket_name must be a string.') request.set_gs_bucket_name(gs_bucket_name) return _make_async_call(rpc, 
'CreateUploadURL', request, response, _get_result_hook, lambda rpc: rpc.response.url()) def delete(blob_keys, rpc=None, _token=None): """Delete a blob from Blobstore. Args: blob_keys: Single instance or list of blob keys. A blob-key can be either a string or an instance of BlobKey. rpc: Optional UserRPC object. Returns: None. """ rpc = delete_async(blob_keys, rpc, _token) return rpc.get_result() def delete_async(blob_keys, rpc=None, _token=None): """Delete a blob from Blobstore -- async version. Args: blob_keys: Single instance or list of blob keys. A blob-key can be either a string or an instance of BlobKey. rpc: Optional UserRPC object. Returns: A UserRPC whose result will be None. """ if isinstance(blob_keys, (basestring, BlobKey)): blob_keys = [blob_keys] request = blobstore_service_pb.DeleteBlobRequest() for blob_key in blob_keys: request.add_blob_key(str(blob_key)) if _token: request.set_token(_token) response = api_base_pb.VoidProto() return _make_async_call(rpc, 'DeleteBlob', request, response, _get_result_hook, lambda rpc: None) def fetch_data(blob_key, start_index, end_index, rpc=None): """Fetch data for blob. See docstring for ext.blobstore.fetch_data for more details. Args: blob: BlobKey, str or unicode representation of BlobKey of blob to fetch data from. start_index: Start index of blob data to fetch. May not be negative. end_index: End index (exclusive) of blob data to fetch. Must be >= start_index. rpc: Optional UserRPC object. Returns: A str containing partial data of blob. See docstring for ext.blobstore.fetch_data for more details. Raises: See docstring for ext.blobstore.fetch_data for more details. """ rpc = fetch_data_async(blob_key, start_index, end_index, rpc) return rpc.get_result() def fetch_data_async(blob_key, start_index, end_index, rpc=None): """Fetch data for blob -- async version. See docstring for ext.blobstore.fetch_data for more details. Args: blob: BlobKey, str or unicode representation of BlobKey of blob to fetch data from. 
start_index: Start index of blob data to fetch. May not be negative. end_index: End index (exclusive) of blob data to fetch. Must be >= start_index. rpc: Optional UserRPC object. Returns: A UserRPC whose result will be a str as returned by fetch_data(). Raises: See docstring for ext.blobstore.fetch_data for more details. """ if not isinstance(start_index, (int, long)): raise TypeError('start_index must be integer.') if not isinstance(end_index, (int, long)): raise TypeError('end_index must be integer.') if isinstance(blob_key, BlobKey): blob_key = str(blob_key).decode('utf-8') elif isinstance(blob_key, str): blob_key = blob_key.decode('utf-8') elif not isinstance(blob_key, unicode): raise TypeError('Blob-key must be str, unicode or BlobKey: %s' % blob_key) if start_index < 0: raise DataIndexOutOfRangeError( 'May not fetch blob at negative index.') if end_index < start_index: raise DataIndexOutOfRangeError( 'Start index %d > end index %d' % (start_index, end_index)) fetch_size = end_index - start_index + 1 if fetch_size > MAX_BLOB_FETCH_SIZE: raise BlobFetchSizeTooLargeError( 'Blob fetch size is too large: %d' % fetch_size) request = blobstore_service_pb.FetchDataRequest() response = blobstore_service_pb.FetchDataResponse() request.set_blob_key(blob_key) request.set_start_index(start_index) request.set_end_index(end_index) return _make_async_call(rpc, 'FetchData', request, response, _get_result_hook, lambda rpc: rpc.response.data()) def create_gs_key(filename, rpc=None): """Create an encoded key for a Google Storage file. It is safe to persist this key for future use. Args: filename: The filename of the google storage object to create the key for. rpc: Optional UserRPC object. Returns: An encrypted blob key string. """ rpc = create_gs_key_async(filename, rpc) return rpc.get_result() def create_gs_key_async(filename, rpc=None): """Create an encoded key for a google storage file - async version. It is safe to persist this key for future use. 
Args: filename: The filename of the google storage object to create the key for. rpc: Optional UserRPC object. Returns: A UserRPC whose result will be a string as returned by create_gs_key. Raises: TypeError: If filename is not a string. ValueError: If filename is not in the format '/gs/bucket_name/object_name' """ if not isinstance(filename, basestring): raise TypeError('filename must be str: %s' % filename) if not filename.startswith(GS_PREFIX): raise ValueError('filename must start with "/gs/": %s' % filename) if not '/' in filename[4:]: raise ValueError('filename must have the format ' '"/gs/bucket_name/object_name": %s' % filename) request = blobstore_service_pb.CreateEncodedGoogleStorageKeyRequest() response = blobstore_service_pb.CreateEncodedGoogleStorageKeyResponse() request.set_filename(filename) return _make_async_call(rpc, 'CreateEncodedGoogleStorageKey', request, response, _get_result_hook, lambda rpc: rpc.response.blob_key())
unknown
codeparrot/codeparrot-clean
from typing import TYPE_CHECKING, Any from langchain_classic._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders import UnstructuredOrgModeLoader # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. DEPRECATED_LOOKUP = { "UnstructuredOrgModeLoader": "langchain_community.document_loaders", } _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _import_attribute(name) __all__ = [ "UnstructuredOrgModeLoader", ]
python
github
https://github.com/langchain-ai/langchain
libs/langchain/langchain_classic/document_loaders/org_mode.py
########################################################################### # # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ########################################################################### from django.core.management.base import BaseCommand, CommandError from starthinker_ui.recipe.scripts import Script from starthinker.util.csv import rows_to_csv class Command(BaseCommand): """ Command used to generate a simple list of solutions. Nothing depends on this, just a quick implementation, feel free to pivot as necessary. Current version returns a CSV with: - Name - Description - Global - Product List - Owners List - Year Active - Status - Link ( local, may need to regexp into production after running ) Call without arguments using: python manage.py scripts_to_csv """ help = 'Generate CSV Of Scripts' def get_scripts(self): for script in Script.get_scripts(): yield ( script.get_name(), # solution script.get_description().replace('"', '\''), # description 'Global', # region ', '.join(script.get_products()), # entity ', '.join(x.replace('@google.com', '') for x in script.get_authors()), # POC '%s - current' % script.get_released().year, # year script.get_status() or 'Live', # status script.get_link(), # link ) def handle(self, *args, **kwargs): print(rows_to_csv(self.get_scripts()).read())
unknown
codeparrot/codeparrot-clean
module.exports = function a() { return "This is a"; };
javascript
github
https://github.com/webpack/webpack
test/fixtures/a.js