code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
# Copyright 2017, Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import collections import copy import logging import random import threading import time import six from google.cloud.pubsub_v1.subscriber._protocol import requests _LOGGER = logging.getLogger(__name__) _LEASE_WORKER_NAME = "Thread-LeaseMaintainer" _LeasedMessage = collections.namedtuple("_LeasedMessage", ["added_time", "size"]) class Leaser(object): def __init__(self, manager): self._thread = None self._operational_lock = threading.Lock() self._manager = manager self._leased_messages = {} """dict[str, float]: A mapping of ack IDs to the local time when the ack ID was initially leased in seconds since the epoch.""" self._bytes = 0 """int: The total number of bytes consumed by leased messages.""" self._stop_event = threading.Event() @property def message_count(self): """int: The number of leased messages.""" return len(self._leased_messages) @property def ack_ids(self): """Sequence[str]: The ack IDs of all leased messages.""" return self._leased_messages.keys() @property def bytes(self): """int: The total size, in bytes, of all leased messages.""" return self._bytes def add(self, items): """Add messages to be managed by the leaser.""" for item in items: # Add the ack ID to the set of managed ack IDs, and increment # the size counter. 
if item.ack_id not in self._leased_messages: self._leased_messages[item.ack_id] = _LeasedMessage( added_time=time.time(), size=item.byte_size ) self._bytes += item.byte_size else: _LOGGER.debug("Message %s is already lease managed", item.ack_id) def remove(self, items): """Remove messages from lease management.""" # Remove the ack ID from lease management, and decrement the # byte counter. for item in items: if self._leased_messages.pop(item.ack_id, None) is not None: self._bytes -= item.byte_size else: _LOGGER.debug("Item %s was not managed.", item.ack_id) if self._bytes < 0: _LOGGER.debug("Bytes was unexpectedly negative: %d", self._bytes) self._bytes = 0 def maintain_leases(self): """Maintain all of the leases being managed. This method modifies the ack deadline for all of the managed ack IDs, then waits for most of that time (but with jitter), and repeats. """ while self._manager.is_active and not self._stop_event.is_set(): # Determine the appropriate duration for the lease. This is # based off of how long previous messages have taken to ack, with # a sensible default and within the ranges allowed by Pub/Sub. p99 = self._manager.ack_histogram.percentile(99) _LOGGER.debug("The current p99 value is %d seconds.", p99) # Make a copy of the leased messages. This is needed because it's # possible for another thread to modify the dictionary while # we're iterating over it. leased_messages = copy.copy(self._leased_messages) # Drop any leases that are well beyond max lease time. This # ensures that in the event of a badly behaving actor, we can # drop messages and allow Pub/Sub to resend them. 
cutoff = time.time() - self._manager.flow_control.max_lease_duration to_drop = [ requests.DropRequest(ack_id, item.size) for ack_id, item in six.iteritems(leased_messages) if item.added_time < cutoff ] if to_drop: _LOGGER.warning( "Dropping %s items because they were leased too long.", len(to_drop) ) self._manager.dispatcher.drop(to_drop) # Remove dropped items from our copy of the leased messages (they # have already been removed from the real one by # self._manager.drop(), which calls self.remove()). for item in to_drop: leased_messages.pop(item.ack_id) # Create a streaming pull request. # We do not actually call `modify_ack_deadline` over and over # because it is more efficient to make a single request. ack_ids = leased_messages.keys() if ack_ids: _LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids)) # NOTE: This may not work as expected if ``consumer.active`` # has changed since we checked it. An implementation # without any sort of race condition would require a # way for ``send_request`` to fail when the consumer # is inactive. self._manager.dispatcher.modify_ack_deadline( [requests.ModAckRequest(ack_id, p99) for ack_id in ack_ids] ) # Now wait an appropriate period of time and do this again. # # We determine the appropriate period of time based on a random # period between 0 seconds and 90% of the lease. This use of # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases # where there are many clients. snooze = random.uniform(0.0, p99 * 0.9) _LOGGER.debug("Snoozing lease management for %f seconds.", snooze) self._stop_event.wait(timeout=snooze) _LOGGER.info("%s exiting.", _LEASE_WORKER_NAME) def start(self): with self._operational_lock: if self._thread is not None: raise ValueError("Leaser is already running.") # Create and start the helper thread. 
self._stop_event.clear() thread = threading.Thread( name=_LEASE_WORKER_NAME, target=self.maintain_leases ) thread.daemon = True thread.start() _LOGGER.debug("Started helper thread %s", thread.name) self._thread = thread def stop(self): with self._operational_lock: self._stop_event.set() if self._thread is not None: # The thread should automatically exit when the consumer is # inactive. self._thread.join() self._thread = None
unknown
codeparrot/codeparrot-clean
// Copyright IBM Corp. 2016, 2025 // SPDX-License-Identifier: MPL-2.0 //go:build !enterprise package plugin import ( "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" "google.golang.org/grpc" ) // newGRPCSystemViewFromSetupArgs (Vault Community edition) constructs a gRPC SystemView client. func newGRPCSystemViewFromSetupArgs(conn *grpc.ClientConn, _ *pb.SetupArgs) logical.SystemView { return newGRPCSystemView(conn) }
go
github
https://github.com/hashicorp/vault
sdk/plugin/grpc_backend_server_stubs_oss.go
# This file is part of beets. # Copyright 2013, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """A clone of the Music Player Daemon (MPD) that plays music from a Beets library. Attempts to implement a compatible protocol to allow use of the wide range of MPD clients. """ from __future__ import print_function import re from string import Template import traceback import logging import random import time import beets from beets.plugins import BeetsPlugin import beets.ui from beets import vfs from beets import config from beets.util import bluelet PROTOCOL_VERSION = '0.13.0' BUFSIZE = 1024 HELLO = 'OK MPD %s' % PROTOCOL_VERSION CLIST_BEGIN = 'command_list_begin' CLIST_VERBOSE_BEGIN = 'command_list_ok_begin' CLIST_END = 'command_list_end' RESP_OK = 'OK' RESP_CLIST_VERBOSE = 'list_OK' RESP_ERR = 'ACK' NEWLINE = u"\n" ERROR_NOT_LIST = 1 ERROR_ARG = 2 ERROR_PASSWORD = 3 ERROR_PERMISSION = 4 ERROR_UNKNOWN = 5 ERROR_NO_EXIST = 50 ERROR_PLAYLIST_MAX = 51 ERROR_SYSTEM = 52 ERROR_PLAYLIST_LOAD = 53 ERROR_UPDATE_ALREADY = 54 ERROR_PLAYER_SYNC = 55 ERROR_EXIST = 56 VOLUME_MIN = 0 VOLUME_MAX = 100 SAFE_COMMANDS = ( # Commands that are available when unauthenticated. u'close', u'commands', u'notcommands', u'password', u'ping', ) # Loggers. log = logging.getLogger('beets.bpd') global_log = logging.getLogger('beets') # Gstreamer import error. class NoGstreamerError(Exception): pass # Error-handling, exceptions, parameter parsing. 
class BPDError(Exception): """An error that should be exposed to the client to the BPD server. """ def __init__(self, code, message, cmd_name='', index=0): self.code = code self.message = message self.cmd_name = cmd_name self.index = index template = Template(u'$resp [$code@$index] {$cmd_name} $message') def response(self): """Returns a string to be used as the response code for the erring command. """ return self.template.substitute({'resp': RESP_ERR, 'code': self.code, 'index': self.index, 'cmd_name': self.cmd_name, 'message': self.message }) def make_bpd_error(s_code, s_message): """Create a BPDError subclass for a static code and message. """ class NewBPDError(BPDError): code = s_code message = s_message cmd_name = '' index = 0 def __init__(self): pass return NewBPDError ArgumentTypeError = make_bpd_error(ERROR_ARG, 'invalid type for argument') ArgumentIndexError = make_bpd_error(ERROR_ARG, 'argument out of range') ArgumentNotFoundError = make_bpd_error(ERROR_NO_EXIST, 'argument not found') def cast_arg(t, val): """Attempts to call t on val, raising a ArgumentTypeError on ValueError. If 't' is the special string 'intbool', attempts to cast first to an int and then to a bool (i.e., 1=True, 0=False). """ if t == 'intbool': return cast_arg(bool, cast_arg(int, val)) else: try: return t(val) except ValueError: raise ArgumentTypeError() class BPDClose(Exception): """Raised by a command invocation to indicate that the connection should be closed. """ # Generic server infrastructure, implementing the basic protocol. class BaseServer(object): """A MPD-compatible music player server. The functions with the `cmd_` prefix are invoked in response to client commands. For instance, if the client says `status`, `cmd_status` will be invoked. The arguments to the client's commands are used as function arguments following the connection issuing the command. The functions may send data on the connection. They may also raise BPDError exceptions to report errors. 
This is a generic superclass and doesn't support many commands. """ def __init__(self, host, port, password): """Create a new server bound to address `host` and listening on port `port`. If `password` is given, it is required to do anything significant on the server. """ self.host, self.port, self.password = host, port, password # Default server values. self.random = False self.repeat = False self.volume = VOLUME_MAX self.crossfade = 0 self.playlist = [] self.playlist_version = 0 self.current_index = -1 self.paused = False self.error = None # Object for random numbers generation self.random_obj = random.Random() def run(self): """Block and start listening for connections from clients. An interrupt (^C) closes the server. """ self.startup_time = time.time() bluelet.run(bluelet.server(self.host, self.port, Connection.handler(self))) def _item_info(self, item): """An abstract method that should response lines containing a single song's metadata. """ raise NotImplementedError def _item_id(self, item): """An abstract method returning the integer id for an item. """ raise NotImplementedError def _id_to_index(self, track_id): """Searches the playlist for a song with the given id and returns its index in the playlist. """ track_id = cast_arg(int, track_id) for index, track in enumerate(self.playlist): if self._item_id(track) == track_id: return index # Loop finished with no track found. raise ArgumentNotFoundError() def _random_idx(self): """Returns a random index different from the current one. If there are no songs in the playlist it returns -1. If there is only one song in the playlist it returns 0. """ if len(self.playlist) < 2: return len(self.playlist)-1 new_index = self.random_obj.randint(0, len(self.playlist)-1) while new_index == self.current_index: new_index = self.random_obj.randint(0, len(self.playlist)-1) return new_index def _succ_idx(self): """Returns the index for the next song to play. It also considers random and repeat flags. No boundaries are checked. 
""" if self.repeat: return self.current_index if self.random: return self._random_idx() return self.current_index+1 def _prev_idx(self): """Returns the index for the previous song to play. It also considers random and repeat flags. No boundaries are checked. """ if self.repeat: return self.current_index if self.random: return self._random_idx() return self.current_index-1 def cmd_ping(self, conn): """Succeeds.""" pass def cmd_kill(self, conn): """Exits the server process.""" exit(0) def cmd_close(self, conn): """Closes the connection.""" raise BPDClose() def cmd_password(self, conn, password): """Attempts password authentication.""" if password == self.password: conn.authenticated = True else: conn.authenticated = False raise BPDError(ERROR_PASSWORD, 'incorrect password') def cmd_commands(self, conn): """Lists the commands available to the user.""" if self.password and not conn.authenticated: # Not authenticated. Show limited list of commands. for cmd in SAFE_COMMANDS: yield u'command: ' + cmd else: # Authenticated. Show all commands. for func in dir(self): if func.startswith('cmd_'): yield u'command: ' + func[4:] def cmd_notcommands(self, conn): """Lists all unavailable commands.""" if self.password and not conn.authenticated: # Not authenticated. Show privileged commands. for func in dir(self): if func.startswith('cmd_'): cmd = func[4:] if cmd not in SAFE_COMMANDS: yield u'command: ' + cmd else: # Authenticated. No commands are unavailable. pass def cmd_status(self, conn): """Returns some status information for use with an implementation of cmd_status. Gives a list of response-lines for: volume, repeat, random, playlist, playlistlength, and xfade. 
""" yield (u'volume: ' + unicode(self.volume), u'repeat: ' + unicode(int(self.repeat)), u'random: ' + unicode(int(self.random)), u'playlist: ' + unicode(self.playlist_version), u'playlistlength: ' + unicode(len(self.playlist)), u'xfade: ' + unicode(self.crossfade), ) if self.current_index == -1: state = u'stop' elif self.paused: state = u'pause' else: state = u'play' yield u'state: ' + state if self.current_index != -1: # i.e., paused or playing current_id = self._item_id(self.playlist[self.current_index]) yield u'song: ' + unicode(self.current_index) yield u'songid: ' + unicode(current_id) if self.error: yield u'error: ' + self.error def cmd_clearerror(self, conn): """Removes the persistent error state of the server. This error is set when a problem arises not in response to a command (for instance, when playing a file). """ self.error = None def cmd_random(self, conn, state): """Set or unset random (shuffle) mode.""" self.random = cast_arg('intbool', state) def cmd_repeat(self, conn, state): """Set or unset repeat mode.""" self.repeat = cast_arg('intbool', state) def cmd_setvol(self, conn, vol): """Set the player's volume level (0-100).""" vol = cast_arg(int, vol) if vol < VOLUME_MIN or vol > VOLUME_MAX: raise BPDError(ERROR_ARG, u'volume out of range') self.volume = vol def cmd_crossfade(self, conn, crossfade): """Set the number of seconds of crossfading.""" crossfade = cast_arg(int, crossfade) if crossfade < 0: raise BPDError(ERROR_ARG, u'crossfade time must be nonnegative') def cmd_clear(self, conn): """Clear the playlist.""" self.playlist = [] self.playlist_version += 1 self.cmd_stop(conn) def cmd_delete(self, conn, index): """Remove the song at index from the playlist.""" index = cast_arg(int, index) try: del(self.playlist[index]) except IndexError: raise ArgumentIndexError() self.playlist_version += 1 if self.current_index == index: # Deleted playing song. self.cmd_stop(conn) elif index < self.current_index: # Deleted before playing. 
# Shift playing index down. self.current_index -= 1 def cmd_deleteid(self, conn, track_id): self.cmd_delete(conn, self._id_to_index(track_id)) def cmd_move(self, conn, idx_from, idx_to): """Move a track in the playlist.""" idx_from = cast_arg(int, idx_from) idx_to = cast_arg(int, idx_to) try: track = self.playlist.pop(idx_from) self.playlist.insert(idx_to, track) except IndexError: raise ArgumentIndexError() # Update currently-playing song. if idx_from == self.current_index: self.current_index = idx_to elif idx_from < self.current_index <= idx_to: self.current_index -= 1 elif idx_from > self.current_index >= idx_to: self.current_index += 1 self.playlist_version += 1 def cmd_moveid(self, conn, idx_from, idx_to): idx_from = self._id_to_index(idx_from) return self.cmd_move(conn, idx_from, idx_to) def cmd_swap(self, conn, i, j): """Swaps two tracks in the playlist.""" i = cast_arg(int, i) j = cast_arg(int, j) try: track_i = self.playlist[i] track_j = self.playlist[j] except IndexError: raise ArgumentIndexError() self.playlist[j] = track_i self.playlist[i] = track_j # Update currently-playing song. if self.current_index == i: self.current_index = j elif self.current_index == j: self.current_index = i self.playlist_version += 1 def cmd_swapid(self, conn, i_id, j_id): i = self._id_to_index(i_id) j = self._id_to_index(j_id) return self.cmd_swap(conn, i, j) def cmd_urlhandlers(self, conn): """Indicates supported URL schemes. None by default.""" pass def cmd_playlistinfo(self, conn, index=-1): """Gives metadata information about the entire playlist or a single track, given by its index. 
""" index = cast_arg(int, index) if index == -1: for track in self.playlist: yield self._item_info(track) else: try: track = self.playlist[index] except IndexError: raise ArgumentIndexError() yield self._item_info(track) def cmd_playlistid(self, conn, track_id=-1): return self.cmd_playlistinfo(conn, self._id_to_index(track_id)) def cmd_plchanges(self, conn, version): """Sends playlist changes since the given version. This is a "fake" implementation that ignores the version and just returns the entire playlist (rather like version=0). This seems to satisfy many clients. """ return self.cmd_playlistinfo(conn) def cmd_plchangesposid(self, conn, version): """Like plchanges, but only sends position and id. Also a dummy implementation. """ for idx, track in enumerate(self.playlist): yield u'cpos: ' + unicode(idx) yield u'Id: ' + unicode(track.id) def cmd_currentsong(self, conn): """Sends information about the currently-playing song. """ if self.current_index != -1: # -1 means stopped. track = self.playlist[self.current_index] yield self._item_info(track) def cmd_next(self, conn): """Advance to the next song in the playlist.""" self.current_index = self._succ_idx() if self.current_index >= len(self.playlist): # Fallen off the end. Just move to stopped state. return self.cmd_stop(conn) else: return self.cmd_play(conn) def cmd_previous(self, conn): """Step back to the last song.""" self.current_index = self._prev_idx() if self.current_index < 0: return self.cmd_stop(conn) else: return self.cmd_play(conn) def cmd_pause(self, conn, state=None): """Set the pause state playback.""" if state is None: self.paused = not self.paused # Toggle. else: self.paused = cast_arg('intbool', state) def cmd_play(self, conn, index=-1): """Begin playback, possibly at a specified playlist index.""" index = cast_arg(int, index) if index < -1 or index > len(self.playlist): raise ArgumentIndexError() if index == -1: # No index specified: start where we are. 
if not self.playlist: # Empty playlist: stop immediately. return self.cmd_stop(conn) if self.current_index == -1: # No current song. self.current_index = 0 # Start at the beginning. # If we have a current song, just stay there. else: # Start with the specified index. self.current_index = index self.paused = False def cmd_playid(self, conn, track_id=0): track_id = cast_arg(int, track_id) if track_id == -1: index = -1 else: index = self._id_to_index(track_id) return self.cmd_play(conn, index) def cmd_stop(self, conn): """Stop playback.""" self.current_index = -1 self.paused = False def cmd_seek(self, conn, index, pos): """Seek to a specified point in a specified song.""" index = cast_arg(int, index) if index < 0 or index >= len(self.playlist): raise ArgumentIndexError() self.current_index = index def cmd_seekid(self, conn, track_id, pos): index = self._id_to_index(track_id) return self.cmd_seek(conn, index, pos) def cmd_profile(self, conn): """Memory profiling for debugging.""" from guppy import hpy heap = hpy().heap() print(heap) class Connection(object): """A connection between a client and the server. Handles input and output from and to the client. """ def __init__(self, server, sock): """Create a new connection for the accepted socket `client`. """ self.server = server self.sock = sock self.authenticated = False def send(self, lines): """Send lines, which which is either a single string or an iterable consisting of strings, to the client. A newline is added after every string. Returns a Bluelet event that sends the data. """ if isinstance(lines, basestring): lines = [lines] out = NEWLINE.join(lines) + NEWLINE log.debug(out[:-1]) # Don't log trailing newline. if isinstance(out, unicode): out = out.encode('utf8') return self.sock.sendall(out) def do_command(self, command): """A coroutine that runs the given command and sends an appropriate response.""" try: yield bluelet.call(command.run(self)) except BPDError as e: # Send the error. 
yield self.send(e.response()) else: # Send success code. yield self.send(RESP_OK) def run(self): """Send a greeting to the client and begin processing commands as they arrive. """ yield self.send(HELLO) clist = None # Initially, no command list is being constructed. while True: line = yield self.sock.readline() if not line: break line = line.strip() if not line: break log.debug(line) if clist is not None: # Command list already opened. if line == CLIST_END: yield bluelet.call(self.do_command(clist)) clist = None # Clear the command list. else: clist.append(Command(line)) elif line == CLIST_BEGIN or line == CLIST_VERBOSE_BEGIN: # Begin a command list. clist = CommandList([], line == CLIST_VERBOSE_BEGIN) else: # Ordinary command. try: yield bluelet.call(self.do_command(Command(line))) except BPDClose: # Command indicates that the conn should close. self.sock.close() return @classmethod def handler(cls, server): def _handle(sock): """Creates a new `Connection` and runs it. """ return cls(server, sock).run() return _handle class Command(object): """A command issued by the client for processing by the server. """ command_re = re.compile(r'^([^ \t]+)[ \t]*') arg_re = re.compile(r'"((?:\\"|[^"])+)"|([^ \t"]+)') def __init__(self, s): """Creates a new `Command` from the given string, `s`, parsing the string for command name and arguments. """ command_match = self.command_re.match(s) self.name = command_match.group(1) self.args = [] arg_matches = self.arg_re.findall(s[command_match.end():]) for match in arg_matches: if match[0]: # Quoted argument. arg = match[0] arg = arg.replace('\\"', '"').replace('\\\\', '\\') else: # Unquoted argument. arg = match[1] arg = arg.decode('utf8') self.args.append(arg) def run(self, conn): """A coroutine that executes the command on the given connection. """ # Attempt to get correct command function. 
func_name = 'cmd_' + self.name if not hasattr(conn.server, func_name): raise BPDError(ERROR_UNKNOWN, u'unknown command', self.name) func = getattr(conn.server, func_name) # Ensure we have permission for this command. if conn.server.password and \ not conn.authenticated and \ self.name not in SAFE_COMMANDS: raise BPDError(ERROR_PERMISSION, u'insufficient privileges') try: args = [conn] + self.args results = func(*args) if results: for data in results: yield conn.send(data) except BPDError as e: # An exposed error. Set the command name and then let # the Connection handle it. e.cmd_name = self.name raise e except BPDClose: # An indication that the connection should close. Send # it on the Connection. raise except Exception as e: # An "unintentional" error. Hide it from the client. log.error(traceback.format_exc(e)) raise BPDError(ERROR_SYSTEM, u'server error', self.name) class CommandList(list): """A list of commands issued by the client for processing by the server. May be verbose, in which case the response is delimited, or not. Should be a list of `Command` objects. """ def __init__(self, sequence=None, verbose=False): """Create a new `CommandList` from the given sequence of `Command`s. If `verbose`, this is a verbose command list. """ if sequence: for item in sequence: self.append(item) self.verbose = verbose def run(self, conn): """Coroutine executing all the commands in this list. """ for i, command in enumerate(self): try: yield bluelet.call(command.run(conn)) except BPDError as e: # If the command failed, stop executing. e.index = i # Give the error the correct index. raise e # Otherwise, possibly send the output delimeter if we're in a # verbose ("OK") command list. if self.verbose: yield conn.send(RESP_CLIST_VERBOSE) # A subclass of the basic, protocol-handling server that actually plays # music. class Server(BaseServer): """An MPD-compatible server using GStreamer to play audio and beets to store its library. 
""" def __init__(self, library, host, port, password): try: from beetsplug.bpd import gstplayer except ImportError as e: # This is a little hacky, but it's the best I know for now. if e.args[0].endswith(' gst'): raise NoGstreamerError() else: raise super(Server, self).__init__(host, port, password) self.lib = library self.player = gstplayer.GstPlayer(self.play_finished) self.cmd_update(None) def run(self): self.player.run() super(Server, self).run() def play_finished(self): """A callback invoked every time our player finishes a track. """ self.cmd_next(None) # Metadata helper functions. def _item_info(self, item): info_lines = [u'file: ' + self.lib.destination(item, fragment=True), u'Time: ' + unicode(int(item.length)), u'Title: ' + item.title, u'Artist: ' + item.artist, u'Album: ' + item.album, u'Genre: ' + item.genre, ] track = unicode(item.track) if item.tracktotal: track += u'/' + unicode(item.tracktotal) info_lines.append(u'Track: ' + track) info_lines.append(u'Date: ' + unicode(item.year)) try: pos = self._id_to_index(item.id) info_lines.append(u'Pos: ' + unicode(pos)) except ArgumentNotFoundError: # Don't include position if not in playlist. pass info_lines.append(u'Id: ' + unicode(item.id)) return info_lines def _item_id(self, item): return item.id # Database updating. def cmd_update(self, conn, path=u'/'): """Updates the catalog to reflect the current database state. """ # Path is ignored. Also, the real MPD does this asynchronously; # this is done inline. print('Building directory tree...') self.tree = vfs.libtree(self.lib) print('... done.') self.updated_time = time.time() # Path (directory tree) browsing. def _resolve_path(self, path): """Returns a VFS node or an item ID located at the path given. If the path does not exist, raises a """ components = path.split(u'/') node = self.tree for component in components: if not component: continue if isinstance(node, int): # We're trying to descend into a file node. 
raise ArgumentNotFoundError() if component in node.files: node = node.files[component] elif component in node.dirs: node = node.dirs[component] else: raise ArgumentNotFoundError() return node def _path_join(self, p1, p2): """Smashes together two BPD paths.""" out = p1 + u'/' + p2 return out.replace(u'//', u'/').replace(u'//', u'/') def cmd_lsinfo(self, conn, path=u"/"): """Sends info on all the items in the path.""" node = self._resolve_path(path) if isinstance(node, int): # Trying to list a track. raise BPDError(ERROR_ARG, 'this is not a directory') else: for name, itemid in iter(sorted(node.files.items())): item = self.lib.get_item(itemid) yield self._item_info(item) for name, _ in iter(sorted(node.dirs.iteritems())): dirpath = self._path_join(path, name) if dirpath.startswith(u"/"): # Strip leading slash (libmpc rejects this). dirpath = dirpath[1:] yield u'directory: %s' % dirpath def _listall(self, basepath, node, info=False): """Helper function for recursive listing. If info, show tracks' complete info; otherwise, just show items' paths. """ if isinstance(node, int): # List a single file. if info: item = self.lib.get_item(node) yield self._item_info(item) else: yield u'file: ' + basepath else: # List a directory. Recurse into both directories and files. for name, itemid in sorted(node.files.iteritems()): newpath = self._path_join(basepath, name) # "yield from" for v in self._listall(newpath, itemid, info): yield v for name, subdir in sorted(node.dirs.iteritems()): newpath = self._path_join(basepath, name) yield u'directory: ' + newpath for v in self._listall(newpath, subdir, info): yield v def cmd_listall(self, conn, path=u"/"): """Send the paths all items in the directory, recursively.""" return self._listall(path, self._resolve_path(path), False) def cmd_listallinfo(self, conn, path=u"/"): """Send info on all the items in the directory, recursively.""" return self._listall(path, self._resolve_path(path), True) # Playlist manipulation. 
def _all_items(self, node): """Generator yielding all items under a VFS node. """ if isinstance(node, int): # Could be more efficient if we built up all the IDs and # then issued a single SELECT. yield self.lib.get_item(node) else: # Recurse into a directory. for name, itemid in sorted(node.files.iteritems()): # "yield from" for v in self._all_items(itemid): yield v for name, subdir in sorted(node.dirs.iteritems()): for v in self._all_items(subdir): yield v def _add(self, path, send_id=False): """Adds a track or directory to the playlist, specified by the path. If `send_id`, write each item's id to the client. """ for item in self._all_items(self._resolve_path(path)): self.playlist.append(item) if send_id: yield u'Id: ' + unicode(item.id) self.playlist_version += 1 def cmd_add(self, conn, path): """Adds a track or directory to the playlist, specified by a path. """ return self._add(path, False) def cmd_addid(self, conn, path): """Same as `cmd_add` but sends an id back to the client.""" return self._add(path, True) # Server info. def cmd_status(self, conn): for line in super(Server, self).cmd_status(conn): yield line if self.current_index > -1: item = self.playlist[self.current_index] yield u'bitrate: ' + unicode(item.bitrate/1000) # Missing 'audio'. (pos, total) = self.player.time() yield u'time: ' + unicode(pos) + u':' + unicode(total) # Also missing 'updating_db'. def cmd_stats(self, conn): """Sends some statistics about the library.""" with self.lib.transaction() as tx: songs, totaltime = beets.library.TrueQuery().count(tx) statement = 'SELECT COUNT(DISTINCT artist), ' \ 'COUNT(DISTINCT album) FROM items' result = tx.query(statement)[0] artists, albums = result[0], result[1] yield (u'artists: ' + unicode(artists), u'albums: ' + unicode(albums), u'songs: ' + unicode(songs), u'uptime: ' + unicode(int(time.time() - self.startup_time)), u'playtime: ' + u'0', # Missing. 
u'db_playtime: ' + unicode(int(totaltime)), u'db_update: ' + unicode(int(self.updated_time)), ) # Searching. tagtype_map = { u'Artist': u'artist', u'Album': u'album', u'Title': u'title', u'Track': u'track', u'AlbumArtist': u'albumartist', u'AlbumArtistSort': u'albumartist_sort', # Name? u'Genre': u'genre', u'Date': u'year', u'Composer': u'composer', # Performer? u'Disc': u'disc', u'filename': u'path', # Suspect. } def cmd_tagtypes(self, conn): """Returns a list of the metadata (tag) fields available for searching. """ for tag in self.tagtype_map: yield u'tagtype: ' + tag def _tagtype_lookup(self, tag): """Uses `tagtype_map` to look up the beets column name for an MPD tagtype (or throw an appropriate exception). Returns both the canonical name of the MPD tagtype and the beets column name. """ for test_tag, key in self.tagtype_map.items(): # Match case-insensitively. if test_tag.lower() == tag.lower(): return test_tag, key raise BPDError(ERROR_UNKNOWN, u'no such tagtype') def _metadata_query(self, query_type, any_query_type, kv): """Helper function returns a query object that will find items according to the library query type provided and the key-value pairs specified. The any_query_type is used for queries of type "any"; if None, then an error is thrown. """ if kv: # At least one key-value pair. queries = [] # Iterate pairwise over the arguments. it = iter(kv) for tag, value in zip(it, it): if tag.lower() == u'any': if any_query_type: queries.append(any_query_type(value)) else: raise BPDError(ERROR_UNKNOWN, u'no such tagtype') else: _, key = self._tagtype_lookup(tag) queries.append(query_type(key, value)) return beets.library.AndQuery(queries) else: # No key-value pairs. 
return beets.library.TrueQuery() def cmd_search(self, conn, *kv): """Perform a substring match for items.""" query = self._metadata_query(beets.library.SubstringQuery, beets.library.AnySubstringQuery, kv) for item in self.lib.items(query): yield self._item_info(item) def cmd_find(self, conn, *kv): """Perform an exact match for items.""" query = self._metadata_query(beets.library.MatchQuery, None, kv) for item in self.lib.items(query): yield self._item_info(item) def cmd_list(self, conn, show_tag, *kv): """List distinct metadata values for show_tag, possibly filtered by matching match_tag to match_term. """ show_tag_canon, show_key = self._tagtype_lookup(show_tag) query = self._metadata_query(beets.library.MatchQuery, None, kv) clause, subvals = query.clause() statement = 'SELECT DISTINCT ' + show_key + \ ' FROM items WHERE ' + clause + \ ' ORDER BY ' + show_key with self.lib.transaction() as tx: rows = tx.query(statement, subvals) for row in rows: yield show_tag_canon + u': ' + unicode(row[0]) def cmd_count(self, conn, tag, value): """Returns the number and total time of songs matching the tag/value query. """ _, key = self._tagtype_lookup(tag) query = beets.library.MatchQuery(key, value) songs, playtime = query.count(self.lib) yield u'songs: ' + unicode(songs) yield u'playtime: ' + unicode(int(playtime)) # "Outputs." Just a dummy implementation because we don't control # any outputs. def cmd_outputs(self, conn): """List the available outputs.""" yield (u'outputid: 0', u'outputname: gstreamer', u'outputenabled: 1', ) def cmd_enableoutput(self, conn, output_id): output_id = cast_arg(int, output_id) if output_id != 0: raise ArgumentIndexError() def cmd_disableoutput(self, conn, output_id): output_id = cast_arg(int, output_id) if output_id == 0: raise BPDError(ERROR_ARG, u'cannot disable this output') else: raise ArgumentIndexError() # Playback control. The functions below hook into the # half-implementations provided by the base class. 
Together, they're # enough to implement all normal playback functionality. def cmd_play(self, conn, index=-1): new_index = index != -1 and index != self.current_index was_paused = self.paused super(Server, self).cmd_play(conn, index) if self.current_index > -1: # Not stopped. if was_paused and not new_index: # Just unpause. self.player.play() else: self.player.play_file(self.playlist[self.current_index].path) def cmd_pause(self, conn, state=None): super(Server, self).cmd_pause(conn, state) if self.paused: self.player.pause() elif self.player.playing: self.player.play() def cmd_stop(self, conn): super(Server, self).cmd_stop(conn) self.player.stop() def cmd_seek(self, conn, index, pos): """Seeks to the specified position in the specified song.""" index = cast_arg(int, index) pos = cast_arg(int, pos) super(Server, self).cmd_seek(conn, index, pos) self.player.seek(pos) # Volume control. def cmd_setvol(self, conn, vol): vol = cast_arg(int, vol) super(Server, self).cmd_setvol(conn, vol) self.player.volume = float(vol)/100 # Beets plugin hooks. class BPDPlugin(BeetsPlugin): """Provides the "beet bpd" command for running a music player server. 
""" def __init__(self): super(BPDPlugin, self).__init__() self.config.add({ 'host': u'', 'port': 6600, 'password': u'', }) def start_bpd(self, lib, host, port, password, debug): """Starts a BPD server.""" if debug: log.setLevel(logging.DEBUG) else: log.setLevel(logging.WARNING) try: Server(lib, host, port, password).run() except NoGstreamerError: global_log.error('Gstreamer Python bindings not found.') global_log.error('Install "python-gst0.10", "py27-gst-python", ' 'or similar package to use BPD.') def commands(self): cmd = beets.ui.Subcommand('bpd', help='run an MPD-compatible music player server') cmd.parser.add_option('-d', '--debug', action='store_true', help='dump all MPD traffic to stdout') def func(lib, opts, args): host = args.pop(0) if args else self.config['host'].get(unicode) port = args.pop(0) if args else self.config['port'].get(int) if args: raise beets.ui.UserError('too many arguments') password = self.config['password'].get(unicode) debug = opts.debug or False self.start_bpd(lib, host, int(port), password, debug) cmd.func = func return [cmd]
unknown
codeparrot/codeparrot-clean
"""Probability data from various sources. In this module, the "Essen corpus" refers to a corpus of 6,217 European folk songs from the Essen Folksong Collection. The songs are available at http://kern.ccarh.org/cgi-bin/ksbrowse?l=/essen and the list of songs used to train the monophonic key and meter programs is published at http://theory.esm.rochester.edu/temperley/music-prob/data/essen-train-list. The "Kostka-Payne corpus" refers to 46 excerpts from the common-practice repertoire, appearing in the workbook for the textbook "Tonal Harmony" by Stefan Kostka and Dorothy Payne. The list of of the songs in the corpus is published at http://theory.esm.rochester.edu/temperley/music-prob/data/kp-list. The source code and data for "Music and Probability" (Temperley 2007), which we use for much of our probalistic data, is published at http://theory.esm.rochester.edu/temperley/music-prob/materials.html. """ import math import numpy as np import scipy.stats from impromptica import settings def build_distance_profile_data(standard_deviation): """Builds distance profile data using the given standard deviation (as a distance between note values.) The profile data is built from a Gaussian distribution, which is an approximation for the actual data. The result is a table, where the probability of note value j given reference note value i is located at the index equal to the absolute value of j - i. """ result = [] dist = scipy.stats.norm(0, standard_deviation) for i in range(settings.MAX_NOTE + 1): result.append(dist.pdf(i)) return result def build_lognorm_tempo_profile_data(shape, scale, base_period, max_multiple): """Returns a log-Gaussian-derived likelihood table for periods of a metrical level. `base_period` is the time in seconds of the base period of which all other period hypotheses will be integer multiples of. `max_multiple` is the highest integer multiple by which the base period will be multiplied by for period hypotheses. 
""" result = np.zeros(max_multiple) dist = scipy.stats.lognorm(shape, scale=scale) for i in range(1, max_multiple + 1): result[i - 1] = dist.pdf(base_period * i) # Divide the values in the table by the maximum if the maximum is greater # than one. max_value = np.max(result) if max_value > 1: result /= max_value return result def build_rayleigh_tempo_profile_data(scale, base_period, max_multiple): """Returns a Rayleigh-dervied likelihood table for periods of a metrical level.""" result = np.zeros(max_multiple) dist = scipy.stats.rayleigh(0, scale=scale) for i in range(1, max_multiple + 1): result[i - 1] = dist.pdf(base_period * i) return result def build_tempo_change_profile_data( max_multiple, standard_deviation=settings.TEMPO_CHANGE_STANDARD_DEVIATION): """Returns a table of the likelihood of transitions in tempo. The table is indexed by the period of the new tempo and the period of the old tempo, where the periods are integers of some base period and range from one to the given `max_multiple`. If the tempos are not being measured in terms of a common base period, consider quantizing the the ratio of the new and old tempos to some fraction and using that with the table generated by this function. The table is zero-indexed but the likelihood estimates start at a period value of one, so the likelihood of a tempo change of a/b will be located at result[a-1][b-1]. As currently implemented, the likelihood of a tempo change is symmetric across inversion, that is, the likelihood of a tempo change of a/b is equal to the likelihood of a tempo change of b/a. """ # Precompute the transition probabilities for all possible transitions # between periods. This probability is modeled as a Gaussian distribution # centered at one. A transition from a period of n to m is assigned # likelihood according to the value of the Gaussian distribution at # (log(m/n))^2. 
dist = scipy.stats.norm(scale=standard_deviation) result = np.zeros((max_multiple, max_multiple)) for i in range(max_multiple): for j in range(i + 1): try: result[i][j] = result[j][i] = dist.pdf( math.pow(math.log((j + 1.) / (i + 1.)), 2.)) except FloatingPointError: result[i][j] = 0. # Normalize the distribution so that the highest likelihood value is 1. highest = np.max(result, axis=1).max() for i in range(max_multiple): result[i] /= highest return result # This monophonic key profile generated from the Essen corpus provides # probabilities of the offset of a note from the tonic note of a major key. # This profile sums to 1 because it represents the probability that the next # monophonic note is the given index offset from the tonic note of the key. # Source: David Temperley. Music and Probability (Figure 4.7). ESSEN_MAJOR_KEY_PROFILE_DATA = [ 0.184, 0.001, 0.155, 0.003, 0.191, 0.109, 0.005, 0.214, 0.001, 0.078, 0.004, 0.055, ] # This monophonic key profile generated from the Essen corpus provides # probabilities of the offset of a note from the tonic note of a minor key. # This profile sums to 1 because it represents the probability that the next # monophonic note is the given index offset from the tonic note of the key. # Source: David Temperley. Music and Probability (Figure 4.7). ESSEN_MINOR_KEY_PROFILE_DATA = [ 0.192, 0.005, 0.149, 0.179, 0.002, 0.144, 0.002, 0.201, 0.038, 0.012, 0.053, 0.022, ] # This polyphonic key profile generated from the Kostka-Payne corpus provides # probabilities of the offset of a note from the tonic note of a major key. # This profile doesn't sum to 1 because we view notes as independent variables # representing whether that note is present in a segment of the given key. # Source: David Temperley. Music and Probability (Figure 6.4). 
KP_MAJOR_KEY_PROFILE_DATA = [ 0.748, 0.060, 0.488, 0.082, 0.670, 0.460, 0.096, 0.715, 0.104, 0.366, 0.057, 0.400 ] # This polyphonic key profile generated from the Kostka-Payne corpus provides # probabilities of the offset of a note from the tonic note of a minor key. # This profile doesn't sum to 1 because we view notes as independent variables # representing whether that note is present in a segment of the given key. # Source: David Temperley. Music and Probability (Figure 6.4). KP_MINOR_KEY_PROFILE_DATA = [ 0.712, 0.084, 0.474, 0.618, 0.049, 0.460, 0.105, 0.747, 0.404, 0.067, 0.133, 0.330 ] # This proximity profile generated from the Essen corpus provides # probabilities of the distance of a note from the previous note. # Source: David Temperley. Music and Probability (Table 4.1). PROXIMITY_PROFILE_DATA = build_distance_profile_data(7.2) # This range profile generated from the Essen corpus provides probabilities # of the distance of a note from the central pitch. The central pitch is # essentially the mean note value of over a song. # Source: David Temperley. Music and Probability (Table 4.1). RANGE_PROFILE_DATA = build_distance_profile_data(29.0)
unknown
codeparrot/codeparrot-clean
declare module "*module.css" { const styles: { [className: string]: string; }; export default styles; }
typescript
github
https://github.com/vercel/next.js
examples/with-jest-babel/types.d.ts
## Input ```javascript function Component(props) { const f = item => item; const x = [...props.items].map(f); // `f` doesn't escape here... return [x, f]; // ...but it does here so it's memoized } export const FIXTURE_ENTRYPOINT = { fn: Component, params: [{items: [{id: 1}]}], isComponent: false, }; ``` ## Code ```javascript import { c as _c } from "react/compiler-runtime"; function Component(props) { const $ = _c(2); const f = _temp; let t0; if ($[0] !== props.items) { const x = [...props.items].map(f); t0 = [x, f]; $[0] = props.items; $[1] = t0; } else { t0 = $[1]; } return t0; } function _temp(item) { return item; } export const FIXTURE_ENTRYPOINT = { fn: Component, params: [{ items: [{ id: 1 }] }], isComponent: false, }; ``` ### Eval output (kind: ok) [[{"id":1}],"[[ function params=1 ]]"]
unknown
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/array-map-noAlias-escaping-function.expect.md
import unittest2 from ice import entities class TestEntity(unittest2.TestCase): def test_to_json(self): e = entities.Entity() e.id = 'test-123' e.name = 'banana' e.age = 12 self.assertDictEqual( e.to_dict(), { 'name': 'banana', 'age': 12 } ) def test_to_json_with_underscore(self): e = entities.Entity() e._test = 123 e.name = 'banana' self.assertEqual(e.to_dict(), {'name': 'banana'}) class TestSession(unittest2.TestCase): def test_missing_property(self): with self.assertRaises(KeyError): entities.Session() def test_to_json(self): e = entities.Session(client_ip_addr='127.0.0.1') self.assertDictEqual( e.to_dict(), {'client_ip_addr': '127.0.0.1'} ) class TestInstance(unittest2.TestCase): def test_with_missing_session_id(self): with self.assertRaises(KeyError): entities.Instance( public_ip_addr='127.0.0.1' ) def test_with_missing_ip_addr(self): with self.assertRaises(KeyError): entities.Instance( session_id='banana' ) def test_add_network(self): entityA = entities.Instance( session_id='banana', public_ip_addr='127.0.0.1' ) entityA.add_network('192.168.1.12', iface='eth0', bcast_addr='192.168.1.255') entityA.add_network('127.0.0.1', iface='lo') entityA.add_network('56.58.59.60') entityB = entities.Instance( session_id='banana', public_ip_addr='127.0.0.1', networks=[ { 'addr': '192.168.1.12', 'iface': 'eth0', 'bcast_addr': '192.168.1.255' }, { 'addr': '127.0.0.1', 'iface': 'lo' }, { 'addr': '56.58.59.60' } ] ) self.assertEqual(entityA.networks, entityB.networks)
unknown
codeparrot/codeparrot-clean
# Author: moparisthebest <admin@moparisthebest.com> # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import re import generic from sickbeard import logger from sickbeard import tvcache class BinSearchProvider(generic.NZBProvider): def __init__(self): generic.NZBProvider.__init__(self, "BinSearch") self.enabled = False self.public = True self.cache = BinSearchCache(self) self.urls = {'base_url': 'https://www.binsearch.info/'} self.url = self.urls['base_url'] def isEnabled(self): return self.enabled class BinSearchCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) # only poll Binsearch every 30 minutes max self.minTime = 30 # compile and save our regular expressions # this pulls the title from the URL in the description self.descTitleStart = re.compile('^.*https?://www\.binsearch\.info/.b=') self.descTitleEnd = re.compile('&amp;.*$') # these clean up the horrible mess of a title if the above fail self.titleCleaners = [ re.compile('.?yEnc.?\(\d+/\d+\)$'), re.compile(' \[\d+/\d+\] '), ] def _get_title_and_url(self, item): """ Retrieves the title and URL data from the item XML node item: An elementtree.ElementTree element representing the <item> tag of the RSS feed Returns: A tuple containing two strings representing title and URL respectively """ title = item.get('description') if title: title = u'' + title 
if self.descTitleStart.match(title): title = self.descTitleStart.sub('', title) title = self.descTitleEnd.sub('', title) title = title.replace('+', '.') else: # just use the entire title, looks hard/impossible to parse title = item.get('title') if title: for titleCleaner in self.titleCleaners: title = titleCleaner.sub('', title) url = item.get('link') if url: url = url.replace('&amp;', '&') return (title, url) def updateCache(self): # check if we should update if not self.shouldUpdate(): return # clear cache self._clearCache() # set updated self.setLastUpdate() cl = [] for group in ['alt.binaries.boneless','alt.binaries.misc','alt.binaries.hdtv','alt.binaries.hdtv.x264','alt.binaries.tv','alt.binaries.tvseries','alt.binaries.teevee']: url = self.provider.url + 'rss.php?' urlArgs = {'max': 1000,'g': group} url += urllib.urlencode(urlArgs) logger.log(u"BinSearch cache update URL: " + url, logger.DEBUG) for item in self.getRSSFeed(url)['entries'] or []: ci = self._parseItem(item) if ci is not None: cl.append(ci) if len(cl) > 0: myDB = self._getDB() myDB.mass_action(cl) def _checkAuth(self, data): return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None provider = BinSearchProvider()
unknown
codeparrot/codeparrot-clean
from operator import methodcaller import numpy as np import pytest import pandas as pd from pandas import ( MultiIndex, Series, date_range, ) import pandas._testing as tm class TestSeries: @pytest.mark.parametrize("func", ["rename_axis", "_set_axis_name"]) def test_set_axis_name_mi(self, func): ser = Series( [11, 21, 31], index=MultiIndex.from_tuples( [("A", x) for x in ["a", "B", "c"]], names=["l1", "l2"] ), ) result = methodcaller(func, ["L1", "L2"])(ser) assert ser.index.name is None assert ser.index.names == ["l1", "l2"] assert result.index.name is None assert result.index.names, ["L1", "L2"] def test_set_axis_name_raises(self): ser = Series([1]) msg = "No axis named 1 for object type Series" with pytest.raises(ValueError, match=msg): ser._set_axis_name(name="a", axis=1) def test_get_bool_data_preserve_dtype(self): ser = Series([True, False, True]) result = ser._get_bool_data() tm.assert_series_equal(result, ser) @pytest.mark.parametrize("data", [np.nan, pd.NaT, True, False]) def test_nonzero_single_element_raise_1(self, data): # single item nan to raise series = Series([data]) msg = "The truth value of a Series is ambiguous" with pytest.raises(ValueError, match=msg): bool(series) @pytest.mark.parametrize("data", [(True, True), (False, False)]) def test_nonzero_multiple_element_raise(self, data): # multiple bool are still an error msg_err = "The truth value of a Series is ambiguous" series = Series([data]) with pytest.raises(ValueError, match=msg_err): bool(series) @pytest.mark.parametrize("data", [1, 0, "a", 0.0]) def test_nonbool_single_element_raise(self, data): # single non-bool are an error msg_err1 = "The truth value of a Series is ambiguous" series = Series([data]) with pytest.raises(ValueError, match=msg_err1): bool(series) def test_metadata_propagation_indiv_resample(self): # resample ts = Series( np.random.default_rng(2).random(1000), index=date_range("20130101", periods=1000, freq="s"), name="foo", ) result = ts.resample("1min").mean() 
tm.assert_metadata_equivalent(ts, result) result = ts.resample("1min").min() tm.assert_metadata_equivalent(ts, result) result = ts.resample("1min").apply(lambda x: x.sum()) tm.assert_metadata_equivalent(ts, result) def test_metadata_propagation_indiv(self, monkeypatch): # check that the metadata matches up on the resulting ops ser = Series(range(3), range(3)) ser.name = "foo" ser2 = Series(range(3), range(3)) ser2.name = "bar" result = ser.T tm.assert_metadata_equivalent(ser, result) def finalize(self, other, method=None, **kwargs): for name in self._metadata: if method == "concat" and name == "filename": value = "+".join( [ getattr(obj, name) for obj in other.input_objs if getattr(obj, name, None) ] ) object.__setattr__(self, name, value) else: object.__setattr__(self, name, getattr(other, name, None)) return self with monkeypatch.context() as m: m.setattr(Series, "_metadata", ["name", "filename"]) m.setattr(Series, "__finalize__", finalize) ser.filename = "foo" ser2.filename = "bar" result = pd.concat([ser, ser2]) assert result.filename == "foo+bar" assert result.name is None
python
github
https://github.com/pandas-dev/pandas
pandas/tests/generic/test_series.py
#ifndef JEMALLOC_INTERNAL_PAC_H #define JEMALLOC_INTERNAL_PAC_H #include "jemalloc/internal/exp_grow.h" #include "jemalloc/internal/pai.h" #include "san_bump.h" /* * Page allocator classic; an implementation of the PAI interface that: * - Can be used for arenas with custom extent hooks. * - Can always satisfy any allocation request (including highly-fragmentary * ones). * - Can use efficient OS-level zeroing primitives for demand-filled pages. */ /* How "eager" decay/purging should be. */ enum pac_purge_eagerness_e { PAC_PURGE_ALWAYS, PAC_PURGE_NEVER, PAC_PURGE_ON_EPOCH_ADVANCE }; typedef enum pac_purge_eagerness_e pac_purge_eagerness_t; typedef struct pac_decay_stats_s pac_decay_stats_t; struct pac_decay_stats_s { /* Total number of purge sweeps. */ locked_u64_t npurge; /* Total number of madvise calls made. */ locked_u64_t nmadvise; /* Total number of pages purged. */ locked_u64_t purged; }; typedef struct pac_estats_s pac_estats_t; struct pac_estats_s { /* * Stats for a given index in the range [0, SC_NPSIZES] in the various * ecache_ts. * We track both bytes and # of extents: two extents in the same bucket * may have different sizes if adjacent size classes differ by more than * a page, so bytes cannot always be derived from # of extents. */ size_t ndirty; size_t dirty_bytes; size_t nmuzzy; size_t muzzy_bytes; size_t nretained; size_t retained_bytes; }; typedef struct pac_stats_s pac_stats_t; struct pac_stats_s { pac_decay_stats_t decay_dirty; pac_decay_stats_t decay_muzzy; /* * Number of unused virtual memory bytes currently retained. Retained * bytes are technically mapped (though always decommitted or purged), * but they are excluded from the mapped statistic (above). */ size_t retained; /* Derived. */ /* * Number of bytes currently mapped, excluding retained memory (and any * base-allocated memory, which is tracked by the arena stats). * * We name this "pac_mapped" to avoid confusion with the arena_stats * "mapped". 
*/ atomic_zu_t pac_mapped; /* VM space had to be leaked (undocumented). Normally 0. */ atomic_zu_t abandoned_vm; }; typedef struct pac_s pac_t; struct pac_s { /* * Must be the first member (we convert it to a PAC given only a * pointer). The handle to the allocation interface. */ pai_t pai; /* * Collections of extents that were previously allocated. These are * used when allocating extents, in an attempt to re-use address space. * * Synchronization: internal. */ ecache_t ecache_dirty; ecache_t ecache_muzzy; ecache_t ecache_retained; base_t *base; emap_t *emap; edata_cache_t *edata_cache; /* The grow info for the retained ecache. */ exp_grow_t exp_grow; malloc_mutex_t grow_mtx; /* Special allocator for guarded frequently reused extents. */ san_bump_alloc_t sba; /* How large extents should be before getting auto-purged. */ atomic_zu_t oversize_threshold; /* * Decay-based purging state, responsible for scheduling extent state * transitions. * * Synchronization: via the internal mutex. */ decay_t decay_dirty; /* dirty --> muzzy */ decay_t decay_muzzy; /* muzzy --> retained */ malloc_mutex_t *stats_mtx; pac_stats_t *stats; /* Extent serial number generator state. */ atomic_zu_t extent_sn_next; }; bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap, edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold, ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx); static inline size_t pac_mapped(pac_t *pac) { return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED); } static inline ehooks_t * pac_ehooks_get(pac_t *pac) { return base_ehooks_get(pac->base); } /* * All purging functions require holding decay->mtx. This is one of the few * places external modules are allowed to peek inside pa_shard_t internals. */ /* * Decays the number of pages currently in the ecache. This might not leave the * ecache empty if other threads are inserting dirty objects into it * concurrently with the call. 
*/ void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay, pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay); /* * Updates decay settings for the current time, and conditionally purges in * response (depending on decay_purge_setting). Returns whether or not the * epoch advanced. */ bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay, pac_decay_stats_t *decay_stats, ecache_t *ecache, pac_purge_eagerness_t eagerness); /* * Gets / sets the maximum amount that we'll grow an arena down the * grow-retained pathways (unless forced to by an allocaction request). * * Set new_limit to NULL if it's just a query, or old_limit to NULL if you don't * care about the previous value. * * Returns true on error (if the new limit is not valid). */ bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit, size_t *new_limit); bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state, ssize_t decay_ms, pac_purge_eagerness_t eagerness); ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state); void pac_reset(tsdn_t *tsdn, pac_t *pac); void pac_destroy(tsdn_t *tsdn, pac_t *pac); #endif /* JEMALLOC_INTERNAL_PAC_H */
c
github
https://github.com/redis/redis
deps/jemalloc/include/jemalloc/internal/pac.h
// SPDX-License-Identifier: GPL-2.0-or-later /* * Helpers for initial module or kernel cmdline parsing * Copyright (C) 2001 Rusty Russell. */ #include <linux/ctype.h> #include <linux/device.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/kstrtox.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/overflow.h> #include <linux/security.h> #include <linux/slab.h> #include <linux/string.h> #ifdef CONFIG_SYSFS /* Protects all built-in parameters, modules use their own param_lock */ static DEFINE_MUTEX(param_lock); /* Use the module's mutex, or if built-in use the built-in mutex */ #ifdef CONFIG_MODULES #define KPARAM_MUTEX(mod) ((mod) ? &(mod)->param_lock : &param_lock) #else #define KPARAM_MUTEX(mod) (&param_lock) #endif static inline void check_kparam_locked(struct module *mod) { BUG_ON(!mutex_is_locked(KPARAM_MUTEX(mod))); } #else static inline void check_kparam_locked(struct module *mod) { } #endif /* !CONFIG_SYSFS */ /* This just allows us to keep track of which parameters are kmalloced. */ struct kmalloced_param { struct list_head list; char val[]; }; static LIST_HEAD(kmalloced_params); static DEFINE_SPINLOCK(kmalloced_params_lock); static void *kmalloc_parameter(unsigned int size) { struct kmalloced_param *p; p = kmalloc(size_add(sizeof(*p), size), GFP_KERNEL); if (!p) return NULL; spin_lock(&kmalloced_params_lock); list_add(&p->list, &kmalloced_params); spin_unlock(&kmalloced_params_lock); return p->val; } /* Does nothing if parameter wasn't kmalloced above. 
*/ static void maybe_kfree_parameter(void *param) { struct kmalloced_param *p; spin_lock(&kmalloced_params_lock); list_for_each_entry(p, &kmalloced_params, list) { if (p->val == param) { list_del(&p->list); kfree(p); break; } } spin_unlock(&kmalloced_params_lock); } static char dash2underscore(char c) { if (c == '-') return '_'; return c; } bool parameqn(const char *a, const char *b, size_t n) { size_t i; for (i = 0; i < n; i++) { if (dash2underscore(a[i]) != dash2underscore(b[i])) return false; } return true; } bool parameq(const char *a, const char *b) { return parameqn(a, b, strlen(a)+1); } static bool param_check_unsafe(const struct kernel_param *kp) { if (kp->flags & KERNEL_PARAM_FL_HWPARAM && security_locked_down(LOCKDOWN_MODULE_PARAMETERS)) return false; if (kp->flags & KERNEL_PARAM_FL_UNSAFE) { pr_notice("Setting dangerous option %s - tainting kernel\n", kp->name); add_taint(TAINT_USER, LOCKDEP_STILL_OK); } return true; } static int parse_one(char *param, char *val, const char *doing, const struct kernel_param *params, unsigned num_params, s16 min_level, s16 max_level, void *arg, parse_unknown_fn handle_unknown) { unsigned int i; int err; /* Find parameter */ for (i = 0; i < num_params; i++) { if (parameq(param, params[i].name)) { if (params[i].level < min_level || params[i].level > max_level) return 0; /* No one handled NULL, so do it here. */ if (!val && !(params[i].ops->flags & KERNEL_PARAM_OPS_FL_NOARG)) return -EINVAL; pr_debug("handling %s with %p\n", param, params[i].ops->set); kernel_param_lock(params[i].mod); if (param_check_unsafe(&params[i])) err = params[i].ops->set(val, &params[i]); else err = -EPERM; kernel_param_unlock(params[i].mod); return err; } } if (handle_unknown) { pr_debug("doing %s: %s='%s'\n", doing, param, val); return handle_unknown(param, val, doing, arg); } pr_debug("Unknown argument '%s'\n", param); return -ENOENT; } /* Args looks like "foo=bar,bar2 baz=fuz wiz". 
*/ char *parse_args(const char *doing, char *args, const struct kernel_param *params, unsigned num, s16 min_level, s16 max_level, void *arg, parse_unknown_fn unknown) { char *param, *val, *err = NULL; /* Chew leading spaces */ args = skip_spaces(args); if (*args) pr_debug("doing %s, parsing ARGS: '%s'\n", doing, args); while (*args) { int ret; int irq_was_disabled; args = next_arg(args, &param, &val); /* Stop at -- */ if (!val && strcmp(param, "--") == 0) return err ?: args; irq_was_disabled = irqs_disabled(); ret = parse_one(param, val, doing, params, num, min_level, max_level, arg, unknown); if (irq_was_disabled && !irqs_disabled()) pr_warn("%s: option '%s' enabled irq's!\n", doing, param); switch (ret) { case 0: continue; case -ENOENT: pr_err("%s: Unknown parameter `%s'\n", doing, param); break; case -ENOSPC: pr_err("%s: `%s' too large for parameter `%s'\n", doing, val ?: "", param); break; default: pr_err("%s: `%s' invalid for parameter `%s'\n", doing, val ?: "", param); break; } err = ERR_PTR(ret); } return err; } /* Lazy bastard, eh? 
*/ #define STANDARD_PARAM_DEF(name, type, format, strtolfn) \ int param_set_##name(const char *val, const struct kernel_param *kp) \ { \ return strtolfn(val, 0, (type *)kp->arg); \ } \ int param_get_##name(char *buffer, const struct kernel_param *kp) \ { \ return scnprintf(buffer, PAGE_SIZE, format "\n", \ *((type *)kp->arg)); \ } \ const struct kernel_param_ops param_ops_##name = { \ .set = param_set_##name, \ .get = param_get_##name, \ }; \ EXPORT_SYMBOL(param_set_##name); \ EXPORT_SYMBOL(param_get_##name); \ EXPORT_SYMBOL(param_ops_##name) STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8); STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16); STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16); STANDARD_PARAM_DEF(int, int, "%i", kstrtoint); STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint); STANDARD_PARAM_DEF(long, long, "%li", kstrtol); STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul); STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull); STANDARD_PARAM_DEF(hexint, unsigned int, "%#08x", kstrtouint); int param_set_uint_minmax(const char *val, const struct kernel_param *kp, unsigned int min, unsigned int max) { unsigned int num; int ret; if (!val) return -EINVAL; ret = kstrtouint(val, 0, &num); if (ret) return ret; if (num < min || num > max) return -EINVAL; *((unsigned int *)kp->arg) = num; return 0; } EXPORT_SYMBOL_GPL(param_set_uint_minmax); int param_set_charp(const char *val, const struct kernel_param *kp) { size_t len, maxlen = 1024; len = strnlen(val, maxlen + 1); if (len == maxlen + 1) { pr_err("%s: string parameter too long\n", kp->name); return -ENOSPC; } maybe_kfree_parameter(*(char **)kp->arg); /* * This is a hack. We can't kmalloc() in early boot, and we * don't need to; this mangled commandline is preserved. 
*/ if (slab_is_available()) { *(char **)kp->arg = kmalloc_parameter(len + 1); if (!*(char **)kp->arg) return -ENOMEM; strcpy(*(char **)kp->arg, val); } else *(const char **)kp->arg = val; return 0; } EXPORT_SYMBOL(param_set_charp); int param_get_charp(char *buffer, const struct kernel_param *kp) { return scnprintf(buffer, PAGE_SIZE, "%s\n", *((char **)kp->arg)); } EXPORT_SYMBOL(param_get_charp); void param_free_charp(void *arg) { maybe_kfree_parameter(*((char **)arg)); } EXPORT_SYMBOL(param_free_charp); const struct kernel_param_ops param_ops_charp = { .set = param_set_charp, .get = param_get_charp, .free = param_free_charp, }; EXPORT_SYMBOL(param_ops_charp); /* Actually could be a bool or an int, for historical reasons. */ int param_set_bool(const char *val, const struct kernel_param *kp) { /* No equals means "set"... */ if (!val) val = "1"; /* One of =[yYnN01] */ return kstrtobool(val, kp->arg); } EXPORT_SYMBOL(param_set_bool); int param_get_bool(char *buffer, const struct kernel_param *kp) { /* Y and N chosen as being relatively non-coder friendly */ return sprintf(buffer, "%c\n", *(bool *)kp->arg ? 'Y' : 'N'); } EXPORT_SYMBOL(param_get_bool); const struct kernel_param_ops param_ops_bool = { .flags = KERNEL_PARAM_OPS_FL_NOARG, .set = param_set_bool, .get = param_get_bool, }; EXPORT_SYMBOL(param_ops_bool); int param_set_bool_enable_only(const char *val, const struct kernel_param *kp) { int err; bool new_value; bool orig_value = *(bool *)kp->arg; struct kernel_param dummy_kp = *kp; dummy_kp.arg = &new_value; err = param_set_bool(val, &dummy_kp); if (err) return err; /* Don't let them unset it once it's set! 
*/ if (!new_value && orig_value) return -EROFS; if (new_value) err = param_set_bool(val, kp); return err; } EXPORT_SYMBOL_GPL(param_set_bool_enable_only); const struct kernel_param_ops param_ops_bool_enable_only = { .flags = KERNEL_PARAM_OPS_FL_NOARG, .set = param_set_bool_enable_only, .get = param_get_bool, }; EXPORT_SYMBOL_GPL(param_ops_bool_enable_only); /* This one must be bool. */ int param_set_invbool(const char *val, const struct kernel_param *kp) { int ret; bool boolval; struct kernel_param dummy; dummy.arg = &boolval; ret = param_set_bool(val, &dummy); if (ret == 0) *(bool *)kp->arg = !boolval; return ret; } EXPORT_SYMBOL(param_set_invbool); int param_get_invbool(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "%c\n", (*(bool *)kp->arg) ? 'N' : 'Y'); } EXPORT_SYMBOL(param_get_invbool); const struct kernel_param_ops param_ops_invbool = { .set = param_set_invbool, .get = param_get_invbool, }; EXPORT_SYMBOL(param_ops_invbool); int param_set_bint(const char *val, const struct kernel_param *kp) { /* Match bool exactly, by re-using it. */ struct kernel_param boolkp = *kp; bool v; int ret; boolkp.arg = &v; ret = param_set_bool(val, &boolkp); if (ret == 0) *(int *)kp->arg = v; return ret; } EXPORT_SYMBOL(param_set_bint); const struct kernel_param_ops param_ops_bint = { .flags = KERNEL_PARAM_OPS_FL_NOARG, .set = param_set_bint, .get = param_get_int, }; EXPORT_SYMBOL(param_ops_bint); /* We break the rule and mangle the string. */ static int param_array(struct module *mod, const char *name, const char *val, unsigned int min, unsigned int max, void *elem, int elemsize, int (*set)(const char *, const struct kernel_param *kp), s16 level, unsigned int *num) { int ret; struct kernel_param kp; char save; /* Get the name right for errors. */ kp.name = name; kp.arg = elem; kp.level = level; *num = 0; /* We expect a comma-separated list of values. 
*/ do { int len; if (*num == max) { pr_err("%s: can only take %i arguments\n", name, max); return -EINVAL; } len = strcspn(val, ","); /* nul-terminate and parse */ save = val[len]; ((char *)val)[len] = '\0'; check_kparam_locked(mod); ret = set(val, &kp); if (ret != 0) return ret; kp.arg += elemsize; val += len+1; (*num)++; } while (save == ','); if (*num < min) { pr_err("%s: needs at least %i arguments\n", name, min); return -EINVAL; } return 0; } static int param_array_set(const char *val, const struct kernel_param *kp) { const struct kparam_array *arr = kp->arr; unsigned int temp_num; return param_array(kp->mod, kp->name, val, 1, arr->max, arr->elem, arr->elemsize, arr->ops->set, kp->level, arr->num ?: &temp_num); } static int param_array_get(char *buffer, const struct kernel_param *kp) { int i, off, ret; const struct kparam_array *arr = kp->arr; struct kernel_param p = *kp; for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) { /* Replace \n with comma */ if (i) buffer[off - 1] = ','; p.arg = arr->elem + arr->elemsize * i; check_kparam_locked(p.mod); ret = arr->ops->get(buffer + off, &p); if (ret < 0) return ret; off += ret; } buffer[off] = '\0'; return off; } static void param_array_free(void *arg) { unsigned int i; const struct kparam_array *arr = arg; if (arr->ops->free) for (i = 0; i < (arr->num ? 
*arr->num : arr->max); i++) arr->ops->free(arr->elem + arr->elemsize * i); } const struct kernel_param_ops param_array_ops = { .set = param_array_set, .get = param_array_get, .free = param_array_free, }; EXPORT_SYMBOL(param_array_ops); int param_set_copystring(const char *val, const struct kernel_param *kp) { const struct kparam_string *kps = kp->str; const size_t len = strnlen(val, kps->maxlen); if (len == kps->maxlen) { pr_err("%s: string doesn't fit in %u chars.\n", kp->name, kps->maxlen-1); return -ENOSPC; } memcpy(kps->string, val, len + 1); return 0; } EXPORT_SYMBOL(param_set_copystring); int param_get_string(char *buffer, const struct kernel_param *kp) { const struct kparam_string *kps = kp->str; return scnprintf(buffer, PAGE_SIZE, "%s\n", kps->string); } EXPORT_SYMBOL(param_get_string); const struct kernel_param_ops param_ops_string = { .set = param_set_copystring, .get = param_get_string, }; EXPORT_SYMBOL(param_ops_string); /* sysfs output in /sys/modules/XYZ/parameters/ */ #define to_module_attr(n) container_of_const(n, struct module_attribute, attr) #define to_module_kobject(n) container_of(n, struct module_kobject, kobj) struct param_attribute { struct module_attribute mattr; const struct kernel_param *param; }; struct module_param_attrs { unsigned int num; struct attribute_group grp; struct param_attribute attrs[] __counted_by(num); }; #ifdef CONFIG_SYSFS #define to_param_attr(n) container_of_const(n, struct param_attribute, mattr) static ssize_t param_attr_show(const struct module_attribute *mattr, struct module_kobject *mk, char *buf) { int count; const struct param_attribute *attribute = to_param_attr(mattr); if (!attribute->param->ops->get) return -EPERM; kernel_param_lock(mk->mod); count = attribute->param->ops->get(buf, attribute->param); kernel_param_unlock(mk->mod); return count; } /* sysfs always hands a nul-terminated string in buf. We rely on that. 
*/ static ssize_t param_attr_store(const struct module_attribute *mattr, struct module_kobject *mk, const char *buf, size_t len) { int err; const struct param_attribute *attribute = to_param_attr(mattr); if (!attribute->param->ops->set) return -EPERM; kernel_param_lock(mk->mod); if (param_check_unsafe(attribute->param)) err = attribute->param->ops->set(buf, attribute->param); else err = -EPERM; kernel_param_unlock(mk->mod); if (!err) return len; return err; } #endif #ifdef CONFIG_SYSFS void kernel_param_lock(struct module *mod) { mutex_lock(KPARAM_MUTEX(mod)); } void kernel_param_unlock(struct module *mod) { mutex_unlock(KPARAM_MUTEX(mod)); } EXPORT_SYMBOL(kernel_param_lock); EXPORT_SYMBOL(kernel_param_unlock); /* * add_sysfs_param - add a parameter to sysfs * @mk: struct module_kobject * @kp: the actual parameter definition to add to sysfs * @name: name of parameter * * Create a kobject if for a (per-module) parameter if mp NULL, and * create file in sysfs. Returns an error on out of memory. Always cleans up * if there's an error. */ static __init_or_module int add_sysfs_param(struct module_kobject *mk, const struct kernel_param *kp, const char *name) { struct module_param_attrs *new_mp; struct attribute **new_attrs; unsigned int i; /* We don't bother calling this with invisible parameters. */ BUG_ON(!kp->perm); if (!mk->mp) { /* First allocation. */ mk->mp = kzalloc(sizeof(*mk->mp), GFP_KERNEL); if (!mk->mp) return -ENOMEM; mk->mp->grp.name = "parameters"; /* NULL-terminated attribute array. */ mk->mp->grp.attrs = kzalloc(sizeof(mk->mp->grp.attrs[0]), GFP_KERNEL); /* Caller will cleanup via free_module_param_attrs */ if (!mk->mp->grp.attrs) return -ENOMEM; } /* Enlarge allocations. 
*/ new_mp = krealloc(mk->mp, struct_size(mk->mp, attrs, mk->mp->num + 1), GFP_KERNEL); if (!new_mp) return -ENOMEM; mk->mp = new_mp; mk->mp->num++; /* Extra pointer for NULL terminator */ new_attrs = krealloc_array(mk->mp->grp.attrs, mk->mp->num + 1, sizeof(mk->mp->grp.attrs[0]), GFP_KERNEL); if (!new_attrs) return -ENOMEM; mk->mp->grp.attrs = new_attrs; /* Tack new one on the end. */ memset(&mk->mp->attrs[mk->mp->num - 1], 0, sizeof(mk->mp->attrs[0])); sysfs_attr_init(&mk->mp->attrs[mk->mp->num - 1].mattr.attr); mk->mp->attrs[mk->mp->num - 1].param = kp; mk->mp->attrs[mk->mp->num - 1].mattr.show = param_attr_show; /* Do not allow runtime DAC changes to make param writable. */ if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0) mk->mp->attrs[mk->mp->num - 1].mattr.store = param_attr_store; else mk->mp->attrs[mk->mp->num - 1].mattr.store = NULL; mk->mp->attrs[mk->mp->num - 1].mattr.attr.name = (char *)name; mk->mp->attrs[mk->mp->num - 1].mattr.attr.mode = kp->perm; /* Fix up all the pointers, since krealloc can move us */ for (i = 0; i < mk->mp->num; i++) mk->mp->grp.attrs[i] = &mk->mp->attrs[i].mattr.attr; mk->mp->grp.attrs[mk->mp->num] = NULL; return 0; } #ifdef CONFIG_MODULES static void free_module_param_attrs(struct module_kobject *mk) { if (mk->mp) kfree(mk->mp->grp.attrs); kfree(mk->mp); mk->mp = NULL; } /* * module_param_sysfs_setup - setup sysfs support for one module * @mod: module * @kparam: module parameters (array) * @num_params: number of module parameters * * Adds sysfs entries for module parameters under * /sys/module/[mod->name]/parameters/ */ int module_param_sysfs_setup(struct module *mod, const struct kernel_param *kparam, unsigned int num_params) { int i, err; bool params = false; for (i = 0; i < num_params; i++) { if (kparam[i].perm == 0) continue; err = add_sysfs_param(&mod->mkobj, &kparam[i], kparam[i].name); if (err) { free_module_param_attrs(&mod->mkobj); return err; } params = true; } if (!params) return 0; /* Create the param group. 
*/ err = sysfs_create_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp); if (err) free_module_param_attrs(&mod->mkobj); return err; } /* * module_param_sysfs_remove - remove sysfs support for one module * @mod: module * * Remove sysfs entries for module parameters and the corresponding * kobject. */ void module_param_sysfs_remove(struct module *mod) { if (mod->mkobj.mp) { sysfs_remove_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp); /* * We are positive that no one is using any param * attrs at this point. Deallocate immediately. */ free_module_param_attrs(&mod->mkobj); } } #endif void destroy_params(const struct kernel_param *params, unsigned num) { unsigned int i; for (i = 0; i < num; i++) if (params[i].ops->free) params[i].ops->free(params[i].arg); } struct module_kobject * __init_or_module lookup_or_create_module_kobject(const char *name) { struct module_kobject *mk; struct kobject *kobj; int err; kobj = kset_find_obj(module_kset, name); if (kobj) return to_module_kobject(kobj); mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL); if (!mk) return NULL; mk->mod = THIS_MODULE; mk->kobj.kset = module_kset; err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, "%s", name); if (IS_ENABLED(CONFIG_MODULES) && !err) err = sysfs_create_file(&mk->kobj, &module_uevent.attr); if (err) { kobject_put(&mk->kobj); pr_crit("Adding module '%s' to sysfs failed (%d), the system may be unstable.\n", name, err); return NULL; } /* So that we hold reference in both cases. */ kobject_get(&mk->kobj); return mk; } static void __init kernel_add_sysfs_param(const char *name, const struct kernel_param *kparam, unsigned int name_skip) { struct module_kobject *mk; int err; mk = lookup_or_create_module_kobject(name); if (!mk) return; /* We need to remove old parameters before adding more. */ if (mk->mp) sysfs_remove_group(&mk->kobj, &mk->mp->grp); /* These should not fail at boot. 
*/ err = add_sysfs_param(mk, kparam, kparam->name + name_skip); BUG_ON(err); err = sysfs_create_group(&mk->kobj, &mk->mp->grp); BUG_ON(err); kobject_uevent(&mk->kobj, KOBJ_ADD); kobject_put(&mk->kobj); } /* * param_sysfs_builtin - add sysfs parameters for built-in modules * * Add module_parameters to sysfs for "modules" built into the kernel. * * The "module" name (KBUILD_MODNAME) is stored before a dot, the * "parameter" name is stored behind a dot in kernel_param->name. So, * extract the "module" name for all built-in kernel_param-eters, * and for all who have the same, call kernel_add_sysfs_param. */ static void __init param_sysfs_builtin(void) { const struct kernel_param *kp; unsigned int name_len; char modname[MODULE_NAME_LEN]; for (kp = __start___param; kp < __stop___param; kp++) { char *dot; if (kp->perm == 0) continue; dot = strchr(kp->name, '.'); if (!dot) { /* This happens for core_param() */ strscpy(modname, "kernel"); name_len = 0; } else { name_len = dot - kp->name + 1; strscpy(modname, kp->name, name_len); } kernel_add_sysfs_param(modname, kp, name_len); } } ssize_t __modver_version_show(const struct module_attribute *mattr, struct module_kobject *mk, char *buf) { const struct module_version_attribute *vattr = container_of_const(mattr, struct module_version_attribute, mattr); return scnprintf(buf, PAGE_SIZE, "%s\n", vattr->version); } extern const struct module_version_attribute __start___modver[]; extern const struct module_version_attribute __stop___modver[]; static void __init version_sysfs_builtin(void) { const struct module_version_attribute *vattr; struct module_kobject *mk; int err; for (vattr = __start___modver; vattr < __stop___modver; vattr++) { mk = lookup_or_create_module_kobject(vattr->module_name); if (mk) { err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); WARN_ON_ONCE(err); kobject_uevent(&mk->kobj, KOBJ_ADD); kobject_put(&mk->kobj); } } } /* module-related sysfs stuff */ static ssize_t module_attr_show(struct kobject *kobj, 
struct attribute *attr, char *buf) { const struct module_attribute *attribute; struct module_kobject *mk; int ret; attribute = to_module_attr(attr); mk = to_module_kobject(kobj); if (!attribute->show) return -EIO; ret = attribute->show(attribute, mk, buf); return ret; } static ssize_t module_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { const struct module_attribute *attribute; struct module_kobject *mk; int ret; attribute = to_module_attr(attr); mk = to_module_kobject(kobj); if (!attribute->store) return -EIO; ret = attribute->store(attribute, mk, buf, len); return ret; } static const struct sysfs_ops module_sysfs_ops = { .show = module_attr_show, .store = module_attr_store, }; static int uevent_filter(const struct kobject *kobj) { const struct kobj_type *ktype = get_ktype(kobj); if (ktype == &module_ktype) return 1; return 0; } static const struct kset_uevent_ops module_uevent_ops = { .filter = uevent_filter, }; struct kset *module_kset; static void module_kobj_release(struct kobject *kobj) { struct module_kobject *mk = to_module_kobject(kobj); if (mk->kobj_completion) complete(mk->kobj_completion); } const struct kobj_type module_ktype = { .release = module_kobj_release, .sysfs_ops = &module_sysfs_ops, }; /* * param_sysfs_init - create "module" kset * * This must be done before the initramfs is unpacked and * request_module() thus becomes possible, because otherwise the * module load would fail in mod_sysfs_init. 
 */
static int __init param_sysfs_init(void)
{
	/*
	 * Create the global /sys/module kset. Every module_kobject (for both
	 * loadable and built-in "modules") is parented here, so this must
	 * exist before any module can register its sysfs entries.
	 */
	module_kset = kset_create_and_add("module", &module_uevent_ops, NULL);
	if (!module_kset) {
		printk(KERN_WARNING "%s (%d): error creating kset\n",
			__FILE__, __LINE__);
		return -ENOMEM;
	}

	return 0;
}
/* Run early (subsys level) so module loading from initramfs works. */
subsys_initcall(param_sysfs_init);

/*
 * param_sysfs_builtin_init - add sysfs version and parameter
 * attributes for built-in modules
 */
static int __init param_sysfs_builtin_init(void)
{
	/* If param_sysfs_init() failed, there is nowhere to hang the attrs. */
	if (!module_kset)
		return -ENOMEM;

	version_sysfs_builtin();
	param_sysfs_builtin();
	return 0;
}
/*
 * Deferred to late_initcall: built-in params must already be parsed and
 * the module kset created before their sysfs files are populated.
 */
late_initcall(param_sysfs_builtin_init);
#endif /* CONFIG_SYSFS */
c
github
https://github.com/torvalds/linux
kernel/params.c
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import annotations import textwrap from datetime import datetime from airflow.models.dag import DAG from airflow.providers.standard.operators.bash import BashOperator DEFAULT_DATE = datetime(2016, 1, 1) args = { "owner": "airflow", "start_date": DEFAULT_DATE, } dag = DAG(dag_id="test_impersonation", schedule=None, default_args=args) run_as_user = "airflow_test_user" test_command = textwrap.dedent( f"""\ if [ '{run_as_user}' != "$(whoami)" ]; then echo current user is not {run_as_user}! exit 1 fi """ ) task = BashOperator( task_id="test_impersonated_user", bash_command=test_command, dag=dag, run_as_user=run_as_user, )
python
github
https://github.com/apache/airflow
airflow-core/tests/unit/dags/test_impersonation.py
@import "@sass/abstracts/vars/colors";
@import "@sass/abstracts/mixins";

// Link-styled buttons. Each variant exposes two class names
// (e.g. `.button-default` and `.default`) so both prefixed and
// bare class usages in markup pick up the same styling. All
// variants share the base shape from the default-link-button mixin.

// Neutral variant: light grey background, standard text color.
.button-default,
.default {
  @include default-link-button();
  background: #f6f6f6;
  color: $text-basic;
}

// Success/confirm variant: green, darkens on hover.
.button-success,
.success {
  @include default-link-button();
  background: $btn-green-light;
  color: $text-white;
  &:hover {
    color: $text-white;
    background: $btn-green;
  }
}

// Warning/destructive variant: red, shifts to active red on hover.
.button-warning,
.warning {
  @include default-link-button();
  background: $btn-red;
  color: $text-white;
  &:hover {
    color: $text-white;
    background: $btn-red-active;
  }
}
unknown
github
https://github.com/vercel/next.js
examples/cms-sitecore-xmcloud/src/assets/sass/base/links/_link-button.scss
## Input ```javascript // @flow @enableEmitHookGuards @panicThreshold:"none" @enableFire component Foo(useDynamicHook) { useDynamicHook(); return <div>hello world</div>; } ``` ## Code ```javascript function Foo({ useDynamicHook, }: $ReadOnly<{ useDynamicHook: any }>): React.Node { useDynamicHook(); return <div>hello world</div>; } ``` ### Eval output (kind: exception) Fixture not implemented
unknown
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/repro-dont-add-hook-guards-on-retry.expect.md
# -*- test-case-name: twisted.python.test.test_dist3 -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Support for installing Twisted on Python 3. Only necessary while parts of Twisted are unported. @var modules: A list of modules that have been ported, e.g. "twisted.python.versions"; a package name (e.g. "twisted.python") indicates the corresponding __init__.py file has been ported (e.g. "twisted/python/__init__.py"). To reduce merge conflicts, add new lines in alphabetical sort. @var testModules: A list of test modules that have been ported, e.g "twisted.python.test.test_versions". To reduce merge conflicts, add new lines in alphabetical sort. @var almostModules: A list of any other modules which are needed by any of the modules in the other two lists, but which themselves have not actually been properly ported to Python 3. These modules might work well enough to satisfy some of the requirements of the modules that depend on them, but cannot be considered generally usable otherwise. @var modulesToInstall: A list of all modules that should be installed on Python 3. 
""" from __future__ import division modules = [ "twisted", "twisted.copyright", "twisted.internet", "twisted.internet.abstract", "twisted.internet.address", "twisted.internet.base", "twisted.internet.default", "twisted.internet.defer", "twisted.internet.endpoints", "twisted.internet.epollreactor", "twisted.internet.error", "twisted.internet.interfaces", "twisted.internet.fdesc", "twisted.internet.gireactor", "twisted.internet._glibbase", "twisted.internet.gtk3reactor", "twisted.internet.main", "twisted.internet._newtls", "twisted.internet.posixbase", "twisted.internet.protocol", "twisted.internet.pollreactor", "twisted.internet.reactor", "twisted.internet.selectreactor", "twisted.internet._signals", "twisted.internet.ssl", "twisted.internet.task", "twisted.internet.tcp", "twisted.internet.test", "twisted.internet.test.connectionmixins", "twisted.internet.test.modulehelpers", "twisted.internet.test._posixifaces", "twisted.internet.test.reactormixins", "twisted.internet.threads", "twisted.internet.udp", "twisted.internet.utils", "twisted.names", "twisted.names.cache", "twisted.names.client", "twisted.names.common", "twisted.names.dns", "twisted.names.error", "twisted.names.hosts", "twisted.names.resolve", "twisted.names._rfc1982", "twisted.names.test", "twisted.names._version", "twisted.protocols", "twisted.protocols.basic", "twisted.protocols.policies", "twisted.protocols.test", "twisted.protocols.tls", "twisted.python", "twisted.python.compat", "twisted.python.components", "twisted.python.constants", "twisted.python.context", "twisted.python.deprecate", "twisted.python.dist3", "twisted.python.failure", "twisted.python.filepath", "twisted.python.lockfile", "twisted.python.log", "twisted.python.monkey", "twisted.python.randbytes", "twisted.python.reflect", "twisted.python.runtime", "twisted.python.test", "twisted.python.test.deprecatedattributes", "twisted.python.test.modules_helpers", "twisted.python.threadable", "twisted.python.threadpool", "twisted.python.util", 
"twisted.python.versions", "twisted.test", "twisted.test.proto_helpers", "twisted.test.iosim", "twisted.test.ssl_helpers", "twisted.trial", "twisted.trial._asynctest", "twisted.trial.itrial", "twisted.trial._synctest", "twisted.trial.test", "twisted.trial.test.detests", "twisted.trial.test.erroneous", "twisted.trial.test.suppression", "twisted.trial.test.packages", "twisted.trial.test.skipping", "twisted.trial.test.suppression", "twisted.trial.unittest", "twisted.trial.util", "twisted._version", "twisted.web", "twisted.web.http_headers", "twisted.web.resource", "twisted.web._responses", "twisted.web.test", "twisted.web.test.requesthelper", "twisted.web._version", ] testModules = [ "twisted.internet.test.test_abstract", "twisted.internet.test.test_address", "twisted.internet.test.test_base", "twisted.internet.test.test_core", "twisted.internet.test.test_default", "twisted.internet.test.test_endpoints", "twisted.internet.test.test_epollreactor", "twisted.internet.test.test_fdset", "twisted.internet.test.test_filedescriptor", "twisted.internet.test.test_inlinecb", "twisted.internet.test.test_gireactor", "twisted.internet.test.test_glibbase", "twisted.internet.test.test_main", "twisted.internet.test.test_newtls", "twisted.internet.test.test_posixbase", "twisted.internet.test.test_protocol", "twisted.internet.test.test_sigchld", "twisted.internet.test.test_tcp", "twisted.internet.test.test_threads", "twisted.internet.test.test_tls", "twisted.internet.test.test_udp", "twisted.internet.test.test_udp_internals", "twisted.names.test.test_cache", "twisted.names.test.test_client", "twisted.names.test.test_common", "twisted.names.test.test_dns", "twisted.names.test.test_hosts", "twisted.names.test.test_rfc1982", "twisted.protocols.test.test_basic", "twisted.protocols.test.test_tls", "twisted.python.test.test_components", "twisted.python.test.test_constants", "twisted.python.test.test_deprecate", "twisted.python.test.test_dist3", "twisted.python.test.test_runtime", 
"twisted.python.test.test_util", "twisted.python.test.test_versions", "twisted.test.test_abstract", "twisted.test.test_compat", "twisted.test.test_context", "twisted.test.test_cooperator", "twisted.test.test_defer", "twisted.test.test_defgen", "twisted.test.test_error", "twisted.test.test_factories", "twisted.test.test_failure", "twisted.test.test_fdesc", "twisted.test.test_internet", "twisted.test.test_iosim", "twisted.test.test_iutils", "twisted.test.test_lockfile", "twisted.test.test_log", "twisted.test.test_loopback", "twisted.test.test_monkey", "twisted.test.test_paths", "twisted.test.test_policies", "twisted.test.test_randbytes", "twisted.test.test_reflect", "twisted.test.test_setup", "twisted.test.test_ssl", "twisted.test.test_sslverify", "twisted.test.test_task", "twisted.test.test_tcp", "twisted.test.test_tcp_internals", "twisted.test.test_threadable", "twisted.test.test_threads", "twisted.test.test_twisted", "twisted.test.test_threadpool", "twisted.test.test_udp", "twisted.trial.test.test_assertions", "twisted.trial.test.test_asyncassertions", "twisted.trial.test.test_deferred", "twisted.trial.test.test_pyunitcompat", "twisted.trial.test.test_suppression", "twisted.trial.test.test_testcase", "twisted.trial.test.test_tests", "twisted.trial.test.test_util", "twisted.trial.test.test_warning", # The downloadPage tests weren't ported: "twisted.web.test.test_webclient", "twisted.web.test.test_http", "twisted.web.test.test_http_headers", "twisted.web.test.test_resource", "twisted.web.test.test_web", ] almostModules = [ # Missing test coverage, see #6156: "twisted.internet._sslverify", # twisted.names.client semi-depends on twisted.names.root, but only on # Windows really: "twisted.names.root", # Missing test coverage: "twisted.protocols.loopback", # Minimally used by setup3.py: "twisted.python.dist", # twisted.python.filepath depends on twisted.python.win32, but on Linux it # only really needs to import: "twisted.python.win32", "twisted.test.reflect_helper_IE", 
"twisted.test.reflect_helper_VE", "twisted.test.reflect_helper_ZDE", # Required by some of the ported trial tests: "twisted.trial.reporter", # Agent code and downloadPage aren't ported, test coverage isn't complete: "twisted.web.client", # twisted.web.resource depends on twisted.web.error, so it is sorta # ported, but its tests are not yet ported, so it probably doesn't # completely work. "twisted.web.error", # Required by twisted.web.server, no actual code here: "twisted.web.iweb", # Required by twisted.web.server for an error handling case: "twisted.web.html", # This module has a lot of missing test coverage. What tests it has pass, # but it needs a lot more. It was ported only enough to make the client # work. "twisted.web.http", # GzipEncoder and allowed methods functionality not ported, no doubt # missing lots of test coverage: "twisted.web.server", ] modulesToInstall = modules + testModules + almostModules
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################### # # ByAddress # Retrieves weather and UV index data for a given Geo point using the Yahoo Weather and EnviroFacts APIs. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class ByAddress(Choreography): def __init__(self, temboo_session): """ Create a new instance of the ByAddress Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(ByAddress, self).__init__(temboo_session, '/Library/Labs/GetWeather/ByAddress') def new_input_set(self): return ByAddressInputSet() def _make_result_set(self, result, path): return ByAddressResultSet(result, path) def _make_execution(self, session, exec_id, path): return ByAddressChoreographyExecution(session, exec_id, path) class ByAddressInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the ByAddress Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_APICredentials(self, value): """ Set the value of the APICredentials input for this Choreo. 
((optional, json) A JSON dictionary containing a Yahoo App ID. See Choreo documentation for formatting examples.) """ super(ByAddressInputSet, self)._set_input('APICredentials', value) def set_Address(self, value): """ Set the value of the Address input for this Choreo. ((required, string) The street address of the location to get weather for.) """ super(ByAddressInputSet, self)._set_input('Address', value) class ByAddressResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the ByAddress Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((json) Contains combined weather data from Yahoo Weather and EnviroFacts.) """ return self._output.get('Response', None) class ByAddressChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return ByAddressResultSet(response, path)
unknown
codeparrot/codeparrot-clean
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy // SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: MIT use super::SectionItem; use super::VersionMetadata; use colored::Colorize; use serde::Deserialize; use std::path::PathBuf; use crate::error::Context; use crate::{ error::Error, helpers::{cross_command, npm::PackageManager}, }; #[derive(Deserialize)] struct YarnVersionInfo { data: Vec<String>, } pub fn npm_latest_version(pm: &PackageManager, name: &str) -> crate::Result<Option<String>> { match pm { PackageManager::Yarn => { let mut cmd = cross_command("yarn"); let output = cmd .arg("info") .arg(name) .args(["version", "--json"]) .output() .map_err(|error| Error::CommandFailed { command: "yarn info --json".to_string(), error, })?; if output.status.success() { let stdout = String::from_utf8_lossy(&output.stdout); let info: YarnVersionInfo = serde_json::from_str(&stdout).context("failed to parse yarn info")?; Ok(Some(info.data.last().unwrap().to_string())) } else { Ok(None) } } PackageManager::YarnBerry => { let mut cmd = cross_command("yarn"); let output = cmd .arg("npm") .arg("info") .arg(name) .args(["--fields", "version", "--json"]) .output() .map_err(|error| Error::CommandFailed { command: "yarn npm info --fields version --json".to_string(), error, })?; if output.status.success() { let info: crate::PackageJson = serde_json::from_reader(std::io::Cursor::new(output.stdout)) .context("failed to parse yarn npm info")?; Ok(info.version) } else { Ok(None) } } // Bun and Deno don't support show command PackageManager::Npm | PackageManager::Deno | PackageManager::Bun => { let mut cmd = cross_command("npm"); let output = cmd .arg("show") .arg(name) .arg("version") .output() .map_err(|error| Error::CommandFailed { command: "npm show --version".to_string(), error, })?; if output.status.success() { let stdout = String::from_utf8_lossy(&output.stdout); Ok(Some(stdout.replace('\n', ""))) } else { Ok(None) } } PackageManager::Pnpm => { let 
mut cmd = cross_command("pnpm"); let output = cmd .arg("info") .arg(name) .arg("version") .output() .map_err(|error| Error::CommandFailed { command: "pnpm info --version".to_string(), error, })?; if output.status.success() { let stdout = String::from_utf8_lossy(&output.stdout); Ok(Some(stdout.replace('\n', ""))) } else { Ok(None) } } } } pub fn package_manager(frontend_dir: &PathBuf) -> PackageManager { let found = PackageManager::all_from_project(frontend_dir); if found.is_empty() { println!( "{}: no lock files found, defaulting to npm", "WARNING".yellow() ); return PackageManager::Npm; } let pkg_manager = found[0]; if found.len() > 1 { println!( "{}: Only one package manager should be used, but found {}.\n Please remove unused package manager lock files, will use {} for now!", "WARNING".yellow(), found.iter().map(ToString::to_string).collect::<Vec<_>>().join(" and "), pkg_manager ); } pkg_manager } pub fn items( frontend_dir: Option<&PathBuf>, package_manager: PackageManager, metadata: &VersionMetadata, ) -> Vec<SectionItem> { let mut items = Vec::new(); if let Some(frontend_dir) = frontend_dir { for (package, version) in [ ("@tauri-apps/api", None), ("@tauri-apps/cli", Some(metadata.js_cli.version.clone())), ] { let frontend_dir = frontend_dir.clone(); let item = nodejs_section_item(package.into(), version, frontend_dir, package_manager); items.push(item); } } items } pub fn nodejs_section_item( package: String, version: Option<String>, frontend_dir: PathBuf, package_manager: PackageManager, ) -> SectionItem { SectionItem::new().action(move || { let version = version.clone().unwrap_or_else(|| { package_manager .current_package_version(&package, &frontend_dir) .unwrap_or_default() .unwrap_or_default() }); let latest_ver = super::packages_nodejs::npm_latest_version(&package_manager, &package) .unwrap_or_default() .unwrap_or_default(); if version.is_empty() { format!("{} {}: not installed!", package, " ⱼₛ".black().on_yellow()) } else { format!( "{} {}: {}{}", 
package, " ⱼₛ".black().on_yellow(), version, if !(version.is_empty() || latest_ver.is_empty()) { let version = semver::Version::parse(version.as_str()).unwrap(); let target_version = semver::Version::parse(latest_ver.as_str()).unwrap(); if version < target_version { format!(" ({}, latest: {})", "outdated".yellow(), latest_ver.green()) } else { "".into() } } else { "".into() } ) } .into() }) }
rust
github
https://github.com/tauri-apps/tauri
crates/tauri-cli/src/info/packages_nodejs.rs
/* * Copyright (C) 2012 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.common.collect; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.collect.Maps.transformValues; import static com.google.common.collect.testing.Helpers.mapEntry; import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Collections.sort; import com.google.common.base.Function; import com.google.common.base.Predicate; import com.google.common.collect.Maps.EntryTransformer; import com.google.common.collect.testing.MapTestSuiteBuilder; import com.google.common.collect.testing.NavigableMapTestSuiteBuilder; import com.google.common.collect.testing.SafeTreeMap; import com.google.common.collect.testing.SampleElements; import com.google.common.collect.testing.SortedMapTestSuiteBuilder; import com.google.common.collect.testing.TestMapGenerator; import com.google.common.collect.testing.TestStringMapGenerator; import com.google.common.collect.testing.TestStringSortedMapGenerator; import com.google.common.collect.testing.features.CollectionFeature; import com.google.common.collect.testing.features.CollectionSize; import com.google.common.collect.testing.features.MapFeature; import com.google.common.collect.testing.google.BiMapTestSuiteBuilder; import com.google.common.collect.testing.google.TestStringBiMapGenerator; import 
com.google.common.io.BaseEncoding; import java.util.Comparator; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NavigableMap; import java.util.NavigableSet; import java.util.Objects; import java.util.Set; import java.util.SortedMap; import java.util.SortedSet; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; import org.jspecify.annotations.NullUnmarked; import org.jspecify.annotations.Nullable; /** * Test suites for wrappers in {@code Maps}. * * @author Louis Wasserman */ @NullUnmarked @AndroidIncompatible // test-suite builders public class MapsCollectionTest extends TestCase { public static Test suite() { TestSuite suite = new TestSuite(); suite.addTest( NavigableMapTestSuiteBuilder.using( new TestStringSortedMapGenerator() { @Override protected SortedMap<String, String> create(Entry<String, String>[] entries) { SafeTreeMap<String, String> map = new SafeTreeMap<>(); putEntries(map, entries); return Maps.unmodifiableNavigableMap(map); } }) .named("unmodifiableNavigableMap[SafeTreeMap]") .withFeatures( CollectionSize.ANY, MapFeature.ALLOWS_NULL_VALUES, CollectionFeature.SERIALIZABLE) .createTestSuite()); suite.addTest( BiMapTestSuiteBuilder.using( new TestStringBiMapGenerator() { @Override protected BiMap<String, String> create(Entry<String, String>[] entries) { BiMap<String, String> bimap = HashBiMap.create(entries.length); for (Entry<String, String> entry : entries) { checkArgument(!bimap.containsKey(entry.getKey())); bimap.put(entry.getKey(), entry.getValue()); } return Maps.unmodifiableBiMap(bimap); } }) .named("unmodifiableBiMap[HashBiMap]") .withFeatures( CollectionSize.ANY, MapFeature.ALLOWS_NULL_VALUES, MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_ANY_NULL_QUERIES, MapFeature.REJECTS_DUPLICATES_AT_CREATION, CollectionFeature.SERIALIZABLE) .createTestSuite()); suite.addTest( 
MapTestSuiteBuilder.using( new TestMapGenerator<String, Integer>() { @Override public SampleElements<Entry<String, Integer>> samples() { return new SampleElements<>( mapEntry("x", 1), mapEntry("xxx", 3), mapEntry("xx", 2), mapEntry("xxxx", 4), mapEntry("aaaaa", 5)); } @Override public Map<String, Integer> create(Object... elements) { Set<String> set = new LinkedHashSet<>(); for (Object e : elements) { Entry<?, ?> entry = (Entry<?, ?>) e; checkNotNull(entry.getValue()); set.add((String) checkNotNull(entry.getKey())); } return Maps.asMap( set, new Function<String, Integer>() { @Override public Integer apply(String input) { return input.length(); } }); } @SuppressWarnings("unchecked") @Override public Entry<String, Integer>[] createArray(int length) { return (Entry<String, Integer>[]) new Entry<?, ?>[length]; } @Override public Iterable<Entry<String, Integer>> order( List<Entry<String, Integer>> insertionOrder) { return insertionOrder; } @Override public String[] createKeyArray(int length) { return new String[length]; } @Override public Integer[] createValueArray(int length) { return new Integer[length]; } }) .named("Maps.asMap[Set, Function]") .withFeatures( CollectionSize.ANY, MapFeature.SUPPORTS_REMOVE, CollectionFeature.SUPPORTS_ITERATOR_REMOVE) .createTestSuite()); suite.addTest( SortedMapTestSuiteBuilder.using( new TestMapGenerator<String, Integer>() { @Override public String[] createKeyArray(int length) { return new String[length]; } @Override public Integer[] createValueArray(int length) { return new Integer[length]; } @Override public SampleElements<Entry<String, Integer>> samples() { return new SampleElements<>( mapEntry("a", 1), mapEntry("aa", 2), mapEntry("aba", 3), mapEntry("bbbb", 4), mapEntry("ccccc", 5)); } @Override public SortedMap<String, Integer> create(Object... 
elements) { SortedSet<String> set = new NonNavigableSortedSet(); for (Object e : elements) { Entry<?, ?> entry = (Entry<?, ?>) e; checkNotNull(entry.getValue()); set.add((String) checkNotNull(entry.getKey())); } return Maps.asMap( set, new Function<String, Integer>() { @Override public Integer apply(String input) { return input.length(); } }); } @SuppressWarnings("unchecked") @Override public Entry<String, Integer>[] createArray(int length) { return (Entry<String, Integer>[]) new Entry<?, ?>[length]; } @Override public Iterable<Entry<String, Integer>> order( List<Entry<String, Integer>> insertionOrder) { sort( insertionOrder, new Comparator<Entry<String, Integer>>() { @Override public int compare(Entry<String, Integer> o1, Entry<String, Integer> o2) { return o1.getKey().compareTo(o2.getKey()); } }); return insertionOrder; } }) .named("Maps.asMap[SortedSet, Function]") .withFeatures( CollectionSize.ANY, CollectionFeature.SUPPORTS_ITERATOR_REMOVE, MapFeature.SUPPORTS_REMOVE) .createTestSuite()); suite.addTest( NavigableMapTestSuiteBuilder.using( new TestMapGenerator<String, Integer>() { @Override public String[] createKeyArray(int length) { return new String[length]; } @Override public Integer[] createValueArray(int length) { return new Integer[length]; } @Override public SampleElements<Entry<String, Integer>> samples() { return new SampleElements<>( mapEntry("a", 1), mapEntry("aa", 2), mapEntry("aba", 3), mapEntry("bbbb", 4), mapEntry("ccccc", 5)); } @Override public NavigableMap<String, Integer> create(Object... 
elements) { NavigableSet<String> set = Sets.newTreeSet(Ordering.natural()); for (Object e : elements) { Entry<?, ?> entry = (Entry<?, ?>) e; checkNotNull(entry.getValue()); set.add((String) checkNotNull(entry.getKey())); } return Maps.asMap( set, new Function<String, Integer>() { @Override public Integer apply(String input) { return input.length(); } }); } @SuppressWarnings("unchecked") @Override public Entry<String, Integer>[] createArray(int length) { return (Entry<String, Integer>[]) new Entry<?, ?>[length]; } @Override public Iterable<Entry<String, Integer>> order( List<Entry<String, Integer>> insertionOrder) { sort( insertionOrder, new Comparator<Entry<String, Integer>>() { @Override public int compare(Entry<String, Integer> o1, Entry<String, Integer> o2) { return o1.getKey().compareTo(o2.getKey()); } }); return insertionOrder; } }) .named("Maps.asMap[NavigableSet, Function]") .withFeatures( CollectionSize.ANY, MapFeature.SUPPORTS_REMOVE, CollectionFeature.SUPPORTS_ITERATOR_REMOVE) .createTestSuite()); suite.addTest(filterSuite()); suite.addTest(transformSuite()); return suite; } static TestSuite filterSuite() { TestSuite suite = new TestSuite("Filter"); suite.addTest(filterMapSuite()); suite.addTest(filterBiMapSuite()); suite.addTest(filterSortedMapSuite()); suite.addTest(filterNavigableMapSuite()); return suite; } static TestSuite filterMapSuite() { TestSuite suite = new TestSuite("FilterMap"); suite.addTest( MapTestSuiteBuilder.using( new TestStringMapGenerator() { @Override protected Map<String, String> create(Entry<String, String>[] entries) { Map<String, String> map = new HashMap<>(); putEntries(map, entries); map.putAll(ENTRIES_TO_FILTER); return Maps.filterKeys(map, FILTER_KEYS); } }) .named("Maps.filterKeys[Map, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_NULL_VALUES, MapFeature.ALLOWS_ANY_NULL_QUERIES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); suite.addTest( MapTestSuiteBuilder.using( new 
TestStringMapGenerator() { @Override protected Map<String, String> create(Entry<String, String>[] entries) { Map<String, String> map = new HashMap<>(); putEntries(map, entries); map.putAll(ENTRIES_TO_FILTER); return Maps.filterValues(map, FILTER_VALUES); } }) .named("Maps.filterValues[Map, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_NULL_VALUES, MapFeature.ALLOWS_ANY_NULL_QUERIES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); suite.addTest( MapTestSuiteBuilder.using( new TestStringMapGenerator() { @Override protected Map<String, String> create(Entry<String, String>[] entries) { Map<String, String> map = new HashMap<>(); putEntries(map, entries); map.putAll(ENTRIES_TO_FILTER); return Maps.filterEntries(map, FILTER_ENTRIES); } }) .named("Maps.filterEntries[Map, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_NULL_VALUES, MapFeature.ALLOWS_ANY_NULL_QUERIES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); suite.addTest( MapTestSuiteBuilder.using( new TestStringMapGenerator() { @Override protected Map<String, String> create(Entry<String, String>[] entries) { Map<String, String> map = new HashMap<>(); putEntries(map, entries); map.putAll(ENTRIES_TO_FILTER); map = Maps.filterEntries(map, FILTER_ENTRIES_1); return Maps.filterEntries(map, FILTER_ENTRIES_2); } }) .named("Maps.filterEntries[Maps.filterEntries[Map, Predicate], Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_NULL_VALUES, MapFeature.ALLOWS_ANY_NULL_QUERIES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); return suite; } static TestSuite filterBiMapSuite() { TestSuite suite = new TestSuite("FilterBiMap"); suite.addTest( BiMapTestSuiteBuilder.using( new TestStringBiMapGenerator() { @Override protected BiMap<String, String> create(Entry<String, String>[] entries) { BiMap<String, String> map = HashBiMap.create(); putEntries(map, entries); map.putAll(ENTRIES_TO_FILTER); 
return Maps.filterKeys(map, FILTER_KEYS); } }) .named("Maps.filterKeys[BiMap, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_NULL_VALUES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); suite.addTest( BiMapTestSuiteBuilder.using( new TestStringBiMapGenerator() { @Override protected BiMap<String, String> create(Entry<String, String>[] entries) { BiMap<String, String> map = HashBiMap.create(); putEntries(map, entries); map.putAll(ENTRIES_TO_FILTER); return Maps.filterValues(map, FILTER_VALUES); } }) .named("Maps.filterValues[BiMap, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_NULL_VALUES, MapFeature.ALLOWS_ANY_NULL_QUERIES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); suite.addTest( BiMapTestSuiteBuilder.using( new TestStringBiMapGenerator() { @Override protected BiMap<String, String> create(Entry<String, String>[] entries) { BiMap<String, String> map = HashBiMap.create(); putEntries(map, entries); map.putAll(ENTRIES_TO_FILTER); return Maps.filterEntries(map, FILTER_ENTRIES); } }) .named("Maps.filterEntries[BiMap, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_NULL_VALUES, MapFeature.ALLOWS_ANY_NULL_QUERIES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); return suite; } static TestSuite filterSortedMapSuite() { TestSuite suite = new TestSuite("FilterSortedMap"); suite.addTest( SortedMapTestSuiteBuilder.using( new TestStringSortedMapGenerator() { @Override protected SortedMap<String, String> create(Entry<String, String>[] entries) { SortedMap<String, String> map = new NonNavigableSortedMap(); putEntries(map, entries); map.putAll(ENTRIES_TO_FILTER); return Maps.filterKeys(map, FILTER_KEYS); } }) .named("Maps.filterKeys[SortedMap, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_VALUES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); suite.addTest( SortedMapTestSuiteBuilder.using( new 
TestStringSortedMapGenerator() { @Override protected SortedMap<String, String> create(Entry<String, String>[] entries) { SortedMap<String, String> map = new NonNavigableSortedMap(); putEntries(map, entries); map.putAll(ENTRIES_TO_FILTER); return Maps.filterValues(map, FILTER_VALUES); } }) .named("Maps.filterValues[SortedMap, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_VALUES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); suite.addTest( SortedMapTestSuiteBuilder.using( new TestStringSortedMapGenerator() { @Override protected SortedMap<String, String> create(Entry<String, String>[] entries) { SortedMap<String, String> map = new NonNavigableSortedMap(); putEntries(map, entries); map.putAll(ENTRIES_TO_FILTER); return Maps.filterEntries(map, FILTER_ENTRIES); } }) .named("Maps.filterEntries[SortedMap, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_VALUES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); return suite; } static TestSuite filterNavigableMapSuite() { TestSuite suite = new TestSuite("FilterNavigableMap"); suite.addTest( NavigableMapTestSuiteBuilder.using( new TestStringSortedMapGenerator() { @Override protected NavigableMap<String, String> create(Entry<String, String>[] entries) { NavigableMap<String, String> map = new SafeTreeMap<>(); putEntries(map, entries); map.put("banana", "toast"); map.put("eggplant", "spam"); return Maps.filterKeys(map, FILTER_KEYS); } }) .named("Maps.filterKeys[NavigableMap, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_VALUES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); suite.addTest( NavigableMapTestSuiteBuilder.using( new TestStringSortedMapGenerator() { @Override protected NavigableMap<String, String> create(Entry<String, String>[] entries) { NavigableMap<String, String> map = new SafeTreeMap<>(); putEntries(map, entries); map.put("banana", "toast"); map.put("eggplant", "spam"); return Maps.filterValues(map, FILTER_VALUES); } }) 
.named("Maps.filterValues[NavigableMap, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_VALUES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); suite.addTest( NavigableMapTestSuiteBuilder.using( new TestStringSortedMapGenerator() { @Override protected NavigableMap<String, String> create(Entry<String, String>[] entries) { NavigableMap<String, String> map = new SafeTreeMap<>(); putEntries(map, entries); map.put("banana", "toast"); map.put("eggplant", "spam"); return Maps.filterEntries(map, FILTER_ENTRIES); } }) .named("Maps.filterEntries[NavigableMap, Predicate]") .withFeatures( MapFeature.ALLOWS_NULL_VALUES, MapFeature.GENERAL_PURPOSE, CollectionSize.ANY) .createTestSuite()); return suite; } static void putEntries(Map<String, String> map, Entry<String, String>[] entries) { for (Entry<String, String> entry : entries) { map.put(entry.getKey(), entry.getValue()); } } static final Predicate<String> FILTER_KEYS = new Predicate<String>() { @Override public boolean apply(@Nullable String string) { return !Objects.equals(string, "banana") && !Objects.equals(string, "eggplant"); } }; static final Predicate<String> FILTER_VALUES = new Predicate<String>() { @Override public boolean apply(@Nullable String string) { return !Objects.equals(string, "toast") && !Objects.equals(string, "spam"); } }; static final Predicate<Entry<String, String>> FILTER_ENTRIES = new Predicate<Entry<String, String>>() { @Override public boolean apply(Entry<String, String> entry) { return !mapEntry("banana", "toast").equals(entry) && !mapEntry("eggplant", "spam").equals(entry); } }; static final Predicate<Entry<String, String>> FILTER_ENTRIES_1 = new Predicate<Entry<String, String>>() { @Override public boolean apply(Entry<String, String> entry) { return !mapEntry("banana", "toast").equals(entry); } }; static final Predicate<Entry<String, String>> FILTER_ENTRIES_2 = new Predicate<Entry<String, String>>() { @Override public boolean apply(Entry<String, String> entry) { return 
!mapEntry("eggplant", "spam").equals(entry); } }; static final ImmutableMap<String, String> ENTRIES_TO_FILTER = ImmutableMap.of("banana", "toast", "eggplant", "spam"); static final Predicate<Entry<String, String>> NOT_NULL_ENTRY = new Predicate<Entry<String, String>>() { @Override public boolean apply(Entry<String, String> entry) { return entry.getKey() != null && entry.getValue() != null; } }; private static class NonNavigableSortedSet extends ForwardingSortedSet<String> { private final SortedSet<String> delegate = Sets.newTreeSet(Ordering.natural()); @Override protected SortedSet<String> delegate() { return delegate; } } private static class NonNavigableSortedMap extends ForwardingSortedMap<String, String> { private final SortedMap<String, String> delegate = new SafeTreeMap<>(Ordering.natural()); @Override protected SortedMap<String, String> delegate() { return delegate; } } private static String encode(String str) { return BaseEncoding.base64().encode(str.getBytes(UTF_8)); } private static final Function<String, String> DECODE_FUNCTION = new Function<String, String>() { @Override public String apply(String input) { return new String(BaseEncoding.base64().decode(input), UTF_8); } }; private static final EntryTransformer<String, String, String> DECODE_ENTRY_TRANSFORMER = new EntryTransformer<String, String, String>() { @Override public String transformEntry(String key, String value) { return DECODE_FUNCTION.apply(value); } }; static TestSuite transformSuite() { TestSuite suite = new TestSuite("Maps.transform"); suite.addTest(transformMapSuite()); suite.addTest(transformSortedMapSuite()); suite.addTest(transformNavigableMapSuite()); return suite; } static TestSuite transformMapSuite() { TestSuite suite = new TestSuite("TransformMap"); suite.addTest( MapTestSuiteBuilder.using( new TestStringMapGenerator() { @Override protected Map<String, String> create(Entry<String, String>[] entries) { Map<String, String> map = new LinkedHashMap<>(); for (Entry<String, String> 
entry : entries) { map.put(entry.getKey(), encode(entry.getValue())); } return transformValues(map, DECODE_FUNCTION); } }) .named("Maps.transformValues[Map, Function]") .withFeatures( CollectionSize.ANY, CollectionFeature.KNOWN_ORDER, MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_ANY_NULL_QUERIES, MapFeature.SUPPORTS_REMOVE, CollectionFeature.SUPPORTS_ITERATOR_REMOVE) .createTestSuite()); suite.addTest( MapTestSuiteBuilder.using( new TestStringMapGenerator() { @Override protected Map<String, String> create(Entry<String, String>[] entries) { Map<String, String> map = new LinkedHashMap<>(); for (Entry<String, String> entry : entries) { map.put(entry.getKey(), encode(entry.getValue())); } return Maps.transformEntries(map, DECODE_ENTRY_TRANSFORMER); } }) .named("Maps.transformEntries[Map, EntryTransformer]") .withFeatures( CollectionSize.ANY, CollectionFeature.KNOWN_ORDER, MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_ANY_NULL_QUERIES, MapFeature.SUPPORTS_REMOVE, CollectionFeature.SUPPORTS_ITERATOR_REMOVE) .createTestSuite()); return suite; } static TestSuite transformSortedMapSuite() { TestSuite suite = new TestSuite("TransformSortedMap"); suite.addTest( SortedMapTestSuiteBuilder.using( new TestStringSortedMapGenerator() { @Override protected SortedMap<String, String> create(Entry<String, String>[] entries) { SortedMap<String, String> map = new NonNavigableSortedMap(); for (Entry<String, String> entry : entries) { map.put(entry.getKey(), encode(entry.getValue())); } return transformValues(map, DECODE_FUNCTION); } }) .named("Maps.transformValues[SortedMap, Function]") .withFeatures( CollectionSize.ANY, CollectionFeature.KNOWN_ORDER, MapFeature.SUPPORTS_REMOVE, CollectionFeature.SUPPORTS_ITERATOR_REMOVE) .createTestSuite()); suite.addTest( SortedMapTestSuiteBuilder.using( new TestStringSortedMapGenerator() { @Override protected SortedMap<String, String> create(Entry<String, String>[] entries) { SortedMap<String, String> map = new NonNavigableSortedMap(); for 
(Entry<String, String> entry : entries) { map.put(entry.getKey(), encode(entry.getValue())); } return Maps.transformEntries(map, DECODE_ENTRY_TRANSFORMER); } }) .named("Maps.transformEntries[SortedMap, EntryTransformer]") .withFeatures( CollectionSize.ANY, CollectionFeature.KNOWN_ORDER, MapFeature.SUPPORTS_REMOVE, CollectionFeature.SUPPORTS_ITERATOR_REMOVE) .createTestSuite()); return suite; } static TestSuite transformNavigableMapSuite() { TestSuite suite = new TestSuite("TransformNavigableMap"); suite.addTest( NavigableMapTestSuiteBuilder.using( new TestStringSortedMapGenerator() { @Override protected NavigableMap<String, String> create(Entry<String, String>[] entries) { NavigableMap<String, String> map = new SafeTreeMap<>(); for (Entry<String, String> entry : entries) { map.put(entry.getKey(), encode(entry.getValue())); } return transformValues(map, DECODE_FUNCTION); } }) .named("Maps.transformValues[NavigableMap, Function]") .withFeatures( CollectionSize.ANY, CollectionFeature.KNOWN_ORDER, MapFeature.SUPPORTS_REMOVE, CollectionFeature.SUPPORTS_ITERATOR_REMOVE) .createTestSuite()); suite.addTest( NavigableMapTestSuiteBuilder.using( new TestStringSortedMapGenerator() { @Override protected NavigableMap<String, String> create(Entry<String, String>[] entries) { NavigableMap<String, String> map = new SafeTreeMap<>(); for (Entry<String, String> entry : entries) { map.put(entry.getKey(), encode(entry.getValue())); } return Maps.transformEntries(map, DECODE_ENTRY_TRANSFORMER); } }) .named("Maps.transformEntries[NavigableMap, EntryTransformer]") .withFeatures( CollectionSize.ANY, CollectionFeature.KNOWN_ORDER, MapFeature.SUPPORTS_REMOVE, CollectionFeature.SUPPORTS_ITERATOR_REMOVE) .createTestSuite()); return suite; } }
java
github
https://github.com/google/guava
android/guava-tests/test/com/google/common/collect/MapsCollectionTest.java
#pragma once #include <c10/hip/HIPCachingAllocator.h> // Use of c10::hip namespace here makes hipification easier, because // I don't have to also fix namespaces. Sorry! namespace c10::hip { // NB: THIS SHOULD NOT BE USED // I couldn't find anywhere it was used in public pytorch sources or downstream projects. // But to avoid risk in removing it, it's still here. // Takes a valid HIPAllocator (of any sort) and turns it into // an allocator pretending to be a CUDA allocator. See // Note [Masquerading as CUDA] class HIPAllocatorMasqueradingAsCUDA final : public HIPCachingAllocator::HIPAllocator { HIPCachingAllocator::HIPAllocator* allocator_; public: explicit HIPAllocatorMasqueradingAsCUDA(HIPCachingAllocator::HIPAllocator* allocator) : allocator_(allocator) {} virtual ~HIPAllocatorMasqueradingAsCUDA() = default; // From c10::Allocator DataPtr allocate(size_t size) override { return allocator_->allocate(size); } bool is_simple_data_ptr(const DataPtr& data_ptr) const override { return allocator_->is_simple_data_ptr(data_ptr); } DeleterFnPtr raw_deleter() const override { return allocator_->raw_deleter(); } void copy_data(void* dest, const void* src, std::size_t count) const final { allocator_->copy_data(dest, src, count); } // From DeviceAllocator bool initialized() override { return allocator_->initialized(); } void emptyCache(MempoolId_t mempool_id = {0, 0}) override { allocator_->emptyCache(mempool_id); } void recordStream(const DataPtr& ptr, c10::Stream stream) override { HIPStream hip_stream = HIPStream(stream); recordStream(ptr, hip_stream); } CachingDeviceAllocator::DeviceStats getDeviceStats(c10::DeviceIndex device) override { return allocator_->getDeviceStats(device); } void resetAccumulatedStats(c10::DeviceIndex device) override { allocator_->resetAccumulatedStats(device); } void resetPeakStats(c10::DeviceIndex device) override { allocator_->resetPeakStats(device); } // From CUDAAllocator void* raw_alloc(size_t nbytes) override { return 
allocator_->raw_alloc(nbytes); } void* raw_alloc_with_stream(size_t nbytes, hipStream_t stream) override { return allocator_->raw_alloc_with_stream(nbytes, stream); } void raw_delete(void* ptr) override { allocator_->raw_delete(ptr); } void init(int device_count) override { allocator_->init(device_count); } double getMemoryFraction(c10::DeviceIndex device) override { return allocator_->getMemoryFraction(device); } void setMemoryFraction(double fraction, c10::DeviceIndex device) override { allocator_->setMemoryFraction(fraction, device); } std::vector<HIPCachingAllocator::StreamSegmentSize> getExpandableSegmentSizes(c10::DeviceIndex device) override { return allocator_->getExpandableSegmentSizes(device); } void enable(bool value) override { allocator_->enable(value); } bool isEnabled() const override { return allocator_->isEnabled(); } void cacheInfo(c10::DeviceIndex device, size_t* largestBlock) override { allocator_->cacheInfo(device, largestBlock); } void* getBaseAllocation(void* ptr, size_t* size) override { return allocator_->getBaseAllocation(ptr, size); } void recordStream(const DataPtr& ptr, HIPStream stream) override { allocator_->recordStream(ptr, stream); } HIPCachingAllocator::SnapshotInfo snapshot(MempoolId_t mempool_id = {0, 0}, bool include_traces = true) override { return allocator_->snapshot(mempool_id, include_traces); } void beginAllocateToPool( c10::DeviceIndex device, MempoolId_t mempool_id, std::function<bool(hipStream_t)> filter) override { allocator_->beginAllocateToPool(device, mempool_id, filter); } void endAllocateToPool( c10::DeviceIndex device, MempoolId_t mempool_id) override { allocator_->endAllocateToPool(device, mempool_id); } void releasePool(c10::DeviceIndex device, MempoolId_t mempool_id) override { allocator_->releasePool(device, mempool_id); } int getPoolUseCount(c10::DeviceIndex device, MempoolId_t mempool_id) override { return allocator_->getPoolUseCount(device, mempool_id); } void createOrIncrefPool( c10::DeviceIndex device, 
MempoolId_t mempool_id, std::shared_ptr<HIPAllocator> allocator = nullptr) override { allocator_->createOrIncrefPool(device, mempool_id, std::move(allocator)); } void setUseOnOOM(c10::DeviceIndex device, MempoolId_t mempool_id, bool use_on_oom) override { allocator_->setUseOnOOM(device, mempool_id, use_on_oom); } void setNoSplit(c10::DeviceIndex device, MempoolId_t mempool_id) override { allocator_->setNoSplit(device, mempool_id); } bool checkPoolLiveAllocations( c10::DeviceIndex device, MempoolId_t mempool_id, const std::unordered_set<void*>& expected_live_allocations) override { return allocator_->checkPoolLiveAllocations(device, mempool_id, expected_live_allocations); } HIPCachingAllocator::ShareableHandle shareIpcHandle(void* ptr) override { return allocator_->shareIpcHandle(ptr); } std::shared_ptr<void> getIpcDevPtr(std::string handle) override { return allocator_->getIpcDevPtr(handle); } bool isHistoryEnabled() override { return allocator_->isHistoryEnabled(); } void recordHistory( bool enabled, HIPCachingAllocator::CreateContextFn context_recorder, size_t alloc_trace_max_entries, HIPCachingAllocator::RecordContext when, bool clearHistory, const std::vector<std::string>& skip_actions) override { allocator_->recordHistory(enabled, context_recorder, alloc_trace_max_entries, when, clearHistory, skip_actions); } void recordAnnotation( const std::vector<std::pair<std::string, std::string>>& md) override { allocator_->recordAnnotation(md); } void pushCompileContext(std::string& md) override { allocator_->pushCompileContext(md); } void popCompileContext() override { allocator_->popCompileContext(); } void attachOutOfMemoryObserver(HIPCachingAllocator::OutOfMemoryObserver observer) override { allocator_->attachOutOfMemoryObserver(observer); } void attachAllocatorTraceTracker(HIPCachingAllocator::AllocatorTraceTracker tracker) override { allocator_->attachAllocatorTraceTracker(tracker); } void enablePeerAccess(c10::DeviceIndex dev, c10::DeviceIndex dev_to_access) 
override { allocator_->enablePeerAccess(dev, dev_to_access); } hipError_t memcpyAsync( void* dst, int dstDevice, const void* src, int srcDevice, size_t count, hipStream_t stream, bool p2p_enabled) override { return allocator_->memcpyAsync(dst, dstDevice, src, srcDevice, count, stream, p2p_enabled); } std::shared_ptr<HIPCachingAllocator::AllocatorState> getCheckpointState( c10::DeviceIndex device, MempoolId_t id) override { return allocator_->getCheckpointState(device, id); } HIPCachingAllocator::CheckpointDelta setCheckpointPoolState( c10::DeviceIndex device, std::shared_ptr<HIPCachingAllocator::AllocatorState> pps) override { return allocator_->setCheckpointPoolState(device, pps); } std::string name() override { return allocator_->name(); } }; } // namespace c10::hip
cpp
github
https://github.com/pytorch/pytorch
aten/src/ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h
import maya.cmds as cmds

from tank.platform.qt import QtCore, QtGui


def _findShotCamera():
    """
    Shot camera lookup.

    Scans every camera shape in the scene and keeps those whose transform
    carries a custom ``type`` attribute (the tagging convention for the shot
    camera). You can replace this entire func with your own code to return
    the correct cameraShape for the app to use.

    @return: The camera shape name, or ``-1`` when zero or more than one
             tagged camera exists (a dialog warns the user in both cases).
    """
    tagged = []
    for shape in cmds.ls(type='camera'):
        transform = cmds.listRelatives(shape, parent=True)[0]
        ## We don't care about any suffix used; we look for an attr called
        ## 'type' on the camera transform to identify the shot cam. Change
        ## this lookup to match your own shot-camera convention as needed.
        if cmds.objExists('%s.type' % transform):
            tagged.append(shape)

    if not tagged:
        QtGui.QMessageBox.information(None, "Aborted...", 'No shotCam found!!')
        return -1
    if len(tagged) > 1:
        QtGui.QMessageBox.information(None, "Aborted...", 'Make sure you have only ONE shot camera in the scene!')
        return -1
    ## Exactly one tagged camera found.
    return tagged[0]


def _setCameraDefaults(camera=''):
    """
    Sets the base display/clipping defaults on the shot camera.

    @param camera: The name of the camera transform node NOT the shape node!
    @type camera: String
    """
    if not camera:
        cmds.warning('No shotcam found!')
        return

    camShape = cmds.listRelatives(camera, shapes=True)[0]
    cmds.camera(camera, e=True, displayFilmGate=0, displayResolution=1, overscan=1.19)
    cmds.setAttr("%s.displayGateMask" % camShape, 1)
    cmds.setAttr('%s.displayGateMaskOpacity' % camShape, 1)
    cmds.setAttr('%s.displayGateMaskColor' % camShape, 0, 0, 0, type='double3')
    cmds.setAttr("%s.displayResolution" % camShape, 1)
    cmds.setAttr("%s.displaySafeAction" % camShape, 1)
    cmds.setAttr("%s.journalCommand" % camShape, 0)
    cmds.setAttr("%s.nearClipPlane" % camShape, 0.05)
    cmds.setAttr("%s.overscan" % camShape, 1)


def _createCamGate(camera='', pathToImage=''):
    """
    Creates (once) an image plane named ``camGate``, snaps it to the shot
    camera, parents and locks it to that camera, and assigns the gate image.

    @param camera: The shot camera to attach the gate to. When empty, the
                   camera is discovered via ``_findShotCamera``.
    @type camera: String
    @param pathToImage: File path of the image to load into the gate.
    @type pathToImage: String
    """
    if not camera:
        camera = _findShotCamera()
        ## BUGFIX: _findShotCamera returns -1 on failure, and -1 is truthy,
        ## so the original code fell through and built the gate against an
        ## invalid camera. Bail out explicitly; the user was already warned.
        if camera == -1:
            return

    ## NOTE(review): _findShotCamera returns the camera *shape*, while the
    ## connectAttr below formats '%sShape.imagePlane[0]' as if camera were
    ## the transform — confirm which node type callers pass in.
    if camera:
        if not cmds.objExists('camGate'):
            cmds.imagePlane(n='camGate')
            cmds.rename('camGate1', 'camGate')
            ## Snap the gate onto the camera with throwaway constraints,
            ## then parent it so it follows the camera directly.
            cmds.pointConstraint('%s' % camera, 'camGate', mo=False, n='tmpPoint')
            cmds.orientConstraint('%s' % camera, 'camGate', mo=False, n='tmpOrient')
            cmds.delete(['tmpPoint', 'tmpOrient'])
            cmds.parent('camGate', '%s' % camera)
            cmds.connectAttr('camGateShape.message', '%sShape.imagePlane[0]' % camera, f=True)
            cmds.setAttr('camGate.depth', 0.01)
            cmds.setAttr('camGate.sizeX', 1.710)
            cmds.setAttr('camGate.sizeY', 2)
            cmds.setAttr('camGate.offsetX', 0.004)
            cmds.setAttr('camGate.offsetY', 0.003)
            cmds.setAttr('camGateShape.imageName', pathToImage, type='string')
            cmds.setAttr('camGateShape.lockedToCamera', 1)
            cmds.setAttr('camGateShape.displayOnlyIfCurrent', 1)
python
codeparrot/codeparrot-clean
import { flushSync } from 'svelte';
import { ok, test } from '../../test';

// Verifies that binding to a member expression updates the rendered output
// and, per the `warnings: []` expectation, emits no dev-mode warnings.
export default test({
	compileOptions: { dev: true },

	test({ assert, target, window }) {
		// Initial render.
		assert.htmlEqual(target.innerHTML, `<input><p>hello</p>`);

		const inputElement = target.querySelector('input');
		ok(inputElement);

		// Simulate the user typing a new value.
		inputElement.value = 'goodbye';
		const inputEvent = new window.Event('input');
		inputElement.dispatchEvent(inputEvent);
		flushSync();

		// The bound value should now be reflected in the DOM.
		assert.htmlEqual(target.innerHTML, `<input><p>goodbye</p>`);
	},

	// No compiler/runtime warnings are expected for this binding.
	warnings: []
});
javascript
github
https://github.com/sveltejs/svelte
packages/svelte/tests/runtime-legacy/samples/binding-member-expression-no-warning/_config.js
prelude: | $LOAD_PATH.unshift(File.expand_path("lib")) require "strscan" str = "test string" scanner = StringScanner.new(str) str = "string" reg = /string/ benchmark: check_until(reg): | scanner.check_until(reg) check_until(str): | scanner.check_until(str) exist?(reg): | scanner.exist?(reg) exist?(str): | scanner.exist?(str)
yaml
github
https://github.com/ruby/ruby
benchmark/search.yaml
/* * Copyright (C) 2014 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.common.graph; import static com.google.common.graph.Graphs.TransitiveClosureSelfLoopStrategy.ADD_SELF_LOOPS_ALWAYS; import static com.google.common.graph.Graphs.TransitiveClosureSelfLoopStrategy.ADD_SELF_LOOPS_FOR_CYCLES; import static com.google.common.graph.Graphs.copyOf; import static com.google.common.graph.Graphs.inducedSubgraph; import static com.google.common.graph.Graphs.transitiveClosure; import static com.google.common.graph.Graphs.transpose; import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertThrows; import com.google.common.collect.ImmutableSet; import org.jspecify.annotations.NullUnmarked; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; /** * Tests for {@link Graphs}. Tests assume that the implementation of the method {@code addEdge} adds * the missing nodes to the graph, then adds the edge between them. 
*/ @RunWith(JUnit4.class) @NullUnmarked public class GraphsTest { private static final Integer N1 = 1; private static final Integer N2 = 2; private static final Integer N3 = 3; private static final Integer N4 = 4; private static final String E11 = "1-1"; private static final String E11_A = "1-1a"; private static final String E12 = "1-2"; private static final String E12_A = "1-2a"; private static final String E12_B = "1-2b"; private static final String E21 = "2-1"; private static final String E13 = "1-3"; private static final String E31 = "3-1"; private static final String E34 = "3-4"; private static final String E44 = "4-4"; private static final int NODE_COUNT = 20; private static final int EDGE_COUNT = 20; // TODO(user): Consider adding both error messages from here and {@link AbstractNetworkTest} // in one class (may be a utility class for error messages). private static final String ERROR_PARALLEL_EDGE = "connected by a different edge"; private static final String ERROR_NEGATIVE_COUNT = "is non-negative"; static final String ERROR_SELF_LOOP = "self-loops are not allowed"; @Test public void transitiveClosure_directedGraph_addSelfLoopsAlways() { MutableGraph<Integer> directedGraph = GraphBuilder.directed().allowsSelfLoops(false).build(); directedGraph.putEdge(N1, N2); directedGraph.putEdge(N1, N3); directedGraph.putEdge(N2, N3); directedGraph.addNode(N4); MutableGraph<Integer> expectedClosure = GraphBuilder.directed().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N1); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N2, N2); expectedClosure.putEdge(N2, N3); expectedClosure.putEdge(N3, N3); expectedClosure.putEdge(N4, N4); assertThat(transitiveClosure(directedGraph, ADD_SELF_LOOPS_ALWAYS)).isEqualTo(expectedClosure); } @Test public void transitiveClosure_directedGraph_addSelfLoopsForCycles() { MutableGraph<Integer> directedGraph = GraphBuilder.directed().allowsSelfLoops(false).build(); directedGraph.putEdge(N1, 
N2); directedGraph.putEdge(N1, N3); directedGraph.putEdge(N2, N3); directedGraph.addNode(N4); // the above graph is its own transitive closure MutableGraph<Integer> expectedClosure = GraphBuilder.directed().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N2, N3); expectedClosure.addNode(N4); assertThat(transitiveClosure(directedGraph, ADD_SELF_LOOPS_FOR_CYCLES)) .isEqualTo(expectedClosure); } @Test public void transitiveClosure_undirectedGraph_addSelfLoopsAlways() { MutableGraph<Integer> undirectedGraph = GraphBuilder.undirected().allowsSelfLoops(false).build(); undirectedGraph.putEdge(N1, N2); undirectedGraph.putEdge(N1, N3); undirectedGraph.putEdge(N2, N3); undirectedGraph.addNode(N4); MutableGraph<Integer> expectedClosure = GraphBuilder.undirected().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N1); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N2, N2); expectedClosure.putEdge(N2, N3); expectedClosure.putEdge(N3, N3); expectedClosure.putEdge(N4, N4); assertThat(transitiveClosure(undirectedGraph, ADD_SELF_LOOPS_ALWAYS)) .isEqualTo(expectedClosure); } @Test public void transitiveClosure_undirectedGraph_addSelfLoopsForCycles() { MutableGraph<Integer> undirectedGraph = GraphBuilder.undirected().allowsSelfLoops(false).build(); undirectedGraph.putEdge(N1, N2); undirectedGraph.putEdge(N1, N3); undirectedGraph.putEdge(N2, N3); undirectedGraph.addNode(N4); MutableGraph<Integer> expectedClosure = GraphBuilder.undirected().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N1); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N2, N2); expectedClosure.putEdge(N2, N3); expectedClosure.putEdge(N3, N3); expectedClosure.addNode(N4); // N4 is isolated => no incident edges in this transitive closure assertThat(transitiveClosure(undirectedGraph, ADD_SELF_LOOPS_FOR_CYCLES)) .isEqualTo(expectedClosure); } 
@Test public void transitiveClosure_directedPathGraph_addSelfLoopsAlways() { MutableGraph<Integer> directedGraph = GraphBuilder.directed().allowsSelfLoops(false).build(); directedGraph.putEdge(N1, N2); directedGraph.putEdge(N2, N3); directedGraph.putEdge(N3, N4); MutableGraph<Integer> expectedClosure = GraphBuilder.directed().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N1); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N1, N4); expectedClosure.putEdge(N2, N2); expectedClosure.putEdge(N2, N3); expectedClosure.putEdge(N2, N4); expectedClosure.putEdge(N3, N3); expectedClosure.putEdge(N3, N4); expectedClosure.putEdge(N4, N4); assertThat(transitiveClosure(directedGraph, ADD_SELF_LOOPS_ALWAYS)).isEqualTo(expectedClosure); } @Test public void transitiveClosure_directedPathGraph_addSelfLoopsForCycles() { MutableGraph<Integer> directedGraph = GraphBuilder.directed().allowsSelfLoops(false).build(); directedGraph.putEdge(N1, N2); directedGraph.putEdge(N2, N3); directedGraph.putEdge(N3, N4); MutableGraph<Integer> expectedClosure = GraphBuilder.directed().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N1, N4); expectedClosure.putEdge(N2, N3); expectedClosure.putEdge(N2, N4); expectedClosure.putEdge(N3, N4); assertThat(transitiveClosure(directedGraph, ADD_SELF_LOOPS_FOR_CYCLES)) .isEqualTo(expectedClosure); } @Test public void transitiveClosure_undirectedPathGraph() { MutableGraph<Integer> undirectedGraph = GraphBuilder.undirected().allowsSelfLoops(false).build(); undirectedGraph.putEdge(N1, N2); undirectedGraph.putEdge(N2, N3); undirectedGraph.putEdge(N3, N4); MutableGraph<Integer> expectedClosure = GraphBuilder.undirected().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N1); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N1, N4); expectedClosure.putEdge(N2, N2); expectedClosure.putEdge(N2, 
N3); expectedClosure.putEdge(N2, N4); expectedClosure.putEdge(N3, N3); expectedClosure.putEdge(N3, N4); expectedClosure.putEdge(N4, N4); assertThat(transitiveClosure(undirectedGraph, ADD_SELF_LOOPS_ALWAYS)) .isEqualTo(expectedClosure); } @Test public void transitiveClosure_undirectedPathGraph_addSelfLoopsForCycles() { MutableGraph<Integer> undirectedGraph = GraphBuilder.undirected().allowsSelfLoops(false).build(); undirectedGraph.putEdge(N1, N2); undirectedGraph.putEdge(N2, N3); undirectedGraph.putEdge(N3, N4); MutableGraph<Integer> expectedClosure = GraphBuilder.undirected().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N1); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N1, N4); expectedClosure.putEdge(N2, N2); expectedClosure.putEdge(N2, N3); expectedClosure.putEdge(N2, N4); expectedClosure.putEdge(N3, N3); expectedClosure.putEdge(N3, N4); expectedClosure.putEdge(N4, N4); assertThat(transitiveClosure(undirectedGraph, ADD_SELF_LOOPS_FOR_CYCLES)) .isEqualTo(expectedClosure); } @Test public void transitiveClosure_directedCycleGraph_addSelfLoopsAlways() { MutableGraph<Integer> directedGraph = GraphBuilder.directed().allowsSelfLoops(false).build(); directedGraph.putEdge(N1, N2); directedGraph.putEdge(N2, N3); directedGraph.putEdge(N3, N4); directedGraph.putEdge(N4, N1); MutableGraph<Integer> expectedClosure = GraphBuilder.directed().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N1); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N1, N4); expectedClosure.putEdge(N2, N1); expectedClosure.putEdge(N2, N2); expectedClosure.putEdge(N2, N3); expectedClosure.putEdge(N2, N4); expectedClosure.putEdge(N3, N1); expectedClosure.putEdge(N3, N2); expectedClosure.putEdge(N3, N3); expectedClosure.putEdge(N3, N4); expectedClosure.putEdge(N4, N1); expectedClosure.putEdge(N4, N2); expectedClosure.putEdge(N4, N3); expectedClosure.putEdge(N4, N4); 
assertThat(transitiveClosure(directedGraph, ADD_SELF_LOOPS_ALWAYS)).isEqualTo(expectedClosure); } @Test public void transitiveClosure_directedCycleGraph_addSelfLoopsForCycles() { MutableGraph<Integer> directedGraph = GraphBuilder.directed().allowsSelfLoops(false).build(); directedGraph.putEdge(N1, N2); directedGraph.putEdge(N2, N3); directedGraph.putEdge(N3, N4); directedGraph.putEdge(N4, N1); MutableGraph<Integer> expectedClosure = GraphBuilder.directed().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N1); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N1, N4); expectedClosure.putEdge(N2, N1); expectedClosure.putEdge(N2, N2); expectedClosure.putEdge(N2, N3); expectedClosure.putEdge(N2, N4); expectedClosure.putEdge(N3, N1); expectedClosure.putEdge(N3, N2); expectedClosure.putEdge(N3, N3); expectedClosure.putEdge(N3, N4); expectedClosure.putEdge(N4, N1); expectedClosure.putEdge(N4, N2); expectedClosure.putEdge(N4, N3); expectedClosure.putEdge(N4, N4); assertThat(transitiveClosure(directedGraph, ADD_SELF_LOOPS_FOR_CYCLES)) .isEqualTo(expectedClosure); } @Test public void transitiveClosure_undirectedCycleGraph_addSelfLoopsAlways() { MutableGraph<Integer> undirectedGraph = GraphBuilder.undirected().allowsSelfLoops(false).build(); undirectedGraph.putEdge(N1, N2); undirectedGraph.putEdge(N2, N3); undirectedGraph.putEdge(N3, N4); undirectedGraph.putEdge(N4, N1); MutableGraph<Integer> expectedClosure = GraphBuilder.undirected().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N1); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N1, N4); expectedClosure.putEdge(N2, N2); expectedClosure.putEdge(N2, N3); expectedClosure.putEdge(N2, N4); expectedClosure.putEdge(N3, N3); expectedClosure.putEdge(N3, N4); expectedClosure.putEdge(N4, N4); assertThat(transitiveClosure(undirectedGraph, ADD_SELF_LOOPS_ALWAYS)) .isEqualTo(expectedClosure); } @Test public void 
transitiveClosure_undirectedCycleGraph_addSelfLoopsForCycles() { MutableGraph<Integer> undirectedGraph = GraphBuilder.undirected().allowsSelfLoops(false).build(); undirectedGraph.putEdge(N1, N2); undirectedGraph.putEdge(N2, N3); undirectedGraph.putEdge(N3, N4); undirectedGraph.putEdge(N4, N1); MutableGraph<Integer> expectedClosure = GraphBuilder.undirected().allowsSelfLoops(true).build(); expectedClosure.putEdge(N1, N1); expectedClosure.putEdge(N1, N2); expectedClosure.putEdge(N1, N3); expectedClosure.putEdge(N1, N4); expectedClosure.putEdge(N2, N2); expectedClosure.putEdge(N2, N3); expectedClosure.putEdge(N2, N4); expectedClosure.putEdge(N3, N3); expectedClosure.putEdge(N3, N4); expectedClosure.putEdge(N4, N4); assertThat(transitiveClosure(undirectedGraph, ADD_SELF_LOOPS_FOR_CYCLES)) .isEqualTo(expectedClosure); } @Test public void transpose_undirectedGraph() { MutableGraph<Integer> undirectedGraph = GraphBuilder.undirected().build(); undirectedGraph.putEdge(N1, N2); assertThat(transpose(undirectedGraph)).isSameInstanceAs(undirectedGraph); } @Test public void transpose_directedGraph() { MutableGraph<Integer> directedGraph = GraphBuilder.directed().allowsSelfLoops(true).build(); directedGraph.putEdge(N1, N3); directedGraph.putEdge(N3, N1); directedGraph.putEdge(N1, N2); directedGraph.putEdge(N1, N1); directedGraph.putEdge(N3, N4); MutableGraph<Integer> expectedTranspose = GraphBuilder.directed().allowsSelfLoops(true).build(); expectedTranspose.putEdge(N3, N1); expectedTranspose.putEdge(N1, N3); expectedTranspose.putEdge(N2, N1); expectedTranspose.putEdge(N1, N1); expectedTranspose.putEdge(N4, N3); Graph<Integer> transpose = transpose(directedGraph); assertThat(transpose).isEqualTo(expectedTranspose); assertThat(transpose(transpose)).isSameInstanceAs(directedGraph); AbstractGraphTest.validateGraph(transpose); for (Integer node : directedGraph.nodes()) { assertThat(directedGraph.inDegree(node)).isSameInstanceAs(transpose.outDegree(node)); 
assertThat(directedGraph.outDegree(node)).isSameInstanceAs(transpose.inDegree(node)); } assertThat(transpose.successors(N1)).doesNotContain(N2); directedGraph.putEdge(N2, N1); // View should be updated. assertThat(transpose.successors(N1)).contains(N2); AbstractGraphTest.validateGraph(transpose); } @Test public void transpose_undirectedValueGraph() { MutableValueGraph<Integer, String> undirectedGraph = ValueGraphBuilder.undirected().build(); undirectedGraph.putEdgeValue(N1, N2, E12); assertThat(transpose(undirectedGraph)).isSameInstanceAs(undirectedGraph); } @Test public void transpose_directedValueGraph() { MutableValueGraph<Integer, String> directedGraph = ValueGraphBuilder.directed().allowsSelfLoops(true).build(); directedGraph.putEdgeValue(N1, N3, E13); directedGraph.putEdgeValue(N3, N1, E31); directedGraph.putEdgeValue(N1, N2, E12); directedGraph.putEdgeValue(N1, N1, E11); directedGraph.putEdgeValue(N3, N4, E34); MutableValueGraph<Integer, String> expectedTranspose = ValueGraphBuilder.directed().allowsSelfLoops(true).build(); expectedTranspose.putEdgeValue(N3, N1, E13); expectedTranspose.putEdgeValue(N1, N3, E31); expectedTranspose.putEdgeValue(N2, N1, E12); expectedTranspose.putEdgeValue(N1, N1, E11); expectedTranspose.putEdgeValue(N4, N3, E34); ValueGraph<Integer, String> transpose = transpose(directedGraph); assertThat(transpose).isEqualTo(expectedTranspose); assertThat(transpose(transpose)).isSameInstanceAs(directedGraph); AbstractGraphTest.validateGraph(transpose.asGraph()); assertThat(transpose.edgeValueOrDefault(N1, N2, null)).isNull(); for (Integer node : directedGraph.nodes()) { assertThat(directedGraph.inDegree(node)).isSameInstanceAs(transpose.outDegree(node)); assertThat(directedGraph.outDegree(node)).isSameInstanceAs(transpose.inDegree(node)); } directedGraph.putEdgeValue(N2, N1, E21); // View should be updated. 
assertThat(transpose.edgeValueOrDefault(N1, N2, null)).isEqualTo(E21); AbstractGraphTest.validateGraph(transpose.asGraph()); } @Test public void transpose_undirectedNetwork() { MutableNetwork<Integer, String> undirectedGraph = NetworkBuilder.undirected().build(); undirectedGraph.addEdge(N1, N2, E12); assertThat(transpose(undirectedGraph)).isSameInstanceAs(undirectedGraph); } @Test public void transpose_directedNetwork() { MutableNetwork<Integer, String> directedGraph = NetworkBuilder.directed().allowsParallelEdges(true).allowsSelfLoops(true).build(); directedGraph.addEdge(N1, N3, E13); directedGraph.addEdge(N3, N1, E31); directedGraph.addEdge(N1, N2, E12); directedGraph.addEdge(N1, N2, E12_A); directedGraph.addEdge(N1, N1, E11); directedGraph.addEdge(N3, N4, E34); MutableNetwork<Integer, String> expectedTranspose = NetworkBuilder.directed().allowsParallelEdges(true).allowsSelfLoops(true).build(); expectedTranspose.addEdge(N3, N1, E13); expectedTranspose.addEdge(N1, N3, E31); expectedTranspose.addEdge(N2, N1, E12); expectedTranspose.addEdge(N2, N1, E12_A); expectedTranspose.addEdge(N1, N1, E11); expectedTranspose.addEdge(N4, N3, E34); Network<Integer, String> transpose = transpose(directedGraph); assertThat(transpose).isEqualTo(expectedTranspose); assertThat(transpose(transpose)).isSameInstanceAs(directedGraph); AbstractNetworkTest.validateNetwork(transpose); assertThat(transpose.edgesConnecting(N1, N2)).isEmpty(); assertThat(transpose.edgeConnecting(N1, N2)).isEmpty(); assertThat(transpose.edgeConnectingOrNull(N1, N2)).isNull(); for (Integer node : directedGraph.nodes()) { assertThat(directedGraph.inDegree(node)).isSameInstanceAs(transpose.outDegree(node)); assertThat(directedGraph.outDegree(node)).isSameInstanceAs(transpose.inDegree(node)); } directedGraph.addEdge(N2, N1, E21); // View should be updated. 
assertThat(transpose.edgesConnecting(N1, N2)).containsExactly(E21); assertThat(transpose.edgeConnecting(N1, N2)).hasValue(E21); assertThat(transpose.edgeConnectingOrNull(N1, N2)).isEqualTo(E21); AbstractNetworkTest.validateNetwork(transpose); } @Test public void inducedSubgraph_graph() { ImmutableSet<Integer> nodeSubset = ImmutableSet.of(N1, N2, N4); MutableGraph<Integer> directedGraph = GraphBuilder.directed().allowsSelfLoops(true).build(); directedGraph.putEdge(N1, N2); directedGraph.putEdge(N2, N1); directedGraph.putEdge(N1, N3); // only incident to one node in nodeSubset directedGraph.putEdge(N4, N4); directedGraph.putEdge(5, 6); // not incident to any node in nodeSubset MutableGraph<Integer> expectedSubgraph = GraphBuilder.directed().allowsSelfLoops(true).build(); expectedSubgraph.putEdge(N1, N2); expectedSubgraph.putEdge(N2, N1); expectedSubgraph.putEdge(N4, N4); assertThat(inducedSubgraph(directedGraph, nodeSubset)).isEqualTo(expectedSubgraph); } @Test public void inducedSubgraph_valueGraph() { ImmutableSet<Integer> nodeSubset = ImmutableSet.of(N1, N2, N4); MutableValueGraph<Integer, String> directedGraph = ValueGraphBuilder.directed().allowsSelfLoops(true).build(); directedGraph.putEdgeValue(N1, N2, E12); directedGraph.putEdgeValue(N2, N1, E21); directedGraph.putEdgeValue(N1, N3, E13); // only incident to one node in nodeSubset directedGraph.putEdgeValue(N4, N4, E44); directedGraph.putEdgeValue(5, 6, "5-6"); // not incident to any node in nodeSubset MutableValueGraph<Integer, String> expectedSubgraph = ValueGraphBuilder.directed().allowsSelfLoops(true).build(); expectedSubgraph.putEdgeValue(N1, N2, E12); expectedSubgraph.putEdgeValue(N2, N1, E21); expectedSubgraph.putEdgeValue(N4, N4, E44); assertThat(inducedSubgraph(directedGraph, nodeSubset)).isEqualTo(expectedSubgraph); } @Test public void inducedSubgraph_network() { ImmutableSet<Integer> nodeSubset = ImmutableSet.of(N1, N2, N4); MutableNetwork<Integer, String> directedGraph = 
NetworkBuilder.directed().allowsSelfLoops(true).build(); directedGraph.addEdge(N1, N2, E12); directedGraph.addEdge(N2, N1, E21); directedGraph.addEdge(N1, N3, E13); // only incident to one node in nodeSubset directedGraph.addEdge(N4, N4, E44); directedGraph.addEdge(5, 6, "5-6"); // not incident to any node in nodeSubset MutableNetwork<Integer, String> expectedSubgraph = NetworkBuilder.directed().allowsSelfLoops(true).build(); expectedSubgraph.addEdge(N1, N2, E12); expectedSubgraph.addEdge(N2, N1, E21); expectedSubgraph.addEdge(N4, N4, E44); assertThat(inducedSubgraph(directedGraph, nodeSubset)).isEqualTo(expectedSubgraph); } @Test public void inducedSubgraph_nodeNotInGraph() { MutableNetwork<Integer, String> undirectedGraph = NetworkBuilder.undirected().build(); assertThrows( IllegalArgumentException.class, () -> inducedSubgraph(undirectedGraph, ImmutableSet.of(N1))); } @Test public void copyOf_nullArgument() { assertThrows(NullPointerException.class, () -> copyOf((Graph<?>) null)); } @Test public void copyOf_directedGraph() { Graph<Integer> directedGraph = buildDirectedGraph(); Graph<Integer> copy = copyOf(directedGraph); assertThat(copy).isEqualTo(directedGraph); } @Test public void copyOf_undirectedGraph() { Graph<Integer> undirectedGraph = buildUndirectedGraph(); Graph<Integer> copy = copyOf(undirectedGraph); assertThat(copy).isEqualTo(undirectedGraph); } @Test public void copyOf_directedValueGraph() { ValueGraph<Integer, String> directedGraph = buildDirectedValueGraph(); ValueGraph<Integer, String> copy = copyOf(directedGraph); assertThat(copy).isEqualTo(directedGraph); } @Test public void copyOf_undirectedValueGraph() { ValueGraph<Integer, String> undirectedGraph = buildUndirectedValueGraph(); ValueGraph<Integer, String> copy = copyOf(undirectedGraph); assertThat(copy).isEqualTo(undirectedGraph); } @Test public void copyOf_directedNetwork() { Network<Integer, String> directedGraph = buildDirectedNetwork(); Network<Integer, String> copy = 
copyOf(directedGraph); assertThat(copy).isEqualTo(directedGraph); } @Test public void copyOf_undirectedNetwork() { Network<Integer, String> undirectedGraph = buildUndirectedNetwork(); Network<Integer, String> copy = copyOf(undirectedGraph); assertThat(copy).isEqualTo(undirectedGraph); } // Graph creation tests @Test public void createDirected() { MutableNetwork<Integer, String> directedGraph = NetworkBuilder.directed().build(); assertThat(directedGraph.nodes()).isEmpty(); assertThat(directedGraph.edges()).isEmpty(); assertThat(directedGraph.addEdge(N1, N2, E12)).isTrue(); assertThat(directedGraph.edgesConnecting(N1, N2)).isEqualTo(ImmutableSet.of(E12)); assertThat(directedGraph.edgesConnecting(N2, N1)).isEmpty(); // By default, parallel edges are not allowed. IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> directedGraph.addEdge(N1, N2, E12_A)); assertThat(e).hasMessageThat().contains(ERROR_PARALLEL_EDGE); // By default, self-loop edges are not allowed. e = assertThrows(IllegalArgumentException.class, () -> directedGraph.addEdge(N1, N1, E11)); assertThat(e).hasMessageThat().contains(ERROR_SELF_LOOP); } @Test public void createUndirected() { MutableNetwork<Integer, String> undirectedGraph = NetworkBuilder.undirected().build(); assertThat(undirectedGraph.nodes()).isEmpty(); assertThat(undirectedGraph.edges()).isEmpty(); assertThat(undirectedGraph.addEdge(N1, N2, E12)).isTrue(); assertThat(undirectedGraph.edgesConnecting(N1, N2)).isEqualTo(ImmutableSet.of(E12)); assertThat(undirectedGraph.edgesConnecting(N2, N1)).isEqualTo(ImmutableSet.of(E12)); // By default, parallel edges are not allowed. 
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> undirectedGraph.addEdge(N1, N2, E12_A)); assertThat(e).hasMessageThat().contains(ERROR_PARALLEL_EDGE); e = assertThrows(IllegalArgumentException.class, () -> undirectedGraph.addEdge(N2, N1, E21)); assertThat(e).hasMessageThat().contains(ERROR_PARALLEL_EDGE); // By default, self-loop edges are not allowed. e = assertThrows(IllegalArgumentException.class, () -> undirectedGraph.addEdge(N1, N1, E11)); assertThat(e).hasMessageThat().contains(ERROR_SELF_LOOP); } @Test public void createDirected_multigraph() { MutableNetwork<Integer, String> directedMultigraph = NetworkBuilder.directed().allowsParallelEdges(true).build(); assertThat(directedMultigraph.addEdge(N1, N2, E12)).isTrue(); assertThat(directedMultigraph.addEdge(N1, N2, E12_A)).isTrue(); assertThat(directedMultigraph.edgesConnecting(N1, N2)).isEqualTo(ImmutableSet.of(E12, E12_A)); assertThat(directedMultigraph.edgesConnecting(N2, N1)).isEmpty(); } @Test public void createUndirected_multigraph() { MutableNetwork<Integer, String> undirectedMultigraph = NetworkBuilder.undirected().allowsParallelEdges(true).build(); assertThat(undirectedMultigraph.addEdge(N1, N2, E12)).isTrue(); assertThat(undirectedMultigraph.addEdge(N1, N2, E12_A)).isTrue(); assertThat(undirectedMultigraph.addEdge(N2, N1, E21)).isTrue(); assertThat(undirectedMultigraph.edgesConnecting(N1, N2)) .isEqualTo(ImmutableSet.of(E12, E12_A, E21)); } @Test public void createDirected_expectedNodeCount() { MutableNetwork<Integer, String> directedGraph = NetworkBuilder.directed().expectedNodeCount(NODE_COUNT).build(); assertThat(directedGraph.addEdge(N1, N2, E12)).isTrue(); assertThat(directedGraph.edgesConnecting(N1, N2)).isEqualTo(ImmutableSet.of(E12)); assertThat(directedGraph.edgesConnecting(N2, N1)).isEmpty(); } @Test public void createUndirected_expectedNodeCount() { MutableNetwork<Integer, String> undirectedGraph = 
NetworkBuilder.undirected().expectedNodeCount(NODE_COUNT).build(); assertThat(undirectedGraph.addEdge(N1, N2, E12)).isTrue(); assertThat(undirectedGraph.edgesConnecting(N1, N2)).isEqualTo(ImmutableSet.of(E12)); assertThat(undirectedGraph.edgesConnecting(N2, N1)).isEqualTo(ImmutableSet.of(E12)); } @Test public void builder_expectedNodeCount_negative() { IllegalArgumentException e = assertThrows( IllegalArgumentException.class, () -> NetworkBuilder.directed().expectedNodeCount(-1)); assertThat(e).hasMessageThat().contains(ERROR_NEGATIVE_COUNT); } @Test public void createDirected_expectedEdgeCount() { MutableNetwork<Integer, String> directedGraph = NetworkBuilder.directed().expectedEdgeCount(EDGE_COUNT).build(); assertThat(directedGraph.addEdge(N1, N2, E12)).isTrue(); assertThat(directedGraph.edgesConnecting(N1, N2)).isEqualTo(ImmutableSet.of(E12)); assertThat(directedGraph.edgesConnecting(N2, N1)).isEmpty(); } @Test public void createUndirected_expectedEdgeCount() { MutableNetwork<Integer, String> undirectedGraph = NetworkBuilder.undirected().expectedEdgeCount(EDGE_COUNT).build(); assertThat(undirectedGraph.addEdge(N1, N2, E12)).isTrue(); assertThat(undirectedGraph.edgesConnecting(N1, N2)).isEqualTo(ImmutableSet.of(E12)); assertThat(undirectedGraph.edgesConnecting(N2, N1)).isEqualTo(ImmutableSet.of(E12)); } @Test public void builder_expectedEdgeCount_negative() { IllegalArgumentException e = assertThrows( IllegalArgumentException.class, () -> NetworkBuilder.directed().expectedEdgeCount(-1)); assertThat(e).hasMessageThat().contains(ERROR_NEGATIVE_COUNT); } private static MutableGraph<Integer> buildDirectedGraph() { MutableGraph<Integer> directedGraph = GraphBuilder.directed().allowsSelfLoops(true).build(); directedGraph.putEdge(N1, N1); directedGraph.putEdge(N1, N2); directedGraph.putEdge(N2, N1); return directedGraph; } private static MutableGraph<Integer> buildUndirectedGraph() { MutableGraph<Integer> undirectedGraph = 
GraphBuilder.undirected().allowsSelfLoops(true).build(); undirectedGraph.putEdge(N1, N1); undirectedGraph.putEdge(N1, N2); undirectedGraph.putEdge(N2, N1); return undirectedGraph; } private static MutableValueGraph<Integer, String> buildDirectedValueGraph() { MutableValueGraph<Integer, String> directedGraph = ValueGraphBuilder.directed().allowsSelfLoops(true).build(); directedGraph.putEdgeValue(N1, N1, E11); directedGraph.putEdgeValue(N1, N2, E12); directedGraph.putEdgeValue(N2, N1, E21); return directedGraph; } private static MutableValueGraph<Integer, String> buildUndirectedValueGraph() { MutableValueGraph<Integer, String> undirectedGraph = ValueGraphBuilder.undirected().allowsSelfLoops(true).build(); undirectedGraph.putEdgeValue(N1, N1, E11); undirectedGraph.putEdgeValue(N1, N2, E12); undirectedGraph.putEdgeValue(N2, N1, E21); // overwrites E12 return undirectedGraph; } private static MutableNetwork<Integer, String> buildDirectedNetwork() { MutableNetwork<Integer, String> directedGraph = NetworkBuilder.directed().allowsParallelEdges(true).allowsSelfLoops(true).build(); directedGraph.addEdge(N1, N1, E11); directedGraph.addEdge(N1, N2, E12); directedGraph.addEdge(N1, N1, E11_A); directedGraph.addEdge(N1, N2, E12_A); directedGraph.addEdge(N2, N1, E21); return directedGraph; } private static MutableNetwork<Integer, String> buildUndirectedNetwork() { MutableNetwork<Integer, String> undirectedGraph = NetworkBuilder.undirected().allowsParallelEdges(true).allowsSelfLoops(true).build(); undirectedGraph.addEdge(N1, N1, E11); undirectedGraph.addEdge(N1, N2, E12); undirectedGraph.addEdge(N1, N1, E11_A); undirectedGraph.addEdge(N1, N2, E12_A); undirectedGraph.addEdge(N2, N1, E21); return undirectedGraph; } }
java
github
https://github.com/google/guava
guava-tests/test/com/google/common/graph/GraphsTest.java
# -*- coding: utf-8 -*- """ Swedish specific Form helpers """ from __future__ import absolute_import, unicode_literals import re from django import forms from django.utils.translation import ugettext_lazy as _ from django.core.validators import EMPTY_VALUES from django.contrib.localflavor.se.se_counties import COUNTY_CHOICES from django.contrib.localflavor.se.utils import (id_number_checksum, validate_id_birthday, format_personal_id_number, valid_organisation, format_organisation_number) __all__ = ('SECountySelect', 'SEOrganisationNumberField', 'SEPersonalIdentityNumberField', 'SEPostalCodeField') SWEDISH_ID_NUMBER = re.compile(r'^(?P<century>\d{2})?(?P<year>\d{2})(?P<month>\d{2})(?P<day>\d{2})(?P<sign>[\-+])?(?P<serial>\d{3})(?P<checksum>\d)$') SE_POSTAL_CODE = re.compile(r'^[1-9]\d{2} ?\d{2}$') class SECountySelect(forms.Select): """ A Select form widget that uses a list of the Swedish counties (län) as its choices. The cleaned value is the official county code -- see http://en.wikipedia.org/wiki/Counties_of_Sweden for a list. """ def __init__(self, attrs=None): super(SECountySelect, self).__init__(attrs=attrs, choices=COUNTY_CHOICES) class SEOrganisationNumberField(forms.CharField): """ A form field that validates input as a Swedish organisation number (organisationsnummer). It accepts the same input as SEPersonalIdentityField (for sole proprietorships (enskild firma). However, co-ordination numbers are not accepted. It also accepts ordinary Swedish organisation numbers with the format NNNNNNNNNN. The return value will be YYYYMMDDXXXX for sole proprietors, and NNNNNNNNNN for other organisations. 
""" default_error_messages = { 'invalid': _('Enter a valid Swedish organisation number.'), } def clean(self, value): value = super(SEOrganisationNumberField, self).clean(value) if value in EMPTY_VALUES: return '' match = SWEDISH_ID_NUMBER.match(value) if not match: raise forms.ValidationError(self.error_messages['invalid']) gd = match.groupdict() # Compare the calculated value with the checksum if id_number_checksum(gd) != int(gd['checksum']): raise forms.ValidationError(self.error_messages['invalid']) # First: check if this is a real organisation_number if valid_organisation(gd): return format_organisation_number(gd) # Is this a single properitor (enskild firma)? try: birth_day = validate_id_birthday(gd, False) return format_personal_id_number(birth_day, gd) except ValueError: raise forms.ValidationError(self.error_messages['invalid']) class SEPersonalIdentityNumberField(forms.CharField): """ A form field that validates input as a Swedish personal identity number (personnummer). The correct formats are YYYYMMDD-XXXX, YYYYMMDDXXXX, YYMMDD-XXXX, YYMMDDXXXX and YYMMDD+XXXX. A + indicates that the person is older than 100 years, which will be taken into consideration when the date is validated. The checksum will be calculated and checked. The birth date is checked to be a valid date. By default, co-ordination numbers (samordningsnummer) will be accepted. To only allow real personal identity numbers, pass the keyword argument coordination_number=False to the constructor. The cleaned value will always have the format YYYYMMDDXXXX. 
""" def __init__(self, coordination_number=True, *args, **kwargs): self.coordination_number = coordination_number super(SEPersonalIdentityNumberField, self).__init__(*args, **kwargs) default_error_messages = { 'invalid': _('Enter a valid Swedish personal identity number.'), 'coordination_number': _('Co-ordination numbers are not allowed.'), } def clean(self, value): value = super(SEPersonalIdentityNumberField, self).clean(value) if value in EMPTY_VALUES: return '' match = SWEDISH_ID_NUMBER.match(value) if match is None: raise forms.ValidationError(self.error_messages['invalid']) gd = match.groupdict() # compare the calculated value with the checksum if id_number_checksum(gd) != int(gd['checksum']): raise forms.ValidationError(self.error_messages['invalid']) # check for valid birthday try: birth_day = validate_id_birthday(gd) except ValueError: raise forms.ValidationError(self.error_messages['invalid']) # make sure that co-ordination numbers do not pass if not allowed if not self.coordination_number and int(gd['day']) > 60: raise forms.ValidationError(self.error_messages['coordination_number']) return format_personal_id_number(birth_day, gd) class SEPostalCodeField(forms.RegexField): """ A form field that validates input as a Swedish postal code (postnummer). Valid codes consist of five digits (XXXXX). The number can optionally be formatted with a space after the third digit (XXX XX). The cleaned value will never contain the space. """ default_error_messages = { 'invalid': _('Enter a Swedish postal code in the format XXXXX.'), } def __init__(self, *args, **kwargs): super(SEPostalCodeField, self).__init__(SE_POSTAL_CODE, *args, **kwargs) def clean(self, value): return super(SEPostalCodeField, self).clean(value).replace(' ', '')
unknown
codeparrot/codeparrot-clean
"""A high-speed, production ready, thread pooled, generic WSGI server. Simplest example on how to use this module directly (without using CherryPy's application machinery): from cherrypy import wsgiserver def my_crazy_app(environ, start_response): status = '200 OK' response_headers = [('Content-type','text/plain')] start_response(status, response_headers) return ['Hello world!\n'] # Here we set our application to the script_name '/' wsgi_apps = [('/', my_crazy_app)] server = wsgiserver.CherryPyWSGIServer(('localhost', 8070), wsgi_apps, server_name='localhost') # Want SSL support? Just set these attributes # server.ssl_certificate = <filename> # server.ssl_private_key = <filename> if __name__ == '__main__': try: server.start() except KeyboardInterrupt: server.stop() This won't call the CherryPy engine (application side) at all, only the WSGI server, which is independant from the rest of CherryPy. Don't let the name "CherryPyWSGIServer" throw you; the name merely reflects its origin, not it's coupling. The CherryPy WSGI server can serve as many WSGI applications as you want in one instance: wsgi_apps = [('/', my_crazy_app), ('/blog', my_blog_app)] """ import base64 import Queue import os import re quoted_slash = re.compile("(?i)%2F") import rfc822 import socket try: import cStringIO as StringIO except ImportError: import StringIO import sys import threading import time import traceback from urllib import unquote from urlparse import urlparse try: from OpenSSL import SSL from OpenSSL import crypto except ImportError: SSL = None import errno socket_errors_to_ignore = [] # Not all of these names will be defined for every platform. 
for _ in ("EPIPE", "ETIMEDOUT", "ECONNREFUSED", "ECONNRESET", "EHOSTDOWN", "EHOSTUNREACH", "WSAECONNABORTED", "WSAECONNREFUSED", "WSAECONNRESET", "WSAENETRESET", "WSAETIMEDOUT"): if _ in dir(errno): socket_errors_to_ignore.append(getattr(errno, _)) # de-dupe the list socket_errors_to_ignore = dict.fromkeys(socket_errors_to_ignore).keys() socket_errors_to_ignore.append("timed out") comma_separated_headers = ['ACCEPT', 'ACCEPT-CHARSET', 'ACCEPT-ENCODING', 'ACCEPT-LANGUAGE', 'ACCEPT-RANGES', 'ALLOW', 'CACHE-CONTROL', 'CONNECTION', 'CONTENT-ENCODING', 'CONTENT-LANGUAGE', 'EXPECT', 'IF-MATCH', 'IF-NONE-MATCH', 'PRAGMA', 'PROXY-AUTHENTICATE', 'TE', 'TRAILER', 'TRANSFER-ENCODING', 'UPGRADE', 'VARY', 'VIA', 'WARNING', 'WWW-AUTHENTICATE'] class HTTPRequest(object): """An HTTP Request (and response). A single HTTP connection may consist of multiple request/response pairs. connection: the HTTP Connection object which spawned this request. rfile: the 'read' fileobject from the connection's socket ready: when True, the request has been parsed and is ready to begin generating the response. When False, signals the calling Connection that the response should not be generated and the connection should close. close_connection: signals the calling Connection that the request should close. This does not imply an error! The client and/or server may each request that the connection be closed. chunked_write: if True, output will be encoded with the "chunked" transfer-coding. This value is set automatically inside send_headers. 
""" def __init__(self, connection): self.connection = connection self.rfile = self.connection.rfile self.sendall = self.connection.sendall self.environ = connection.environ.copy() self.ready = False self.started_response = False self.status = "" self.outheaders = [] self.sent_headers = False self.close_connection = False self.chunked_write = False def parse_request(self): """Parse the next HTTP request start-line and message-headers.""" # HTTP/1.1 connections are persistent by default. If a client # requests a page, then idles (leaves the connection open), # then rfile.readline() will raise socket.error("timed out"). # Note that it does this based on the value given to settimeout(), # and doesn't need the client to request or acknowledge the close # (although your TCP stack might suffer for it: cf Apache's history # with FIN_WAIT_2). request_line = self.rfile.readline() if not request_line: # Force self.ready = False so the connection will close. self.ready = False return if request_line == "\r\n": # RFC 2616 sec 4.1: "...if the server is reading the protocol # stream at the beginning of a message and receives a CRLF # first, it should ignore the CRLF." # But only ignore one leading line! else we enable a DoS. request_line = self.rfile.readline() if not request_line: self.ready = False return server = self.connection.server environ = self.environ environ["SERVER_SOFTWARE"] = "%s WSGI Server" % server.version method, path, req_protocol = request_line.strip().split(" ", 2) environ["REQUEST_METHOD"] = method # path may be an abs_path (including "http://host.domain.tld"); scheme, location, path, params, qs, frag = urlparse(path) if frag: self.simple_response("400 Bad Request", "Illegal #fragment in Request-URI.") return if scheme: environ["wsgi.url_scheme"] = scheme if params: path = path + ";" + params # Unquote the path+params (e.g. "/this%20path" -> "this path"). 
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 # # But note that "...a URI must be separated into its components # before the escaped characters within those components can be # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2 atoms = [unquote(x) for x in quoted_slash.split(path)] path = "%2F".join(atoms) if path == "*": # This means, of course, that the last wsgi_app (shortest path) # will always handle a URI of "*". environ["SCRIPT_NAME"] = "" environ["PATH_INFO"] = "*" self.wsgi_app = server.mount_points[-1][1] else: for mount_point, wsgi_app in server.mount_points: # The mount_points list should be sorted by length, descending. if path.startswith(mount_point + "/") or path == mount_point: environ["SCRIPT_NAME"] = mount_point environ["PATH_INFO"] = path[len(mount_point):] self.wsgi_app = wsgi_app break else: self.simple_response("404 Not Found") return # Note that, like wsgiref and most other WSGI servers, # we unquote the path but not the query string. environ["QUERY_STRING"] = qs # Compare request and server HTTP protocol versions, in case our # server does not support the requested protocol. Limit our output # to min(req, server). We want the following output: # request server actual written supported response # protocol protocol response protocol feature set # a 1.0 1.0 1.0 1.0 # b 1.0 1.1 1.1 1.0 # c 1.1 1.0 1.0 1.0 # d 1.1 1.1 1.1 1.1 # Notice that, in (b), the response will be "HTTP/1.1" even though # the client only understands 1.0. RFC 2616 10.5.6 says we should # only return 505 if the _major_ version is different. rp = int(req_protocol[5]), int(req_protocol[7]) sp = int(server.protocol[5]), int(server.protocol[7]) if sp[0] != rp[0]: self.simple_response("505 HTTP Version Not Supported") return # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol. environ["SERVER_PROTOCOL"] = req_protocol # set a non-standard environ entry so the WSGI app can know what # the *real* server protocol is (and what features to support). 
# See http://www.faqs.org/rfcs/rfc2145.html. environ["ACTUAL_SERVER_PROTOCOL"] = server.protocol self.response_protocol = "HTTP/%s.%s" % min(rp, sp) # If the Request-URI was an absoluteURI, use its location atom. if location: environ["SERVER_NAME"] = location # then all the http headers try: self.read_headers() except ValueError, ex: self.simple_response("400 Bad Request", repr(ex.args)) return creds = environ.get("HTTP_AUTHORIZATION", "").split(" ", 1) environ["AUTH_TYPE"] = creds[0] if creds[0].lower() == 'basic': user, pw = base64.decodestring(creds[1]).split(":", 1) environ["REMOTE_USER"] = user # Persistent connection support if self.response_protocol == "HTTP/1.1": if environ.get("HTTP_CONNECTION", "") == "close": self.close_connection = True else: # HTTP/1.0 if environ.get("HTTP_CONNECTION", "") != "Keep-Alive": self.close_connection = True # Transfer-Encoding support te = None if self.response_protocol == "HTTP/1.1": te = environ.get("HTTP_TRANSFER_ENCODING") if te: te = [x.strip().lower() for x in te.split(",") if x.strip()] read_chunked = False if te: for enc in te: if enc == "chunked": read_chunked = True else: # Note that, even if we see "chunked", we must reject # if there is an extension we don't recognize. self.simple_response("501 Unimplemented") self.close_connection = True return if read_chunked: if not self.decode_chunked(): return # From PEP 333: # "Servers and gateways that implement HTTP 1.1 must provide # transparent support for HTTP 1.1's "expect/continue" mechanism. # This may be done in any of several ways: # 1. Respond to requests containing an Expect: 100-continue request # with an immediate "100 Continue" response, and proceed normally. # 2. Proceed with the request normally, but provide the application # with a wsgi.input stream that will send the "100 Continue" # response if/when the application first attempts to read from # the input stream. The read request must then remain blocked # until the client responds. # 3. 
Wait until the client decides that the server does not support # expect/continue, and sends the request body on its own. # (This is suboptimal, and is not recommended.) # # We used to do 3, but are now doing 1. Maybe we'll do 2 someday, # but it seems like it would be a big slowdown for such a rare case. if environ.get("HTTP_EXPECT", "") == "100-continue": self.simple_response(100) self.ready = True def read_headers(self): """Read header lines from the incoming stream.""" environ = self.environ while True: line = self.rfile.readline() if not line: # No more data--illegal end of headers raise ValueError("Illegal end of headers.") if line == '\r\n': # Normal end of headers break if line[0] in ' \t': # It's a continuation line. v = line.strip() else: k, v = line.split(":", 1) k, v = k.strip().upper(), v.strip() envname = "HTTP_" + k.replace("-", "_") if k in comma_separated_headers: existing = environ.get(envname) if existing: v = ", ".join((existing, v)) environ[envname] = v ct = environ.pop("HTTP_CONTENT_TYPE", None) if ct: environ["CONTENT_TYPE"] = ct cl = environ.pop("HTTP_CONTENT_LENGTH", None) if cl: environ["CONTENT_LENGTH"] = cl def decode_chunked(self): """Decode the 'chunked' transfer coding.""" cl = 0 data = StringIO.StringIO() while True: line = self.rfile.readline().strip().split(";", 1) chunk_size = int(line.pop(0), 16) if chunk_size <= 0: break ## if line: chunk_extension = line[0] cl += chunk_size data.write(self.rfile.read(chunk_size)) crlf = self.rfile.read(2) if crlf != "\r\n": self.simple_response("400 Bad Request", "Bad chunked transfer coding " "(expected '\\r\\n', got %r)" % crlf) return # Grab any trailer headers self.read_headers() data.seek(0) self.environ["wsgi.input"] = data self.environ["CONTENT_LENGTH"] = str(cl) or "" return True def respond(self): """Call the appropriate WSGI app and write its iterable output.""" response = self.wsgi_app(self.environ, self.start_response) try: for chunk in response: # "The start_response callable must 
not actually transmit # the response headers. Instead, it must store them for the # server or gateway to transmit only after the first # iteration of the application return value that yields # a NON-EMPTY string, or upon the application's first # invocation of the write() callable." (PEP 333) if chunk: self.write(chunk) finally: if hasattr(response, "close"): response.close() if (self.ready and not self.sent_headers and not self.connection.server.interrupt): self.sent_headers = True self.send_headers() if self.chunked_write: self.sendall("0\r\n\r\n") def simple_response(self, status, msg=""): """Write a simple response back to the client.""" status = str(status) buf = ["%s %s\r\n" % (self.connection.server.protocol, status), "Content-Length: %s\r\n" % len(msg)] if status[:3] == "413" and self.response_protocol == 'HTTP/1.1': # Request Entity Too Large self.close_connection = True buf.append("Connection: close\r\n") buf.append("\r\n") if msg: buf.append(msg) self.sendall("".join(buf)) def start_response(self, status, headers, exc_info = None): """WSGI callable to begin the HTTP response.""" if self.started_response: if not exc_info: raise AssertionError("WSGI start_response called a second " "time with no exc_info.") else: try: raise exc_info[0], exc_info[1], exc_info[2] finally: exc_info = None self.started_response = True self.status = status self.outheaders.extend(headers) return self.write def write(self, chunk): """WSGI callable to write unbuffered data to the client. This method is also used internally by start_response (to write data from the iterable returned by the WSGI application). 
""" if not self.started_response: raise AssertionError("WSGI write called before start_response.") if not self.sent_headers: self.sent_headers = True self.send_headers() if self.chunked_write and chunk: buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"] self.sendall("".join(buf)) else: self.sendall(chunk) def send_headers(self): """Assert, process, and send the HTTP response message-headers.""" hkeys = [key.lower() for key, value in self.outheaders] status = int(self.status[:3]) if status == 413: # Request Entity Too Large. Close conn to avoid garbage. self.close_connection = True elif "content-length" not in hkeys: # "All 1xx (informational), 204 (no content), # and 304 (not modified) responses MUST NOT # include a message-body." So no point chunking. if status < 200 or status in (204, 205, 304): pass else: if self.response_protocol == 'HTTP/1.1': # Use the chunked transfer-coding self.chunked_write = True self.outheaders.append(("Transfer-Encoding", "chunked")) else: # Closing the conn is the only way to determine len. 
self.close_connection = True if "connection" not in hkeys: if self.response_protocol == 'HTTP/1.1': if self.close_connection: self.outheaders.append(("Connection", "close")) else: if not self.close_connection: self.outheaders.append(("Connection", "Keep-Alive")) if "date" not in hkeys: self.outheaders.append(("Date", rfc822.formatdate())) server = self.connection.server if "server" not in hkeys: self.outheaders.append(("Server", server.version)) buf = [server.protocol, " ", self.status, "\r\n"] try: buf += [k + ": " + v + "\r\n" for k, v in self.outheaders] except TypeError: if not isinstance(k, str): raise TypeError("WSGI response header key %r is not a string.") if not isinstance(v, str): raise TypeError("WSGI response header value %r is not a string.") else: raise buf.append("\r\n") self.sendall("".join(buf)) class NoSSLError(Exception): """Exception raised when a client speaks HTTP to an HTTPS socket.""" pass def _ssl_wrap_method(method, is_reader=False): """Wrap the given method with SSL error-trapping. is_reader: if False (the default), EOF errors will be raised. If True, EOF errors will return "" (to emulate normal sockets). """ def ssl_method_wrapper(self, *args, **kwargs): ## print (id(self), method, args, kwargs) start = time.time() while True: try: return method(self, *args, **kwargs) except (SSL.WantReadError, SSL.WantWriteError): # Sleep and try again. This is dangerous, because it means # the rest of the stack has no way of differentiating # between a "new handshake" error and "client dropped". # Note this isn't an endless loop: there's a timeout below. 
time.sleep(self.ssl_retry) except SSL.SysCallError, e: if is_reader and e.args == (-1, 'Unexpected EOF'): return "" errno = e.args[0] if is_reader and errno in socket_errors_to_ignore: return "" raise socket.error(errno) except SSL.Error, e: if is_reader and e.args == (-1, 'Unexpected EOF'): return "" thirdarg = None try: thirdarg = e.args[0][0][2] except IndexError: pass if is_reader and thirdarg == 'ssl handshake failure': return "" if thirdarg == 'http request': # The client is talking HTTP to an HTTPS server. raise NoSSLError() raise if time.time() - start > self.ssl_timeout: raise socket.timeout("timed out") return ssl_method_wrapper class SSL_fileobject(socket._fileobject): """Faux file object attached to a socket object.""" ssl_timeout = 3 ssl_retry = .01 close = _ssl_wrap_method(socket._fileobject.close) flush = _ssl_wrap_method(socket._fileobject.flush) write = _ssl_wrap_method(socket._fileobject.write) writelines = _ssl_wrap_method(socket._fileobject.writelines) read = _ssl_wrap_method(socket._fileobject.read, is_reader=True) readline = _ssl_wrap_method(socket._fileobject.readline, is_reader=True) readlines = _ssl_wrap_method(socket._fileobject.readlines, is_reader=True) class HTTPConnection(object): """An HTTP connection (active socket). socket: the raw socket object (usually TCP) for this connection. addr: the "bind address" for the remote end of the socket. For IP sockets, this is a tuple of (REMOTE_ADDR, REMOTE_PORT). For UNIX domain sockets, this will be a string. server: the HTTP Server for this Connection. Usually, the server object possesses a passive (server) socket which spawns multiple, active (client) sockets, one for each connection. environ: a WSGI environ template. This will be copied for each request. rfile: a fileobject for reading from the socket. sendall: a function for writing (+ flush) to the socket. 
""" rbufsize = -1 RequestHandlerClass = HTTPRequest environ = {"wsgi.version": (1, 0), "wsgi.url_scheme": "http", "wsgi.multithread": True, "wsgi.multiprocess": False, "wsgi.run_once": False, "wsgi.errors": sys.stderr, } def __init__(self, sock, addr, server): self.socket = sock self.addr = addr self.server = server # Copy the class environ into self. self.environ = self.environ.copy() if SSL and isinstance(sock, SSL.ConnectionType): timeout = sock.gettimeout() self.rfile = SSL_fileobject(sock, "r", self.rbufsize) self.rfile.ssl_timeout = timeout self.sendall = _ssl_wrap_method(sock.sendall) self.environ["wsgi.url_scheme"] = "https" self.environ["HTTPS"] = "on" sslenv = getattr(server, "ssl_environ", None) if sslenv: self.environ.update(sslenv) else: self.rfile = sock.makefile("r", self.rbufsize) self.sendall = sock.sendall self.environ.update({"wsgi.input": self.rfile, "SERVER_NAME": self.server.server_name, }) if isinstance(self.server.bind_addr, basestring): # AF_UNIX. This isn't really allowed by WSGI, which doesn't # address unix domain sockets. But it's better than nothing. self.environ["SERVER_PORT"] = "" else: self.environ["SERVER_PORT"] = str(self.server.bind_addr[1]) # optional values # Until we do DNS lookups, omit REMOTE_HOST self.environ["REMOTE_ADDR"] = self.addr[0] self.environ["REMOTE_PORT"] = str(self.addr[1]) def communicate(self): """Read each request and respond appropriately.""" try: while True: # (re)set req to None so that if something goes wrong in # the RequestHandlerClass constructor, the error doesn't # get written to the previous request. req = None req = self.RequestHandlerClass(self) # This order of operations should guarantee correct pipelining. 
req.parse_request() if not req.ready: return req.respond() if req.close_connection: return except socket.error, e: errno = e.args[0] if errno not in socket_errors_to_ignore: if req: req.simple_response("500 Internal Server Error", format_exc()) return except (KeyboardInterrupt, SystemExit): raise except NoSSLError: # Unwrap our sendall req.sendall = self.socket._sock.sendall req.simple_response("400 Bad Request", "The client sent a plain HTTP request, but " "this server only speaks HTTPS on this port.") except: if req: req.simple_response("500 Internal Server Error", format_exc()) def close(self): """Close the socket underlying this connection.""" self.rfile.close() self.socket.close() def format_exc(limit=None): """Like print_exc() but return a string. Backport for Python 2.3.""" try: etype, value, tb = sys.exc_info() return ''.join(traceback.format_exception(etype, value, tb, limit)) finally: etype = value = tb = None _SHUTDOWNREQUEST = None class WorkerThread(threading.Thread): """Thread which continuously polls a Queue for Connection objects. server: the HTTP Server which spawned this thread, and which owns the Queue and is placing active connections into it. ready: a simple flag for the calling server to know when this thread has begun polling the Queue. Due to the timing issues of polling a Queue, a WorkerThread does not check its own 'ready' flag after it has started. To stop the thread, it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue (one for each running WorkerThread). """ def __init__(self, server): self.ready = False self.server = server threading.Thread.__init__(self) def run(self): try: self.ready = True while True: conn = self.server.requests.get() if conn is _SHUTDOWNREQUEST: return try: conn.communicate() finally: conn.close() except (KeyboardInterrupt, SystemExit), exc: self.server.interrupt = exc class SSLConnection: """A thread-safe wrapper for an SSL.Connection. *args: the arguments to create the wrapped SSL.Connection(*args). 
""" def __init__(self, *args): self._ssl_conn = SSL.Connection(*args) self._lock = threading.RLock() for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read', 'renegotiate', 'bind', 'listen', 'connect', 'accept', 'setblocking', 'fileno', 'shutdown', 'close', 'get_cipher_list', 'getpeername', 'getsockname', 'getsockopt', 'setsockopt', 'makefile', 'get_app_data', 'set_app_data', 'state_string', 'sock_shutdown', 'get_peer_certificate', 'want_read', 'want_write', 'set_connect_state', 'set_accept_state', 'connect_ex', 'sendall', 'settimeout'): exec """def %s(self, *args): self._lock.acquire() try: return self._ssl_conn.%s(*args) finally: self._lock.release() """ % (f, f) class CherryPyWSGIServer(object): """An HTTP server for WSGI. bind_addr: a (host, port) tuple if TCP sockets are desired; for UNIX sockets, supply the filename as a string. wsgi_app: the WSGI 'application callable'; multiple WSGI applications may be passed as (script_name, callable) pairs. numthreads: the number of worker threads to create (default 10). server_name: the string to set for WSGI's SERVER_NAME environ entry. Defaults to socket.gethostname(). max: the maximum number of queued requests (defaults to -1 = no limit). request_queue_size: the 'backlog' argument to socket.listen(); specifies the maximum number of queued connections (default 5). timeout: the timeout in seconds for accepted connections (default 10). protocol: the version string to write in the Status-Line of all HTTP responses. For example, "HTTP/1.1" (the default). This also limits the supported features used in the response. SSL/HTTPS --------- The OpenSSL module must be importable for SSL functionality. You can obtain it from http://pyopenssl.sourceforge.net/ ssl_certificate: the filename of the server SSL certificate. ssl_privatekey: the filename of the server's private key file. If either of these is None (both are None by default), this server will not use SSL. 
If both are given and are valid, they will be read on server start and used in the SSL context for the listening socket. """ protocol = "HTTP/1.1" version = "CherryPy/3.0.1" ready = False _interrupt = None ConnectionClass = HTTPConnection # Paths to certificate and private key files ssl_certificate = None ssl_private_key = None def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None, max=-1, request_queue_size=5, timeout=10): self.requests = Queue.Queue(max) if callable(wsgi_app): # We've been handed a single wsgi_app, in CP-2.1 style. # Assume it's mounted at "". self.mount_points = [("", wsgi_app)] else: # We've been handed a list of (mount_point, wsgi_app) tuples, # so that the server can call different wsgi_apps, and also # correctly set SCRIPT_NAME. self.mount_points = wsgi_app self.mount_points.sort() self.mount_points.reverse() self.bind_addr = bind_addr self.numthreads = numthreads or 1 if not server_name: server_name = socket.gethostname() self.server_name = server_name self.request_queue_size = request_queue_size self._workerThreads = [] self.timeout = timeout def start(self): """Run the server forever.""" # We don't have to trap KeyboardInterrupt or SystemExit here, # because cherrpy.server already does so, calling self.stop() for us. # If you're using this server with another framework, you should # trap those exceptions in whatever code block calls start(). self._interrupt = None # Select the appropriate socket if isinstance(self.bind_addr, basestring): # AF_UNIX socket # So we can reuse the socket... try: os.unlink(self.bind_addr) except: pass # So everyone can access the socket... try: os.chmod(self.bind_addr, 0777) except: pass info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)] else: # AF_INET or AF_INET6 socket # Get the correct address family for our host (allows IPv6 addresses) host, port = self.bind_addr flags = 0 if host == '': # Despite the socket module docs, using '' does not # allow AI_PASSIVE to work. 
Passing None instead # returns '0.0.0.0' like we want. In other words: # host AI_PASSIVE result # '' Y 192.168.x.y # '' N 192.168.x.y # None Y 0.0.0.0 # None N 127.0.0.1 host = None flags = socket.AI_PASSIVE try: info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, flags) except socket.gaierror: # Probably a DNS issue. Assume IPv4. info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)] self.socket = None msg = "No socket could be created" for res in info: af, socktype, proto, canonname, sa = res try: self.bind(af, socktype, proto) except socket.error, msg: if self.socket: self.socket.close() self.socket = None continue break if not self.socket: raise socket.error, msg # Timeout so KeyboardInterrupt can be caught on Win32 self.socket.settimeout(1) self.socket.listen(self.request_queue_size) # Create worker threads for i in xrange(self.numthreads): self._workerThreads.append(WorkerThread(self)) for worker in self._workerThreads: worker.setName("CP WSGIServer " + worker.getName()) worker.start() for worker in self._workerThreads: while not worker.ready: time.sleep(.1) self.ready = True while self.ready: self.tick() if self.interrupt: while self.interrupt is True: # Wait for self.stop() to complete. See _set_interrupt. 
time.sleep(0.1) raise self.interrupt def bind(self, family, type, proto=0): """Create (or recreate) the actual socket object.""" self.socket = socket.socket(family, type, proto) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) ## self.socket.setsockopt(socket.SOL_SOCKET, socket.TCP_NODELAY, 1) if self.ssl_certificate and self.ssl_private_key: if SSL is None: raise ImportError("You must install pyOpenSSL to use HTTPS.") # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473 ctx = SSL.Context(SSL.SSLv23_METHOD) ctx.use_privatekey_file(self.ssl_private_key) ctx.use_certificate_file(self.ssl_certificate) self.socket = SSLConnection(ctx, self.socket) self.populate_ssl_environ() self.socket.bind(self.bind_addr) def tick(self): """Accept a new connection and put it on the Queue.""" try: s, addr = self.socket.accept() if not self.ready: return if hasattr(s, 'settimeout'): s.settimeout(self.timeout) conn = self.ConnectionClass(s, addr, self) self.requests.put(conn) except socket.timeout: # The only reason for the timeout in start() is so we can # notice keyboard interrupts on Win32, which don't interrupt # accept() by default return except socket.error, x: msg = x.args[1] if msg in ("Bad file descriptor", "Socket operation on non-socket"): # Our socket was closed. return if msg == "Resource temporarily unavailable": # Just try again. See http://www.cherrypy.org/ticket/479. return raise def _get_interrupt(self): return self._interrupt def _set_interrupt(self, interrupt): self._interrupt = True self.stop() self._interrupt = interrupt interrupt = property(_get_interrupt, _set_interrupt, doc="Set this to an Exception instance to " "interrupt the server.") def stop(self): """Gracefully shutdown a server that is serving forever.""" self.ready = False sock = getattr(self, "socket", None) if sock: if not isinstance(self.bind_addr, basestring): # Touch our own socket to make accept() return immediately. 
try: host, port = sock.getsockname()[:2] except socket.error, x: if x.args[1] != "Bad file descriptor": raise else: # Note that we're explicitly NOT using AI_PASSIVE, # here, because we want an actual IP to touch. # localhost won't work if we've bound to a public IP, # but it would if we bound to INADDR_ANY via host = ''. for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res s = None try: s = socket.socket(af, socktype, proto) # See http://groups.google.com/group/cherrypy-users/ # browse_frm/thread/bbfe5eb39c904fe0 s.settimeout(1.0) s.connect((host, port)) s.close() except socket.error: if s: s.close() if hasattr(sock, "close"): sock.close() self.socket = None # Must shut down threads here so the code that calls # this method can know when all threads are stopped. for worker in self._workerThreads: self.requests.put(_SHUTDOWNREQUEST) # Don't join currentThread (when stop is called inside a request). current = threading.currentThread() while self._workerThreads: worker = self._workerThreads.pop() if worker is not current and worker.isAlive: try: worker.join() except AssertionError: pass def populate_ssl_environ(self): """Create WSGI environ entries to be merged into each request.""" cert = open(self.ssl_certificate).read() cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert) self.ssl_environ = { # pyOpenSSL doesn't provide access to any of these AFAICT ## 'SSL_PROTOCOL': 'SSLv2', ## SSL_CIPHER string The cipher specification name ## SSL_VERSION_INTERFACE string The mod_ssl program version ## SSL_VERSION_LIBRARY string The OpenSSL program version } # Server certificate attributes self.ssl_environ.update({ 'SSL_SERVER_M_VERSION': cert.get_version(), 'SSL_SERVER_M_SERIAL': cert.get_serial_number(), ## 'SSL_SERVER_V_START': Validity of server's certificate (start time), ## 'SSL_SERVER_V_END': Validity of server's certificate (end time), }) for prefix, dn in [("I", cert.get_issuer()), ("S", 
cert.get_subject())]: # X509Name objects don't seem to have a way to get the # complete DN string. Use str() and slice it instead, # because str(dn) == "<X509Name object '/C=US/ST=...'>" dnstr = str(dn)[18:-2] wsgikey = 'SSL_SERVER_%s_DN' % prefix self.ssl_environ[wsgikey] = dnstr # The DN should be of the form: /k1=v1/k2=v2, but we must allow # for any value to contain slashes itself (in a URL). while dnstr: pos = dnstr.rfind("=") dnstr, value = dnstr[:pos], dnstr[pos + 1:] pos = dnstr.rfind("/") dnstr, key = dnstr[:pos], dnstr[pos + 1:] if key and value: wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key) self.ssl_environ[wsgikey] = value
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (c) 2006 ACYSOS S.L. (http://acysos.com) All Rights Reserved. # Pedro Tarrafeta <pedro@acysos.com> # Ignacio Ibeas <ignacio@acysos.com> # Copyright (c) 2008 Pablo Rocandio. All Rights Reserved. # Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights # Reserved. # Jordi Esteve <jesteve@zikzakmedia.com> # Copyright (c) 2013 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com) # Pedro M. Baeza <pedro.baeza@serviciosbaeza.com> # $Id$ # # Corregido para instalación TinyERP estándar 4.2.0: Zikzakmedia S.L. 2008 # Jordi Esteve <jesteve@zikzakmedia.com> # # Añadidas cuentas de remesas y tipos de pago. 2008 # Pablo Rocandio <salbet@gmail.com> # # Rehecho de nuevo para instalación OpenERP 5.0.0 sobre # account_payment_extension: Zikzakmedia S.L. 2009 # Jordi Esteve <jesteve@zikzakmedia.com> # # Refactorización. Acysos S.L. (http://www.acysos.com) 2012 # Ignacio Ibeas <ignacio@acysos.com> # # Migración Odoo 8.0. Acysos S.L. (http://www.acysos.com) 2015 # Ignacio Ibeas <ignacio@acysos.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
# ############################################################################## from openerp import _ from datetime import datetime from .log import Log from .converter import PaymentConverterSpain class Csb58(object): def __init__(self, env): self.env = env def _cabecera_presentador_58(self): converter = PaymentConverterSpain() texto = '5170' texto += (self.order.mode.bank_id.partner_id.vat[2:] + self.order.mode.csb_suffix).zfill(12) texto += datetime.today().strftime('%d%m%y') texto += 6*' ' texto += converter.to_ascii( self.order.mode.bank_id.partner_id.name).ljust(40) texto += 20*' ' cc = converter.digits_only(self.order.mode.bank_id.acc_number) texto += cc[0:8] texto += 66*' ' texto += '\r\n' if len(texto) != 164: raise Log(_('Configuration error:\n\nThe line "%s" is not 162 ' 'characters long:\n%s') % ('Cabecera presentador 58', texto), True) return texto def _cabecera_ordenante_58(self): converter = PaymentConverterSpain() texto = '5370' texto += (self.order.mode.bank_id.partner_id.vat[2:] + self.order.mode.csb_suffix).zfill(12) texto += datetime.today().strftime('%d%m%y') texto += 6*' ' texto += converter.to_ascii( self.order.mode.bank_id.partner_id.name).ljust(40) cc = converter.digits_only(self.order.mode.bank_id.acc_number) texto += cc[0:20] texto += 8*' ' texto += '06' texto += 52*' ' texto += self.order.mode.csb58_ine and converter.to_ascii( self.order.mode.csb58_ine)[:9].zfill(9) or 9*' ' texto += 3*' ' texto += '\r\n' if len(texto) != 164: raise Log(_('Configuration error:\n\nThe line "%s" is not 162 ' 'characters long:\n%s') % ('Cabecera ordenante 58', texto), True) return texto def _individual_obligatorio_58(self, recibo): converter = PaymentConverterSpain() texto = '5670' texto += (self.order.mode.bank_id.partner_id.vat[2:] + self.order.mode.csb_suffix).zfill(12) texto += str(recibo['name'])[-12:].zfill(12) nombre = converter.to_ascii(recibo['partner_id'].name) texto += nombre[0:40].ljust(40) ccc = recibo['bank_id'] and 
recibo['bank_id'].acc_number or '' ccc = converter.digits_only(ccc) texto += str(ccc)[0:20].zfill(20) importe = int(round(abs(recibo['amount'])*100, 0)) texto += str(importe).zfill(10) # Referencia para devolución (sólo válida si no se agrupa) # if len(recibo['ml_inv_ref']) == 1: texto += str(recibo['ml_inv_ref'][0].id)[-16:].zfill(16) else: texto += 16*' ' ###################################################################### concepto = '' if recibo['communication']: concepto = recibo['communication'] texto += converter.to_ascii(concepto)[0:40].ljust(40) if recibo.get('date'): date_cargo = datetime.strptime(recibo['date'], '%Y-%m-%d') elif recibo.get('ml_maturity_date'): date_cargo = datetime.strptime(recibo['ml_maturity_date'], '%Y-%m-%d') else: date_cargo = datetime.today() texto += date_cargo.strftime('%d%m%y') texto += 2*' ' texto += '\r\n' if len(texto) != 164: raise Log(_('Configuration error:\n\nThe line "%s" is not 162 ' 'characters long:\n%s') % ('Individual obligatorio 58', texto), True) return texto def _individual_opcional_58(self, recibo): """Para poner el segundo texto de comunicación""" converter = PaymentConverterSpain() texto = '5671' texto += (self.order.mode.bank_id.partner_id.vat[2:] + self.order.mode.csb_suffix).zfill(12) texto += str(recibo['name'])[-12:].zfill(12) texto += converter.to_ascii(recibo['communication2'])[0:134].ljust(134) texto += '\r\n' if len(texto) != 164: raise Log(_('Configuration error:\n\nThe line "%s" is not 162 ' 'characters long:\n%s') % ('Individual opcional 58', texto), True) return texto def _registro_obligatorio_domicilio_58(self, recibo): """ Registro obligatorio domicilio 58 para no domiciliados. 
Formato: ZONA DESCRIPCION POS LONGITUD TIPO INICIAL REGISTRO A: A1 Código de Registro: 56 1 2 Numérico A2 Código de Dato: 76 3 2 Numérico B: B1 Código del Cliente Ordenante 5 12 Alfanumérico (NIF 9POS Y SUF 3POS) B2 Código de Referencia 17 12 Alfanumérico C: Domicilio del Deudor 29 40 Alfanumérico D: D1 Plaza del Domicilio del Deudor 69 35 Alfanumérico D2 Código Postal del Domicilio 104 5 Numérico del Deudor E: E1 Localidad del Ordenante al 109 38 Alfanumérico que se anticipó el Crédito E2 Código de la Provincia de 147 2 Numérico esta Localidad F: F1 Fecha de origen en que se 149 6 Numérico formalizó el Cto.(DDMMAA) F2 Libre 155 8 Alfanumérico """ converter = PaymentConverterSpain() alt_format = self.order.mode.csb58_alt_address_format # # Obtenemos la dirección (por defecto) del partner, a imagen # y semejanza de lo que hace info_partner # del objeto payment.line (account_payment/payment.py), # Pero si no encontramos ninguna dirección por defecto, # tomamos la primera del partner. # st = '' code_zip = '' city = '' if recibo['partner_id'].address: ads = None for item in recibo['partner_id'].address: if item.type == 'default': ads = item break if not ads and len(recibo['partner_id'].address) > 0: ads = recibo['partner_id'].address[0] st = ads.street and ads.street or '' partner_zip = self.env['res.partner.zip'] if 'zip_id' in ads: obj_zip_city = ads.zip_id and partner_zip.browse( ads.zip_id.id, self.context) or '' code_zip = obj_zip_city and obj_zip_city.name or '' city = obj_zip_city and obj_zip_city.city or '' else: code_zip = ads.zip and ads.zip or '' city = ads.city and ads.city or '' # # Comprobamos el código postal: # "Cuando no se conozca el código # completo, se cumplimentara, al menos, las dos primeras # posiciones que identifican la provincia, dejando el resto de # posiciones a cero." 
# if len(code_zip) < 2: code_zip = ads.state_id and ads.state_id.code or '' # # Obtenemos la localidad y código de provincia del ordenante # ord_city = '' ord_state_code = '' if self.order.mode.partner_id.address: ads = None for item in self.order.mode.partner_id.address: if item.type == 'default': ads = item break if not ads and len(self.order.mode.partner_id.address) > 0: ads = self.order.mode.partner_id.address[0] ord_city = ads.state_id and ads.state_id.name or '' ord_state_code = ads.state_id and ads.state_id.code or '' # # Calculamos la 'Fecha de origen en que se formalizo el crédito # anticipado' esto es, la fecha de creación del recibo. # if recibo.get('create_date'): date_ct = datetime.strptime(recibo['create_date'], '%Y-%m-%d %H:%M:%S') elif recibo.get('ml_date_created'): date_ct = datetime.strptime(recibo['ml_date_created'], '%Y-%m-%d') else: date_ct = datetime.today() # # Componemos la línea formateada # texto = '5676' texto += (self.order.mode.bank_id.partner_id.vat[2:] + self.order.mode.csb_suffix).zfill(12) texto += str(recibo['name'])[-12:].zfill(12) texto += converter.to_ascii(st)[:40].ljust(40) # Domicilio texto += converter.to_ascii(city)[:35].ljust(35) # Plaza (ciudad) texto += converter.to_ascii(code_zip)[:5].zfill(5) # CP # Localidad del ordenante (ciudad) texto += converter.to_ascii(ord_city)[:38].ljust(38) if alt_format: # # Si usamos el formato alternativo (basado en FacturaPlus) # escribimos la fecha en la posición 147 y dejamos dos carácteres # en blanco tras ella. # Lo correcto, según la norma, es que en la posición 147 aparezca # el código de provincia (2 dígitos) y la fecha empiece en # la posición 149. 
# texto += date_ct.strftime('%d%m%y') # Fecha crédito texto += 2*' ' else: # Cod prov del ordenante texto += converter.to_ascii(ord_state_code)[:2].zfill(2) texto += date_ct.strftime('%d%m%y') # Fecha crédito texto += 8*' ' # Libre texto += '\r\n' if len(texto) != 164: raise Log(_('Configuration error:\n\nThe line "%s" is not 162 ' 'characters long:\n%s') % ('Obligatorio domicilio 58', texto), True) return texto def _total_ordenante_58(self): texto = '5870' texto += (self.order.mode.bank_id.partner_id.vat[2:] + self.order.mode.csb_suffix).zfill(12) texto += 72*' ' totalordenante = int(round(abs(self.order.total) * 100, 0)) texto += str(totalordenante).zfill(10) texto += 6*' ' texto += str(self.num_recibos).zfill(10) texto += str(self.num_recibos + self.num_lines_opc + 2).zfill(10) texto += 38*' ' texto += '\r\n' if len(texto) != 164: raise Log(_('Configuration error:\n\nThe line "%s" is not 162 ' 'characters long:\n%s') % ('Total ordenante 58', texto), True) return texto def _total_general_58(self): texto = '5970' texto += (self.order.mode.bank_id.partner_id.vat[2:] + self.order.mode.csb_suffix).zfill(12) texto += 52*' ' texto += '0001' texto += 16*' ' totalremesa = int(round(abs(self.order.total) * 100, 0)) texto += str(totalremesa).zfill(10) texto += 6*' ' texto += str(self.num_recibos).zfill(10) texto += str(self.num_recibos + self.num_lines_opc + 4).zfill(10) texto += 38*' ' texto += '\r\n' if len(texto) != 164: raise Log(_('Configuration error:\n\nThe line "%s" is not 162 ' 'characters long:\n%s') % ('Total general 58', texto), True) return texto def create_file(self, order, lines): self.order = order txt_file = '' self.num_recibos = 0 self.num_lines_opc = 0 txt_file += self._cabecera_presentador_58() txt_file += self._cabecera_ordenante_58() for recibo in lines: txt_file += self._individual_obligatorio_58(recibo) self.num_recibos = self.num_recibos + 1 # Sólo emitimos el registro individual si communication2 contiene # texto if (recibo['communication2'] and 
len(recibo['communication2'].strip()) > 0): txt_file += self._individual_opcional_58(recibo) self.num_lines_opc = self.num_lines_opc + 1 # Para recibos no domiciliados, añadimos el registro obligatorio # de domicilio (necesario con algunos bancos/cajas). if self.order.mode.csb58_include_address: txt_file += self._registro_obligatorio_domicilio_58(recibo) self.num_lines_opc = self.num_lines_opc + 1 txt_file += self._total_ordenante_58() txt_file += self._total_general_58() return txt_file
unknown
codeparrot/codeparrot-clean
'use strict'; const _ = require('lodash'); /*----------------------------------------------------------------------------*/ /** * Creates a hash object. If a `properties` object is provided, its own * enumerable properties are assigned to the created hash. * * @memberOf util * @param {Object} [properties] The properties to assign to the hash. * @returns {Object} Returns the new hash object. */ function Hash(properties) { return _.transform(properties, (result, value, key) => { result[key] = (_.isPlainObject(value) && !(value instanceof Hash)) ? new Hash(value) : value; }, this); } Hash.prototype = Object.create(null); /** * This method throws any error it receives. * * @memberOf util * @param {Object} [error] The error object. */ function pitch(error) { if (error != null) { throw error; } } module.exports = { Hash, pitch };
javascript
github
https://github.com/lodash/lodash
lib/common/util.js
#!/usr/bin/env python # Copyright 2014 RethinkDB, all rights reserved. """The `interface.db_config` test checks that the special `rethinkdb.db_config` table behaves as expected.""" from __future__ import print_function import os, sys, time startTime = time.time() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common'))) import driver, scenario_common, utils, vcoptparse r = utils.import_python_driver() op = vcoptparse.OptParser() scenario_common.prepare_option_parser_mode_flags(op) _, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv)) print("Starting server (%.2fs)" % (time.time() - startTime)) with driver.Process(files='a', output_folder='.', command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True) as server: print("Establishing ReQL connection (%.2fs)" % (time.time() - startTime)) conn = r.connect(host=server.host, port=server.driver_port) print("Starting tests (%.2fs)" % (time.time() - startTime)) assert list(r.db("rethinkdb").table("db_config").run(conn)) == [] res = r.db_create("foo").run(conn) assert res["dbs_created"] == 1 assert len(res["config_changes"]) == 1 assert res["config_changes"][0]["old_val"] is None assert res["config_changes"][0]["new_val"] == r.db("foo").config().run(conn) rows = list(r.db("rethinkdb").table("db_config").run(conn)) assert len(rows) == 1 and rows[0]["name"] == "foo" foo_uuid = rows[0]["id"] assert r.db("rethinkdb").table("db_config").get(foo_uuid).run(conn)["name"] == "foo" res = r.db("rethinkdb").table("db_config").get(foo_uuid).update({"name": "foo2"}) \ .run(conn) assert res["replaced"] == 1 assert res["errors"] == 0 rows = list(r.db("rethinkdb").table("db_config").run(conn)) assert len(rows) == 1 and rows[0]["name"] == "foo2" res = r.db_create("bar").run(conn) assert res["dbs_created"] == 1 rows = list(r.db("rethinkdb").table("db_config").run(conn)) assert len(rows) == 2 and set(row["name"] for row in rows) == set(["foo2", 
"bar"]) bar_uuid = [row["id"] for row in rows if row["name"] == "bar"][0] foo2_config = r.db("foo2").config().run(conn) assert foo2_config["name"] == "foo2" try: rows = r.db("not_a_database").config().run(conn) except r.RqlRuntimeError: pass else: raise ValueError("r.db().config() should fail if argument does not exist.") res = r.db("rethinkdb").table("db_config").get(bar_uuid).update({"name": "foo2"}) \ .run(conn) # This would cause a name conflict, so it should fail assert res["errors"] == 1 res = r.db("rethinkdb").table("db_config").get(bar_uuid) \ .update({"name": "rethinkdb"}).run(conn) assert res["errors"] == 1 res = r.db_drop("foo2").run(conn) assert res["dbs_dropped"] == 1 assert res["tables_dropped"] == 0 assert len(res["config_changes"]) == 1 assert res["config_changes"][0]["old_val"] == foo2_config assert res["config_changes"][0]["new_val"] is None res = r.db("rethinkdb").table("db_config").insert({"name": "baz"}).run(conn) assert res["errors"] == 0 assert res["inserted"] == 1 assert "baz" in r.db_list().run(conn) baz_uuid = res["generated_keys"][0] res = r.db("rethinkdb").table("db_config").get(baz_uuid).delete().run(conn) assert res["errors"] == 0 assert res["deleted"] == 1 assert "baz" not in r.db_list().run(conn) print("Inserting nonsense to make sure it's not accepted (%.2fs)" % (time.time() - startTime)) res = r.db("rethinkdb").table("db_config").insert({}).run(conn) assert res["errors"] == 1, res res = r.db("rethinkdb").table("db_config") \ .insert({"name": "hi", "nonsense": "yes"}).run(conn) assert res["errors"] == 1, res print("Cleaning up (%.2fs)" % (time.time() - startTime)) print("Done. (%.2fs)" % (time.time() - startTime))
unknown
codeparrot/codeparrot-clean
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os import hmac import re from ansible.module_utils.six.moves.urllib.parse import urlparse try: from hashlib import sha1 except ImportError: import sha as sha1 HASHED_KEY_MAGIC = "|1|" def is_ssh_url(url): """ check if url is ssh """ if "@" in url and "://" not in url: return True for scheme in "ssh://", "git+ssh://", "ssh+git://": if url.startswith(scheme): return True return False def get_fqdn_and_port(repo_url): """ chop the hostname and port out of a url """ fqdn = None port = None ipv6_re = re.compile('(\[[^]]*\])(?::([0-9]+))?') if "@" in repo_url and "://" not in repo_url: # most likely an user@host:path or user@host/path type URL repo_url = repo_url.split("@", 1)[1] match = ipv6_re.match(repo_url) # For this type of URL, colon specifies the path, not the port if match: fqdn, path = match.groups() elif ":" in repo_url: fqdn = repo_url.split(":")[0] elif "/" in repo_url: fqdn = repo_url.split("/")[0] elif "://" in repo_url: # this should be something we can parse with urlparse parts = urlparse(repo_url) # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so # ensure we actually have a parts[1] before continuing. if parts[1] != '': fqdn = parts[1] if "@" in fqdn: fqdn = fqdn.split("@", 1)[1] match = ipv6_re.match(fqdn) if match: fqdn, port = match.groups() elif ":" in fqdn: fqdn, port = fqdn.split(":")[0:2] return fqdn, port def check_hostkey(module, fqdn): return not not_in_host_file(module, fqdn) # this is a variant of code found in connection_plugins/paramiko.py and we should modify # the paramiko code to import and use this. 
def not_in_host_file(self, host): if 'USER' in os.environ: user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") else: user_host_file = "~/.ssh/known_hosts" user_host_file = os.path.expanduser(user_host_file) host_file_list = [] host_file_list.append(user_host_file) host_file_list.append("/etc/ssh/ssh_known_hosts") host_file_list.append("/etc/ssh/ssh_known_hosts2") host_file_list.append("/etc/openssh/ssh_known_hosts") hfiles_not_found = 0 for hf in host_file_list: if not os.path.exists(hf): hfiles_not_found += 1 continue try: host_fh = open(hf) except IOError: hfiles_not_found += 1 continue else: data = host_fh.read() host_fh.close() for line in data.split("\n"): if line is None or " " not in line: continue tokens = line.split() if tokens[0].find(HASHED_KEY_MAGIC) == 0: # this is a hashed known host entry try: (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2) hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) hash.update(host) if hash.digest() == kn_host.decode('base64'): return False except: # invalid hashed host key, skip it continue else: # standard host file entry if host in tokens[0]: return False return True def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False): """ use ssh-keyscan to add the hostkey """ keyscan_cmd = module.get_bin_path('ssh-keyscan', True) if 'USER' in os.environ: user_ssh_dir = os.path.expandvars("~${USER}/.ssh/") user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") else: user_ssh_dir = "~/.ssh/" user_host_file = "~/.ssh/known_hosts" user_ssh_dir = os.path.expanduser(user_ssh_dir) if not os.path.exists(user_ssh_dir): if create_dir: try: os.makedirs(user_ssh_dir, int('700', 8)) except: module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) else: module.fail_json(msg="%s does not exist" % user_ssh_dir) elif not os.path.isdir(user_ssh_dir): module.fail_json(msg="%s is not a directory" % user_ssh_dir) if port: this_cmd = "%s -t %s -p %s %s" % 
(keyscan_cmd, key_type, port, fqdn) else: this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn) rc, out, err = module.run_command(this_cmd) # ssh-keyscan gives a 0 exit code and prints nothing on timeout if rc != 0 or not out: msg = 'failed to retrieve hostkey' if not out: msg += '. "%s" returned no matches.' % this_cmd else: msg += ' using command "%s". [stdout]: %s' % (this_cmd, out) if err: msg += ' [stderr]: %s' % err module.fail_json(msg=msg) module.append_to_file(user_host_file, out) return rc, out, err
unknown
codeparrot/codeparrot-clean
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_UNHANDLEDEXCEPTIONATNEWCHECK_H #define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_UNHANDLEDEXCEPTIONATNEWCHECK_H #include "../ClangTidyCheck.h" namespace clang::tidy::bugprone { /// Finds calls to 'new' that may throw unhandled exception at allocation /// failure. /// /// For the user-facing documentation see: /// https://clang.llvm.org/extra/clang-tidy/checks/bugprone/unhandled-exception-at-new.html class UnhandledExceptionAtNewCheck : public ClangTidyCheck { public: UnhandledExceptionAtNewCheck(StringRef Name, ClangTidyContext *Context); bool isLanguageVersionSupported(const LangOptions &LangOpts) const override { return LangOpts.CPlusPlus && LangOpts.CXXExceptions; } void registerMatchers(ast_matchers::MatchFinder *Finder) override; void check(const ast_matchers::MatchFinder::MatchResult &Result) override; }; } // namespace clang::tidy::bugprone #endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_UNHANDLEDEXCEPTIONATNEWCHECK_H
c
github
https://github.com/llvm/llvm-project
clang-tools-extra/clang-tidy/bugprone/UnhandledExceptionAtNewCheck.h
// This file is part of ICU4X. For terms of use, please see the file // called LICENSE at the top level of the ICU4X source tree // (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ). impl_tinystr_subtag!( /// A variant subtag (examples: `"macos"`, `"posix"`, `"1996"` etc.) /// /// [`Variant`] represents a Unicode base language code conformant to the /// [`unicode_variant_id`] field of the Language and Locale Identifier. /// /// # Examples /// /// ``` /// use icu::locale::subtags::Variant; /// /// let variant: Variant = /// "macos".parse().expect("Failed to parse a variant subtag."); /// ``` /// /// [`unicode_variant_id`]: https://unicode.org/reports/tr35/#unicode_variant_id Variant, subtags, variant, subtags_variant, 4..=8, s, s.is_ascii_alphanumeric() && (s.len() != 4 || s.all_bytes()[0].is_ascii_digit()), s.to_ascii_lowercase(), s.is_ascii_lowercase() && s.is_ascii_alphanumeric() && (s.len() != 4 || s.all_bytes()[0].is_ascii_digit()), InvalidSubtag, ["posix", "1996"], ["yes"], );
rust
github
https://github.com/nodejs/node
deps/crates/vendor/icu_locale_core/src/subtags/variant.rs
from django.db import models from django.contrib import admin from django.conf import settings from django.core.urlresolvers import reverse from autocomplete import widgets from autocomplete.views import autocomplete as default_view from autocomplete.utils import autocomplete_formfield class AdminMedia: extend = False js = (settings.AUTOCOMPLETE_MEDIA_PREFIX + 'js/jquery_autocomplete.js',) css = {'all': (settings.AUTOCOMPLETE_MEDIA_PREFIX + 'css/jquery-ui.css',) } class AdminAutocompleteWidget(widgets.AutocompleteWidget): Media = AdminMedia class AdminMultipleAutocompleteWidget(widgets.MultipleAutocompleteWidget): Media = AdminMedia class AutocompleteAdmin(object): autocomplete_autoconfigure = True autocomplete_view = default_view autocomplete_fields = {} def autocomplete_formfield(self, ac_id, formfield=None, **kwargs): return autocomplete_formfield(ac_id, formfield, self.autocomplete_view, AdminAutocompleteWidget, AdminMultipleAutocompleteWidget, **kwargs) def formfield_for_dbfield(self, db_field, **kwargs): if db_field.name in self.autocomplete_fields: ac_id = self.autocomplete_fields[db_field.name] return self.autocomplete_formfield(ac_id, db_field.formfield, **kwargs) elif self.autocomplete_autoconfigure: if db_field in self.autocomplete_view.settings: return self.autocomplete_formfield(db_field, **kwargs) return super(AutocompleteAdmin, self).formfield_for_dbfield(db_field, **kwargs) def _media(self): # little hack to include autocomplete's js before jquery.init.js media = super(AutocompleteAdmin, self).media media._js.insert(3, settings.AUTOCOMPLETE_MEDIA_PREFIX + 'js/jquery-ui.min.js') return media media = property(_media) def _autocomplete_view(request, field): info = self.model._meta.app_label, self.model._meta.module_name, field if field in self.autocomplete_fields: ac_id = self.autocomplete_fields[field] else: ac_id = '/'.join(info) return self.autocomplete_view(request, ac_id) def get_urls(self): # This ensures that `admin_site.admin_view` is applied 
to the # autocomplete_view. from django.conf.urls import patterns, url info = self.model._meta.app_label, self.model._meta.module_name urlpatterns = super(AutocompleteAdmin, self).get_urls() urlpatterns += patterns('', url(r'^autocomplete/(?P<field>[\w]+)/$', self.admin_site.admin_view(self._autocomplete_view), name='%s_%s_autocomplete' % info) ) return urlpatterns def urls(self): return self.get_urls() urls = property(urls) @classmethod def _validate(self): pass
unknown
codeparrot/codeparrot-clean
import numpy as np AR = np.arange(10) AR.setflags(write=False) with np.printoptions(): np.set_printoptions( precision=1, threshold=2, edgeitems=3, linewidth=4, suppress=False, nanstr="Bob", infstr="Bill", formatter={}, sign="+", floatmode="unique", ) np.get_printoptions() str(AR) np.array2string( AR, max_line_width=5, precision=2, suppress_small=True, separator=";", prefix="test", threshold=5, floatmode="fixed", suffix="?", legacy="1.13", ) np.format_float_scientific(1, precision=5) np.format_float_positional(1, trim="k") np.array_repr(AR) np.array_str(AR)
python
github
https://github.com/numpy/numpy
numpy/typing/tests/data/pass/arrayprint.py
# -*- coding: utf-8 -*- # # This file is part of INSPIRE. # Copyright (C) 2014-2017 CERN. # # INSPIRE is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # INSPIRE is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with INSPIRE. If not, see <http://www.gnu.org/licenses/>. # # In applying this license, CERN does not waive the privileges and immunities # granted to it by virtue of its status as an Intergovernmental Organization # or submit itself to any jurisdiction. """Communication handler with Beard Celery service.""" from __future__ import absolute_import, division, print_function import celery from flask import current_app def make_beard_clusters(records, signatures): """Dispatch a clustering task to Beard Celery instance. The method receives a list of records and a list of signatures representing records that will be clustered by Beard algorithm. The argument 'records' is a list of dictionaries containing values like authors of a particular record, earliest date recorded of the publication and finally publication UUID. The argument 'signatures' represents the phonetic block, that is being currently computed by one of 'disambiguation' workers. In order to check what is being used in order to cluster signatures, check 'create_beard_record' and 'create_beard_signatures' methods in 'search.py' as the reference. The method dispatches a Celery task to Beard server. Beard algorithm distinguishes different authors for *the same* signature block. The clustered authors are then returned in the format of dictionaries. 
:param records: A list of the records, where at least one author will be clustered (by having the same phonetic block). Example: records = [{'authors': [u'Hohm, Olaf', u'Wang, Yi-Nan'], 'publication_id': u'13c3cca8-b0bf-42f5-90d4-...', 'year': u'2015'}] :param signatures: A list of signatures belonging to the same signature block, which is currently being clustered. Example: signatures = [{'author_affiliation': u'MIT, Cambridge, CTP', 'author_name': u'Wang, Yi-Nan', 'publication_id': u'13c3cca8-b0bf-42f5-90d4-...', 'signature_id': u'a4156520-4248-a57f-949c361e0dd0', 'author_recid': u'10123', 'author_claimed': False}] :return: A tuple containing clusters matched with existing author profiles and clusters for which new profile must be created. The first 'bucket' is a list of dictionaries, where each key represents recid of an existing author profile. The second 'bucket' is a list of dictionaries as well, however with enumerated keys, which are meaningless. Except clustering, Beard Celery instance is also responsible for matching output of a clustering job with a current state of Inspire system, ie. current links between signatures and their author profiles. The matching process is done using simplex algorithm for maximising overlap between new clusters (Beard output) with signatures clustered by belonging to the same profile. To see what is the workflow behind Beard Celery instance, check https://github.com/inspirehep/beard-server Example: [{u'10123': [u'a4156520-4248-a57f-949c361e0dd0']}, {}] """ if records and signatures: clusters = celery.current_app.send_task( 'beard_server.tasks.make_clusters', (records, signatures), queue=current_app.config.get('DISAMBIGUATION_QUEUE')) return clusters
unknown
codeparrot/codeparrot-clean
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """System tests for Google Cloud Build operators""" from tests.providers.google.cloud.operators.test_sftp_to_gcs_system_helper import SFTPtoGcsTestHelper from tests.providers.google.cloud.utils.gcp_authenticator import GCP_GCS_KEY from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, provide_gcp_context, skip_gcp_system from tests.test_utils.system_tests_class import SystemTest @skip_gcp_system(GCP_GCS_KEY) class SFTPToGcsExampleDagsSystemTest(SystemTest): """ System tests for SFTP to Google Cloud Storage transfer operator It use a real service. """ helper = SFTPtoGcsTestHelper() @provide_gcp_context(GCP_GCS_KEY) def setUp(self): super().setUp() self.helper.create_buckets() self.helper.create_temp_files() @provide_gcp_context(GCP_GCS_KEY) def test_run_example_dag(self): self.run_dag("example_sftp_to_gcs", CLOUD_DAG_FOLDER) @provide_gcp_context(GCP_GCS_KEY) def tearDown(self): self.helper.delete_buckets() self.helper.delete_temp_files() super().tearDown()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python """ AES Key Expansion. Expands 128, 192, or 256 bit key for use with AES Running this file as __main__ will result in a self-test of the algorithm. Algorithm per NIST FIPS-197 http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf Copyright (c) 2010, Adam Newman http://www.caller9.com/ Licensed under the MIT license http://www.opensource.org/licenses/mit-license.php """ __author__ = "Adam Newman" #Normally use relative import. In test mode use local import. try:from .aes_tables import sbox,rcon except ValueError:from aes_tables import sbox,rcon from operator import xor class KeyExpander: """Perform AES Key Expansion""" _expanded_key_length = {128 : 176, 192 : 208, 256 : 240} def __init__(self, key_length): self._key_length = key_length self._n = key_length>>3 if key_length in self._expanded_key_length: self._b = self._expanded_key_length[key_length] else: raise LookupError('Invalid Key Size') def expand(self, new_key): """ Expand the encryption key per AES key schedule specifications http://en.wikipedia.org/wiki/Rijndael_key_schedule#Key_schedule_description """ #First n bytes are copied from key len_new_key = len(new_key) if len_new_key != self._n: raise RuntimeError('expand(): key size is invalid') rcon_iter = 1 nex=new_key.extend #Grow the key until it is the correct length while 1: #Copy last 4 bytes of extended key, apply core, increment i(rcon_iter), #core Append the list of elements 1-3 and list comprised of element 0 (circular rotate left) #core For each element of this new list, put the result of sbox into output array. 
#xor with 4 bytes n bytes from end of extended key keyarr=[sbox[i] for i in new_key[-3:]+new_key[-4:-3]] #First byte of output array is XORed with rcon(iter) keyarr[0] ^= rcon[rcon_iter] nex(map(xor,keyarr, new_key[-self._n:4-self._n])) rcon_iter += 1 len_new_key += 4 #Run three passes of 4 byte expansion using copy of 4 byte tail of extended key #which is then xor'd with 4 bytes n bytes from end of extended key for j in 0,1,2: nex(map(xor,new_key[-4:], new_key[-self._n:4-self._n])) len_new_key += 4 if len_new_key >= self._b:return new_key else: #If key length is 256 and key is not complete, add 4 bytes tail of extended key #run through sbox before xor with 4 bytes n bytes from end of extended key if self._key_length == 256: nex(map(xor,[sbox[x] for x in new_key[-4:]], new_key[-self._n:4-self._n])) len_new_key += 4 if len_new_key >= self._b:return new_key #If key length is 192 or 256 and key is not complete, run 2 or 3 passes respectively #of 4 byte tail of extended key xor with 4 bytes n bytes from end of extended key if self._key_length != 128: for j in ((0,1) if self._key_length == 192 else (0,1,2)): nex(map(xor,new_key[-4:], new_key[-self._n:4-self._n])) len_new_key += 4 if len_new_key >= self._b:return new_key import unittest class TestKeyExpander(unittest.TestCase): def test_keys(self): """Test All Key Expansions""" import test_keys test_data = test_keys.TestKeys() for key_size in 128, 192, 256: test_expander = KeyExpander(key_size) test_expanded_key = test_expander.expand(test_data.test_key[key_size]) self.assertEqual (len([i for i, j in zip(test_expanded_key, test_data.test_expanded_key_validated[key_size]) if i == j]), len(test_data.test_expanded_key_validated[key_size]), msg='Key expansion ' + str(key_size) + ' bit') if __name__ == "__main__": unittest.main()
unknown
codeparrot/codeparrot-clean
""" Letsencrypt Integration Test Tool - Configures (canned) boulder server - Launches EC2 instances with a given list of AMIs for different distros - Copies letsencrypt repo and puts it on the instances - Runs letsencrypt tests (bash scripts) on all of these - Logs execution and success/fail for debugging Notes: - Some AWS images, e.g. official CentOS and FreeBSD images require acceptance of user terms on the AWS marketplace website. This can't be automated. - AWS EC2 has a default limit of 20 t2/t1 instances, if more are needed, they need to be requested via online webform. Usage: - Requires AWS IAM secrets to be set up with aws cli - Requires an AWS associated keyfile <keyname>.pem >aws configure --profile HappyHacker [interactive: enter secrets for IAM role] >aws ec2 create-key-pair --profile HappyHacker --key-name MyKeyPair \ --query 'KeyMaterial' --output text > MyKeyPair.pem then: >python multitester.py targets.yaml MyKeyPair.pem HappyHacker scripts/test_letsencrypt_auto_venv_only.sh see: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-keypairs.html """ from __future__ import print_function from __future__ import with_statement import sys, os, time, argparse, socket import multiprocessing as mp from multiprocessing import Manager import urllib2 import yaml import boto3 import fabric from fabric.api import run, execute, local, env, sudo, cd, lcd from fabric.operations import get, put from fabric.context_managers import shell_env # Command line parser #------------------------------------------------------------------------------- parser = argparse.ArgumentParser(description='Builds EC2 cluster for testing.') parser.add_argument('config_file', help='yaml configuration file for AWS server cluster') parser.add_argument('key_file', help='key file (<keyname>.pem) for AWS') parser.add_argument('aws_profile', help='profile for AWS (i.e. 
as in ~/.aws/certificates)') parser.add_argument('test_script', default='test_letsencrypt_auto_certonly_standalone.sh', help='path of bash script in to deploy and run') #parser.add_argument('--script_args', # nargs='+', # help='space-delimited list of arguments to pass to the bash test script', # required=False) parser.add_argument('--repo', default='https://github.com/letsencrypt/letsencrypt.git', help='letsencrypt git repo to use') parser.add_argument('--branch', default='~', help='letsencrypt git branch to trial') parser.add_argument('--pull_request', default='~', help='letsencrypt/letsencrypt pull request to trial') parser.add_argument('--merge_master', action='store_true', help="if set merges PR into master branch of letsencrypt/letsencrypt") parser.add_argument('--saveinstances', action='store_true', help="don't kill EC2 instances after run, useful for debugging") parser.add_argument('--alt_pip', default='', help="server from which to pull candidate release packages") parser.add_argument('--killboulder', action='store_true', help="do not leave a persistent boulder server running") parser.add_argument('--boulderonly', action='store_true', help="only make a boulder server") parser.add_argument('--fast', action='store_true', help="use larger instance types to run faster (saves about a minute, probably not worth it)") cl_args = parser.parse_args() # Credential Variables #------------------------------------------------------------------------------- # assumes naming: <key_filename> = <keyname>.pem KEYFILE = cl_args.key_file KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0] PROFILE = cl_args.aws_profile # Globals #------------------------------------------------------------------------------- BOULDER_AMI = 'ami-5f490b35' # premade shared boulder AMI 14.04LTS us-east-1 LOGDIR = "" #points to logging / working directory # boto3/AWS api globals AWS_SESSION = None EC2 = None # Boto3/AWS automation functions 
#------------------------------------------------------------------------------- def make_security_group(): # will fail if security group of GroupName already exists # cannot have duplicate SGs of the same name mysg = EC2.create_security_group(GroupName="letsencrypt_test", Description='security group for automated testing') mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=22, ToPort=22) mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=80, ToPort=80) mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=443, ToPort=443) # for boulder wfe (http) server mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=4000, ToPort=4000) # for mosh mysg.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0", FromPort=60000, ToPort=61000) return mysg def make_instance(instance_name, ami_id, keyname, machine_type='t2.micro', security_groups=['letsencrypt_test'], userdata=""): #userdata contains bash or cloud-init script new_instance = EC2.create_instances( ImageId=ami_id, SecurityGroups=security_groups, KeyName=keyname, MinCount=1, MaxCount=1, UserData=userdata, InstanceType=machine_type)[0] # brief pause to prevent rare error on EC2 delay, should block until ready instead time.sleep(1.0) # give instance a name try: new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}]) except botocore.exceptions.ClientError as e: if "InvalidInstanceID.NotFound" in str(e): # This seems to be ephemeral... retry time.sleep(1) new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}]) else: raise return new_instance def terminate_and_clean(instances): """ Some AMIs specify EBS stores that won't delete on instance termination. These must be manually deleted after shutdown. 
""" volumes_to_delete = [] for instance in instances: for bdmap in instance.block_device_mappings: if 'Ebs' in bdmap.keys(): if not bdmap['Ebs']['DeleteOnTermination']: volumes_to_delete.append(bdmap['Ebs']['VolumeId']) for instance in instances: instance.terminate() # can't delete volumes until all attaching instances are terminated _ids = [instance.id for instance in instances] all_terminated = False while not all_terminated: all_terminated = True for _id in _ids: # necessary to reinit object for boto3 to get true state inst = EC2.Instance(id=_id) if inst.state['Name'] != 'terminated': all_terminated = False time.sleep(5) for vol_id in volumes_to_delete: volume = EC2.Volume(id=vol_id) volume.delete() return volumes_to_delete # Helper Routines #------------------------------------------------------------------------------- def block_until_http_ready(urlstring, wait_time=10, timeout=240): "Blocks until server at urlstring can respond to http requests" server_ready = False t_elapsed = 0 while not server_ready and t_elapsed < timeout: try: sys.stdout.write('.') sys.stdout.flush() req = urllib2.Request(urlstring) response = urllib2.urlopen(req) #if response.code == 200: server_ready = True except urllib2.URLError: pass time.sleep(wait_time) t_elapsed += wait_time def block_until_ssh_open(ipstring, wait_time=10, timeout=120): "Blocks until server at ipstring has an open port 22" reached = False t_elapsed = 0 while not reached and t_elapsed < timeout: try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((ipstring, 22)) reached = True except socket.error as err: time.sleep(wait_time) t_elapsed += wait_time sock.close() def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20): "Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections" # the reinstantiation from id is necessary to force boto3 # to correctly update the 'state' variable during init _id = booting_instance.id _instance = 
EC2.Instance(id=_id) _state = _instance.state['Name'] _ip = _instance.public_ip_address while _state != 'running' or _ip is None: time.sleep(wait_time) _instance = EC2.Instance(id=_id) _state = _instance.state['Name'] _ip = _instance.public_ip_address block_until_ssh_open(_ip) time.sleep(extra_wait_time) return _instance # Fabric Routines #------------------------------------------------------------------------------- def local_git_clone(repo_url): "clones master of repo_url" with lcd(LOGDIR): local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi') local('git clone %s letsencrypt'% repo_url) local('tar czf le.tar.gz letsencrypt') def local_git_branch(repo_url, branch_name): "clones branch <branch_name> of repo_url" with lcd(LOGDIR): local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi') local('git clone %s letsencrypt --branch %s --single-branch'%(repo_url, branch_name)) local('tar czf le.tar.gz letsencrypt') def local_git_PR(repo_url, PRnumstr, merge_master=True): "clones specified pull request from repo_url and optionally merges into master" with lcd(LOGDIR): local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi') local('git clone %s letsencrypt'% repo_url) local('cd letsencrypt && git fetch origin pull/%s/head:lePRtest'%PRnumstr) local('cd letsencrypt && git co lePRtest') if merge_master: local('cd letsencrypt && git remote update origin') local('cd letsencrypt && git merge origin/master -m "testmerge"') local('tar czf le.tar.gz letsencrypt') def local_repo_to_remote(): "copies local tarball of repo to remote" with lcd(LOGDIR): put(local_path='le.tar.gz', remote_path='') run('tar xzf le.tar.gz') def local_repo_clean(): "delete tarball" with lcd(LOGDIR): local('rm le.tar.gz') def deploy_script(scriptpath, *args): "copies to remote and executes local script" #with lcd('scripts'): put(local_path=scriptpath, remote_path='', mirror_local_mode=True) scriptfile = os.path.split(scriptpath)[1] args_str = ' '.join(args) run('./'+scriptfile+' '+args_str) def 
run_boulder(): with cd('$GOPATH/src/github.com/letsencrypt/boulder'): run('go run cmd/rabbitmq-setup/main.go -server amqp://localhost') run('nohup ./start.py >& /dev/null < /dev/null &') def config_and_launch_boulder(instance): execute(deploy_script, 'scripts/boulder_config.sh') execute(run_boulder) def install_and_launch_letsencrypt(instance, boulder_url, target): execute(local_repo_to_remote) with shell_env(BOULDER_URL=boulder_url, PUBLIC_IP=instance.public_ip_address, PRIVATE_IP=instance.private_ip_address, PUBLIC_HOSTNAME=instance.public_dns_name, PIP_EXTRA_INDEX_URL=cl_args.alt_pip, OS_TYPE=target['type']): execute(deploy_script, cl_args.test_script) def grab_letsencrypt_log(): "grabs letsencrypt.log via cat into logged stdout" sudo('if [ -f /var/log/letsencrypt/letsencrypt.log ]; then \ cat /var/log/letsencrypt/letsencrypt.log; else echo "[novarlog]"; fi') # fallback file if /var/log is unwriteable...? correct? sudo('if [ -f ./letsencrypt.log ]; then \ cat ./letsencrypt.log; else echo "[nolocallog]"; fi') def create_client_instances(targetlist): "Create a fleet of client instances" instances = [] print("Creating instances: ", end="") for target in targetlist: if target['virt'] == 'hvm': machine_type = 't2.medium' if cl_args.fast else 't2.micro' else: # 32 bit systems machine_type = 'c1.medium' if cl_args.fast else 't1.micro' if 'userdata' in target.keys(): userdata = target['userdata'] else: userdata = '' name = 'le-%s'%target['name'] print(name, end=" ") instances.append(make_instance(name, target['ami'], KEYNAME, machine_type=machine_type, userdata=userdata)) print() return instances def test_client_process(inqueue, outqueue): cur_proc = mp.current_process() for inreq in iter(inqueue.get, SENTINEL): ii, target = inreq #save all stdout to log file sys.stdout = open(LOGDIR+'/'+'%d_%s.log'%(ii,target['name']), 'w') print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name'])) instances[ii] = block_until_instance_ready(instances[ii]) 
print("server %s at %s"%(instances[ii], instances[ii].public_ip_address)) env.host_string = "%s@%s"%(target['user'], instances[ii].public_ip_address) print(env.host_string) try: install_and_launch_letsencrypt(instances[ii], boulder_url, target) outqueue.put((ii, target, 'pass')) print("%s - %s SUCCESS"%(target['ami'], target['name'])) except: outqueue.put((ii, target, 'fail')) print("%s - %s FAIL"%(target['ami'], target['name'])) pass # append server letsencrypt.log to each per-machine output log print("\n\nletsencrypt.log\n" + "-"*80 + "\n") try: execute(grab_letsencrypt_log) except: print("log fail\n") pass def cleanup(cl_args, instances, targetlist): print('Logs in ', LOGDIR) if not cl_args.saveinstances: print('Terminating EC2 Instances and Cleaning Dangling EBS Volumes') if cl_args.killboulder: boulder_server.terminate() terminate_and_clean(instances) else: # print login information for the boxes for debugging for ii, target in enumerate(targetlist): print(target['name'], target['ami'], "%s@%s"%(target['user'], instances[ii].public_ip_address)) #------------------------------------------------------------------------------- # SCRIPT BEGINS #------------------------------------------------------------------------------- # Fabric library controlled through global env parameters env.key_filename = KEYFILE env.shell = '/bin/bash -l -i -c' env.connection_attempts = 5 env.timeout = 10 # replace default SystemExit thrown by fabric during trouble class FabricException(Exception): pass env['abort_exception'] = FabricException # Set up local copy of git repo #------------------------------------------------------------------------------- LOGDIR = "letest-%d"%int(time.time()) print("Making local dir for test repo and logs: %s"%LOGDIR) local('mkdir %s'%LOGDIR) # figure out what git object to test and locally create it in LOGDIR print("Making local git repo") try: if cl_args.pull_request != '~': print('Testing PR %s '%cl_args.pull_request, "MERGING into master" if 
cl_args.merge_master else "") execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master) elif cl_args.branch != '~': print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo)) execute(local_git_branch, cl_args.repo, cl_args.branch) else: print('Testing master of %s'%cl_args.repo) execute(local_git_clone, cl_args.repo) except FabricException: print("FAIL: trouble with git repo") exit() # Set up EC2 instances #------------------------------------------------------------------------------- configdata = yaml.load(open(cl_args.config_file, 'r')) targetlist = configdata['targets'] print('Testing against these images: [%d total]'%len(targetlist)) for target in targetlist: print(target['ami'], target['name']) print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE)) AWS_SESSION = boto3.session.Session(profile_name=PROFILE) EC2 = AWS_SESSION.resource('ec2') print("Making Security Group") sg_exists = False for sg in EC2.security_groups.all(): if sg.group_name == 'letsencrypt_test': sg_exists = True print(" %s already exists"%'letsencrypt_test') if not sg_exists: make_security_group() time.sleep(30) boulder_preexists = False boulder_servers = EC2.instances.filter(Filters=[ {'Name': 'tag:Name', 'Values': ['le-boulderserver']}, {'Name': 'instance-state-name', 'Values': ['running']}]) boulder_server = next(iter(boulder_servers), None) print("Requesting Instances...") if boulder_server: print("Found existing boulder server:", boulder_server) boulder_preexists = True else: print("Can't find a boulder server, starting one...") boulder_server = make_instance('le-boulderserver', BOULDER_AMI, KEYNAME, machine_type='t2.micro', #machine_type='t2.medium', security_groups=['letsencrypt_test']) try: if not cl_args.boulderonly: instances = create_client_instances(targetlist) # Configure and launch boulder server #------------------------------------------------------------------------------- print("Waiting on Boulder 
Server") boulder_server = block_until_instance_ready(boulder_server) print(" server %s"%boulder_server) # env.host_string defines the ssh user and host for connection env.host_string = "ubuntu@%s"%boulder_server.public_ip_address print("Boulder Server at (SSH):", env.host_string) if not boulder_preexists: print("Configuring and Launching Boulder") config_and_launch_boulder(boulder_server) # blocking often unnecessary, but cheap EC2 VMs can get very slow block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address, wait_time=10, timeout=500) boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address) print("Boulder Server at (EC2 private ip): %s"%boulder_url) if cl_args.boulderonly: sys.exit(0) # Install and launch client scripts in parallel #------------------------------------------------------------------------------- print("Uploading and running test script in parallel: %s"%cl_args.test_script) print("Output routed to log files in %s"%LOGDIR) # (Advice: always use Manager.Queue, never regular multiprocessing.Queue # the latter has implementation flaws that deadlock it in some circumstances) manager = Manager() outqueue = manager.Queue() inqueue = manager.Queue() SENTINEL = None #queue kill signal # launch as many processes as clients to test num_processes = len(targetlist) jobs = [] #keep a reference to current procs # initiate process execution for i in range(num_processes): p = mp.Process(target=test_client_process, args=(inqueue, outqueue)) jobs.append(p) p.daemon = True # kills subprocesses if parent is killed p.start() # fill up work queue for ii, target in enumerate(targetlist): inqueue.put((ii, target)) # add SENTINELs to end client processes for i in range(num_processes): inqueue.put(SENTINEL) # wait on termination of client processes for p in jobs: p.join() # add SENTINEL to output queue outqueue.put(SENTINEL) # clean up 
execute(local_repo_clean) # print and save summary results results_file = open(LOGDIR+'/results', 'w') outputs = [outq for outq in iter(outqueue.get, SENTINEL)] outputs.sort(key=lambda x: x[0]) for outq in outputs: ii, target, status = outq print('%d %s %s'%(ii, target['name'], status)) results_file.write('%d %s %s\n'%(ii, target['name'], status)) results_file.close() finally: cleanup(cl_args, instances, targetlist) # kill any connections fabric.network.disconnect_all()
unknown
codeparrot/codeparrot-clean
////////////////////////////////////////////////////////////////////////////
//
// Copyright 2014 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////

import Foundation
import Realm.Swift

// These types don't change when wrapping in Swift
// so we just typealias them to remove the 'RLM' prefix

// MARK: Aliases

/**
 `PropertyType` is an enum describing all property types supported in Realm models.

 For more information, see [Object Models and Schemas](https://www.mongodb.com/docs/atlas/device-sdks/sdk/swift/model-data/object-models/).

 ### Primitive types

 * `Int`
 * `Bool`
 * `Float`
 * `Double`

 ### Object types

 * `String`
 * `Data`
 * `Date`
 * `Decimal128`
 * `ObjectId`

 ### Relationships: Array (in Swift, `List`) and `Object` types

 * `Object`
 * `Array`
 */
public typealias PropertyType = RLMPropertyType

/**
 An opaque token which is returned from methods which subscribe to changes to a Realm.

 - see: `Realm.observe(_:)`
 */
public typealias NotificationToken = RLMNotificationToken

/// :nodoc:
public typealias ObjectBase = RLMObjectBase

extension ObjectBase {
    // NOTE(review): indentation in this region was reconstructed from a
    // whitespace-flattened source; all non-comment tokens are unchanged.

    /// Registers a block receiving typed `ObjectChange<T>` values for this
    /// object, optionally restricted to `keyPaths` and delivered on `queue`.
    /// The underlying Obj-C callback's `error` is asserted nil before the
    /// change is forwarded.
    internal func _observe<T: ObjectBase>(keyPaths: [String]? = nil,
                                          on queue: DispatchQueue? = nil,
                                          _ block: @escaping (ObjectChange<T>) -> Void) -> NotificationToken {
        return RLMObjectBaseAddNotificationBlock(self, keyPaths, queue) { object, names, oldValues, newValues, error in
            assert(error == nil)
            block(.init(object: object as? T,
                        names: names,
                        oldValues: oldValues,
                        newValues: newValues))
        }
    }

    /// Variant that forwards only the (possibly nil, after the `as? T`
    /// downcast) changed object, discarding per-property change details.
    internal func _observe<T: ObjectBase>(keyPaths: [String]? = nil,
                                          on queue: DispatchQueue? = nil,
                                          _ block: @escaping (T?) -> Void) -> NotificationToken {
        return RLMObjectBaseAddNotificationBlock(self, keyPaths, queue) { object, _, _, _, _ in
            block(object as? T)
        }
    }

    /// Variant that invokes a zero-argument callback on every notification,
    /// ignoring all change information.
    internal func _observe(keyPaths: [String]? = nil,
                           on queue: DispatchQueue? = nil,
                           _ block: @escaping () -> Void) -> NotificationToken {
        return RLMObjectBaseAddNotificationBlock(self, keyPaths, queue) { _, _, _, _, _ in
            block()
        }
    }

    /// Actor-isolated variant: the change block is hopped onto `actor` via
    /// `invokeIsolated`. Registration completion is awaited, and task
    /// cancellation invalidates the token so no callbacks fire afterwards.
    @available(macOS 10.15, tvOS 13.0, iOS 13.0, watchOS 6.0, *)
    internal func _observe<A: Actor, T: ObjectBase>(
        keyPaths: [String]? = nil, on actor: isolated A,
        _ block: @Sendable @escaping (isolated A, ObjectChange<T>) -> Void
    ) async -> NotificationToken {
        let token = RLMObjectNotificationToken()
        token.observe(self, keyPaths: keyPaths) { object, names, oldValues, newValues, error in
            assert(error == nil)
            actor.invokeIsolated(block, .init(object: object as? T,
                                              names: names,
                                              oldValues: oldValues,
                                              newValues: newValues))
        }
        await withTaskCancellationHandler(operation: token.registrationComplete,
                                          onCancel: { token.invalidate() })
        return token
    }
}
swift
github
https://github.com/realm/realm-swift
RealmSwift/Aliases.swift
""" Render to gtk from agg """ from __future__ import division import os import matplotlib from matplotlib.figure import Figure from matplotlib.backends.backend_agg import FigureCanvasAgg from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\ show, draw_if_interactive,\ error_msg_gtk, NavigationToolbar, PIXELS_PER_INCH, backend_version, \ NavigationToolbar2GTK from matplotlib.backends._gtkagg import agg_to_gtk_drawable DEBUG = False class NavigationToolbar2GTKAgg(NavigationToolbar2GTK): def _get_canvas(self, fig): return FigureCanvasGTKAgg(fig) class FigureManagerGTKAgg(FigureManagerGTK): def _get_toolbar(self, canvas): # must be inited after the window, drawingArea and figure # attrs are set if matplotlib.rcParams['toolbar']=='classic': toolbar = NavigationToolbar (canvas, self.window) elif matplotlib.rcParams['toolbar']=='toolbar2': toolbar = NavigationToolbar2GTKAgg (canvas, self.window) else: toolbar = None return toolbar def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ if DEBUG: print 'backend_gtkagg.new_figure_manager' FigureClass = kwargs.pop('FigureClass', Figure) thisFig = FigureClass(*args, **kwargs) canvas = FigureCanvasGTKAgg(thisFig) return FigureManagerGTKAgg(canvas, num) if DEBUG: print 'backend_gtkagg.new_figure_manager done' class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg): filetypes = FigureCanvasGTK.filetypes.copy() filetypes.update(FigureCanvasAgg.filetypes) def configure_event(self, widget, event=None): if DEBUG: print 'FigureCanvasGTKAgg.configure_event' if widget.window is None: return try: del self.renderer except AttributeError: pass w,h = widget.window.get_size() if w==1 or h==1: return # empty fig # compute desired figure size in inches dpival = self.figure.dpi winch = w/dpival hinch = h/dpival self.figure.set_size_inches(winch, hinch) self._need_redraw = True self.resize_event() if DEBUG: print 'FigureCanvasGTKAgg.configure_event end' return True def 
_render_figure(self, pixmap, width, height): if DEBUG: print 'FigureCanvasGTKAgg.render_figure' FigureCanvasAgg.draw(self) if DEBUG: print 'FigureCanvasGTKAgg.render_figure pixmap', pixmap #agg_to_gtk_drawable(pixmap, self.renderer._renderer, None) buf = self.buffer_rgba(0,0) ren = self.get_renderer() w = int(ren.width) h = int(ren.height) pixbuf = gtk.gdk.pixbuf_new_from_data( buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4) pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h, gtk.gdk.RGB_DITHER_NONE, 0, 0) if DEBUG: print 'FigureCanvasGTKAgg.render_figure done' def blit(self, bbox=None): if DEBUG: print 'FigureCanvasGTKAgg.blit' if DEBUG: print 'FigureCanvasGTKAgg.blit', self._pixmap agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox) x, y, w, h = self.allocation self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap, 0, 0, 0, 0, w, h) if DEBUG: print 'FigureCanvasGTKAgg.done' def print_png(self, filename, *args, **kwargs): # Do this so we can save the resolution of figure in the PNG file agg = self.switch_backends(FigureCanvasAgg) return agg.print_png(filename, *args, **kwargs) """\ Traceback (most recent call last): File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event self._render_figure(self._pixmap, w, h) File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure pixbuf = gtk.gdk.pixbuf_new_from_data( ValueError: data length (3156672) is less then required by the other parameters (3160608) """
unknown
codeparrot/codeparrot-clean
# Tests for the pty module (Python 2). Marginal "do or fail" coverage only,
# because pty behavior is highly platform-dependent.
from test.test_support import verbose, run_unittest, import_module

#Skip these tests if either fcntl or termios is not available
fcntl = import_module('fcntl')
import_module('termios')

import errno
import pty
import os
import sys
import select
import signal
import socket
import unittest

TEST_STRING_1 = "I wish to buy a fish license.\n"
TEST_STRING_2 = "For my pet fish, Eric.\n"

if verbose:
    def debug(msg):
        # Verbose mode: echo progress messages to stdout.
        print msg
else:
    def debug(msg):
        # Quiet mode: swallow progress messages.
        pass


def normalize_output(data):
    """Map platform-specific newline translations back to plain '\\n'."""
    # Some operating systems do conversions on newline.  We could possibly
    # fix that by doing the appropriate termios.tcsetattr()s.  I couldn't
    # figure out the right combo on Tru64 and I don't have an IRIX box.
    # So just normalize the output and doc the problem O/Ses by allowing
    # certain combinations for some platforms, but avoid allowing other
    # differences (like extra whitespace, trailing garbage, etc.)

    # This is about the best we can do without getting some feedback
    # from someone more knowledgable.

    # OSF/1 (Tru64) apparently turns \n into \r\r\n.
    if data.endswith('\r\r\n'):
        return data.replace('\r\r\n', '\n')

    # IRIX apparently turns \n into \r\n.
    if data.endswith('\r\n'):
        return data.replace('\r\n', '\n')

    return data


# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
# because pty code is not too portable.
# XXX(nnorwitz):  these tests leak fds when there is an error.
class PtyTest(unittest.TestCase):
    """End-to-end pty tests that may spawn children; guarded by SIGALRM."""

    def setUp(self):
        # isatty() and close() can hang on some platforms.  Set an alarm
        # before running the test to make sure we don't hang forever.
        self.old_alarm = signal.signal(signal.SIGALRM, self.handle_sig)
        signal.alarm(10)

    def tearDown(self):
        # remove alarm, restore old alarm handler
        signal.alarm(0)
        signal.signal(signal.SIGALRM, self.old_alarm)

    def handle_sig(self, sig, frame):
        # SIGALRM fired: some pty call hung; fail the test instead.
        self.fail("isatty hung")

    def test_basic(self):
        # Open a master/slave pair and verify data round-trips through it.
        try:
            debug("Calling master_open()")
            master_fd, slave_name = pty.master_open()
            debug("Got master_fd '%d', slave_name '%s'" %
                  (master_fd, slave_name))
            debug("Calling slave_open(%r)" % (slave_name,))
            slave_fd = pty.slave_open(slave_name)
            debug("Got slave_fd '%d'" % slave_fd)
        except OSError:
            # " An optional feature could not be imported " ... ?
            raise unittest.SkipTest, "Pseudo-terminals (seemingly) not functional."

        self.assertTrue(os.isatty(slave_fd), 'slave_fd is not a tty')

        # Solaris requires reading the fd before anything is returned.
        # My guess is that since we open and close the slave fd
        # in master_open(), we need to read the EOF.

        # Ensure the fd is non-blocking in case there's nothing to read.
        orig_flags = fcntl.fcntl(master_fd, fcntl.F_GETFL)
        fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags | os.O_NONBLOCK)
        try:
            s1 = os.read(master_fd, 1024)
            self.assertEqual('', s1)
        except OSError, e:
            if e.errno != errno.EAGAIN:
                raise
        # Restore the original flags.
        fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags)

        debug("Writing to slave_fd")
        os.write(slave_fd, TEST_STRING_1)
        s1 = os.read(master_fd, 1024)
        self.assertEqual('I wish to buy a fish license.\n',
                         normalize_output(s1))

        debug("Writing chunked output")
        os.write(slave_fd, TEST_STRING_2[:5])
        os.write(slave_fd, TEST_STRING_2[5:])
        s2 = os.read(master_fd, 1024)
        self.assertEqual('For my pet fish, Eric.\n', normalize_output(s2))

        os.close(slave_fd)
        os.close(master_fd)

    def test_fork(self):
        # Fork a child on a new pty; the child reports via its exit status
        # (4 == all checks passed), which the parent then interprets.
        debug("calling pty.fork()")
        pid, master_fd = pty.fork()
        if pid == pty.CHILD:
            # stdout should be connected to a tty.
            if not os.isatty(1):
                debug("Child's fd 1 is not a tty?!")
                os._exit(3)

            # After pty.fork(), the child should already be a session leader.
            # (on those systems that have that concept.)
            debug("In child, calling os.setsid()")
            try:
                os.setsid()
            except OSError:
                # Good, we already were session leader
                debug("Good: OSError was raised.")
                pass
            except AttributeError:
                # Have pty, but not setsid()?
                debug("No setsid() available?")
                pass
            except:
                # We don't want this error to propagate, escaping the call to
                # os._exit() and causing very peculiar behavior in the calling
                # regrtest.py !
                # Note: could add traceback printing here.
                debug("An unexpected error was raised.")
                os._exit(1)
            else:
                debug("os.setsid() succeeded! (bad!)")
                os._exit(2)
            os._exit(4)
        else:
            debug("Waiting for child (%d) to finish." % pid)
            # In verbose mode, we have to consume the debug output from the
            # child or the child will block, causing this test to hang in the
            # parent's waitpid() call.  The child blocks after a
            # platform-dependent amount of data is written to its fd.  On
            # Linux 2.6, it's 4000 bytes and the child won't block, but on OS
            # X even the small writes in the child above will block it.  Also
            # on Linux, the read() will throw an OSError (input/output error)
            # when it tries to read past the end of the buffer but the child's
            # already exited, so catch and discard those exceptions.  It's not
            # worth checking for EIO.
            while True:
                try:
                    data = os.read(master_fd, 80)
                except OSError:
                    break
                if not data:
                    break
                sys.stdout.write(data.replace('\r\n', '\n'))

            ##line = os.read(master_fd, 80)
            ##lines = line.replace('\r\n', '\n').split('\n')
            ##if False and lines != ['In child, calling os.setsid()',
            ##                       'Good: OSError was raised.', '']:
            ##    raise TestFailed("Unexpected output from child: %r" % line)

            # Decode the child's verdict from the high byte of its status.
            (pid, status) = os.waitpid(pid, 0)
            res = status >> 8
            debug("Child (%d) exited with status %d (%d)."
                  % (pid, res, status))
            if res == 1:
                self.fail("Child raised an unexpected exception in os.setsid()")
            elif res == 2:
                self.fail("pty.fork() failed to make child a session leader.")
            elif res == 3:
                self.fail("Child spawned by pty.fork() did not have a tty as stdout")
            elif res != 4:
                self.fail("pty.fork() failed for unknown reasons.")

            ##debug("Reading from master_fd now that the child has exited")
            ##try:
            ##    s1 = os.read(master_fd, 1024)
            ##except os.error:
            ##    pass
            ##else:
            ##    raise TestFailed("Read from master_fd did not raise exception")

        os.close(master_fd)

        # pty.fork() passed.


class SmallPtyTests(unittest.TestCase):
    """These tests don't spawn children or hang."""

    def setUp(self):
        # Save the real stdio fds and pty.select so they can be mocked out,
        # then restored in tearDown.
        self.orig_stdin_fileno = pty.STDIN_FILENO
        self.orig_stdout_fileno = pty.STDOUT_FILENO
        self.orig_pty_select = pty.select
        self.fds = []  # A list of file descriptors to close.
        self.select_rfds_lengths = []
        self.select_rfds_results = []

    def tearDown(self):
        # Undo the module-level monkeypatching and close every fd we opened.
        pty.STDIN_FILENO = self.orig_stdin_fileno
        pty.STDOUT_FILENO = self.orig_stdout_fileno
        pty.select = self.orig_pty_select
        for fd in self.fds:
            try:
                os.close(fd)
            except:
                pass

    def _pipe(self):
        # Open a pipe and register both ends for cleanup.
        pipe_fds = os.pipe()
        self.fds.extend(pipe_fds)
        return pipe_fds

    def _mock_select(self, rfds, wfds, xfds):
        # This will raise IndexError when no more expected calls exist.
        self.assertEqual(self.select_rfds_lengths.pop(0), len(rfds))
        return self.select_rfds_results.pop(0), [], []

    def test__copy_to_each(self):
        """Test the normal data case on both master_fd and stdin."""
        read_from_stdout_fd, mock_stdout_fd = self._pipe()
        pty.STDOUT_FILENO = mock_stdout_fd
        mock_stdin_fd, write_to_stdin_fd = self._pipe()
        pty.STDIN_FILENO = mock_stdin_fd
        socketpair = socket.socketpair()
        masters = [s.fileno() for s in socketpair]
        self.fds.extend(masters)

        # Feed data.  Smaller than PIPEBUF.  These writes will not block.
        os.write(masters[1], b'from master')
        os.write(write_to_stdin_fd, b'from stdin')

        # Expect two select calls, the last one will cause IndexError
        pty.select = self._mock_select
        self.select_rfds_lengths.append(2)
        self.select_rfds_results.append([mock_stdin_fd, masters[0]])
        self.select_rfds_lengths.append(2)

        with self.assertRaises(IndexError):
            pty._copy(masters[0])

        # Test that the right data went to the right places.
        rfds = select.select([read_from_stdout_fd, masters[1]], [], [], 0)[0]
        self.assertEqual([read_from_stdout_fd, masters[1]], rfds)
        self.assertEqual(os.read(read_from_stdout_fd, 20), b'from master')
        self.assertEqual(os.read(masters[1], 20), b'from stdin')

    def test__copy_eof_on_all(self):
        """Test the empty read EOF case on both master_fd and stdin."""
        read_from_stdout_fd, mock_stdout_fd = self._pipe()
        pty.STDOUT_FILENO = mock_stdout_fd
        mock_stdin_fd, write_to_stdin_fd = self._pipe()
        pty.STDIN_FILENO = mock_stdin_fd
        socketpair = socket.socketpair()
        masters = [s.fileno() for s in socketpair]
        self.fds.extend(masters)

        # Close the write ends so both reads below observe EOF.
        os.close(masters[1])
        socketpair[1].close()
        os.close(write_to_stdin_fd)

        # Expect two select calls, the last one will cause IndexError
        pty.select = self._mock_select
        self.select_rfds_lengths.append(2)
        self.select_rfds_results.append([mock_stdin_fd, masters[0]])
        # We expect that both fds were removed from the fds list as they
        # both encountered an EOF before the second select call.
        self.select_rfds_lengths.append(0)

        with self.assertRaises(IndexError):
            pty._copy(masters[0])


def test_main(verbose=None):
    # Entry point used by regrtest.
    run_unittest(SmallPtyTests, PtyTest)


if __name__ == "__main__":
    test_main()
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * A mechanism for selectively retrying methods that throw exceptions under * certain circumstances. * Typical usage is * UnreliableImplementation unreliableImpl = new UnreliableImplementation(); * UnreliableInterface unreliable = (UnreliableInterface) * RetryProxy.create(UnreliableInterface.class, unreliableImpl, * RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, * TimeUnit.SECONDS)); * unreliable.call(); * * This will retry any method called on <code>unreliable</code> four times - * in this case the <code>call()</code> method - sleeping 10 seconds between * each retry. There are a number of * {@link org.apache.hadoop.io.retry.RetryPolicies retry policies} * available, or you can implement a custom one by implementing * {@link org.apache.hadoop.io.retry.RetryPolicy}. * It is also possible to specify retry policies on a * {@link org.apache.hadoop.io.retry.RetryProxy#create(Class, Object, Map) * per-method basis}. */ @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"}) @InterfaceStability.Evolving package org.apache.hadoop.io.retry; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
java
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
// Boost.Bimap // // Copyright (c) 2006-2007 Matias Capeletto // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) /// \file unordered_multiset_of.hpp /// \brief Include support for unordered_multiset constrains for the bimap container #ifndef BOOST_BIMAP_UNORDERED_MULTISET_OF_HPP #define BOOST_BIMAP_UNORDERED_MULTISET_OF_HPP #if defined(_MSC_VER) #pragma once #endif #include <boost/config.hpp> #include <boost/bimap/detail/user_interface_config.hpp> #include <cstdlib> #include <functional> #include <boost/functional/hash.hpp> #include <boost/mpl/bool.hpp> #include <boost/concept_check.hpp> #include <boost/bimap/detail/concept_tags.hpp> #include <boost/bimap/tags/support/value_type_of.hpp> #include <boost/bimap/detail/generate_index_binder.hpp> #include <boost/bimap/detail/generate_view_binder.hpp> #include <boost/bimap/detail/generate_relation_binder.hpp> #include <boost/multi_index/hashed_index.hpp> #include <boost/bimap/views/unordered_multimap_view.hpp> #include <boost/bimap/views/unordered_multiset_view.hpp> namespace boost { namespace bimaps { /// \brief Set Type Specification /** This struct is used to specify an unordered_multiset specification. It is not a container, it is just a metaprogramming facility to express the type of a set. Generally, this specification will be used in other place to create a container. It has the same syntax that an tr1::unordered_multiset instantiation, except that the allocator cannot be specified. The rationale behind this difference is that the allocator is not part of the unordered_multiset type specification, rather it is a container configuration parameter. The first parameter is the type of the objects in the set, the second one is a Hash Functor that takes objects of this type, and the third one is a Functor that compares them for equality. 
Bimap binding metafunctions can be used with this class in the following way: \code using namespace support; BOOST_STATIC_ASSERT( is_set_type_of< unordered_multiset_of<Type> >::value ) BOOST_STATIC_ASSERT ( is_same < compute_index_type < unordered_multiset_of<Type,HashFunctor,EqualKey>, KeyExtractor, Tag >::type , hashed_nonunique< tag<Tag>, KeyExtractor, HashFunctor, EqualKey > >::value ) typedef bimap < unordered_multiset_of<Type>, RightKeyType > bimap_with_left_type_as_unordered_multiset; BOOST_STATIC_ASSERT ( is_same < compute_map_view_type < member_at::left, bimap_with_left_type_as_unordered_multiset >::type, unordered_multimap_view < member_at::left, bimap_with_left_type_as_unordered_multiset > >::value ) \endcode See also unordered_multiset_of_relation. **/ template < class KeyType, class HashFunctor = hash< BOOST_DEDUCED_TYPENAME ::boost::bimaps::tags::support::value_type_of<KeyType>::type >, class EqualKey = std::equal_to< BOOST_DEDUCED_TYPENAME ::boost::bimaps::tags::support::value_type_of<KeyType>::type > > struct unordered_multiset_of : public ::boost::bimaps::detail::set_type_of_tag { /// User type, can be tagged typedef KeyType user_type; /// Type of the object that will be stored in the container typedef BOOST_DEDUCED_TYPENAME ::boost::bimaps::tags::support:: value_type_of<user_type>::type value_type; /// Hash Functor that takes value_type objects typedef HashFunctor hasher; /// Functor that compare two value_type objects for equality typedef EqualKey key_equal; struct lazy_concept_checked { BOOST_CLASS_REQUIRE ( value_type, boost, AssignableConcept ); BOOST_CLASS_REQUIRE3( hasher, std::size_t, value_type, boost, UnaryFunctionConcept ); BOOST_CLASS_REQUIRE4( key_equal, bool, value_type, value_type, boost, BinaryFunctionConcept ); typedef unordered_multiset_of type; }; BOOST_BIMAP_GENERATE_INDEX_BINDER_2CP( // binds to multi_index::hashed_non_unique, // with hasher, key_equal ) BOOST_BIMAP_GENERATE_MAP_VIEW_BINDER( // binds to 
views::unordered_multimap_view ) BOOST_BIMAP_GENERATE_SET_VIEW_BINDER( // binds to views::unordered_multiset_view ) typedef mpl::bool_<false> mutable_key; }; /// \brief Set Of Relation Specification /** This struct is similar to unordered_multiset_of but it is bind logically to a relation. It is used in the bimap instantiation to specify the desired type of the main view. This struct implements internally a metafunction named bind_to that manages the quite complicated task of finding the right type of the set for the relation. \code template<class Relation> struct bind_to { typedef -unspecified- type; }; \endcode See also unordered_multiset_of, is_set_type_of_relation. **/ template < class HashFunctor = hash< _relation >, class EqualKey = std::equal_to< _relation > > struct unordered_multiset_of_relation : public ::boost::bimaps::detail::set_type_of_relation_tag { /// Hash Functor that takes value_type objects typedef HashFunctor hasher; /// Functor that compare two value_type objects for equality typedef EqualKey key_equal; BOOST_BIMAP_GENERATE_RELATION_BINDER_2CP( // binds to unordered_multiset_of, // with hasher, key_equal ) typedef mpl::bool_<false> left_mutable_key; typedef mpl::bool_<false> right_mutable_key; }; } // namespace bimaps } // namespace boost #endif // BOOST_BIMAP_UNORDERED_MULTISET_OF_HPP
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/bimap/unordered_multiset_of.hpp
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools

from oslo_config import cfg

from blazar import context
from blazar.utils.openstack import base
from blazar.utils.openstack import keystone

CONF = cfg.CONF


def create_trust():
    """Creates trust via Keystone API v3 to use in plugins.

    The trustor is the user from the current request context; the trustee
    is the configured Blazar admin user. Returns the created trust object.
    """
    client = keystone.BlazarKeystoneClient()

    # Resolve the admin (trustee) user id with a separately authenticated
    # client, since the default client is scoped to the request user.
    trustee_id = keystone.BlazarKeystoneClient(
        username=CONF.os_admin_username,
        password=CONF.os_admin_password,
        tenant_name=CONF.os_admin_project_name).user_id

    ctx = context.current()
    trust = client.trusts.create(trustor_user=ctx.user_id,
                                 trustee_user=trustee_id,
                                 impersonation=False,
                                 role_names=ctx.roles,
                                 project=ctx.project_id)
    return trust


def delete_trust(lease):
    """Deletes trust for the specified lease.

    No-op when the lease carries no trust_id.
    """
    if lease.trust_id:
        client = keystone.BlazarKeystoneClient(trust_id=lease.trust_id)
        client.trusts.delete(lease.trust_id)


def create_ctx_from_trust(trust_id):
    """Return context built from given trust.

    Authenticates a Keystone client with the trust and wraps the resulting
    token/catalog in a BlazarContext carrying the admin identity.
    """
    ctx = context.current()

    # Intermediate context: admin identity, but preserving the request ids
    # of the incoming call for traceability.
    ctx = context.BlazarContext(
        user_name=CONF.os_admin_username,
        project_name=CONF.os_admin_project_name,
        request_id=ctx.request_id,
        global_request_id=ctx.global_request_id
    )

    auth_url = "%s://%s:%s" % (CONF.os_auth_protocol,
                               base.get_os_auth_host(CONF),
                               CONF.os_auth_port)
    if CONF.os_auth_prefix:
        auth_url += "/%s" % CONF.os_auth_prefix

    client = keystone.BlazarKeystoneClient(
        password=CONF.os_admin_password,
        trust_id=trust_id,
        auth_url=auth_url,
        ctx=ctx,
    )

    # use 'with ctx' statement in the place you need context from trust
    return context.BlazarContext(
        user_name=ctx.user_name,
        project_name=ctx.project_name,
        auth_token=client.auth_token,
        service_catalog=client.service_catalog.catalog['catalog'],
        project_id=client.tenant_id,
        request_id=ctx.request_id,
        global_request_id=ctx.global_request_id
    )


def use_trust_auth():
    """Decorator creates a keystone trust

    This decorator creates a keystone trust, and adds the trust_id to the
    parameter of the decorated method.
    """
    def decorator(func):

        @functools.wraps(func)
        def wrapped(self, to_update):
            if to_update is not None:
                trust = create_trust()
                if isinstance(to_update, dict):
                    to_update.update({'trust_id': trust.id})
                else:
                    # BUGFIX: this branch was guarded by
                    # `elif isinstance(to_update, object)`, which is a
                    # tautology (every Python value is an object) and read
                    # as if some inputs could skip it. A plain `else` has
                    # identical behavior and states the intent clearly.
                    setattr(to_update, 'trust_id', trust.id)
            return func(self, to_update)

        return wrapped
    return decorator
unknown
codeparrot/codeparrot-clean
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) %YAML 1.2 --- $id: http://devicetree.org/schemas/embedded-controller/acer,aspire1-ec.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Acer Aspire 1 Embedded Controller maintainers: - Nikita Travkin <nikita@trvn.ru> description: The Acer Aspire 1 laptop uses an embedded controller to control battery and charging as well as to provide a set of misc features such as the laptop lid status and HPD events for the USB Type-C DP alt mode. properties: compatible: const: acer,aspire1-ec reg: const: 0x76 interrupts: maxItems: 1 connector: $ref: /schemas/connector/usb-connector.yaml# required: - compatible - reg - interrupts additionalProperties: false examples: - | #include <dt-bindings/interrupt-controller/irq.h> i2c { #address-cells = <1>; #size-cells = <0>; embedded-controller@76 { compatible = "acer,aspire1-ec"; reg = <0x76>; interrupts-extended = <&tlmm 30 IRQ_TYPE_LEVEL_LOW>; connector { compatible = "usb-c-connector"; port { ec_dp_in: endpoint { remote-endpoint = <&mdss_dp_out>; }; }; }; }; };
unknown
github
https://github.com/torvalds/linux
Documentation/devicetree/bindings/embedded-controller/acer,aspire1-ec.yaml
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from collections.abc import MutableMapping from numbers import Integral, Real import numpy as np from sklearn.base import ( BaseEstimator, ClassifierMixin, MetaEstimatorMixin, _fit_context, clone, ) from sklearn.exceptions import NotFittedError from sklearn.metrics import check_scoring, get_scorer_names from sklearn.metrics._scorer import _CurveScorer, _threshold_scores_to_class_labels from sklearn.model_selection._split import StratifiedShuffleSplit, check_cv from sklearn.utils import _safe_indexing, get_tags from sklearn.utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions from sklearn.utils._response import _get_response_values_binary from sklearn.utils.metadata_routing import ( MetadataRouter, MethodMapping, _raise_for_params, process_routing, ) from sklearn.utils.metaestimators import available_if from sklearn.utils.multiclass import type_of_target from sklearn.utils.parallel import Parallel, delayed from sklearn.utils.validation import ( _check_method_params, _estimator_has, _num_samples, check_is_fitted, indexable, ) def _check_is_fitted(estimator): try: check_is_fitted(estimator.estimator) except NotFittedError: check_is_fitted(estimator, "estimator_") class BaseThresholdClassifier(ClassifierMixin, MetaEstimatorMixin, BaseEstimator): """Base class for binary classifiers that set a non-default decision threshold. In this base class, we define the following interface: - the validation of common parameters in `fit`; - the different prediction methods that can be used with the classifier. .. versionadded:: 1.5 Parameters ---------- estimator : estimator instance The binary classifier, fitted or not, for which we want to optimize the decision threshold used during `predict`. 
response_method : {"auto", "decision_function", "predict_proba"}, default="auto" Methods by the classifier `estimator` corresponding to the decision function for which we want to find a threshold. It can be: * if `"auto"`, it will try to invoke, for each classifier, `"predict_proba"` or `"decision_function"` in that order. * otherwise, one of `"predict_proba"` or `"decision_function"`. If the method is not implemented by the classifier, it will raise an error. """ _parameter_constraints: dict = { "estimator": [ HasMethods(["fit", "predict_proba"]), HasMethods(["fit", "decision_function"]), ], "response_method": [StrOptions({"auto", "predict_proba", "decision_function"})], } def __init__(self, estimator, *, response_method="auto"): self.estimator = estimator self.response_method = response_method def _get_response_method(self): """Define the response method.""" if self.response_method == "auto": response_method = ["predict_proba", "decision_function"] else: response_method = self.response_method return response_method @_fit_context( # *ThresholdClassifier*.estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y, **params): """Fit the classifier. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. **params : dict Parameters to pass to the `fit` method of the underlying classifier. Returns ------- self : object Returns an instance of self. """ _raise_for_params(params, self, None) X, y = indexable(X, y) y_type = type_of_target(y, input_name="y") if y_type != "binary": raise ValueError( f"Only binary classification is supported. 
Unknown label type: {y_type}" ) self._fit(X, y, **params) if hasattr(self.estimator_, "n_features_in_"): self.n_features_in_ = self.estimator_.n_features_in_ if hasattr(self.estimator_, "feature_names_in_"): self.feature_names_in_ = self.estimator_.feature_names_in_ return self @property def classes_(self): """Classes labels.""" return self.estimator_.classes_ @available_if(_estimator_has("predict_proba")) def predict_proba(self, X): """Predict class probabilities for `X` using the fitted estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- probabilities : ndarray of shape (n_samples, n_classes) The class probabilities of the input samples. """ _check_is_fitted(self) estimator = getattr(self, "estimator_", self.estimator) return estimator.predict_proba(X) @available_if(_estimator_has("predict_log_proba")) def predict_log_proba(self, X): """Predict logarithm class probabilities for `X` using the fitted estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- log_probabilities : ndarray of shape (n_samples, n_classes) The logarithm class probabilities of the input samples. """ _check_is_fitted(self) estimator = getattr(self, "estimator_", self.estimator) return estimator.predict_log_proba(X) @available_if(_estimator_has("decision_function")) def decision_function(self, X): """Decision function for samples in `X` using the fitted estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- decisions : ndarray of shape (n_samples,) The decision function computed the fitted estimator. 
""" _check_is_fitted(self) estimator = getattr(self, "estimator_", self.estimator) return estimator.decision_function(X) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.classifier_tags.multi_class = False tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse return tags class FixedThresholdClassifier(BaseThresholdClassifier): """Binary classifier that manually sets the decision threshold. This classifier allows to change the default decision threshold used for converting posterior probability estimates (i.e. output of `predict_proba`) or decision scores (i.e. output of `decision_function`) into a class label. Here, the threshold is not optimized and is set to a constant value. Read more in the :ref:`User Guide <FixedThresholdClassifier>`. .. versionadded:: 1.5 Parameters ---------- estimator : estimator instance The binary classifier, fitted or not, for which we want to optimize the decision threshold used during `predict`. threshold : {"auto"} or float, default="auto" The decision threshold to use when converting posterior probability estimates (i.e. output of `predict_proba`) or decision scores (i.e. output of `decision_function`) into a class label. When `"auto"`, the threshold is set to 0.5 if `predict_proba` is used as `response_method`, otherwise it is set to 0 (i.e. the default threshold for `decision_function`). pos_label : int, float, bool or str, default=None The label of the positive class. Used to process the output of the `response_method` method. When `pos_label=None`, if `y_true` is in `{-1, 1}` or `{0, 1}`, `pos_label` is set to 1, otherwise an error will be raised. response_method : {"auto", "decision_function", "predict_proba"}, default="auto" Methods by the classifier `estimator` corresponding to the decision function for which we want to find a threshold. It can be: * if `"auto"`, it will try to invoke `"predict_proba"` or `"decision_function"` in that order. 
* otherwise, one of `"predict_proba"` or `"decision_function"`. If the method is not implemented by the classifier, it will raise an error. Attributes ---------- estimator_ : estimator instance The fitted classifier used when predicting. classes_ : ndarray of shape (n_classes,) The class labels. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. See Also -------- sklearn.model_selection.TunedThresholdClassifierCV : Classifier that post-tunes the decision threshold based on some metrics and using cross-validation. sklearn.calibration.CalibratedClassifierCV : Estimator that calibrates probabilities. Examples -------- >>> from sklearn.datasets import make_classification >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.metrics import confusion_matrix >>> from sklearn.model_selection import FixedThresholdClassifier, train_test_split >>> X, y = make_classification( ... n_samples=1_000, weights=[0.9, 0.1], class_sep=0.8, random_state=42 ... ) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, stratify=y, random_state=42 ... ) >>> classifier = LogisticRegression(random_state=0).fit(X_train, y_train) >>> print(confusion_matrix(y_test, classifier.predict(X_test))) [[217 7] [ 19 7]] >>> classifier_other_threshold = FixedThresholdClassifier( ... classifier, threshold=0.1, response_method="predict_proba" ... 
).fit(X_train, y_train) >>> print(confusion_matrix(y_test, classifier_other_threshold.predict(X_test))) [[184 40] [ 6 20]] """ _parameter_constraints: dict = { **BaseThresholdClassifier._parameter_constraints, "threshold": [StrOptions({"auto"}), Real], "pos_label": [Real, str, "boolean", None], } def __init__( self, estimator, *, threshold="auto", pos_label=None, response_method="auto", ): super().__init__(estimator=estimator, response_method=response_method) self.pos_label = pos_label self.threshold = threshold @property def classes_(self): if estimator := getattr(self, "estimator_", None): return estimator.classes_ try: check_is_fitted(self.estimator) return self.estimator.classes_ except NotFittedError: raise AttributeError( "The underlying estimator is not fitted yet." ) from NotFittedError def _fit(self, X, y, **params): """Fit the classifier. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. **params : dict Parameters to pass to the `fit` method of the underlying classifier. Returns ------- self : object Returns an instance of self. """ routed_params = process_routing(self, "fit", **params) self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit) return self def predict(self, X): """Predict the target of new samples. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The samples, as accepted by `estimator.predict`. Returns ------- class_labels : ndarray of shape (n_samples,) The predicted class. 
""" _check_is_fitted(self) estimator = getattr(self, "estimator_", self.estimator) y_score, _, response_method_used = _get_response_values_binary( estimator, X, self._get_response_method(), pos_label=self.pos_label, return_response_method_used=True, ) if self.threshold == "auto": decision_threshold = 0.5 if response_method_used == "predict_proba" else 0.0 else: decision_threshold = self.threshold return _threshold_scores_to_class_labels( y_score, decision_threshold, self.classes_, self.pos_label ) def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. """ router = MetadataRouter(owner=self).add( estimator=self.estimator, method_mapping=MethodMapping().add(callee="fit", caller="fit"), ) return router def _fit_and_score_over_thresholds( classifier, X, y, *, fit_params, train_idx, val_idx, curve_scorer, score_params, ): """Fit a classifier and compute the scores for different decision thresholds. Parameters ---------- classifier : estimator instance The classifier to fit and use for scoring. If `classifier` is already fitted, it will be used as is. X : {array-like, sparse matrix} of shape (n_samples, n_features) The entire dataset. y : array-like of shape (n_samples,) The entire target vector. fit_params : dict Parameters to pass to the `fit` method of the underlying classifier. train_idx : ndarray of shape (n_train_samples,) or None The indices of the training set. If `None`, `classifier` is expected to be already fitted. val_idx : ndarray of shape (n_val_samples,) The indices of the validation set used to score `classifier`. If `train_idx`, the entire set will be used. curve_scorer : scorer instance The scorer taking `classifier` and the validation set as input and outputting decision thresholds and scores as a curve. 
Note that this is different from the usual scorer that outputs a single score value as `curve_scorer` outputs a single score value for each threshold. score_params : dict Parameters to pass to the `score` method of the underlying scorer. Returns ------- scores : ndarray of shape (thresholds,) or tuple of such arrays The scores computed for each decision threshold. When TPR/TNR or precision/ recall are computed, `scores` is a tuple of two arrays. potential_thresholds : ndarray of shape (thresholds,) The decision thresholds used to compute the scores. They are returned in ascending order. """ if train_idx is not None: X_train, X_val = _safe_indexing(X, train_idx), _safe_indexing(X, val_idx) y_train, y_val = _safe_indexing(y, train_idx), _safe_indexing(y, val_idx) fit_params_train = _check_method_params(X, fit_params, indices=train_idx) score_params_val = _check_method_params(X, score_params, indices=val_idx) classifier.fit(X_train, y_train, **fit_params_train) else: # prefit estimator, only a validation set is provided X_val, y_val, score_params_val = X, y, score_params return curve_scorer(classifier, X_val, y_val, **score_params_val) def _mean_interpolated_score(target_thresholds, cv_thresholds, cv_scores): """Compute the mean interpolated score across folds by defining common thresholds. Parameters ---------- target_thresholds : ndarray of shape (thresholds,) The thresholds to use to compute the mean score. cv_thresholds : ndarray of shape (n_folds, thresholds_fold) The thresholds used to compute the scores for each fold. cv_scores : ndarray of shape (n_folds, thresholds_fold) The scores computed for each threshold for each fold. Returns ------- mean_score : ndarray of shape (thresholds,) The mean score across all folds for each target threshold. 
""" return np.mean( [ np.interp(target_thresholds, split_thresholds, split_score) for split_thresholds, split_score in zip(cv_thresholds, cv_scores) ], axis=0, ) class TunedThresholdClassifierCV(BaseThresholdClassifier): """Classifier that post-tunes the decision threshold using cross-validation. This estimator post-tunes the decision threshold (cut-off point) that is used for converting posterior probability estimates (i.e. output of `predict_proba`) or decision scores (i.e. output of `decision_function`) into a class label. The tuning is done by optimizing a binary metric, potentially constrained by another metric. Read more in the :ref:`User Guide <TunedThresholdClassifierCV>`. .. versionadded:: 1.5 Parameters ---------- estimator : estimator instance The classifier, fitted or not, for which we want to optimize the decision threshold used during `predict`. scoring : str or callable, default="balanced_accuracy" The objective metric to be optimized. Can be one of: - str: string associated to a scoring function for binary classification, see :ref:`scoring_string_names` for options. - callable: a scorer callable object (e.g., function) with signature ``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details. response_method : {"auto", "decision_function", "predict_proba"}, default="auto" Methods by the classifier `estimator` corresponding to the decision function for which we want to find a threshold. It can be: * if `"auto"`, it will try to invoke, for each classifier, `"predict_proba"` or `"decision_function"` in that order. * otherwise, one of `"predict_proba"` or `"decision_function"`. If the method is not implemented by the classifier, it will raise an error. thresholds : int or array-like, default=100 The number of decision threshold to use when discretizing the output of the classifier `method`. Pass an array-like to manually specify the thresholds to use. 
cv : int, float, cross-validation generator, iterable or "prefit", default=None Determines the cross-validation splitting strategy to train classifier. Possible inputs for cv are: * `None`, to use the default 5-fold stratified K-fold cross validation; * An integer number, to specify the number of folds in a stratified k-fold; * A float number, to specify a single shuffle split. The floating number should be in (0, 1) and represent the size of the validation set; * An object to be used as a cross-validation generator; * An iterable yielding train, test splits; * `"prefit"`, to bypass the cross-validation. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. warning:: Using `cv="prefit"` and passing the same dataset for fitting `estimator` and tuning the cut-off point is subject to undesired overfitting. You can refer to :ref:`TunedThresholdClassifierCV_no_cv` for an example. This option should only be used when the set used to fit `estimator` is different from the one used to tune the cut-off point (by calling :meth:`TunedThresholdClassifierCV.fit`). refit : bool, default=True Whether or not to refit the classifier on the entire training set once the decision threshold has been found. Note that forcing `refit=False` on cross-validation having more than a single split will raise an error. Similarly, `refit=True` in conjunction with `cv="prefit"` will raise an error. n_jobs : int, default=None The number of jobs to run in parallel. When `cv` represents a cross-validation strategy, the fitting and scoring on each data split is done in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. random_state : int, RandomState instance or None, default=None Controls the randomness of cross-validation when `cv` is a float. See :term:`Glossary <random_state>`. 
store_cv_results : bool, default=False Whether to store all scores and thresholds computed during the cross-validation process. Attributes ---------- estimator_ : estimator instance The fitted classifier used when predicting. best_threshold_ : float The new decision threshold. best_score_ : float or None The optimal score of the objective metric, evaluated at `best_threshold_`. cv_results_ : dict or None A dictionary containing the scores and thresholds computed during the cross-validation process. Only exist if `store_cv_results=True`. The keys are `"thresholds"` and `"scores"`. classes_ : ndarray of shape (n_classes,) The class labels. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. See Also -------- sklearn.model_selection.FixedThresholdClassifier : Classifier that uses a constant threshold. sklearn.calibration.CalibratedClassifierCV : Estimator that calibrates probabilities. Examples -------- >>> from sklearn.datasets import make_classification >>> from sklearn.ensemble import RandomForestClassifier >>> from sklearn.metrics import classification_report >>> from sklearn.model_selection import TunedThresholdClassifierCV, train_test_split >>> X, y = make_classification( ... n_samples=1_000, weights=[0.9, 0.1], class_sep=0.8, random_state=42 ... ) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, stratify=y, random_state=42 ... 
) >>> classifier = RandomForestClassifier(random_state=0).fit(X_train, y_train) >>> print(classification_report(y_test, classifier.predict(X_test))) precision recall f1-score support <BLANKLINE> 0 0.94 0.99 0.96 224 1 0.80 0.46 0.59 26 <BLANKLINE> accuracy 0.93 250 macro avg 0.87 0.72 0.77 250 weighted avg 0.93 0.93 0.92 250 <BLANKLINE> >>> classifier_tuned = TunedThresholdClassifierCV( ... classifier, scoring="balanced_accuracy" ... ).fit(X_train, y_train) >>> print( ... f"Cut-off point found at {classifier_tuned.best_threshold_:.3f}" ... ) Cut-off point found at 0.342 >>> print(classification_report(y_test, classifier_tuned.predict(X_test))) precision recall f1-score support <BLANKLINE> 0 0.96 0.95 0.96 224 1 0.61 0.65 0.63 26 <BLANKLINE> accuracy 0.92 250 macro avg 0.78 0.80 0.79 250 weighted avg 0.92 0.92 0.92 250 <BLANKLINE> """ _parameter_constraints: dict = { **BaseThresholdClassifier._parameter_constraints, "scoring": [ StrOptions(set(get_scorer_names())), callable, MutableMapping, ], "thresholds": [Interval(Integral, 1, None, closed="left"), "array-like"], "cv": [ "cv_object", StrOptions({"prefit"}), Interval(RealNotInt, 0.0, 1.0, closed="neither"), ], "refit": ["boolean"], "n_jobs": [Integral, None], "random_state": ["random_state"], "store_cv_results": ["boolean"], } def __init__( self, estimator, *, scoring="balanced_accuracy", response_method="auto", thresholds=100, cv=None, refit=True, n_jobs=None, random_state=None, store_cv_results=False, ): super().__init__(estimator=estimator, response_method=response_method) self.scoring = scoring self.thresholds = thresholds self.cv = cv self.refit = refit self.n_jobs = n_jobs self.random_state = random_state self.store_cv_results = store_cv_results def _fit(self, X, y, **params): """Fit the classifier and post-tune the decision threshold. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. 
**params : dict Parameters to pass to the `fit` method of the underlying classifier and to the `scoring` scorer. Returns ------- self : object Returns an instance of self. """ if isinstance(self.cv, Real) and 0 < self.cv < 1: cv = StratifiedShuffleSplit( n_splits=1, test_size=self.cv, random_state=self.random_state ) elif self.cv == "prefit": if self.refit is True: raise ValueError("When cv='prefit', refit cannot be True.") try: check_is_fitted(self.estimator, "classes_") except NotFittedError as exc: raise NotFittedError( """When cv='prefit', `estimator` must be fitted.""" ) from exc cv = self.cv else: cv = check_cv(self.cv, y=y, classifier=True) if self.refit is False and cv.get_n_splits() > 1: raise ValueError("When cv has several folds, refit cannot be False.") routed_params = process_routing(self, "fit", **params) self._curve_scorer = self._get_curve_scorer() # in the following block, we: # - define the final classifier `self.estimator_` and train it if necessary # - define `classifier` to be used to post-tune the decision threshold # - define `split` to be used to fit/score `classifier` if cv == "prefit": self.estimator_ = self.estimator classifier = self.estimator_ splits = [(None, range(_num_samples(X)))] else: self.estimator_ = clone(self.estimator) classifier = clone(self.estimator) splits = cv.split(X, y, **routed_params.splitter.split) if self.refit: # train on the whole dataset X_train, y_train, fit_params_train = X, y, routed_params.estimator.fit else: # single split cross-validation train_idx, _ = next(cv.split(X, y, **routed_params.splitter.split)) X_train = _safe_indexing(X, train_idx) y_train = _safe_indexing(y, train_idx) fit_params_train = _check_method_params( X, routed_params.estimator.fit, indices=train_idx ) self.estimator_.fit(X_train, y_train, **fit_params_train) cv_scores, cv_thresholds = zip( *Parallel(n_jobs=self.n_jobs)( delayed(_fit_and_score_over_thresholds)( clone(classifier) if cv != "prefit" else classifier, X, y, 
fit_params=routed_params.estimator.fit, train_idx=train_idx, val_idx=val_idx, curve_scorer=self._curve_scorer, score_params=routed_params.scorer.score, ) for train_idx, val_idx in splits ) ) if any(np.isclose(th[0], th[-1]) for th in cv_thresholds): raise ValueError( "The provided estimator makes constant predictions. Therefore, it is " "impossible to optimize the decision threshold." ) # find the global min and max thresholds across all folds min_threshold = min( split_thresholds.min() for split_thresholds in cv_thresholds ) max_threshold = max( split_thresholds.max() for split_thresholds in cv_thresholds ) if isinstance(self.thresholds, Integral): decision_thresholds = np.linspace( min_threshold, max_threshold, num=self.thresholds ) else: decision_thresholds = np.asarray(self.thresholds) objective_scores = _mean_interpolated_score( decision_thresholds, cv_thresholds, cv_scores ) best_idx = objective_scores.argmax() self.best_score_ = objective_scores[best_idx] self.best_threshold_ = decision_thresholds[best_idx] if self.store_cv_results: self.cv_results_ = { "thresholds": decision_thresholds, "scores": objective_scores, } return self def predict(self, X): """Predict the target of new samples. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The samples, as accepted by `estimator.predict`. Returns ------- class_labels : ndarray of shape (n_samples,) The predicted class. """ check_is_fitted(self, "estimator_") pos_label = self._curve_scorer._get_pos_label() y_score, _ = _get_response_values_binary( self.estimator_, X, self._get_response_method(), pos_label=pos_label, ) return _threshold_scores_to_class_labels( y_score, self.best_threshold_, self.classes_, pos_label ) def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. 
Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. """ router = ( MetadataRouter(owner=self) .add( estimator=self.estimator, method_mapping=MethodMapping().add(callee="fit", caller="fit"), ) .add( splitter=self.cv, method_mapping=MethodMapping().add(callee="split", caller="fit"), ) .add( scorer=self._get_curve_scorer(), method_mapping=MethodMapping().add(callee="score", caller="fit"), ) ) return router def _get_curve_scorer(self): """Get the curve scorer based on the objective metric used.""" scoring = check_scoring(self.estimator, scoring=self.scoring) curve_scorer = _CurveScorer.from_scorer( scoring, self._get_response_method(), self.thresholds ) return curve_scorer
python
github
https://github.com/scikit-learn/scikit-learn
sklearn/model_selection/_classification_threshold.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe import json from frappe.model.document import Document from frappe.utils import get_fullname class ToDo(Document): def validate(self): if self.is_new(): self.add_assign_comment(frappe._("Assigned to {0}").format(get_fullname(self.owner)), "Assigned") else: cur_status = frappe.db.get_value("ToDo", self.name, "status") if cur_status != self.status: self.add_assign_comment(frappe._("Assignment closed by {0}".format(get_fullname(frappe.session.user))), "Assignment Completed") def on_update(self): self.update_in_reference() def on_trash(self): self.update_in_reference() def add_assign_comment(self, text, comment_type): if not self.reference_type and self.reference_name: return frappe.get_doc({ "doctype":"Comment", "comment_by": frappe.session.user, "comment_type": comment_type, "comment_doctype": self.reference_type, "comment_docname": self.reference_name, "comment": """{text}""".format(text=text) }).insert(ignore_permissions=True) def update_in_reference(self): if not (self.reference_type and self.reference_name): return try: assignments = [d[0] for d in frappe.get_all("ToDo", filters={ "reference_type": self.reference_type, "reference_name": self.reference_name, "status": "Open" }, fields=["owner"], as_list=True)] assignments.reverse() frappe.db.set_value(self.reference_type, self.reference_name, "_assign", json.dumps(assignments)) except Exception, e: if e.args[0] == 1146 and frappe.flags.in_install: # no table return elif e.args[0]==1054: from frappe.model.db_schema import add_column add_column(self.reference_type, "_assign", "Text") self.update_in_reference() else: raise # NOTE: todo is viewable if either owner or assigned_to or System Manager in roles def on_doctype_update(): frappe.db.add_index("ToDo", ["reference_type", "reference_name"]) def get_permission_query_conditions(user): if not user: user = 
frappe.session.user if "System Manager" in frappe.get_roles(user): return None else: return """(tabToDo.owner = '{user}' or tabToDo.assigned_by = '{user}')"""\ .format(user=frappe.db.escape(user)) def has_permission(doc, user): if "System Manager" in frappe.get_roles(user): return True else: return doc.owner==user or doc.assigned_by==user
unknown
codeparrot/codeparrot-clean
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'd F Y' # 25 Ottobre 2006 TIME_FORMAT = 'H:i:s' # 14:30:59 DATETIME_FORMAT = 'l d F Y H:i:s' # Mercoledì 25 Ottobre 2006 14:30:59 YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006 MONTH_DAY_FORMAT = 'j/F' # 10/2006 SHORT_DATE_FORMAT = 'd/m/Y' # 25/12/2009 SHORT_DATETIME_FORMAT = 'd/m/Y H:i:s' # 25/10/2009 14:30:59 FIRST_DAY_OF_WEEK = 1 # Lunedì # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%d/%m/%Y', '%Y/%m/%d', # '25/10/2006', '2008/10/25' '%d-%m-%Y', '%Y-%m-%d', # '25-10-2006', '2008-10-25' '%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06' ) DATETIME_INPUT_FORMATS = ( '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59' '%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200' '%d/%m/%y %H:%M', # '25/10/06 14:30' '%d/%m/%y', # '25/10/06' '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59' '%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200' '%d-%m-%Y %H:%M', # '25-10-2006 14:30' '%d-%m-%Y', # '25-10-2006' '%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59' '%d-%m-%y %H:%M:%S.%f', # '25-10-06 14:30:59.000200' '%d-%m-%y %H:%M', # '25-10-06 14:30' '%d-%m-%y', # '25-10-06' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
unknown
codeparrot/codeparrot-clean
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

from itertools import product

import bpy
from bpy.props import BoolVectorProperty, EnumProperty
from mathutils import Matrix

from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import dataCorrect, updateNode


class SvBBoxNodeMk3(bpy.types.Node, SverchCustomTreeNode):
    """
    Triggers: Bbox 2D or 3D
    Tooltip: Get vertices bounding box (vertices, sizes, center)
    """
    bl_idname = 'SvBBoxNodeMk3'
    bl_label = 'Bounding box'
    bl_icon = 'NONE'
    sv_icon = 'SV_BOUNDING_BOX'

    def update_sockets(self, context):
        """Show/hide the per-axis Min/Max/Size outputs to match the toggle
        matrices and the 2D/3D mode, then trigger a node-tree update."""
        bools = [self.min_list, self.max_list, self.size_list]
        dims = int(self.box_dimensions[0])  # '2D' -> 2, '3D' -> 3
        for i in range(3):            # i: 0=Min, 1=Max, 2=Size group
            for j in range(3):        # j: axis index (X, Y, Z)
                out_index = 4 + j + 3 * i  # first 4 outputs are fixed sockets
                hidden = self.outputs[out_index].hide_safe
                if bools[i][j] and j < dims:
                    # only unhide when actually hidden, to avoid redundant
                    # socket updates in the UI
                    if hidden:
                        self.outputs[out_index].hide_safe = False
                else:
                    self.outputs[out_index].hide_safe = True
        updateNode(self, context)

    min_list: BoolVectorProperty(
        name='Min', description="Show Minimum values sockets", size=3, update=update_sockets)
    max_list: BoolVectorProperty(
        # typo fix: was "Maximun"
        name='Max', description="Show Maximum values sockets", size=3, update=update_sockets)
    size_list: BoolVectorProperty(
        name='Size', description="Show Size values sockets", size=3, update=update_sockets)
    # typo fix: was `implentation_modes` (class-internal, referenced only below)
    implementation_modes = [
        ("2D", "2D", "Outputs Rectangle over XY plane", 0),
        ("3D", "3D", "Outputs standard bounding box", 1)]
    box_dimensions: EnumProperty(
        name='Implementation', items=implementation_modes,
        description='Choose calculation method', default="3D", update=update_sockets)

    def draw_buttons(self, context, layout):
        """Draw the 2D/3D selector plus a Min/Max/Size row of per-axis toggles."""
        layout.prop(self, 'box_dimensions', expand=True)
        col = layout.column(align=True)
        titles = ["Min", "Max", "Size"]
        prop = ['min_list', 'max_list', 'size_list']
        dims = int(self.box_dimensions[0])
        for i in range(3):
            row = col.row(align=True)
            row.label(text=titles[i])
            row2 = row.row(align=True)
            for j in range(dims):
                row2.prop(self, prop[i], index=j, text='XYZ'[j], toggle=True)

    def sv_init(self, context):
        """Create the input socket and all 13 output sockets."""
        son = self.outputs.new
        self.inputs.new('SvVerticesSocket', 'Vertices')
        son('SvVerticesSocket', 'Vertices')
        son('SvStringsSocket', 'Edges')
        son('SvVerticesSocket', 'Mean')
        son('SvMatrixSocket', 'Center')
        # outputs[4:13]: Min X..Z, Max X..Z, Size X..Z (order matters for
        # update_sockets and process)
        titles = ['Min', 'Max', 'Size']
        for j in range(3):
            for i in range(3):
                son('SvStringsSocket', titles[j] + ' ' + 'XYZ'[i])
        self.update_sockets(context)

    def migrate_from(self, old_node):
        # older node versions stored the 2D/3D switch under `dimensions`
        self.box_dimensions = old_node.dimensions

    def generate_matrix(self, maxmin, dims, to_2d):
        """Return a Matrix translated to the box center, with the box size on
        the diagonal. `maxmin` holds per-axis (max, min) pairs; in 2D mode the
        Z component is padded with center 0 / scale 1."""
        center = [(u + v) * .5 for u, v in maxmin[:dims]]
        scale = [(u - v) for u, v in maxmin[:dims]]
        if to_2d:
            center += [0]
            scale += [1]
        mat = Matrix.Translation(center)
        for i, sca in enumerate(scale):
            mat[i][i] = sca
        return mat

    def generate_mean(self, verts, dims, to_2d):
        """Return the arithmetic mean of `verts` (Z forced to 0 in 2D mode),
        wrapped in a list for socket output."""
        avr = list(map(sum, zip(*verts)))
        avr = [n / len(verts) for n in avr[:dims]]
        if to_2d:
            avr += [0]
        return [avr]

    def process(self):
        """Compute bounding-box data per object and push it to every linked
        output socket."""
        if not self.inputs['Vertices'].is_linked:
            return
        if not any(s.is_linked for s in self.outputs):
            return
        has_mat_out = bool(self.outputs['Center'].is_linked)
        has_mean = bool(self.outputs['Mean'].is_linked)
        has_vert_out = bool(self.outputs['Vertices'].is_linked)
        verts = self.inputs['Vertices'].sv_get(deepcopy=False)
        verts = dataCorrect(verts, nominal_dept=2)
        has_limits = any(s.is_linked for s in self.outputs[4:])

        if verts:
            verts_out = []
            edges_out = []
            edges = [
                (0, 1), (1, 3), (3, 2), (2, 0),  # bottom edges
                (4, 5), (5, 7), (7, 6), (6, 4),  # top edges
                (0, 4), (1, 5), (2, 6), (3, 7)   # sides
            ]
            mat_out = []
            mean_out = []
            min_vals = [[], [], []]
            max_vals = [[], [], []]
            size_vals = [[], [], []]
            to_2d = self.box_dimensions == '2D'
            dims = int(self.box_dimensions[0])
            for vec in verts:
                if has_mat_out or has_vert_out or has_limits:
                    # per-axis (max, min) pairs for this object
                    maxmin = list(zip(map(max, *vec), map(min, *vec)))

                if has_vert_out:
                    # cartesian product of the axis extremes -> 8 box corners
                    out = list(product(*reversed(maxmin)))
                    v_out = [l[::-1] for l in out[::-1]]
                    if to_2d:
                        verts_out.append([[v[0], v[1], 0] for v in v_out[:4]])
                        edges = edges[:4]  # rectangle: keep only bottom loop
                    else:
                        verts_out.append(v_out)
                    edges_out.append(edges)

                if has_mat_out:
                    mat_out.append(self.generate_matrix(maxmin, dims, to_2d))

                if has_mean:
                    mean_out.append(self.generate_mean(vec, dims, to_2d))

                if has_limits:
                    for i in range(dims):
                        min_vals[i].append([maxmin[i][1]])
                        max_vals[i].append([maxmin[i][0]])
                        size_vals[i].append([maxmin[i][0] - maxmin[i][1]])

            if has_vert_out:
                self.outputs['Vertices'].sv_set(verts_out)
            if self.outputs['Edges'].is_linked:
                self.outputs['Edges'].sv_set(edges_out)
            if has_mean:
                self.outputs['Mean'].sv_set(mean_out)
            if self.outputs['Center'].is_linked:
                self.outputs['Center'].sv_set(mat_out)
            vals = [min_vals, max_vals, size_vals]
            for j in range(3):
                for i, socket in enumerate(self.outputs[4 + 3 * j: 7 + 3 * j]):
                    if socket.is_linked:
                        socket.sv_set(vals[j][i])


def register():
    bpy.utils.register_class(SvBBoxNodeMk3)


def unregister():
    bpy.utils.unregister_class(SvBBoxNodeMk3)
unknown
codeparrot/codeparrot-clean
// Copyright 2023 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.

// Code generated by "stringer"; DO NOT EDIT.

package sql

import "strconv"

// _ is a compile-time guard emitted by stringer: if any txnEventType
// constant value drifts from the values assumed here, one of the array
// indexes below becomes negative and the file fails to compile,
// signalling that it must be regenerated.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[noEvent-0]
	_ = x[txnStart-1]
	_ = x[txnCommit-2]
	_ = x[txnRollback-3]
	_ = x[txnPrepare-4]
	_ = x[txnRestart-5]
	_ = x[txnUpgradeToExplicit-6]
}

// String returns the source name of the txnEventType constant, or a
// "txnEventType(N)" fallback for values outside the known range.
func (i txnEventType) String() string {
	switch i {
	case noEvent:
		return "noEvent"
	case txnStart:
		return "txnStart"
	case txnCommit:
		return "txnCommit"
	case txnRollback:
		return "txnRollback"
	case txnPrepare:
		return "txnPrepare"
	case txnRestart:
		return "txnRestart"
	case txnUpgradeToExplicit:
		return "txnUpgradeToExplicit"
	default:
		return "txnEventType(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
go
github
https://github.com/cockroachdb/cockroach
pkg/sql/txneventtype_string.go
import os import sys import json import shutil import pytest import virtool.jobs.pathoscope TEST_FILES_PATH = os.path.join(sys.path[0], "tests", "test_files") PATHOSCOPE_PATH = os.path.join(TEST_FILES_PATH, "pathoscope") BEST_HIT_PATH = os.path.join(PATHOSCOPE_PATH, "best_hit") RESULTS_PATH = os.path.join(PATHOSCOPE_PATH, "results.json") EM_PATH = os.path.join(PATHOSCOPE_PATH, "em") ISOLATES_VTA_PATH = os.path.join(PATHOSCOPE_PATH, "to_isolates.vta") MATRIX_PATH = os.path.join(PATHOSCOPE_PATH, "ps_matrix") REF_LENGTHS_PATH = os.path.join(PATHOSCOPE_PATH, "ref_lengths.json") SAM_PATH = os.path.join(PATHOSCOPE_PATH, "test_al.sam") SCORES = os.path.join(PATHOSCOPE_PATH, "scores") TO_SUBTRACTION_PATH = os.path.join(PATHOSCOPE_PATH, "to_subtraction.json") UNU_PATH = os.path.join(PATHOSCOPE_PATH, "unu") VTA_PATH = os.path.join(PATHOSCOPE_PATH, "test.vta") INDEX_PATH = os.path.join(TEST_FILES_PATH, "index") FASTQ_PATH = os.path.join(TEST_FILES_PATH, "test.fq") HOST_PATH = os.path.join(TEST_FILES_PATH, "index", "host") @pytest.fixture(scope="session") def otu_resource(): map_dict = dict() otus = dict() with open(VTA_PATH, "r") as handle: for line in handle: ref_id = line.split(",")[1] otu_id = "otu_{}".format(ref_id) map_dict[ref_id] = otu_id otus[otu_id] = { "otu": otu_id, "version": 2 } return map_dict, otus @pytest.fixture def mock_job(tmpdir, mocker, request, dbs, test_db_connection_string, test_db_name, otu_resource): # Add index files. shutil.copytree(INDEX_PATH, os.path.join(str(tmpdir), "references", "original", "index3")) # Add logs path. tmpdir.mkdir("logs").mkdir("jobs") # Add sample path. tmpdir.mkdir("samples").mkdir("foobar").mkdir("analysis") # Copy read files. 
shutil.copyfile(FASTQ_PATH, os.path.join(str(tmpdir), "samples", "foobar", "reads_1.fq")) settings = { "data_path": str(tmpdir), "db_name": test_db_name } sequence_otu_map, _ = otu_resource dbs.analyses.insert_one({ "_id": "baz", "workflow": "pathoscope_bowtie", "ready": False, "sample": { "id": "foobar" }, "subtraction": { "id": "Prunus persica" } }) dbs.jobs.insert_one({ "_id": "foobar", "task": "pathoscope_bowtie", "args": { "sample_id": "foobar", "analysis_id": "baz", "ref_id": "original", "index_id": "index3" }, "proc": 2, "mem": 8 }) dbs.indexes.insert_one({ "_id": "index3", "manifest": { "foobar": 10, "reo": 5, "baz": 6 }, "sequence_otu_map": sequence_otu_map }) dbs.samples.insert_one({ "_id": "foobar", "paired": False, "library_type": "normal", "subtraction": { "id": "Arabidopsis thaliana" }, "quality": { "count": 1337, "length": [78, 101] } }) queue = mocker.Mock() job = virtool.jobs.pathoscope.Job( test_db_connection_string, test_db_name, settings, "foobar", queue ) job.init_db() return job @pytest.mark.parametrize("paired", [False, True]) def test_check_db(tmpdir, paired, dbs, mock_job): """ Check that the method assigns various job attributes based on information from the database. 
""" dbs.samples.update_one({"_id": "foobar"}, { "$set": { "paired": paired } }) dbs.subtraction.insert_many([ {"_id": "Arabidopsis thaliana"}, {"_id": "Prunus persica"}, ]) mock_job.check_db() sample_path = os.path.join(str(tmpdir), "samples", "foobar") assert mock_job.params["read_count"] == 1337 expected_read_filenames = ["reads_1.fastq"] if paired: expected_read_filenames.append("reads_2.fastq") assert mock_job.params["subtraction_path"] == os.path.join( str(tmpdir), "subtractions", "prunus_persica", "reference" ) def test_make_analysis_dir(dbs, mock_job): mock_job.check_db() assert not os.path.isdir(mock_job.params["analysis_path"]) mock_job.make_analysis_dir() assert os.path.isdir(mock_job.params["analysis_path"]) def test_map_otus(tmpdir, dbs, mock_job): mock_job.check_db() os.makedirs(mock_job.params["analysis_path"]) mock_job.params["read_paths"] = [ os.path.join(str(tmpdir), "samples", "foobar", "reads_1.fq") ] mock_job.map_default_isolates() assert sorted(mock_job.intermediate["to_otus"]) == sorted([ "NC_013110", "NC_017938", "NC_006057", "NC_007448", "JQ080272", "NC_001836", "NC_003347", "NC_016509", "NC_017939", "NC_006056", "NC_003623", "KX109927", "NC_016416", "NC_001948", "NC_021148", "NC_003615", "NC_004006" ]) def test_map_isolates(snapshot, tmpdir, dbs, mock_job): mock_job.check_db() os.makedirs(mock_job.params["analysis_path"]) mock_job.params["read_paths"] = [ os.path.join(str(tmpdir), "samples", "foobar", "reads_1.fq") ] sample_path = os.path.join(str(tmpdir), "samples", "foobar") index_path = os.path.join(str(tmpdir), "references", "original", "index3") for filename in os.listdir(index_path): shutil.copyfile( os.path.join(index_path, filename), os.path.join(sample_path, "analysis", "baz", filename.replace("reference", "isolates")) ) mock_job.proc = 2 mock_job.map_isolates() vta_path = os.path.join(mock_job.params["analysis_path"], "to_isolates.vta") with open(vta_path, "r") as f: data = sorted([line.rstrip() for line in f]) 
snapshot.assert_match(data) def test_map_subtraction(snapshot, dbs, mock_job): mock_job.check_db() mock_job.proc = 2 mock_job.params["subtraction_path"] = HOST_PATH os.makedirs(mock_job.params["analysis_path"]) shutil.copyfile(FASTQ_PATH, os.path.join(mock_job.params["analysis_path"], "mapped.fastq")) mock_job.map_subtraction() snapshot.assert_match(mock_job.intermediate) def test_subtract_mapping(dbs, mock_job): mock_job.check_db() os.makedirs(mock_job.params["analysis_path"]) with open(TO_SUBTRACTION_PATH, "r") as handle: mock_job.intermediate["to_subtraction"] = json.load(handle) shutil.copyfile(VTA_PATH, os.path.join(mock_job.params["analysis_path"], "to_isolates.vta")) mock_job.subtract_mapping() assert mock_job.results["subtracted_count"] == 4 def test_pathoscope(snapshot, dbs, mock_job): mock_job.check_db() os.makedirs(mock_job.params["analysis_path"]) with open(REF_LENGTHS_PATH, "r") as handle: mock_job.intermediate["ref_lengths"] = json.load(handle) shutil.copyfile( VTA_PATH, os.path.join(mock_job.params["analysis_path"], "to_isolates.vta") ) mock_job.params["sequence_otu_map"] = { "NC_016509": "foobar", "NC_001948": "foobar", "13TF149_Reovirus_TF1_Seg06": "reo", "13TF149_Reovirus_TF1_Seg03": "reo", "13TF149_Reovirus_TF1_Seg07": "reo", "13TF149_Reovirus_TF1_Seg02": "reo", "13TF149_Reovirus_TF1_Seg08": "reo", "13TF149_Reovirus_TF1_Seg11": "reo", "13TF149_Reovirus_TF1_Seg04": "reo", "NC_004667": "foobar", "NC_003347": "foobar", "NC_003615": "foobar", "NC_003689": "foobar", "NC_011552": "foobar", "KX109927": "baz", "NC_008039": "foobar", "NC_015782": "foobar", "NC_016416": "foobar", "NC_003623": "foobar", "NC_008038": "foobar", "NC_001836": "foobar", "JQ080272": "baz", "NC_017938": "foobar", "NC_008037": "foobar", "NC_007448": "foobar" } mock_job.pathoscope() with open(os.path.join(mock_job.params["analysis_path"], "reassigned.vta"), "r") as f: data = sorted([line.rstrip() for line in f]) snapshot.assert_match(data) with 
open(os.path.join(mock_job.params["analysis_path"], "report.tsv"), "r") as f: data = sorted([line.rstrip() for line in f]) snapshot.assert_match(data) snapshot.assert_match(mock_job.results) def test_import_results(snapshot, dbs, mock_job): mock_job.check_db() mock_job.results = { "results": "results will be here", "read_count": 1337, "ready": True } mock_job.import_results() snapshot.assert_match(dbs.analyses.find_one()) snapshot.assert_match(dbs.samples.find_one())
unknown
codeparrot/codeparrot-clean
""" A DateTimeField and DateField that use the `dateutil` package for parsing. """ from __future__ import unicode_literals from dateutil import parser from wtforms.fields import Field from wtforms.validators import ValidationError from wtforms.widgets import TextInput __all__ = ( 'DateTimeField', 'DateField', ) # This is a fix to handle issues in dateutil which arose in version 2.2. # A bug ticket is filed: https://bugs.launchpad.net/dateutil/+bug/1247643 try: parser.parse('foobar') except TypeError: DATEUTIL_TYPEERROR_ISSUE = True except ValueError: DATEUTIL_TYPEERROR_ISSUE = False else: import warnings warnings.warn('In testing for a dateutil issue, we ran into a very strange error.', ImportWarning) class DateTimeField(Field): """ DateTimeField represented by a text input, accepts all input text formats that `dateutil.parser.parse` will. :param parse_kwargs: A dictionary of keyword args to pass to the dateutil parse() function. See dateutil docs for available keywords. :param display_format: A format string to pass to strftime() to format dates for display. 
""" widget = TextInput() def __init__(self, label=None, validators=None, parse_kwargs=None, display_format='%Y-%m-%d %H:%M', **kwargs): super(DateTimeField, self).__init__(label, validators, **kwargs) if parse_kwargs is None: parse_kwargs = {} self.parse_kwargs = parse_kwargs self.display_format = display_format def _value(self): if self.raw_data: return ' '.join(self.raw_data) else: return self.data and self.data.strftime(self.display_format) or '' def process_formdata(self, valuelist): if valuelist: date_str = ' '.join(valuelist) if not date_str: self.data = None raise ValidationError(self.gettext('Please input a date/time value')) parse_kwargs = self.parse_kwargs.copy() if 'default' not in parse_kwargs: try: parse_kwargs['default'] = self.default() except TypeError: parse_kwargs['default'] = self.default try: self.data = parser.parse(date_str, **parse_kwargs) except ValueError: self.data = None raise ValidationError(self.gettext('Invalid date/time input')) except TypeError: if not DATEUTIL_TYPEERROR_ISSUE: raise # If we're using dateutil 2.2, then consider it a normal # ValidationError. Hopefully dateutil fixes this issue soon. self.data = None raise ValidationError(self.gettext('Invalid date/time input')) class DateField(DateTimeField): """ Same as the DateTimeField, but stores only the date portion. """ def __init__(self, label=None, validators=None, parse_kwargs=None, display_format='%Y-%m-%d', **kwargs): super(DateField, self).__init__(label, validators, parse_kwargs=parse_kwargs, display_format=display_format, **kwargs) def process_formdata(self, valuelist): super(DateField, self).process_formdata(valuelist) if self.data is not None and hasattr(self.data, 'date'): self.data = self.data.date()
unknown
codeparrot/codeparrot-clean
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ Created on Apr 17, 2012 """ __author__ = "Shyue Ping Ong" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "shyuep@gmail.com" __date__ = "Apr 17, 2012" import unittest import os from pymatgen.core.structure import Molecule from pymatgen.io.xyz import XYZ from pymatgen.io.vasp.inputs import Poscar test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", 'test_files') class XYZTest(unittest.TestCase): def setUp(self): coords = [[0.000000, 0.000000, 0.000000], [0.000000, 0.000000, 1.089000], [1.026719, 0.000000, -0.363000], [-0.513360, -0.889165, -0.363000], [-0.513360, 0.889165, -0.363000]] coords2 = [[x + 10.0 for x in atom] for atom in coords] self.mol = Molecule(["C", "H", "H", "H", "H"], coords) self.multi_mols = [Molecule(["C", "H", "H", "H", "H"], coords) for coords in [coords, coords2]] self.xyz = XYZ(self.mol) self.multi_xyz = XYZ(self.multi_mols) def test_str(self): ans = """5 H4 C1 C 0.000000 0.000000 0.000000 H 0.000000 0.000000 1.089000 H 1.026719 0.000000 -0.363000 H -0.513360 -0.889165 -0.363000 H -0.513360 0.889165 -0.363000""" self.assertEqual(str(self.xyz), ans) mxyz = XYZ(self.multi_mols, coord_precision=3) mxyz_text = str(mxyz) ans_multi = """5 H4 C1 C 0.000 0.000 0.000 H 0.000 0.000 1.089 H 1.027 0.000 -0.363 H -0.513 -0.889 -0.363 H -0.513 0.889 -0.363 5 H4 C1 C 10.000 10.000 10.000 H 10.000 10.000 11.089 H 11.027 10.000 9.637 H 9.487 9.111 9.637 H 9.487 10.889 9.637""" self.assertEqual(mxyz_text, ans_multi) def test_from_string(self): ans = """5 H4 C1 C 0.000000 0.000000 0.000000 H 0.000000 0.000000 1.089000 H 1.026719 0.000000 -0.363000 H -0.513360 -0.889165 -0.363000 H -0.513360 0.889165 -0.363000""" xyz = XYZ.from_string(ans) mol = xyz.molecule sp = ["C", "H", "H", "H", "H"] for i, site in enumerate(mol): self.assertEqual(site.species_string, 
sp[i]) self.assertEqual(len(site.coords), 3) if i == 0: self.assertTrue(all([c == 0 for c in site.coords])) mol_str = """2 Random C 2.39132145462 -0.700993488928 -7.22293142224e-06 C 1.16730636786 -1.38166622735 -2.77112970359e-06 """ xyz = XYZ.from_string(mol_str) mol = xyz.molecule self.assertTrue(abs(mol[0].z) < 1e-5) self.assertTrue(abs(mol[1].z) < 1e-5) mol_str = """3 Random C 0.000000000000E+00 2.232615992397E+01 0.000000000000E+00 C -2.383225420567E-31 1.116307996198E+01 1.933502166311E+01 C -4.440892098501D-01 -1.116307996198d+01 1.933502166311E+01 """ xyz = XYZ.from_string(mol_str) mol = xyz.molecule self.assertAlmostEqual(mol[0].x, 0) self.assertAlmostEqual(mol[1].y, 11.16307996198) self.assertAlmostEqual(mol[2].x, -0.4440892098501) self.assertAlmostEqual(mol[2].y, -11.16307996198) # self.assertTrue(abs(mol[1].z) < 1e-5) def test_from_file(self): filepath = os.path.join(test_dir, 'multiple_frame_xyz.xyz') mxyz = XYZ.from_file(filepath) self.assertEqual(len(mxyz.all_molecules), 302) self.assertEqual(list(mxyz.all_molecules[0].cart_coords[0]), [0.20303525080000001, 2.8569761204000002, 0.44737723190000001]) self.assertEqual(list(mxyz.all_molecules[-1].cart_coords[-1]), [5.5355550720000002, 0.0282305931, -0.30993102189999999]) self.assertEqual(list(mxyz.molecule.cart_coords[-1]), [5.5355550720000002, 0.0282305931, -0.30993102189999999]) def test_init_from_structure(self): filepath = os.path.join(test_dir, 'POSCAR') poscar = Poscar.from_file(filepath) struct = poscar.structure xyz = XYZ(struct) ans = """24 Fe4 P4 O16 Fe 2.277347 4.550379 2.260125 Fe 2.928536 1.516793 4.639870 Fe 7.483231 4.550379 0.119620 Fe 8.134420 1.516793 2.499364 P 0.985089 1.516793 1.990624 P 4.220794 4.550379 4.370369 P 6.190973 1.516793 0.389120 P 9.426677 4.550379 2.768865 O 0.451582 4.550379 3.365614 O 1.006219 1.516793 3.528306 O 1.725331 0.279529 1.358282 O 1.725331 2.754057 1.358282 O 3.480552 3.313115 3.738027 O 3.480552 5.787643 3.738027 O 4.199665 4.550379 1.148562 O 4.754301 
1.516793 0.985870 O 5.657466 4.550379 3.773620 O 6.212102 1.516793 3.610928 O 6.931215 0.279529 1.021463 O 6.931215 2.754057 1.021463 O 8.686436 3.313115 3.401208 O 8.686436 5.787643 3.401208 O 9.405548 4.550379 1.231183 O 9.960184 1.516793 1.393875""" self.assertEqual(str(xyz), ans) if __name__ == "__main__": unittest.main()
unknown
codeparrot/codeparrot-clean
from datetime import datetime from django.forms import DateTimeInput from django.test import override_settings from django.utils import translation from .base import WidgetTest class DateTimeInputTest(WidgetTest): widget = DateTimeInput() def test_render_none(self): self.check_html(self.widget, 'date', None, '<input type="text" name="date" />') def test_render_value(self): """ The microseconds are trimmed on display, by default. """ d = datetime(2007, 9, 17, 12, 51, 34, 482548) self.assertEqual(str(d), '2007-09-17 12:51:34.482548') self.check_html(self.widget, 'date', d, html=( '<input type="text" name="date" value="2007-09-17 12:51:34" />' )) self.check_html(self.widget, 'date', datetime(2007, 9, 17, 12, 51, 34), html=( '<input type="text" name="date" value="2007-09-17 12:51:34" />' )) self.check_html(self.widget, 'date', datetime(2007, 9, 17, 12, 51), html=( '<input type="text" name="date" value="2007-09-17 12:51:00" />' )) def test_render_formatted(self): """ Use 'format' to change the way a value is displayed. """ widget = DateTimeInput( format='%d/%m/%Y %H:%M', attrs={'type': 'datetime'}, ) d = datetime(2007, 9, 17, 12, 51, 34, 482548) self.check_html(widget, 'date', d, html='<input type="datetime" name="date" value="17/09/2007 12:51" />') @override_settings(USE_L10N=True) @translation.override('de-at') def test_l10n(self): d = datetime(2007, 9, 17, 12, 51, 34, 482548) self.check_html(self.widget, 'date', d, html=( '<input type="text" name="date" value="17.09.2007 12:51:34" />' )) @override_settings(USE_L10N=True) @translation.override('de-at') def test_locale_aware(self): d = datetime(2007, 9, 17, 12, 51, 34, 482548) with self.settings(USE_L10N=False): self.check_html( self.widget, 'date', d, html='<input type="text" name="date" value="2007-09-17 12:51:34" />', ) with translation.override('es'): self.check_html( self.widget, 'date', d, html='<input type="text" name="date" value="17/09/2007 12:51:34" />', )
unknown
codeparrot/codeparrot-clean
"""Base email backend class.""" class BaseEmailBackend(object): """ Base class for email backend implementations. Subclasses must at least overwrite send_messages(). """ def __init__(self, fail_silently=False, **kwargs): self.fail_silently = fail_silently def open(self): """Open a network connection. This method can be overwritten by backend implementations to open a network connection. It's up to the backend implementation to track the status of a network connection if it's needed by the backend. This method can be called by applications to force a single network connection to be used when sending mails. See the send_messages() method of the SMTP backend for a reference implementation. The default implementation does nothing. """ pass def close(self): """Close a network connection.""" pass def send_messages(self, email_messages): """ Sends one or more EmailMessage objects and returns the number of email messages sent. """ raise NotImplementedError
unknown
codeparrot/codeparrot-clean
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy // SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: MIT use std::{ fs::{create_dir_all, File}, io::Write, path::{Path, PathBuf}, }; use handlebars::{to_json, Handlebars}; use include_dir::Dir; use serde::Serialize; use serde_json::value::{Map, Value as JsonValue}; use crate::error::ErrorExt; /// Map of template variable names and values. #[derive(Clone, Debug)] #[repr(transparent)] pub struct JsonMap(Map<String, JsonValue>); impl Default for JsonMap { fn default() -> Self { Self(Map::new()) } } impl JsonMap { pub fn insert(&mut self, name: &str, value: impl Serialize) { self.0.insert(name.to_owned(), to_json(value)); } pub fn inner(&self) -> &Map<String, JsonValue> { &self.0 } } pub fn render<P: AsRef<Path>, D: Serialize>( handlebars: &Handlebars<'_>, data: &D, dir: &Dir<'_>, out_dir: P, ) -> crate::Result<()> { let out_dir = out_dir.as_ref(); let mut created_dirs = Vec::new(); render_with_generator(handlebars, data, dir, out_dir, &mut |file_path: PathBuf| { let path = out_dir.join(file_path); let parent = path.parent().unwrap().to_path_buf(); if !created_dirs.contains(&parent) { create_dir_all(&parent)?; created_dirs.push(parent); } File::create(path).map(Some) }) } pub fn render_with_generator< P: AsRef<Path>, D: Serialize, F: FnMut(PathBuf) -> std::io::Result<Option<File>>, >( handlebars: &Handlebars<'_>, data: &D, dir: &Dir<'_>, out_dir: P, out_file_generator: &mut F, ) -> crate::Result<()> { let out_dir = out_dir.as_ref(); for file in dir.files() { let mut file_path = file.path().to_path_buf(); // cargo for some reason ignores the /templates folder packaging when it has a Cargo.toml file inside // so we rename the extension to `.crate-manifest` if let Some(extension) = file_path.extension() { if extension == "crate-manifest" { file_path.set_extension("toml"); } } if let Some(mut output_file) = out_file_generator(file_path.clone()) .fs_context("failed to generate output file", 
file_path.clone())? { if let Some(utf8) = file.contents_utf8() { handlebars .render_template_to_write(utf8, &data, &mut output_file) .expect("Failed to render template"); } else { output_file .write_all(file.contents()) .fs_context("failed to write template", file_path.clone())?; } } } for dir in dir.dirs() { render_with_generator(handlebars, data, dir, out_dir, out_file_generator)?; } Ok(()) }
rust
github
https://github.com/tauri-apps/tauri
crates/tauri-cli/src/helpers/template.rs
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions for downloading and reading MNIST data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import tempfile import numpy from six.moves import urllib from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
unknown
codeparrot/codeparrot-clean
package daemon import ( "context" "fmt" "maps" "os" "path/filepath" "slices" "strconv" "strings" cdcgroups "github.com/containerd/cgroups/v3" "github.com/containerd/containerd/v2/core/containers" "github.com/containerd/containerd/v2/pkg/apparmor" coci "github.com/containerd/containerd/v2/pkg/oci" "github.com/containerd/log" containertypes "github.com/moby/moby/api/types/container" dconfig "github.com/moby/moby/v2/daemon/config" "github.com/moby/moby/v2/daemon/container" "github.com/moby/moby/v2/daemon/internal/rootless/mountopts" "github.com/moby/moby/v2/daemon/internal/rootless/specconv" "github.com/moby/moby/v2/daemon/pkg/oci" "github.com/moby/moby/v2/daemon/pkg/oci/caps" volumemounts "github.com/moby/moby/v2/daemon/volume/mounts" "github.com/moby/moby/v2/errdefs" "github.com/moby/sys/mount" "github.com/moby/sys/mountinfo" "github.com/moby/sys/user" "github.com/moby/sys/userns" "github.com/opencontainers/cgroups" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) const inContainerInitPath = "/sbin/" + dconfig.DefaultInitBinary // withRlimits sets the container's rlimits along with merging the daemon's rlimits func withRlimits(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { var rlimits []specs.POSIXRlimit // We want to leave the original HostConfig alone so make a copy here hostConfig := *c.HostConfig // Merge with the daemon defaults daemon.mergeUlimits(&hostConfig, daemonCfg) for _, ul := range hostConfig.Ulimits { rlimits = append(rlimits, specs.POSIXRlimit{ Type: "RLIMIT_" + strings.ToUpper(ul.Name), Soft: uint64(ul.Soft), Hard: uint64(ul.Hard), }) } if s.Process == nil { s.Process = &specs.Process{} } s.Process.Rlimits = rlimits return nil } } // withRootless sets the spec to the rootless configuration func withRootless(daemon *Daemon, daemonCfg *dconfig.Config) coci.SpecOpts { return func(_ 
context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { var v2Controllers []string if cgroupDriver(daemonCfg) == cgroupSystemdDriver { if cdcgroups.Mode() != cdcgroups.Unified { return errors.New("rootless systemd driver doesn't support cgroup v1") } rootlesskitParentEUID := os.Getenv("ROOTLESSKIT_PARENT_EUID") if rootlesskitParentEUID == "" { return errors.New("$ROOTLESSKIT_PARENT_EUID is not set (requires RootlessKit v0.8.0)") } euid, err := strconv.Atoi(rootlesskitParentEUID) if err != nil { return errors.Wrap(err, "invalid $ROOTLESSKIT_PARENT_EUID: must be a numeric value") } controllersPath := fmt.Sprintf("/sys/fs/cgroup/user.slice/user-%d.slice/cgroup.controllers", euid) controllersFile, err := os.ReadFile(controllersPath) if err != nil { return err } v2Controllers = strings.Fields(string(controllersFile)) } return specconv.ToRootless(s, v2Controllers) } } // withRootfulInRootless is used for "rootful-in-rootless" dind; // the daemon is running in UserNS but has no access to RootlessKit API socket, host filesystem, etc. 
func withRootfulInRootless(daemon *Daemon, daemonCfg *dconfig.Config) coci.SpecOpts { return func(_ context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { specconv.ToRootfulInRootless(s) return nil } } // WithOOMScore sets the oom score func WithOOMScore(score *int) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { if s.Process == nil { s.Process = &specs.Process{} } s.Process.OOMScoreAdj = score return nil } } // WithSelinux sets the selinux labels func WithSelinux(c *container.Container) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { if s.Process == nil { s.Process = &specs.Process{} } if s.Linux == nil { s.Linux = &specs.Linux{} } s.Process.SelinuxLabel = c.GetProcessLabel() s.Linux.MountLabel = c.MountLabel return nil } } // WithApparmor sets the apparmor profile func WithApparmor(c *container.Container) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { if apparmor.HostSupports() { var appArmorProfile string if c.AppArmorProfile != "" { appArmorProfile = c.AppArmorProfile } else if c.HostConfig.Privileged { appArmorProfile = unconfinedAppArmorProfile } else { appArmorProfile = defaultAppArmorProfile } if appArmorProfile == defaultAppArmorProfile { // Unattended upgrades and other fun services can unload AppArmor // profiles inadvertently. Since we cannot store our profile in // /etc/apparmor.d, nor can we practically add other ways of // telling the system to keep our profile loaded, in order to make // sure that we keep the default profile enabled we dynamically // reload it if necessary. 
if err := ensureDefaultAppArmorProfile(); err != nil { return err } } if s.Process == nil { s.Process = &specs.Process{} } s.Process.ApparmorProfile = appArmorProfile } return nil } } // WithCapabilities adjusts the container's capabilities based on the // "CapAdd", "CapDrop", and "Privileged" fields in the container's HostConfig. func WithCapabilities(ctr *container.Container) coci.SpecOpts { return func(ctx context.Context, client coci.Client, c *containers.Container, s *specs.Spec) (err error) { capabilities, err := caps.TweakCapabilities( caps.DefaultCapabilities(), ctr.HostConfig.CapAdd, ctr.HostConfig.CapDrop, ctr.HostConfig.Privileged, ) if err != nil { return err } return coci.WithCapabilities(capabilities)(ctx, client, c, s) } } func resourcePath(c *container.Container, getPath func() (string, error)) (string, error) { p, err := getPath() if err != nil { return "", err } return c.GetResourcePath(p) } func getUser(c *container.Container, username string) (specs.User, error) { var usr specs.User passwdPath, err := resourcePath(c, user.GetPasswdPath) if err != nil { return usr, err } groupPath, err := resourcePath(c, user.GetGroupPath) if err != nil { return usr, err } execUser, err := user.GetExecUserPath(username, nil, passwdPath, groupPath) if err != nil { return usr, err } usr.UID = uint32(execUser.Uid) usr.GID = uint32(execUser.Gid) usr.AdditionalGids = []uint32{usr.GID} var addGroups []int if len(c.HostConfig.GroupAdd) > 0 { addGroups, err = user.GetAdditionalGroupsPath(c.HostConfig.GroupAdd, groupPath) if err != nil { return usr, err } } for _, g := range append(execUser.Sgids, addGroups...) 
{ usr.AdditionalGids = append(usr.AdditionalGids, uint32(g)) } return usr, nil } func setNamespace(s *specs.Spec, ns specs.LinuxNamespace) { if s.Linux == nil { s.Linux = &specs.Linux{} } for i, n := range s.Linux.Namespaces { if n.Type == ns.Type { s.Linux.Namespaces[i] = ns return } } s.Linux.Namespaces = append(s.Linux.Namespaces, ns) } // WithNamespaces sets the container's namespaces func WithNamespaces(daemon *Daemon, c *container.Container) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { userNS := false // user if c.HostConfig.UsernsMode.IsPrivate() { if uidMap := daemon.idMapping.UIDMaps; uidMap != nil { userNS = true setNamespace(s, specs.LinuxNamespace{ Type: specs.UserNamespace, }) s.Linux.UIDMappings = specMapping(uidMap) s.Linux.GIDMappings = specMapping(daemon.idMapping.GIDMaps) } } // network if !c.Config.NetworkDisabled { networkMode := c.HostConfig.NetworkMode switch { case networkMode.IsContainer(): nc, err := daemon.getNetworkedContainer(c.ID, networkMode.ConnectedContainer()) if err != nil { return err } setNamespace(s, specs.LinuxNamespace{ Type: specs.NetworkNamespace, Path: fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()), }) if userNS { // to share a net namespace, the containers must also share a user namespace. 
// // FIXME(thaJeztah): this will silently overwrite an earlier user namespace when joining multiple containers: https://github.com/moby/moby/issues/46210 setNamespace(s, specs.LinuxNamespace{ Type: specs.UserNamespace, Path: fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()), }) } case networkMode.IsHost(): oci.RemoveNamespace(s, specs.NetworkNamespace) default: setNamespace(s, specs.LinuxNamespace{ Type: specs.NetworkNamespace, }) } } // ipc ipcMode := c.HostConfig.IpcMode if !ipcMode.Valid() { return errdefs.InvalidParameter(errors.Errorf("invalid IPC mode: %v", ipcMode)) } switch { case ipcMode.IsContainer(): ic, err := daemon.getIPCContainer(ipcMode.Container()) if err != nil { return errors.Wrap(err, "failed to join IPC namespace") } setNamespace(s, specs.LinuxNamespace{ Type: specs.IPCNamespace, Path: fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()), }) if userNS { // to share a IPC namespace, the containers must also share a user namespace. // // FIXME(thaJeztah): this will silently overwrite an earlier user namespace when joining multiple containers: https://github.com/moby/moby/issues/46210 setNamespace(s, specs.LinuxNamespace{ Type: specs.UserNamespace, Path: fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()), }) } case ipcMode.IsHost(): oci.RemoveNamespace(s, specs.IPCNamespace) case ipcMode.IsEmpty(): // A container was created by an older version of the daemon. // The default behavior used to be what is now called "shareable". 
fallthrough case ipcMode.IsPrivate(), ipcMode.IsShareable(), ipcMode.IsNone(): setNamespace(s, specs.LinuxNamespace{ Type: specs.IPCNamespace, }) } // pid pidMode := c.HostConfig.PidMode if !pidMode.Valid() { return errdefs.InvalidParameter(errors.Errorf("invalid PID mode: %v", pidMode)) } switch { case pidMode.IsContainer(): pc, err := daemon.getPIDContainer(pidMode.Container()) if err != nil { return errors.Wrap(err, "failed to join PID namespace") } setNamespace(s, specs.LinuxNamespace{ Type: specs.PIDNamespace, Path: fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID()), }) if userNS { // to share a PID namespace, the containers must also share a user namespace. // // FIXME(thaJeztah): this will silently overwrite an earlier user namespace when joining multiple containers: https://github.com/moby/moby/issues/46210 setNamespace(s, specs.LinuxNamespace{ Type: specs.UserNamespace, Path: fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()), }) } case pidMode.IsHost(): oci.RemoveNamespace(s, specs.PIDNamespace) default: setNamespace(s, specs.LinuxNamespace{ Type: specs.PIDNamespace, }) } // uts if !c.HostConfig.UTSMode.Valid() { return errdefs.InvalidParameter(errors.Errorf("invalid UTS mode: %v", c.HostConfig.UTSMode)) } if c.HostConfig.UTSMode.IsHost() { oci.RemoveNamespace(s, specs.UTSNamespace) s.Hostname = "" } // cgroup if !c.HostConfig.CgroupnsMode.Valid() { return errdefs.InvalidParameter(errors.Errorf("invalid cgroup namespace mode: %v", c.HostConfig.CgroupnsMode)) } if c.HostConfig.CgroupnsMode.IsPrivate() { setNamespace(s, specs.LinuxNamespace{ Type: specs.CgroupNamespace, }) } return nil } } func specMapping(s []user.IDMap) []specs.LinuxIDMapping { var ids []specs.LinuxIDMapping for _, item := range s { ids = append(ids, specs.LinuxIDMapping{ HostID: uint32(item.ParentID), ContainerID: uint32(item.ID), Size: uint32(item.Count), }) } return ids } // Get the source mount point of directory passed in as argument. Also return // optional fields. 
// getSourceMount returns the mount point that contains source, together with
// that mount's mountinfo "optional fields" string (which carries propagation
// markers such as "shared:N" / "master:N").
func getSourceMount(source string) (string, string, error) {
	// Ensure any symlinks are resolved.
	sourcePath, err := filepath.EvalSymlinks(source)
	if err != nil {
		return "", "", err
	}

	// ParentsFilter keeps every mount whose mountpoint is a path-prefix of
	// sourcePath, i.e. all mounts that could contain it.
	mi, err := mountinfo.GetMounts(mountinfo.ParentsFilter(sourcePath))
	if err != nil {
		return "", "", err
	}
	if len(mi) < 1 {
		return "", "", fmt.Errorf("Can't find mount point of %s", source)
	}

	// find the longest mount point
	// The longest matching mountpoint is the innermost mount containing
	// sourcePath, so that is the one whose propagation flags matter.
	var idx, maxlen int
	for i := range mi {
		if len(mi[i].Mountpoint) > maxlen {
			maxlen = len(mi[i].Mountpoint)
			idx = i
		}
	}
	return mi[idx].Mountpoint, mi[idx].Optional, nil
}

// Prefixes used in mountinfo optional fields: "shared:N" marks a shared
// mount, "master:N" marks a slave mount (its master peer group).
const (
	sharedPropagationOption = "shared:"
	slavePropagationOption  = "master:"
)

// hasMountInfoOption checks whether any of the given option values is set in
// the passed-in space-separated option string (prefix match per option).
func hasMountInfoOption(opts string, vals ...string) bool {
	for opt := range strings.SplitSeq(opts, " ") {
		for _, val := range vals {
			if strings.HasPrefix(opt, val) {
				return true
			}
		}
	}
	return false
}

// Ensure mount point on which path is mounted, is shared.
func ensureShared(path string) error {
	sourceMount, optionalOpts, err := getSourceMount(path)
	if err != nil {
		return err
	}
	// Make sure source mount point is shared.
	if !hasMountInfoOption(optionalOpts, sharedPropagationOption) {
		return errors.Errorf("path %s is mounted on %s but it is not a shared mount", path, sourceMount)
	}
	return nil
}

// Ensure mount point on which path is mounted, is either shared or slave.
func ensureSharedOrSlave(path string) error { sourceMount, optionalOpts, err := getSourceMount(path) if err != nil { return err } if !hasMountInfoOption(optionalOpts, sharedPropagationOption, slavePropagationOption) { return errors.Errorf("path %s is mounted on %s but it is not a shared or slave mount", path, sourceMount) } return nil } var ( mountPropagationMap = map[string]int{ "private": mount.PRIVATE, "rprivate": mount.RPRIVATE, "shared": mount.SHARED, "rshared": mount.RSHARED, "slave": mount.SLAVE, "rslave": mount.RSLAVE, } mountPropagationReverseMap = map[int]string{ mount.PRIVATE: "private", mount.RPRIVATE: "rprivate", mount.SHARED: "shared", mount.RSHARED: "rshared", mount.SLAVE: "slave", mount.RSLAVE: "rslave", } ) // withMounts sets the container's mounts func withMounts(daemon *Daemon, daemonCfg *configStore, c *container.Container, mounts []container.Mount) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { sortMounts(mounts) userMounts := make(map[string]struct{}) for _, m := range mounts { userMounts[m.Destination] = struct{}{} } // Copy all mounts from spec to defaultMounts, except for // - mounts overridden by a user supplied mount; // - all mounts under /dev if a user supplied /dev is present; // - /dev/shm, in case IpcMode is none. // While at it, also // - set size for /dev/shm from shmsize. 
defaultMounts := s.Mounts[:0] _, mountDev := userMounts["/dev"] for _, m := range s.Mounts { if _, ok := userMounts[m.Destination]; ok { // filter out mount overridden by a user supplied mount continue } if mountDev && strings.HasPrefix(m.Destination, "/dev/") { // filter out everything under /dev if /dev is user-mounted continue } if m.Destination == "/dev/shm" { if c.HostConfig.IpcMode.IsNone() { // filter out /dev/shm for "none" IpcMode continue } // set size for /dev/shm mount from spec sizeOpt := "size=" + strconv.FormatInt(c.HostConfig.ShmSize, 10) m.Options = append(m.Options, sizeOpt) } defaultMounts = append(defaultMounts, m) } s.Mounts = defaultMounts for _, m := range mounts { if m.Source == "tmpfs" { data := m.Data parser := volumemounts.NewParser() options := []string{"noexec", "nosuid", "nodev", string(parser.DefaultPropagationMode())} if data != "" { options = append(options, strings.Split(data, ",")...) } merged, err := mount.MergeTmpfsOptions(options) if err != nil { return err } s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged}) continue } mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} // Determine property of RootPropagation based on volume // properties. If a volume is shared, then keep root propagation // shared. This should work for slave and private volumes too. // // For slave volumes, it can be either [r]shared/[r]slave. // // For private volumes any root propagation value should work. 
pFlag := mountPropagationMap[m.Propagation] switch pFlag { case mount.SHARED, mount.RSHARED: if err := ensureShared(m.Source); err != nil { return err } rootpg := mountPropagationMap[s.Linux.RootfsPropagation] if rootpg != mount.SHARED && rootpg != mount.RSHARED { if s.Linux == nil { s.Linux = &specs.Linux{} } s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] } case mount.SLAVE, mount.RSLAVE: var fallback bool if err := ensureSharedOrSlave(m.Source); err != nil { // For backwards compatibility purposes, treat mounts from the daemon root // as special since we automatically add rslave propagation to these mounts // when the user did not set anything, so we should fallback to the old // behavior which is to use private propagation which is normally the // default. if !strings.HasPrefix(m.Source, daemon.root) && !strings.HasPrefix(daemon.root, m.Source) { return err } cm, ok := c.MountPoints[m.Destination] if !ok { return err } if cm.Spec.BindOptions != nil && cm.Spec.BindOptions.Propagation != "" { // This means the user explicitly set a propagation, do not fallback in that case. 
return err } fallback = true log.G(ctx).WithField("container", c.ID).WithField("source", m.Source).Warn("Falling back to default propagation for bind source in daemon root") } if !fallback { rootpg := mountPropagationMap[s.Linux.RootfsPropagation] if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { if s.Linux == nil { s.Linux = &specs.Linux{} } s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] } } } bindMode := "rbind" if m.NonRecursive { bindMode = "bind" } opts := []string{bindMode} if !m.Writable { rro := true if m.ReadOnlyNonRecursive { rro = false if m.ReadOnlyForceRecursive { return errors.New("mount options conflict: ReadOnlyNonRecursive && ReadOnlyForceRecursive") } } if rroErr := supportsRecursivelyReadOnly(daemonCfg, c.HostConfig.Runtime); rroErr != nil { rro = false if m.ReadOnlyForceRecursive { return rroErr } } if rro { opts = append(opts, "rro") } else { opts = append(opts, "ro") } } if pFlag != 0 { opts = append(opts, mountPropagationReverseMap[pFlag]) } // If we are using user namespaces, then we must make sure that we // don't drop any of the CL_UNPRIVILEGED "locked" flags of the source // "mount" when we bind-mount. The reason for this is that at the point // when runc sets up the root filesystem, it is already inside a user // namespace, and thus cannot change any flags that are locked. if daemonCfg.RemappedRoot != "" || userns.RunningInUserNS() { unprivOpts, err := mountopts.UnprivilegedMountFlags(m.Source) if err != nil { return err } opts = append(opts, unprivOpts...) 
} mt.Options = opts s.Mounts = append(s.Mounts, mt) } if s.Root.Readonly { for i, m := range s.Mounts { switch m.Destination { case "/proc", "/dev/pts", "/dev/shm", "/dev/mqueue", "/dev": continue } if _, ok := userMounts[m.Destination]; !ok { if !slices.Contains(m.Options, "ro") { s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") } } } } if c.HostConfig.Privileged { // clear readonly for /sys for i := range s.Mounts { if s.Mounts[i].Destination == "/sys" { clearReadOnly(&s.Mounts[i]) } } if s.Linux != nil { s.Linux.ReadonlyPaths = nil s.Linux.MaskedPaths = nil } } // if the user didn't specify otherwise, default to the value of privileged writableCgroups := c.HostConfig.Privileged if c.WritableCgroups != nil { if daemonCfg.Rootless || daemon.idMapping.UIDMaps != nil { // error if the user requested a configuration we can't explicitly support return errors.New("option WritableCgroups conflicts with user namespaces and rootless mode") } writableCgroups = *c.WritableCgroups } // TODO: until a kernel/mount solution exists for handling remount in a user namespace, // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) if daemon.idMapping.UIDMaps != nil { writableCgroups = true } if writableCgroups { for i, m := range s.Mounts { if m.Type == "cgroup" { clearReadOnly(&s.Mounts[i]) } } } return nil } } // sysctlExists checks if a sysctl exists; runc will error if we add any that do not actually // exist, so do not add the default ones if running on an old kernel. 
func sysctlExists(s string) bool { f := filepath.Join("/proc", "sys", strings.ReplaceAll(s, ".", "/")) _, err := os.Stat(f) return err == nil } // withCommonOptions sets common docker options func withCommonOptions(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { if c.BaseFS == "" { return errors.New("populateCommonSpec: BaseFS of container " + c.ID + " is unexpectedly empty") } linkedEnv, err := daemon.setupLinkedContainers(c) if err != nil { return err } s.Root = &specs.Root{ Path: c.BaseFS, Readonly: c.HostConfig.ReadonlyRootfs, } if err := c.SetupWorkingDirectory(daemon.idMapping.RootPair()); err != nil { return err } cwd := c.Config.WorkingDir if cwd == "" { cwd = "/" } if s.Process == nil { s.Process = &specs.Process{} } s.Process.Args = append([]string{c.Path}, c.Args...) // only add the custom init if it is specified and the container is running in its // own private pid namespace. It does not make sense to add if it is running in the // host namespace or another container's pid namespace where we already have an init if c.HostConfig.PidMode.IsPrivate() { if (c.HostConfig.Init != nil && *c.HostConfig.Init) || (c.HostConfig.Init == nil && daemonCfg.Init) { s.Process.Args = append([]string{inContainerInitPath, "--", c.Path}, c.Args...) path, err := daemonCfg.LookupInitPath() // this will fall back to DefaultInitBinary and return an absolute path if err != nil { return err } s.Mounts = append(s.Mounts, specs.Mount{ Destination: inContainerInitPath, Type: "bind", Source: path, Options: []string{"bind", "ro"}, }) } } s.Process.Cwd = cwd s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) s.Process.Terminal = c.Config.Tty s.Hostname = c.Config.Hostname setLinuxDomainname(c, s) // Add default sysctls that are generally safe and useful; currently we // grant the capabilities to allow these anyway. 
You can override if // you want to restore the original behaviour. // We do not set network sysctls if network namespace is host, or if we are // joining an existing namespace, only if we create a new net namespace. if c.HostConfig.NetworkMode.IsPrivate() { // We cannot set up ping socket support in a user namespace userNS := daemonCfg.RemappedRoot != "" && c.HostConfig.UsernsMode.IsPrivate() if !userNS && !userns.RunningInUserNS() && sysctlExists("net.ipv4.ping_group_range") { // allow unprivileged ICMP echo sockets without CAP_NET_RAW s.Linux.Sysctl["net.ipv4.ping_group_range"] = "0 2147483647" } // allow opening any port less than 1024 without CAP_NET_BIND_SERVICE if sysctlExists("net.ipv4.ip_unprivileged_port_start") { s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"] = "0" } } return nil } } // withCgroups sets the container's cgroups func withCgroups(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { var cgroupsPath string scopePrefix := "docker" parent := "/docker" useSystemd := UsingSystemd(daemonCfg) if useSystemd { parent = "system.slice" if daemonCfg.Rootless { parent = "user.slice" } } if c.HostConfig.CgroupParent != "" { parent = c.HostConfig.CgroupParent } else if daemonCfg.CgroupParent != "" { parent = daemonCfg.CgroupParent } if useSystemd { cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID log.G(ctx).Debugf("createSpec: cgroupsPath: %s", cgroupsPath) } else { cgroupsPath = filepath.Join(parent, c.ID) } if s.Linux == nil { s.Linux = &specs.Linux{} } s.Linux.CgroupsPath = cgroupsPath // the rest is only needed for CPU RT controller if daemonCfg.CPURealtimePeriod == 0 && daemonCfg.CPURealtimeRuntime == 0 { return nil } p := cgroupsPath if useSystemd { path, err := cgroups.GetOwnCgroup("cpu") if err != nil { return errors.Wrap(err, "unable to init CPU RT controller") } p = filepath.Join(path, s.Linux.CgroupsPath) } // 
Clean path to guard against things like ../../../BAD parentPath := filepath.Dir(p) if !filepath.IsAbs(parentPath) { parentPath = filepath.Clean("/" + parentPath) } mnt, root, err := cgroups.FindCgroupMountpointAndRoot("", "cpu") if err != nil { return errors.Wrap(err, "unable to init CPU RT controller") } // When docker is run inside docker, the root is based of the host cgroup. // Should this be handled in runc/libcontainer/cgroups ? if strings.HasPrefix(root, "/docker/") { root = "/" } mnt = filepath.Join(mnt, root) if err := daemon.initCPURtController(daemonCfg, mnt, parentPath); err != nil { return errors.Wrap(err, "unable to init CPU RT controller") } return nil } } // WithDevices sets the container's devices func WithDevices(daemon *Daemon, c *container.Container) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { // Build lists of devices allowed and created within the container. var devs []specs.LinuxDevice devPermissions := s.Linux.Resources.Devices if c.HostConfig.Privileged { hostDevices, err := coci.HostDevices() if err != nil { return err } devs = append(devs, hostDevices...) 
// adding device mappings in privileged containers for _, deviceMapping := range c.HostConfig.Devices { // issue a warning that custom cgroup permissions are ignored in privileged mode if deviceMapping.CgroupPermissions != "rwm" { log.G(ctx).WithField("container", c.ID).Warnf("custom %s permissions for device %s are ignored in privileged mode", deviceMapping.CgroupPermissions, deviceMapping.PathOnHost) } // issue a warning that the device path already exists via /dev mounting in privileged mode if deviceMapping.PathOnHost == deviceMapping.PathInContainer { log.G(ctx).WithField("container", c.ID).Warnf("path in container %s already exists in privileged mode", deviceMapping.PathInContainer) continue } d, _, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, "rwm") if err != nil { return err } devs = append(devs, d...) } devPermissions = []specs.LinuxDeviceCgroup{ { Allow: true, Access: "rwm", }, } } else { for _, deviceMapping := range c.HostConfig.Devices { d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions) if err != nil { return err } devs = append(devs, d...) devPermissions = append(devPermissions, dPermissions...) } var err error devPermissions, err = oci.AppendDevicePermissionsFromCgroupRules(devPermissions, c.HostConfig.DeviceCgroupRules) if err != nil { return err } } if s.Linux == nil { s.Linux = &specs.Linux{} } if s.Linux.Resources == nil { s.Linux.Resources = &specs.LinuxResources{} } s.Linux.Devices = append(s.Linux.Devices, devs...) s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, devPermissions...) 
for _, req := range c.HostConfig.DeviceRequests { if err := daemon.handleDevice(req, s); err != nil { return err } } return nil } } // WithResources applies the container resources func WithResources(c *container.Container) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { r := c.HostConfig.Resources weightDevices, err := getBlkioWeightDevices(r) if err != nil { return err } readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps) if err != nil { return err } writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps) if err != nil { return err } readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps) if err != nil { return err } writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps) if err != nil { return err } memoryRes := getMemoryResources(r) cpuRes, err := getCPUResources(r) if err != nil { return err } if s.Linux == nil { s.Linux = &specs.Linux{} } if s.Linux.Resources == nil { s.Linux.Resources = &specs.LinuxResources{} } s.Linux.Resources.Memory = memoryRes s.Linux.Resources.CPU = cpuRes s.Linux.Resources.BlockIO = &specs.LinuxBlockIO{ WeightDevice: weightDevices, ThrottleReadBpsDevice: readBpsDevice, ThrottleWriteBpsDevice: writeBpsDevice, ThrottleReadIOPSDevice: readIOpsDevice, ThrottleWriteIOPSDevice: writeIOpsDevice, } if r.BlkioWeight != 0 { w := r.BlkioWeight s.Linux.Resources.BlockIO.Weight = &w } s.Linux.Resources.Pids = getPidsLimit(r) return nil } } // WithSysctls sets the container's sysctls func WithSysctls(c *container.Container) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { if len(c.HostConfig.Sysctls) == 0 { return nil } if s.Linux == nil { s.Linux = &specs.Linux{} } if s.Linux.Sysctl == nil { s.Linux.Sysctl = make(map[string]string) } // We merge the sysctls injected above with the HostConfig (latter takes // precedence for backwards-compatibility reasons). 
maps.Copy(s.Linux.Sysctl, c.HostConfig.Sysctls) return nil } } // WithUser sets the container's user func WithUser(c *container.Container) coci.SpecOpts { return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { if s.Process == nil { s.Process = &specs.Process{} } var err error s.Process.User, err = getUser(c, c.Config.User) return err } } func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *configStore, c *container.Container, mounts []container.Mount) (retSpec *specs.Spec, _ error) { var ( opts []coci.SpecOpts s = oci.DefaultSpec() ) opts = append(opts, withCommonOptions(daemon, &daemonCfg.Config, c), withCgroups(daemon, &daemonCfg.Config, c), WithResources(c), WithSysctls(c), WithDevices(daemon, c), withRlimits(daemon, &daemonCfg.Config, c), WithNamespaces(daemon, c), WithCapabilities(c), WithSeccomp(daemon, c), withMounts(daemon, daemonCfg, c, mounts), WithApparmor(c), WithSelinux(c), WithOOMScore(&c.HostConfig.OomScoreAdj), coci.WithAnnotations(c.HostConfig.Annotations), WithUser(c), ) if c.NoNewPrivileges { opts = append(opts, coci.WithNoNewPrivileges) } if c.Config.Tty { opts = append(opts, WithConsoleSize(c)) } // Set the masked and readonly paths with regard to the host config options if they are set. if c.HostConfig.MaskedPaths != nil { opts = append(opts, coci.WithMaskedPaths(c.HostConfig.MaskedPaths)) } if c.HostConfig.ReadonlyPaths != nil { opts = append(opts, coci.WithReadonlyPaths(c.HostConfig.ReadonlyPaths)) } if daemonCfg.Rootless { opts = append(opts, withRootless(daemon, &daemonCfg.Config)) } else if userns.RunningInUserNS() { opts = append(opts, withRootfulInRootless(daemon, &daemonCfg.Config)) } var snapshotter, snapshotKey string if daemon.UsesSnapshotter() { snapshotter = daemon.imageService.StorageDriver() snapshotKey = c.ID } return &s, coci.ApplyOpts(ctx, daemon.containerdClient, &containers.Container{ ID: c.ID, Snapshotter: snapshotter, SnapshotKey: snapshotKey, }, &s, opts...) 
}

// clearReadOnly removes the "ro" option from a mount, making it writable;
// all other options are kept in their original order.
func clearReadOnly(m *specs.Mount) {
	var opt []string
	for _, o := range m.Options {
		if o != "ro" {
			opt = append(opt, o)
		}
	}
	m.Options = opt
}

// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig
// Limits explicitly set on the container always win; a daemon default is only
// appended when the container did not set a limit of that name itself.
func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig, daemonCfg *dconfig.Config) {
	ulimits := c.Ulimits
	// Merge ulimits with daemon defaults
	// Index the container's own limits by name so defaults cannot shadow them.
	ulIdx := make(map[string]struct{})
	for _, ul := range ulimits {
		ulIdx[ul.Name] = struct{}{}
	}
	for name, ul := range daemonCfg.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}
	c.Ulimits = ulimits
}
go
github
https://github.com/moby/moby
daemon/oci_linux.go
# Copyright (c) 2013 ISP RAS.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.openstack.common import log as logging
from nova.scheduler import filters

LOG = logging.getLogger(__name__)


class PciPassthroughFilter(filters.BaseHostFilter):
    """Pci Passthrough Filter based on PCI request

    Accepts a host only when it can satisfy every PCI device request
    carried in the flavor's 'extra_specs'.

    The PCI resource tracker publishes a per-host summary of available
    devices, e.g.::

        | [{"count": 5, "vendor_id": "8086", "product_id": "1520",
        | "extra_info":'{}'}],

    while an instance expresses its needs as PCI requests, e.g.::

        | [{"count": 1, "vendor_id": "8086", "product_id": "1520",}].

    host_passes() compares the two and keeps or drops the host.
    """

    def host_passes(self, host_state, filter_properties):
        """Return true if the host has the required PCI devices."""
        requests = filter_properties.get('pci_requests')
        if not requests:
            # Nothing was requested, so every host qualifies.
            return True
        if host_state.pci_stats.support_requests(requests):
            return True
        LOG.debug("%(host_state)s doesn't have the required PCI devices"
                  " (%(requests)s)",
                  {'host_state': host_state, 'requests': requests})
        return False
unknown
codeparrot/codeparrot-clean
""" Utilities functions assisting the system tests """ from six.moves import http_client from drift.systesthelper import uuid_string, DriftBaseTestCase class BaseCloudkitTest(DriftBaseTestCase): def make_player(self, username=None): username = username or uuid_string() self.auth(username=username) player_url = self.endpoints["my_player"] r = self.get(player_url) player_name = "Player #%s" % self.player_id self.patch(player_url, data={"name": player_name}) # start by getting a client session (this should be in utils!) clients_url = self.endpoints["clients"] data = { "client_type": "client_type", "build": "build", "platform_type": "platform_type", "app_guid": "app_guid", "version": "version" } r = self.post(clients_url, data=data, expected_status_code=http_client.CREATED) new_jti = r.json()["jti"] self.headers["Authorization"] = "JTI %s" % new_jti r = self.get("/") self.endpoints = r.json()["endpoints"] return username class BaseMatchTest(BaseCloudkitTest): def _create_machine(self): if "service" not in self.current_user["roles"]: raise RuntimeError("Only service users can call this method") data = {"realm": "aws", "instance_name": "awsinstance", "placement": "placement", "instance_type": "instance_type", "instance_id": "instance_id", "public_ip": "8.8.8.8", } resp = self.post("/machines", data=data, expected_status_code=http_client.CREATED) url = resp.json()["url"] resp = self.get(url) return resp.json() def _create_server(self, machine_id): if "service" not in self.current_user["roles"]: raise RuntimeError("Only service users can call this method") data = {"machine_id": machine_id, "version": "version", "public_ip": "8.8.8.8", "port": 50000, "command_line": "command_line", "command_line_custom": "command_line_custom", "pid": 666, "status": "active", "image_name": "image_name", "branch": "develop", "commit_id": "commit_id", "process_info": {"process_info": "yes"}, "details": {"details": "yes"}, "ref": "test/testing", } resp = self.post("/servers", data=data, 
expected_status_code=http_client.CREATED) return resp.json() def _create_match(self, server_id=None, **kwargs): if "service" not in self.current_user["roles"]: raise RuntimeError("Only service users can call this method") if not server_id: machine = self._create_machine() server = self._create_server(machine["machine_id"]) server_id = server["server_id"] data = {"server_id": server_id, "status": "idle", "map_name": "map_name", "game_mode": "game_mode", "max_players": 2, } data.update(**kwargs) resp = self.post("/matches", data=data, expected_status_code=http_client.CREATED) resp = self.get(resp.json()["url"]) return resp.json() def _filter_matches(self, resp, match_ids): return [m for m in resp.json() if m["match_id"] in match_ids]
unknown
codeparrot/codeparrot-clean
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and / or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DF from openerp.addons.website.models.website import slug from urlparse import urljoin from itertools import product from collections import Counter from collections import OrderedDict from openerp.exceptions import UserError import datetime import logging import re import uuid _logger = logging.getLogger(__name__) class survey_stage(osv.Model): """Stages for Kanban view of surveys""" _name = 'survey.stage' _description = 'Survey Stage' _order = 'sequence,id' _columns = { 'name': fields.char(string="Name", required=True, translate=True), 'sequence': fields.integer(string="Sequence"), 'closed': fields.boolean(string="Closed", help="If closed, people won't be able to answer to surveys in this column."), 'fold': fields.boolean(string="Folded in kanban view") } _defaults = { 'sequence': 1, 'closed': False } _sql_constraints = [ ('positive_sequence', 'CHECK(sequence >= 0)', 'Sequence number MUST be a 
natural') ] class survey_survey(osv.Model): '''Settings for a multi-page/multi-question survey. Each survey can have one or more attached pages, and each page can display one or more questions. ''' _name = 'survey.survey' _description = 'Survey' _rec_name = 'title' _inherit = ['mail.thread', 'ir.needaction_mixin'] # Protected methods # def _has_questions(self, cr, uid, ids, context=None): """ Ensure that this survey has at least one page with at least one question. """ for survey in self.browse(cr, uid, ids, context=context): if not survey.page_ids or not [page.question_ids for page in survey.page_ids if page.question_ids]: return False return True ## Function fields ## def _is_designed(self, cr, uid, ids, name, arg, context=None): res = dict() for survey in self.browse(cr, uid, ids, context=context): if not survey.page_ids or not [page.question_ids for page in survey.page_ids if page.question_ids]: res[survey.id] = False else: res[survey.id] = True return res def _get_tot_sent_survey(self, cr, uid, ids, name, arg, context=None): """ Returns the number of invitations sent for this survey, be they (partially) completed or not """ res = dict((id, 0) for id in ids) sur_res_obj = self.pool.get('survey.user_input') for id in ids: res[id] = sur_res_obj.search(cr, uid, # SUPERUSER_ID, [('survey_id', '=', id), ('type', '=', 'link')], context=context, count=True) return res def _get_tot_start_survey(self, cr, uid, ids, name, arg, context=None): """ Returns the number of started instances of this survey, be they completed or not """ res = dict((id, 0) for id in ids) sur_res_obj = self.pool.get('survey.user_input') for id in ids: res[id] = sur_res_obj.search(cr, uid, # SUPERUSER_ID, ['&', ('survey_id', '=', id), '|', ('state', '=', 'skip'), ('state', '=', 'done')], context=context, count=True) return res def _get_tot_comp_survey(self, cr, uid, ids, name, arg, context=None): """ Returns the number of completed instances of this survey """ res = dict((id, 0) for id in ids) 
sur_res_obj = self.pool.get('survey.user_input') for id in ids: res[id] = sur_res_obj.search(cr, uid, # SUPERUSER_ID, [('survey_id', '=', id), ('state', '=', 'done')], context=context, count=True) return res def _get_public_url(self, cr, uid, ids, name, arg, context=None): """ Computes a public URL for the survey """ if context and context.get('relative_url'): base_url = '/' else: base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url') res = {} for survey in self.browse(cr, uid, ids, context=context): res[survey.id] = urljoin(base_url, "survey/start/%s" % slug(survey)) return res def _get_public_url_html(self, cr, uid, ids, name, arg, context=None): """ Computes a public URL for the survey (html-embeddable version)""" urls = self._get_public_url(cr, uid, ids, name, arg, context=context) for id, url in urls.iteritems(): urls[id] = '<a href="%s">%s</a>' % (url, _("Click here to start survey")) return urls def _get_print_url(self, cr, uid, ids, name, arg, context=None): """ Computes a printing URL for the survey """ if context and context.get('relative_url'): base_url = '/' else: base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url') res = {} for survey in self.browse(cr, uid, ids, context=context): res[survey.id] = urljoin(base_url, "survey/print/%s" % slug(survey)) return res def _get_result_url(self, cr, uid, ids, name, arg, context=None): """ Computes an URL for the survey results """ if context and context.get('relative_url'): base_url = '/' else: base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url') res = {} for survey in self.browse(cr, uid, ids, context=context): res[survey.id] = urljoin(base_url, "survey/results/%s" % slug(survey)) return res # Model fields # _columns = { 'title': fields.char('Title', required=1, translate=True), 'page_ids': fields.one2many('survey.page', 'survey_id', 'Pages', copy=True), 'stage_id': fields.many2one('survey.stage', string="Stage", ondelete="set null", 
copy=False), 'auth_required': fields.boolean('Login required', help="Users with a public link will be requested to login before taking part to the survey", oldname="authenticate"), 'users_can_go_back': fields.boolean('Users can go back', help="If checked, users can go back to previous pages."), 'tot_sent_survey': fields.function(_get_tot_sent_survey, string="Number of sent surveys", type="integer"), 'tot_start_survey': fields.function(_get_tot_start_survey, string="Number of started surveys", type="integer"), 'tot_comp_survey': fields.function(_get_tot_comp_survey, string="Number of completed surveys", type="integer"), 'description': fields.html('Description', translate=True, oldname="description", help="A long description of the purpose of the survey"), 'color': fields.integer('Color Index'), 'user_input_ids': fields.one2many('survey.user_input', 'survey_id', 'User responses', readonly=1), 'designed': fields.function(_is_designed, string="Is designed?", type="boolean"), 'public_url': fields.function(_get_public_url, string="Public link", type="char"), 'public_url_html': fields.function(_get_public_url_html, string="Public link (html version)", type="char"), 'print_url': fields.function(_get_print_url, string="Print link", type="char"), 'result_url': fields.function(_get_result_url, string="Results link", type="char"), 'email_template_id': fields.many2one('mail.template', 'Email Template', ondelete='set null'), 'thank_you_message': fields.html('Thank you message', translate=True, help="This message will be displayed when survey is completed"), 'quizz_mode': fields.boolean(string='Quiz mode') } def _default_stage(self, cr, uid, context=None): ids = self.pool['survey.stage'].search(cr, uid, [], limit=1, context=context) if ids: return ids[0] return False _defaults = { 'color': 0, 'stage_id': lambda self, *a, **kw: self._default_stage(*a, **kw) } def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None): """ 
Read group customization in order to display all the stages in the kanban view, even if they are empty """ stage_obj = self.pool.get('survey.stage') order = stage_obj._order access_rights_uid = access_rights_uid or uid if read_group_order == 'stage_id desc': order = '%s desc' % order stage_ids = stage_obj._search(cr, uid, [], order=order, access_rights_uid=access_rights_uid, context=context) result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context) # restore order of the search result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0]))) fold = {} for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context): fold[stage.id] = stage.fold or False return result, fold _group_by_full = { 'stage_id': _read_group_stage_ids } # Public methods # def copy_data(self, cr, uid, id, default=None, context=None): current_rec = self.read(cr, uid, id, fields=['title'], context=context) title = _("%s (copy)") % (current_rec.get('title')) default = dict(default or {}, title=title) return super(survey_survey, self).copy_data(cr, uid, id, default, context=context) def next_page(self, cr, uid, user_input, page_id, go_back=False, context=None): '''The next page to display to the user, knowing that page_id is the id of the last displayed page. If page_id == 0, it will always return the first page of the survey. If all the pages have been displayed and go_back == False, it will return None If go_back == True, it will return the *previous* page instead of the next page. .. note:: It is assumed here that a careful user will not try to set go_back to True if she knows that the page to display is the first one! 
(doing this will probably cause a giant worm to eat her house)''' survey = user_input.survey_id pages = list(enumerate(survey.page_ids)) # First page if page_id == 0: return (pages[0][1], 0, len(pages) == 1) current_page_index = pages.index((filter(lambda p: p[1].id == page_id, pages))[0]) # All the pages have been displayed if current_page_index == len(pages) - 1 and not go_back: return (None, -1, False) # Let's get back, baby! elif go_back and survey.users_can_go_back: return (pages[current_page_index - 1][1], current_page_index - 1, False) else: # This will show the last page if current_page_index == len(pages) - 2: return (pages[current_page_index + 1][1], current_page_index + 1, True) # This will show a regular page else: return (pages[current_page_index + 1][1], current_page_index + 1, False) def filter_input_ids(self, cr, uid, survey, filters, finished=False, context=None): '''If user applies any filters, then this function returns list of filtered user_input_id and label's strings for display data in web. :param filters: list of dictionary (having: row_id, ansewr_id) :param finished: True for completely filled survey,Falser otherwise. :returns list of filtered user_input_ids. 
''' context = context if context else {} if filters: input_line_obj = self.pool.get('survey.user_input_line') domain_filter, choice, filter_display_data = [], [], [] for filter in filters: row_id, answer_id = filter['row_id'], filter['answer_id'] if row_id == 0: choice.append(answer_id) else: domain_filter.extend(['|', ('value_suggested_row.id', '=', row_id), ('value_suggested.id', '=', answer_id)]) if choice: domain_filter.insert(0, ('value_suggested.id', 'in', choice)) else: domain_filter = domain_filter[1:] line_ids = input_line_obj.search(cr, uid, domain_filter, context=context) filtered_input_ids = [input.user_input_id.id for input in input_line_obj.browse(cr, uid, line_ids, context=context)] else: filtered_input_ids, filter_display_data = [], [] if finished: user_input = self.pool.get('survey.user_input') if not filtered_input_ids: current_filters = user_input.search(cr, uid, [('survey_id', '=', survey.id)], context=context) user_input_objs = user_input.browse(cr, uid, current_filters, context=context) else: user_input_objs = user_input.browse(cr, uid, filtered_input_ids, context=context) return [input.id for input in user_input_objs if input.state == 'done'] return filtered_input_ids def get_filter_display_data(self, cr, uid, filters, context): '''Returns data to display current filters :param filters: list of dictionary (having: row_id, answer_id) :param finished: True for completely filled survey, False otherwise. :returns list of dict having data to display filters. 
''' filter_display_data = [] if filters: question_obj = self.pool.get('survey.question') label_obj = self.pool.get('survey.label') for filter in filters: row_id, answer_id = filter['row_id'], filter['answer_id'] question_id = label_obj.browse(cr, uid, answer_id, context=context).question_id.id question = question_obj.browse(cr, uid, question_id, context=context) if row_id == 0: labels = label_obj.browse(cr, uid, [answer_id], context=context) else: labels = label_obj.browse(cr, uid, [row_id, answer_id], context=context) filter_display_data.append({'question_text': question.question, 'labels': [label.value for label in labels]}) return filter_display_data def prepare_result(self, cr, uid, question, current_filters=None, context=None): ''' Compute statistical data for questions by counting number of vote per choice on basis of filter ''' current_filters = current_filters if current_filters else [] context = context if context else {} result_summary = {} #Calculate and return statistics for choice if question.type in ['simple_choice', 'multiple_choice']: answers = {} comments = [] [answers.update({label.id: {'text': label.value, 'count': 0, 'answer_id': label.id}}) for label in question.labels_ids] for input_line in question.user_input_line_ids: if input_line.answer_type == 'suggestion' and answers.get(input_line.value_suggested.id) and (not(current_filters) or input_line.user_input_id.id in current_filters): answers[input_line.value_suggested.id]['count'] += 1 if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters): comments.append(input_line) result_summary = {'answers': answers.values(), 'comments': comments} #Calculate and return statistics for matrix if question.type == 'matrix': rows = OrderedDict() answers = OrderedDict() res = dict() comments = [] [rows.update({label.id: label.value}) for label in question.labels_ids_2] [answers.update({label.id: label.value}) for label in question.labels_ids] for cell in 
product(rows.keys(), answers.keys()): res[cell] = 0 for input_line in question.user_input_line_ids: if input_line.answer_type == 'suggestion' and (not(current_filters) or input_line.user_input_id.id in current_filters): res[(input_line.value_suggested_row.id, input_line.value_suggested.id)] += 1 if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters): comments.append(input_line) result_summary = {'answers': answers, 'rows': rows, 'result': res, 'comments': comments} #Calculate and return statistics for free_text, textbox, datetime if question.type in ['free_text', 'textbox', 'datetime']: result_summary = [] for input_line in question.user_input_line_ids: if not(current_filters) or input_line.user_input_id.id in current_filters: result_summary.append(input_line) #Calculate and return statistics for numerical_box if question.type == 'numerical_box': result_summary = {'input_lines': []} all_inputs = [] for input_line in question.user_input_line_ids: if not(current_filters) or input_line.user_input_id.id in current_filters: all_inputs.append(input_line.value_number) result_summary['input_lines'].append(input_line) if all_inputs: result_summary.update({'average': round(sum(all_inputs) / len(all_inputs), 2), 'max': round(max(all_inputs), 2), 'min': round(min(all_inputs), 2), 'sum': sum(all_inputs), 'most_comman': Counter(all_inputs).most_common(5)}) return result_summary def get_input_summary(self, cr, uid, question, current_filters=None, context=None): ''' Returns overall summary of question e.g. 
answered, skipped, total_inputs on basis of filter ''' current_filters = current_filters if current_filters else [] context = context if context else {} result = {} if question.survey_id.user_input_ids: total_input_ids = current_filters or [input_id.id for input_id in question.survey_id.user_input_ids if input_id.state != 'new'] result['total_inputs'] = len(total_input_ids) question_input_ids = [] for user_input in question.user_input_line_ids: if not user_input.skipped: question_input_ids.append(user_input.user_input_id.id) result['answered'] = len(set(question_input_ids) & set(total_input_ids)) result['skipped'] = result['total_inputs'] - result['answered'] return result # Actions def action_start_survey(self, cr, uid, ids, context=None): ''' Open the website page with the survey form ''' trail = "" context = dict(context or {}, relative_url=True) if 'survey_token' in context: trail = "/" + context['survey_token'] return { 'type': 'ir.actions.act_url', 'name': "Start Survey", 'target': 'self', 'url': self.read(cr, uid, ids, ['public_url'], context=context)[0]['public_url'] + trail } def action_send_survey(self, cr, uid, ids, context=None): ''' Open a window to compose an email, pre-filled with the survey message ''' if not self._has_questions(cr, uid, ids, context=None): raise UserError(_('You cannot send an invitation for a survey that has no questions.')) survey_browse = self.pool.get('survey.survey').browse(cr, uid, ids, context=context)[0] if survey_browse.stage_id.closed: raise UserError(_("You cannot send invitations for closed surveys.")) assert len(ids) == 1, 'This option should only be used for a single \ survey at a time.' 
ir_model_data = self.pool.get('ir.model.data') templates = ir_model_data.get_object_reference(cr, uid, 'survey', 'email_template_survey') template_id = templates[1] if len(templates) > 0 else False ctx = dict(context) ctx.update({'default_model': 'survey.survey', 'default_res_id': ids[0], 'default_survey_id': ids[0], 'default_use_template': bool(template_id), 'default_template_id': template_id, 'default_composition_mode': 'comment'} ) return { 'type': 'ir.actions.act_window', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'survey.mail.compose.message', 'target': 'new', 'context': ctx, } def action_print_survey(self, cr, uid, ids, context=None): ''' Open the website page with the survey printable view ''' trail = "" context = dict(context or {}, relative_url=True) if 'survey_token' in context: trail = "/" + context['survey_token'] return { 'type': 'ir.actions.act_url', 'name': "Print Survey", 'target': 'self', 'url': self.read(cr, uid, ids, ['print_url'], context=context)[0]['print_url'] + trail } def action_result_survey(self, cr, uid, ids, context=None): ''' Open the website page with the survey results view ''' context = dict(context or {}, relative_url=True) return { 'type': 'ir.actions.act_url', 'name': "Results of the Survey", 'target': 'self', 'url': self.read(cr, uid, ids, ['result_url'], context=context)[0]['result_url'] } def action_test_survey(self, cr, uid, ids, context=None): ''' Open the website page with the survey form into test mode''' context = dict(context or {}, relative_url=True) return { 'type': 'ir.actions.act_url', 'name': "Results of the Survey", 'target': 'self', 'url': self.read(cr, uid, ids, ['public_url'], context=context)[0]['public_url'] + "/phantom" } class survey_page(osv.Model): '''A page for a survey. Pages are essentially containers, allowing to group questions by ordered screens. .. note:: A page should be deleted if the survey it belongs to is deleted. 
''' _name = 'survey.page' _description = 'Survey Page' _rec_name = 'title' _order = 'sequence,id' # Model Fields # _columns = { 'title': fields.char('Page Title', required=1, translate=True), 'survey_id': fields.many2one('survey.survey', 'Survey', ondelete='cascade', required=True), 'question_ids': fields.one2many('survey.question', 'page_id', 'Questions', copy=True), 'sequence': fields.integer('Page number'), 'description': fields.html('Description', help="An introductory text to your page", translate=True, oldname="note"), } _defaults = { 'sequence': 10 } # Public methods # def copy_data(self, cr, uid, ids, default=None, context=None): current_rec = self.read(cr, uid, ids, fields=['title'], context=context) title = _("%s (copy)") % (current_rec.get('title')) default = dict(default or {}, title=title) return super(survey_page, self).copy_data(cr, uid, ids, default, context=context) class survey_question(osv.Model): ''' Questions that will be asked in a survey. Each question can have one of more suggested answers (eg. 
in case of dropdown choices, multi-answer checkboxes, radio buttons...).''' _name = 'survey.question' _description = 'Survey Question' _rec_name = 'question' _order = 'sequence,id' # Model fields # _columns = { # Question metadata 'page_id': fields.many2one('survey.page', 'Survey page', ondelete='cascade', required=1), 'survey_id': fields.related('page_id', 'survey_id', type='many2one', relation='survey.survey', string='Survey'), 'sequence': fields.integer(string='Sequence'), # Question 'question': fields.char('Question Name', required=1, translate=True), 'description': fields.html('Description', help="Use this field to add \ additional explanations about your question", translate=True, oldname='descriptive_text'), # Answer 'type': fields.selection([('free_text', 'Multiple Lines Text Box'), ('textbox', 'Single Line Text Box'), ('numerical_box', 'Numerical Value'), ('datetime', 'Date and Time'), ('simple_choice', 'Multiple choice: only one answer'), ('multiple_choice', 'Multiple choice: multiple answers allowed'), ('matrix', 'Matrix')], 'Type of Question', size=15, required=1), 'matrix_subtype': fields.selection([('simple', 'One choice per row'), ('multiple', 'Multiple choices per row')], 'Matrix Type'), 'labels_ids': fields.one2many('survey.label', 'question_id', 'Types of answers', oldname='answer_choice_ids', copy=True), 'labels_ids_2': fields.one2many('survey.label', 'question_id_2', 'Rows of the Matrix', copy=True), # labels are used for proposed choices # if question.type == simple choice | multiple choice # -> only labels_ids is used # if question.type == matrix # -> labels_ids are the columns of the matrix # -> labels_ids_2 are the rows of the matrix # Display options 'column_nb': fields.selection([('12', '1'), ('6', '2'), ('4', '3'), ('3', '4'), ('2', '6')], 'Number of columns'), # These options refer to col-xx-[12|6|4|3|2] classes in Bootstrap 'display_mode': fields.selection([('columns', 'Radio Buttons'), ('dropdown', 'Selection Box')], 'Display mode'), # 
Comments 'comments_allowed': fields.boolean('Show Comments Field', oldname="allow_comment"), 'comments_message': fields.char('Comment Message', translate=True), 'comment_count_as_answer': fields.boolean('Comment Field is an Answer Choice', oldname='make_comment_field'), # Validation 'validation_required': fields.boolean('Validate entry', oldname='is_validation_require'), 'validation_email': fields.boolean('Input must be an email'), 'validation_length_min': fields.integer('Minimum Text Length'), 'validation_length_max': fields.integer('Maximum Text Length'), 'validation_min_float_value': fields.float('Minimum value'), 'validation_max_float_value': fields.float('Maximum value'), 'validation_min_date': fields.datetime('Minimum Date'), 'validation_max_date': fields.datetime('Maximum Date'), 'validation_error_msg': fields.char('Error message', oldname='validation_valid_err_msg', translate=True), # Constraints on number of answers (matrices) 'constr_mandatory': fields.boolean('Mandatory Answer', oldname="is_require_answer"), 'constr_error_msg': fields.char("Error message", oldname='req_error_msg', translate=True), 'user_input_line_ids': fields.one2many('survey.user_input_line', 'question_id', 'Answers', domain=[('skipped', '=', False)]), } _defaults = { 'page_id': lambda self, cr, uid, context: context.get('page_id'), 'sequence': 10, 'type': 'free_text', 'matrix_subtype': 'simple', 'column_nb': '12', 'display_mode': 'columns', 'constr_error_msg': lambda s, cr, uid, c: _('This question requires an answer.'), 'validation_error_msg': lambda s, cr, uid, c: _('The answer you entered has an invalid format.'), 'validation_required': False, 'comments_message': lambda s, cr, uid, c: _('If other, precise:'), } _sql_constraints = [ ('positive_len_min', 'CHECK (validation_length_min >= 0)', 'A length must be positive!'), ('positive_len_max', 'CHECK (validation_length_max >= 0)', 'A length must be positive!'), ('validation_length', 'CHECK (validation_length_min <= 
validation_length_max)', 'Max length cannot be smaller than min length!'), ('validation_float', 'CHECK (validation_min_float_value <= validation_max_float_value)', 'Max value cannot be smaller than min value!'), ('validation_date', 'CHECK (validation_min_date <= validation_max_date)', 'Max date cannot be smaller than min date!') ] def onchange_validation_email(self, cr, uid, ids, validation_email, context=None): return {'value': {'validation_required': False}} if validation_email else {} def copy_data(self, cr, uid, ids, default=None, context=None): current_rec = self.read(cr, uid, ids, context=context) question = _("%s (copy)") % (current_rec.get('question')) default = dict(default or {}, question=question) return super(survey_question, self).copy_data(cr, uid, ids, default, context=context) # Validation methods def validate_question(self, cr, uid, question, post, answer_tag, context=None): ''' Validate question, depending on question type and parameters ''' try: checker = getattr(self, 'validate_' + question.type) except AttributeError: _logger.warning(question.type + ": This type of question has no validation method") return {} else: return checker(cr, uid, question, post, answer_tag, context=context) def validate_free_text(self, cr, uid, question, post, answer_tag, context=None): errors = {} answer = post[answer_tag].strip() # Empty answer to mandatory question if question.constr_mandatory and not answer: errors.update({answer_tag: question.constr_error_msg}) return errors def validate_textbox(self, cr, uid, question, post, answer_tag, context=None): errors = {} answer = post[answer_tag].strip() # Empty answer to mandatory question if question.constr_mandatory and not answer: errors.update({answer_tag: question.constr_error_msg}) # Email format validation # Note: this validation is very basic: # all the strings of the form # <something>@<anything>.<extension> # will be accepted if answer and question.validation_email: if not re.match(r"[^@]+@[^@]+\.[^@]+", 
answer): errors.update({answer_tag: _('This answer must be an email address')}) # Answer validation (if properly defined) # Length of the answer must be in a range if answer and question.validation_required: if not (question.validation_length_min <= len(answer) <= question.validation_length_max): errors.update({answer_tag: question.validation_error_msg}) return errors def validate_numerical_box(self, cr, uid, question, post, answer_tag, context=None): errors = {} answer = post[answer_tag].strip() # Empty answer to mandatory question if question.constr_mandatory and not answer: errors.update({answer_tag: question.constr_error_msg}) # Checks if user input is a number if answer: try: floatanswer = float(answer) except ValueError: errors.update({answer_tag: _('This is not a number')}) # Answer validation (if properly defined) if answer and question.validation_required: # Answer is not in the right range try: floatanswer = float(answer) # check that it is a float has been done hereunder if not (question.validation_min_float_value <= floatanswer <= question.validation_max_float_value): errors.update({answer_tag: question.validation_error_msg}) except ValueError: pass return errors def validate_datetime(self, cr, uid, question, post, answer_tag, context=None): errors = {} answer = post[answer_tag].strip() # Empty answer to mandatory question if question.constr_mandatory and not answer: errors.update({answer_tag: question.constr_error_msg}) # Checks if user input is a datetime if answer: try: dateanswer = datetime.datetime.strptime(answer, DF) except ValueError: errors.update({answer_tag: _('This is not a date/time')}) return errors # Answer validation (if properly defined) if answer and question.validation_required: # Answer is not in the right range try: dateanswer = datetime.datetime.strptime(answer, DF) min_date = question.validation_min_date and datetime.datetime.strptime(question.validation_min_date, DF) or False max_date = question.validation_max_date and 
datetime.datetime.strptime(question.validation_max_date, DF) or False if (min_date and max_date and not(min_date <= dateanswer <= max_date)): # If Minimum and Maximum Date are entered errors.update({answer_tag: question.validation_error_msg}) elif (min_date and not(min_date <= dateanswer)): # If only Minimum Date is entered and not Define Maximum Date errors.update({answer_tag: question.validation_error_msg}) elif (max_date and not(dateanswer <= max_date)): # If only Maximum Date is entered and not Define Minimum Date errors.update({answer_tag: question.validation_error_msg}) except ValueError: # check that it is a datetime has been done hereunder pass return errors def validate_simple_choice(self, cr, uid, question, post, answer_tag, context=None): errors = {} if question.comments_allowed: comment_tag = "%s_%s" % (answer_tag, 'comment') # Empty answer to mandatory question if question.constr_mandatory and not answer_tag in post: errors.update({answer_tag: question.constr_error_msg}) if question.constr_mandatory and answer_tag in post and post[answer_tag].strip() == '': errors.update({answer_tag: question.constr_error_msg}) # Answer is a comment and is empty if question.constr_mandatory and answer_tag in post and post[answer_tag] == "-1" and question.comment_count_as_answer and comment_tag in post and not post[comment_tag].strip(): errors.update({answer_tag: question.constr_error_msg}) return errors def validate_multiple_choice(self, cr, uid, question, post, answer_tag, context=None): errors = {} if question.constr_mandatory: answer_candidates = dict_keys_startswith(post, answer_tag) comment_flag = answer_candidates.pop(("%s_%s" % (answer_tag, -1)), None) if question.comments_allowed: comment_answer = answer_candidates.pop(("%s_%s" % (answer_tag, 'comment')), '').strip() # There is no answer neither comments (if comments count as answer) if not answer_candidates and question.comment_count_as_answer and (not comment_flag or not comment_answer): 
errors.update({answer_tag: question.constr_error_msg}) # There is no answer at all if not answer_candidates and not question.comment_count_as_answer: errors.update({answer_tag: question.constr_error_msg}) return errors def validate_matrix(self, cr, uid, question, post, answer_tag, context=None): errors = {} if question.constr_mandatory: lines_number = len(question.labels_ids_2) answer_candidates = dict_keys_startswith(post, answer_tag) comment_answer = answer_candidates.pop(("%s_%s" % (answer_tag, 'comment')), '').strip() # Number of lines that have been answered if question.matrix_subtype == 'simple': answer_number = len(answer_candidates) elif question.matrix_subtype == 'multiple': answer_number = len(set([sk.rsplit('_', 1)[0] for sk in answer_candidates.keys()])) else: raise RuntimeError("Invalid matrix subtype") # Validate that each line has been answered if answer_number != lines_number: errors.update({answer_tag: question.constr_error_msg}) return errors class survey_label(osv.Model): ''' A suggested answer for a question ''' _name = 'survey.label' _rec_name = 'value' _order = 'sequence,id' _description = 'Survey Label' def _check_question_not_empty(self, cr, uid, ids, context=None): '''Ensure that field question_id XOR field question_id_2 is not null''' for label in self.browse(cr, uid, ids, context=context): # 'bool()' is required in order to make '!=' act as XOR with objects return bool(label.question_id) != bool(label.question_id_2) _columns = { 'question_id': fields.many2one('survey.question', 'Question', ondelete='cascade'), 'question_id_2': fields.many2one('survey.question', 'Question', ondelete='cascade'), 'sequence': fields.integer('Label Sequence order'), 'value': fields.char("Suggested value", translate=True, required=True), 'quizz_mark': fields.float('Score for this choice', help="A positive score indicates a correct choice; a negative or null score indicates a wrong answer"), } _defaults = { 'sequence': 10, } _constraints = [ 
(_check_question_not_empty, "A label must be attached to one and only one question", ['question_id', 'question_id_2']) ] class survey_user_input(osv.Model): ''' Metadata for a set of one user's answers to a particular survey ''' _name = "survey.user_input" _rec_name = 'date_create' _description = 'Survey User Input' def _quizz_get_score(self, cr, uid, ids, name, args, context=None): ret = dict() for user_input in self.browse(cr, uid, ids, context=context): ret[user_input.id] = sum([uil.quizz_mark for uil in user_input.user_input_line_ids] or [0.0]) return ret _columns = { 'survey_id': fields.many2one('survey.survey', 'Survey', required=True, readonly=1, ondelete='restrict'), 'date_create': fields.datetime('Creation Date', required=True, readonly=1), 'deadline': fields.datetime("Deadline", help="Date by which the person can open the survey and submit answers", oldname="date_deadline"), 'type': fields.selection([('manually', 'Manually'), ('link', 'Link')], 'Answer Type', required=1, readonly=1, oldname="response_type"), 'state': fields.selection([('new', 'Not started yet'), ('skip', 'Partially completed'), ('done', 'Completed')], 'Status', readonly=True), 'test_entry': fields.boolean('Test entry', readonly=1), 'token': fields.char("Identification token", readonly=1, required=1), # Optional Identification data 'partner_id': fields.many2one('res.partner', 'Partner', readonly=1), 'email': fields.char("E-mail", readonly=1), # Displaying data 'last_displayed_page_id': fields.many2one('survey.page', 'Last displayed page'), # The answers ! 
'user_input_line_ids': fields.one2many('survey.user_input_line', 'user_input_id', 'Answers'), # URLs used to display the answers 'result_url': fields.related('survey_id', 'result_url', type='char', string="Public link to the survey results"), 'print_url': fields.related('survey_id', 'print_url', type='char', string="Public link to the empty survey"), 'quizz_score': fields.function(_quizz_get_score, type="float", string="Score for the quiz") } _defaults = { 'date_create': fields.datetime.now, 'type': 'manually', 'state': 'new', 'token': lambda s, cr, uid, c: uuid.uuid4().__str__(), 'quizz_score': 0.0, } _sql_constraints = [ ('unique_token', 'UNIQUE (token)', 'A token must be unique!'), ('deadline_in_the_past', 'CHECK (deadline >= date_create)', 'The deadline cannot be in the past') ] def copy_data(self, cr, uid, id, default=None, context=None): raise UserError(_('You cannot duplicate this element!')) def do_clean_emptys(self, cr, uid, automatic=False, context=None): ''' Remove empty user inputs that have been created manually (used as a cronjob declared in data/survey_cron.xml) ''' empty_user_input_ids = self.search(cr, uid, [('type', '=', 'manually'), ('state', '=', 'new'), ('date_create', '<', (datetime.datetime.now() - datetime.timedelta(hours=1)).strftime(DF))], context=context) if empty_user_input_ids: self.unlink(cr, uid, empty_user_input_ids, context=context) def action_survey_resent(self, cr, uid, ids, context=None): ''' Sent again the invitation ''' record = self.browse(cr, uid, ids[0], context=context) context = dict(context or {}) context.update({ 'survey_resent_token': True, 'default_partner_ids': record.partner_id and [record.partner_id.id] or [], 'default_multi_email': record.email or "", 'default_public': 'email_private', }) return self.pool.get('survey.survey').action_send_survey(cr, uid, [record.survey_id.id], context=context) def action_view_answers(self, cr, uid, ids, context=None): ''' Open the website page with the survey form ''' user_input = 
self.read(cr, uid, ids, ['print_url', 'token'], context=context)[0] return { 'type': 'ir.actions.act_url', 'name': "View Answers", 'target': 'self', 'url': '%s/%s' % (user_input['print_url'], user_input['token']) } def action_survey_results(self, cr, uid, ids, context=None): ''' Open the website page with the survey results ''' return { 'type': 'ir.actions.act_url', 'name': "Survey Results", 'target': 'self', 'url': self.read(cr, uid, ids, ['result_url'], context=context)[0]['result_url'] } class survey_user_input_line(osv.Model): _name = 'survey.user_input_line' _description = 'Survey User Input Line' _rec_name = 'date_create' def _answered_or_skipped(self, cr, uid, ids, context=None): for uil in self.browse(cr, uid, ids, context=context): # 'bool()' is required in order to make '!=' act as XOR with objects return uil.skipped != bool(uil.answer_type) def _check_answer_type(self, cr, uid, ids, context=None): for uil in self.browse(cr, uid, ids, context=None): if uil.answer_type: if uil.answer_type == 'text': # 'bool()' is required in order to make '!=' act as XOR with objects return bool(uil.value_text) elif uil.answer_type == 'number': return (uil.value_number == 0) or (uil.value_number != False) elif uil.answer_type == 'date': return bool(uil.value_date) elif uil.answer_type == 'free_text': return bool(uil.value_free_text) elif uil.answer_type == 'suggestion': return bool(uil.value_suggested) return True _columns = { 'user_input_id': fields.many2one('survey.user_input', 'User Input', ondelete='cascade', required=1), 'question_id': fields.many2one('survey.question', 'Question', ondelete='restrict', required=1), 'page_id': fields.related('question_id', 'page_id', type='many2one', relation='survey.page', string="Page"), 'survey_id': fields.related('user_input_id', 'survey_id', type="many2one", relation="survey.survey", string='Survey', store=True), 'date_create': fields.datetime('Create Date', required=1), 'skipped': fields.boolean('Skipped'), 'answer_type': 
fields.selection([('text', 'Text'), ('number', 'Number'), ('date', 'Date'), ('free_text', 'Free Text'), ('suggestion', 'Suggestion')], 'Answer Type'), 'value_text': fields.char("Text answer"), 'value_number': fields.float("Numerical answer"), 'value_date': fields.datetime("Date answer"), 'value_free_text': fields.text("Free Text answer"), 'value_suggested': fields.many2one('survey.label', "Suggested answer"), 'value_suggested_row': fields.many2one('survey.label', "Row answer"), 'quizz_mark': fields.float("Score given for this choice") } _defaults = { 'skipped': False, 'date_create': fields.datetime.now() } _constraints = [ (_answered_or_skipped, "A question cannot be unanswered and skipped", ['skipped', 'answer_type']), (_check_answer_type, "The answer must be in the right type", ['answer_type', 'text', 'number', 'date', 'free_text', 'suggestion']) ] def __get_mark(self, cr, uid, value_suggested, context=None): try: mark = self.pool.get('survey.label').browse(cr, uid, int(value_suggested), context=context).quizz_mark except AttributeError: mark = 0.0 except KeyError: mark = 0.0 except ValueError: mark = 0.0 return mark def create(self, cr, uid, vals, context=None): value_suggested = vals.get('value_suggested') if value_suggested: vals.update({'quizz_mark': self.__get_mark(cr, uid, value_suggested)}) return super(survey_user_input_line, self).create(cr, uid, vals, context=context) def write(self, cr, uid, ids, vals, context=None): value_suggested = vals.get('value_suggested') if value_suggested: vals.update({'quizz_mark': self.__get_mark(cr, uid, value_suggested)}) return super(survey_user_input_line, self).write(cr, uid, ids, vals, context=context) def copy_data(self, cr, uid, id, default=None, context=None): raise UserError(_('You cannot duplicate this element!')) def save_lines(self, cr, uid, user_input_id, question, post, answer_tag, context=None): ''' Save answers to questions, depending on question type If an answer already exists for question and 
user_input_id, it will be overwritten (in order to maintain data consistency). ''' try: saver = getattr(self, 'save_line_' + question.type) except AttributeError: _logger.error(question.type + ": This type of question has no saving function") return False else: saver(cr, uid, user_input_id, question, post, answer_tag, context=context) def save_line_free_text(self, cr, uid, user_input_id, question, post, answer_tag, context=None): vals = { 'user_input_id': user_input_id, 'question_id': question.id, 'page_id': question.page_id.id, 'survey_id': question.survey_id.id, 'skipped': False, } if answer_tag in post and post[answer_tag].strip() != '': vals.update({'answer_type': 'free_text', 'value_free_text': post[answer_tag]}) else: vals.update({'answer_type': None, 'skipped': True}) old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id), ('survey_id', '=', question.survey_id.id), ('question_id', '=', question.id)], context=context) if old_uil: self.write(cr, uid, old_uil[0], vals, context=context) else: self.create(cr, uid, vals, context=context) return True def save_line_textbox(self, cr, uid, user_input_id, question, post, answer_tag, context=None): vals = { 'user_input_id': user_input_id, 'question_id': question.id, 'page_id': question.page_id.id, 'survey_id': question.survey_id.id, 'skipped': False } if answer_tag in post and post[answer_tag].strip() != '': vals.update({'answer_type': 'text', 'value_text': post[answer_tag]}) else: vals.update({'answer_type': None, 'skipped': True}) old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id), ('survey_id', '=', question.survey_id.id), ('question_id', '=', question.id)], context=context) if old_uil: self.write(cr, uid, old_uil[0], vals, context=context) else: self.create(cr, uid, vals, context=context) return True def save_line_numerical_box(self, cr, uid, user_input_id, question, post, answer_tag, context=None): vals = { 'user_input_id': user_input_id, 'question_id': question.id, 'page_id': 
question.page_id.id, 'survey_id': question.survey_id.id, 'skipped': False } if answer_tag in post and post[answer_tag].strip() != '': vals.update({'answer_type': 'number', 'value_number': float(post[answer_tag])}) else: vals.update({'answer_type': None, 'skipped': True}) old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id), ('survey_id', '=', question.survey_id.id), ('question_id', '=', question.id)], context=context) if old_uil: self.write(cr, uid, old_uil[0], vals, context=context) else: self.create(cr, uid, vals, context=context) return True def save_line_datetime(self, cr, uid, user_input_id, question, post, answer_tag, context=None): vals = { 'user_input_id': user_input_id, 'question_id': question.id, 'page_id': question.page_id.id, 'survey_id': question.survey_id.id, 'skipped': False } if answer_tag in post and post[answer_tag].strip() != '': vals.update({'answer_type': 'date', 'value_date': post[answer_tag]}) else: vals.update({'answer_type': None, 'skipped': True}) old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id), ('survey_id', '=', question.survey_id.id), ('question_id', '=', question.id)], context=context) if old_uil: self.write(cr, uid, old_uil[0], vals, context=context) else: self.create(cr, uid, vals, context=context) return True def save_line_simple_choice(self, cr, uid, user_input_id, question, post, answer_tag, context=None): vals = { 'user_input_id': user_input_id, 'question_id': question.id, 'page_id': question.page_id.id, 'survey_id': question.survey_id.id, 'skipped': False } old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id), ('survey_id', '=', question.survey_id.id), ('question_id', '=', question.id)], context=context) if old_uil: self.unlink(cr, uid, old_uil, context=context) if answer_tag in post and post[answer_tag].strip() != '': vals.update({'answer_type': 'suggestion', 'value_suggested': post[answer_tag]}) else: vals.update({'answer_type': None, 'skipped': True}) # '-1' indicates 
'comment count as an answer' so do not need to record it if post.get(answer_tag) and post.get(answer_tag) != '-1': self.create(cr, uid, vals, context=context) comment_answer = post.pop(("%s_%s" % (answer_tag, 'comment')), '').strip() if comment_answer: vals.update({'answer_type': 'text', 'value_text': comment_answer, 'skipped': False, 'value_suggested': False}) self.create(cr, uid, vals, context=context) return True def save_line_multiple_choice(self, cr, uid, user_input_id, question, post, answer_tag, context=None): vals = { 'user_input_id': user_input_id, 'question_id': question.id, 'page_id': question.page_id.id, 'survey_id': question.survey_id.id, 'skipped': False } old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id), ('survey_id', '=', question.survey_id.id), ('question_id', '=', question.id)], context=context) if old_uil: self.unlink(cr, uid, old_uil, context=context) ca = dict_keys_startswith(post, answer_tag) comment_answer = ca.pop(("%s_%s" % (answer_tag, 'comment')), '').strip() if len(ca) > 0: for a in ca: # '-1' indicates 'comment count as an answer' so do not need to record it if a != ('%s_%s' % (answer_tag, '-1')): vals.update({'answer_type': 'suggestion', 'value_suggested': ca[a]}) self.create(cr, uid, vals, context=context) if comment_answer: vals.update({'answer_type': 'text', 'value_text': comment_answer, 'value_suggested': False}) self.create(cr, uid, vals, context=context) if not ca and not comment_answer: vals.update({'answer_type': None, 'skipped': True}) self.create(cr, uid, vals, context=context) return True def save_line_matrix(self, cr, uid, user_input_id, question, post, answer_tag, context=None): vals = { 'user_input_id': user_input_id, 'question_id': question.id, 'page_id': question.page_id.id, 'survey_id': question.survey_id.id, 'skipped': False } old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id), ('survey_id', '=', question.survey_id.id), ('question_id', '=', question.id)], context=context) if 
old_uil: self.unlink(cr, uid, old_uil, context=context) no_answers = True ca = dict_keys_startswith(post, answer_tag) comment_answer = ca.pop(("%s_%s" % (answer_tag, 'comment')), '').strip() if comment_answer: vals.update({'answer_type': 'text', 'value_text': comment_answer}) self.create(cr, uid, vals, context=context) no_answers = False if question.matrix_subtype == 'simple': for row in question.labels_ids_2: a_tag = "%s_%s" % (answer_tag, row.id) if a_tag in ca: no_answers = False vals.update({'answer_type': 'suggestion', 'value_suggested': ca[a_tag], 'value_suggested_row': row.id}) self.create(cr, uid, vals, context=context) elif question.matrix_subtype == 'multiple': for col in question.labels_ids: for row in question.labels_ids_2: a_tag = "%s_%s_%s" % (answer_tag, row.id, col.id) if a_tag in ca: no_answers = False vals.update({'answer_type': 'suggestion', 'value_suggested': col.id, 'value_suggested_row': row.id}) self.create(cr, uid, vals, context=context) if no_answers: vals.update({'answer_type': None, 'skipped': True}) self.create(cr, uid, vals, context=context) return True def dict_keys_startswith(dictionary, string): '''Returns a dictionary containing the elements of <dict> whose keys start with <string>. .. note:: This function uses dictionary comprehensions (Python >= 2.7)''' return {k: dictionary[k] for k in filter(lambda key: key.startswith(string), dictionary.keys())}
unknown
codeparrot/codeparrot-clean
import RPi.GPIO as GPIO, subprocess #if( int(time.strftime('%H')) >= 8 and int(time.strftime('%H')) <= 21 ): def checkFacebook(): nbr_notif = int(open("/home/pi/RaspiNotifier/nbr/nbr_facebook.txt", "r").read()) GPIO_PIN = int(config.get("Facebook", "gpioPin")) GPIO.setmode(GPIO.BOARD) GPIO.setup(GPIO_PIN, GPIO.OUT) proc = subprocess.Popen("php /home/pi/RaspiNotifier/FacebookAPI/FBChecker.php", shell=True, stdout=subprocess.PIPE) newnotif = proc.stdout.read() if newnotif.isdigit(): print("Facebook say: " + str(newnotif)) print("Last time: " + str(nbr_notif)) if int(newnotif) > nbr_notif: GPIO.output(GPIO_PIN, True) print("Turn on pin " + str(GPIO_PIN)) if int(newnotif) == nbr_notif: print("Don't change state on GPIO") if int(newnotif) < nbr_notif: GPIO.output(GPIO_PIN, False) print("Turn off pin " + str(GPIO_PIN)) open("/home/pi/RaspiNotifier/nbr/nbr_facebook.txt", "w").write(str(newnotif)) else: print("Error: " + newnotif) #else: # print("Silence !")
unknown
codeparrot/codeparrot-clean
/* * Copyright 2012-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.docker.compose.core; import java.util.List; import java.util.Locale; import org.junit.jupiter.api.Test; import static org.assertj.core.api.Assertions.assertThat; /** * Tests for {@link DockerJson}. * * @author Moritz Halbritter * @author Andy Wilkinson * @author Phillip Webb */ class DockerJsonTests { @Test void deserializeWhenSentenceCase() { String json = """ { "Value": 1 } """; TestResponse response = DockerJson.deserialize(json, TestResponse.class); assertThat(response).isEqualTo(new TestResponse(1)); } @Test void deserializeWhenLowerCase() { String json = """ { "value": 1 } """; TestResponse response = DockerJson.deserialize(json, TestResponse.class); assertThat(response).isEqualTo(new TestResponse(1)); } @Test void deserializeToListWhenArray() { String json = """ [{ "value": 1 }, { "value": 2 }] """; List<TestResponse> response = DockerJson.deserializeToList(json, TestResponse.class); assertThat(response).containsExactly(new TestResponse(1), new TestResponse(2)); } @Test void deserializeToListWhenMultipleLines() { String json = """ { "Value": 1 } { "Value": 2 } """; List<TestResponse> response = DockerJson.deserializeToList(json, TestResponse.class); assertThat(response).containsExactly(new TestResponse(1), new TestResponse(2)); } @Test void shouldBeLocaleAgnostic() { // Turkish locale lower cases the 'I' to a 'ı', not to an 'i' 
withLocale(Locale.forLanguageTag("tr-TR"), () -> { String json = """ { "INTEGER": 42 } """; TestLowercaseResponse response = DockerJson.deserialize(json, TestLowercaseResponse.class); assertThat(response.integer()).isEqualTo(42); }); } private void withLocale(Locale locale, Runnable runnable) { Locale defaultLocale = Locale.getDefault(); try { Locale.setDefault(locale); runnable.run(); } finally { Locale.setDefault(defaultLocale); } } record TestResponse(int value) { } record TestLowercaseResponse(int integer) { } }
java
github
https://github.com/spring-projects/spring-boot
core/spring-boot-docker-compose/src/test/java/org/springframework/boot/docker/compose/core/DockerJsonTests.java
groups: - name: my-group-name interval: 30s # defaults to global interval rules: - alert: HighErrors expr: | sum without(instance) (rate(errors_total[5m])) / sum without(instance) (rate(requests_total[5m])) for: 5m labels: severity: critical annotations: description: "stuff's happening with {{ $.labels.service }}" # Mix recording rules in the same list - record: "new_metric" expr: | sum without(instance) (rate(errors_total[5m])) / sum without(instance) (rate(requests_total[5m])) labels: abc: edf uvw: xyz - alert: HighErrors expr: | sum without(instance) (rate(errors_total[5m])) / sum without(instance) (rate(requests_total[5m])) for: 5m labels: severity: critical annotations: description: "stuff's happening with {{ $.labels.service }}" - name: my-another-name interval: 30s # defaults to global interval rules: - alert: HighErrors expr: | sum without(instance) (rate(errors_total[5m])) / sum without(instance) (rate(requests_total[5m])) for: 5m labels: severity: critical - record: "new_metric" expr: | sum without(instance) (rate(errors_total[5m])) / sum without(instance) (rate(requests_total[5m])) - alert: HighErrors expr: | sum without(instance) (rate(errors_total[5m])) / sum without(instance) (rate(requests_total[5m])) for: 5m labels: severity: critical annotations: description: "stuff's happening with {{ $.labels.service }}"
unknown
github
https://github.com/prometheus/prometheus
model/rulefmt/testdata/test.yaml
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import httplib2 import urlparse from novaclient import client as base_client from novaclient.v1_1 import client from heat.tests import fakes class FakeClient(fakes.FakeClient, client.Client): def __init__(self, *args, **kwargs): client.Client.__init__(self, 'username', 'password', 'project_id', 'auth_url') self.client = FakeHTTPClient(**kwargs) class FakeHTTPClient(base_client.HTTPClient): def __init__(self, **kwargs): self.username = 'username' self.password = 'password' self.auth_url = 'auth_url' self.callstack = [] def _cs_request(self, url, method, **kwargs): # Check that certain things are called correctly if method in ['GET', 'DELETE']: assert 'body' not in kwargs elif method == 'PUT': assert 'body' in kwargs # Call the method args = urlparse.parse_qsl(urlparse.urlparse(url)[4]) kwargs.update(args) munged_url = url.rsplit('?', 1)[0] munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_') munged_url = munged_url.replace('-', '_') callback = "%s_%s" % (method.lower(), munged_url) if not hasattr(self, callback): raise AssertionError('Called unknown API method: %s %s, ' 'expected fakes method name: %s' % (method, url, callback)) # Note the call self.callstack.append((method, url, kwargs.get('body', None))) status, body = getattr(self, callback)(**kwargs) if hasattr(status, 'items'): return httplib2.Response(status), 
body else: return httplib2.Response({"status": status}), body # # Limits # def get_limits(self, **kw): return (200, {"limits": { "rate": [{"uri": "*", "regex": ".*", "limit": [ {"value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": "2011-12-15T22:42:45Z"}, {"value": 10, "verb": "PUT", "remaining": 2, "unit": "MINUTE", "next-available": "2011-12-15T22:42:45Z"}, {"value": 100, "verb": "DELETE", "remaining": 100, "unit": "MINUTE", "next-available": "2011-12-15T22:42:45Z"}]}, {"uri": "*/servers", "regex": "^/servers", "limit": [{"verb": "POST", "value": 25, "remaining": 24, "unit": "DAY", "next-available": "2011-12-15T22:42:45Z"}]}], "absolute": {"maxTotalRAMSize": 51200, "maxServerMeta": 5, "maxImageMeta": 5, "maxPersonality": 5, "maxPersonalitySize": 10240}}}) # # Servers # def get_servers(self, **kw): return (200, {"servers": [ {'id': 1234, 'name': 'sample-server'}, {'id': 5678, 'name': 'sample-server2'}, {'id': 9101, 'name': 'hard-reboot'}, {'id': 9102, 'name': 'server-with-no-ip'}, {'id': 9999, 'name': 'sample-server3'} ]}) def get_servers_detail(self, **kw): return (200, {"servers": [{"id": 1234, "name": "sample-server", "OS-EXT-SRV-ATTR:instance_name": "sample-server", "image": {"id": 2, "name": "sample image"}, "flavor": {"id": 1, "name": "256 MB Server"}, "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0", "status": "BUILD", "progress": 60, "addresses": {"public": [{"version": 4, "addr": "1.2.3.4"}, {"version": 4, "addr": "5.6.7.8"}], "private": [{"version": 4, "addr": "10.11.12.13"}]}, "accessIPv4": "", "accessIPv6": "", "metadata": {"Server Label": "Web Head 1", "Image Version": "2.1"}}, {"id": 5678, "name": "sample-server2", "OS-EXT-SRV-ATTR:instance_name": "sample-server2", "image": {"id": 2, "name": "sample image"}, "flavor": {"id": 1, "name": "256 MB Server"}, "hostId": "9e107d9d372bb6826bd81d3542a419d6", "status": "ACTIVE", "accessIPv4": "192.0.2.0", "accessIPv6": "::babe:4317:0A83", "addresses": {"public": [{"version": 4, "addr": 
"4.5.6.7"}, {"version": 4, "addr": "5.6.9.8"}], "private": [{"version": 4, "addr": "10.13.12.13"}]}, "metadata": {"Server Label": "DB 1"}}, {"id": 9101, "name": "hard-reboot", "OS-EXT-SRV-ATTR:instance_name": "hard-reboot", "image": {"id": 2, "name": "sample image"}, "flavor": {"id": 1, "name": "256 MB Server"}, "hostId": "9e44d8d435c43dd8d96bb63ed995605f", "status": "HARD_REBOOT", "accessIPv4": "", "accessIPv6": "", "addresses": {"public": [{"version": 4, "addr": "172.17.1.2"}, {"version": 4, "addr": "10.20.30.40"}], "private": [{"version": 4, "addr": "10.13.12.13"}]}, "metadata": {"Server Label": "DB 1"}}, {"id": 9102, "name": "server-with-no-ip", "OS-EXT-SRV-ATTR:instance_name": "server-with-no-ip", "image": {"id": 2, "name": "sample image"}, "flavor": {"id": 1, "name": "256 MB Server"}, "hostId": "c1365ba78c624df9b2ff446515a682f5", "status": "ACTIVE", "accessIPv4": "", "accessIPv6": "", "addresses": { "empty_net": []}, "metadata": {"Server Label": "DB 1"}}, {"id": 9999, "name": "sample-server3", "OS-EXT-SRV-ATTR:instance_name": "sample-server3", "image": {"id": 3, "name": "sample image"}, "flavor": {"id": 3, "name": "m1.large"}, "hostId": "9e107d9d372bb6826bd81d3542a419d6", "status": "ACTIVE", "accessIPv4": "", "accessIPv6": "", "addresses": { "public": [{"version": 4, "addr": "4.5.6.7"}, {"version": 4, "addr": "5.6.9.8"}], "private": [{"version": 4, "addr": "10.13.12.13"}]}, "metadata": {"Server Label": "DB 1"}}]}) def post_servers(self, body, **kw): assert body.keys() == ['server'] fakes.assert_has_keys(body['server'], required=['name', 'imageRef', 'flavorRef'], optional=['metadata', 'personality']) if 'personality' in body['server']: for pfile in body['server']['personality']: fakes.assert_has_keys(pfile, required=['path', 'contents']) return (202, self.get_servers_1234()[1]) def get_servers_1234(self, **kw): r = {'server': self.get_servers_detail()[1]['servers'][0]} return (200, r) def get_servers_WikiServerOne(self, **kw): r = {'server': 
self.get_servers_detail()[1]['servers'][0]} return (200, r) def get_servers_WikiServerOne1(self, **kw): r = {'server': self.get_servers_detail()[1]['servers'][0]} return (200, r) def get_servers_WikiServerOne2(self, **kw): r = {'server': self.get_servers_detail()[1]['servers'][3]} return (200, r) def get_servers_5678(self, **kw): r = {'server': self.get_servers_detail()[1]['servers'][1]} return (200, r) def put_servers_1234(self, body, **kw): assert body.keys() == ['server'] fakes.assert_has_keys(body['server'], optional=['name', 'adminPass']) return (204, None) def delete_servers_1234(self, **kw): return (202, None) def delete_servers_1234_metadata_test_key(self, **kw): return (204, None) def delete_servers_1234_metadata_key1(self, **kw): return (204, None) def delete_servers_1234_metadata_key2(self, **kw): return (204, None) def delete_servers_5678(self, **kw): return (202, None) def delete_servers_5678_metadata_test_key(self, **kw): return (204, None) def delete_servers_5678_metadata_key1(self, **kw): return (204, None) def delete_servers_5678_metadata_key2(self, **kw): return (204, None) def get_servers_9999(self, **kw): r = {'server': self.get_servers_detail()[1]['servers'][0]} return (200, r) def put_servers_9999(self, body, **kw): assert body.keys() == ['server'] fakes.assert_has_keys(body['server'], optional=['name', 'adminPass']) return (204, None) def delete_servers_9999(self, **kw): return (202, None) def delete_servers_9999_metadata_test_key(self, **kw): return (204, None) def delete_servers_9999_metadata_key1(self, **kw): return (204, None) def delete_servers_9999_metadata_key2(self, **kw): return (204, None) def post_servers_9999_metadata(self, **kw): return (204, {'metadata': {'test_key': 'test_value'}}) def get_servers_9999_diagnostics(self, **kw): return (200, 'Fake diagnostics') def get_servers_9102(self, **kw): r = {'server': self.get_servers_detail()[1]['servers'][3]} return (200, r) def get_servers_1234_actions(self, **kw): return (200, 
{'actions': [{'action': 'rebuild', 'error': None, 'created_at': '2011-12-30 11:45:36'}, {'action': 'reboot', 'error': 'Failed!', 'created_at': '2011-12-30 11:40:29'}]}) # # Server Addresses # def get_servers_1234_ips(self, **kw): return (200, {'addresses': self.get_servers_1234()[1]['server']['addresses']}) def get_servers_1234_ips_public(self, **kw): return (200, {'public': self.get_servers_1234_ips()[1]['addresses']['public']}) def get_servers_1234_ips_private(self, **kw): return (200, {'private': self.get_servers_1234_ips()[1]['addresses']['private']}) def delete_servers_1234_ips_public_1_2_3_4(self, **kw): return (202, None) # # Server actions # def post_servers_1234_action(self, body, **kw): _body = None resp = 202 assert len(body.keys()) == 1 action = body.keys()[0] if action == 'reboot': assert body[action].keys() == ['type'] assert body[action]['type'] in ['HARD', 'SOFT'] elif action == 'rebuild': keys = body[action].keys() if 'adminPass' in keys: keys.remove('adminPass') assert keys == ['imageRef'] _body = self.get_servers_1234()[1] elif action == 'resize': assert body[action].keys() == ['flavorRef'] elif action == 'confirmResize': assert body[action] is None # This one method returns a different response code return (204, None) elif action == 'revertResize': assert body[action] is None elif action == 'migrate': assert body[action] is None elif action == 'rescue': assert body[action] is None elif action == 'unrescue': assert body[action] is None elif action == 'lock': assert body[action] is None elif action == 'unlock': assert body[action] is None elif action == 'suspend': assert body[action] is None elif action == 'resume': assert body[action] is None elif action == 'addFixedIp': assert body[action].keys() == ['networkId'] elif action == 'removeFixedIp': assert body[action].keys() == ['address'] elif action == 'addFloatingIp': assert body[action].keys() == ['address'] elif action == 'removeFloatingIp': assert body[action].keys() == ['address'] elif action 
== 'createImage': assert set(body[action].keys()) == set(['name', 'metadata']) resp = dict(status=202, location="http://blah/images/456") elif action == 'changePassword': assert body[action].keys() == ['adminPass'] elif action == 'os-getConsoleOutput': assert body[action].keys() == ['length'] return (202, {'output': 'foo'}) elif action == 'os-getVNCConsole': assert body[action].keys() == ['type'] elif action == 'os-migrateLive': assert set(body[action].keys()) == set(['host', 'block_migration', 'disk_over_commit']) else: raise AssertionError("Unexpected server action: %s" % action) return (resp, _body) # # Cloudpipe # def get_os_cloudpipe(self, **kw): return (200, {'cloudpipes': [{'project_id': 1}]}) def post_os_cloudpipe(self, **ks): return (202, {'instance_id': '9d5824aa-20e6-4b9f-b967-76a699fc51fd'}) # # Flavors # def get_flavors(self, **kw): return (200, {'flavors': [ {'id': 1, 'name': '256 MB Server'}, {'id': 2, 'name': 'm1.small'}, {'id': 3, 'name': 'm1.large'} ]}) def get_flavors_detail(self, **kw): return (200, {'flavors': [ {'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10, 'OS-FLV-EXT-DATA:ephemeral': 10}, {'id': 2, 'name': 'm1.small', 'ram': 512, 'disk': 20, 'OS-FLV-EXT-DATA:ephemeral': 20}, {'id': 3, 'name': 'm1.large', 'ram': 512, 'disk': 20, 'OS-FLV-EXT-DATA:ephemeral': 30} ]}) def get_flavors_1(self, **kw): return (200, {'flavor': self.get_flavors_detail()[1]['flavors'][0]}) def get_flavors_2(self, **kw): return (200, {'flavor': self.get_flavors_detail()[1]['flavors'][1]}) def get_flavors_3(self, **kw): # Diablo has no ephemeral return (200, {'flavor': {'id': 3, 'name': '256 MB Server', 'ram': 256, 'disk': 10}}) def delete_flavors_flavordelete(self, **kw): return (202, None) def post_flavors(self, body, **kw): return (202, {'flavor': self.get_flavors_detail()[1]['flavors'][0]}) # # Floating ips # def get_os_floating_ip_pools(self): return (200, {'floating_ip_pools': [{'name': 'foo', 'name': 'bar'}]}) def get_os_floating_ips(self, **kw): 
return (200, {'floating_ips': [ {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'}, {'id': 2, 'fixed_ip': '10.0.0.2', 'ip': '11.0.0.2'}, ]}) def get_os_floating_ips_1(self, **kw): return (200, {'floating_ip': {'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'}}) def post_os_floating_ips(self, body, **kw): return (202, self.get_os_floating_ips_1()[1]) def delete_os_floating_ips_1(self, **kw): return (204, None) def get_os_floating_ip_dns(self, **kw): return (205, {'domain_entries': [{'domain': 'example.org'}, {'domain': 'example.com'}]}) def get_os_floating_ip_dns_testdomain_entries(self, **kw): if kw.get('ip'): return (205, {'dns_entries': [{'dns_entry': {'ip': kw.get('ip'), 'name': "host1", 'type': "A", 'domain': 'testdomain'}}, {'dns_entry': {'ip': kw.get('ip'), 'name': "host2", 'type': "A", 'domain': 'testdomain'}}]}) else: return (404, None) def get_os_floating_ip_dns_testdomain_entries_testname(self, **kw): return (205, {'dns_entry': {'ip': "10.10.10.10", 'name': 'testname', 'type': "A", 'domain': 'testdomain'}}) def put_os_floating_ip_dns_testdomain(self, body, **kw): if body['domain_entry']['scope'] == 'private': fakes.assert_has_keys(body['domain_entry'], required=['availability_zone', 'scope']) elif body['domain_entry']['scope'] == 'public': fakes.assert_has_keys(body['domain_entry'], required=['project', 'scope']) else: fakes.assert_has_keys(body['domain_entry'], required=['project', 'scope']) return (205, None) def put_os_floating_ip_dns_testdomain_entries_testname(self, body, **kw): fakes.assert_has_keys(body['dns_entry'], required=['ip', 'dns_type']) return (205, None) def delete_os_floating_ip_dns_testdomain(self, **kw): return (200, None) def delete_os_floating_ip_dns_testdomain_entries_testname(self, **kw): return (200, None) # # Images # def get_images(self, **kw): return (200, {'images': [{'id': 1, 'name': 'CentOS 5.2'}, {'id': 2, 'name': 'My Server Backup'}, {'id': 3, 'name': 'F17-x86_64-gold'}, {'id': 4, 'name': 'F17-x86_64-cfntools'}]}) def 
get_images_detail(self, **kw): return (200, {'images': [{'id': 1, 'name': 'CentOS 5.2', "updated": "2010-10-10T12:00:00Z", "created": "2010-08-10T12:00:00Z", "status": "ACTIVE", "metadata": {"test_key": "test_value"}, "links": {}}, {"id": 743, "name": "My Server Backup", "serverId": 1234, "updated": "2010-10-10T12:00:00Z", "created": "2010-08-10T12:00:00Z", "status": "SAVING", "progress": 80, "links": {}}, {"id": 744, "name": "F17-x86_64-gold", "serverId": 9999, "updated": "2010-10-10T12:00:00Z", "created": "2010-08-10T12:00:00Z", "status": "SAVING", "progress": 80, "links": {}}, {"id": 745, "name": "F17-x86_64-cfntools", "serverId": 9998, "updated": "2010-10-10T12:00:00Z", "created": "2010-08-10T12:00:00Z", "status": "SAVING", "progress": 80, "links": {}}]}) def get_images_1(self, **kw): return (200, {'image': self.get_images_detail()[1]['images'][0]}) def get_images_2(self, **kw): return (200, {'image': self.get_images_detail()[1]['images'][1]}) def post_images(self, body, **kw): assert body.keys() == ['image'] fakes.assert_has_keys(body['image'], required=['serverId', 'name']) return (202, self.get_images_1()[1]) def post_images_1_metadata(self, body, **kw): assert body.keys() == ['metadata'] fakes.assert_has_keys(body['metadata'], required=['test_key']) return (200, {'metadata': self.get_images_1()[1]['image']['metadata']}) def delete_images_1(self, **kw): return (204, None) def delete_images_1_metadata_test_key(self, **kw): return (204, None) # # Keypairs # def get_os_keypairs(self, *kw): return (200, {"keypairs": [{'fingerprint': 'FAKE_KEYPAIR', 'name': 'test'}]}) def delete_os_keypairs_test(self, **kw): return (202, None) def post_os_keypairs(self, body, **kw): assert body.keys() == ['keypair'] fakes.assert_has_keys(body['keypair'], required=['name']) r = {'keypair': self.get_os_keypairs()[1]['keypairs'][0]} return (202, r) # # Virtual Interfaces # def get_servers_1234_os_virtual_interfaces(self, **kw): return (200, {"virtual_interfaces": [ {'id': 'fakeid', 
'mac_address': 'fakemac'} ]}) # # Quotas # def get_os_quota_sets_test(self, **kw): return (200, {'quota_set': { 'tenant_id': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1}}) def get_os_quota_sets_test_defaults(self): return (200, {'quota_set': { 'tenant_id': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1}}) def put_os_quota_sets_test(self, body, **kw): assert body.keys() == ['quota_set'] fakes.assert_has_keys(body['quota_set'], required=['tenant_id']) return (200, {'quota_set': { 'tenant_id': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'volumes': 2, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1}}) # # Quota Classes # def get_os_quota_class_sets_test(self, **kw): return (200, {'quota_class_set': { 'class_name': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'volumes': 1, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1}}) def put_os_quota_class_sets_test(self, body, **kw): assert body.keys() == ['quota_class_set'] fakes.assert_has_keys(body['quota_class_set'], required=['class_name']) return (200, {'quota_class_set': { 'class_name': 'test', 'metadata_items': [], 'injected_file_content_bytes': 1, 'volumes': 2, 'gigabytes': 1, 'ram': 1, 'floating_ips': 1, 'instances': 1, 'injected_files': 1, 'cores': 1}}) # # Security Groups # def get_os_security_groups(self, **kw): return (200, {"security_groups": [{'id': 1, 'name': 'test', 'description': 'FAKE_SECURITY_GROUP'}]}) def get_os_security_groups_1(self, **kw): return (200, {"security_group": {'id': 1, 'name': 'test', 'description': 'FAKE_SECURITY_GROUP'}}) def delete_os_security_groups_1(self, **kw): return (202, None) def post_os_security_groups(self, 
body, **kw): assert body.keys() == ['security_group'] fakes.assert_has_keys(body['security_group'], required=['name', 'description']) r = {'security_group': self.get_os_security_groups()[1]['security_groups'][0]} return (202, r) # # Security Group Rules # def get_os_security_group_rules(self, **kw): return (200, {"security_group_rules": [{'id': 1, 'parent_group_id': 1, 'group_id': 2, 'ip_protocol': 'TCP', 'from_port': '22', 'to_port': 22, 'cidr': '10.0.0.0/8'}]}) def delete_os_security_group_rules_1(self, **kw): return (202, None) def post_os_security_group_rules(self, body, **kw): assert body.keys() == ['security_group_rule'] fakes.assert_has_keys(body['security_group_rule'], required=['parent_group_id'], optional=['group_id', 'ip_protocol', 'from_port', 'to_port', 'cidr']) r = {'security_group_rule': self.get_os_security_group_rules()[1]['security_group_rules'][0]} return (202, r) # # Tenant Usage # def get_os_simple_tenant_usage(self, **kw): return (200, {u'tenant_usages': [{ u'total_memory_mb_usage': 25451.762807466665, u'total_vcpus_usage': 49.71047423333333, u'total_hours': 49.71047423333333, u'tenant_id': u'7b0a1d73f8fb41718f3343c207597869', u'stop': u'2012-01-22 19:48:41.750722', u'server_usages': [{ u'hours': 49.71047423333333, u'uptime': 27035, u'local_gb': 0, u'ended_at': None, u'name': u'f15image1', u'tenant_id': u'7b0a1d73f8fb41718f3343c207597869', u'vcpus': 1, u'memory_mb': 512, u'state': u'active', u'flavor': u'm1.tiny', u'started_at': u'2012-01-20 18:06:06.479998'}], u'start': u'2011-12-25 19:48:41.750687', u'total_local_gb_usage': 0.0}]}) def get_os_simple_tenant_usage_tenantfoo(self, **kw): return (200, {u'tenant_usage': { u'total_memory_mb_usage': 25451.762807466665, u'total_vcpus_usage': 49.71047423333333, u'total_hours': 49.71047423333333, u'tenant_id': u'7b0a1d73f8fb41718f3343c207597869', u'stop': u'2012-01-22 19:48:41.750722', u'server_usages': [{ u'hours': 49.71047423333333, u'uptime': 27035, u'local_gb': 0, u'ended_at': None, u'name': 
u'f15image1', u'tenant_id': u'7b0a1d73f8fb41718f3343c207597869', u'vcpus': 1, u'memory_mb': 512, u'state': u'active', u'flavor': u'm1.tiny', u'started_at': u'2012-01-20 18:06:06.479998'}], u'start': u'2011-12-25 19:48:41.750687', u'total_local_gb_usage': 0.0}}) # # Certificates # def get_os_certificates_root(self, **kw): return (200, {'certificate': {'private_key': None, 'data': 'foo'}}) def post_os_certificates(self, **kw): return (200, {'certificate': {'private_key': 'foo', 'data': 'bar'}}) # # Aggregates # def get_os_aggregates(self, *kw): return (200, {"aggregates": [ {'id': '1', 'name': 'test', 'availability_zone': 'nova1'}, {'id': '2', 'name': 'test2', 'availability_zone': 'nova1'}, ]}) def _return_aggregate(self): r = {'aggregate': self.get_os_aggregates()[1]['aggregates'][0]} return (200, r) def get_os_aggregates_1(self, **kw): return self._return_aggregate() def post_os_aggregates(self, body, **kw): return self._return_aggregate() def put_os_aggregates_1(self, body, **kw): return self._return_aggregate() def put_os_aggregates_2(self, body, **kw): return self._return_aggregate() def post_os_aggregates_1_action(self, body, **kw): return self._return_aggregate() def post_os_aggregates_2_action(self, body, **kw): return self._return_aggregate() def delete_os_aggregates_1(self, **kw): return (202, None) # # Hosts # def get_os_hosts_host(self, *kw): return (200, {'host': [{'resource': {'project': '(total)', 'host': 'dummy', 'cpu': 16, 'memory_mb': 32234, 'disk_gb': 128}}, {'resource': {'project': '(used_now)', 'host': 'dummy', 'cpu': 1, 'memory_mb': 2075, 'disk_gb': 45}}, {'resource': {'project': '(used_max)', 'host': 'dummy', 'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}}, {'resource': {'project': 'admin', 'host': 'dummy', 'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}}]}) def get_os_hosts_sample_host(self, *kw): return (200, {'host': [{'resource': {'host': 'sample_host'}}], }) def put_os_hosts_sample_host_1(self, body, **kw): return (200, {'host': 'sample-host_1', 
'status': 'enabled'}) def put_os_hosts_sample_host_2(self, body, **kw): return (200, {'host': 'sample-host_2', 'maintenance_mode': 'on_maintenance'}) def put_os_hosts_sample_host_3(self, body, **kw): return (200, {'host': 'sample-host_3', 'status': 'enabled', 'maintenance_mode': 'on_maintenance'}) def get_os_hosts_sample_host_startup(self, **kw): return (200, {'host': 'sample_host', 'power_action': 'startup'}) def get_os_hosts_sample_host_reboot(self, **kw): return (200, {'host': 'sample_host', 'power_action': 'reboot'}) def get_os_hosts_sample_host_shutdown(self, **kw): return (200, {'host': 'sample_host', 'power_action': 'shutdown'}) def put_os_hosts_sample_host(self, body, **kw): result = {'host': 'dummy'} result.update(body) return (200, result) def get_os_availability_zone(self, *kw): return (200, {"availabilityZoneInfo": [{'zoneName': 'nova1'}]})
unknown
codeparrot/codeparrot-clean
""" This module contains backported functions that are not present in Python 2.4 but are standard in more recent versions. """ import re import sys # Import backported modules import simplejson import collections import itertools if not hasattr(itertools, 'product'): import _itertools itertools.product = _itertools.product # pylint: disable=I0011,W0622 # noinspection PyShadowingBuiltins def _next(*args): """ Retrieve the next item from the iterator by calling its next() method. If default is given, it is returned if the iterator is exhausted, otherwise StopIteration is raised. New in version 2.6. :param iterator: the iterator :type iterator: iterator :param default: the value to return if the iterator raises StopIteration :type default: object :return: The object returned by iterator.next() :rtype: object """ if len(args) == 2: try: return args[0].next() except StopIteration: return args[1] elif len(args) > 2: raise TypeError("next expected at most 2 arguments, %s" % len(args)) else: return args[0].next() # pylint: disable=W0622 # noinspection PyShadowingBuiltins def _any(iterable): """ From http://stackoverflow.com/questions/3785433/python-backports-for-some-methods :codeauthor: Tim Pietzcker http://stackoverflow.com/users/20670/tim-pietzcker licensed under cc-wiki with attribution required """ for element in iterable: if element: return True return False # pylint: disable=W0622 # noinspection PyShadowingBuiltins def _all(iterable): """ From http://stackoverflow.com/questions/3785433/python-backports-for-some-methods :codeauthor: Tim Pietzcker http://stackoverflow.com/users/20670/tim-pietzcker licensed under cc-wiki with attribution required """ for element in iterable: if not element: return False return True # Adapted from http://code.activestate.com/recipes/576847/ # :codeauthor: Vishal Sapre # :license: MIT BIN_HEX_DICT = { '0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100', '5': '0101', '6': '0110', '7': '0111', '8': '1000', '9': '1001', 'a': 
'1010', 'b': '1011', 'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111', 'L': ''} # match left leading zeroes, but don't match a single 0 for the case of # bin(0) == '0b0' BIN_ZSTRIP = re.compile(r'^0*(?=[01])') # pylint: disable=W0622 # noinspection PyShadowingBuiltins def _bin(number): """ Adapted from http://code.activestate.com/recipes/576847/ :codeauthor: Vishal Sapre :license: MIT A foolishly simple look-up method of getting binary string from an integer This happens to be faster than all other ways!!! """ # ========================================================= # create hex of int, remove '0x'. now for each hex char, # look up binary string, append in list and join at the end. # ========================================================= # replace leading left zeroes with '0b' tmp = [BIN_HEX_DICT[hstr] for hstr in hex(number)[2:]] return BIN_ZSTRIP.sub('0b', ''.join(tmp)) if not hasattr(__builtins__, 'next'): next = _next else: next = next if not hasattr(__builtins__, 'any'): any = _any else: any = any if not hasattr(__builtins__, 'all'): all = _all else: all = all if not hasattr(__builtins__, 'bin'): bin = _bin else: bin = bin
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python import os import vtk from vtk.test import Testing class SimpleGlyph: """A simple class used to test vtkTensorGlyph.""" def __init__(self, reader): self.reader = reader sg = self.src_glyph = vtk.vtkSphereSource() sg.SetRadius(0.5) sg.SetCenter(0.5, 0.0, 0.0) g = self.glyph = vtk.vtkTensorGlyph() g.SetInputConnection(self.reader.GetOutputPort()) g.SetSourceConnection(self.src_glyph.GetOutputPort()) g.SetScaleFactor(0.25) # The normals are needed to generate the right colors and if # not used some of the glyphs are black. self.normals = vtk.vtkPolyDataNormals() self.normals.SetInputConnection(g.GetOutputPort()) self.map = vtk.vtkPolyDataMapper() self.map.SetInputConnection(self.normals.GetOutputPort()) self.act = vtk.vtkActor() self.act.SetMapper(self.map) # An outline. self.of = vtk.vtkOutlineFilter() self.of.SetInputConnection(self.reader.GetOutputPort()) self.out_map = vtk.vtkPolyDataMapper() self.out_map.SetInputConnection(self.of.GetOutputPort()) self.out_act = vtk.vtkActor() self.out_act.SetMapper(self.out_map) def GetActors(self): return self.act, self.out_act def Update(self): self.glyph.Update() s = self.glyph.GetOutput().GetPointData().GetScalars() if s: self.map.SetScalarRange(s.GetRange()) def SetPosition(self, pos): self.act.SetPosition(pos) self.out_act.SetPosition(pos) class TestTensorGlyph(Testing.vtkTest): def testGlyphs(self): '''Test if the glyphs are created nicely.''' reader = vtk.vtkDataSetReader() data_file = os.path.join(Testing.VTK_DATA_ROOT, "Data", "tensors.vtk") reader.SetFileName(data_file) g1 = SimpleGlyph(reader) g1.glyph.ColorGlyphsOff() g1.Update() g2 = SimpleGlyph(reader) g2.glyph.ExtractEigenvaluesOff() g2.Update() g2.SetPosition((2.0, 0.0, 0.0)) g3 = SimpleGlyph(reader) g3.glyph.SetColorModeToEigenvalues() g3.glyph.ThreeGlyphsOn() g3.Update() g3.SetPosition((0.0, 2.0, 0.0)) g4 = SimpleGlyph(reader) g4.glyph.SetColorModeToEigenvalues() g4.glyph.ThreeGlyphsOn() g4.glyph.SymmetricOn() g4.Update() 
g4.SetPosition((2.0, 2.0, 0.0)) ren = vtk.vtkRenderer() for i in (g1, g2, g3, g4): for j in i.GetActors(): ren.AddActor(j) ren.ResetCamera(); cam = ren.GetActiveCamera() cam.Azimuth(-20) cam.Elevation(20) cam.Zoom(1.5) ren.SetBackground(0.5, 0.5, 0.5) renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren) renWin.Render() img_file = "TestTensorGlyph.png" Testing.compareImage(renWin, Testing.getAbsImagePath(img_file)) Testing.interact() def testParse(self): "Test if vtkTensorGlyph is parseable" tg = vtk.vtkTensorGlyph() self._testParse(tg) def testGetSet(self): "Testing Get/Set methods of vtkTensorGlyph" tg = vtk.vtkTensorGlyph() self._testGetSet(tg) def testParse(self): "Testing Boolean methods of vtkTensorGlyph" tg = vtk.vtkTensorGlyph() self._testBoolean(tg) if __name__ == "__main__": Testing.main([(TestTensorGlyph, 'test')])
unknown
codeparrot/codeparrot-clean
import sys sys.path.insert(1, "../../") import h2o, tests def scale_pca_rf_pipe(): from h2o.transforms.preprocessing import H2OScaler from h2o.transforms.decomposition import H2OPCA from h2o.estimators.random_forest import H2ORandomForestEstimator from sklearn.pipeline import Pipeline from sklearn.grid_search import RandomizedSearchCV from h2o.cross_validation import H2OKFold from h2o.model.regression import h2o_r2_score from sklearn.metrics.scorer import make_scorer from scipy.stats import randint iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris_wheader.csv")) # build transformation pipeline using sklearn's Pipeline and H2O transforms pipe = Pipeline([("standardize", H2OScaler()), ("pca", H2OPCA(n_components=2)), ("rf", H2ORandomForestEstimator(seed=42,ntrees=50))]) params = {"standardize__center": [True, False], # Parameters to test "standardize__scale": [True, False], "pca__n_components": randint(2, iris[1:].shape[1]), "rf__ntrees": randint(50,60), "rf__max_depth": randint(4,8), "rf__min_rows": randint(5,10),} custom_cv = H2OKFold(iris, n_folds=5, seed=42) random_search = RandomizedSearchCV(pipe, params, n_iter=5, scoring=make_scorer(h2o_r2_score), cv=custom_cv, random_state=42, n_jobs=1) random_search.fit(iris[1:],iris[0]) print random_search.best_estimator_ if __name__ == "__main__": tests.run_test(sys.argv, scale_pca_rf_pipe)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # # Copyright (c) 2015 Red Hat # Licensed under The MIT License (MIT) # http://opensource.org/licenses/MIT # from django.db import models from django.core.exceptions import ValidationError from pdc.apps.common.models import get_cached_id class Service(models.Model): # rhn, cdn, ftp name = models.CharField(max_length=50, unique=True) description = models.CharField(max_length=200) def __unicode__(self): return u"%s" % self.name CACHE = {} @classmethod def get_cached_id(cls, value): """cached `name` to `id`""" return get_cached_id(cls, "name", value) def export(self): return { 'name': self.name, 'description': self.description } class ContentFormat(models.Model): # rpm, kickstart, iso name = models.CharField(max_length=50, unique=True) pdc_endpoint = models.CharField(max_length=200, null=True) description = models.CharField(max_length=200) def __unicode__(self): return u"%s" % self.name class ContentCategory(models.Model): # binary, debug, source name = models.CharField(max_length=50, unique=True) description = models.CharField(max_length=200) def __unicode__(self): return u"%s" % self.name CACHE = {} @classmethod def get_cached_id(cls, value): """cached `name` to `id`""" return get_cached_id(cls, "name", value) class RepoFamily(models.Model): # dist, beta, htb name = models.CharField(max_length=50, unique=True) description = models.CharField(max_length=200) def __unicode__(self): return u"%s" % self.name class RepoManager(models.Manager): def get_queryset(self): return super(RepoManager, self).get_queryset().select_related("variant_arch", "variant_arch__variant", "variant_arch__variant__release", "service", "repo_family", "content_format", "content_category") class Repo(models.Model): variant_arch = models.ForeignKey("release.VariantArch", related_name="repos", on_delete=models.PROTECT) service = models.ForeignKey(Service, on_delete=models.CASCADE) repo_family = models.ForeignKey(RepoFamily, on_delete=models.CASCADE) content_format = 
models.ForeignKey(ContentFormat, on_delete=models.CASCADE) content_category = models.ForeignKey(ContentCategory, on_delete=models.CASCADE) shadow = models.BooleanField(default=False) name = models.CharField(max_length=2000, db_index=True) # Store engineering product ID which is used to identify products shipped via CDN # Next step would be product certificate attached to each compose variant, refer to PDC-504 product_id = models.PositiveIntegerField(blank=True, null=True) objects = RepoManager() class Meta: unique_together = ("variant_arch", "service", "repo_family", "content_format", "content_category", "name", "shadow") ordering = ["name"] def __unicode__(self): return u"%s" % self.name def export(self): return { "release_id": self.variant_arch.variant.release.release_id, "variant_uid": self.variant_arch.variant.variant_uid, "arch": self.variant_arch.arch.name, "service": self.service.name, "repo_family": self.repo_family.name, "content_format": self.content_format.name, "content_category": self.content_category.name, "name": self.name, "shadow": self.shadow, "product_id": self.product_id } @property def tree(self): """Return a string representation of a tree the repo belongs to.""" return '%s.%s' % (self.variant_arch.variant.variant_uid, self.variant_arch.arch.name) class PushTarget(models.Model): name = models.CharField(max_length=100, blank=False, db_index=True, unique=True) description = models.CharField(max_length=300, blank=True) host = models.URLField(max_length=255, blank=True) service = models.ForeignKey(Service) class Meta: ordering = ["name"] def __unicode__(self): return u"%s" % self.name def export(self): return { "name": self.name, "description": self.description, "host": self.host, "service": self.service.name, } class MultiDestination(models.Model): global_component = models.ForeignKey('component.GlobalComponent') origin_repo = models.ForeignKey(Repo, related_name='origin_repo') destination_repo = models.ForeignKey(Repo, 
related_name='destination_repo') subscribers = models.ManyToManyField('contact.Person', blank=True) active = models.BooleanField(default=True) class Meta: unique_together = ('global_component', 'origin_repo', 'destination_repo') ordering = ['global_component'] def __unicode__(self): return u"%s, %s -> %s" % ( self.global_component.name, self.origin_repo.name, self.destination_repo.name) def export(self): return { "global_component": self.global_component.name, "origin_repo_id": self.origin_repo.id, "destination_repo_id": self.destination_repo.id, "subscribers": [subscriber.username for subscriber in self.subscribers.all()], "active": self.active, } def clean(self): if self.origin_repo == self.destination_repo: raise ValidationError('Origin and destination repositories must differ.') if self.origin_repo.variant_arch.arch != self.destination_repo.variant_arch.arch: raise ValidationError('Architecture for origin and destination repositories must NOT differ.') if self.origin_repo.service != self.destination_repo.service: raise ValidationError('Service for origin and destination repositories must NOT differ.') super(MultiDestination, self).clean()
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Tests\Database; use Illuminate\Database\Capsule\Manager as DB; use Illuminate\Database\Eloquent\Casts\Attribute; use Illuminate\Database\Eloquent\Model; use Illuminate\Database\Schema\Builder; use PHPUnit\Framework\TestCase; class DatabaseEloquentWithAttributesPendingTest extends TestCase { protected function setUp(): void { $db = new DB; $db->addConnection([ 'driver' => 'sqlite', 'database' => ':memory:', ]); $db->bootEloquent(); $db->setAsGlobal(); } protected function tearDown(): void { $this->schema()->dropIfExists((new PendingAttributesModel)->getTable()); parent::tearDown(); } public function testAddsAttributes(): void { $key = 'a key'; $value = 'the value'; $query = PendingAttributesModel::query() ->withAttributes([$key => $value], asConditions: false); $model = $query->make(); $this->assertSame($value, $model->$key); } public function testDoesNotAddWheres(): void { $key = 'a key'; $value = 'the value'; $query = PendingAttributesModel::query() ->withAttributes([$key => $value], asConditions: false); $wheres = $query->toBase()->wheres; // Ensure no wheres exist $this->assertEmpty($wheres); } public function testAddsWithCasts(): void { $query = PendingAttributesModel::query() ->withAttributes([ 'is_admin' => 1, 'first_name' => 'FIRST', 'last_name' => 'LAST', 'type' => PendingAttributesEnum::internal, ], asConditions: false); $model = $query->make(); $this->assertSame(true, $model->is_admin); $this->assertSame('First', $model->first_name); $this->assertSame('Last', $model->last_name); $this->assertSame(PendingAttributesEnum::internal, $model->type); $this->assertEqualsCanonicalizing([ 'is_admin' => 1, 'first_name' => 'first', 'last_name' => 'last', 'type' => 'int', ], $model->getAttributes()); } public function testAddsWithCastsViaDb(): void { $this->bootTable(); $query = PendingAttributesModel::query() ->withAttributes([ 'is_admin' => 1, 'first_name' => 'FIRST', 'last_name' => 'LAST', 'type' => PendingAttributesEnum::internal, ], 
asConditions: false); $query->create(); $model = PendingAttributesModel::first(); $this->assertSame(true, $model->is_admin); $this->assertSame('First', $model->first_name); $this->assertSame('Last', $model->last_name); $this->assertSame(PendingAttributesEnum::internal, $model->type); } protected function bootTable(): void { $this->schema()->create((new PendingAttributesModel)->getTable(), function ($table) { $table->id(); $table->boolean('is_admin'); $table->string('first_name'); $table->string('last_name'); $table->string('type'); $table->timestamps(); }); } protected function schema(): Builder { return PendingAttributesModel::getConnectionResolver()->connection()->getSchemaBuilder(); } } class PendingAttributesModel extends Model { protected $guarded = []; protected $casts = [ 'is_admin' => 'boolean', 'type' => PendingAttributesEnum::class, ]; public function setFirstNameAttribute(string $value): void { $this->attributes['first_name'] = strtolower($value); } public function getFirstNameAttribute(?string $value): string { return ucfirst($value); } protected function lastName(): Attribute { return Attribute::make( get: fn (string $value) => ucfirst($value), set: fn (string $value) => strtolower($value), ); } } enum PendingAttributesEnum: string { case internal = 'int'; }
php
github
https://github.com/laravel/framework
tests/Database/DatabaseEloquentWithAttributesPendingTest.php
import math from django.db.models.expressions import Func, Value from django.db.models.fields import FloatField, IntegerField from django.db.models.functions import Cast from django.db.models.functions.mixins import ( FixDecimalInputMixin, NumericOutputFieldMixin, ) from django.db.models.lookups import Transform class Abs(Transform): function = "ABS" lookup_name = "abs" class ACos(NumericOutputFieldMixin, Transform): function = "ACOS" lookup_name = "acos" class ASin(NumericOutputFieldMixin, Transform): function = "ASIN" lookup_name = "asin" class ATan(NumericOutputFieldMixin, Transform): function = "ATAN" lookup_name = "atan" class ATan2(NumericOutputFieldMixin, Func): function = "ATAN2" arity = 2 def as_sqlite(self, compiler, connection, **extra_context): if not getattr( connection.ops, "spatialite", False ) or connection.ops.spatial_version >= (5, 0, 0): return self.as_sql(compiler, connection) # This function is usually ATan2(y, x), returning the inverse tangent # of y / x, but it's ATan2(x, y) on SpatiaLite < 5.0.0. # Cast integers to float to avoid inconsistent/buggy behavior if the # arguments are mixed between integer and float or decimal. 
# https://www.gaia-gis.it/fossil/libspatialite/tktview?name=0f72cca3a2 clone = self.copy() clone.set_source_expressions( [ ( Cast(expression, FloatField()) if isinstance(expression.output_field, IntegerField) else expression ) for expression in self.get_source_expressions()[::-1] ] ) return clone.as_sql(compiler, connection, **extra_context) class Ceil(Transform): function = "CEILING" lookup_name = "ceil" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="CEIL", **extra_context) class Cos(NumericOutputFieldMixin, Transform): function = "COS" lookup_name = "cos" class Cot(NumericOutputFieldMixin, Transform): function = "COT" lookup_name = "cot" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="(1 / TAN(%(expressions)s))", **extra_context ) class Degrees(NumericOutputFieldMixin, Transform): function = "DEGREES" lookup_name = "degrees" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="((%%(expressions)s) * 180 / %s)" % math.pi, **extra_context, ) class Exp(NumericOutputFieldMixin, Transform): function = "EXP" lookup_name = "exp" class Floor(Transform): function = "FLOOR" lookup_name = "floor" class Ln(NumericOutputFieldMixin, Transform): function = "LN" lookup_name = "ln" class Log(FixDecimalInputMixin, NumericOutputFieldMixin, Func): function = "LOG" arity = 2 def as_sqlite(self, compiler, connection, **extra_context): if not getattr(connection.ops, "spatialite", False): return self.as_sql(compiler, connection) # This function is usually Log(b, x) returning the logarithm of x to # the base b, but on SpatiaLite it's Log(x, b). 
clone = self.copy() clone.set_source_expressions(self.get_source_expressions()[::-1]) return clone.as_sql(compiler, connection, **extra_context) class Mod(FixDecimalInputMixin, NumericOutputFieldMixin, Func): function = "MOD" arity = 2 class Pi(NumericOutputFieldMixin, Func): function = "PI" arity = 0 def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template=str(math.pi), **extra_context ) class Power(NumericOutputFieldMixin, Func): function = "POWER" arity = 2 class Radians(NumericOutputFieldMixin, Transform): function = "RADIANS" lookup_name = "radians" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="((%%(expressions)s) * %s / 180)" % math.pi, **extra_context, ) class Random(NumericOutputFieldMixin, Func): function = "RANDOM" arity = 0 def as_mysql(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="RAND", **extra_context) def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, function="DBMS_RANDOM.VALUE", **extra_context ) def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="RAND", **extra_context) def get_group_by_cols(self): return [] class Round(FixDecimalInputMixin, Transform): function = "ROUND" lookup_name = "round" arity = None # Override Transform's arity=1 to enable passing precision. 
def __init__(self, expression, precision=0, **extra): super().__init__(expression, precision, **extra) def as_sqlite(self, compiler, connection, **extra_context): precision = self.get_source_expressions()[1] if isinstance(precision, Value) and precision.value < 0: raise ValueError("SQLite does not support negative precision.") return super().as_sqlite(compiler, connection, **extra_context) def _resolve_output_field(self): source = self.get_source_expressions()[0] return source.output_field class Sign(Transform): function = "SIGN" lookup_name = "sign" class Sin(NumericOutputFieldMixin, Transform): function = "SIN" lookup_name = "sin" class Sqrt(NumericOutputFieldMixin, Transform): function = "SQRT" lookup_name = "sqrt" class Tan(NumericOutputFieldMixin, Transform): function = "TAN" lookup_name = "tan"
python
github
https://github.com/django/django
django/db/models/functions/math.py
# # setup.py # # Copyright (C) 2009 Damien Churchill <damoxc@gmail.com> # # Basic plugin template created by: # Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com> # Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com> # # Deluge is free software. # # You may redistribute it and/or modify it under the terms of the # GNU General Public License, as published by the Free Software # Foundation; either version 3 of the License, or (at your option) # any later version. # # deluge is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with deluge. If not, write to: # The Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor # Boston, MA 02110-1301, USA. # # In addition, as a special exception, the copyright holders give # permission to link the code of portions of this program with the OpenSSL # library. # You must obey the GNU General Public License in all respects for all of # the code used other than OpenSSL. If you modify file(s) with this # exception, you may extend this exception to your version of the file(s), # but you are not obligated to do so. If you do not wish to do so, delete # this exception statement from your version. If you delete this exception # statement from all source files in the program, then also delete it here. # from setuptools import setup __plugin_name__ = "WebUi" __author__ = "Damien Churchill" __author_email__ = "damoxc@gmail.com" __version__ = "0.1" __url__ = "http://deluge-torrent.org" __license__ = "GPLv3" __description__ = "Allows starting the web interface within the daemon." 
__long_description__ = """""" __pkg_data__ = {__plugin_name__.lower(): ["template/*", "data/*"]} setup( name=__plugin_name__, version=__version__, description=__description__, author=__author__, author_email=__author_email__, url=__url__, license=__license__, long_description=__long_description__ if __long_description__ else __description__, packages=[__plugin_name__.lower()], package_data = __pkg_data__, entry_points=""" [deluge.plugin.core] %s = %s:CorePlugin [deluge.plugin.gtkui] %s = %s:GtkUIPlugin """ % ((__plugin_name__, __plugin_name__.lower())*2) )
unknown
codeparrot/codeparrot-clean
<textarea name="{{ widget.name }}"{% include "django/forms/widgets/attrs.html" %}> {% if widget.value %}{{ widget.value }}{% endif %}</textarea>
html
github
https://github.com/django/django
django/forms/jinja2/django/forms/widgets/textarea.html
{ "title": "V32 No-Op Migration Test Dashboard", "schemaVersion": 31, "panels": [ { "type": "timeseries", "title": "Panel with transformations remains unchanged", "id": 1, "transformations": [ { "id": "labelsToFields", "options": { "mode": "rows", "keepLabels": ["job", "instance"] } }, { "id": "merge", "options": {} } ] }, { "type": "graph", "title": "Graph panel remains unchanged", "id": 2, "yAxes": [ { "show": true, "min": null, "max": null } ] }, { "type": "row", "title": "Row with nested panels", "id": 3, "collapsed": false, "panels": [ { "type": "stat", "title": "Nested stat panel", "id": 4, "fieldConfig": { "defaults": { "unit": "bytes" } } } ] } ], "templating": { "list": [ { "name": "environment", "type": "query", "datasource": "prometheus", "options": [] } ] }, "annotations": { "list": [ { "name": "Deployments", "datasource": "grafana", "enable": true } ] }, "time": { "from": "now-6h", "to": "now" }, "timepicker": { "refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"] } }
json
github
https://github.com/grafana/grafana
apps/dashboard/pkg/migration/testdata/input/v32.no_op_migration.json
import tempfile import os import shutil import sys import contextlib import site from ..compat import StringIO @contextlib.contextmanager def tempdir(cd=lambda dir:None, **kwargs): temp_dir = tempfile.mkdtemp(**kwargs) orig_dir = os.getcwd() try: cd(temp_dir) yield temp_dir finally: cd(orig_dir) shutil.rmtree(temp_dir) @contextlib.contextmanager def environment(**replacements): """ In a context, patch the environment with replacements. Pass None values to clear the values. """ saved = dict( (key, os.environ[key]) for key in replacements if key in os.environ ) # remove values that are null remove = (key for (key, value) in replacements.items() if value is None) for key in list(remove): os.environ.pop(key, None) replacements.pop(key) os.environ.update(replacements) try: yield saved finally: for key in replacements: os.environ.pop(key, None) os.environ.update(saved) @contextlib.contextmanager def quiet(): """ Redirect stdout/stderr to StringIO objects to prevent console output from distutils commands. """ old_stdout = sys.stdout old_stderr = sys.stderr new_stdout = sys.stdout = StringIO() new_stderr = sys.stderr = StringIO() try: yield new_stdout, new_stderr finally: new_stdout.seek(0) new_stderr.seek(0) sys.stdout = old_stdout sys.stderr = old_stderr @contextlib.contextmanager def save_user_site_setting(): saved = site.ENABLE_USER_SITE try: yield saved finally: site.ENABLE_USER_SITE = saved @contextlib.contextmanager def suppress_exceptions(*excs): try: yield except excs: pass
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe def execute(): for doc in ( { "doctype": "Sales Order", "stock_doctype": "Delivery Note", "invoice_doctype": "Sales Invoice", "stock_doctype_ref_field": "against_sales_order", "invoice_ref_field": "sales_order", "qty_field": "delivered_qty" }, { "doctype": "Purchase Order", "stock_doctype": "Purchase Receipt", "invoice_doctype": "Purchase Invoice", "stock_doctype_ref_field": "prevdoc_docname", "invoice_ref_field": "purchase_order", "qty_field": "received_qty" }): order_list = frappe.db.sql("""select name from `tab{0}` where docstatus=1 and is_recurring=1 and ifnull(recurring_id, '') != name and creation >= '2016-01-25'""" .format(doc["doctype"]), as_dict=1) for order in order_list: frappe.db.sql("""update `tab{0} Item` set {1}=0, billed_amt=0 where parent=%s""".format(doc["doctype"], doc["qty_field"]), order.name) # Check against Delivery Note and Purchase Receipt stock_doc_list = frappe.db.sql("""select distinct parent from `tab{0} Item` where docstatus=1 and ifnull({1}, '')=%s""" .format(doc["stock_doctype"], doc["stock_doctype_ref_field"]), order.name) if stock_doc_list: for dn in stock_doc_list: frappe.get_doc(doc["stock_doctype"], dn[0]).update_qty(update_modified=False) # Check against Invoice invoice_list = frappe.db.sql("""select distinct parent from `tab{0} Item` where docstatus=1 and ifnull({1}, '')=%s""" .format(doc["invoice_doctype"], doc["invoice_ref_field"]), order.name) if invoice_list: for dn in invoice_list: frappe.get_doc(doc["invoice_doctype"], dn[0]).update_qty(update_modified=False) frappe.get_doc(doc["doctype"], order.name).set_status(update=True, update_modified=False)
unknown
codeparrot/codeparrot-clean
""" Django settings for plants project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '(5lq03ub0m3dprqe&(nht*1uioi8q28snv*dtygltjdj*q)m##' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'plants.urls' WSGI_APPLICATION = 'plants.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/'
unknown
codeparrot/codeparrot-clean
"""Test correct setup/teardowns at module, class, and instance level.""" from typing import List import pytest from _pytest.pytester import Pytester def test_module_and_function_setup(pytester: Pytester) -> None: reprec = pytester.inline_runsource( """ modlevel = [] def setup_module(module): assert not modlevel module.modlevel.append(42) def teardown_module(module): modlevel.pop() def setup_function(function): function.answer = 17 def teardown_function(function): del function.answer def test_modlevel(): assert modlevel[0] == 42 assert test_modlevel.answer == 17 class TestFromClass(object): def test_module(self): assert modlevel[0] == 42 assert not hasattr(test_modlevel, 'answer') """ ) rep = reprec.matchreport("test_modlevel") assert rep.passed rep = reprec.matchreport("test_module") assert rep.passed def test_module_setup_failure_no_teardown(pytester: Pytester) -> None: reprec = pytester.inline_runsource( """ values = [] def setup_module(module): values.append(1) 0/0 def test_nothing(): pass def teardown_module(module): values.append(2) """ ) reprec.assertoutcome(failed=1) calls = reprec.getcalls("pytest_runtest_setup") assert calls[0].item.module.values == [1] def test_setup_function_failure_no_teardown(pytester: Pytester) -> None: reprec = pytester.inline_runsource( """ modlevel = [] def setup_function(function): modlevel.append(1) 0/0 def teardown_function(module): modlevel.append(2) def test_func(): pass """ ) calls = reprec.getcalls("pytest_runtest_setup") assert calls[0].item.module.modlevel == [1] def test_class_setup(pytester: Pytester) -> None: reprec = pytester.inline_runsource( """ class TestSimpleClassSetup(object): clslevel = [] def setup_class(cls): cls.clslevel.append(23) def teardown_class(cls): cls.clslevel.pop() def test_classlevel(self): assert self.clslevel[0] == 23 class TestInheritedClassSetupStillWorks(TestSimpleClassSetup): def test_classlevel_anothertime(self): assert self.clslevel == [23] def test_cleanup(): assert not 
TestSimpleClassSetup.clslevel assert not TestInheritedClassSetupStillWorks.clslevel """ ) reprec.assertoutcome(passed=1 + 2 + 1) def test_class_setup_failure_no_teardown(pytester: Pytester) -> None: reprec = pytester.inline_runsource( """ class TestSimpleClassSetup(object): clslevel = [] def setup_class(cls): 0/0 def teardown_class(cls): cls.clslevel.append(1) def test_classlevel(self): pass def test_cleanup(): assert not TestSimpleClassSetup.clslevel """ ) reprec.assertoutcome(failed=1, passed=1) def test_method_setup(pytester: Pytester) -> None: reprec = pytester.inline_runsource( """ class TestSetupMethod(object): def setup_method(self, meth): self.methsetup = meth def teardown_method(self, meth): del self.methsetup def test_some(self): assert self.methsetup == self.test_some def test_other(self): assert self.methsetup == self.test_other """ ) reprec.assertoutcome(passed=2) def test_method_setup_failure_no_teardown(pytester: Pytester) -> None: reprec = pytester.inline_runsource( """ class TestMethodSetup(object): clslevel = [] def setup_method(self, method): self.clslevel.append(1) 0/0 def teardown_method(self, method): self.clslevel.append(2) def test_method(self): pass def test_cleanup(): assert TestMethodSetup.clslevel == [1] """ ) reprec.assertoutcome(failed=1, passed=1) def test_method_setup_uses_fresh_instances(pytester: Pytester) -> None: reprec = pytester.inline_runsource( """ class TestSelfState1(object): memory = [] def test_hello(self): self.memory.append(self) def test_afterhello(self): assert self != self.memory[0] """ ) reprec.assertoutcome(passed=2, failed=0) def test_setup_that_skips_calledagain(pytester: Pytester) -> None: p = pytester.makepyfile( """ import pytest def setup_module(mod): pytest.skip("x") def test_function1(): pass def test_function2(): pass """ ) reprec = pytester.inline_run(p) reprec.assertoutcome(skipped=2) def test_setup_fails_again_on_all_tests(pytester: Pytester) -> None: p = pytester.makepyfile( """ import pytest def 
setup_module(mod): raise ValueError(42) def test_function1(): pass def test_function2(): pass """ ) reprec = pytester.inline_run(p) reprec.assertoutcome(failed=2) def test_setup_funcarg_setup_when_outer_scope_fails(pytester: Pytester) -> None: p = pytester.makepyfile( """ import pytest def setup_module(mod): raise ValueError(42) @pytest.fixture def hello(request): raise ValueError("xyz43") def test_function1(hello): pass def test_function2(hello): pass """ ) result = pytester.runpytest(p) result.stdout.fnmatch_lines( [ "*function1*", "*ValueError*42*", "*function2*", "*ValueError*42*", "*2 errors*", ] ) result.stdout.no_fnmatch_line("*xyz43*") @pytest.mark.parametrize("arg", ["", "arg"]) def test_setup_teardown_function_level_with_optional_argument( pytester: Pytester, monkeypatch, arg: str, ) -> None: """Parameter to setup/teardown xunit-style functions parameter is now optional (#1728).""" import sys trace_setups_teardowns: List[str] = [] monkeypatch.setattr( sys, "trace_setups_teardowns", trace_setups_teardowns, raising=False ) p = pytester.makepyfile( """ import pytest import sys trace = sys.trace_setups_teardowns.append def setup_module({arg}): trace('setup_module') def teardown_module({arg}): trace('teardown_module') def setup_function({arg}): trace('setup_function') def teardown_function({arg}): trace('teardown_function') def test_function_1(): pass def test_function_2(): pass class Test(object): def setup_method(self, {arg}): trace('setup_method') def teardown_method(self, {arg}): trace('teardown_method') def test_method_1(self): pass def test_method_2(self): pass """.format( arg=arg ) ) result = pytester.inline_run(p) result.assertoutcome(passed=4) expected = [ "setup_module", "setup_function", "teardown_function", "setup_function", "teardown_function", "setup_method", "teardown_method", "setup_method", "teardown_method", "teardown_module", ] assert trace_setups_teardowns == expected
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: cs_sshkeypair
short_description: Manages SSH keys on Apache CloudStack based clouds.
description:
    - Create, register and remove SSH keys.
    - If no key was found and no public key was provided and a new SSH
      private/public key pair will be created and the private key will be returned.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
  name:
    description:
      - Name of public key.
    required: true
  domain:
    description:
      - Domain the public key is related to.
    required: false
    default: null
  account:
    description:
      - Account the public key is related to.
    required: false
    default: null
  project:
    description:
      - Name of the project the public key to be registered in.
    required: false
    default: null
  state:
    description:
      - State of the public key.
    required: false
    default: 'present'
    choices: [ 'present', 'absent' ]
  public_key:
    description:
      - String of the public key.
    required: false
    default: null
extends_documentation_fragment: cloudstack
'''

EXAMPLES = '''
# create a new private / public key pair:
- local_action: cs_sshkeypair name=linus@example.com
  register: key
- debug: msg='private key is {{ key.private_key }}'

# remove a public key by its name:
- local_action: cs_sshkeypair name=linus@example.com state=absent

# register your existing local public key:
- local_action: cs_sshkeypair name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}'
'''

RETURN = '''
---
id:
  description: UUID of the SSH public key.
  returned: success
  type: string
  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
  description: Name of the SSH public key.
  returned: success
  type: string
  sample: linus@example.com
fingerprint:
  description: Fingerprint of the SSH public key.
  returned: success
  type: string
  sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28"
private_key:
  description: Private key of generated SSH keypair.
  returned: changed
  type: string
  sample: "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCkeFYjI+4k8bWfIRMzp4pCzhlopNydbbwRu824P5ilD4ATWMUG\nvEtuCQ2Mp5k5Bma30CdYHgh2/SbxC5RxXSUKTUJtTKpoJUy8PAhb1nn9dnfkC2oU\naRVi9NRUgypTIZxMpgooHOxvAzWxbZCyh1W+91Ld3FNaGxTLqTgeevY84wIDAQAB\nAoGAcwQwgLyUwsNB1vmjWwE0QEmvHS4FlhZyahhi4hGfZvbzAxSWHIK7YUT1c8KU\n9XsThEIN8aJ3GvcoL3OAqNKRnoNb14neejVHkYRadhxqc0GVN6AUIyCqoEMpvhFI\nQrinM572ORzv5ffRjCTbvZcYlW+sqFKNo5e8pYIB8TigpFECQQDu7bg9vkvg8xPs\nkP1K+EH0vsR6vUfy+m3euXjnbJtiP7RoTkZk0JQMOmexgy1qQhISWT0e451wd62v\nJ7M0trl5AkEAsDivJnMIlCCCypwPN4tdNUYpe9dtidR1zLmb3SA7wXk5xMUgLZI9\ncWPjBCMt0KKShdDhQ+hjXAyKQLF7iAPuOwJABjdHCMwvmy2XwhrPjCjDRoPEBtFv\n0sFzJE08+QBZVogDwIbwy+SlRWArnHGmN9J6N+H8dhZD3U4vxZPJ1MBAOQJBAJxO\nCv1dt1Q76gbwmYa49LnWO+F+2cgRTVODpr5iYt5fOmBQQRRqzFkRMkFvOqn+KVzM\nQ6LKM6dn8BEl295vLhUCQQCVDWzoSk3GjL3sOjfAUTyAj8VAXM69llaptxWWySPM\nE9pA+8rYmHfohYFx7FD5/KWCO+sfmxTNB48X0uwyE8tO\n-----END RSA PRIVATE KEY-----\n"
'''

# The module degrades gracefully: missing libraries are reported via
# module.fail_json() in main() instead of crashing at import time.
try:
    from cs import CloudStack, CloudStackException, read_config
    has_lib_cs = True
except ImportError:
    has_lib_cs = False

try:
    import sshpubkeys
    has_lib_sshpubkeys = True
except ImportError:
    has_lib_sshpubkeys = False

from ansible.module_utils.cloudstack import *


class AnsibleCloudStackSshKey(AnsibleCloudStack):
    """Idempotent management of a CloudStack SSH key pair (create/register/remove)."""

    def __init__(self, module):
        super(AnsibleCloudStackSshKey, self).__init__(module)
        # Maps CloudStack API result keys to the names returned by this module.
        self.returns = {
            'privatekey': 'private_key',
            'fingerprint': 'fingerprint',
        }
        # Lazy cache for the looked-up key (filled by get_ssh_key()).
        self.ssh_key = None

    def register_ssh_key(self, public_key):
        """Register ``public_key`` under the configured name.

        If a key of the same name already exists with a different
        fingerprint, it is deleted and re-registered.  Honors check mode.
        """
        ssh_key = self.get_ssh_key()

        args = {}
        args['domainid'] = self.get_domain('id')
        args['account'] = self.get_account('name')
        args['projectid'] = self.get_project('id')
        args['name'] = self.module.params.get('name')

        res = None
        if not ssh_key:
            self.result['changed'] = True
            args['publickey'] = public_key
            if not self.module.check_mode:
                res = self.cs.registerSSHKeyPair(**args)
        else:
            fingerprint = self._get_ssh_fingerprint(public_key)
            if ssh_key['fingerprint'] != fingerprint:
                self.result['changed'] = True
                if not self.module.check_mode:
                    # Fingerprint mismatch: replace the remote key.
                    # NOTE(review): nesting reconstructed from upstream —
                    # delete and re-register both sit inside the check_mode
                    # guard; confirm against the original file.
                    self.cs.deleteSSHKeyPair(**args)
                    args['publickey'] = public_key
                    res = self.cs.registerSSHKeyPair(**args)

        if res and 'keypair' in res:
            ssh_key = res['keypair']

        return ssh_key

    def create_ssh_key(self):
        """Have CloudStack generate a new key pair if none exists yet.

        The API response includes the private key (returned once, on change).
        """
        ssh_key = self.get_ssh_key()
        if not ssh_key:
            self.result['changed'] = True
            args = {}
            args['domainid'] = self.get_domain('id')
            args['account'] = self.get_account('name')
            args['projectid'] = self.get_project('id')
            args['name'] = self.module.params.get('name')
            if not self.module.check_mode:
                res = self.cs.createSSHKeyPair(**args)
                ssh_key = res['keypair']
        return ssh_key

    def remove_ssh_key(self):
        """Delete the key pair if present; returns the pre-deletion key data."""
        ssh_key = self.get_ssh_key()
        if ssh_key:
            self.result['changed'] = True
            args = {}
            args['domainid'] = self.get_domain('id')
            args['account'] = self.get_account('name')
            args['projectid'] = self.get_project('id')
            args['name'] = self.module.params.get('name')
            if not self.module.check_mode:
                # NOTE(review): the API response is discarded — intentional,
                # since the cached pre-deletion data is what gets returned.
                res = self.cs.deleteSSHKeyPair(**args)
        return ssh_key

    def get_ssh_key(self):
        """Look up the key pair by name (cached after the first call)."""
        if not self.ssh_key:
            args = {}
            args['domainid'] = self.get_domain('id')
            args['account'] = self.get_account('name')
            args['projectid'] = self.get_project('id')
            args['name'] = self.module.params.get('name')

            ssh_keys = self.cs.listSSHKeyPairs(**args)
            if ssh_keys and 'sshkeypair' in ssh_keys:
                self.ssh_key = ssh_keys['sshkeypair'][0]
        return self.ssh_key

    def _get_ssh_fingerprint(self, public_key):
        # Compute the fingerprint locally so it can be compared with the
        # value CloudStack stores for the existing key.
        key = sshpubkeys.SSHKey(public_key)
        return key.hash()


def main():
    """Ansible entry point: parse arguments and dispatch on ``state``."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name = dict(required=True),
        public_key = dict(default=None),
        domain = dict(default=None),
        account = dict(default=None),
        project = dict(default=None),
        state = dict(choices=['present', 'absent'], default='present'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    if not has_lib_cs:
        module.fail_json(msg="python library cs required: pip install cs")

    if not has_lib_sshpubkeys:
        module.fail_json(msg="python library sshpubkeys required: pip install sshpubkeys")

    try:
        acs_sshkey = AnsibleCloudStackSshKey(module)
        state = module.params.get('state')

        if state in ['absent']:
            ssh_key = acs_sshkey.remove_ssh_key()
        else:
            public_key = module.params.get('public_key')
            if public_key:
                # A key was supplied: register (or replace) it.
                ssh_key = acs_sshkey.register_ssh_key(public_key)
            else:
                # No key supplied: have CloudStack generate one.
                ssh_key = acs_sshkey.create_ssh_key()

        result = acs_sshkey.get_result(ssh_key)

    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)

# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python3

from ..core import memory, device, register
import time

"""
**py_register_machine2.core.processor**: The processor and his parts
"""


class EnigneControlBits(object):
	"""
	.. _EnigneControlBits:

	Container for the static engine controll bits.

	Used by the Processor_ to handle his ECR.

	NOTE(review): the class name misspells "Engine"; kept as-is because
	external code may reference it by this name.
	"""
	engine_stop_bit = 0b00000001


class RegisterInterface(object):
	"""
	.. _RegisterInterface:

	Used by the Processor to perform read/write operations on the registers.
	"""
	def __init__(self, registers = (), debug = 0, width = 64):
		# FIX: the ``debug`` parameter used to be ignored (hard-coded
		# ``self.debug = 0``); honor it now.
		# FIX: the mutable default argument ``registers = []`` was replaced
		# with an immutable tuple (it is only iterated, never mutated).
		self.debug = debug
		self.registers_by_name = {}
		self.registers_by_index = []
		self.width = width
		# Highest addressable register index for the given word width.
		self.size = 2 ** width - 1
		self._lock = False
		# Renamed the loop variable so it no longer shadows the imported
		# ``register`` module.
		for reg in registers:
			self.add_register(reg)

	def add_register(self, register):
		"""
		.. _add_register:

		Adds the Register_ ``register`` to the interface.

		Will raise a SetupError_ if the interface is locked (because it is running)
		or if there is already a Register with the name of the new Register
		or if the number of Registers would exceed the size of the interface.

		Returns the index of the new Register
		"""
		if(self._lock):
			raise SetupError("RegisterInterface is already running, no changes to the register layout can be performed")
		if(len(self.registers_by_index) >= self.size):
			raise SetupError("Number of Registers would exceed the address space({})".format(self.size))
		if(register.name in self.registers_by_name):
			raise SetupError("Register with name '{}' already added.".format(register.name))
		self.registers_by_name[register.name] = register
		self.registers_by_index.append(register)
		return len(self.registers_by_index) - 1

	def write(self, name_or_index, word):
		"""
		Write a word in the Register with the name ``name_or_index`` or with
		the index ``name_or_index``.

		``name_or_index`` has to be either ``str`` or ``int``.
		If the type of ``name_or_index`` is wrong an AttributeError will be raised.
		If there is no Register with the specified name or index, a NameError will be raised.
		"""
		if(isinstance(name_or_index, str)):
			if(name_or_index in self.registers_by_name):
				self.registers_by_name[name_or_index].write(word)
			else:
				raise NameError("No Register with name '{}'".format(name_or_index))
		elif( isinstance(name_or_index, int)):
			if(name_or_index < len(self.registers_by_index)):
				self.registers_by_index[name_or_index].write(word)
			else:
				raise NameError("No Register with index '{}'".format(name_or_index))
		else:
			raise AttributeError("name_or_index has to be `str` or `int`, but is {}".format(type(name_or_index)))

	def read(self, name_or_index):
		"""
		Read a word from the Register with the name ``name_or_index`` or with
		the index ``name_or_index``.

		``name_or_index`` has to be either ``str`` or ``int``.
		If the type of ``name_or_index`` is wrong an AttributeError will be raised.
		If there is no Register with the specified name or index, a NameError will be raised.
		"""
		if(isinstance(name_or_index, str)):
			if(name_or_index in self.registers_by_name):
				return self.registers_by_name[name_or_index].read()
			else:
				raise NameError("No Register with name '{}'".format(name_or_index))
		elif( isinstance(name_or_index, int)):
			if(name_or_index < len(self.registers_by_index)):
				return self.registers_by_index[name_or_index].read()
			else:
				raise NameError("No Register with index '{}'".format(name_or_index))
		else:
			raise AttributeError("name_or_index has to be `str` or `int`, but is {}".format(type(name_or_index)))


class Processor(object):
	"""
	.. _Processor:

	Fetches Opcodes from the ROM or RAM, decodes them and executes the commands.

	.. _processor_phases:

	Phases in one operation cycle:

	Fetch Phase
		The Processor fetches the Opcode (one word) from the ROM or RAM device
		according to the program counter and increases the program counter.
	Decode Phase
		The Processor looks up the Command to execute
	Fetch Operands Phase (optional)
		If requested the processor fetches the operands and increases the program counter.
	Execute Phase
		The Processor executes the Command.
	Write Back Phase (optional)
		If there is a result this result is written to a register or the RAM or a device.

	**Special Register**

	.. _PC:
	.. _ECR:
	.. _SP:

	0. The first Register (index 0) is the Program Counter(PC).
	1. The second Register (index 1) is the Engine Control Register (ECR)
	   take a look at EnigneControlBits_.
	2. The third Register (index 2) is the Stack Pointer (SP) and may be used
	   for ``call``, ``ret``, ``push`` and ``pop``

	.. _`internal constants`:

	**Internal Constants**

	Constants used by the Assembler, should be set using setup_done_

	``ROMEND_LOW``
		First word of the ROM_ (always ``0``)
	``ROMEND_HIGH``
		Last word of the ROM_
	``RAMEND_LOW``
		First word of the RAM_, (``ROMEND_HIGH + rom.size``)
	``RAMEND_HIGH``
		Last word of the RAM_
	``FLASH_START``
		First word of the Flash_(always ``0``)
	``FLASH_END``
		Last word of the Flash_
	*Interrupt Name*
		Address of the interrupt (set by invoking ``add_interrupt``)

	**Cycles**

	The number of cycles can be observed by acessing the ``cycles`` variable.
	"""
	def __init__(self, f_cpu = None, width = 64, interrupts = False, clock_barrier = None, debug = 0):
		self.memory_bus = memory.BUS(width = width, debug = debug)
		self.device_bus = device.BUS(width = width, debug = debug)
		self.register_interface = RegisterInterface(width = width, debug = debug)
		# program counter, engine control register, stack pointer
		self.register_interface.add_register(register.Register("PC", width = width))
		self.register_interface.add_register(register.Register("ECR", width = width))
		self.register_interface.add_register(register.Register("SP", width = width))
		self.f_cpu = f_cpu
		self.clock_barrier = clock_barrier
		# FIX: this check compared against the undefined name ``clock``,
		# which raised NameError whenever ``f_cpu`` was set; the Thread
		# Clock argument is named ``clock_barrier``.
		if(f_cpu != None and clock_barrier != None):
			raise SetupError("Software Clock (f_cpu) and Thread Clock (clock_barrier) are mutually exclusive")
		self.interrupt_enable = interrupts
		self.interrupts = []
		self.debug = debug
		self.commands_by_opcode = {}
		# Timestamps used by the software clock (f_cpu throttling).
		self.last_cycle = None
		self.current_cycle = None
		# Shadow copies of the three special registers (PC, ECR, SP).
		self.pc = 0
		self.ecr = 0
		self.sp = 0
		self.on_cycle_callbacks = []
		self.constants = {}
		self.cycles = 0
		# Enabled by setup_done() when a RAM is attached (needed for call/ret).
		self.push_pc = False

	def en_dis_able_interrupts(self, mask):
		"""
		This callback might be used by a Register to enable/disable Interrupts.

		``mask`` is an ``int``, the Interrupts are bits in this mask,
		the first registered interrupt has the bit ``(1 << 0)``,
		the n-th Interrupt the bit ``(1 << (n - 1))``.

		If the bit is cleared (``0``) the Interrupt will be disabled.
		"""
		for shift, interrupt in enumerate(self.interrupts):
			if(mask & (1 << shift)):
				interrupt.enable = True
			else:
				interrupt.enable = False

	def add_interrupt(self, interrupt):
		"""
		Adds the interrupt to the internal interrupt storage ``self.interrupts`` and
		registers the interrupt address in the internal constants.
		"""
		self.interrupts.append(interrupt)
		self.constants[interrupt.name] = interrupt.address

	def interrupt(self, address):
		"""
		Interrupts the Processor and forces him to jump to ``address``.
		If ``push_pc`` is enabled this will push the PC to the stack.
		"""
		if(self.push_pc):
			self.memory_bus.write_word(self.sp, self.pc)
			self._set_sp(self.sp - 1)
		self._set_pc(address)

	def _increase_pc(self):
		# Round-trip through the register so register-side masking applies.
		self.pc += 1
		self.register_interface.write(0, self.pc)
		self.pc = self.register_interface.read(0)

	def _refresh_pc(self):
		self.pc = self.register_interface.read(0)

	def _refresh_ecr(self):
		self.ecr = self.register_interface.read(1)
		if(self.debug > 5):
			print("ECR: {}".format(bin(self.ecr)))

	def _refresh_sp(self):
		self.sp = self.register_interface.read(2)
		if(self.debug > 5):
			print("SP: {}".format(bin(self.sp)))

	def _fetch_at_pc(self):
		# Fetch one word and advance the PC (used for opcode and operands).
		opcode = self.memory_bus.read_word(self.pc)
		self._increase_pc()
		return opcode

	def _set_sp(self, sp):
		self.sp = sp
		self.register_interface.write(2, sp)
		self._refresh_sp()

	def _set_pc(self, pc):
		self.pc = pc
		self.register_interface.write(0, pc)
		self._refresh_pc()

	def _set_ecr(self, ecr):
		self.ecr = ecr
		self.register_interface.write(1, ecr)
		self._refresh_ecr()

	def _execute_on_cycle_callbacks(self):
		for callback in self.on_cycle_callbacks:
			callback()

	def setup_done(self):
		"""
		.. _setup_done:

		Finish the setup of the Processor.
		This should be the last call before the Processor is used.

		Sets the `internal constants`_ (used by the assembler) and sets the
		Stack Pointer to RAMEND_HIGH, if there is a RAM attached.
		If there is no RAM attached, SP will stay ``0``.
		If there is a RAM attached ``push_pc`` is set.

		Might raise SetupError_.
		"""
		if(self.memory_bus.device_count() < 1):
			raise SetupError("At least a ROM device has to be attached.")
		rom = self.memory_bus.devices[0]
		self.constants["ROMEND_HIGH"] = rom.size - 1
		self.constants["ROMEND_LOW"] = 0
		if(self.memory_bus.device_count() > 1):
			ram = self.memory_bus.devices[1]
			self.constants["RAMEND_HIGH"] = rom.size + ram.size - 1
			self.constants["RAMEND_LOW"] = rom.size
			self._set_sp(rom.size + ram.size - 1)
			self.push_pc = True
		if(self.device_bus.device_count() > 0):
			flash = self.device_bus.devices[0]
			self.constants["FLASH_START"] = 0
			self.constants["FLASH_END"] = flash.size - 1

	def reset(self):
		"""
		Resets the control registers of the Processor_(PC_, ECR_ and SP_)
		"""
		rom = self.memory_bus.devices[0]
		if(self.memory_bus.device_count() > 1):
			ram = self.memory_bus.devices[1]
			self._set_sp(rom.size + ram.size - 1)
		self._set_pc(0)
		self._set_ecr(0)
		self.cycles = 0

	def register_on_cycle_callback(self, callback):
		"""
		A on cycle callback is executed in every clock cycle of the Processor.

		No on cycle callback modifies the state of the Processor directly,
		but it might cause an Interrupt.

		The on cycle callback is a function without arguments::

			def on_cycle_callback():
				print("One cycle done")

		The return value of a callback is ignored and the callback must not raise
		Exceptions, but fatal Errors may stop the engine.
		"""
		self.on_cycle_callbacks.append(callback)

	def register_command(self, command):
		"""
		.. _register_command:

		Register a Command in the Processor,
		the Command can now be executed by the Processor.
		"""
		if(command.opcode() in self.commands_by_opcode):
			raise SetupError("Command with opcode {}(mnemonic: {}) already registered".format(command.opcode(), command.mnemonic()))
		# Wire the engine's buses into the command so it can execute.
		command.membus = self.memory_bus
		command.devbus = self.device_bus
		command.register_interface = self.register_interface
		self.commands_by_opcode[command.opcode()] = command

	def register_memory_device(self, device):
		"""
		Registers a device in the memory BUS_.
		Invokes register_device_.
		"""
		self.memory_bus.register_device(device)

	def register_device(self, device):
		"""
		Registers a device in the device BUS_.
		Invokes register_device_.
		"""
		self.device_bus.register_device(device)

	def add_register(self, register):
		"""
		Adds a new register in the RegisterInterface_.
		Invokes add_register_.
		"""
		self.register_interface.add_register(register)

	def do_cycle(self):
		"""
		.. _do_cycle:

		Run one clock cycle of the Processor_, works according to processor_phases_.
		Then all ``on_cycle_callbacks`` are executed and the internal Registers
		are updated.

		If ``f_cpu`` is set and the execution took not long enough, ``do_cycle``
		will wait until the right time for the next cycle.

		If ``clock_barrier`` is set, ``do_cycle`` will perform the
		``clock_barrier.wait()``.

		Might raise SIGILL_, if there is an invalid opcode.
		"""
		if(self.f_cpu != None):
			if(self.last_cycle == None):
				self.last_cycle = time.time()
		# Fetch + decode.
		opcode = self._fetch_at_pc()
		if(not opcode in self.commands_by_opcode):
			raise SIGILL("Invalid opcode ({}) at {}".format(opcode, self.pc - 1))
		command = self.commands_by_opcode[opcode]
		# Fetch operands (PC advances once per operand word).
		numargs = command.numargs()
		args = [self._fetch_at_pc() for i in range(numargs)]
		if(self.debug > 2):
			print("{}|EXEC: [{}] {} $ ".format(self.pc, opcode, command.mnemonic()), *args)
		# Execute, then re-sync the shadow copies of PC/ECR/SP.
		command.exec(*args)
		self._refresh_pc()
		self._refresh_ecr()
		self._refresh_sp()
		self._execute_on_cycle_callbacks()
		self.current_cycle = time.time()
		if(self.f_cpu != None):
			# Software clock: sleep off the remainder of this cycle's budget.
			cycle_time = self.current_cycle - self.last_cycle
			if(cycle_time < ( 1 / self.f_cpu)):
				time.sleep(( 1 / self.f_cpu) - cycle_time )
			self.last_cycle = time.time()
		if(self.clock_barrier != None):
			self.clock_barrier.wait()
		self.cycles += 1

	def run(self):
		"""
		Runs do_cycle_, until either a stop bit in the ECR_ is set
		(see EnigneControlBits_), or if an Exception in do_cycle_ occurs.
		"""
		while(1):
			self.do_cycle()
			if(self.ecr & EnigneControlBits.engine_stop_bit):
				break


class SetupError(Exception):
	"""
	.. _SetupError:

	Raised if the setup is invalid.
	"""
	def __init__(self, *args):
		Exception.__init__(self, *args)


class SIGILL(Exception):
	"""
	.. _SIGILL:

	Raised if an invalid opcode occurs.
	"""
	def __init__(self, *args):
		Exception.__init__(self, *args)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python3 import json import os from wsgiref.handlers import CGIHandler from philologic.runtime.DB import DB from philologic.runtime.Query import split_terms from philologic.runtime.QuerySyntax import group_terms, parse_query import sys sys.path.append("..") import custom_functions try: from custom_functions import WebConfig except ImportError: from philologic.runtime import WebConfig try: from custom_functions import WSGIHandler except ImportError: from philologic.runtime import WSGIHandler def term_group(environ, start_response): status = "200 OK" headers = [("Content-type", "application/json; charset=UTF-8"), ("Access-Control-Allow-Origin", "*")] start_response(status, headers) config = WebConfig(os.path.abspath(os.path.dirname(__file__)).replace("scripts", "")) db = DB(config.db_path + "/data/") request = WSGIHandler(environ, config) if not request["q"]: dump = json.dumps({"original_query": "", "term_groups": []}) else: hits = db.query( request["q"], request["method"], request["arg"], sort_order=request["sort_order"], **request.metadata ) parsed = parse_query(request.q) group = group_terms(parsed) all_groups = split_terms(group) term_groups = [] for g in all_groups: term_group = "" not_started = False for kind, term in g: if kind == "NOT": if not_started is False: not_started = True term_group += " NOT " elif kind == "OR": term_group += "|" elif kind == "TERM": term_group += " %s " % term elif kind == "QUOTE": term_group += " %s " % term term_group = term_group.strip() term_groups.append(term_group) dump = json.dumps({"term_groups": term_groups, "original_query": request.original_q}) yield dump.encode("utf8") if __name__ == "__main__": CGIHandler().run(term_group)
unknown
codeparrot/codeparrot-clean
# intpyapp.py - Interactive Python application class # import win32con import win32api import win32ui import __main__ import sys import string from . import app import traceback from pywin.mfc import window, afxres, dialog import commctrl from . import dbgcommands lastLocateFileName = ".py" # used in the "File/Locate" dialog... # todo - _SetupSharedMenu should be moved to a framework class. def _SetupSharedMenu_(self): sharedMenu = self.GetSharedMenu() from pywin.framework import toolmenu toolmenu.SetToolsMenu(sharedMenu) from pywin.framework import help help.SetHelpMenuOtherHelp(sharedMenu) from pywin.mfc import docview docview.DocTemplate._SetupSharedMenu_=_SetupSharedMenu_ class MainFrame(app.MainFrame): def OnCreate(self, createStruct): self.closing = 0 if app.MainFrame.OnCreate(self, createStruct)==-1: return -1 style = win32con.WS_CHILD | afxres.CBRS_SIZE_DYNAMIC | afxres.CBRS_TOP | afxres.CBRS_TOOLTIPS | afxres.CBRS_FLYBY self.EnableDocking(afxres.CBRS_ALIGN_ANY) tb = win32ui.CreateToolBar (self, style | win32con.WS_VISIBLE) tb.ModifyStyle(0, commctrl.TBSTYLE_FLAT) tb.LoadToolBar(win32ui.IDR_MAINFRAME) tb.EnableDocking(afxres.CBRS_ALIGN_ANY) tb.SetWindowText("Standard") self.DockControlBar(tb) # Any other packages which use toolbars from pywin.debugger.debugger import PrepareControlBars PrepareControlBars(self) # Note "interact" also uses dockable windows, but they already happen # And a "Tools" menu on the main frame. menu = self.GetMenu() from . 
import toolmenu toolmenu.SetToolsMenu(menu, 2) # And fix the "Help" menu on the main frame from pywin.framework import help help.SetHelpMenuOtherHelp(menu) def OnClose(self): try: import pywin.debugger if pywin.debugger.currentDebugger is not None and pywin.debugger.currentDebugger.pumping: try: pywin.debugger.currentDebugger.close(1) except: traceback.print_exc() return except win32ui.error: pass self.closing = 1 self.SaveBarState("ToolbarDefault") self.SetActiveView(None) # Otherwise MFC's OnClose may _not_ prompt for save. from pywin.framework import help help.FinalizeHelp() self.DestroyControlBar(afxres.AFX_IDW_TOOLBAR) self.DestroyControlBar(win32ui.ID_VIEW_TOOLBAR_DBG) return self._obj_.OnClose() def DestroyControlBar(self, id): try: bar = self.GetControlBar(id) except win32ui.error: return bar.DestroyWindow() def OnCommand(self, wparam, lparam): # By default, the current MDI child frame will process WM_COMMAND # messages before any docked control bars - even if the control bar # has focus. This is a problem for the interactive window when docked. # Therefore, we detect the situation of a view having the main frame # as its parent, and assume it must be a docked view (which it will in an MDI app) try: v = self.GetActiveView() # Raise an exception if none - good - then we want default handling # Main frame _does_ have a current view (ie, a docking view) - see if it wants it. if v.OnCommand(wparam, lparam): return 1 except (win32ui.error, AttributeError): pass return self._obj_.OnCommand(wparam, lparam) class InteractivePythonApp(app.CApp): # This works if necessary - just we dont need to override the Run method. 
# def Run(self): # return self._obj_.Run() def HookCommands(self): app.CApp.HookCommands(self) dbgcommands.DebuggerCommandHandler().HookCommands() self.HookCommand(self.OnViewBrowse,win32ui.ID_VIEW_BROWSE) self.HookCommand(self.OnFileImport,win32ui.ID_FILE_IMPORT) self.HookCommand(self.OnFileCheck,win32ui.ID_FILE_CHECK) self.HookCommandUpdate(self.OnUpdateFileCheck, win32ui.ID_FILE_CHECK) self.HookCommand(self.OnFileRun,win32ui.ID_FILE_RUN) self.HookCommand(self.OnFileLocate,win32ui.ID_FILE_LOCATE) self.HookCommand(self.OnInteractiveWindow, win32ui.ID_VIEW_INTERACTIVE) self.HookCommandUpdate(self.OnUpdateInteractiveWindow, win32ui.ID_VIEW_INTERACTIVE) self.HookCommand(self.OnViewOptions, win32ui.ID_VIEW_OPTIONS) self.HookCommand(self.OnHelpIndex, afxres.ID_HELP_INDEX) self.HookCommand(self.OnFileSaveAll, win32ui.ID_FILE_SAVE_ALL) self.HookCommand(self.OnViewToolbarDbg, win32ui.ID_VIEW_TOOLBAR_DBG) self.HookCommandUpdate(self.OnUpdateViewToolbarDbg, win32ui.ID_VIEW_TOOLBAR_DBG) def CreateMainFrame(self): return MainFrame() def MakeExistingDDEConnection(self): # Use DDE to connect to an existing instance # Return None if no existing instance try: from . import intpydde except ImportError: # No dde support! return None conv = intpydde.CreateConversation(self.ddeServer) try: conv.ConnectTo("Pythonwin", "System") return conv except intpydde.error: return None def InitDDE(self): # Do all the magic DDE handling. # Returns TRUE if we have pumped the arguments to our # remote DDE app, and we should terminate. try: from . import intpydde except ImportError: self.ddeServer = None intpydde = None if intpydde is not None: self.ddeServer = intpydde.DDEServer(self) self.ddeServer.Create("Pythonwin", intpydde.CBF_FAIL_SELFCONNECTIONS ) try: # If there is an existing instance, pump the arguments to it. 
connection = self.MakeExistingDDEConnection() if connection is not None: if self.ProcessArgs(sys.argv, connection) is None: return 1 except: # It is too early to 'print' an exception - we # don't have stdout setup yet! win32ui.DisplayTraceback(sys.exc_info(), " - error in DDE conversation with Pythonwin") def InitInstance(self): # Allow "/nodde" and "/new" to optimize this! if "/nodde" not in sys.argv and "/new" not in sys.argv: if self.InitDDE(): return 1 # A remote DDE client is doing it for us! else: self.ddeServer = None win32ui.SetRegistryKey("Python %s" % (sys.winver,)) # MFC automatically puts the main frame caption on! app.CApp.InitInstance(self) # Create the taskbar icon win32ui.CreateDebuggerThread() # Allow Pythonwin to host OCX controls. win32ui.EnableControlContainer() # Display the interactive window if the user wants it. from . import interact interact.CreateInteractiveWindowUserPreference() # Load the modules we use internally. self.LoadSystemModules() # Load additional module the user may want. self.LoadUserModules() # Load the ToolBar state near the end of the init process, as # there may be Toolbar IDs created by the user or other modules. # By now all these modules should be loaded, so all the toolbar IDs loaded. try: self.frame.LoadBarState("ToolbarDefault") except win32ui.error: # MFC sucks. It does essentially "GetDlgItem(x)->Something", so if the # toolbar with ID x does not exist, MFC crashes! Pythonwin has a trap for this # but I need to investigate more how to prevent it (AFAIK, ensuring all the # toolbars are created by now _should_ stop it!) pass # Finally process the command line arguments. self.ProcessArgs(sys.argv) def ExitInstance(self): win32ui.DestroyDebuggerThread() try: from . import interact interact.DestroyInteractiveWindow() except: pass if self.ddeServer is not None: self.ddeServer.Shutdown() self.ddeServer = None return app.CApp.ExitInstance(self) def Activate(self): # Bring to the foreground. 
Mainly used when another app starts up, it asks # this one to activate itself, then it terminates. frame = win32ui.GetMainFrame() frame.SetForegroundWindow() if frame.GetWindowPlacement()[1]==win32con.SW_SHOWMINIMIZED: frame.ShowWindow(win32con.SW_RESTORE) def ProcessArgs(self, args, dde = None): # If we are going to talk to a remote app via DDE, then # activate it! if dde is not None: dde.Exec("self.Activate()") if len(args) and args[0] in ['/nodde','/new']: del args[0] # already handled. if len(args)<1 or not args[0]: # argv[0]=='' when started without args, just like Python.exe! return try: if args[0] and args[0][0]!='/': argStart = 0 argType = win32ui.GetProfileVal("Python","Default Arg Type","/edit").lower() else: argStart = 1 argType = args[0] if argStart >= len(args): raise TypeError("The command line requires an additional arg.") if argType=="/edit": # Load up the default application. if dde: fname = win32api.GetFullPathName(args[argStart]) dde.Exec("win32ui.GetApp().OpenDocumentFile(%s)" % (repr(fname))) else: win32ui.GetApp().OpenDocumentFile(args[argStart]) elif argType=="/rundlg": if dde: dde.Exec("from pywin.framework import scriptutils;scriptutils.RunScript('%s', '%s', 1)" % (args[argStart], ' '.join(args[argStart+1:]))) else: from . import scriptutils scriptutils.RunScript(args[argStart], ' '.join(args[argStart+1:])) elif argType=="/run": if dde: dde.Exec("from pywin.framework import scriptutils;scriptutils.RunScript('%s', '%s', 0)" % (args[argStart], ' '.join(args[argStart+1:]))) else: from . 
import scriptutils scriptutils.RunScript(args[argStart], ' '.join(args[argStart+1:]), 0) elif argType=="/app": raise RuntimeError("/app only supported for new instances of Pythonwin.exe") elif argType=='/dde': # Send arbitary command if dde is not None: dde.Exec(args[argStart]) else: win32ui.MessageBox("The /dde command can only be used\r\nwhen Pythonwin is already running") else: raise TypeError("Command line arguments not recognised") except: # too early for print anything. win32ui.DisplayTraceback(sys.exc_info(), " - error processing command line args") def LoadSystemModules(self): self.DoLoadModules("pywin.framework.editor,pywin.framework.stdin") def LoadUserModules(self, moduleNames = None): # Load the users modules. if moduleNames is None: default = "pywin.framework.sgrepmdi,pywin.framework.mdi_pychecker" moduleNames=win32ui.GetProfileVal('Python','Startup Modules',default) self.DoLoadModules(moduleNames) def DoLoadModules(self, moduleNames): # ", sep string of module names. if not moduleNames: return modules = moduleNames.split(",") for module in modules: try: __import__(module) except: # Catch em all, else the app itself dies! 
'ImportError: traceback.print_exc() msg = 'Startup import of user module "%s" failed' % module print(msg) win32ui.MessageBox(msg) # # DDE Callback # def OnDDECommand(self, command): try: exec(command + "\n") except: print("ERROR executing DDE command: ", command) traceback.print_exc() raise # # General handlers # def OnViewBrowse( self, id, code ): " Called when ViewBrowse message is received " from pywin.mfc import dialog from pywin.tools import browser obName = dialog.GetSimpleInput('Object', '__builtins__', 'Browse Python Object') if obName is None: return try: browser.Browse(eval(obName, __main__.__dict__, __main__.__dict__)) except NameError: win32ui.MessageBox('This is no object with this name') except AttributeError: win32ui.MessageBox('The object has no attribute of that name') except: traceback.print_exc() win32ui.MessageBox('This object can not be browsed') def OnFileImport( self, id, code ): " Called when a FileImport message is received. Import the current or specified file" from . import scriptutils scriptutils.ImportFile() def OnFileCheck( self, id, code ): " Called when a FileCheck message is received. Check the current file." from . import scriptutils scriptutils.CheckFile() def OnUpdateFileCheck(self, cmdui): from . import scriptutils cmdui.Enable( scriptutils.GetActiveFileName(0) is not None ) def OnFileRun( self, id, code ): " Called when a FileRun message is received. " from . import scriptutils showDlg = win32api.GetKeyState(win32con.VK_SHIFT) >= 0 scriptutils.RunScript(None, None, showDlg) def OnFileLocate( self, id, code ): from pywin.mfc import dialog from . import scriptutils import os global lastLocateFileName # save the new version away for next time... name = dialog.GetSimpleInput('File name', lastLocateFileName, 'Locate Python File') if name is None: # Cancelled. return lastLocateFileName = name # if ".py" supplied, rip it off! 
# should also check for .pys and .pyw if lastLocateFileName[-3:].lower()=='.py': lastLocateFileName = lastLocateFileName[:-3] lastLocateFileName = lastLocateFileName.replace(".","\\") newName = scriptutils.LocatePythonFile(lastLocateFileName) if newName is None: win32ui.MessageBox("The file '%s' can not be located" % lastLocateFileName) else: win32ui.GetApp().OpenDocumentFile(newName) # Display all the "options" proprety pages we can find def OnViewOptions(self, id, code): win32ui.InitRichEdit() sheet = dialog.PropertySheet("Pythonwin Options") # Add property pages we know about that need manual work. from pywin.dialogs import ideoptions sheet.AddPage( ideoptions.OptionsPropPage() ) from . import toolmenu sheet.AddPage( toolmenu.ToolMenuPropPage() ) # Get other dynamic pages from templates. pages = [] for template in self.GetDocTemplateList(): try: # Dont actually call the function with the exception handler. getter = template.GetPythonPropertyPages except AttributeError: # Template does not provide property pages! continue pages = pages + getter() # Debugger template goes at the end try: from pywin.debugger import configui except ImportError: configui = None if configui is not None: pages.append(configui.DebuggerOptionsPropPage()) # Now simply add the pages, and display the dialog. for page in pages: sheet.AddPage(page) if sheet.DoModal()==win32con.IDOK: win32ui.SetStatusText("Applying configuration changes...", 1) win32ui.DoWaitCursor(1) # Tell every Window in our app that win.ini has changed! win32ui.GetMainFrame().SendMessageToDescendants(win32con.WM_WININICHANGE, 0, 0) win32ui.DoWaitCursor(0) def OnInteractiveWindow(self, id, code): # toggle the existing state. from . import interact interact.ToggleInteractiveWindow() def OnUpdateInteractiveWindow(self, cmdui): try: interact=sys.modules['pywin.framework.interact'] state = interact.IsInteractiveWindowVisible() except KeyError: # Interactive module hasnt ever been imported. 
state = 0 cmdui.Enable() cmdui.SetCheck(state) def OnFileSaveAll(self, id, code): # Only attempt to save editor documents. from pywin.framework.editor import editorTemplate num = 0 for doc in editorTemplate.GetDocumentList(): if doc.IsModified() and doc.GetPathName(): num = num = 1 doc.OnSaveDocument(doc.GetPathName()) win32ui.SetStatusText("%d documents saved" % num, 1) def OnViewToolbarDbg(self, id, code): if code==0: return not win32ui.GetMainFrame().OnBarCheck(id) def OnUpdateViewToolbarDbg(self, cmdui): win32ui.GetMainFrame().OnUpdateControlBarMenu(cmdui) cmdui.Enable(1) def OnHelpIndex( self, id, code ): from . import help help.SelectAndRunHelpFile() # As per the comments in app.py, this use is depreciated. # app.AppBuilder = InteractivePythonApp # Now all we do is create the application thisApp = InteractivePythonApp()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_device_syslog short_description: Manage system-level syslog settings on BIG-IP description: - Manage system-level syslog settings on BIG-IP. version_added: 2.8 options: auth_priv_from: description: - Specifies the lowest level of messages about user authentication to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning auth_priv_to: description: - Specifies the highest level of messages about user authentication to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning console_log: description: - Enables or disables logging emergency syslog messages to the console. type: bool cron_from: description: - Specifies the lowest level of messages about time-based scheduling to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning cron_to: description: - Specifies the highest level of messages about time-based scheduling to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning daemon_from: description: - Specifies the lowest level of messages about daemon performance to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning daemon_to: description: - Specifies the highest level of messages about daemon performance to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning include: description: - Syslog-NG configuration to include in the device syslog config. 
iso_date: description: - Enables or disables the ISO date format for messages in the log files. type: bool kern_from: description: - Specifies the lowest level of kernel messages to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning kern_to: description: - Specifies the highest level of kernel messages to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning local6_from: description: - Specifies the lowest error level for messages from the local6 facility to include in the log. choices: - alert - crit - debug - emerg - err - info - notice - warning local6_to: description: - Specifies the highest error level for messages from the local6 facility to include in the log. choices: - alert - crit - debug - emerg - err - info - notice - warning mail_from: description: - Specifies the lowest level of mail log messages to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning mail_to: description: - Specifies the highest level of mail log messages to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning messages_from: description: - Specifies the lowest level of system messages to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning messages_to: description: - Specifies the highest level of system messages to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning user_log_from: description: - Specifies the lowest level of user account messages to include in the system log. choices: - alert - crit - debug - emerg - err - info - notice - warning user_log_to: description: - Specifies the highest level of user account messages to include in the system log. 
choices: - alert - crit - debug - emerg - err - info - notice - warning extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Create a ... bigip_device_syslog: name: foo provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost ''' RETURN = r''' auth_priv_from: description: The new lowest user authentication logging level returned: changed type: string sample: alert auth_priv_to: description: The new highest user authentication logging level. returned: changed type: string sample: emerg console_log: description: Whether logging to console is enabled or not. returned: changed type: bool sample: yes iso_date: description: Whether ISO date format in logs is enabled or not returned: changed type: bool sample: no cron_from: description: The new lowest time-based scheduling logging level. returned: changed type: string sample: emerg cron_to: description: The new highest time-based scheduling logging level. returned: changed type: string sample: alert daemon_from: description: The new lowest daemon performance logging level. returned: changed type: string sample: alert daemon_to: description: The new highest daemon performance logging level. returned: changed type: string sample: alert include: description: The new extra syslog-ng configuration to include in syslog config. returned: changed type: string sample: "filter f_remote_syslog { not (facility(local6)) };" kern_from: description: The new lowest kernel messages logging level. returned: changed type: string sample: alert kern_to: description: The new highest kernel messages logging level. returned: changed type: string sample: alert local6_from: description: The new lowest local6 facility logging level. returned: changed type: string sample: alert local6_to: description: The new highest local6 facility logging level. returned: changed type: string sample: alert mail_from: description: The new lowest mail log logging level. 
returned: changed type: string sample: alert mail_to: description: The new highest mail log logging level. returned: changed type: string sample: alert messages_from: description: The new lowest system logging level. returned: changed type: string sample: alert messages_to: description: The new highest system logging level. returned: changed type: string sample: alert user_log_from: description: The new lowest user account logging level. returned: changed type: string sample: alert user_log_to: description: The new highest user account logging level. returned: changed type: string sample: alert ''' from ansible.module_utils.basic import AnsibleModule try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import exit_json from library.module_utils.network.f5.common import fail_json from library.module_utils.network.f5.common import flatten_boolean from library.module_utils.network.f5.compare import cmp_str_with_none except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import exit_json from ansible.module_utils.network.f5.common import fail_json from ansible.module_utils.network.f5.common import flatten_boolean from ansible.module_utils.network.f5.compare import cmp_str_with_none class 
Parameters(AnsibleF5Parameters): api_map = { 'authPrivFrom': 'auth_priv_from', 'authPrivTo': 'auth_priv_to', 'consoleLog': 'console_log', 'cronFrom': 'cron_from', 'cronTo': 'cron_to', 'daemonFrom': 'daemon_from', 'daemonTo': 'daemon_to', 'isoDate': 'iso_date', 'kernFrom': 'kern_from', 'kernTo': 'kern_to', 'local6From': 'local6_from', 'local6To': 'local6_to', 'mailFrom': 'mail_from', 'mailTo': 'mail_to', 'messagesFrom': 'messages_from', 'messagesTo': 'messages_to', 'userLogFrom': 'user_log_from', 'userLogTo': 'user_log_to', } api_attributes = [ 'include', 'authPrivFrom', 'authPrivTo', 'consoleLog', 'cronFrom', 'cronTo', 'daemonFrom', 'daemonTo', 'isoDate', 'kernFrom', 'kernTo', 'local6From', 'local6To', 'mailFrom', 'mailTo', 'messagesFrom', 'messagesTo', 'userLogFrom', 'userLogTo', ] returnables = [ 'include', 'auth_priv_from', 'auth_priv_to', 'console_log', 'cron_from', 'cron_to', 'daemon_from', 'daemon_to', 'iso_date', 'kern_from', 'kern_to', 'local6_from', 'local6_to', 'mail_from', 'mail_to', 'messages_from', 'messages_to', 'user_log_from', 'user_log_to', ] updatables = [ 'include', 'auth_priv_from', 'auth_priv_to', 'console_log', 'cron_from', 'cron_to', 'daemon_from', 'daemon_to', 'iso_date', 'kern_from', 'kern_to', 'local6_from', 'local6_to', 'mail_from', 'mail_to', 'messages_from', 'messages_to', 'user_log_from', 'user_log_to', ] @property def console_log(self): return flatten_boolean(self._values['console_log']) @property def iso_date(self): return flatten_boolean(self._values['iso_date']) class ApiParameters(Parameters): @property def include(self): if self._values['include'] in [None, 'none']: return None return self._values['include'] class ModuleParameters(Parameters): @property def include(self): if self._values['include'] is None: return None if self._values['include'] in ['', 'none']: return '' return self._values['include'].replace('"', "'") class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: 
result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): @property def console_log(self): if self._values['console_log'] is None: return None elif self._values['console_log'] == 'yes': return 'enabled' return 'disabled' @property def iso_date(self): if self._values['iso_date'] is None: return None elif self._values['iso_date'] == 'yes': return 'enabled' return 'disabled' class ReportableChanges(Changes): @property def console_log(self): return flatten_boolean(self._values['console_log']) @property def iso_date(self): return flatten_boolean(self._values['iso_date']) class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 @property def include(self): return cmp_str_with_none(self.want.include, self.have.include) class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False 
def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): result = dict() changed = self.present() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.client.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): return self.update() def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() return True def update_on_device(self): params = self.changes.api_params() uri = "https://{0}:{1}/mgmt/tm/sys/syslog".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def read_current_from_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/syslog".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return ApiParameters(params=response) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True levels = [ 'alert', 'crit', 'debug', 'emerg', 'err', 'info', 'notice', 'warning' ] argument_spec = dict( auth_priv_from=dict(choices=levels), auth_priv_to=dict(choices=levels), 
console_log=dict(type='bool'), cron_from=dict(choices=levels), cron_to=dict(choices=levels), daemon_from=dict(choices=levels), daemon_to=dict(choices=levels), include=dict(), iso_date=dict(type='bool'), kern_from=dict(choices=levels), kern_to=dict(choices=levels), local6_from=dict(choices=levels), local6_to=dict(choices=levels), mail_from=dict(choices=levels), mail_to=dict(choices=levels), messages_from=dict(choices=levels), messages_to=dict(choices=levels), user_log_from=dict(choices=levels), user_log_to=dict(choices=levels), ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, ) client = F5RestClient(**module.params) try: mm = ModuleManager(module=module, client=client) results = mm.exec_module() cleanup_tokens(client) exit_json(module, results, client) except F5ModuleError as ex: cleanup_tokens(client) fail_json(module, ex, client) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl) # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: opendj_backendprop short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command. description: - This module will update settings for OpenDJ with the command set-backend-prop. - It will check first via de get-backend-prop if configuration needs to be applied. version_added: "2.2" author: - Werner Dijkerman options: opendj_bindir: description: - The path to the bin directory of OpenDJ. required: false default: /opt/opendj/bin hostname: description: - The hostname of the OpenDJ server. required: true port: description: - The Admin port on which the OpenDJ instance is available. required: true username: description: - The username to connect to. required: false default: cn=Directory Manager password: description: - The password for the cn=Directory Manager user. - Either password or passwordfile is needed. required: false passwordfile: description: - Location to the password file which holds the password for the cn=Directory Manager user. - Either password or passwordfile is needed. required: false backend: description: - The name of the backend on which the property needs to be updated. required: true name: description: - The configuration setting to update. required: true value: description: - The value for the configuration item. 
required: true state: description: - If configuration needs to be added/updated required: false default: "present" ''' EXAMPLES = ''' - name: "Add or update OpenDJ backend properties" action: opendj_backendprop hostname=localhost port=4444 username="cn=Directory Manager" password=password backend=userRoot name=index-entry-limit value=5000 ''' RETURN = ''' ''' from ansible.module_utils.basic import AnsibleModule class BackendProp(object): def __init__(self, module): self._module = module def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name): my_command = [ opendj_bindir + '/dsconfig', 'get-backend-prop', '-h', hostname, '--port', str(port), '--bindDN', username, '--backend-name', backend_name, '-n', '-X', '-s' ] + password_method rc, stdout, stderr = self._module.run_command(my_command) if rc == 0: return stdout else: self._module.fail_json(msg="Error message: " + str(stderr)) def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value): my_command = [ opendj_bindir + '/dsconfig', 'set-backend-prop', '-h', hostname, '--port', str(port), '--bindDN', username, '--backend-name', backend_name, '--set', name + ":" + value, '-n', '-X' ] + password_method rc, stdout, stderr = self._module.run_command(my_command) if rc == 0: return True else: self._module.fail_json(msg="Error message: " + stderr) def validate_data(self, data=None, name=None, value=None): for config_line in data.split('\n'): if config_line: split_line = config_line.split() if split_line[0] == name: if split_line[1] == value: return True return False def main(): module = AnsibleModule( argument_spec=dict( opendj_bindir=dict(default="/opt/opendj/bin", type="path"), hostname=dict(required=True), port=dict(required=True), username=dict(default="cn=Directory Manager", required=False), password=dict(required=False, no_log=True), passwordfile=dict(required=False, type="path"), backend=dict(required=True), 
name=dict(required=True), value=dict(required=True), state=dict(default="present"), ), supports_check_mode=True, mutually_exclusive=[['password', 'passwordfile']], required_one_of=[['password', 'passwordfile']] ) opendj_bindir = module.params['opendj_bindir'] hostname = module.params['hostname'] port = module.params['port'] username = module.params['username'] password = module.params['password'] passwordfile = module.params['passwordfile'] backend_name = module.params['backend'] name = module.params['name'] value = module.params['value'] state = module.params['state'] if module.params["password"] is not None: password_method = ['-w', password] elif module.params["passwordfile"] is not None: password_method = ['-j', passwordfile] opendj = BackendProp(module) validate = opendj.get_property(opendj_bindir=opendj_bindir, hostname=hostname, port=port, username=username, password_method=password_method, backend_name=backend_name) if validate: if not opendj.validate_data(data=validate, name=name, value=value): if module.check_mode: module.exit_json(changed=True) if opendj.set_property(opendj_bindir=opendj_bindir, hostname=hostname, port=port, username=username, password_method=password_method, backend_name=backend_name, name=name, value=value): module.exit_json(changed=True) else: module.exit_json(changed=False) else: module.exit_json(changed=False) else: module.exit_json(changed=False) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
/* * Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors. * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. */ package org.jetbrains.kotlin.analysis.api.standalone.fir.test.cases.generated.cases.components.symbolDeclarationRenderer; import com.intellij.testFramework.TestDataPath; import org.jetbrains.kotlin.test.util.KtTestUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.kotlin.analysis.api.standalone.fir.test.configurators.AnalysisApiFirStandaloneModeTestConfiguratorFactory; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode; import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.symbolDeclarationRenderer.AbstractRendererTest; import org.jetbrains.kotlin.test.TestMetadata; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.Test; import java.io.File; import java.util.regex.Pattern; /** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. 
DO NOT MODIFY MANUALLY */ @SuppressWarnings("all") @TestMetadata("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration") @TestDataPath("$PROJECT_ROOT") public class FirStandaloneNormalAnalysisSourceModuleRendererTestGenerated extends AbstractRendererTest { @NotNull @Override public AnalysisApiTestConfigurator getConfigurator() { return AnalysisApiFirStandaloneModeTestConfiguratorFactory.INSTANCE.createConfigurator( new AnalysisApiTestConfiguratorFactoryData( FrontendKind.Fir, TestModuleKind.Source, AnalysisSessionMode.Normal, AnalysisApiMode.Standalone ) ); } @Test @TestMetadata("actualInheritance.kt") public void testActualInheritance() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/actualInheritance.kt"); } @Test public void testAllFilesPresentInRenderDeclaration() { KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration"), Pattern.compile("^(.+)\\.kt$"), null, true); } @Test @TestMetadata("annotation.kt") public void testAnnotation() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/annotation.kt"); } @Test @TestMetadata("complexTypes.kt") public void testComplexTypes() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/complexTypes.kt"); } @Test @TestMetadata("constructorInObject.kt") public void testConstructorInObject() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/constructorInObject.kt"); } @Test @TestMetadata("constructorOfAnonymousObject.kt") public void testConstructorOfAnonymousObject() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/constructorOfAnonymousObject.kt"); } @Test @TestMetadata("contextParameter.kt") public void testContextParameter() { 
runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/contextParameter.kt"); } @Test @TestMetadata("contextReceiver.kt") public void testContextReceiver() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/contextReceiver.kt"); } @Test @TestMetadata("delegates.kt") public void testDelegates() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/delegates.kt"); } @Test @TestMetadata("derivedClass.kt") public void testDerivedClass() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/derivedClass.kt"); } @Test @TestMetadata("emptyAnonymousObject.kt") public void testEmptyAnonymousObject() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/emptyAnonymousObject.kt"); } @Test @TestMetadata("enums.kt") public void testEnums() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/enums.kt"); } @Test @TestMetadata("enums2.kt") public void testEnums2() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/enums2.kt"); } @Test @TestMetadata("expectActual.kt") public void testExpectActual() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/expectActual.kt"); } @Test @TestMetadata("F.kt") public void testF() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/F.kt"); } @Test @TestMetadata("functionTypes.kt") public void testFunctionTypes() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/functionTypes.kt"); } @Test @TestMetadata("functionalTypeAliases.kt") public void testFunctionalTypeAliases() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/functionalTypeAliases.kt"); } @Test 
@TestMetadata("genericFunctions.kt") public void testGenericFunctions() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/genericFunctions.kt"); } @Test @TestMetadata("genericProperty.kt") public void testGenericProperty() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/genericProperty.kt"); } @Test @TestMetadata("intersectionType.kt") public void testIntersectionType() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/intersectionType.kt"); } @Test @TestMetadata("nestedClass.kt") public void testNestedClass() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/nestedClass.kt"); } @Test @TestMetadata("NestedOfAliasedType.kt") public void testNestedOfAliasedType() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/NestedOfAliasedType.kt"); } @Test @TestMetadata("NestedSuperType.kt") public void testNestedSuperType() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/NestedSuperType.kt"); } @Test @TestMetadata("noPrimaryConstructor.kt") public void testNoPrimaryConstructor() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/noPrimaryConstructor.kt"); } @Test @TestMetadata("simpleClass.kt") public void testSimpleClass() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/simpleClass.kt"); } @Test @TestMetadata("simpleFun.kt") public void testSimpleFun() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/simpleFun.kt"); } @Test @TestMetadata("simpleTypeAlias.kt") public void testSimpleTypeAlias() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/simpleTypeAlias.kt"); } @Test 
@TestMetadata("typeAliasWithGeneric.kt") public void testTypeAliasWithGeneric() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/typeAliasWithGeneric.kt"); } @Test @TestMetadata("typeParameterVsNested.kt") public void testTypeParameterVsNested() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/typeParameterVsNested.kt"); } @Test @TestMetadata("typeParameters.kt") public void testTypeParameters() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/typeParameters.kt"); } @Test @TestMetadata("vararg.kt") public void testVararg() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/vararg.kt"); } @Test @TestMetadata("where.kt") public void testWhere() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/where.kt"); } @Nested @TestMetadata("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/types") @TestDataPath("$PROJECT_ROOT") public class Types { @Test public void testAllFilesPresentInTypes() { KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/types"), Pattern.compile("^(.+)\\.kt$"), null, true); } @Test @TestMetadata("annotationOnTypes.kt") public void testAnnotationOnTypes() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/types/annotationOnTypes.kt"); } @Test @TestMetadata("annotationOnTypesWithComplexExpression.kt") public void testAnnotationOnTypesWithComplexExpression() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/types/annotationOnTypesWithComplexExpression.kt"); } @Test @TestMetadata("approximatedCapturedType.kt") public void testApproximatedCapturedType() { 
runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/types/approximatedCapturedType.kt"); } @Test @TestMetadata("contextFunctionTypes.kt") public void testContextFunctionTypes() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/types/contextFunctionTypes.kt"); } @Test @TestMetadata("extraTypeArguments.kt") public void testExtraTypeArguments() { runTest("analysis/analysis-api/testData/components/symbolDeclarationRenderer/renderDeclaration/types/extraTypeArguments.kt"); } } }
java
github
https://github.com/JetBrains/kotlin
analysis/analysis-api-standalone/tests-gen/org/jetbrains/kotlin/analysis/api/standalone/fir/test/cases/generated/cases/components/symbolDeclarationRenderer/FirStandaloneNormalAnalysisSourceModuleRendererTestGenerated.java
import matplotlib.pyplot as plt import numpy as np import scattering import scipy.constants as consts d = np.linspace(0, 100, 500) * consts.milli T = 0.0 wavelengths = np.array([10.0, 5.5, 3.21]) * consts.centi lines = ['r--','b:','g-'] m_water = np.array([np.sqrt(80.255+24.313j), np.sqrt(65.476+37.026j), np.sqrt(44.593+41.449j)]) m_ice = np.array([np.sqrt(3.16835+0.02492j), np.sqrt(3.16835+0.01068j), np.sqrt(3.16835+0.0089j)]) plt.figure() for mw, mi, lam, line in zip(m_water, m_ice, wavelengths, lines): scat = scattering.scatterer(lam, T, 'water', diameters=d, ref_index=mw) scat.set_scattering_model('tmatrix') plt.subplot(1,2,1) plt.semilogy(d / consts.milli, scat.sigma_b / (consts.centi)**2, line, label='%5.2fcm Tmat' % (lam / consts.centi)) scat = scattering.scatterer(lam, T, 'ice', diameters=d, ref_index=mi) scat.set_scattering_model('tmatrix') plt.subplot(1,2,2) plt.semilogy(d / consts.milli, scat.sigma_b / (consts.centi)**2, line, label='%5.2fcm Tmat' % (lam / consts.centi)) plt.subplot(1,2,1) plt.xlabel('Diameter (mm)') plt.ylabel(r'Backscatter Cross-Section (cm$^{2}$)') plt.xlim(0,100.0) plt.ylim(1.0e-2,1e3) plt.subplot(1,2,2) plt.xlabel('Diameter (mm)') plt.ylabel(r'Backscatter Cross-Section (cm$^{2}$)') plt.xlim(0,100.0) plt.ylim(1.0e-2,1e3) plt.legend(loc='lower right') plt.show()
unknown
codeparrot/codeparrot-clean
// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package auth import ( "context" "encoding/base64" "errors" "fmt" "strings" "sync" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" "golang.org/x/crypto/bcrypt" "google.golang.org/grpc/metadata" "go.etcd.io/etcd/api/v3/authpb" pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/pkg/v3/adt" ) func dummyIndexWaiter(index uint64) <-chan struct{} { ch := make(chan struct{}, 1) go func() { ch <- struct{}{} }() return ch } // TestNewAuthStoreRevision ensures newly auth store // keeps the old revision when there are no changes. 
func TestNewAuthStoreRevision(t *testing.T) { tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) if err != nil { t.Fatal(err) } be := newBackendMock() as := NewAuthStore(zaptest.NewLogger(t), be, tp, bcrypt.MinCost) err = enableAuthAndCreateRoot(as) if err != nil { t.Fatal(err) } old := as.Revision() as.Close() // no changes to commit as = NewAuthStore(zaptest.NewLogger(t), be, tp, bcrypt.MinCost) defer as.Close() new := as.Revision() require.Equalf(t, old, new, "expected revision %d, got %d", old, new) } // TestNewAuthStoreBcryptCost ensures that NewAuthStore uses default when given bcrypt-cost is invalid func TestNewAuthStoreBcryptCost(t *testing.T) { tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) if err != nil { t.Fatal(err) } invalidCosts := [2]int{bcrypt.MinCost - 1, bcrypt.MaxCost + 1} for _, invalidCost := range invalidCosts { as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, invalidCost) defer as.Close() require.Equalf(t, bcrypt.DefaultCost, as.BcryptCost(), "expected DefaultCost when bcryptcost is invalid") } } func encodePassword(s string) string { hashedPassword, _ := bcrypt.GenerateFromPassword([]byte(s), bcrypt.MinCost) return base64.StdEncoding.EncodeToString(hashedPassword) } func setupAuthStore(t *testing.T) (store *authStore, teardownfunc func(t *testing.T)) { tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) if err != nil { t.Fatal(err) } as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost) err = enableAuthAndCreateRoot(as) if err != nil { t.Fatal(err) } // adds a new role _, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test"}) if err != nil { t.Fatal(err) } ua := &pb.AuthUserAddRequest{Name: "foo", HashedPassword: encodePassword("bar"), Options: &authpb.UserAddOptions{NoPassword: false}} _, err = as.UserAdd(ua) // add a 
non-existing user if err != nil { t.Fatal(err) } // The UserAdd function cannot generate old etcd version user data (user's option is nil) // add special users through the underlying interface asImpl, ok := as.(*authStore) require.Truef(t, ok, "addUserWithNoOption: needs an AuthStore implementation") addUserWithNoOption(asImpl) tearDown := func(_ *testing.T) { as.Close() } return asImpl, tearDown } func addUserWithNoOption(as *authStore) { tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() tx.UnsafePutUser(&authpb.User{ Name: []byte("foo-no-user-options"), Password: []byte("bar"), }) as.commitRevision(tx) as.refreshRangePermCache(tx) } func enableAuthAndCreateRoot(as AuthStore) error { _, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "root", HashedPassword: encodePassword("root"), Options: &authpb.UserAddOptions{NoPassword: false}}) if err != nil { return err } _, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "root"}) if err != nil { return err } _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "root", Role: "root"}) if err != nil { return err } return as.AuthEnable() } func TestUserAdd(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) const userName = "foo" ua := &pb.AuthUserAddRequest{Name: userName, Options: &authpb.UserAddOptions{NoPassword: false}} _, err := as.UserAdd(ua) // add an existing user require.Errorf(t, err, "expected %v, got %v", ErrUserAlreadyExist, err) require.ErrorIsf(t, err, ErrUserAlreadyExist, "expected %v, got %v", ErrUserAlreadyExist, err) ua = &pb.AuthUserAddRequest{Name: "", Options: &authpb.UserAddOptions{NoPassword: false}} _, err = as.UserAdd(ua) // add a user with empty name if !errors.Is(err, ErrUserEmpty) { t.Fatal(err) } _, ok := as.rangePermCache[userName] require.Truef(t, ok, "user %s should be added but it doesn't exist in rangePermCache", userName) } func TestRecover(t *testing.T) { as, tearDown := setupAuthStore(t) defer as.Close() defer tearDown(t) as.enabled = false as.Recover(as.be) 
require.Truef(t, as.IsAuthEnabled(), "expected auth enabled got disabled") } func TestRecoverWithEmptyRangePermCache(t *testing.T) { as, tearDown := setupAuthStore(t) defer as.Close() defer tearDown(t) as.enabled = false as.rangePermCache = map[string]*unifiedRangePermissions{} as.Recover(as.be) require.Truef(t, as.IsAuthEnabled(), "expected auth enabled got disabled") require.Lenf(t, as.rangePermCache, 3, "rangePermCache should have permission information for 3 users (\"root\" and \"foo\",\"foo-no-user-options\"), but has %d information", len(as.rangePermCache)) _, ok := as.rangePermCache["root"] require.Truef(t, ok, "user \"root\" should be created by setupAuthStore() but doesn't exist in rangePermCache") _, ok = as.rangePermCache["foo"] require.Truef(t, ok, "user \"foo\" should be created by setupAuthStore() but doesn't exist in rangePermCache") } func TestCheckPassword(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) // auth a non-existing user _, err := as.CheckPassword("foo-test", "bar") require.Errorf(t, err, "expected %v, got %v", ErrAuthFailed, err) require.ErrorIsf(t, err, ErrAuthFailed, "expected %v, got %v", ErrAuthFailed, err) // auth an existing user with correct password _, err = as.CheckPassword("foo", "bar") if err != nil { t.Fatal(err) } // auth an existing user but with wrong password _, err = as.CheckPassword("foo", "") require.Errorf(t, err, "expected %v, got %v", ErrAuthFailed, err) require.ErrorIsf(t, err, ErrAuthFailed, "expected %v, got %v", ErrAuthFailed, err) } func TestUserDelete(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) // delete an existing user const userName = "foo" ud := &pb.AuthUserDeleteRequest{Name: userName} _, err := as.UserDelete(ud) if err != nil { t.Fatal(err) } // delete a non-existing user _, err = as.UserDelete(ud) require.Errorf(t, err, "expected %v, got %v", ErrUserNotFound, err) require.ErrorIsf(t, err, ErrUserNotFound, "expected %v, got %v", ErrUserNotFound, err) _, ok := 
as.rangePermCache[userName] require.Falsef(t, ok, "user %s should be deleted but it exists in rangePermCache", userName) } func TestUserDeleteAndPermCache(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) // delete an existing user const deletedUserName = "foo" ud := &pb.AuthUserDeleteRequest{Name: deletedUserName} _, err := as.UserDelete(ud) if err != nil { t.Fatal(err) } // delete a non-existing user _, err = as.UserDelete(ud) require.ErrorIsf(t, err, ErrUserNotFound, "expected %v, got %v", ErrUserNotFound, err) _, ok := as.rangePermCache[deletedUserName] require.Falsef(t, ok, "user %s should be deleted but it exists in rangePermCache", deletedUserName) // add a new user const newUser = "bar" ua := &pb.AuthUserAddRequest{Name: newUser, HashedPassword: encodePassword("pwd1"), Options: &authpb.UserAddOptions{NoPassword: false}} _, err = as.UserAdd(ua) if err != nil { t.Fatal(err) } _, ok = as.rangePermCache[newUser] require.Truef(t, ok, "user %s should exist but it doesn't exist in rangePermCache", deletedUserName) } func TestUserChangePassword(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) ctx1 := context.WithValue(context.WithValue(t.Context(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy") _, err := as.Authenticate(ctx1, "foo", "bar") if err != nil { t.Fatal(err) } _, err = as.UserChangePassword(&pb.AuthUserChangePasswordRequest{Name: "foo", HashedPassword: encodePassword("baz")}) if err != nil { t.Fatal(err) } ctx2 := context.WithValue(context.WithValue(t.Context(), AuthenticateParamIndex{}, uint64(2)), AuthenticateParamSimpleTokenPrefix{}, "dummy") _, err = as.Authenticate(ctx2, "foo", "baz") if err != nil { t.Fatal(err) } // change a non-existing user _, err = as.UserChangePassword(&pb.AuthUserChangePasswordRequest{Name: "foo-test", HashedPassword: encodePassword("bar")}) require.Errorf(t, err, "expected %v, got %v", ErrUserNotFound, err) require.ErrorIsf(t, err, ErrUserNotFound, 
"expected %v, got %v", ErrUserNotFound, err) // change a user(user option is nil) password _, err = as.UserChangePassword(&pb.AuthUserChangePasswordRequest{Name: "foo-no-user-options", HashedPassword: encodePassword("bar")}) if err != nil { t.Fatal(err) } } func TestRoleAdd(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) // adds a new role _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) if err != nil { t.Fatal(err) } // add a role with empty name _, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: ""}) if !errors.Is(err, ErrRoleEmpty) { t.Fatal(err) } } func TestUserGrant(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) // grants a role to the user _, err := as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test"}) if err != nil { t.Fatal(err) } // grants a role to a non-existing user _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo-test", Role: "role-test"}) if err == nil { t.Errorf("expected %v, got %v", ErrUserNotFound, err) } if !errors.Is(err, ErrUserNotFound) { t.Errorf("expected %v, got %v", ErrUserNotFound, err) } } func TestHasRole(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) // grants a role to the user _, err := as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test"}) if err != nil { t.Fatal(err) } // checks role reflects correctly hr := as.HasRole("foo", "role-test") require.Truef(t, hr, "expected role granted, got false") // checks non existent role hr = as.HasRole("foo", "non-existent-role") require.Falsef(t, hr, "expected role not found, got true") // checks non existent user hr = as.HasRole("nouser", "role-test") require.Falsef(t, hr, "expected user not found got true") } func TestIsOpPermitted(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) // add new role _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) if err != nil { t.Fatal(err) } perm := &authpb.Permission{ PermType: 
authpb.Permission_WRITE, Key: []byte("Keys"), RangeEnd: []byte("RangeEnd"), } _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ Name: "role-test-1", Perm: perm, }) if err != nil { t.Fatal(err) } // grants a role to the user _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test-1"}) if err != nil { t.Fatal(err) } // check permission reflected to user err = as.isOpPermitted("foo", as.Revision(), perm.Key, perm.RangeEnd, perm.PermType) if err != nil { t.Fatal(err) } // Drop the user's permission from cache and expect a permission denied // error. as.rangePermCacheMu.Lock() delete(as.rangePermCache, "foo") as.rangePermCacheMu.Unlock() if err := as.isOpPermitted("foo", as.Revision(), perm.Key, perm.RangeEnd, perm.PermType); !errors.Is(err, ErrPermissionDenied) { t.Fatal(err) } } func TestGetUser(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) _, err := as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test"}) if err != nil { t.Fatal(err) } u, err := as.UserGet(&pb.AuthUserGetRequest{Name: "foo"}) if err != nil { t.Fatal(err) } require.NotNilf(t, u, "expect user not nil, got nil") expected := []string{"role-test"} assert.Equal(t, expected, u.Roles) // check non existent user _, err = as.UserGet(&pb.AuthUserGetRequest{Name: "nouser"}) if err == nil { t.Errorf("expected %v, got %v", ErrUserNotFound, err) } } func TestListUsers(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) ua := &pb.AuthUserAddRequest{Name: "user1", HashedPassword: encodePassword("pwd1"), Options: &authpb.UserAddOptions{NoPassword: false}} _, err := as.UserAdd(ua) // add a non-existing user if err != nil { t.Fatal(err) } ul, err := as.UserList(&pb.AuthUserListRequest{}) if err != nil { t.Fatal(err) } if !contains(ul.Users, "root") { t.Errorf("expected %v in %v", "root", ul.Users) } if !contains(ul.Users, "user1") { t.Errorf("expected %v in %v", "user1", ul.Users) } } func TestRoleGrantPermission(t 
*testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) if err != nil { t.Fatal(err) } perm := &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte("Keys"), RangeEnd: []byte("RangeEnd"), } _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ Name: "role-test-1", Perm: perm, }) if err != nil { t.Error(err) } r, err := as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"}) if err != nil { t.Fatal(err) } assert.Equal(t, perm, r.Perm[0]) // trying to grant nil permissions returns an error (and doesn't change the actual permissions!) _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ Name: "role-test-1", }) if !errors.Is(err, ErrPermissionNotGiven) { t.Error(err) } r, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"}) if err != nil { t.Fatal(err) } assert.Equal(t, perm, r.Perm[0]) } func TestRoleGrantInvalidPermission(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) if err != nil { t.Fatal(err) } tests := []struct { name string perm *authpb.Permission want error }{ { name: "valid range", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte("Keys"), RangeEnd: []byte("RangeEnd"), }, want: nil, }, { name: "invalid range: nil key", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: nil, RangeEnd: []byte("RangeEnd"), }, want: ErrInvalidAuthMgmt, }, { name: "valid range: single key", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte("Keys"), RangeEnd: nil, }, want: nil, }, { name: "valid range: single key", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte("Keys"), RangeEnd: []byte{}, }, want: nil, }, { name: "invalid range: empty (Key == RangeEnd)", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte("a"), RangeEnd: []byte("a"), }, want: ErrInvalidAuthMgmt, }, 
{ name: "invalid range: empty (Key > RangeEnd)", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte("b"), RangeEnd: []byte("a"), }, want: ErrInvalidAuthMgmt, }, { name: "invalid range: length of key is 0", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte(""), RangeEnd: []byte("a"), }, want: ErrInvalidAuthMgmt, }, { name: "invalid range: length of key is 0", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte(""), RangeEnd: []byte(""), }, want: ErrInvalidAuthMgmt, }, { name: "invalid range: length of key is 0", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte(""), RangeEnd: []byte{0x00}, }, want: ErrInvalidAuthMgmt, }, { name: "valid range: single key permission for []byte{0x00}", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte{0x00}, RangeEnd: []byte(""), }, want: nil, }, { name: "valid range: \"a\" or larger keys", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte("a"), RangeEnd: []byte{0x00}, }, want: nil, }, { name: "valid range: the entire keys", perm: &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte{0x00}, RangeEnd: []byte{0x00}, }, want: nil, }, } for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ Name: "role-test-1", Perm: tt.perm, }) if !errors.Is(err, tt.want) { t.Errorf("#%d: result=%t, want=%t", i, err, tt.want) } }) } } func TestRootRoleGrantPermission(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) perm := &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte("Keys"), RangeEnd: []byte("RangeEnd"), } _, err := as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ Name: "root", Perm: perm, }) if err != nil { t.Error(err) } r, err := as.RoleGet(&pb.AuthRoleGetRequest{Role: "root"}) if err != nil { t.Fatal(err) } // whatever grant permission to root, it always return root permission. 
expectPerm := &authpb.Permission{ PermType: authpb.Permission_READWRITE, Key: []byte{}, RangeEnd: []byte{0}, } assert.Equal(t, expectPerm, r.Perm[0]) } func TestRoleRevokePermission(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) if err != nil { t.Fatal(err) } perm := &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte("Keys"), RangeEnd: []byte("RangeEnd"), } _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ Name: "role-test-1", Perm: perm, }) if err != nil { t.Fatal(err) } _, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"}) if err != nil { t.Fatal(err) } _, err = as.RoleRevokePermission(&pb.AuthRoleRevokePermissionRequest{ Role: "role-test-1", Key: []byte("Keys"), RangeEnd: []byte("RangeEnd"), }) if err != nil { t.Fatal(err) } var r *pb.AuthRoleGetResponse r, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"}) if err != nil { t.Fatal(err) } if len(r.Perm) != 0 { t.Errorf("expected %v, got %v", 0, len(r.Perm)) } } func TestUserRevokePermission(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) if err != nil { t.Fatal(err) } const userName = "foo" _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userName, Role: "role-test"}) if err != nil { t.Fatal(err) } _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userName, Role: "role-test-1"}) if err != nil { t.Fatal(err) } perm := &authpb.Permission{ PermType: authpb.Permission_WRITE, Key: []byte("WriteKeyBegin"), RangeEnd: []byte("WriteKeyEnd"), } _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ Name: "role-test-1", Perm: perm, }) if err != nil { t.Fatal(err) } _, ok := as.rangePermCache[userName] require.Truef(t, ok, "User %s should have its entry in rangePermCache", userName) unifiedPerm := as.rangePermCache[userName] pt1 := 
adt.NewBytesAffinePoint([]byte("WriteKeyBegin")) require.Truef(t, unifiedPerm.writePerms.Contains(pt1), "rangePermCache should contain WriteKeyBegin") pt2 := adt.NewBytesAffinePoint([]byte("OutOfRange")) require.Falsef(t, unifiedPerm.writePerms.Contains(pt2), "rangePermCache should not contain OutOfRange") u, err := as.UserGet(&pb.AuthUserGetRequest{Name: userName}) if err != nil { t.Fatal(err) } expected := []string{"role-test", "role-test-1"} assert.Equal(t, expected, u.Roles) _, err = as.UserRevokeRole(&pb.AuthUserRevokeRoleRequest{Name: userName, Role: "role-test-1"}) if err != nil { t.Fatal(err) } u, err = as.UserGet(&pb.AuthUserGetRequest{Name: userName}) if err != nil { t.Fatal(err) } expected = []string{"role-test"} assert.Equal(t, expected, u.Roles) } func TestRoleDelete(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) _, err := as.RoleDelete(&pb.AuthRoleDeleteRequest{Role: "role-test"}) if err != nil { t.Fatal(err) } rl, err := as.RoleList(&pb.AuthRoleListRequest{}) if err != nil { t.Fatal(err) } expected := []string{"root"} assert.Equal(t, expected, rl.Roles) } func TestAuthInfoFromCtx(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) ctx := t.Context() ai, err := as.AuthInfoFromCtx(ctx) if err != nil && ai != nil { t.Errorf("expected (nil, nil), got (%v, %v)", ai, err) } // as if it came from RPC ctx = metadata.NewIncomingContext(t.Context(), metadata.New(map[string]string{"tokens": "dummy"})) ai, err = as.AuthInfoFromCtx(ctx) if err != nil && ai != nil { t.Errorf("expected (nil, nil), got (%v, %v)", ai, err) } ctx = context.WithValue(context.WithValue(t.Context(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy") resp, err := as.Authenticate(ctx, "foo", "bar") if err != nil { t.Error(err) } ctx = metadata.NewIncomingContext(t.Context(), metadata.New(map[string]string{rpctypes.TokenFieldNameGRPC: "Invalid Token"})) _, err = as.AuthInfoFromCtx(ctx) if !errors.Is(err, 
ErrInvalidAuthToken) { t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err) } ctx = metadata.NewIncomingContext(t.Context(), metadata.New(map[string]string{rpctypes.TokenFieldNameGRPC: "Invalid.Token"})) _, err = as.AuthInfoFromCtx(ctx) if !errors.Is(err, ErrInvalidAuthToken) { t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err) } ctx = metadata.NewIncomingContext(t.Context(), metadata.New(map[string]string{rpctypes.TokenFieldNameGRPC: resp.Token})) ai, err = as.AuthInfoFromCtx(ctx) if err != nil { t.Error(err) } if ai.Username != "foo" { t.Errorf("expected %v, got %v", "foo", ai.Username) } } func TestAuthDisable(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) as.AuthDisable() ctx := context.WithValue(context.WithValue(t.Context(), AuthenticateParamIndex{}, uint64(2)), AuthenticateParamSimpleTokenPrefix{}, "dummy") _, err := as.Authenticate(ctx, "foo", "bar") if !errors.Is(err, ErrAuthNotEnabled) { t.Errorf("expected %v, got %v", ErrAuthNotEnabled, err) } // Disabling disabled auth to make sure it can return safely if store is already disabled. 
as.AuthDisable() _, err = as.Authenticate(ctx, "foo", "bar") if !errors.Is(err, ErrAuthNotEnabled) { t.Errorf("expected %v, got %v", ErrAuthNotEnabled, err) } } func TestIsAuthEnabled(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) // enable authentication to test the first possible condition as.AuthEnable() status := as.IsAuthEnabled() ctx := context.WithValue(context.WithValue(t.Context(), AuthenticateParamIndex{}, uint64(2)), AuthenticateParamSimpleTokenPrefix{}, "dummy") _, _ = as.Authenticate(ctx, "foo", "bar") if status != true { t.Errorf("expected %v, got %v", true, false) } // Disabling disabled auth to test the other condition that can be return as.AuthDisable() status = as.IsAuthEnabled() _, _ = as.Authenticate(ctx, "foo", "bar") if status != false { t.Errorf("expected %v, got %v", false, true) } } // TestAuthInfoFromCtxRace ensures that access to authStore.revision is thread-safe. func TestAuthInfoFromCtxRace(t *testing.T) { tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) if err != nil { t.Fatal(err) } as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost) defer as.Close() donec := make(chan struct{}) go func() { defer close(donec) ctx := metadata.NewIncomingContext(t.Context(), metadata.New(map[string]string{rpctypes.TokenFieldNameGRPC: "test"})) as.AuthInfoFromCtx(ctx) }() as.UserAdd(&pb.AuthUserAddRequest{Name: "test", Options: &authpb.UserAddOptions{NoPassword: false}}) <-donec } func TestIsAdminPermitted(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) err := as.IsAdminPermitted(&AuthInfo{Username: "root", Revision: 1}) if err != nil { t.Errorf("expected nil, got %v", err) } // invalid user err = as.IsAdminPermitted(&AuthInfo{Username: "rooti", Revision: 1}) if !errors.Is(err, ErrUserNotFound) { t.Errorf("expected %v, got %v", ErrUserNotFound, err) } // empty user err = as.IsAdminPermitted(&AuthInfo{Username: "", Revision: 1}) 
if !errors.Is(err, ErrUserEmpty) { t.Errorf("expected %v, got %v", ErrUserEmpty, err) } // non-admin user err = as.IsAdminPermitted(&AuthInfo{Username: "foo", Revision: 1}) if !errors.Is(err, ErrPermissionDenied) { t.Errorf("expected %v, got %v", ErrPermissionDenied, err) } // disabled auth should return nil as.AuthDisable() err = as.IsAdminPermitted(&AuthInfo{Username: "root", Revision: 1}) if err != nil { t.Errorf("expected nil, got %v", err) } } func TestRecoverFromSnapshot(t *testing.T) { as, teardown := setupAuthStore(t) defer teardown(t) ua := &pb.AuthUserAddRequest{Name: "foo", Options: &authpb.UserAddOptions{NoPassword: false}} _, err := as.UserAdd(ua) // add an existing user require.Errorf(t, err, "expected %v, got %v", ErrUserAlreadyExist, err) require.ErrorIsf(t, err, ErrUserAlreadyExist, "expected %v, got %v", ErrUserAlreadyExist, err) ua = &pb.AuthUserAddRequest{Name: "", Options: &authpb.UserAddOptions{NoPassword: false}} _, err = as.UserAdd(ua) // add a user with empty name if !errors.Is(err, ErrUserEmpty) { t.Fatal(err) } as.Close() tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) if err != nil { t.Fatal(err) } as2 := NewAuthStore(zaptest.NewLogger(t), as.be, tp, bcrypt.MinCost) defer as2.Close() require.Truef(t, as2.IsAuthEnabled(), "recovering authStore from existing backend failed") ul, err := as.UserList(&pb.AuthUserListRequest{}) if err != nil { t.Fatal(err) } if !contains(ul.Users, "root") { t.Errorf("expected %v in %v", "root", ul.Users) } } func contains(array []string, str string) bool { for _, s := range array { if s == str { return true } } return false } func TestHammerSimpleAuthenticate(t *testing.T) { // set TTL values low to try to trigger races oldTTL, oldTTLRes := simpleTokenTTLDefault, simpleTokenTTLResolution defer func() { simpleTokenTTLDefault = oldTTL simpleTokenTTLResolution = oldTTLRes }() simpleTokenTTLDefault = 10 * time.Millisecond simpleTokenTTLResolution = 
simpleTokenTTLDefault users := make(map[string]struct{}) as, tearDown := setupAuthStore(t) defer tearDown(t) // create lots of users for i := 0; i < 50; i++ { u := fmt.Sprintf("user-%d", i) ua := &pb.AuthUserAddRequest{Name: u, HashedPassword: encodePassword("123"), Options: &authpb.UserAddOptions{NoPassword: false}} if _, err := as.UserAdd(ua); err != nil { t.Fatal(err) } users[u] = struct{}{} } // hammer on authenticate with lots of users for i := 0; i < 10; i++ { var wg sync.WaitGroup wg.Add(len(users)) for u := range users { go func(user string) { defer wg.Done() token := fmt.Sprintf("%s(%d)", user, i) ctx := context.WithValue(context.WithValue(t.Context(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, token) if _, err := as.Authenticate(ctx, user, "123"); err != nil { t.Error(err) } if _, err := as.AuthInfoFromCtx(ctx); err != nil { t.Error(err) } }(u) } time.Sleep(time.Millisecond) wg.Wait() } } // TestRolesOrder tests authpb.User.Roles is sorted func TestRolesOrder(t *testing.T) { tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) defer tp.disable() if err != nil { t.Fatal(err) } as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost) defer as.Close() err = enableAuthAndCreateRoot(as) if err != nil { t.Fatal(err) } username := "user" _, err = as.UserAdd(&pb.AuthUserAddRequest{Name: username, HashedPassword: encodePassword("pass"), Options: &authpb.UserAddOptions{NoPassword: false}}) if err != nil { t.Fatal(err) } roles := []string{"role1", "role2", "abc", "xyz", "role3"} for _, role := range roles { _, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: role}) if err != nil { t.Fatal(err) } _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: username, Role: role}) if err != nil { t.Fatal(err) } } user, err := as.UserGet(&pb.AuthUserGetRequest{Name: username}) if err != nil { t.Fatal(err) } for i := 1; i < len(user.Roles); i++ { if 
strings.Compare(user.Roles[i-1], user.Roles[i]) != -1 { t.Errorf("User.Roles isn't sorted (%s vs %s)", user.Roles[i-1], user.Roles[i]) } } } func TestAuthInfoFromCtxWithRootSimple(t *testing.T) { testAuthInfoFromCtxWithRoot(t, tokenTypeSimple) } func TestAuthInfoFromCtxWithRootJWT(t *testing.T) { opts := testJWTOpts() testAuthInfoFromCtxWithRoot(t, opts) } // testAuthInfoFromCtxWithRoot ensures "WithRoot" properly embeds token in the context. func testAuthInfoFromCtxWithRoot(t *testing.T, opts string) { tp, err := NewTokenProvider(zaptest.NewLogger(t), opts, dummyIndexWaiter, simpleTokenTTLDefault) if err != nil { t.Fatal(err) } as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost) defer as.Close() if err = enableAuthAndCreateRoot(as); err != nil { t.Fatal(err) } ctx := t.Context() ctx = as.WithRoot(ctx) ai, aerr := as.AuthInfoFromCtx(ctx) if aerr != nil { t.Fatal(err) } require.NotNilf(t, ai, "expected non-nil *AuthInfo") if ai.Username != "root" { t.Errorf("expected user name 'root', got %+v", ai) } } func TestUserNoPasswordAdd(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) username := "usernopass" ua := &pb.AuthUserAddRequest{Name: username, Options: &authpb.UserAddOptions{NoPassword: true}} _, err := as.UserAdd(ua) if err != nil { t.Fatal(err) } ctx := context.WithValue(context.WithValue(t.Context(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy") _, err = as.Authenticate(ctx, username, "") require.ErrorIsf(t, err, ErrAuthFailed, "expected %v, got %v", ErrAuthFailed, err) } func TestUserAddWithOldLog(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) ua := &pb.AuthUserAddRequest{Name: "bar", Password: "baz", Options: &authpb.UserAddOptions{NoPassword: false}} _, err := as.UserAdd(ua) if err != nil { t.Fatal(err) } } func TestUserChangePasswordWithOldLog(t *testing.T) { as, tearDown := setupAuthStore(t) defer tearDown(t) ctx1 := 
context.WithValue(context.WithValue(t.Context(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy") _, err := as.Authenticate(ctx1, "foo", "bar") if err != nil { t.Fatal(err) } _, err = as.UserChangePassword(&pb.AuthUserChangePasswordRequest{Name: "foo", Password: "baz"}) if err != nil { t.Fatal(err) } ctx2 := context.WithValue(context.WithValue(t.Context(), AuthenticateParamIndex{}, uint64(2)), AuthenticateParamSimpleTokenPrefix{}, "dummy") _, err = as.Authenticate(ctx2, "foo", "baz") if err != nil { t.Fatal(err) } // change a non-existing user _, err = as.UserChangePassword(&pb.AuthUserChangePasswordRequest{Name: "foo-test", HashedPassword: encodePassword("bar")}) require.Errorf(t, err, "expected %v, got %v", ErrUserNotFound, err) require.ErrorIsf(t, err, ErrUserNotFound, "expected %v, got %v", ErrUserNotFound, err) }
go
github
https://github.com/etcd-io/etcd
server/auth/store_test.go
""" collate_content.py ================== (c) 2014 - Edward Stronge Connects to the content generator finalized signal to combine articles and pages sharing a category into lists that will be available in the template context. Thanks to #pelican member @kura for suggestions on creating this plugin. """ from collections import defaultdict import functools import re from pelican import signals def group_content(generator, content_type): """ Assembles articles and pages into lists based on each article or page's content. These lists are available through the global context passed to the template engine. When multiple categories are present, splits category names based on commas and trims whitespace surrounding a category's name. Thus, commas may not appear within a category but they can be used to delimit categories and may be surrounded by arbitrary amounts of whitespace. For each category, substitutes '_' for all whitespace and '-' characters, then creates a list named `SUBSTITUTED_CATEGORY_NAME`_articles or `SUBSTITUTED_CATEGORY_NAME`_pages for Articles or Pages, respectively. Note that the *original* category name must appear in the `CATEGORIES_TO_COLLATE` when using this plugin with category filtering enabled. """ category_filter = generator.settings.get('CATEGORIES_TO_COLLATE', None) filtering_active = type(category_filter) in (list, tuple, set) collations = generator.context.get('collations', defaultdict(list)) for content in generator.context[content_type]: category_list = [c.strip() for c in content.category.name.split(',')] for category in category_list: if filtering_active and category not in category_filter: continue category = substitute_category_name(category) collations['%s_%s' % (category, content_type)].append(content) generator.context['collations'] = collations def substitute_category_name(category_name): """ Replaces whitespace and '-' characters in `category_name` to allow category_name to be made into a valid Python identifier. 
Doesn't check all possible ways a string might be invalid; the user of the collate_content module is advised to use categories with Python-friendly names. """ return re.sub(r'\s', '_', category_name).replace('-', '_').lower() ARTICLE_GROUPER = functools.partial(group_content, content_type='articles') PAGE_GROUPER = functools.partial(group_content, content_type='pages') def register(): """Register the new plugin""" signals.article_generator_finalized.connect(ARTICLE_GROUPER) signals.page_generator_finalized.connect(PAGE_GROUPER)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
import os

import webapp2
from webapp2_extras import jinja2

import test_base

current_dir = os.path.abspath(os.path.dirname(__file__))
template_path = os.path.join(current_dir, 'resources', 'jinja2_templates')
compiled_path = os.path.join(current_dir, 'resources', 'jinja2_templates_compiled')


class TestJinja2(test_base.BaseTestCase):
    """Exercises webapp2_extras.jinja2 rendering, configuration and registry."""

    def _app_with_globals(self, jinja2_config):
        # Build a WSGIApplication configured for webapp2_extras.jinja2 and
        # install it (together with a blank request) as the thread globals.
        application = webapp2.WSGIApplication(
            config={'webapp2_extras.jinja2': jinja2_config})
        application.set_globals(
            app=application, request=webapp2.Request.blank('/'))
        return application

    def test_render_template_with_i18n(self):
        application = self._app_with_globals({
            'template_path': template_path,
            'environment_args': {
                'autoescape': True,
                'extensions': [
                    'jinja2.ext.autoescape',
                    'jinja2.ext.with_',
                    'jinja2.ext.i18n',
                ],
            },
        })
        renderer = jinja2.Jinja2(application)
        expected = 'Hello, i18n World!'
        rendered = renderer.render_template('template2.html', message=expected)
        self.assertEqual(rendered, expected)

    def test_render_template_globals_filters(self):
        application = self._app_with_globals({
            'template_path': template_path,
            'globals': {'foo': 'fooglobal'},
            'filters': {'foo': lambda x: x + '-foofilter'},
        })
        renderer = jinja2.Jinja2(application)
        expected = 'fooglobal-foofilter'
        rendered = renderer.render_template('template3.html', message=expected)
        self.assertEqual(rendered, expected)

    def test_render_template_force_compiled(self):
        application = self._app_with_globals({
            'template_path': template_path,
            'compiled_path': compiled_path,
            'force_compiled': True,
        })
        renderer = jinja2.Jinja2(application)
        expected = 'Hello, World!'
        rendered = renderer.render_template('template1.html', message=expected)
        self.assertEqual(rendered, expected)

    def test_get_template_attribute(self):
        # No request globals installed here: only a template macro is fetched.
        application = webapp2.WSGIApplication(config={
            'webapp2_extras.jinja2': {'template_path': template_path},
        })
        renderer = jinja2.Jinja2(application)
        hello = renderer.get_template_attribute('hello.html', 'hello')
        self.assertEqual(hello('World'), 'Hello, World!')

    def test_set_jinja2(self):
        application = webapp2.WSGIApplication()
        self.assertEqual(len(application.registry), 0)
        jinja2.set_jinja2(jinja2.Jinja2(application), app=application)
        self.assertEqual(len(application.registry), 1)
        renderer = jinja2.get_jinja2(app=application)
        self.assertTrue(isinstance(renderer, jinja2.Jinja2))

    def test_get_jinja2(self):
        application = webapp2.WSGIApplication()
        self.assertEqual(len(application.registry), 0)
        renderer = jinja2.get_jinja2(app=application)
        self.assertEqual(len(application.registry), 1)
        self.assertTrue(isinstance(renderer, jinja2.Jinja2))


if __name__ == '__main__':
    test_base.main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf8 -*-
"""Extract book metadata from zlib-compressed JD.com product pages stored in
raw_real.db and insert the parsed fields (plus a live price lookup) into
info.db.  Python 2 script (urllib2)."""
from __future__ import print_function
import sqlite3
import zlib
from bs4 import BeautifulSoup
import codecs
import urllib2
import json

# Source DB: rows of (id, zlib-compressed HTML page).
db = sqlite3.connect('raw_real.db')
cur = db.cursor()
# Destination DB for the extracted fields.
info = sqlite3.connect('info.db')
infocur = info.cursor()

startid = 10000153
print("sucks1")
# BUG FIX: query parameters must be a sequence.  The original passed
# str(startid) — an 8-character string — which sqlite3 treats as 8 bindings
# for 1 placeholder and raises ProgrammingError.
cur.execute('select * from books where id>?', (startid,))

cnt = 0
while True:
    cnt += 1
    if cnt > 50:
        # Commit periodically so a crash loses at most ~50 rows.
        info.commit()
        cnt = 0
    row = cur.fetchone()
    if row is None:  # was `row == None`; identity test is the idiom
        break
    try:
        html = BeautifulSoup(zlib.decompress(row[1]), from_encoding="utf-8")
        # Parse each field once and reuse it; the original fetched the price
        # URL twice per row (once for the print, once for the INSERT), doubling
        # network traffic and risking two different values.
        title = html.find('div', id="name").find('h1').get_text(strip=True)
        price = json.load(urllib2.urlopen(
            'http://p.3.cn/prices/get?skuid=J_' + str(row[0])))[0]['p']
        image = 'http:' + html.find('div', id='spec-n1').find('img')['src']
        author = html.find('div', id='p-author').get_text(strip=True)
        url = 'http://item.jd.com/' + str(row[0]) + '.html'
        category = html.find('div', class_='breadcrumb').find('span').get_text(strip=True)[1:-1]
        params = html.find('ul', id='parameter2').get_text(strip=True)
        print(title)
        print(price)
        print(image)
        print(author)
        print(url)
        print(category)
        print(params)
        print(row[0])
        infocur.execute("INSERT INTO info VALUES (?,?,?,?,?,?,?,?)",
                        (row[0], title, price, image, author, url,
                         category, params))
    except Exception as e:
        # Best-effort scrape: a malformed page or network hiccup skips the
        # row but keeps the loop running.
        print(e)

# BUG FIX: flush the rows buffered since the last periodic commit; the
# original closed info.db without a final commit, losing up to 50 rows.
info.commit()
db.close()
info.close()
unknown
codeparrot/codeparrot-clean
/* * Copyright 2012-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.test.context; import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.function.Consumer; import org.jspecify.annotations.Nullable; import org.springframework.aot.hint.ExecutableMode; import org.springframework.aot.hint.RuntimeHints; import org.springframework.beans.BeanUtils; import org.springframework.boot.ApplicationContextFactory; import org.springframework.boot.Banner; import org.springframework.boot.SpringApplication; import org.springframework.boot.SpringApplication.AbandonedRunException; import org.springframework.boot.SpringApplicationHook; import org.springframework.boot.SpringApplicationRunListener; import org.springframework.boot.SpringBootConfiguration; import org.springframework.boot.WebApplicationType; import org.springframework.boot.bootstrap.ConfigurableBootstrapContext; import org.springframework.boot.context.event.ApplicationEnvironmentPreparedEvent; import org.springframework.boot.test.context.SpringBootTest.UseMainMethod; import org.springframework.boot.test.mock.web.SpringBootMockServletContext; import org.springframework.boot.test.util.TestPropertyValues; import org.springframework.boot.test.util.TestPropertyValues.Type; import 
org.springframework.boot.web.context.reactive.GenericReactiveWebApplicationContext; import org.springframework.boot.web.servlet.support.ServletContextApplicationContextInitializer; import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextInitializer; import org.springframework.context.ApplicationListener; import org.springframework.context.ConfigurableApplicationContext; import org.springframework.context.aot.AotApplicationContextInitializer; import org.springframework.core.KotlinDetector; import org.springframework.core.Ordered; import org.springframework.core.PriorityOrdered; import org.springframework.core.SpringVersion; import org.springframework.core.annotation.MergedAnnotations; import org.springframework.core.annotation.MergedAnnotations.SearchStrategy; import org.springframework.core.annotation.Order; import org.springframework.core.env.ConfigurableEnvironment; import org.springframework.core.io.DefaultResourceLoader; import org.springframework.core.io.ResourceLoader; import org.springframework.test.context.ContextConfigurationAttributes; import org.springframework.test.context.ContextCustomizer; import org.springframework.test.context.ContextLoadException; import org.springframework.test.context.ContextLoader; import org.springframework.test.context.MergedContextConfiguration; import org.springframework.test.context.SmartContextLoader; import org.springframework.test.context.aot.AotContextLoader; import org.springframework.test.context.support.AbstractContextLoader; import org.springframework.test.context.support.AnnotationConfigContextLoaderUtils; import org.springframework.test.context.support.TestPropertySourceUtils; import org.springframework.test.context.web.WebMergedContextConfiguration; import org.springframework.util.Assert; import org.springframework.util.ClassUtils; import org.springframework.util.ObjectUtils; import org.springframework.util.ReflectionUtils; import 
org.springframework.util.StringUtils; import org.springframework.util.function.ThrowingSupplier; import org.springframework.web.context.ConfigurableWebApplicationContext; import org.springframework.web.context.support.GenericWebApplicationContext; /** * A {@link ContextLoader} that can be used to test Spring Boot applications (those that * normally startup using {@link SpringApplication}). Although this loader can be used * directly, most test will instead want to use it with * {@link SpringBootTest @SpringBootTest}. * <p> * The loader supports both standard {@link MergedContextConfiguration} as well as * {@link WebMergedContextConfiguration}. If {@link WebMergedContextConfiguration} is used * the context will either use a mock servlet environment, or start the full embedded web * server. * <p> * If {@code @ActiveProfiles} are provided in the test class they will be used to create * the application context. * * @author Dave Syer * @author Phillip Webb * @author Andy Wilkinson * @author Stephane Nicoll * @author Madhura Bhave * @author Scott Frederick * @since 1.4.0 * @see SpringBootTest */ public class SpringBootContextLoader extends AbstractContextLoader implements AotContextLoader { private static final Consumer<SpringApplication> ALREADY_CONFIGURED = (springApplication) -> { }; private static final Object NONE = new Object(); @Override public ApplicationContext loadContext(MergedContextConfiguration mergedConfig) throws Exception { return loadContext(mergedConfig, Mode.STANDARD, null, null); } @Override public ApplicationContext loadContextForAotProcessing(MergedContextConfiguration mergedConfig, RuntimeHints runtimeHints) throws Exception { return loadContext(mergedConfig, Mode.AOT_PROCESSING, null, runtimeHints); } @Override public ApplicationContext loadContextForAotRuntime(MergedContextConfiguration mergedConfig, ApplicationContextInitializer<ConfigurableApplicationContext> initializer) throws Exception { return loadContext(mergedConfig, Mode.AOT_RUNTIME, 
initializer, null); } private ApplicationContext loadContext(MergedContextConfiguration mergedConfig, Mode mode, @Nullable ApplicationContextInitializer<ConfigurableApplicationContext> initializer, @Nullable RuntimeHints runtimeHints) throws Exception { assertHasClassesOrLocations(mergedConfig); SpringBootTestAnnotation annotation = SpringBootTestAnnotation.get(mergedConfig); String[] args = annotation.getArgs(); UseMainMethod useMainMethod = annotation.getUseMainMethod(); Method mainMethod = getMainMethod(mergedConfig, useMainMethod); if (mainMethod != null) { if (runtimeHints != null) { runtimeHints.reflection().registerMethod(mainMethod, ExecutableMode.INVOKE); } ContextLoaderHook hook = new ContextLoaderHook(mode, initializer, (application) -> configure(mergedConfig, application)); return hook.runMain(() -> { if (mainMethod.getParameterCount() == 0) { ReflectionUtils.invokeMethod(mainMethod, null); } else { ReflectionUtils.invokeMethod(mainMethod, null, new Object[] { args }); } }); } SpringApplication application = getSpringApplication(); configure(mergedConfig, application); ContextLoaderHook hook = new ContextLoaderHook(mode, initializer, ALREADY_CONFIGURED); return hook.run(() -> application.run(args)); } private void assertHasClassesOrLocations(MergedContextConfiguration mergedConfig) { boolean hasClasses = !ObjectUtils.isEmpty(mergedConfig.getClasses()); boolean hasLocations = !ObjectUtils.isEmpty(mergedConfig.getLocations()); Assert.state(hasClasses || hasLocations, () -> "No configuration classes or locations found in @SpringApplicationConfiguration. 
" + "For default configuration detection to work you need Spring 4.0.3 or better (found " + SpringVersion.getVersion() + ")."); } private @Nullable Method getMainMethod(MergedContextConfiguration mergedConfig, UseMainMethod useMainMethod) { if (useMainMethod == UseMainMethod.NEVER) { return null; } Assert.state(mergedConfig.getParent() == null, () -> "UseMainMethod.%s cannot be used with @ContextHierarchy tests".formatted(useMainMethod)); Class<?> springBootConfiguration = Arrays.stream(mergedConfig.getClasses()) .filter(this::isSpringBootConfiguration) .findFirst() .orElse(null); Assert.state(springBootConfiguration != null || useMainMethod == UseMainMethod.WHEN_AVAILABLE, "Cannot use main method as no @SpringBootConfiguration-annotated class is available"); Method mainMethod = findMainMethod(springBootConfiguration); Assert.state(mainMethod != null || useMainMethod == UseMainMethod.WHEN_AVAILABLE, () -> "Main method not found on '%s'" .formatted((springBootConfiguration != null) ? springBootConfiguration.getName() : null)); return mainMethod; } private static @Nullable Method findMainMethod(@Nullable Class<?> type) { Method mainMethod = (type != null) ? 
findMainJavaMethod(type) : null; if (mainMethod == null && KotlinDetector.isKotlinPresent()) { try { Assert.state(type != null, "'type' must not be null"); Class<?> kotlinClass = ClassUtils.forName(type.getName() + "Kt", type.getClassLoader()); mainMethod = ReflectionUtils.findMethod(kotlinClass, "main", String[].class); } catch (ClassNotFoundException ex) { // Ignore } } return mainMethod; } private static @Nullable Method findMainJavaMethod(Class<?> type) { try { Method method = getMainMethod(type); if (Modifier.isStatic(method.getModifiers())) { method.setAccessible(true); return method; } } catch (Exception ex) { // Ignore } return null; } private static Method getMainMethod(Class<?> type) throws NoSuchMethodException { try { return type.getDeclaredMethod("main", String[].class); } catch (NoSuchMethodException ex) { return type.getDeclaredMethod("main"); } } private boolean isSpringBootConfiguration(Class<?> candidate) { return MergedAnnotations.from(candidate, SearchStrategy.TYPE_HIERARCHY) .isPresent(SpringBootConfiguration.class); } private void configure(MergedContextConfiguration mergedConfig, SpringApplication application) { application.setMainApplicationClass(mergedConfig.getTestClass()); application.addPrimarySources(Arrays.asList(mergedConfig.getClasses())); application.getSources().addAll(Arrays.asList(mergedConfig.getLocations())); List<ApplicationContextInitializer<?>> initializers = getInitializers(mergedConfig, application); if (mergedConfig instanceof WebMergedContextConfiguration) { application.setWebApplicationType(WebApplicationType.SERVLET); if (!isEmbeddedWebEnvironment(mergedConfig)) { new WebConfigurer().configure(mergedConfig, initializers); } } else if (mergedConfig instanceof ReactiveWebMergedContextConfiguration) { application.setWebApplicationType(WebApplicationType.REACTIVE); } else { application.setWebApplicationType(WebApplicationType.NONE); } application.setApplicationContextFactory(getApplicationContextFactory(mergedConfig)); if 
(mergedConfig.getParent() != null) { application.setBannerMode(Banner.Mode.OFF); } application.setInitializers(initializers); ConfigurableEnvironment environment = getEnvironment(); if (environment != null) { prepareEnvironment(mergedConfig, application, environment, false); application.setEnvironment(environment); } else { application.addListeners(new PrepareEnvironmentListener(mergedConfig)); } } /** * Return the {@link ApplicationContextFactory} that should be used for the test. By * default this method will return a factory that will create an appropriate * {@link ApplicationContext} for the {@link WebApplicationType}. * @param mergedConfig the merged context configuration * @return the application context factory to use * @since 3.2.0 */ protected ApplicationContextFactory getApplicationContextFactory(MergedContextConfiguration mergedConfig) { return (webApplicationType) -> { if (webApplicationType != WebApplicationType.NONE && !isEmbeddedWebEnvironment(mergedConfig)) { if (webApplicationType == WebApplicationType.REACTIVE) { return new GenericReactiveWebApplicationContext(); } if (webApplicationType == WebApplicationType.SERVLET) { return new GenericWebApplicationContext(); } } return ApplicationContextFactory.DEFAULT.create(webApplicationType); }; } private void prepareEnvironment(MergedContextConfiguration mergedConfig, SpringApplication application, ConfigurableEnvironment environment, boolean applicationEnvironment) { setActiveProfiles(environment, mergedConfig.getActiveProfiles(), applicationEnvironment); ResourceLoader resourceLoader = (application.getResourceLoader() != null) ? 
application.getResourceLoader() : new DefaultResourceLoader(null); TestPropertySourceUtils.addPropertySourcesToEnvironment(environment, resourceLoader, mergedConfig.getPropertySourceDescriptors()); TestPropertySourceUtils.addInlinedPropertiesToEnvironment(environment, getInlinedProperties(mergedConfig)); } private void setActiveProfiles(ConfigurableEnvironment environment, String[] profiles, boolean applicationEnvironment) { if (ObjectUtils.isEmpty(profiles)) { return; } if (!applicationEnvironment) { environment.setActiveProfiles(profiles); } String[] pairs = new String[profiles.length]; for (int i = 0; i < profiles.length; i++) { pairs[i] = "spring.profiles.active[" + i + "]=" + profiles[i]; } TestPropertyValues.of(pairs).applyTo(environment, Type.MAP, "active-test-profiles"); } /** * Builds new {@link org.springframework.boot.SpringApplication} instance. This method * is only called when a {@code main} method isn't being used to create the * {@link SpringApplication}. * @return a {@link SpringApplication} instance */ protected SpringApplication getSpringApplication() { return new SpringApplication(); } /** * Returns the {@link ConfigurableEnvironment} instance that should be applied to * {@link SpringApplication} or {@code null} to use the default. You can override this * method if you need a custom environment. * @return a {@link ConfigurableEnvironment} instance */ protected @Nullable ConfigurableEnvironment getEnvironment() { return null; } protected String[] getInlinedProperties(MergedContextConfiguration mergedConfig) { ArrayList<String> properties = new ArrayList<>(); // JMX bean names will clash if the same bean is used in multiple contexts properties.add("spring.jmx.enabled=false"); properties.addAll(Arrays.asList(mergedConfig.getPropertySourceProperties())); return StringUtils.toStringArray(properties); } /** * Return the {@link ApplicationContextInitializer initializers} that will be applied * to the context. 
By default this method will adapt {@link ContextCustomizer context * customizers}, add {@link SpringApplication#getInitializers() application * initializers} and add * {@link MergedContextConfiguration#getContextInitializerClasses() initializers * specified on the test}. * @param mergedConfig the source context configuration * @param application the application instance * @return the initializers to apply * @since 2.0.0 */ protected List<ApplicationContextInitializer<?>> getInitializers(MergedContextConfiguration mergedConfig, SpringApplication application) { List<ApplicationContextInitializer<?>> initializers = new ArrayList<>(); for (ContextCustomizer contextCustomizer : mergedConfig.getContextCustomizers()) { initializers.add(new ContextCustomizerAdapter(contextCustomizer, mergedConfig)); } initializers.addAll(application.getInitializers()); for (Class<? extends ApplicationContextInitializer<?>> initializerClass : mergedConfig .getContextInitializerClasses()) { initializers.add(BeanUtils.instantiateClass(initializerClass)); } if (mergedConfig.getParent() != null) { ApplicationContext parentApplicationContext = mergedConfig.getParentApplicationContext(); initializers.add(new ParentContextApplicationContextInitializer(parentApplicationContext)); } return initializers; } private boolean isEmbeddedWebEnvironment(MergedContextConfiguration mergedConfig) { return SpringBootTestAnnotation.get(mergedConfig).getWebEnvironment().isEmbedded(); } @Override public void processContextConfiguration(ContextConfigurationAttributes configAttributes) { super.processContextConfiguration(configAttributes); if (!configAttributes.hasResources()) { Class<?>[] defaultConfigClasses = detectDefaultConfigurationClasses(configAttributes.getDeclaringClass()); configAttributes.setClasses(defaultConfigClasses); } } /** * Detect the default configuration classes for the supplied test class. 
By default * simply delegates to * {@link AnnotationConfigContextLoaderUtils#detectDefaultConfigurationClasses}. * @param declaringClass the test class that declared {@code @ContextConfiguration} * @return an array of default configuration classes, potentially empty but never * {@code null} * @see AnnotationConfigContextLoaderUtils */ protected Class<?>[] detectDefaultConfigurationClasses(Class<?> declaringClass) { return AnnotationConfigContextLoaderUtils.detectDefaultConfigurationClasses(declaringClass); } @Override protected String[] getResourceSuffixes() { return new String[] { "-context.xml", "Context.groovy" }; } @Override protected String getResourceSuffix() { throw new IllegalStateException(); } /** * Modes that the {@link SpringBootContextLoader} can operate. */ private enum Mode { /** * Load for regular usage. * @see SmartContextLoader#loadContext */ STANDARD, /** * Load for AOT processing. * @see AotContextLoader#loadContextForAotProcessing */ AOT_PROCESSING, /** * Load for AOT runtime. * @see AotContextLoader#loadContextForAotRuntime */ AOT_RUNTIME } /** * Inner class to configure {@link WebMergedContextConfiguration}. 
*/ private static final class WebConfigurer { void configure(MergedContextConfiguration mergedConfig, List<ApplicationContextInitializer<?>> initializers) { WebMergedContextConfiguration webMergedConfig = (WebMergedContextConfiguration) mergedConfig; addMockServletContext(initializers, webMergedConfig); } private void addMockServletContext(List<ApplicationContextInitializer<?>> initializers, WebMergedContextConfiguration webMergedConfig) { SpringBootMockServletContext servletContext = new SpringBootMockServletContext( webMergedConfig.getResourceBasePath()); initializers.add(0, new DefensiveWebApplicationContextInitializer( new ServletContextApplicationContextInitializer(servletContext, true))); } /** * Decorator for {@link ServletContextApplicationContextInitializer} that prevents * a failure when the context type is not as was predicted when the initializer * was registered. This can occur when spring.main.web-application-type is set to * something other than servlet. */ private static final class DefensiveWebApplicationContextInitializer implements ApplicationContextInitializer<ConfigurableApplicationContext> { private final ServletContextApplicationContextInitializer delegate; private DefensiveWebApplicationContextInitializer(ServletContextApplicationContextInitializer delegate) { this.delegate = delegate; } @Override public void initialize(ConfigurableApplicationContext applicationContext) { if (applicationContext instanceof ConfigurableWebApplicationContext webApplicationContext) { this.delegate.initialize(webApplicationContext); } } } } /** * Adapts a {@link ContextCustomizer} to a {@link ApplicationContextInitializer} so * that it can be triggered through {@link SpringApplication}. 
*/ private static class ContextCustomizerAdapter implements ApplicationContextInitializer<ConfigurableApplicationContext> { private final ContextCustomizer contextCustomizer; private final MergedContextConfiguration mergedConfig; ContextCustomizerAdapter(ContextCustomizer contextCustomizer, MergedContextConfiguration mergedConfig) { this.contextCustomizer = contextCustomizer; this.mergedConfig = mergedConfig; } @Override public void initialize(ConfigurableApplicationContext applicationContext) { this.contextCustomizer.customizeContext(applicationContext, this.mergedConfig); } } /** * {@link ApplicationContextInitializer} used to set the parent context. */ @Order(Ordered.HIGHEST_PRECEDENCE) private static class ParentContextApplicationContextInitializer implements ApplicationContextInitializer<ConfigurableApplicationContext> { private final @Nullable ApplicationContext parent; ParentContextApplicationContextInitializer(@Nullable ApplicationContext parent) { this.parent = parent; } @Override public void initialize(ConfigurableApplicationContext applicationContext) { applicationContext.setParent(this.parent); } } /** * {@link ApplicationListener} used to prepare the application created environment. */ private class PrepareEnvironmentListener implements ApplicationListener<ApplicationEnvironmentPreparedEvent>, PriorityOrdered { private final MergedContextConfiguration mergedConfig; PrepareEnvironmentListener(MergedContextConfiguration mergedConfig) { this.mergedConfig = mergedConfig; } @Override public int getOrder() { return Ordered.HIGHEST_PRECEDENCE; } @Override public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) { prepareEnvironment(this.mergedConfig, event.getSpringApplication(), event.getEnvironment(), true); } } /** * {@link SpringApplicationHook} used to capture {@link ApplicationContext} instances * and to trigger early exit for the {@link Mode#AOT_PROCESSING} mode. 
*/ private static class ContextLoaderHook implements SpringApplicationHook { private final Mode mode; private final @Nullable ApplicationContextInitializer<ConfigurableApplicationContext> initializer; private final Consumer<SpringApplication> configurer; private final List<ApplicationContext> contexts = Collections.synchronizedList(new ArrayList<>()); private final List<ApplicationContext> failedContexts = Collections.synchronizedList(new ArrayList<>()); ContextLoaderHook(Mode mode, @Nullable ApplicationContextInitializer<ConfigurableApplicationContext> initializer, Consumer<SpringApplication> configurer) { this.mode = mode; this.initializer = initializer; this.configurer = configurer; } @Override public SpringApplicationRunListener getRunListener(SpringApplication application) { return new SpringApplicationRunListener() { @Override public void starting(ConfigurableBootstrapContext bootstrapContext) { ContextLoaderHook.this.configurer.accept(application); if (ContextLoaderHook.this.mode == Mode.AOT_RUNTIME) { Assert.state(ContextLoaderHook.this.initializer != null, "'initializer' must not be null"); application.addInitializers( (AotApplicationContextInitializer<?>) ContextLoaderHook.this.initializer::initialize); } } @Override public void contextLoaded(ConfigurableApplicationContext context) { ContextLoaderHook.this.contexts.add(context); if (ContextLoaderHook.this.mode == Mode.AOT_PROCESSING) { throw new AbandonedRunException(context); } } @Override public void failed(@Nullable ConfigurableApplicationContext context, Throwable exception) { if (context != null) { ContextLoaderHook.this.failedContexts.add(context); } } }; } private ApplicationContext runMain(Runnable action) throws Exception { return run(() -> { action.run(); return NONE; }); } private ApplicationContext run(ThrowingSupplier<?> action) throws Exception { try { Object result = SpringApplication.withHook(this, action); if (result instanceof ApplicationContext context) { return context; } } catch 
(AbandonedRunException ex) { // Ignore } catch (Exception ex) { if (this.failedContexts.size() == 1) { throw new ContextLoadException(this.failedContexts.get(0), ex); } throw ex; } List<ApplicationContext> rootContexts = this.contexts.stream() .filter((context) -> context.getParent() == null) .toList(); Assert.state(!rootContexts.isEmpty(), "No root application context located"); Assert.state(rootContexts.size() == 1, "No unique root application context located"); return rootContexts.get(0); } } }
java
github
https://github.com/spring-projects/spring-boot
core/spring-boot-test/src/main/java/org/springframework/boot/test/context/SpringBootContextLoader.java
# Threaded line-echo server demo: each accepted client runs on its own
# thread and has every line it sends written straight back to it.
# usage: ruby tsvr.rb

require "socket"

# Port 0 asks the OS for any free port; report where we actually landed.
server = TCPServer.open(0)
location = server.addr
location.shift
printf("server is on %s\n", location.join(":"))

loop do
  Thread.start(server.accept) do |client|
    print(client, " is accepted\n")
    # gets returns nil once the peer closes its side of the connection.
    until (message = client.gets).nil?
      client.write(message)
    end
    print(client, " is gone\n")
    client.close
  end
end
ruby
github
https://github.com/ruby/ruby
sample/tsvr.rb
import functools
import os
import pprint
import shutil
import sys

import qcengine as qcng

import psi4
import pytest

pp = pprint.PrettyPrinter(width=120)

__all__ = [
    'a2a',
    'compare',
    'compare_integers',
    'compare_strings',
    'compare_values',
    'compare_arrays',
    'compare_recursive',
    'compare_molrecs',
    'compare_cubes',
    'compare_vectors',
    'compare_matrices',
    'compare_wavefunctions',
    'compare_fcidumps',
    'compare_fchkfiles',
    'run_psi4_cli',
    'tnm',
]

# CODATA ratio 2014 / 2010 Bohr to Angstroms conversion factor
a2a = 0.52917721067 / 0.52917720859


def true_false_decorator(compare_fn):
    """Turn *compare_fn*, which raises ``psi4.TestComparisonError`` on failure,
    into a function suitable for assertions in pytest.

    The wrapped function returns True on success and False on failure. When it
    is called with a ``return_message`` keyword argument, it instead returns a
    ``(bool, message)`` tuple so the caller can see why a comparison failed.

    Note: the previous signature also accepted ``*args, **kwargs`` here at
    decoration time, but they were never used; they have been removed (all
    call sites pass exactly one argument).
    """

    @functools.wraps(compare_fn)
    def true_false_wrapper(*args, **kwargs):
        try:
            compare_fn(*args, **kwargs)
        except psi4.TestComparisonError as err:
            return (False, err) if "return_message" in kwargs else False
        else:
            return (True, "") if "return_message" in kwargs else True

    return true_false_wrapper


compare = true_false_decorator(psi4.compare)
compare_integers = true_false_decorator(psi4.compare_integers)
compare_strings = true_false_decorator(psi4.compare_strings)
compare_values = true_false_decorator(psi4.compare_values)
compare_arrays = true_false_decorator(psi4.compare_arrays)
compare_recursive = true_false_decorator(psi4.compare_recursive)
compare_molrecs = true_false_decorator(psi4.compare_molrecs)
compare_cubes = true_false_decorator(psi4.compare_cubes)
compare_vectors = true_false_decorator(psi4.compare_vectors)
compare_matrices = true_false_decorator(psi4.compare_matrices)
compare_fcidumps = true_false_decorator(psi4.compare_fcidumps)
compare_wavefunctions = true_false_decorator(psi4.compare_wavefunctions)
compare_fchkfiles = true_false_decorator(psi4.compare_fchkfiles)


def tnm():
    """Returns the name of the calling function, usually name of test case."""
    return sys._getframe().f_back.f_code.co_name


def run_psi4_cli(inputs, outputs, extra_commands=None, as_binary=None):
    """Run Psi4 from the CLI in a subprocess.

    Parameters
    ----------
    inputs : dict
        Mapping of input filename -> contents, passed to ``qcng.util.execute``;
        the filenames are also appended to the command line.
    outputs : list
        Output filenames to collect after the run.
    extra_commands : list, optional
        Additional command-line arguments inserted before the input files.
    as_binary : list, optional
        Filenames to treat as binary, forwarded to ``qcng.util.execute``.

    Returns
    -------
    tuple
        ``(success, ret)`` as returned by ``qcng.util.execute``.
    """
    if extra_commands is None:
        extra_commands = []

    cmds = None
    psidir = os.path.dirname(os.path.abspath(psi4.__file__))

    # Prefer an in-place (development) runner sitting next to the psi4 package.
    psi_runner = os.path.join(psidir, "run_psi4.py")
    if os.path.isfile(psi_runner):
        cmds = [sys.executable, psi_runner, "--inplace"]

    # Otherwise look for an installed `psi4` entry point in the sibling bin/.
    if cmds is None:
        binpath = os.path.join(os.path.dirname(os.path.dirname(psidir)), "bin")
        psi_bin = shutil.which('psi4', path=binpath)
        if psi_bin:
            cmds = [psi_bin]

    # No executable found anywhere: skip the calling test rather than fail it.
    if cmds is None:
        pytest.skip("Could not find Psi4 executable.")

    cmds = cmds + extra_commands + list(inputs.keys())
    success, ret = qcng.util.execute(cmds, inputs, outputs, as_binary=as_binary)
    return (success, ret)
unknown
codeparrot/codeparrot-clean
{
  "columns": {
    "description": "Opis",
    "key": "Klucz",
    "name": "Nazwa",
    "team": "Zespół",
    "value": "Wartość"
  },
  "config": {
    "columns": {
      "section": "Sekcja"
    },
    "title": "Konfiguracja Airflowa"
  },
  "connections": {
    "add": "Dodaj połączenie",
    "columns": {
      "connectionId": "Identyfikator połączenia",
      "connectionType": "Typ połączenia",
      "host": "Host",
      "port": "Port"
    },
    "connection_few": "Połączenia",
    "connection_many": "Połączeń",
    "connection_one": "Połączenie",
    "connection_other": "Połączenia",
    "delete": {
      "deleteConnection_few": "Usuń {{count}} połączenia",
      "deleteConnection_many": "Usuń {{count}} połączeń",
      "deleteConnection_one": "Usuń 1 połączenie",
      "deleteConnection_other": "Usuń {{count}} połączenia",
      "firstConfirmMessage_few": "Zamierzasz usunąć następujące połączenia:",
      "firstConfirmMessage_many": "Zamierzasz usunąć następujące połączenia:",
      "firstConfirmMessage_one": "Zamierzasz usunąć następujące połączenie:",
      "firstConfirmMessage_other": "Zamierzasz usunąć następujące połączenia:",
      "title": "Usuń połączenie"
    },
    "edit": "Edytuj połączenie",
    "form": {
      "connectionIdRequired": "Identyfikator połączenia jest wymagany",
      "connectionIdRequirement": "Identyfikator połączenia nie może zawierać wyłącznie spacji",
      "connectionTypeRequired": "Typ połączenia jest wymagany",
      "extraFields": "Dodatkowe pola",
      "extraFieldsJson": "Dodatkowe pola JSON",
      "helperText": "Brakuje typu połączenia? Upewnij się, że zainstalowałeś odpowiedniego dostawcę.",
      "helperTextForRedactedFields": "Ukryte pola ('***') nie będą zapisane jeśli nie zostaną zmienione.",
      "selectConnectionType": "Wybierz typ połączenia",
      "standardFields": "Standardowe pola"
    },
    "nothingFound": {
      "description": "Połączenia zdefiniowane za pomocą zmiennych środowiskowych lub menedżerów sekretów nie są tutaj wyświetlane.",
      "documentationLink": "Dowiedz się więcej w dokumentacji Airflow.",
      "learnMore": "Są one rozwiązywane w czasie wykonywania i nie są widoczne w interfejsie użytkownika.",
      "title": "Nie znaleziono połączeń!"
}, "searchPlaceholder": "Szukaj połączeń", "test": "Test połączenia", "testDisabled": "Testowanie połączeń wyłączone. Skontaktuj się z administratorem, aby je włączyć.", "testError": { "title": "Test połączenia nieudany" }, "testSuccess": { "title": "Test połączenia udany" }, "typeMeta": { "error": "Nie udało się pobrać metadanych typu połączenia", "standardFields": { "description": "Opis", "host": "Host", "login": "Login", "password": "Hasło", "port": "Port", "url_schema": "Schemat" } } }, "deleteActions": { "button": "Usuń", "modal": { "confirmButton": "Tak, usuń", "secondConfirmMessage": "Ta akcja jest nieodwracalna.", "thirdConfirmMessage": "Czy na pewno chcesz kontynuować?" }, "selected": "Wybrano", "tooltip": "Usuń wybrane połączenia" }, "formActions": { "save": "Zapisz" }, "plugins": { "columns": { "source": "Źródło" }, "importError_few": "Błędy importu wtyczek", "importError_many": "Błędów importu wtyczek", "importError_one": "Błąd importu wtyczki", "importError_other": "Błędy importu wtyczek", "searchPlaceholder": "Szukaj po pliku" }, "pools": { "add": "Dodaj pulę", "deferredSlotsIncluded": "Uwzględniono odroczone miejsca", "delete": { "title": "Usuń pulę", "warning": "To usunie wszystkie metadane związane z pulą i może wpłynąć na zadania korzystające z tej puli." 
}, "edit": "Edytuj pulę", "form": { "checkbox": "Zaznacz, aby uwzględnić zadania odroczone przy obliczaniu wolnych miejsc w puli", "description": "Opis", "includeDeferred": "Uwzględnij odroczone", "nameMaxLength": "Nazwa może zawierać maksymalnie 256 znaków", "nameRequired": "Nazwa jest wymagana", "slots": "Miejsca" }, "noPoolsFound": "Nie znaleziono pul", "pool_few": "Pule", "pool_many": "Puli", "pool_one": "Pula", "pool_other": "Pule", "searchPlaceholder": "Szukaj pul", "sort": { "asc": "Nazwa (A-Z)", "desc": "Nazwa (Z-A)", "placeholder": "Sortuj według" } }, "providers": { "columns": { "packageName": "Nazwa paczki", "version": "Wersja" } }, "variables": { "add": "Dodaj zmienną", "columns": { "isEncrypted": "Zaszyfrowana" }, "delete": { "deleteVariable_few": "Usuń {{count}} zmienne", "deleteVariable_many": "Usuń {{count}} zmiennych", "deleteVariable_one": "Usuń 1 zmienną", "deleteVariable_other": "Usuń {{count}} zmienne", "firstConfirmMessage_few": "Zamierzasz usunąć następujące zmienne:", "firstConfirmMessage_many": "Zamierzasz usunąć następujące zmienne:", "firstConfirmMessage_one": "Zamierzasz usunąć następującą zmienną:", "firstConfirmMessage_other": "Zamierzasz usunąć następujące zmienne:", "title": "Usuń zmienną", "tooltip": "Usuń wybrane zmienne" }, "edit": "Edytuj zmienną", "form": { "invalidJson": "Nieprawidłowy JSON", "keyMaxLength": "Klucz może zawierać maksymalnie 250 znaków", "keyRequired": "Klucz jest wymagany", "valueRequired": "Wartość jest wymagana" }, "import": { "button": "Importuj", "conflictResolution": "Wybierz sposób rozwiązywania konfliktów zmiennych", "errorParsingJsonFile": "Błąd podczas przetwarzania pliku JSON: Prześlij plik JSON zawierający zmienne (np. 
{\"key\": \"value\", ...}).", "options": { "fail": { "description": "Import nie powiedzie się, jeśli wykryte zostaną istniejące zmienne.", "title": "Przerwij" }, "overwrite": { "description": "Nadpisuje zmienną w przypadku konfliktu.", "title": "Nadpisz" }, "skip": { "description": "Pomija import zmiennych, które już istnieją.", "title": "Pomiń" } }, "title": "Importuj zmienne", "upload": "Prześlij plik JSON", "uploadPlaceholder": "Prześlij plik JSON zawierający zmienne (np. {\"key\": \"value\", ...})" }, "noRowsMessage": "Nie znaleziono zmiennych", "searchPlaceholder": "Szukaj kluczy", "variable_few": "Zmienne", "variable_many": "Zmiennych", "variable_one": "Zmienna", "variable_other": "Zmienne" } }
json
github
https://github.com/apache/airflow
airflow-core/src/airflow/ui/public/i18n/locales/pl/admin.json
#!/usr/bin/python2.4 # # Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Oauth2client tests Unit tests for oauth2client. """ __author__ = 'jcgregorio@google.com (Joe Gregorio)' import base64 import datetime import json try: from mox3 import mox except ImportError: import mox import os import time import unittest import six from six.moves import urllib from .http_mock import HttpMock from .http_mock import HttpMockSequence from oauth2client import GOOGLE_REVOKE_URI from oauth2client import GOOGLE_TOKEN_URI from oauth2client import client from oauth2client.client import AccessTokenCredentials from oauth2client.client import AccessTokenCredentialsError from oauth2client.client import AccessTokenRefreshError from oauth2client.client import ADC_HELP_MSG from oauth2client.client import AssertionCredentials from oauth2client.client import AUTHORIZED_USER from oauth2client.client import Credentials from oauth2client.client import DEFAULT_ENV_NAME from oauth2client.client import ApplicationDefaultCredentialsError from oauth2client.client import FlowExchangeError from oauth2client.client import GoogleCredentials from oauth2client.client import GOOGLE_APPLICATION_CREDENTIALS from oauth2client.client import MemoryCache from oauth2client.client import NonAsciiHeaderError from oauth2client.client import OAuth2Credentials from oauth2client.client import OAuth2WebServerFlow from oauth2client.client import OOB_CALLBACK_URN from oauth2client.client 
import REFRESH_STATUS_CODES from oauth2client.client import SERVICE_ACCOUNT from oauth2client.client import Storage from oauth2client.client import TokenRevokeError from oauth2client.client import VerifyJwtTokenError from oauth2client.client import _extract_id_token from oauth2client.client import _get_application_default_credential_from_file from oauth2client.client import _get_environment from oauth2client.client import _get_environment_variable_file from oauth2client.client import _get_well_known_file from oauth2client.client import _raise_exception_for_missing_fields from oauth2client.client import _raise_exception_for_reading_json from oauth2client.client import _update_query_params from oauth2client.client import credentials_from_clientsecrets_and_code from oauth2client.client import credentials_from_code from oauth2client.client import flow_from_clientsecrets from oauth2client.client import save_to_well_known_file from oauth2client.clientsecrets import _loadfile from oauth2client.service_account import _ServiceAccountCredentials DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') # TODO(craigcitro): This is duplicated from # googleapiclient.test_discovery; consolidate these definitions. 
def assertUrisEqual(testcase, expected, actual): """Test that URIs are the same, up to reordering of query parameters.""" expected = urllib.parse.urlparse(expected) actual = urllib.parse.urlparse(actual) testcase.assertEqual(expected.scheme, actual.scheme) testcase.assertEqual(expected.netloc, actual.netloc) testcase.assertEqual(expected.path, actual.path) testcase.assertEqual(expected.params, actual.params) testcase.assertEqual(expected.fragment, actual.fragment) expected_query = urllib.parse.parse_qs(expected.query) actual_query = urllib.parse.parse_qs(actual.query) for name in expected_query.keys(): testcase.assertEqual(expected_query[name], actual_query[name]) for name in actual_query.keys(): testcase.assertEqual(expected_query[name], actual_query[name]) def datafile(filename): return os.path.join(DATA_DIR, filename) def load_and_cache(existing_file, fakename, cache_mock): client_type, client_info = _loadfile(datafile(existing_file)) cache_mock.cache[fakename] = {client_type: client_info} class CacheMock(object): def __init__(self): self.cache = {} def get(self, key, namespace=''): # ignoring namespace for easier testing return self.cache.get(key, None) def set(self, key, value, namespace=''): # ignoring namespace for easier testing self.cache[key] = value class CredentialsTests(unittest.TestCase): def test_to_from_json(self): credentials = Credentials() json = credentials.to_json() restored = Credentials.new_from_json(json) class MockResponse(object): """Mock the response of urllib2.urlopen() call.""" def __init__(self, headers): self._headers = headers def info(self): class Info: def __init__(self, headers): self.headers = headers return Info(self._headers) class GoogleCredentialsTests(unittest.TestCase): def setUp(self): self.env_server_software = os.environ.get('SERVER_SOFTWARE', None) self.env_google_application_credentials = ( os.environ.get(GOOGLE_APPLICATION_CREDENTIALS, None)) self.env_appdata = os.environ.get('APPDATA', None) self.os_name = os.name 
from oauth2client import client client.SETTINGS.env_name = None def tearDown(self): self.reset_env('SERVER_SOFTWARE', self.env_server_software) self.reset_env(GOOGLE_APPLICATION_CREDENTIALS, self.env_google_application_credentials) self.reset_env('APPDATA', self.env_appdata) os.name = self.os_name def reset_env(self, env, value): """Set the environment variable 'env' to 'value'.""" if value is not None: os.environ[env] = value else: os.environ.pop(env, '') def validate_service_account_credentials(self, credentials): self.assertTrue(isinstance(credentials, _ServiceAccountCredentials)) self.assertEqual('123', credentials._service_account_id) self.assertEqual('dummy@google.com', credentials._service_account_email) self.assertEqual('ABCDEF', credentials._private_key_id) self.assertEqual('', credentials._scopes) def validate_google_credentials(self, credentials): self.assertTrue(isinstance(credentials, GoogleCredentials)) self.assertEqual(None, credentials.access_token) self.assertEqual('123', credentials.client_id) self.assertEqual('secret', credentials.client_secret) self.assertEqual('alabalaportocala', credentials.refresh_token) self.assertEqual(None, credentials.token_expiry) self.assertEqual(GOOGLE_TOKEN_URI, credentials.token_uri) self.assertEqual('Python client library', credentials.user_agent) def get_a_google_credentials_object(self): return GoogleCredentials(None, None, None, None, None, None, None, None) def test_create_scoped_required(self): self.assertFalse( self.get_a_google_credentials_object().create_scoped_required()) def test_create_scoped(self): credentials = self.get_a_google_credentials_object() self.assertEqual(credentials, credentials.create_scoped(None)) self.assertEqual(credentials, credentials.create_scoped(['dummy_scope'])) def test_get_environment_gae_production(self): os.environ['SERVER_SOFTWARE'] = 'Google App Engine/XYZ' self.assertEqual('GAE_PRODUCTION', _get_environment()) def test_get_environment_gae_local(self): 
os.environ['SERVER_SOFTWARE'] = 'Development/XYZ' self.assertEqual('GAE_LOCAL', _get_environment()) def test_get_environment_gce_production(self): os.environ['SERVER_SOFTWARE'] = '' mockResponse = MockResponse(['Metadata-Flavor: Google\r\n']) m = mox.Mox() urllib2_urlopen = m.CreateMock(object) urllib2_urlopen.__call__(('http://metadata.google.internal' )).AndReturn(mockResponse) m.ReplayAll() self.assertEqual('GCE_PRODUCTION', _get_environment(urllib2_urlopen)) m.UnsetStubs() m.VerifyAll() def test_get_environment_unknown(self): os.environ['SERVER_SOFTWARE'] = '' mockResponse = MockResponse([]) m = mox.Mox() urllib2_urlopen = m.CreateMock(object) urllib2_urlopen.__call__(('http://metadata.google.internal' )).AndReturn(mockResponse) m.ReplayAll() self.assertEqual(DEFAULT_ENV_NAME, _get_environment(urllib2_urlopen)) m.UnsetStubs() m.VerifyAll() def test_get_environment_variable_file(self): environment_variable_file = datafile( os.path.join('gcloud', 'application_default_credentials.json')) os.environ[GOOGLE_APPLICATION_CREDENTIALS] = environment_variable_file self.assertEqual(environment_variable_file, _get_environment_variable_file()) def test_get_environment_variable_file_error(self): nonexistent_file = datafile('nonexistent') os.environ[GOOGLE_APPLICATION_CREDENTIALS] = nonexistent_file # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+ try: _get_environment_variable_file() self.fail(nonexistent_file + ' should not exist.') except ApplicationDefaultCredentialsError as error: self.assertEqual('File ' + nonexistent_file + ' (pointed by ' + GOOGLE_APPLICATION_CREDENTIALS + ' environment variable) does not exist!', str(error)) def test_get_well_known_file_on_windows(self): well_known_file = datafile( os.path.join('gcloud', 'application_default_credentials.json')) os.name = 'nt' os.environ['APPDATA'] = DATA_DIR self.assertEqual(well_known_file, _get_well_known_file()) def test_get_application_default_credential_from_file_service_account(self): 
credentials_file = datafile( os.path.join('gcloud', 'application_default_credentials.json')) credentials = _get_application_default_credential_from_file( credentials_file) self.validate_service_account_credentials(credentials) def test_save_to_well_known_file_service_account(self): credential_file = datafile( os.path.join('gcloud', 'application_default_credentials.json')) credentials = _get_application_default_credential_from_file( credential_file) temp_credential_file = datafile( os.path.join('gcloud', 'temp_well_known_file_service_account.json')) save_to_well_known_file(credentials, temp_credential_file) with open(temp_credential_file) as f: d = json.load(f) self.assertEqual('service_account', d['type']) self.assertEqual('123', d['client_id']) self.assertEqual('dummy@google.com', d['client_email']) self.assertEqual('ABCDEF', d['private_key_id']) os.remove(temp_credential_file) def test_get_application_default_credential_from_file_authorized_user(self): credentials_file = datafile( os.path.join('gcloud', 'application_default_credentials_authorized_user.json')) credentials = _get_application_default_credential_from_file( credentials_file) self.validate_google_credentials(credentials) def test_save_to_well_known_file_authorized_user(self): credentials_file = datafile( os.path.join('gcloud', 'application_default_credentials_authorized_user.json')) credentials = _get_application_default_credential_from_file( credentials_file) temp_credential_file = datafile( os.path.join('gcloud', 'temp_well_known_file_authorized_user.json')) save_to_well_known_file(credentials, temp_credential_file) with open(temp_credential_file) as f: d = json.load(f) self.assertEqual('authorized_user', d['type']) self.assertEqual('123', d['client_id']) self.assertEqual('secret', d['client_secret']) self.assertEqual('alabalaportocala', d['refresh_token']) os.remove(temp_credential_file) def test_get_application_default_credential_from_malformed_file_1(self): credentials_file = datafile( 
os.path.join('gcloud', 'application_default_credentials_malformed_1.json')) # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+ try: _get_application_default_credential_from_file(credentials_file) self.fail('An exception was expected!') except ApplicationDefaultCredentialsError as error: self.assertEqual("'type' field should be defined " "(and have one of the '" + AUTHORIZED_USER + "' or '" + SERVICE_ACCOUNT + "' values)", str(error)) def test_get_application_default_credential_from_malformed_file_2(self): credentials_file = datafile( os.path.join('gcloud', 'application_default_credentials_malformed_2.json')) # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+ try: _get_application_default_credential_from_file(credentials_file) self.fail('An exception was expected!') except ApplicationDefaultCredentialsError as error: self.assertEqual('The following field(s) must be defined: private_key_id', str(error)) def test_get_application_default_credential_from_malformed_file_3(self): credentials_file = datafile( os.path.join('gcloud', 'application_default_credentials_malformed_3.json')) self.assertRaises(ValueError, _get_application_default_credential_from_file, credentials_file) def test_raise_exception_for_missing_fields(self): missing_fields = ['first', 'second', 'third'] # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+ try: _raise_exception_for_missing_fields(missing_fields) self.fail('An exception was expected!') except ApplicationDefaultCredentialsError as error: self.assertEqual('The following field(s) must be defined: ' + ', '.join(missing_fields), str(error)) def test_raise_exception_for_reading_json(self): credential_file = 'any_file' extra_help = ' be good' error = ApplicationDefaultCredentialsError('stuff happens') # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+ try: _raise_exception_for_reading_json(credential_file, extra_help, error) self.fail('An exception was 
expected!') except ApplicationDefaultCredentialsError as ex: self.assertEqual('An error was encountered while reading ' 'json file: '+ credential_file + extra_help + ': ' + str(error), str(ex)) def test_get_application_default_from_environment_variable_service_account( self): os.environ['SERVER_SOFTWARE'] = '' environment_variable_file = datafile( os.path.join('gcloud', 'application_default_credentials.json')) os.environ[GOOGLE_APPLICATION_CREDENTIALS] = environment_variable_file self.validate_service_account_credentials( GoogleCredentials.get_application_default()) def test_env_name(self): from oauth2client import client self.assertEqual(None, client.SETTINGS.env_name) self.test_get_application_default_from_environment_variable_service_account() self.assertEqual(DEFAULT_ENV_NAME, client.SETTINGS.env_name) def test_get_application_default_from_environment_variable_authorized_user( self): os.environ['SERVER_SOFTWARE'] = '' environment_variable_file = datafile( os.path.join('gcloud', 'application_default_credentials_authorized_user.json')) os.environ[GOOGLE_APPLICATION_CREDENTIALS] = environment_variable_file self.validate_google_credentials( GoogleCredentials.get_application_default()) def test_get_application_default_from_environment_variable_malformed_file( self): os.environ['SERVER_SOFTWARE'] = '' environment_variable_file = datafile( os.path.join('gcloud', 'application_default_credentials_malformed_3.json')) os.environ[GOOGLE_APPLICATION_CREDENTIALS] = environment_variable_file # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+ try: GoogleCredentials.get_application_default() self.fail('An exception was expected!') except ApplicationDefaultCredentialsError as error: self.assertTrue(str(error).startswith( 'An error was encountered while reading json file: ' + environment_variable_file + ' (pointed to by ' + GOOGLE_APPLICATION_CREDENTIALS + ' environment variable):')) def test_get_application_default_environment_not_set_up(self): # It is 
normal for this test to fail if run inside # a Google Compute Engine VM or after 'gcloud auth login' command # has been executed on a non Windows machine. os.environ['SERVER_SOFTWARE'] = '' os.environ[GOOGLE_APPLICATION_CREDENTIALS] = '' os.environ['APPDATA'] = '' # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+ try: GoogleCredentials.get_application_default() self.fail('An exception was expected!') except ApplicationDefaultCredentialsError as error: self.assertEqual(ADC_HELP_MSG, str(error)) def test_from_stream_service_account(self): credentials_file = datafile( os.path.join('gcloud', 'application_default_credentials.json')) credentials = ( self.get_a_google_credentials_object().from_stream(credentials_file)) self.validate_service_account_credentials(credentials) def test_from_stream_authorized_user(self): credentials_file = datafile( os.path.join('gcloud', 'application_default_credentials_authorized_user.json')) credentials = ( self.get_a_google_credentials_object().from_stream(credentials_file)) self.validate_google_credentials(credentials) def test_from_stream_malformed_file_1(self): credentials_file = datafile( os.path.join('gcloud', 'application_default_credentials_malformed_1.json')) # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+ try: self.get_a_google_credentials_object().from_stream(credentials_file) self.fail('An exception was expected!') except ApplicationDefaultCredentialsError as error: self.assertEqual("An error was encountered while reading json file: " + credentials_file + " (provided as parameter to the from_stream() method): " "'type' field should be defined (and have one of the '" + AUTHORIZED_USER + "' or '" + SERVICE_ACCOUNT + "' values)", str(error)) def test_from_stream_malformed_file_2(self): credentials_file = datafile( os.path.join('gcloud', 'application_default_credentials_malformed_2.json')) # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+ try: 
self.get_a_google_credentials_object().from_stream(credentials_file) self.fail('An exception was expected!') except ApplicationDefaultCredentialsError as error: self.assertEqual('An error was encountered while reading json file: ' + credentials_file + ' (provided as parameter to the from_stream() method): ' 'The following field(s) must be defined: ' 'private_key_id', str(error)) def test_from_stream_malformed_file_3(self): credentials_file = datafile( os.path.join('gcloud', 'application_default_credentials_malformed_3.json')) self.assertRaises( ApplicationDefaultCredentialsError, self.get_a_google_credentials_object().from_stream, credentials_file) class DummyDeleteStorage(Storage): delete_called = False def locked_delete(self): self.delete_called = True def _token_revoke_test_helper(testcase, status, revoke_raise, valid_bool_value, token_attr): current_store = getattr(testcase.credentials, 'store', None) dummy_store = DummyDeleteStorage() testcase.credentials.set_store(dummy_store) actual_do_revoke = testcase.credentials._do_revoke testcase.token_from_revoke = None def do_revoke_stub(http_request, token): testcase.token_from_revoke = token return actual_do_revoke(http_request, token) testcase.credentials._do_revoke = do_revoke_stub http = HttpMock(headers={'status': status}) if revoke_raise: testcase.assertRaises(TokenRevokeError, testcase.credentials.revoke, http) else: testcase.credentials.revoke(http) testcase.assertEqual(getattr(testcase.credentials, token_attr), testcase.token_from_revoke) testcase.assertEqual(valid_bool_value, testcase.credentials.invalid) testcase.assertEqual(valid_bool_value, dummy_store.delete_called) testcase.credentials.set_store(current_store) class BasicCredentialsTests(unittest.TestCase): def setUp(self): access_token = 'foo' client_id = 'some_client_id' client_secret = 'cOuDdkfjxxnv+' refresh_token = '1/0/a.df219fjls0' token_expiry = datetime.datetime.utcnow() user_agent = 'refresh_checker/1.0' self.credentials = OAuth2Credentials( 
access_token, client_id, client_secret, refresh_token, token_expiry, GOOGLE_TOKEN_URI, user_agent, revoke_uri=GOOGLE_REVOKE_URI) def test_token_refresh_success(self): for status_code in REFRESH_STATUS_CODES: token_response = {'access_token': '1/3w', 'expires_in': 3600} http = HttpMockSequence([ ({'status': status_code}, b''), ({'status': '200'}, json.dumps(token_response).encode('utf-8')), ({'status': '200'}, 'echo_request_headers'), ]) http = self.credentials.authorize(http) resp, content = http.request('http://example.com') self.assertEqual(b'Bearer 1/3w', content[b'Authorization']) self.assertFalse(self.credentials.access_token_expired) self.assertEqual(token_response, self.credentials.token_response) def test_token_refresh_failure(self): for status_code in REFRESH_STATUS_CODES: http = HttpMockSequence([ ({'status': status_code}, b''), ({'status': '400'}, b'{"error":"access_denied"}'), ]) http = self.credentials.authorize(http) try: http.request('http://example.com') self.fail('should raise AccessTokenRefreshError exception') except AccessTokenRefreshError: pass self.assertTrue(self.credentials.access_token_expired) self.assertEqual(None, self.credentials.token_response) def test_token_revoke_success(self): _token_revoke_test_helper( self, '200', revoke_raise=False, valid_bool_value=True, token_attr='refresh_token') def test_token_revoke_failure(self): _token_revoke_test_helper( self, '400', revoke_raise=True, valid_bool_value=False, token_attr='refresh_token') def test_non_401_error_response(self): http = HttpMockSequence([ ({'status': '400'}, b''), ]) http = self.credentials.authorize(http) resp, content = http.request('http://example.com') self.assertEqual(400, resp.status) self.assertEqual(None, self.credentials.token_response) def test_to_from_json(self): json = self.credentials.to_json() instance = OAuth2Credentials.from_json(json) self.assertEqual(OAuth2Credentials, type(instance)) instance.token_expiry = None self.credentials.token_expiry = None 
self.assertEqual(instance.__dict__, self.credentials.__dict__) def test_from_json_token_expiry(self): data = json.loads(self.credentials.to_json()) data['token_expiry'] = None instance = OAuth2Credentials.from_json(json.dumps(data)) self.assertTrue(isinstance(instance, OAuth2Credentials)) def test_unicode_header_checks(self): access_token = u'foo' client_id = u'some_client_id' client_secret = u'cOuDdkfjxxnv+' refresh_token = u'1/0/a.df219fjls0' token_expiry = str(datetime.datetime.utcnow()) token_uri = str(GOOGLE_TOKEN_URI) revoke_uri = str(GOOGLE_REVOKE_URI) user_agent = u'refresh_checker/1.0' credentials = OAuth2Credentials(access_token, client_id, client_secret, refresh_token, token_expiry, token_uri, user_agent, revoke_uri=revoke_uri) # First, test that we correctly encode basic objects, making sure # to include a bytes object. Note that oauth2client will normalize # everything to bytes, no matter what python version we're in. http = credentials.authorize(HttpMock(headers={'status': '200'})) headers = {u'foo': 3, b'bar': True, 'baz': b'abc'} cleaned_headers = {b'foo': b'3', b'bar': b'True', b'baz': b'abc'} http.request(u'http://example.com', method=u'GET', headers=headers) for k, v in cleaned_headers.items(): self.assertTrue(k in http.headers) self.assertEqual(v, http.headers[k]) # Next, test that we do fail on unicode. 
unicode_str = six.unichr(40960) + 'abcd' self.assertRaises( NonAsciiHeaderError, http.request, u'http://example.com', method=u'GET', headers={u'foo': unicode_str}) def test_no_unicode_in_request_params(self): access_token = u'foo' client_id = u'some_client_id' client_secret = u'cOuDdkfjxxnv+' refresh_token = u'1/0/a.df219fjls0' token_expiry = str(datetime.datetime.utcnow()) token_uri = str(GOOGLE_TOKEN_URI) revoke_uri = str(GOOGLE_REVOKE_URI) user_agent = u'refresh_checker/1.0' credentials = OAuth2Credentials(access_token, client_id, client_secret, refresh_token, token_expiry, token_uri, user_agent, revoke_uri=revoke_uri) http = HttpMock(headers={'status': '200'}) http = credentials.authorize(http) http.request(u'http://example.com', method=u'GET', headers={u'foo': u'bar'}) for k, v in six.iteritems(http.headers): self.assertEqual(six.binary_type, type(k)) self.assertEqual(six.binary_type, type(v)) # Test again with unicode strings that can't simply be converted to ASCII. try: http.request( u'http://example.com', method=u'GET', headers={u'foo': u'\N{COMET}'}) self.fail('Expected exception to be raised.') except NonAsciiHeaderError: pass self.credentials.token_response = 'foobar' instance = OAuth2Credentials.from_json(self.credentials.to_json()) self.assertEqual('foobar', instance.token_response) def test_get_access_token(self): S = 2 # number of seconds in which the token expires token_response_first = {'access_token': 'first_token', 'expires_in': S} token_response_second = {'access_token': 'second_token', 'expires_in': S} http = HttpMockSequence([ ({'status': '200'}, json.dumps(token_response_first).encode('utf-8')), ({'status': '200'}, json.dumps(token_response_second).encode('utf-8')), ]) token = self.credentials.get_access_token(http=http) self.assertEqual('first_token', token.access_token) self.assertEqual(S - 1, token.expires_in) self.assertFalse(self.credentials.access_token_expired) self.assertEqual(token_response_first, self.credentials.token_response) 
token = self.credentials.get_access_token(http=http) self.assertEqual('first_token', token.access_token) self.assertEqual(S - 1, token.expires_in) self.assertFalse(self.credentials.access_token_expired) self.assertEqual(token_response_first, self.credentials.token_response) time.sleep(S + 0.5) # some margin to avoid flakiness self.assertTrue(self.credentials.access_token_expired) token = self.credentials.get_access_token(http=http) self.assertEqual('second_token', token.access_token) self.assertEqual(S - 1, token.expires_in) self.assertFalse(self.credentials.access_token_expired) self.assertEqual(token_response_second, self.credentials.token_response) class AccessTokenCredentialsTests(unittest.TestCase): def setUp(self): access_token = 'foo' user_agent = 'refresh_checker/1.0' self.credentials = AccessTokenCredentials(access_token, user_agent, revoke_uri=GOOGLE_REVOKE_URI) def test_token_refresh_success(self): for status_code in REFRESH_STATUS_CODES: http = HttpMockSequence([ ({'status': status_code}, b''), ]) http = self.credentials.authorize(http) try: resp, content = http.request('http://example.com') self.fail('should throw exception if token expires') except AccessTokenCredentialsError: pass except Exception: self.fail('should only throw AccessTokenCredentialsError') def test_token_revoke_success(self): _token_revoke_test_helper( self, '200', revoke_raise=False, valid_bool_value=True, token_attr='access_token') def test_token_revoke_failure(self): _token_revoke_test_helper( self, '400', revoke_raise=True, valid_bool_value=False, token_attr='access_token') def test_non_401_error_response(self): http = HttpMockSequence([ ({'status': '400'}, b''), ]) http = self.credentials.authorize(http) resp, content = http.request('http://example.com') self.assertEqual(400, resp.status) def test_auth_header_sent(self): http = HttpMockSequence([ ({'status': '200'}, 'echo_request_headers'), ]) http = self.credentials.authorize(http) resp, content = 
http.request('http://example.com') self.assertEqual(b'Bearer foo', content[b'Authorization']) class TestAssertionCredentials(unittest.TestCase): assertion_text = 'This is the assertion' assertion_type = 'http://www.google.com/assertionType' class AssertionCredentialsTestImpl(AssertionCredentials): def _generate_assertion(self): return TestAssertionCredentials.assertion_text def setUp(self): user_agent = 'fun/2.0' self.credentials = self.AssertionCredentialsTestImpl(self.assertion_type, user_agent=user_agent) def test_assertion_body(self): body = urllib.parse.parse_qs( self.credentials._generate_refresh_request_body()) self.assertEqual(self.assertion_text, body['assertion'][0]) self.assertEqual('urn:ietf:params:oauth:grant-type:jwt-bearer', body['grant_type'][0]) def test_assertion_refresh(self): http = HttpMockSequence([ ({'status': '200'}, b'{"access_token":"1/3w"}'), ({'status': '200'}, 'echo_request_headers'), ]) http = self.credentials.authorize(http) resp, content = http.request('http://example.com') self.assertEqual(b'Bearer 1/3w', content[b'Authorization']) def test_token_revoke_success(self): _token_revoke_test_helper( self, '200', revoke_raise=False, valid_bool_value=True, token_attr='access_token') def test_token_revoke_failure(self): _token_revoke_test_helper( self, '400', revoke_raise=True, valid_bool_value=False, token_attr='access_token') class UpdateQueryParamsTest(unittest.TestCase): def test_update_query_params_no_params(self): uri = 'http://www.google.com' updated = _update_query_params(uri, {'a': 'b'}) self.assertEqual(updated, uri + '?a=b') def test_update_query_params_existing_params(self): uri = 'http://www.google.com?x=y' updated = _update_query_params(uri, {'a': 'b', 'c': 'd&'}) hardcoded_update = uri + '&a=b&c=d%26' assertUrisEqual(self, updated, hardcoded_update) class ExtractIdTokenTest(unittest.TestCase): """Tests _extract_id_token().""" def test_extract_success(self): body = {'foo': 'bar'} body_json = json.dumps(body).encode('ascii') 
payload = base64.urlsafe_b64encode(body_json).strip(b'=') jwt = b'stuff.' + payload + b'.signature' extracted = _extract_id_token(jwt) self.assertEqual(extracted, body) def test_extract_failure(self): body = {'foo': 'bar'} body_json = json.dumps(body).encode('ascii') payload = base64.urlsafe_b64encode(body_json).strip(b'=') jwt = b'stuff.' + payload self.assertRaises(VerifyJwtTokenError, _extract_id_token, jwt) class OAuth2WebServerFlowTest(unittest.TestCase): def setUp(self): self.flow = OAuth2WebServerFlow( client_id='client_id+1', client_secret='secret+1', scope='foo', redirect_uri=OOB_CALLBACK_URN, user_agent='unittest-sample/1.0', revoke_uri='dummy_revoke_uri', ) def test_construct_authorize_url(self): authorize_url = self.flow.step1_get_authorize_url() parsed = urllib.parse.urlparse(authorize_url) q = urllib.parse.parse_qs(parsed[4]) self.assertEqual('client_id+1', q['client_id'][0]) self.assertEqual('code', q['response_type'][0]) self.assertEqual('foo', q['scope'][0]) self.assertEqual(OOB_CALLBACK_URN, q['redirect_uri'][0]) self.assertEqual('offline', q['access_type'][0]) def test_override_flow_via_kwargs(self): """Passing kwargs to override defaults.""" flow = OAuth2WebServerFlow( client_id='client_id+1', client_secret='secret+1', scope='foo', redirect_uri=OOB_CALLBACK_URN, user_agent='unittest-sample/1.0', access_type='online', response_type='token' ) authorize_url = flow.step1_get_authorize_url() parsed = urllib.parse.urlparse(authorize_url) q = urllib.parse.parse_qs(parsed[4]) self.assertEqual('client_id+1', q['client_id'][0]) self.assertEqual('token', q['response_type'][0]) self.assertEqual('foo', q['scope'][0]) self.assertEqual(OOB_CALLBACK_URN, q['redirect_uri'][0]) self.assertEqual('online', q['access_type'][0]) def test_exchange_failure(self): http = HttpMockSequence([ ({'status': '400'}, b'{"error":"invalid_request"}'), ]) try: credentials = self.flow.step2_exchange('some random code', http=http) self.fail('should raise exception if exchange 
doesn\'t get 200') except FlowExchangeError: pass def test_urlencoded_exchange_failure(self): http = HttpMockSequence([ ({'status': '400'}, b'error=invalid_request'), ]) try: credentials = self.flow.step2_exchange('some random code', http=http) self.fail('should raise exception if exchange doesn\'t get 200') except FlowExchangeError as e: self.assertEqual('invalid_request', str(e)) def test_exchange_failure_with_json_error(self): # Some providers have 'error' attribute as a JSON object # in place of regular string. # This test makes sure no strange object-to-string coversion # exceptions are being raised instead of FlowExchangeError. http = HttpMockSequence([ ({'status': '400'}, b""" {"error": { "type": "OAuthException", "message": "Error validating verification code."} }"""), ]) try: credentials = self.flow.step2_exchange('some random code', http=http) self.fail('should raise exception if exchange doesn\'t get 200') except FlowExchangeError as e: pass def test_exchange_success(self): http = HttpMockSequence([ ({'status': '200'}, b"""{ "access_token":"SlAV32hkKG", "expires_in":3600, "refresh_token":"8xLOxBtZp8" }"""), ]) credentials = self.flow.step2_exchange('some random code', http=http) self.assertEqual('SlAV32hkKG', credentials.access_token) self.assertNotEqual(None, credentials.token_expiry) self.assertEqual('8xLOxBtZp8', credentials.refresh_token) self.assertEqual('dummy_revoke_uri', credentials.revoke_uri) def test_exchange_dictlike(self): class FakeDict(object): def __init__(self, d): self.d = d def __getitem__(self, name): return self.d[name] def __contains__(self, name): return name in self.d code = 'some random code' not_a_dict = FakeDict({'code': code}) payload = (b'{' b' "access_token":"SlAV32hkKG",' b' "expires_in":3600,' b' "refresh_token":"8xLOxBtZp8"' b'}') http = HttpMockSequence([({'status': '200'}, payload),]) credentials = self.flow.step2_exchange(not_a_dict, http=http) self.assertEqual('SlAV32hkKG', credentials.access_token) 
self.assertNotEqual(None, credentials.token_expiry) self.assertEqual('8xLOxBtZp8', credentials.refresh_token) self.assertEqual('dummy_revoke_uri', credentials.revoke_uri) request_code = urllib.parse.parse_qs(http.requests[0]['body'])['code'][0] self.assertEqual(code, request_code) def test_urlencoded_exchange_success(self): http = HttpMockSequence([ ({'status': '200'}, b'access_token=SlAV32hkKG&expires_in=3600'), ]) credentials = self.flow.step2_exchange('some random code', http=http) self.assertEqual('SlAV32hkKG', credentials.access_token) self.assertNotEqual(None, credentials.token_expiry) def test_urlencoded_expires_param(self): http = HttpMockSequence([ # Note the 'expires=3600' where you'd normally # have if named 'expires_in' ({'status': '200'}, b'access_token=SlAV32hkKG&expires=3600'), ]) credentials = self.flow.step2_exchange('some random code', http=http) self.assertNotEqual(None, credentials.token_expiry) def test_exchange_no_expires_in(self): http = HttpMockSequence([ ({'status': '200'}, b"""{ "access_token":"SlAV32hkKG", "refresh_token":"8xLOxBtZp8" }"""), ]) credentials = self.flow.step2_exchange('some random code', http=http) self.assertEqual(None, credentials.token_expiry) def test_urlencoded_exchange_no_expires_in(self): http = HttpMockSequence([ # This might be redundant but just to make sure # urlencoded access_token gets parsed correctly ({'status': '200'}, b'access_token=SlAV32hkKG'), ]) credentials = self.flow.step2_exchange('some random code', http=http) self.assertEqual(None, credentials.token_expiry) def test_exchange_fails_if_no_code(self): http = HttpMockSequence([ ({'status': '200'}, b"""{ "access_token":"SlAV32hkKG", "refresh_token":"8xLOxBtZp8" }"""), ]) code = {'error': 'thou shall not pass'} try: credentials = self.flow.step2_exchange(code, http=http) self.fail('should raise exception if no code in dictionary.') except FlowExchangeError as e: self.assertTrue('shall not pass' in str(e)) def test_exchange_id_token_fail(self): http = 
HttpMockSequence([ ({'status': '200'}, b"""{ "access_token":"SlAV32hkKG", "refresh_token":"8xLOxBtZp8", "id_token": "stuff.payload"}"""), ]) self.assertRaises(VerifyJwtTokenError, self.flow.step2_exchange, 'some random code', http=http) def test_exchange_id_token(self): body = {'foo': 'bar'} body_json = json.dumps(body).encode('ascii') payload = base64.urlsafe_b64encode(body_json).strip(b'=') jwt = (base64.urlsafe_b64encode(b'stuff') + b'.' + payload + b'.' + base64.urlsafe_b64encode(b'signature')) http = HttpMockSequence([ ({'status': '200'}, ("""{ "access_token":"SlAV32hkKG", "refresh_token":"8xLOxBtZp8", "id_token": "%s"}""" % jwt).encode('utf-8')), ]) credentials = self.flow.step2_exchange('some random code', http=http) self.assertEqual(credentials.id_token, body) class FlowFromCachedClientsecrets(unittest.TestCase): def test_flow_from_clientsecrets_cached(self): cache_mock = CacheMock() load_and_cache('client_secrets.json', 'some_secrets', cache_mock) flow = flow_from_clientsecrets( 'some_secrets', '', redirect_uri='oob', cache=cache_mock) self.assertEqual('foo_client_secret', flow.client_secret) class CredentialsFromCodeTests(unittest.TestCase): def setUp(self): self.client_id = 'client_id_abc' self.client_secret = 'secret_use_code' self.scope = 'foo' self.code = '12345abcde' self.redirect_uri = 'postmessage' def test_exchange_code_for_token(self): token = 'asdfghjkl' payload = json.dumps({'access_token': token, 'expires_in': 3600}) http = HttpMockSequence([ ({'status': '200'}, payload.encode('utf-8')), ]) credentials = credentials_from_code(self.client_id, self.client_secret, self.scope, self.code, redirect_uri=self.redirect_uri, http=http) self.assertEqual(credentials.access_token, token) self.assertNotEqual(None, credentials.token_expiry) def test_exchange_code_for_token_fail(self): http = HttpMockSequence([ ({'status': '400'}, b'{"error":"invalid_request"}'), ]) try: credentials = credentials_from_code(self.client_id, self.client_secret, self.scope, 
self.code, redirect_uri=self.redirect_uri, http=http) self.fail('should raise exception if exchange doesn\'t get 200') except FlowExchangeError: pass def test_exchange_code_and_file_for_token(self): http = HttpMockSequence([ ({'status': '200'}, b"""{ "access_token":"asdfghjkl", "expires_in":3600 }"""), ]) credentials = credentials_from_clientsecrets_and_code( datafile('client_secrets.json'), self.scope, self.code, http=http) self.assertEqual(credentials.access_token, 'asdfghjkl') self.assertNotEqual(None, credentials.token_expiry) def test_exchange_code_and_cached_file_for_token(self): http = HttpMockSequence([ ({'status': '200'}, b'{ "access_token":"asdfghjkl"}'), ]) cache_mock = CacheMock() load_and_cache('client_secrets.json', 'some_secrets', cache_mock) credentials = credentials_from_clientsecrets_and_code( 'some_secrets', self.scope, self.code, http=http, cache=cache_mock) self.assertEqual(credentials.access_token, 'asdfghjkl') def test_exchange_code_and_file_for_token_fail(self): http = HttpMockSequence([ ({'status': '400'}, b'{"error":"invalid_request"}'), ]) try: credentials = credentials_from_clientsecrets_and_code( datafile('client_secrets.json'), self.scope, self.code, http=http) self.fail('should raise exception if exchange doesn\'t get 200') except FlowExchangeError: pass class MemoryCacheTests(unittest.TestCase): def test_get_set_delete(self): m = MemoryCache() self.assertEqual(None, m.get('foo')) self.assertEqual(None, m.delete('foo')) m.set('foo', 'bar') self.assertEqual('bar', m.get('foo')) m.delete('foo') self.assertEqual(None, m.get('foo')) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
import datetime from django.forms import ChoiceField, Form, MultiWidget, RadioSelect, TextInput from django.test import override_settings from django.utils.safestring import mark_safe from .test_choicewidget import ChoiceWidgetTest BLANK_CHOICE_DASH = (("", "------"),) class RadioSelectTest(ChoiceWidgetTest): widget = RadioSelect def test_render(self): html = """ <div> <div> <label><input type="radio" name="beatle" value="">------</label> </div> <div> <label><input checked type="radio" name="beatle" value="J">John</label> </div> <div> <label><input type="radio" name="beatle" value="P">Paul</label> </div> <div> <label><input type="radio" name="beatle" value="G">George</label> </div> <div> <label><input type="radio" name="beatle" value="R">Ringo</label> </div> </div> """ beatles_with_blank = BLANK_CHOICE_DASH + self.beatles for choices in (beatles_with_blank, dict(beatles_with_blank)): with self.subTest(choices): self.check_html(self.widget(choices=choices), "beatle", "J", html=html) def test_nested_choices(self): nested_choices = ( ("unknown", "Unknown"), ("Audio", (("vinyl", "Vinyl"), ("cd", "CD"))), ("Video", (("vhs", "VHS"), ("dvd", "DVD"))), ) html = """ <div id="media"> <div> <label for="media_0"> <input type="radio" name="nestchoice" value="unknown" id="media_0"> Unknown </label></div> <div> <label>Audio</label> <div> <label for="media_1_0"> <input type="radio" name="nestchoice" value="vinyl" id="media_1_0"> Vinyl </label></div> <div> <label for="media_1_1"> <input type="radio" name="nestchoice" value="cd" id="media_1_1"> CD </label></div> </div><div> <label>Video</label> <div> <label for="media_2_0"> <input type="radio" name="nestchoice" value="vhs" id="media_2_0"> VHS </label></div> <div> <label for="media_2_1"> <input type="radio" name="nestchoice" value="dvd" id="media_2_1" checked> DVD </label></div> </div> </div> """ self.check_html( self.widget(choices=nested_choices), "nestchoice", "dvd", attrs={"id": "media"}, html=html, ) def test_render_none(self): 
""" If value is None, none of the options are selected. """ choices = BLANK_CHOICE_DASH + self.beatles html = """ <div> <div> <label><input checked type="radio" name="beatle" value="">------</label> </div> <div> <label><input type="radio" name="beatle" value="J">John</label> </div> <div> <label><input type="radio" name="beatle" value="P">Paul</label> </div> <div> <label><input type="radio" name="beatle" value="G">George</label> </div> <div> <label><input type="radio" name="beatle" value="R">Ringo</label> </div> </div> """ self.check_html(self.widget(choices=choices), "beatle", None, html=html) def test_render_label_value(self): """ If the value corresponds to a label (but not to an option value), none of the options are selected. """ html = """ <div> <div> <label><input type="radio" name="beatle" value="J">John</label> </div> <div> <label><input type="radio" name="beatle" value="P">Paul</label> </div> <div> <label><input type="radio" name="beatle" value="G">George</label> </div> <div> <label><input type="radio" name="beatle" value="R">Ringo</label> </div> </div> """ self.check_html(self.widget(choices=self.beatles), "beatle", "Ringo", html=html) def test_render_selected(self): """ Only one option can be selected. """ choices = [("0", "0"), ("1", "1"), ("2", "2"), ("3", "3"), ("0", "extra")] html = """ <div> <div> <label><input checked type="radio" name="choices" value="0">0</label> </div> <div> <label><input type="radio" name="choices" value="1">1</label> </div> <div> <label><input type="radio" name="choices" value="2">2</label> </div> <div> <label><input type="radio" name="choices" value="3">3</label> </div> <div> <label><input type="radio" name="choices" value="0">extra</label> </div> </div> """ self.check_html(self.widget(choices=choices), "choices", "0", html=html) def test_constructor_attrs(self): """ Attributes provided at instantiation are passed to the constituent inputs. 
""" widget = self.widget(attrs={"id": "foo"}, choices=self.beatles) html = """ <div id="foo"> <div> <label for="foo_0"> <input checked type="radio" id="foo_0" value="J" name="beatle">John</label> </div> <div><label for="foo_1"> <input type="radio" id="foo_1" value="P" name="beatle">Paul</label> </div> <div><label for="foo_2"> <input type="radio" id="foo_2" value="G" name="beatle">George</label> </div> <div><label for="foo_3"> <input type="radio" id="foo_3" value="R" name="beatle">Ringo</label> </div> </div> """ self.check_html(widget, "beatle", "J", html=html) def test_compare_to_str(self): """ The value is compared to its str(). """ html = """ <div> <div> <label><input type="radio" name="num" value="1">1</label> </div> <div> <label><input type="radio" name="num" value="2">2</label> </div> <div> <label><input checked type="radio" name="num" value="3">3</label> </div> </div> """ self.check_html( self.widget(choices=[("1", "1"), ("2", "2"), ("3", "3")]), "num", 3, html=html, ) self.check_html( self.widget(choices=[(1, 1), (2, 2), (3, 3)]), "num", "3", html=html ) self.check_html( self.widget(choices=[(1, 1), (2, 2), (3, 3)]), "num", 3, html=html ) def test_choices_constructor(self): widget = self.widget(choices=[(1, 1), (2, 2), (3, 3)]) html = """ <div> <div> <label><input type="radio" name="num" value="1">1</label> </div> <div> <label><input type="radio" name="num" value="2">2</label> </div> <div> <label><input checked type="radio" name="num" value="3">3</label> </div> </div> """ self.check_html(widget, "num", 3, html=html) def test_choices_constructor_generator(self): """ If choices is passed to the constructor and is a generator, it can be iterated over multiple times without getting consumed. 
""" def get_choices(): for i in range(4): yield (i, i) html = """ <div> <div> <label><input type="radio" name="num" value="0">0</label> </div> <div> <label><input type="radio" name="num" value="1">1</label> </div> <div> <label><input type="radio" name="num" value="2">2</label> </div> <div> <label><input checked type="radio" name="num" value="3">3</label> </div> </div> """ widget = self.widget(choices=get_choices()) self.check_html(widget, "num", 3, html=html) def test_choices_escaping(self): choices = (("bad", "you & me"), ("good", mark_safe("you &gt; me"))) html = """ <div> <div> <label><input type="radio" name="escape" value="bad">you & me</label> </div> <div> <label><input type="radio" name="escape" value="good">you &gt; me</label> </div> </div> """ self.check_html(self.widget(choices=choices), "escape", None, html=html) def test_choices_unicode(self): html = """ <div> <div> <label> <input checked type="radio" name="email" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111"> \u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</label> </div> <div> <label> <input type="radio" name="email" value="\u0107\u017e\u0161\u0111"> abc\u0107\u017e\u0161\u0111</label> </div> </div> """ self.check_html( self.widget(choices=[("ŠĐĆŽćžšđ", "ŠĐabcĆŽćžšđ"), ("ćžšđ", "abcćžšđ")]), "email", "ŠĐĆŽćžšđ", html=html, ) def test_choices_optgroup(self): """ Choices can be nested one level in order to create HTML optgroups. 
""" html = """ <div> <div> <label><input type="radio" name="nestchoice" value="outer1">Outer 1</label> </div> <div> <label>Group &quot;1&quot;</label> <div> <label> <input type="radio" name="nestchoice" value="inner1">Inner 1</label> </div> <div> <label> <input type="radio" name="nestchoice" value="inner2">Inner 2</label> </div> </div> </div> """ for widget in self.nested_widgets: with self.subTest(widget): self.check_html(widget, "nestchoice", None, html=html) def test_choices_select_outer(self): html = """ <div> <div> <label> <input checked type="radio" name="nestchoice" value="outer1">Outer 1</label> </div> <div> <label>Group &quot;1&quot;</label> <div> <label> <input type="radio" name="nestchoice" value="inner1">Inner 1</label> </div> <div> <label> <input type="radio" name="nestchoice" value="inner2">Inner 2</label> </div> </div> </div> """ for widget in self.nested_widgets: with self.subTest(widget): self.check_html(widget, "nestchoice", "outer1", html=html) def test_choices_select_inner(self): html = """ <div> <div> <label><input type="radio" name="nestchoice" value="outer1">Outer 1</label> </div> <div> <label>Group &quot;1&quot;</label> <div> <label> <input type="radio" name="nestchoice" value="inner1">Inner 1</label> </div> <div> <label> <input checked type="radio" name="nestchoice" value="inner2">Inner 2 </label> </div> </div> </div> """ for widget in self.nested_widgets: with self.subTest(widget): self.check_html(widget, "nestchoice", "inner2", html=html) def test_render_attrs(self): """ Attributes provided at render-time are passed to the constituent inputs. 
""" html = """ <div id="bar"> <div> <label for="bar_0"> <input checked type="radio" id="bar_0" value="J" name="beatle">John</label> </div> <div><label for="bar_1"> <input type="radio" id="bar_1" value="P" name="beatle">Paul</label> </div> <div><label for="bar_2"> <input type="radio" id="bar_2" value="G" name="beatle">George</label> </div> <div><label for="bar_3"> <input type="radio" id="bar_3" value="R" name="beatle">Ringo</label> </div> </div> """ self.check_html( self.widget(choices=self.beatles), "beatle", "J", attrs={"id": "bar"}, html=html, ) def test_class_attrs(self): """ The <div> in the multiple_input.html widget template include the class attribute. """ html = """ <div class="bar"> <div><label> <input checked type="radio" class="bar" value="J" name="beatle">John</label> </div> <div><label> <input type="radio" class="bar" value="P" name="beatle">Paul</label> </div> <div><label> <input type="radio" class="bar" value="G" name="beatle">George</label> </div> <div><label> <input type="radio" class="bar" value="R" name="beatle">Ringo</label> </div> </div> """ self.check_html( self.widget(choices=self.beatles), "beatle", "J", attrs={"class": "bar"}, html=html, ) @override_settings(USE_THOUSAND_SEPARATOR=True) def test_doesnt_localize_input_value(self): choices = [ (1, "One"), (1000, "One thousand"), (1000000, "One million"), ] html = """ <div> <div><label><input type="radio" name="number" value="1">One</label></div> <div> <label><input type="radio" name="number" value="1000">One thousand</label> </div> <div> <label><input type="radio" name="number" value="1000000">One million</label> </div> </div> """ self.check_html(self.widget(choices=choices), "number", None, html=html) choices = [ (datetime.time(0, 0), "midnight"), (datetime.time(12, 0), "noon"), ] html = """ <div> <div> <label><input type="radio" name="time" value="00:00:00">midnight</label> </div> <div> <label><input type="radio" name="time" value="12:00:00">noon</label> </div> </div> """ 
self.check_html(self.widget(choices=choices), "time", None, html=html) def test_render_as_subwidget(self): """A RadioSelect as a subwidget of MultiWidget.""" choices = BLANK_CHOICE_DASH + self.beatles html = """ <div> <div><label> <input type="radio" name="beatle_0" value="">------</label> </div> <div><label> <input checked type="radio" name="beatle_0" value="J">John</label> </div> <div><label> <input type="radio" name="beatle_0" value="P">Paul</label> </div> <div><label> <input type="radio" name="beatle_0" value="G">George</label> </div> <div><label> <input type="radio" name="beatle_0" value="R">Ringo</label> </div> </div> <input name="beatle_1" type="text" value="Some text"> """ self.check_html( MultiWidget([self.widget(choices=choices), TextInput()]), "beatle", ["J", "Some text"], html=html, ) def test_fieldset(self): class TestForm(Form): template_name = "forms_tests/use_fieldset.html" field = ChoiceField( widget=self.widget, choices=self.beatles, required=False ) form = TestForm() self.assertIs(self.widget.use_fieldset, True) self.assertHTMLEqual( '<div><fieldset><legend>Field:</legend><div id="id_field">' '<div><label for="id_field_0">' '<input type="radio" name="field" value="J" id="id_field_0"> John' '</label></div><div><label for="id_field_1">' '<input type="radio" name="field" value="P" id="id_field_1">Paul' '</label></div><div><label for="id_field_2"><input type="radio" ' 'name="field" value="G" id="id_field_2"> George</label></div>' '<div><label for="id_field_3"><input type="radio" name="field" ' 'value="R" id="id_field_3">Ringo</label></div></div></fieldset>' "</div>", form.render(), )
python
github
https://github.com/django/django
tests/forms_tests/widget_tests/test_radioselect.py
/* * Copyright (c) 2021-Present, Redis Ltd. * All rights reserved. * * Licensed under your choice of (a) the Redis Source Available License 2.0 * (RSALv2); or (b) the Server Side Public License v1 (SSPLv1); or (c) the * GNU Affero General Public License v3 (AGPLv3). */ #ifndef __FUNCTIONS_H_ #define __FUNCTIONS_H_ /* * functions.c unit provides the Redis Functions API: * * FUNCTION LOAD * * FUNCTION LIST * * FUNCTION CALL (FCALL and FCALL_RO) * * FUNCTION DELETE * * FUNCTION STATS * * FUNCTION KILL * * FUNCTION FLUSH * * FUNCTION DUMP * * FUNCTION RESTORE * * FUNCTION HELP * * Also contains implementation for: * * Save/Load function from rdb * * Register engines */ #include "server.h" #include "script.h" #include "redismodule.h" typedef struct functionLibInfo functionLibInfo; typedef struct engine { /* engine specific context */ void *engine_ctx; /* Create function callback, get the engine_ctx, and function code * engine_ctx - opaque struct that was created on engine initialization * li - library information that need to be provided and when add functions * code - the library code * timeout - timeout for the library creation (0 for no timeout) * err - description of error (if occurred) * returns C_ERR on error and set err to be the error message */ int (*create)(void *engine_ctx, functionLibInfo *li, sds code, size_t timeout, sds *err); /* Invoking a function, r_ctx is an opaque object (from engine POV). 
* The r_ctx should be used by the engine to interaction with Redis, * such interaction could be running commands, set resp, or set * replication mode */ void (*call)(scriptRunCtx *r_ctx, void *engine_ctx, void *compiled_function, robj **keys, size_t nkeys, robj **args, size_t nargs); /* get current used memory by the engine */ size_t (*get_used_memory)(void *engine_ctx); /* Return memory overhead for a given function, * such memory is not counted as engine memory but as general * structs memory that hold different information */ size_t (*get_function_memory_overhead)(void *compiled_function); /* Return memory overhead for engine (struct size holding the engine)*/ size_t (*get_engine_memory_overhead)(void *engine_ctx); /* free the given function */ void (*free_function)(void *engine_ctx, void *compiled_function); /* Free the engine context. */ void (*free_ctx)(void *engine_ctx); } engine; /* Hold information about an engine. * Used on rdb.c so it must be declared here. */ typedef struct engineInfo { sds name; /* Name of the engine */ engine *engine; /* engine callbacks that allows to interact with the engine */ client *c; /* Client that is used to run commands */ } engineInfo; /* Hold information about the specific function. * Used on rdb.c so it must be declared here. */ typedef struct functionInfo { sds name; /* Function name */ void *function; /* Opaque object that set by the function's engine and allow it to run the function, usually it's the function compiled code. */ functionLibInfo* li; /* Pointer to the library created the function */ sds desc; /* Function description */ uint64_t f_flags; /* Function flags */ } functionInfo; /* Hold information about the specific library. * Used on rdb.c so it must be declared here. 
*/ struct functionLibInfo { sds name; /* Library name */ dict *functions; /* Functions dictionary */ engineInfo *ei; /* Pointer to the function engine */ sds code; /* Library code */ }; int functionsRegisterEngine(const char *engine_name, engine *engine_ctx); sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx, size_t timeout); unsigned long functionsMemoryVM(void); unsigned long functionsMemoryEngine(void); unsigned long functionsNum(void); unsigned long functionsLibNum(void); dict* functionsLibGet(void); size_t functionsLibCtxFunctionsLen(functionsLibCtx *functions_ctx); functionsLibCtx* functionsLibCtxGetCurrent(void); functionsLibCtx* functionsLibCtxCreate(void); void functionsLibCtxClearCurrent(int async); void functionsLibCtxFree(functionsLibCtx *lib_ctx); void functionsLibCtxClear(functionsLibCtx *lib_ctx); void functionsLibCtxSwapWithCurrent(functionsLibCtx *lib_ctx); int functionLibCreateFunction(sds name, void *function, functionLibInfo *li, sds desc, uint64_t f_flags, sds *err); int luaEngineInitEngine(void); int functionsInit(void); void functionsFree(functionsLibCtx *lib_ctx, dict *engs); void createFunctionDumpPayload(rio *payload); #endif /* __FUNCTIONS_H_ */
c
github
https://github.com/redis/redis
src/functions.h
//! Unwind info generation (`.eh_frame`) use cranelift_codegen::FinalizedMachExceptionHandler; use cranelift_codegen::ir::Endianness; use cranelift_codegen::isa::unwind::UnwindInfo; use cranelift_module::DataId; use cranelift_object::ObjectProduct; use gimli::write::{Address, CieId, EhFrame, FrameTable, Section}; use gimli::{Encoding, Format, RunTimeEndian}; use super::emit::{DebugRelocName, address_for_data, address_for_func}; use super::gcc_except_table::{ Action, ActionKind, ActionTable, CallSite, CallSiteTable, GccExceptTable, TypeInfoTable, }; use super::object::WriteDebugInfo; use crate::prelude::*; pub(crate) const EXCEPTION_HANDLER_CLEANUP: u32 = 0; pub(crate) const EXCEPTION_HANDLER_CATCH: u32 = 1; pub(crate) struct UnwindContext { endian: RunTimeEndian, frame_table: FrameTable, cie_id: Option<CieId>, } impl UnwindContext { pub(crate) fn new(module: &mut dyn Module, pic_eh_frame: bool) -> Self { let endian = match module.isa().endianness() { Endianness::Little => RunTimeEndian::Little, Endianness::Big => RunTimeEndian::Big, }; let mut frame_table = FrameTable::default(); let cie_id = if let Some(mut cie) = module.isa().create_systemv_cie() { let ptr_encoding = if pic_eh_frame { gimli::DwEhPe(gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata4.0) } else { gimli::DW_EH_PE_absptr }; cie.fde_address_encoding = ptr_encoding; // FIXME only add personality function and lsda when necessary: https://github.com/rust-lang/rust/blob/1f76d219c906f0112bb1872f33aa977164c53fa6/compiler/rustc_codegen_ssa/src/mir/mod.rs#L200-L204 if cfg!(feature = "unwinding") { let code_ptr_encoding = if pic_eh_frame { if module.isa().triple().architecture == target_lexicon::Architecture::X86_64 { gimli::DwEhPe( gimli::DW_EH_PE_indirect.0 | gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata4.0, ) } else if let target_lexicon::Architecture::Aarch64(_) = module.isa().triple().architecture { gimli::DwEhPe( gimli::DW_EH_PE_indirect.0 | gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata8.0, ) } else { 
todo!() } } else { gimli::DwEhPe(gimli::DW_EH_PE_indirect.0 | gimli::DW_EH_PE_absptr.0) }; cie.lsda_encoding = Some(ptr_encoding); // FIXME use eh_personality lang item instead let personality = module .declare_function( "rust_eh_personality", Linkage::Import, &Signature { params: vec![ AbiParam::new(types::I32), AbiParam::new(types::I32), AbiParam::new(types::I64), AbiParam::new(module.target_config().pointer_type()), AbiParam::new(module.target_config().pointer_type()), ], returns: vec![AbiParam::new(types::I32)], call_conv: module.target_config().default_call_conv, }, ) .unwrap(); // Use indirection here to support PIC the case where rust_eh_personality is defined in // another DSO. let personality_ref = module .declare_data("DW.ref.rust_eh_personality", Linkage::Local, false, false) .unwrap(); let mut personality_ref_data = DataDescription::new(); // Note: Must not use define_zeroinit. The unwinder can't handle this being in the .bss // section. let pointer_bytes = usize::from(module.target_config().pointer_bytes()); personality_ref_data.define(vec![0; pointer_bytes].into_boxed_slice()); let personality_func_ref = module.declare_func_in_data(personality, &mut personality_ref_data); personality_ref_data.write_function_addr(0, personality_func_ref); module.define_data(personality_ref, &personality_ref_data).unwrap(); cie.personality = Some((code_ptr_encoding, address_for_data(personality_ref))); } Some(frame_table.add_cie(cie)) } else { None }; UnwindContext { endian, frame_table, cie_id } } pub(crate) fn add_function( &mut self, module: &mut dyn Module, func_id: FuncId, context: &Context, ) { if let target_lexicon::OperatingSystem::MacOSX { .. } = module.isa().triple().operating_system { // The object crate doesn't currently support DW_GNU_EH_PE_absptr, which macOS // requires for unwinding tables. In addition on arm64 it currently doesn't // support 32bit relocations as we currently use for the unwinding table. 
// See gimli-rs/object#415 and rust-lang/rustc_codegen_cranelift#1371 return; } let Some(unwind_info) = context.compiled_code().unwrap().create_unwind_info(module.isa()).unwrap() else { return; }; match unwind_info { UnwindInfo::SystemV(unwind_info) => { let mut fde = unwind_info.to_fde(address_for_func(func_id)); // FIXME only add personality function and lsda when necessary: https://github.com/rust-lang/rust/blob/1f76d219c906f0112bb1872f33aa977164c53fa6/compiler/rustc_codegen_ssa/src/mir/mod.rs#L200-L204 if cfg!(feature = "unwinding") { // FIXME use unique symbol name derived from function name let lsda = module.declare_anonymous_data(false, false).unwrap(); let encoding = Encoding { format: Format::Dwarf32, version: 1, address_size: module.isa().frontend_config().pointer_bytes(), }; let mut gcc_except_table_data = GccExceptTable { call_sites: CallSiteTable(vec![]), actions: ActionTable::new(), type_info: TypeInfoTable::new(gimli::DW_EH_PE_udata4), }; let catch_type = gcc_except_table_data.type_info.add(Address::Constant(0)); let catch_action = gcc_except_table_data .actions .add(Action { kind: ActionKind::Catch(catch_type), next_action: None }); for call_site in context.compiled_code().unwrap().buffer.call_sites() { if call_site.exception_handlers.is_empty() { gcc_except_table_data.call_sites.0.push(CallSite { start: u64::from(call_site.ret_addr - 1), length: 1, landing_pad: 0, action_entry: None, }); } for &handler in call_site.exception_handlers { match handler { FinalizedMachExceptionHandler::Tag(tag, landingpad) => { match tag.as_u32() { EXCEPTION_HANDLER_CLEANUP => { gcc_except_table_data.call_sites.0.push(CallSite { start: u64::from(call_site.ret_addr - 1), length: 1, landing_pad: u64::from(landingpad), action_entry: None, }) } EXCEPTION_HANDLER_CATCH => { gcc_except_table_data.call_sites.0.push(CallSite { start: u64::from(call_site.ret_addr - 1), length: 1, landing_pad: u64::from(landingpad), action_entry: Some(catch_action), }) } _ => unreachable!(), } } 
_ => unreachable!(), } } } let mut gcc_except_table = super::emit::WriterRelocate::new(self.endian); gcc_except_table_data.write(&mut gcc_except_table, encoding).unwrap(); let mut data = DataDescription::new(); data.define(gcc_except_table.writer.into_vec().into_boxed_slice()); data.set_segment_section("", ".gcc_except_table"); for reloc in &gcc_except_table.relocs { match reloc.name { DebugRelocName::Section(_id) => unreachable!(), DebugRelocName::Symbol(id) => { let id = id.try_into().unwrap(); if id & 1 << 31 == 0 { let func_ref = module .declare_func_in_data(FuncId::from_u32(id), &mut data); data.write_function_addr(reloc.offset, func_ref); } else { let gv = module.declare_data_in_data( DataId::from_u32(id & !(1 << 31)), &mut data, ); data.write_data_addr(reloc.offset, gv, 0); } } }; } module.define_data(lsda, &data).unwrap(); fde.lsda = Some(address_for_data(lsda)); } self.frame_table.add_fde(self.cie_id.unwrap(), fde); } UnwindInfo::WindowsX64(_) | UnwindInfo::WindowsArm64(_) => { // Windows does not have debug info for its unwind info. 
} unwind_info => unimplemented!("{:?}", unwind_info), } } pub(crate) fn emit(self, product: &mut ObjectProduct) { let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian)); self.frame_table.write_eh_frame(&mut eh_frame).unwrap(); if !eh_frame.0.writer.slice().is_empty() { let id = eh_frame.id(); let section_id = product.add_debug_section(id, eh_frame.0.writer.into_vec()); let mut section_map = FxHashMap::default(); section_map.insert(id, section_id); for reloc in &eh_frame.0.relocs { product.add_debug_reloc(&section_map, &section_id, reloc); } } } #[cfg(all(feature = "jit", windows))] pub(crate) unsafe fn register_jit(self, _jit_module: &cranelift_jit::JITModule) {} #[cfg(all(feature = "jit", not(windows)))] pub(crate) unsafe fn register_jit(self, jit_module: &cranelift_jit::JITModule) { use std::mem::ManuallyDrop; let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian)); self.frame_table.write_eh_frame(&mut eh_frame).unwrap(); if eh_frame.0.writer.slice().is_empty() { return; } let mut eh_frame = eh_frame.0.relocate_for_jit(jit_module); // GCC expects a terminating "empty" length, so write a 0 length at the end of the table. 
eh_frame.extend(&[0, 0, 0, 0]); // FIXME support unregistering unwind tables once cranelift-jit supports deallocating // individual functions let eh_frame = ManuallyDrop::new(eh_frame); // ======================================================================= // Everything after this line up to the end of the file is loosely based on // https://github.com/bytecodealliance/wasmtime/blob/4471a82b0c540ff48960eca6757ccce5b1b5c3e4/crates/jit/src/unwind/systemv.rs #[cfg(target_os = "macos")] unsafe { // On macOS, `__register_frame` takes a pointer to a single FDE let start = eh_frame.as_ptr(); let end = start.add(eh_frame.len()); let mut current = start; // Walk all of the entries in the frame table and register them while current < end { let len = std::ptr::read::<u32>(current as *const u32) as usize; // Skip over the CIE if current != start { __register_frame(current); } // Move to the next table entry (+4 because the length itself is not inclusive) current = current.add(len + 4); } } #[cfg(not(target_os = "macos"))] { // On other platforms, `__register_frame` will walk the FDEs until an entry of length 0 unsafe { __register_frame(eh_frame.as_ptr()) }; } } } unsafe extern "C" { // libunwind import fn __register_frame(fde: *const u8); }
rust
github
https://github.com/rust-lang/rust
compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
from rest_framework.decorators import api_view
from rest_framework.response import Response
from website.serializers import *
from rest_framework import status
from django.http import HttpResponse


@api_view([u'GET'])
def actionInventoryItems(request, pk):
    """Return every InventoryItem linked to the Action with ActionID ``pk``.

    Responds 404 when no such Action exists, otherwise 200 with the
    serialized item list.
    """
    try:
        action = Action.objects.get(ActionID=pk)
    except Action.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    serializer = InventoryItemSerializer(action.inventoryitem_set.all(),
                                         many=True)
    return Response(serializer.data, status=status.HTTP_200_OK)


@api_view([u'POST'])
def addInventoryItemToAction(request, pk):
    """Attach InventoryItem ``pk`` to the Action given in POST['action'].

    Responds 400 when the 'action' parameter is missing, 404 when either
    object does not exist, 500 when the relation update fails, 201 on
    success.
    """
    ## TODO: Make sure the item doesn't conflict
    action_id = request.POST.get(u'action')
    if action_id is None:
        # Previously a missing parameter raised KeyError (a generic 500);
        # report it as a client error instead.
        return Response(status=status.HTTP_400_BAD_REQUEST)
    try:
        item = InventoryItem.objects.get(pk=pk)
        action = Action.objects.get(pk=action_id)
    except (InventoryItem.DoesNotExist, Action.DoesNotExist):
        return Response(status=status.HTTP_404_NOT_FOUND)
    try:
        action.inventoryitem_set.add(item)
    except Exception:  # narrowed from bare ``except:`` so SystemExit et al. propagate
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return Response(status=status.HTTP_201_CREATED)


# TODO: make sure to call .save() after every db change?


@api_view([u'POST'])
def removeInventoryItemfromAction(request, pk):
    """Detach InventoryItem ``pk`` from the Action given in POST['action'].

    Mirrors addInventoryItemToAction; responds 200 on success.
    """
    action_id = request.POST.get(u'action')
    if action_id is None:
        return Response(status=status.HTTP_400_BAD_REQUEST)
    try:
        item = InventoryItem.objects.get(pk=pk)
        action = Action.objects.get(pk=action_id)
    except (InventoryItem.DoesNotExist, Action.DoesNotExist):
        return Response(status=status.HTTP_404_NOT_FOUND)
    try:
        action.inventoryitem_set.remove(item)
    except Exception:  # narrowed from bare ``except:``
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return Response(status=status.HTTP_200_OK)


@api_view([u'GET', u'POST'])
def inventoryItemList(request, label=None, format=None):
    '''
    List all Inventory Items, optionally filtered by a label name,
    or create a new Inventory Item from the posted data.
    '''
    if request.method == u'GET':
        if label is not None:
            # Resolve the label->item link rows, then fetch the items.
            itemsWithLabel = ItemLabel.objects.filter(
                LabelID__LabelName__iexact=label).values(u'ItemID')
            inventoryItems = [InventoryItem.objects.get(pk=each[u'ItemID'])
                              for each in itemsWithLabel]
        else:
            inventoryItems = InventoryItem.objects.all()
        serializer = InventoryItemSerializer(inventoryItems, many=True)
        return Response(serializer.data)
    elif request.method == u'POST':
        # NOTE(review): request.DATA is the pre-3.0 DRF spelling; newer
        # releases use request.data -- confirm the installed DRF version.
        serializer = InventoryItemSerializer(data=request.DATA)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


@api_view([u'GET', u'PUT', u'DELETE'])
def inventoryItemDetail(request, pk, format=None):
    '''
    Retrieve, update or delete an Inventory Item.
    '''
    try:
        inventoryItem = InventoryItem.objects.get(ItemID=pk)
    except InventoryItem.DoesNotExist:
        return HttpResponse(status=status.HTTP_404_NOT_FOUND)

    if request.method == u'GET':
        serializer = InventoryItemSerializer(inventoryItem)
        return Response(serializer.data)
    elif request.method == u'PUT':
        # NOTE(review): request.DATA is the pre-3.0 DRF spelling; newer
        # releases use request.data -- confirm the installed DRF version.
        serializer = InventoryItemSerializer(inventoryItem, data=request.DATA)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == u'DELETE':
        inventoryItem.delete()
        return HttpResponse(status=status.HTTP_204_NO_CONTENT)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- coding: utf-8 -*- """ | This file is part of the web2py Web Framework | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) | Author: Thadeus Burgess | Contributors: | - Massimo Di Pierro for creating the original gluon/template.py | - Jonathan Lundell for extensively testing the regex on Jython. | - Limodou (creater of uliweb) who inspired the block-element support for web2py. Templating syntax ------------------ """ import os import cgi import logging from re import compile, sub, escape, DOTALL try: import cStringIO as StringIO except: from io import StringIO try: # have web2py from gluon.restricted import RestrictedError from gluon.globals import current except ImportError: # do not have web2py current = None def RestrictedError(a, b, c): logging.error(str(a) + ':' + str(b) + ':' + str(c)) return RuntimeError class Node(object): """ Basic Container Object """ def __init__(self, value=None, pre_extend=False): self.value = value self.pre_extend = pre_extend def __str__(self): return str(self.value) class SuperNode(Node): def __init__(self, name='', pre_extend=False): self.name = name self.value = None self.pre_extend = pre_extend def __str__(self): if self.value: return str(self.value) else: # raise SyntaxError("Undefined parent block ``%s``. \n" % self.name + "You must define a block before referencing it.\nMake sure you have not left out an ``{{end}}`` tag." ) return '' def __repr__(self): return "%s->%s" % (self.name, self.value) def output_aux(node, blocks): # If we have a block level # If we can override this block. # Override block from vars. # Else we take the default # Else its just a string return (blocks[node.name].output(blocks) if node.name in blocks else node.output(blocks)) \ if isinstance(node, BlockNode) \ else str(node) class BlockNode(Node): """ Block Container. This Node can contain other Nodes and will render in a hierarchical order of when nodes were added. 
ie:: {{ block test }} This is default block test {{ end }} """ def __init__(self, name='', pre_extend=False, delimiters=('{{', '}}')): """ name - Name of this Node. """ self.nodes = [] self.name = name self.pre_extend = pre_extend self.left, self.right = delimiters def __repr__(self): lines = ['%sblock %s%s' % (self.left, self.name, self.right)] lines += [str(node) for node in self.nodes] lines.append('%send%s' % (self.left, self.right)) return ''.join(lines) def __str__(self): """ Get this BlockNodes content, not including child Nodes """ return ''.join(str(node) for node in self.nodes if not isinstance(node, BlockNode)) def append(self, node): """ Adds an element to the nodes. Args: node: Node object or string to append. """ if isinstance(node, str) or isinstance(node, Node): self.nodes.append(node) else: raise TypeError("Invalid type; must be instance of ``str`` or ``BlockNode``. %s" % node) def extend(self, other): """ Extends the list of nodes with another BlockNode class. Args: other: BlockNode or Content object to extend from. """ if isinstance(other, BlockNode): self.nodes.extend(other.nodes) else: raise TypeError( "Invalid type; must be instance of ``BlockNode``. %s" % other) def output(self, blocks): """ Merges all nodes into a single string. Args: blocks: Dictionary of blocks that are extending from this template. """ return ''.join(output_aux(node, blocks) for node in self.nodes) class Content(BlockNode): """ Parent Container -- Used as the root level BlockNode. Contains functions that operate as such. Args: name: Unique name for this BlockNode """ def __init__(self, name="ContentBlock", pre_extend=False): self.name = name self.nodes = [] self.blocks = {} self.pre_extend = pre_extend def __str__(self): return ''.join(output_aux(node, self.blocks) for node in self.nodes) def _insert(self, other, index=0): """ Inserts object at index. 
""" if isinstance(other, (str, Node)): self.nodes.insert(index, other) else: raise TypeError( "Invalid type, must be instance of ``str`` or ``Node``.") def insert(self, other, index=0): """ Inserts object at index. You may pass a list of objects and have them inserted. """ if isinstance(other, (list, tuple)): # Must reverse so the order stays the same. other.reverse() for item in other: self._insert(item, index) else: self._insert(other, index) def append(self, node): """ Adds a node to list. If it is a BlockNode then we assign a block for it. """ if isinstance(node, (str, Node)): self.nodes.append(node) if isinstance(node, BlockNode): self.blocks[node.name] = node else: raise TypeError("Invalid type, must be instance of ``str`` or ``BlockNode``. %s" % node) def extend(self, other): """ Extends the objects list of nodes with another objects nodes """ if isinstance(other, BlockNode): self.nodes.extend(other.nodes) self.blocks.update(other.blocks) else: raise TypeError( "Invalid type; must be instance of ``BlockNode``. %s" % other) def clear_content(self): self.nodes = [] class TemplateParser(object): """Parse all blocks Args: text: text to parse context: context to parse in path: folder path to templates writer: string of writer class to use lexers: dict of custom lexers to use. delimiters: for example `('{{','}}')` _super_nodes: a list of nodes to check for inclusion this should only be set by "self.extend" It contains a list of SuperNodes from a child template that need to be handled. """ default_delimiters = ('{{', '}}') r_tag = compile(r'(\{\{.*?\}\})', DOTALL) r_multiline = compile(r'(""".*?""")|(\'\'\'.*?\'\'\')', DOTALL) # These are used for re-indentation. 
# Indent + 1 re_block = compile('^(elif |else:|except:|except |finally:).*$', DOTALL) # Indent - 1 re_unblock = compile('^(return|continue|break|raise)( .*)?$', DOTALL) # Indent - 1 re_pass = compile('^pass( .*)?$', DOTALL) def __init__(self, text, name="ParserContainer", context=dict(), path='views/', writer='response.write', lexers={}, delimiters=('{{', '}}'), _super_nodes = [], ): # Keep a root level name. self.name = name # Raw text to start parsing. self.text = text # Writer to use (refer to the default for an example). # This will end up as # "%s(%s, escape=False)" % (self.writer, value) self.writer = writer # Dictionary of custom name lexers to use. if isinstance(lexers, dict): self.lexers = lexers else: self.lexers = {} # Path of templates self.path = path # Context for templates. self.context = context # allow optional alternative delimiters if delimiters != self.default_delimiters: escaped_delimiters = (escape(delimiters[0]), escape(delimiters[1])) self.r_tag = compile(r'(%s.*?%s)' % escaped_delimiters, DOTALL) elif hasattr(context.get('response', None), 'delimiters'): if context['response'].delimiters != self.default_delimiters: delimiters = context['response'].delimiters escaped_delimiters = ( escape(delimiters[0]), escape(delimiters[1])) self.r_tag = compile(r'(%s.*?%s)' % escaped_delimiters, DOTALL) self.delimiters = delimiters # Create a root level Content that everything will go into. self.content = Content(name=name) # Stack will hold our current stack of nodes. # As we descend into a node, it will be added to the stack # And when we leave, it will be removed from the stack. # self.content should stay on the stack at all times. self.stack = [self.content] # This variable will hold a reference to every super block # that we come across in this template. self.super_nodes = [] # This variable will hold a reference to the child # super nodes that need handling. 
self.child_super_nodes = _super_nodes # This variable will hold a reference to every block # that we come across in this template self.blocks = {} # Begin parsing. self.parse(text) def to_string(self): """ Returns the parsed template with correct indentation. Used to make it easier to port to python3. """ return self.reindent(str(self.content)) def __str__(self): "Makes sure str works exactly the same as python 3" return self.to_string() def __unicode__(self): "Makes sure str works exactly the same as python 3" return self.to_string() def reindent(self, text): """ Reindents a string of unindented python code. """ # Get each of our lines into an array. lines = text.split('\n') # Our new lines new_lines = [] # Keeps track of how many indents we have. # Used for when we need to drop a level of indentation # only to reindent on the next line. credit = 0 # Current indentation k = 0 ################# # THINGS TO KNOW ################# # k += 1 means indent # k -= 1 means unindent # credit = 1 means unindent on the next line. for raw_line in lines: line = raw_line.strip() # ignore empty lines if not line: continue # If we have a line that contains python code that # should be unindented for this line of code. # and then reindented for the next line. if TemplateParser.re_block.match(line): k = k + credit - 1 # We obviously can't have a negative indentation k = max(k, 0) # Add the indentation! new_lines.append(' ' * (4 * k) + line) # Bank account back to 0 again :( credit = 0 # If we are a pass block, we obviously de-dent. if TemplateParser.re_pass.match(line): k -= 1 # If we are any of the following, de-dent. # However, we should stay on the same level # But the line right after us will be de-dented. # So we add one credit to keep us at the level # while moving back one indentation level. if TemplateParser.re_unblock.match(line): credit = 1 k -= 1 # If we are an if statement, a try, or a semi-colon we # probably need to indent the next line. 
if line.endswith(':') and not line.startswith('#'): k += 1 # This must come before so that we can raise an error with the # right content. new_text = '\n'.join(new_lines) if k > 0: self._raise_error('missing "pass" in view', new_text) elif k < 0: self._raise_error('too many "pass" in view', new_text) return new_text def _raise_error(self, message='', text=None): """ Raises an error using itself as the filename and textual content. """ raise RestrictedError(self.name, text or self.text, message) def _get_file_text(self, filename): """ Attempts to open ``filename`` and retrieve its text. This will use self.path to search for the file. """ # If they didn't specify a filename, how can we find one! if not filename.strip(): self._raise_error('Invalid template filename') # Allow Views to include other views dynamically context = self.context if current and not "response" in context: context["response"] = getattr(current, 'response', None) # Get the filename; filename looks like ``"template.html"``. # We need to eval to remove the quotes and get the string type. filename = eval(filename, context) # Allow empty filename for conditional extend and include directives. if not filename: return '' # Get the path of the file on the system. filepath = self.path and os.path.join(self.path, filename) or filename # try to read the text. try: fileobj = open(filepath, 'rb') text = fileobj.read() fileobj.close() except IOError: self._raise_error('Unable to open included view file: ' + filepath) return text def include(self, content, filename): """ Includes ``filename`` here. """ text = self._get_file_text(filename) t = TemplateParser(text, name=filename, context=self.context, path=self.path, writer=self.writer, delimiters=self.delimiters) content.append(t.content) def extend(self, filename): """ Extends `filename`. Anything not declared in a block defined by the parent will be placed in the parent templates `{{include}}` block. 
""" # If no filename, create a dummy layout with only an {{include}}. text = self._get_file_text(filename) or '%sinclude%s' % tuple(self.delimiters) # Create out nodes list to send to the parent super_nodes = [] # We want to include any non-handled nodes. super_nodes.extend(self.child_super_nodes) # And our nodes as well. super_nodes.extend(self.super_nodes) t = TemplateParser(text, name=filename, context=self.context, path=self.path, writer=self.writer, delimiters=self.delimiters, _super_nodes=super_nodes) # Make a temporary buffer that is unique for parent # template. buf = BlockNode( name='__include__' + filename, delimiters=self.delimiters) pre = [] # Iterate through each of our nodes for node in self.content.nodes: # If a node is a block if isinstance(node, BlockNode): # That happens to be in the parent template if node.name in t.content.blocks: # Do not include it continue if isinstance(node, Node): # Or if the node was before the extension # we should not include it if node.pre_extend: pre.append(node) continue # Otherwise, it should go int the # Parent templates {{include}} section. buf.append(node) else: buf.append(node) # Clear our current nodes. We will be replacing this with # the parent nodes. self.content.nodes = [] t_content = t.content # Set our include, unique by filename t_content.blocks['__include__' + filename] = buf # Make sure our pre_extended nodes go first t_content.insert(pre) # Then we extend our blocks t_content.extend(self.content) # Work off the parent node. self.content = t_content def parse(self, text): # Basically, r_tag.split will split the text into # an array containing, 'non-tag', 'tag', 'non-tag', 'tag' # so if we alternate this variable, we know # what to look for. This is alternate to # line.startswith("{{") in_tag = False extend = None pre_extend = True # Use a list to store everything in # This is because later the code will "look ahead" # for missing strings or brackets. 
ij = self.r_tag.split(text) # j = current index # i = current item stack = self.stack for j in range(len(ij)): i = ij[j] if i: if not stack: self._raise_error('The "end" tag is unmatched, please check if you have a starting "block" tag') # Our current element in the stack. top = stack[-1] if in_tag: line = i # Get rid of delimiters line = line[len(self.delimiters[0]): \ -len(self.delimiters[1])].strip() # This is bad juju, but let's do it anyway if not line: continue # We do not want to replace the newlines in code, # only in block comments. def remove_newline(re_val): # Take the entire match and replace newlines with # escaped newlines. return re_val.group(0).replace('\n', '\\n') # Perform block comment escaping. # This performs escaping ON anything # in between """ and """ line = sub(TemplateParser.r_multiline, remove_newline, line) if line.startswith('='): # IE: {{=response.title}} name, value = '=', line[1:].strip() else: v = line.split(' ', 1) if len(v) == 1: # Example # {{ include }} # {{ end }} name = v[0] value = '' else: # Example # {{ block pie }} # {{ include "layout.html" }} # {{ for i in range(10): }} name = v[0] value = v[1] # This will replace newlines in block comments # with the newline character. This is so that they # retain their formatting, but squish down to one # line in the rendered template. # First check if we have any custom lexers if name in self.lexers: # Pass the information to the lexer # and allow it to inject in the environment # You can define custom names such as # '{{<<variable}}' which could potentially # write unescaped version of the variable. self.lexers[name](parser=self, value=value, top=top, stack=stack) elif name == '=': # So we have a variable to insert into # the template buf = "\n%s(%s)" % (self.writer, value) top.append(Node(buf, pre_extend=pre_extend)) elif name == 'block' and not value.startswith('='): # Make a new node with name. 
node = BlockNode(name=value.strip(), pre_extend=pre_extend, delimiters=self.delimiters) # Append this node to our active node top.append(node) # Make sure to add the node to the stack. # so anything after this gets added # to this node. This allows us to # "nest" nodes. stack.append(node) elif name == 'end' and not value.startswith('='): # We are done with this node. # Save an instance of it self.blocks[top.name] = top # Pop it. stack.pop() elif name == 'super' and not value.startswith('='): # Get our correct target name # If they just called {{super}} without a name # attempt to assume the top blocks name. if value: target_node = value else: target_node = top.name # Create a SuperNode instance node = SuperNode(name=target_node, pre_extend=pre_extend) # Add this to our list to be taken care of self.super_nodes.append(node) # And put in in the tree top.append(node) elif name == 'include' and not value.startswith('='): # If we know the target file to include if value: self.include(top, value) # Otherwise, make a temporary include node # That the child node will know to hook into. else: include_node = BlockNode( name='__include__' + self.name, pre_extend=pre_extend, delimiters=self.delimiters) top.append(include_node) elif name == 'extend' and not value.startswith('='): # We need to extend the following # template. extend = value pre_extend = False else: # If we don't know where it belongs # we just add it anyways without formatting. if line and in_tag: # Split on the newlines >.< tokens = line.split('\n') # We need to look for any instances of # for i in range(10): # = i # pass # So we can properly put a response.write() in place. 
continuation = False len_parsed = 0 for k, token in enumerate(tokens): token = tokens[k] = token.strip() len_parsed += len(token) if token.startswith('='): if token.endswith('\\'): continuation = True tokens[k] = "\n%s(%s" % ( self.writer, token[1:].strip()) else: tokens[k] = "\n%s(%s)" % ( self.writer, token[1:].strip()) elif continuation: tokens[k] += ')' continuation = False buf = "\n%s" % '\n'.join(tokens) top.append(Node(buf, pre_extend=pre_extend)) else: # It is HTML so just include it. buf = "\n%s(%r, escape=False)" % (self.writer, i) top.append(Node(buf, pre_extend=pre_extend)) # Remember: tag, not tag, tag, not tag in_tag = not in_tag # Make a list of items to remove from child to_rm = [] # Go through each of the children nodes for node in self.child_super_nodes: # If we declared a block that this node wants to include if node.name in self.blocks: # Go ahead and include it! node.value = self.blocks[node.name] # Since we processed this child, we don't need to # pass it along to the parent to_rm.append(node) # Remove some of the processed nodes for node in to_rm: # Since this is a pointer, it works beautifully. # Sometimes I miss C-Style pointers... I want my asterisk... self.child_super_nodes.remove(node) # If we need to extend a template. 
if extend: self.extend(extend) # We need this for integration with gluon def parse_template(filename, path='views/', context=dict(), lexers={}, delimiters=('{{', '}}') ): """ Args: filename: can be a view filename in the views folder or an input stream path: is the path of a views folder context: is a dictionary of symbols used to render the template lexers: dict of custom lexers to use delimiters: opening and closing tags """ # First, if we have a str try to open the file if isinstance(filename, str): try: fp = open(os.path.join(path, filename), 'rb') text = fp.read() fp.close() except IOError: raise RestrictedError(filename, '', 'Unable to find the file') else: text = filename.read() # Use the file contents to get a parsed template and return it. return str(TemplateParser(text, context=context, path=path, lexers=lexers, delimiters=delimiters)) def get_parsed(text): """ Returns the indented python code of text. Useful for unit testing. """ return str(TemplateParser(text)) class DummyResponse(): def __init__(self): self.body = StringIO.StringIO() def write(self, data, escape=True): if not escape: self.body.write(str(data)) elif hasattr(data, 'xml') and callable(data.xml): self.body.write(data.xml()) else: # make it a string if not isinstance(data, (str, unicode)): data = str(data) elif isinstance(data, unicode): data = data.encode('utf8', 'xmlcharrefreplace') data = cgi.escape(data, True).replace("'", "&#x27;") self.body.write(data) class NOESCAPE(): """ A little helper to avoid escaping. """ def __init__(self, text): self.text = text def xml(self): return self.text # And this is a generic render function. # Here for integration with gluon. 
def render(content="hello world",
           stream=None,
           filename=None,
           path=None,
           context=None,
           lexers=None,
           delimiters=('{{', '}}'),
           writer='response.write'
           ):
    """
    Generic render function

    Args:
        content: default content
        stream: file-like obj to read template from
        filename: where to find template
        path: base path for templates
        context: env
        lexers: custom lexers to use
        delimiters: opening and closing tags
        writer: where to inject the resulting stream

    Note:
        ``context`` and ``lexers`` now default to ``None`` and become fresh
        dicts per call; the previous mutable default arguments leaked state
        (the injected ``response`` and ``NOESCAPE`` keys) between calls.

    Example::

        >>> render()
        'hello world'
        >>> render(content='abc')
        'abc'
        >>> render(content="abc'")
        "abc'"
        >>> render(content=''''a"'bc''')
        'a"\'bc'
        >>> render(content='a\\nbc')
        'a\\nbc'
        >>> render(content='a"bcd"e')
        'a"bcd"e'
        >>> render(content="'''a\\nc'''")
        "'''a\\nc'''"
        >>> render(content="'''a\\'c'''")
        "'''a\'c'''"
        >>> render(content='{{for i in range(a):}}{{=i}}<br />{{pass}}', context=dict(a=5))
        '0<br />1<br />2<br />3<br />4<br />'
        >>> render(content='{%for i in range(a):%}{%=i%}<br />{%pass%}', context=dict(a=5),delimiters=('{%','%}'))
        '0<br />1<br />2<br />3<br />4<br />'
        >>> render(content="{{='''hello\\nworld'''}}")
        'hello\\nworld'
        >>> render(content='{{for i in range(3):\\n=i\\npass}}')
        '012'
    """
    # here to avoid circular Imports
    try:
        from globals import Response
    except ImportError:
        # Working standalone. Build a mock Response object.
        Response = DummyResponse

    # Fresh dicts per call; module-level mutable defaults are shared.
    if context is None:
        context = {}
    if lexers is None:
        lexers = {}

    # Add it to the context so we can use it.
    if 'NOESCAPE' not in context:
        context['NOESCAPE'] = NOESCAPE

    # save current response class
    if context and 'response' in context:
        old_response_body = context['response'].body
        context['response'].body = StringIO.StringIO()
    else:
        old_response_body = None
        context['response'] = Response()

    # If we don't have anything to render, why bother?
    if not content and not stream and not filename:
        raise SyntaxError("Must specify a stream or filename or content")

    # Here for legacy purposes, probably can be reduced to
    # something more simple.
    close_stream = False
    if not stream:
        if filename:
            stream = open(filename, 'rb')
            close_stream = True
        elif content:
            stream = StringIO.StringIO(content)

    # Execute the template.
    code = str(TemplateParser(stream.read(), context=context, path=path,
                              lexers=lexers, delimiters=delimiters,
                              writer=writer))
    try:
        # ``exec(code, context)`` is accepted by both Python 2 (tuple form
        # of the exec statement) and Python 3; the old
        # ``exec(code) in context`` spelling is Python-2-only syntax.
        exec(code, context)
    finally:
        # Close the stream we opened even when the template raises;
        # previously it leaked on an exception.
        if close_stream:
            stream.close()

    # Returned the rendered content, restoring any saved response body.
    text = context['response'].body.getvalue()
    if old_response_body is not None:
        context['response'].body = old_response_body
    return text


if __name__ == '__main__':
    import doctest
    doctest.testmod()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# localetzonemap.py
# Python module for osinstaller.
#
# Copyright (C) 2010 Ylmf, Inc.
#
#
# Ylmf Author(s): wkt <weikting@gmail.com>
#
#

import locale
from gettext import *


def N_(s):
    """Mark *s* for translation without translating it at definition time."""
    return s


# Each entry maps a display name to ``[candidate locales, default timezone]``;
# the first locale in the candidate list is the one chosen by default.
LangList = [
    {"C": [['C'], "GMT"]},
    {"中文简体": [['zh_CN.UTF-8', 'zh_CN'], 'Asia/Chongqing']},
    {"中文繁体(香港)": [['zh_HK.UTF-8', 'en_HK.UTF-8', 'zh_HK', 'en_HK'],
                  'Asia/Hong_Kong']},
    {"中文繁体(台灣)": [['zh_TW.UTF-8', 'zh_TW'], 'Asia/Taipei']},
    {"English(United States)": [['en_US.UTF-8', 'en_US'], 'America/New_York']},
    {"English(Hong Kong)": [['en_HK.UTF-8', 'zh_HK.UTF-8', 'zh_HK', 'en_HK'],
                            'Asia/Hong_Kong']},
]

# Region code -> {tz database name: translatable display label}.
TZones = {
    'C': {'GMT': 'GMT'},
    'CN': {'Asia/Chongqing': N_('China(Chongqing,Shanghai,Beijing)')},
    'TW': {'Asia/Taipei': N_('China(Taibei)')},
    'HK': {'Asia/Hong_Kong': N_('China(xianggang)')},
    'US': {"America/Indiana/Tell_City": N_("United States (Tell City, Indiana)"),
           "America/Phoenix": N_("United States (Phoenix)"),
           },
}
unknown
codeparrot/codeparrot-clean