language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
ansible__ansible
test/lib/ansible_test/_internal/cli/parsers/__init__.py
{ "start": 9969, "end": 10488 }
class ____(NetworkTargetParser): """Composite argument parser for a network SSH target.""" @property def option_name(self) -> str: """The option name used for this parser.""" return '--target-network' @property def allow_inventory(self) -> bool: """True if inventory is allowed, otherwise False.""" return False @property def limit_one(self) -> bool: """True if only one target is allowed, otherwise False.""" return True
NetworkSshTargetParser
python
gevent__gevent
src/greentest/3.11/test_subprocess.py
{ "start": 80531, "end": 145863 }
class ____(BaseTestCase): def setUp(self): super().setUp() self._nonexistent_dir = "/_this/pa.th/does/not/exist" def _get_chdir_exception(self): try: os.chdir(self._nonexistent_dir) except OSError as e: # This avoids hard coding the errno value or the OS perror() # string and instead capture the exception that we want to see # below for comparison. desired_exception = e else: self.fail("chdir to nonexistent directory %s succeeded." % self._nonexistent_dir) return desired_exception def test_exception_cwd(self): """Test error in the child raised in the parent for a bad cwd.""" desired_exception = self._get_chdir_exception() try: p = subprocess.Popen([sys.executable, "-c", ""], cwd=self._nonexistent_dir) except OSError as e: # Test that the child process chdir failure actually makes # it up to the parent process as the correct exception. self.assertEqual(desired_exception.errno, e.errno) self.assertEqual(desired_exception.strerror, e.strerror) self.assertEqual(desired_exception.filename, e.filename) else: self.fail("Expected OSError: %s" % desired_exception) def test_exception_bad_executable(self): """Test error in the child raised in the parent for a bad executable.""" desired_exception = self._get_chdir_exception() try: p = subprocess.Popen([sys.executable, "-c", ""], executable=self._nonexistent_dir) except OSError as e: # Test that the child process exec failure actually makes # it up to the parent process as the correct exception. 
self.assertEqual(desired_exception.errno, e.errno) self.assertEqual(desired_exception.strerror, e.strerror) self.assertEqual(desired_exception.filename, e.filename) else: self.fail("Expected OSError: %s" % desired_exception) def test_exception_bad_args_0(self): """Test error in the child raised in the parent for a bad args[0].""" desired_exception = self._get_chdir_exception() try: p = subprocess.Popen([self._nonexistent_dir, "-c", ""]) except OSError as e: # Test that the child process exec failure actually makes # it up to the parent process as the correct exception. self.assertEqual(desired_exception.errno, e.errno) self.assertEqual(desired_exception.strerror, e.strerror) self.assertEqual(desired_exception.filename, e.filename) else: self.fail("Expected OSError: %s" % desired_exception) # We mock the __del__ method for Popen in the next two tests # because it does cleanup based on the pid returned by fork_exec # along with issuing a resource warning if it still exists. Since # we don't actually spawn a process in these tests we can forego # the destructor. 
An alternative would be to set _child_created to # False before the destructor is called but there is no easy way # to do that class PopenNoDestructor(subprocess.Popen): def __del__(self): pass @mock.patch("subprocess._fork_exec") def test_exception_errpipe_normal(self, fork_exec): """Test error passing done through errpipe_write in the good case""" def proper_error(*args): errpipe_write = args[13] # Write the hex for the error code EISDIR: 'is a directory' err_code = '{:x}'.format(errno.EISDIR).encode() os.write(errpipe_write, b"OSError:" + err_code + b":") return 0 fork_exec.side_effect = proper_error with mock.patch("subprocess.os.waitpid", side_effect=ChildProcessError): with self.assertRaises(IsADirectoryError): self.PopenNoDestructor(["non_existent_command"]) @mock.patch("subprocess._fork_exec") def test_exception_errpipe_bad_data(self, fork_exec): """Test error passing done through errpipe_write where its not in the expected format""" error_data = b"\xFF\x00\xDE\xAD" def bad_error(*args): errpipe_write = args[13] # Anything can be in the pipe, no assumptions should # be made about its encoding, so we'll write some # arbitrary hex bytes to test it out os.write(errpipe_write, error_data) return 0 fork_exec.side_effect = bad_error with mock.patch("subprocess.os.waitpid", side_effect=ChildProcessError): with self.assertRaises(subprocess.SubprocessError) as e: self.PopenNoDestructor(["non_existent_command"]) self.assertIn(repr(error_data), str(e.exception)) @unittest.skipIf(not os.path.exists('/proc/self/status'), "need /proc/self/status") def test_restore_signals(self): # Blindly assume that cat exists on systems with /proc/self/status... 
default_proc_status = subprocess.check_output( ['cat', '/proc/self/status'], restore_signals=False) for line in default_proc_status.splitlines(): if line.startswith(b'SigIgn'): default_sig_ign_mask = line break else: self.skipTest("SigIgn not found in /proc/self/status.") restored_proc_status = subprocess.check_output( ['cat', '/proc/self/status'], restore_signals=True) for line in restored_proc_status.splitlines(): if line.startswith(b'SigIgn'): restored_sig_ign_mask = line break self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask, msg="restore_signals=True should've unblocked " "SIGPIPE and friends.") def test_start_new_session(self): # For code coverage of calling setsid(). We don't care if we get an # EPERM error from it depending on the test execution environment, that # still indicates that it was called. try: output = subprocess.check_output( [sys.executable, "-c", "import os; print(os.getsid(0))"], start_new_session=True) except PermissionError as e: if e.errno != errno.EPERM: raise # EACCES? else: parent_sid = os.getsid(0) child_sid = int(output) self.assertNotEqual(parent_sid, child_sid) @unittest.skipUnless(hasattr(os, 'setpgid') and hasattr(os, 'getpgid'), 'no setpgid or getpgid on platform') def test_process_group_0(self): # For code coverage of calling setpgid(). We don't care if we get an # EPERM error from it depending on the test execution environment, that # still indicates that it was called. try: output = subprocess.check_output( [sys.executable, "-c", "import os; print(os.getpgid(0))"], process_group=0) except PermissionError as e: if e.errno != errno.EPERM: raise # EACCES? else: parent_pgid = os.getpgid(0) child_pgid = int(output) self.assertNotEqual(parent_pgid, child_pgid) @unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform') def test_user(self): # For code coverage of the user parameter. 
We don't care if we get a # permission error from it depending on the test execution environment, # that still indicates that it was called. uid = os.geteuid() test_users = [65534 if uid != 65534 else 65533, uid] name_uid = "nobody" if sys.platform != 'darwin' else "unknown" if pwd is not None: try: pwd.getpwnam(name_uid) test_users.append(name_uid) except KeyError: # unknown user name name_uid = None for user in test_users: # posix_spawn() may be used with close_fds=False for close_fds in (False, True): with self.subTest(user=user, close_fds=close_fds): try: output = subprocess.check_output( [sys.executable, "-c", "import os; print(os.getuid())"], user=user, close_fds=close_fds) except PermissionError as e: # (EACCES, EPERM) if e.errno == errno.EACCES: self.assertEqual(e.filename, sys.executable) else: self.assertIsNone(e.filename) else: if isinstance(user, str): user_uid = pwd.getpwnam(user).pw_uid else: user_uid = user child_user = int(output) self.assertEqual(child_user, user_uid) with self.assertRaises(ValueError): subprocess.check_call(ZERO_RETURN_CMD, user=-1) with self.assertRaises(OverflowError): subprocess.check_call(ZERO_RETURN_CMD, cwd=os.curdir, env=os.environ, user=2**64) if pwd is None and name_uid is not None: with self.assertRaises(ValueError): subprocess.check_call(ZERO_RETURN_CMD, user=name_uid) @unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform') def test_user_error(self): with self.assertRaises(ValueError): subprocess.check_call(ZERO_RETURN_CMD, user=65535) @unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform') def test_group(self): gid = os.getegid() group_list = [65534 if gid != 65534 else 65533] name_group = _get_test_grp_name() if grp is not None: group_list.append(name_group) for group in group_list + [gid]: # posix_spawn() may be used with close_fds=False for close_fds in (False, True): with self.subTest(group=group, close_fds=close_fds): try: output = subprocess.check_output( [sys.executable, 
"-c", "import os; print(os.getgid())"], group=group, close_fds=close_fds) except PermissionError as e: # (EACCES, EPERM) self.assertIsNone(e.filename) else: if isinstance(group, str): group_gid = grp.getgrnam(group).gr_gid else: group_gid = group child_group = int(output) self.assertEqual(child_group, group_gid) # make sure we bomb on negative values with self.assertRaises(ValueError): subprocess.check_call(ZERO_RETURN_CMD, group=-1) with self.assertRaises(OverflowError): subprocess.check_call(ZERO_RETURN_CMD, cwd=os.curdir, env=os.environ, group=2**64) if grp is None: with self.assertRaises(ValueError): subprocess.check_call(ZERO_RETURN_CMD, group=name_group) @unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform') def test_group_error(self): with self.assertRaises(ValueError): subprocess.check_call(ZERO_RETURN_CMD, group=65535) @unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform') def test_extra_groups(self): gid = os.getegid() group_list = [65534 if gid != 65534 else 65533] name_group = _get_test_grp_name() perm_error = False if grp is not None: group_list.append(name_group) try: output = subprocess.check_output( [sys.executable, "-c", "import os, sys, json; json.dump(os.getgroups(), sys.stdout)"], extra_groups=group_list) except OSError as ex: if ex.errno != errno.EPERM: raise self.assertIsNone(ex.filename) perm_error = True else: parent_groups = os.getgroups() child_groups = json.loads(output) if grp is not None: desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g for g in group_list] else: desired_gids = group_list if perm_error: self.assertEqual(set(child_groups), set(parent_groups)) else: self.assertEqual(set(desired_gids), set(child_groups)) # make sure we bomb on negative values with self.assertRaises(ValueError): subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1]) with self.assertRaises(ValueError): subprocess.check_call(ZERO_RETURN_CMD, cwd=os.curdir, env=os.environ, extra_groups=[2**64]) 
if grp is None: with self.assertRaises(ValueError): subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[name_group]) @unittest.skipIf(hasattr(os, 'setgroups'), 'setgroups() available on platform') def test_extra_groups_error(self): with self.assertRaises(ValueError): subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[]) @unittest.skipIf(mswindows or not hasattr(os, 'umask'), 'POSIX umask() is not available.') def test_umask(self): tmpdir = None try: tmpdir = tempfile.mkdtemp() name = os.path.join(tmpdir, "beans") # We set an unusual umask in the child so as a unique mode # for us to test the child's touched file for. subprocess.check_call( [sys.executable, "-c", f"open({name!r}, 'w').close()"], umask=0o053) # Ignore execute permissions entirely in our test, # filesystems could be mounted to ignore or force that. st_mode = os.stat(name).st_mode & 0o666 expected_mode = 0o624 self.assertEqual(expected_mode, st_mode, msg=f'{oct(expected_mode)} != {oct(st_mode)}') finally: if tmpdir is not None: shutil.rmtree(tmpdir) def test_run_abort(self): # returncode handles signal termination with support.SuppressCrashReport(): p = subprocess.Popen([sys.executable, "-c", 'import os; os.abort()']) p.wait() self.assertEqual(-p.returncode, signal.SIGABRT) def test_CalledProcessError_str_signal(self): err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd") error_string = str(err) # We're relying on the repr() of the signal.Signals intenum to provide # the word signal, the signal name and the numeric value. self.assertIn("signal", error_string.lower()) # We're not being specific about the signal name as some signals have # multiple names and which name is revealed can vary. 
self.assertIn("SIG", error_string) self.assertIn(str(signal.SIGABRT), error_string) def test_CalledProcessError_str_unknown_signal(self): err = subprocess.CalledProcessError(-9876543, "fake cmd") error_string = str(err) self.assertIn("unknown signal 9876543.", error_string) def test_CalledProcessError_str_non_zero(self): err = subprocess.CalledProcessError(2, "fake cmd") error_string = str(err) self.assertIn("non-zero exit status 2.", error_string) def test_preexec(self): # DISCLAIMER: Setting environment variables is *not* a good use # of a preexec_fn. This is merely a test. p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getenv("FRUIT"))'], stdout=subprocess.PIPE, preexec_fn=lambda: os.putenv("FRUIT", "apple")) with p: self.assertEqual(p.stdout.read(), b"apple") def test_preexec_exception(self): def raise_it(): raise ValueError("What if two swallows carried a coconut?") try: p = subprocess.Popen([sys.executable, "-c", ""], preexec_fn=raise_it) except subprocess.SubprocessError as e: self.assertTrue( subprocess._fork_exec, "Expected a ValueError from the preexec_fn") except ValueError as e: self.assertIn("coconut", e.args[0]) else: self.fail("Exception raised by preexec_fn did not make it " "to the parent process.") class _TestExecuteChildPopen(subprocess.Popen): """Used to test behavior at the end of _execute_child.""" def __init__(self, testcase, *args, **kwargs): self._testcase = testcase subprocess.Popen.__init__(self, *args, **kwargs) def _execute_child(self, *args, **kwargs): try: subprocess.Popen._execute_child(self, *args, **kwargs) finally: # Open a bunch of file descriptors and verify that # none of them are the same as the ones the Popen # instance is using for stdin/stdout/stderr. 
devzero_fds = [os.open("/dev/zero", os.O_RDONLY) for _ in range(8)] try: for fd in devzero_fds: self._testcase.assertNotIn( fd, (self.stdin.fileno(), self.stdout.fileno(), self.stderr.fileno()), msg="At least one fd was closed early.") finally: for fd in devzero_fds: os.close(fd) @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.") def test_preexec_errpipe_does_not_double_close_pipes(self): """Issue16140: Don't double close pipes on preexec error.""" def raise_it(): raise subprocess.SubprocessError( "force the _execute_child() errpipe_data path.") with self.assertRaises(subprocess.SubprocessError): self._TestExecuteChildPopen( self, ZERO_RETURN_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=raise_it) def test_preexec_gc_module_failure(self): # This tests the code that disables garbage collection if the child # process will execute any Python. enabled = gc.isenabled() try: gc.disable() self.assertFalse(gc.isenabled()) subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None) self.assertFalse(gc.isenabled(), "Popen enabled gc when it shouldn't.") gc.enable() self.assertTrue(gc.isenabled()) subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None) self.assertTrue(gc.isenabled(), "Popen left gc disabled.") finally: if not enabled: gc.disable() @unittest.skipIf( sys.platform == 'darwin', 'setrlimit() seems to fail on OS X') def test_preexec_fork_failure(self): # The internal code did not preserve the previous exception when # re-enabling garbage collection try: from resource import getrlimit, setrlimit, RLIMIT_NPROC except ImportError as err: self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD limits = getrlimit(RLIMIT_NPROC) [_, hard] = limits setrlimit(RLIMIT_NPROC, (0, hard)) self.addCleanup(setrlimit, RLIMIT_NPROC, limits) try: subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None) except BlockingIOError: # Forking should raise EAGAIN, translated to 
BlockingIOError pass else: self.skipTest('RLIMIT_NPROC had no effect; probably superuser') def test_args_string(self): # args is a string fd, fname = tempfile.mkstemp() # reopen in text mode with open(fd, "w", errors="surrogateescape") as fobj: fobj.write("#!%s\n" % support.unix_shell) fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.chmod(fname, 0o700) p = subprocess.Popen(fname) p.wait() os.remove(fname) self.assertEqual(p.returncode, 47) def test_invalid_args(self): # invalid arguments should raise ValueError self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], startupinfo=47) self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], creationflags=47) def test_shell_sequence(self): # Run command through the shell (sequence) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen(["echo $FRUIT"], shell=1, stdout=subprocess.PIPE, env=newenv) with p: self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple") def test_shell_string(self): # Run command through the shell (string) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen("echo $FRUIT", shell=1, stdout=subprocess.PIPE, env=newenv) with p: self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple") def test_call_string(self): # call() function with string argument on UNIX fd, fname = tempfile.mkstemp() # reopen in text mode with open(fd, "w", errors="surrogateescape") as fobj: fobj.write("#!%s\n" % support.unix_shell) fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.chmod(fname, 0o700) rc = subprocess.call(fname) os.remove(fname) self.assertEqual(rc, 47) def test_specific_shell(self): # Issue #9265: Incorrect name passed as arg[0]. 
shells = [] for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']: for name in ['bash', 'ksh']: sh = os.path.join(prefix, name) if os.path.isfile(sh): shells.append(sh) if not shells: # Will probably work for any shell but csh. self.skipTest("bash or ksh required for this test") sh = '/bin/sh' if os.path.isfile(sh) and not os.path.islink(sh): # Test will fail if /bin/sh is a symlink to csh. shells.append(sh) for sh in shells: p = subprocess.Popen("echo $0", executable=sh, shell=True, stdout=subprocess.PIPE) with p: self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii')) def _kill_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. # Also set the SIGINT handler to the default to make sure it's not # being ignored (some tests rely on that.) old_handler = signal.signal(signal.SIGINT, signal.default_int_handler) try: p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) finally: signal.signal(signal.SIGINT, old_handler) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) return p @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')), "Due to known OS bug (issue #16762)") def _kill_dead_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Wait for the interpreter to be completely initialized before # sending any signal. 
p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) p.communicate() def test_send_signal(self): p = self._kill_process('send_signal', signal.SIGINT) _, stderr = p.communicate() self.assertIn(b'KeyboardInterrupt', stderr) self.assertNotEqual(p.wait(), 0) def test_kill(self): p = self._kill_process('kill') _, stderr = p.communicate() self.assertEqual(stderr, b'') self.assertEqual(p.wait(), -signal.SIGKILL) def test_terminate(self): p = self._kill_process('terminate') _, stderr = p.communicate() self.assertEqual(stderr, b'') self.assertEqual(p.wait(), -signal.SIGTERM) def test_send_signal_dead(self): # Sending a signal to a dead process self._kill_dead_process('send_signal', signal.SIGINT) def test_kill_dead(self): # Killing a dead process self._kill_dead_process('kill') def test_terminate_dead(self): # Terminating a dead process self._kill_dead_process('terminate') def _save_fds(self, save_fds): fds = [] for fd in save_fds: inheritable = os.get_inheritable(fd) saved = os.dup(fd) fds.append((fd, saved, inheritable)) return fds def _restore_fds(self, fds): for fd, saved, inheritable in fds: os.dup2(saved, fd, inheritable=inheritable) os.close(saved) def check_close_std_fds(self, fds): # Issue #9905: test that subprocess pipes still work properly with # some standard fds closed stdin = 0 saved_fds = self._save_fds(fds) for fd, saved, inheritable in saved_fds: if fd == 0: stdin = saved break try: for fd in fds: os.close(fd) out, err = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() self.assertEqual(out, b'apple') self.assertEqual(err, b'orange') finally: self._restore_fds(saved_fds) def test_close_fd_0(self): self.check_close_std_fds([0]) def test_close_fd_1(self): self.check_close_std_fds([1]) def 
test_close_fd_2(self): self.check_close_std_fds([2]) def test_close_fds_0_1(self): self.check_close_std_fds([0, 1]) def test_close_fds_0_2(self): self.check_close_std_fds([0, 2]) def test_close_fds_1_2(self): self.check_close_std_fds([1, 2]) def test_close_fds_0_1_2(self): # Issue #10806: test that subprocess pipes still work properly with # all standard fds closed. self.check_close_std_fds([0, 1, 2]) def test_small_errpipe_write_fd(self): """Issue #15798: Popen should work when stdio fds are available.""" new_stdin = os.dup(0) new_stdout = os.dup(1) try: os.close(0) os.close(1) # Side test: if errpipe_write fails to have its CLOEXEC # flag set this should cause the parent to think the exec # failed. Extremely unlikely: everyone supports CLOEXEC. subprocess.Popen([ sys.executable, "-c", "print('AssertionError:0:CLOEXEC failure.')"]).wait() finally: # Restore original stdin and stdout os.dup2(new_stdin, 0) os.dup2(new_stdout, 1) os.close(new_stdin) os.close(new_stdout) def test_remapping_std_fds(self): # open up some temporary files temps = [tempfile.mkstemp() for i in range(3)] try: temp_fds = [fd for fd, fname in temps] # unlink the files -- we won't need to reopen them for fd, fname in temps: os.unlink(fname) # write some data to what will become stdin, and rewind os.write(temp_fds[1], b"STDIN") os.lseek(temp_fds[1], 0, 0) # move the standard file descriptors out of the way saved_fds = self._save_fds(range(3)) try: # duplicate the file objects over the standard fd's for fd, temp_fd in enumerate(temp_fds): os.dup2(temp_fd, fd) # now use those files in the "wrong" order, so that subprocess # has to rearrange them in the child p = subprocess.Popen([sys.executable, "-c", 'import sys; got = sys.stdin.read();' 'sys.stdout.write("got %s"%got); sys.stderr.write("err")'], stdin=temp_fds[1], stdout=temp_fds[2], stderr=temp_fds[0]) p.wait() finally: self._restore_fds(saved_fds) for fd in temp_fds: os.lseek(fd, 0, 0) out = os.read(temp_fds[2], 1024) err = 
os.read(temp_fds[0], 1024).strip() self.assertEqual(out, b"got STDIN") self.assertEqual(err, b"err") finally: for fd in temp_fds: os.close(fd) def check_swap_fds(self, stdin_no, stdout_no, stderr_no): # open up some temporary files temps = [tempfile.mkstemp() for i in range(3)] temp_fds = [fd for fd, fname in temps] try: # unlink the files -- we won't need to reopen them for fd, fname in temps: os.unlink(fname) # save a copy of the standard file descriptors saved_fds = self._save_fds(range(3)) try: # duplicate the temp files over the standard fd's 0, 1, 2 for fd, temp_fd in enumerate(temp_fds): os.dup2(temp_fd, fd) # write some data to what will become stdin, and rewind os.write(stdin_no, b"STDIN") os.lseek(stdin_no, 0, 0) # now use those files in the given order, so that subprocess # has to rearrange them in the child p = subprocess.Popen([sys.executable, "-c", 'import sys; got = sys.stdin.read();' 'sys.stdout.write("got %s"%got); sys.stderr.write("err")'], stdin=stdin_no, stdout=stdout_no, stderr=stderr_no) p.wait() for fd in temp_fds: os.lseek(fd, 0, 0) out = os.read(stdout_no, 1024) err = os.read(stderr_no, 1024).strip() finally: self._restore_fds(saved_fds) self.assertEqual(out, b"got STDIN") self.assertEqual(err, b"err") finally: for fd in temp_fds: os.close(fd) # When duping fds, if there arises a situation where one of the fds is # either 0, 1 or 2, it is possible that it is overwritten (#12607). # This tests all combinations of this. 
def test_swap_fds(self): self.check_swap_fds(0, 1, 2) self.check_swap_fds(0, 2, 1) self.check_swap_fds(1, 0, 2) self.check_swap_fds(1, 2, 0) self.check_swap_fds(2, 0, 1) self.check_swap_fds(2, 1, 0) def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds): saved_fds = self._save_fds(range(3)) try: for from_fd in from_fds: with tempfile.TemporaryFile() as f: os.dup2(f.fileno(), from_fd) fd_to_close = (set(range(3)) - set(from_fds)).pop() os.close(fd_to_close) arg_names = ['stdin', 'stdout', 'stderr'] kwargs = {} for from_fd, to_fd in zip(from_fds, to_fds): kwargs[arg_names[to_fd]] = from_fd code = textwrap.dedent(r''' import os, sys skipped_fd = int(sys.argv[1]) for fd in range(3): if fd != skipped_fd: os.write(fd, str(fd).encode('ascii')) ''') skipped_fd = (set(range(3)) - set(to_fds)).pop() rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)], **kwargs) self.assertEqual(rc, 0) for from_fd, to_fd in zip(from_fds, to_fds): os.lseek(from_fd, 0, os.SEEK_SET) read_bytes = os.read(from_fd, 1024) read_fds = list(map(int, read_bytes.decode('ascii'))) msg = textwrap.dedent(f""" When testing {from_fds} to {to_fds} redirection, parent descriptor {from_fd} got redirected to descriptor(s) {read_fds} instead of descriptor {to_fd}. """) self.assertEqual([to_fd], read_fds, msg) finally: self._restore_fds(saved_fds) # Check that subprocess can remap std fds correctly even # if one of them is closed (#32844). 
def test_swap_std_fds_with_one_closed(self): for from_fds in itertools.combinations(range(3), 2): for to_fds in itertools.permutations(range(3), 2): self._check_swap_std_fds_with_one_closed(from_fds, to_fds) def test_surrogates_error_message(self): def prepare(): raise ValueError("surrogate:\uDCff") try: subprocess.call( ZERO_RETURN_CMD, preexec_fn=prepare) except ValueError as err: # Pure Python implementations keeps the message self.assertIsNone(subprocess._fork_exec) self.assertEqual(str(err), "surrogate:\uDCff") except subprocess.SubprocessError as err: # _posixsubprocess uses a default message self.assertIsNotNone(subprocess._fork_exec) self.assertEqual(str(err), "Exception occurred in preexec_fn.") else: self.fail("Expected ValueError or subprocess.SubprocessError") def test_undecodable_env(self): for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')): encoded_value = value.encode("ascii", "surrogateescape") # test str with surrogates script = "import os; print(ascii(os.getenv(%s)))" % repr(key) env = os.environ.copy() env[key] = value # Use C locale to get ASCII for the locale encoding to force # surrogate-escaping of \xFF in the child process env['LC_ALL'] = 'C' decoded_value = value stdout = subprocess.check_output( [sys.executable, "-c", script], env=env) stdout = stdout.rstrip(b'\n\r') self.assertEqual(stdout.decode('ascii'), ascii(decoded_value)) # test bytes key = key.encode("ascii", "surrogateescape") script = "import os; print(ascii(os.getenvb(%s)))" % repr(key) env = os.environ.copy() env[key] = encoded_value stdout = subprocess.check_output( [sys.executable, "-c", script], env=env) stdout = stdout.rstrip(b'\n\r') self.assertEqual(stdout.decode('ascii'), ascii(encoded_value)) def test_bytes_program(self): abs_program = os.fsencode(ZERO_RETURN_CMD[0]) args = list(ZERO_RETURN_CMD[1:]) path, program = os.path.split(ZERO_RETURN_CMD[0]) program = os.fsencode(program) # absolute bytes path exitcode = subprocess.call([abs_program]+args) 
self.assertEqual(exitcode, 0) # absolute bytes path as a string cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8")) exitcode = subprocess.call(cmd, shell=True) self.assertEqual(exitcode, 0) # bytes program, unicode PATH env = os.environ.copy() env["PATH"] = path exitcode = subprocess.call([program]+args, env=env) self.assertEqual(exitcode, 0) # bytes program, bytes PATH envb = os.environb.copy() envb[b"PATH"] = os.fsencode(path) exitcode = subprocess.call([program]+args, env=envb) self.assertEqual(exitcode, 0) def test_pipe_cloexec(self): sleeper = support.findfile("input_reader.py", subdir="subprocessdata") fd_status = support.findfile("fd_status.py", subdir="subprocessdata") p1 = subprocess.Popen([sys.executable, sleeper], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) self.addCleanup(p1.communicate, b'') p2 = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=False) output, error = p2.communicate() result_fds = set(map(int, output.split(b','))) unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(), p1.stderr.fileno()]) self.assertFalse(result_fds & unwanted_fds, "Expected no fds from %r to be open in child, " "found %r" % (unwanted_fds, result_fds & unwanted_fds)) def test_pipe_cloexec_real_tools(self): qcat = support.findfile("qcat.py", subdir="subprocessdata") qgrep = support.findfile("qgrep.py", subdir="subprocessdata") subdata = b'zxcvbn' data = subdata * 4 + b'\n' p1 = subprocess.Popen([sys.executable, qcat], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=False) p2 = subprocess.Popen([sys.executable, qgrep, subdata], stdin=p1.stdout, stdout=subprocess.PIPE, close_fds=False) self.addCleanup(p1.wait) self.addCleanup(p2.wait) def kill_p1(): try: p1.terminate() except ProcessLookupError: pass def kill_p2(): try: p2.terminate() except ProcessLookupError: pass self.addCleanup(kill_p1) self.addCleanup(kill_p2) p1.stdin.write(data) p1.stdin.close() readfiles, ignored1, 
ignored2 = select.select([p2.stdout], [], [], 10) self.assertTrue(readfiles, "The child hung") self.assertEqual(p2.stdout.read(), data) p1.stdout.close() p2.stdout.close() def test_close_fds(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") fds = os.pipe() self.addCleanup(os.close, fds[0]) self.addCleanup(os.close, fds[1]) open_fds = set(fds) # add a bunch more fds for _ in range(9): fd = os.open(os.devnull, os.O_RDONLY) self.addCleanup(os.close, fd) open_fds.add(fd) for fd in open_fds: os.set_inheritable(fd, True) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=False) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertEqual(remaining_fds & open_fds, open_fds, "Some fds were closed") p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertFalse(remaining_fds & open_fds, "Some fds were left open") self.assertIn(1, remaining_fds, "Subprocess failed") # Keep some of the fd's we opened open in the subprocess. # This tests _posixsubprocess.c's proper handling of fds_to_keep. 
fds_to_keep = set(open_fds.pop() for _ in range(8)) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, pass_fds=fds_to_keep) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertFalse((remaining_fds - fds_to_keep) & open_fds, "Some fds not in pass_fds were left open") self.assertIn(1, remaining_fds, "Subprocess failed") @unittest.skipIf(sys.platform.startswith("freebsd") and os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev, "Requires fdescfs mounted on /dev/fd on FreeBSD") def test_close_fds_when_max_fd_is_lowered(self): """Confirm that issue21618 is fixed (may fail under valgrind).""" fd_status = support.findfile("fd_status.py", subdir="subprocessdata") # This launches the meat of the test in a child process to # avoid messing with the larger unittest processes maximum # number of file descriptors. # This process launches: # +--> Process that lowers its RLIMIT_NOFILE aftr setting up # a bunch of high open fds above the new lower rlimit. # Those are reported via stdout before launching a new # process with close_fds=False to run the actual test: # +--> The TEST: This one launches a fd_status.py # subprocess with close_fds=True so we can find out if # any of the fds above the lowered rlimit are still open. p = subprocess.Popen([sys.executable, '-c', textwrap.dedent( ''' import os, resource, subprocess, sys, textwrap open_fds = set() # Add a bunch more fds to pass down. for _ in range(40): fd = os.open(os.devnull, os.O_RDONLY) open_fds.add(fd) # Leave a two pairs of low ones available for use by the # internal child error pipe and the stdout pipe. # We also leave 10 more open as some Python buildbots run into # "too many open files" errors during the test if we do not. 
for fd in sorted(open_fds)[:14]: os.close(fd) open_fds.remove(fd) for fd in open_fds: #self.addCleanup(os.close, fd) os.set_inheritable(fd, True) max_fd_open = max(open_fds) # Communicate the open_fds to the parent unittest.TestCase process. print(','.join(map(str, sorted(open_fds)))) sys.stdout.flush() rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE) try: # 29 is lower than the highest fds we are leaving open. resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max)) # Launch a new Python interpreter with our low fd rlim_cur that # inherits open fds above that limit. It then uses subprocess # with close_fds=True to get a report of open fds in the child. # An explicit list of fds to check is passed to fd_status.py as # letting fd_status rely on its default logic would miss the # fds above rlim_cur as it normally only checks up to that limit. subprocess.Popen( [sys.executable, '-c', textwrap.dedent(""" import subprocess, sys subprocess.Popen([sys.executable, %r] + [str(x) for x in range({max_fd})], close_fds=True).wait() """.format(max_fd=max_fd_open+1))], close_fds=False).wait() finally: resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max)) ''' % fd_status)], stdout=subprocess.PIPE) output, unused_stderr = p.communicate() output_lines = output.splitlines() self.assertEqual(len(output_lines), 2, msg="expected exactly two lines of output:\n%r" % output) opened_fds = set(map(int, output_lines[0].strip().split(b','))) remaining_fds = set(map(int, output_lines[1].strip().split(b','))) self.assertFalse(remaining_fds & opened_fds, msg="Some fds were left open.") # Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file # descriptor of a pipe closed in the parent process is valid in the # child process according to fstat(), but the mode of the file # descriptor is invalid, and read or write raise an error. 
@support.requires_mac_ver(10, 5) def test_pass_fds(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") open_fds = set() for x in range(5): fds = os.pipe() self.addCleanup(os.close, fds[0]) self.addCleanup(os.close, fds[1]) os.set_inheritable(fds[0], True) os.set_inheritable(fds[1], True) open_fds.update(fds) for fd in open_fds: p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, pass_fds=(fd, )) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) to_be_closed = open_fds - {fd} self.assertIn(fd, remaining_fds, "fd to be passed not passed") self.assertFalse(remaining_fds & to_be_closed, "fd to be closed passed") # pass_fds overrides close_fds with a warning. with self.assertWarns(RuntimeWarning) as context: self.assertFalse(subprocess.call( ZERO_RETURN_CMD, close_fds=False, pass_fds=(fd, ))) self.assertIn('overriding close_fds', str(context.warning)) def test_pass_fds_inheritable(self): script = support.findfile("fd_status.py", subdir="subprocessdata") inheritable, non_inheritable = os.pipe() self.addCleanup(os.close, inheritable) self.addCleanup(os.close, non_inheritable) os.set_inheritable(inheritable, True) os.set_inheritable(non_inheritable, False) pass_fds = (inheritable, non_inheritable) args = [sys.executable, script] args += list(map(str, pass_fds)) p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True, pass_fds=pass_fds) output, ignored = p.communicate() fds = set(map(int, output.split(b','))) # the inheritable file descriptor must be inherited, so its inheritable # flag must be set in the child process after fork() and before exec() self.assertEqual(fds, set(pass_fds), "output=%a" % output) # inheritable flag must not be changed in the parent process self.assertEqual(os.get_inheritable(inheritable), True) self.assertEqual(os.get_inheritable(non_inheritable), False) # bpo-32270: Ensure that descriptors specified in pass_fds # are inherited even if they 
are used in redirections. # Contributed by @izbyshev. def test_pass_fds_redirected(self): """Regression test for https://bugs.python.org/issue32270.""" fd_status = support.findfile("fd_status.py", subdir="subprocessdata") pass_fds = [] for _ in range(2): fd = os.open(os.devnull, os.O_RDWR) self.addCleanup(os.close, fd) pass_fds.append(fd) stdout_r, stdout_w = os.pipe() self.addCleanup(os.close, stdout_r) self.addCleanup(os.close, stdout_w) pass_fds.insert(1, stdout_w) with subprocess.Popen([sys.executable, fd_status], stdin=pass_fds[0], stdout=pass_fds[1], stderr=pass_fds[2], close_fds=True, pass_fds=pass_fds): output = os.read(stdout_r, 1024) fds = {int(num) for num in output.split(b',')} self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}") def test_stdout_stdin_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen(ZERO_RETURN_CMD, stdout=inout, stdin=inout) p.wait() def test_stdout_stderr_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen(ZERO_RETURN_CMD, stdout=inout, stderr=inout) p.wait() def test_stderr_stdin_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen(ZERO_RETURN_CMD, stderr=inout, stdin=inout) p.wait() def test_wait_when_sigchild_ignored(self): # NOTE: sigchild_ignore.py may not be an effective test on all OSes. sigchild_ignore = support.findfile("sigchild_ignore.py", subdir="subprocessdata") p = subprocess.Popen([sys.executable, sigchild_ignore], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() self.assertEqual(0, p.returncode, "sigchild_ignore.py exited" " non-zero with this error:\n%s" % stderr.decode('utf-8')) def test_select_unbuffered(self): # Issue #11459: bufsize=0 should really set the pipes as # unbuffered (and therefore let select() work properly). 
select = import_helper.import_module("select") p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple")'], stdout=subprocess.PIPE, bufsize=0) f = p.stdout self.addCleanup(f.close) try: self.assertEqual(f.read(4), b"appl") self.assertIn(f, select.select([f], [], [], 0.0)[0]) finally: p.wait() def test_zombie_fast_process_del(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, it wouldn't be added to subprocess._active, and would # remain a zombie. # spawn a Popen, and delete its reference before it exits p = subprocess.Popen([sys.executable, "-c", 'import sys, time;' 'time.sleep(0.2)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) ident = id(p) pid = p.pid with warnings_helper.check_warnings(('', ResourceWarning)): p = None if mswindows: # subprocess._active is not used on Windows and is set to None. self.assertIsNone(subprocess._active) else: # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) def test_leak_fast_process_del_killed(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, and the process got killed by a signal, it would never # be removed from subprocess._active, which triggered a FD and memory # leak. # spawn a Popen, delete its reference and kill it p = subprocess.Popen([sys.executable, "-c", 'import time;' 'time.sleep(3)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) ident = id(p) pid = p.pid with warnings_helper.check_warnings(('', ResourceWarning)): p = None support.gc_collect() # For PyPy or other GCs. os.kill(pid, signal.SIGKILL) if mswindows: # subprocess._active is not used on Windows and is set to None. 
self.assertIsNone(subprocess._active) else: # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) # let some time for the process to exit, and create a new Popen: this # should trigger the wait() of p time.sleep(0.2) with self.assertRaises(OSError): with subprocess.Popen(NONEXISTING_CMD, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: pass # p should have been wait()ed on, and removed from the _active list self.assertRaises(OSError, os.waitpid, pid, 0) if mswindows: # subprocess._active is not used on Windows and is set to None. self.assertIsNone(subprocess._active) else: self.assertNotIn(ident, [id(o) for o in subprocess._active]) def test_close_fds_after_preexec(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") # this FD is used as dup2() target by preexec_fn, and should be closed # in the child process fd = os.dup(1) self.addCleanup(os.close, fd) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, preexec_fn=lambda: os.dup2(1, fd)) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertNotIn(fd, remaining_fds) @support.cpython_only def test_fork_exec(self): # Issue #22290: fork_exec() must not crash on memory allocation failure # or other errors import _posixsubprocess gc_enabled = gc.isenabled() try: # Use a preexec function and enable the garbage collector # to force fork_exec() to re-enable the garbage collector # on error. 
func = lambda: None gc.enable() for args, exe_list, cwd, env_list in ( (123, [b"exe"], None, [b"env"]), ([b"arg"], 123, None, [b"env"]), ([b"arg"], [b"exe"], 123, [b"env"]), ([b"arg"], [b"exe"], None, 123), ): with self.assertRaises(TypeError) as err: _posixsubprocess.fork_exec( args, exe_list, True, (), cwd, env_list, -1, -1, -1, -1, 1, 2, 3, 4, True, True, 0, False, [], 0, -1, func, False) # Attempt to prevent # "TypeError: fork_exec() takes exactly N arguments (M given)" # from passing the test. More refactoring to have us start # with a valid *args list, confirm a good call with that works # before mutating it in various ways to ensure that bad calls # with individual arg type errors raise a typeerror would be # ideal. Saving that for a future PR... self.assertNotIn('takes exactly', str(err.exception)) finally: if not gc_enabled: gc.disable() @support.cpython_only def test_fork_exec_sorted_fd_sanity_check(self): # Issue #23564: sanity check the fork_exec() fds_to_keep sanity check. import _posixsubprocess class BadInt: first = True def __init__(self, value): self.value = value def __int__(self): if self.first: self.first = False return self.value raise ValueError gc_enabled = gc.isenabled() try: gc.enable() for fds_to_keep in ( (-1, 2, 3, 4, 5), # Negative number. ('str', 4), # Not an int. (18, 23, 42, 2**63), # Out of range. (5, 4), # Not sorted. (6, 7, 7, 8), # Duplicate. (BadInt(1), BadInt(2)), ): with self.assertRaises( ValueError, msg='fds_to_keep={}'.format(fds_to_keep)) as c: _posixsubprocess.fork_exec( [b"false"], [b"false"], True, fds_to_keep, None, [b"env"], -1, -1, -1, -1, 1, 2, 3, 4, True, True, 0, None, None, None, -1, None, True) self.assertIn('fds_to_keep', str(c.exception)) finally: if not gc_enabled: gc.disable() def test_communicate_BrokenPipeError_stdin_close(self): # By not setting stdout or stderr or a timeout we force the fast path # that just calls _stdin_write() internally due to our mock. 
proc = subprocess.Popen(ZERO_RETURN_CMD) with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin: mock_proc_stdin.close.side_effect = BrokenPipeError proc.communicate() # Should swallow BrokenPipeError from close. mock_proc_stdin.close.assert_called_with() def test_communicate_BrokenPipeError_stdin_write(self): # By not setting stdout or stderr or a timeout we force the fast path # that just calls _stdin_write() internally due to our mock. proc = subprocess.Popen(ZERO_RETURN_CMD) with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin: mock_proc_stdin.write.side_effect = BrokenPipeError proc.communicate(b'stuff') # Should swallow the BrokenPipeError. mock_proc_stdin.write.assert_called_once_with(b'stuff') mock_proc_stdin.close.assert_called_once_with() def test_communicate_BrokenPipeError_stdin_flush(self): # Setting stdin and stdout forces the ._communicate() code path. # python -h exits faster than python -c pass (but spams stdout). proc = subprocess.Popen([sys.executable, '-h'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \ open(os.devnull, 'wb') as dev_null: mock_proc_stdin.flush.side_effect = BrokenPipeError # because _communicate registers a selector using proc.stdin... mock_proc_stdin.fileno.return_value = dev_null.fileno() # _communicate() should swallow BrokenPipeError from flush. proc.communicate(b'stuff') mock_proc_stdin.flush.assert_called_once_with() def test_communicate_BrokenPipeError_stdin_close_with_timeout(self): # Setting stdin and stdout forces the ._communicate() code path. # python -h exits faster than python -c pass (but spams stdout). proc = subprocess.Popen([sys.executable, '-h'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin: mock_proc_stdin.close.side_effect = BrokenPipeError # _communicate() should swallow BrokenPipeError from close. 
proc.communicate(timeout=999) mock_proc_stdin.close.assert_called_once_with() @unittest.skipUnless(_testcapi is not None and hasattr(_testcapi, 'W_STOPCODE'), 'need _testcapi.W_STOPCODE') def test_stopped(self): """Test wait() behavior when waitpid returns WIFSTOPPED; issue29335.""" args = ZERO_RETURN_CMD proc = subprocess.Popen(args) # Wait until the real process completes to avoid zombie process support.wait_process(proc.pid, exitcode=0) status = _testcapi.W_STOPCODE(3) with mock.patch('subprocess.os.waitpid', return_value=(proc.pid, status)): returncode = proc.wait() self.assertEqual(returncode, -3) def test_send_signal_race(self): # bpo-38630: send_signal() must poll the process exit status to reduce # the risk of sending the signal to the wrong process. proc = subprocess.Popen(ZERO_RETURN_CMD) # wait until the process completes without using the Popen APIs. support.wait_process(proc.pid, exitcode=0) # returncode is still None but the process completed. self.assertIsNone(proc.returncode) with mock.patch("os.kill") as mock_kill: proc.send_signal(signal.SIGTERM) # send_signal() didn't call os.kill() since the process already # completed. mock_kill.assert_not_called() # Don't check the returncode value: the test reads the exit status, # so Popen failed to read it and uses a default returncode instead. 
self.assertIsNotNone(proc.returncode) def test_send_signal_race2(self): # bpo-40550: the process might exist between the returncode check and # the kill operation p = subprocess.Popen([sys.executable, '-c', 'exit(1)']) # wait for process to exit while not p.returncode: p.poll() with mock.patch.object(p, 'poll', new=lambda: None): p.returncode = None p.send_signal(signal.SIGTERM) p.kill() def test_communicate_repeated_call_after_stdout_close(self): proc = subprocess.Popen([sys.executable, '-c', 'import os, time; os.close(1), time.sleep(2)'], stdout=subprocess.PIPE) while True: try: proc.communicate(timeout=0.1) return except subprocess.TimeoutExpired: pass @unittest.skipUnless(mswindows, "Windows specific tests")
POSIXProcessTestCase
python
encode__django-rest-framework
tests/test_request.py
{ "start": 5225, "end": 5514 }
class ____(APIView): authentication_classes = (SessionAuthentication,) def post(self, request): if request.POST.get('example') is not None: return Response(status=status.HTTP_200_OK) return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
MockView
python
walkccc__LeetCode
solutions/1390. Four Divisors/1390.py
{ "start": 0, "end": 417 }
class ____: def sumFourDivisors(self, nums: list[int]) -> int: ans = 0 for num in nums: divisor = 0 for i in range(2, math.isqrt(num) + 1): if num % i == 0: if divisor == 0: divisor = i else: divisor = 0 break if divisor > 0 and divisor * divisor < num: ans += 1 + num + divisor + num // divisor return ans
Solution
python
kamyu104__LeetCode-Solutions
Python/find-all-the-lonely-nodes.py
{ "start": 803, "end": 1345 }
class ____(object): def getLonelyNodes(self, root): """ :type root: TreeNode :rtype: List[int] """ def dfs(node, result): if not node: return if node.left and not node.right: result.append(node.left.val) elif node.right and not node.left: result.append(node.right.val) dfs(node.left, result) dfs(node.right, result) result = [] dfs(root, result) return result
Solution2
python
explosion__spaCy
spacy/lang/ga/__init__.py
{ "start": 350, "end": 819 }
class ____(Language): lang = "ga" Defaults = IrishDefaults @Irish.factory( "lemmatizer", assigns=["token.lemma"], default_config={"model": None, "mode": "pos_lookup", "overwrite": False}, default_score_weights={"lemma_acc": 1.0}, ) def make_lemmatizer( nlp: Language, model: Optional[Model], name: str, mode: str, overwrite: bool ): return IrishLemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite) __all__ = ["Irish"]
Irish
python
sqlalchemy__sqlalchemy
test/sql/test_defaults.py
{ "start": 29300, "end": 31604 }
class ____(fixtures.TestBase): __requires__ = ("subqueries",) __sparse_driver_backend__ = True @testing.fixture def table_fixture(self, metadata, connection): def go(implicit_returning): t2 = Table( "t2", metadata, Column("nextid", Integer), implicit_returning=implicit_returning, ) t1 = Table( "t1", metadata, Column( "id", Integer, primary_key=True, default=sa.select(func.max(t2.c.nextid)).scalar_subquery(), ), Column("data", String(30)), implicit_returning=implicit_returning, ) date_table = Table( "date_table", metadata, Column( "date_id", # we want no tzinfo normally since pymssql doesn't do # it right now DateTime().with_variant( DateTime(timezone=True), "postgresql" ), default=text("current_timestamp"), primary_key=True, ), implicit_returning=implicit_returning, ) metadata.create_all(connection) return t1, t2, date_table return go @testing.crashes( "+mariadbconnector", "https://jira.mariadb.org/browse/CONPY-206" ) @testing.combinations( (True, testing.requires.insert_returning), (False,), argnames="implicit_returning", ) def test_pk_default(self, connection, table_fixture, implicit_returning): t1, t2, date_table = table_fixture(implicit_returning) conn = connection conn.execute(t2.insert(), dict(nextid=1)) r = conn.execute(t1.insert(), dict(data="hi")) eq_((1,), r.inserted_primary_key) conn.execute(t2.insert(), dict(nextid=2)) r = conn.execute(t1.insert(), dict(data="there")) eq_((2,), r.inserted_primary_key) r = conn.execute(date_table.insert()) assert isinstance(r.inserted_primary_key[0], datetime.datetime)
PKDefaultTest
python
plotly__plotly.py
plotly/graph_objs/histogram/marker/_pattern.py
{ "start": 233, "end": 15300 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "histogram.marker" _path_str = "histogram.marker.pattern" _valid_props = { "bgcolor", "bgcolorsrc", "fgcolor", "fgcolorsrc", "fgopacity", "fillmode", "path", "pathsrc", "shape", "shapesrc", "size", "sizesrc", "solidity", "soliditysrc", } @property def bgcolor(self): """ When there is no colorscale sets the color of background pattern fill. Defaults to a `marker.color` background when `fillmode` is "overlay". Otherwise, defaults to a transparent background. The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["bgcolor"] @bgcolor.setter def bgcolor(self, val): self["bgcolor"] = val @property def bgcolorsrc(self): """ Sets the source reference on Chart Studio Cloud for `bgcolor`. The 'bgcolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["bgcolorsrc"] @bgcolorsrc.setter def bgcolorsrc(self, val): self["bgcolorsrc"] = val @property def fgcolor(self): """ When there is no colorscale sets the color of foreground pattern fill. Defaults to a `marker.color` background when `fillmode` is "replace". Otherwise, defaults to dark grey or white to increase contrast with the `bgcolor`. The 'fgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 
'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["fgcolor"] @fgcolor.setter def fgcolor(self, val): self["fgcolor"] = val @property def fgcolorsrc(self): """ Sets the source reference on Chart Studio Cloud for `fgcolor`. The 'fgcolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["fgcolorsrc"] @fgcolorsrc.setter def fgcolorsrc(self, val): self["fgcolorsrc"] = val @property def fgopacity(self): """ Sets the opacity of the foreground pattern fill. Defaults to a 0.5 when `fillmode` is "overlay". Otherwise, defaults to 1. The 'fgopacity' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- int|float """ return self["fgopacity"] @fgopacity.setter def fgopacity(self, val): self["fgopacity"] = val @property def fillmode(self): """ Determines whether `marker.color` should be used as a default to `bgcolor` or a `fgcolor`. The 'fillmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['replace', 'overlay'] Returns ------- Any """ return self["fillmode"] @fillmode.setter def fillmode(self, val): self["fillmode"] = val @property def path(self): """ Sets a custom path for pattern fill. Use with no `shape` or `solidity`, provide an SVG path string for the regions of the square from (0,0) to (`size`,`size`) to color. The 'path' property is a string and must be specified as: - A string - A number that will be converted to a string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["path"] @path.setter def path(self, val): self["path"] = val @property def pathsrc(self): """ Sets the source reference on Chart Studio Cloud for `path`. 
The 'pathsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["pathsrc"] @pathsrc.setter def pathsrc(self, val): self["pathsrc"] = val @property def shape(self): """ Sets the shape of the pattern fill. By default, no pattern is used for filling the area. The 'shape' property is an enumeration that may be specified as: - One of the following enumeration values: ['', '/', '\\', 'x', '-', '|', '+', '.'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["shape"] @shape.setter def shape(self, val): self["shape"] = val @property def shapesrc(self): """ Sets the source reference on Chart Studio Cloud for `shape`. The 'shapesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["shapesrc"] @shapesrc.setter def shapesrc(self, val): self["shapesrc"] = val @property def size(self): """ Sets the size of unit squares of the pattern fill in pixels, which corresponds to the interval of repetition of the pattern. The 'size' property is a number and may be specified as: - An int or float in the interval [0, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def sizesrc(self): """ Sets the source reference on Chart Studio Cloud for `size`. The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["sizesrc"] @sizesrc.setter def sizesrc(self, val): self["sizesrc"] = val @property def solidity(self): """ Sets the solidity of the pattern fill. Solidity is roughly the fraction of the area filled by the pattern. Solidity of 0 shows only the background color without pattern and solidty of 1 shows only the foreground color without pattern. 
The 'solidity' property is a number and may be specified as: - An int or float in the interval [0, 1] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["solidity"] @solidity.setter def solidity(self, val): self["solidity"] = val @property def soliditysrc(self): """ Sets the source reference on Chart Studio Cloud for `solidity`. The 'soliditysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["soliditysrc"] @soliditysrc.setter def soliditysrc(self, val): self["soliditysrc"] = val @property def _prop_descriptions(self): return """\ bgcolor When there is no colorscale sets the color of background pattern fill. Defaults to a `marker.color` background when `fillmode` is "overlay". Otherwise, defaults to a transparent background. bgcolorsrc Sets the source reference on Chart Studio Cloud for `bgcolor`. fgcolor When there is no colorscale sets the color of foreground pattern fill. Defaults to a `marker.color` background when `fillmode` is "replace". Otherwise, defaults to dark grey or white to increase contrast with the `bgcolor`. fgcolorsrc Sets the source reference on Chart Studio Cloud for `fgcolor`. fgopacity Sets the opacity of the foreground pattern fill. Defaults to a 0.5 when `fillmode` is "overlay". Otherwise, defaults to 1. fillmode Determines whether `marker.color` should be used as a default to `bgcolor` or a `fgcolor`. path Sets a custom path for pattern fill. Use with no `shape` or `solidity`, provide an SVG path string for the regions of the square from (0,0) to (`size`,`size`) to color. pathsrc Sets the source reference on Chart Studio Cloud for `path`. shape Sets the shape of the pattern fill. By default, no pattern is used for filling the area. shapesrc Sets the source reference on Chart Studio Cloud for `shape`. 
size Sets the size of unit squares of the pattern fill in pixels, which corresponds to the interval of repetition of the pattern. sizesrc Sets the source reference on Chart Studio Cloud for `size`. solidity Sets the solidity of the pattern fill. Solidity is roughly the fraction of the area filled by the pattern. Solidity of 0 shows only the background color without pattern and solidty of 1 shows only the foreground color without pattern. soliditysrc Sets the source reference on Chart Studio Cloud for `solidity`. """ def __init__( self, arg=None, bgcolor=None, bgcolorsrc=None, fgcolor=None, fgcolorsrc=None, fgopacity=None, fillmode=None, path=None, pathsrc=None, shape=None, shapesrc=None, size=None, sizesrc=None, solidity=None, soliditysrc=None, **kwargs, ): """ Construct a new Pattern object Sets the pattern within the marker. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.histogram.marker.Pattern` bgcolor When there is no colorscale sets the color of background pattern fill. Defaults to a `marker.color` background when `fillmode` is "overlay". Otherwise, defaults to a transparent background. bgcolorsrc Sets the source reference on Chart Studio Cloud for `bgcolor`. fgcolor When there is no colorscale sets the color of foreground pattern fill. Defaults to a `marker.color` background when `fillmode` is "replace". Otherwise, defaults to dark grey or white to increase contrast with the `bgcolor`. fgcolorsrc Sets the source reference on Chart Studio Cloud for `fgcolor`. fgopacity Sets the opacity of the foreground pattern fill. Defaults to a 0.5 when `fillmode` is "overlay". Otherwise, defaults to 1. fillmode Determines whether `marker.color` should be used as a default to `bgcolor` or a `fgcolor`. path Sets a custom path for pattern fill. Use with no `shape` or `solidity`, provide an SVG path string for the regions of the square from (0,0) to (`size`,`size`) to color. 
pathsrc Sets the source reference on Chart Studio Cloud for `path`. shape Sets the shape of the pattern fill. By default, no pattern is used for filling the area. shapesrc Sets the source reference on Chart Studio Cloud for `shape`. size Sets the size of unit squares of the pattern fill in pixels, which corresponds to the interval of repetition of the pattern. sizesrc Sets the source reference on Chart Studio Cloud for `size`. solidity Sets the solidity of the pattern fill. Solidity is roughly the fraction of the area filled by the pattern. Solidity of 0 shows only the background color without pattern and solidty of 1 shows only the foreground color without pattern. soliditysrc Sets the source reference on Chart Studio Cloud for `solidity`. Returns ------- Pattern """ super().__init__("pattern") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.histogram.marker.Pattern constructor must be a dict or an instance of :class:`plotly.graph_objs.histogram.marker.Pattern`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("bgcolor", arg, bgcolor) self._set_property("bgcolorsrc", arg, bgcolorsrc) self._set_property("fgcolor", arg, fgcolor) self._set_property("fgcolorsrc", arg, fgcolorsrc) self._set_property("fgopacity", arg, fgopacity) self._set_property("fillmode", arg, fillmode) self._set_property("path", arg, path) self._set_property("pathsrc", arg, pathsrc) self._set_property("shape", arg, shape) self._set_property("shapesrc", arg, shapesrc) self._set_property("size", arg, size) self._set_property("sizesrc", arg, sizesrc) self._set_property("solidity", arg, solidity) self._set_property("soliditysrc", arg, soliditysrc) self._process_kwargs(**dict(arg, **kwargs)) 
self._skip_invalid = False
Pattern
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs_auto_attribs.py
{ "start": 1838, "end": 1912 }
class ____: a: str = 0 b = field() c: int = foo() d = list()
C
python
gevent__gevent
src/greentest/3.10/test_subprocess.py
{ "start": 140471, "end": 150888 }
class ____(BaseTestCase): def test_startupinfo(self): # startupinfo argument # We uses hardcoded constants, because we do not want to # depend on win32all. STARTF_USESHOWWINDOW = 1 SW_MAXIMIZE = 3 startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags = STARTF_USESHOWWINDOW startupinfo.wShowWindow = SW_MAXIMIZE # Since Python is a console process, it won't be affected # by wShowWindow, but the argument should be silently # ignored subprocess.call(ZERO_RETURN_CMD, startupinfo=startupinfo) def test_startupinfo_keywords(self): # startupinfo argument # We use hardcoded constants, because we do not want to # depend on win32all. STARTF_USERSHOWWINDOW = 1 SW_MAXIMIZE = 3 startupinfo = subprocess.STARTUPINFO( dwFlags=STARTF_USERSHOWWINDOW, wShowWindow=SW_MAXIMIZE ) # Since Python is a console process, it won't be affected # by wShowWindow, but the argument should be silently # ignored subprocess.call(ZERO_RETURN_CMD, startupinfo=startupinfo) def test_startupinfo_copy(self): # bpo-34044: Popen must not modify input STARTUPINFO structure startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = subprocess.SW_HIDE # Call Popen() twice with the same startupinfo object to make sure # that it's not modified for _ in range(2): cmd = ZERO_RETURN_CMD with open(os.devnull, 'w') as null: proc = subprocess.Popen(cmd, stdout=null, stderr=subprocess.STDOUT, startupinfo=startupinfo) with proc: proc.communicate() self.assertEqual(proc.returncode, 0) self.assertEqual(startupinfo.dwFlags, subprocess.STARTF_USESHOWWINDOW) self.assertIsNone(startupinfo.hStdInput) self.assertIsNone(startupinfo.hStdOutput) self.assertIsNone(startupinfo.hStdError) self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE) self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []}) def test_creationflags(self): # creationflags argument CREATE_NEW_CONSOLE = 16 sys.stderr.write(" a DOS box should flash briefly ...\n") 
subprocess.call(sys.executable + ' -c "import time; time.sleep(0.25)"', creationflags=CREATE_NEW_CONSOLE) def test_invalid_args(self): # invalid arguments should raise ValueError self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], preexec_fn=lambda: 1) @support.cpython_only def test_issue31471(self): # There shouldn't be an assertion failure in Popen() in case the env # argument has a bad keys() method. class BadEnv(dict): keys = None with self.assertRaises(TypeError): subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv()) def test_close_fds(self): # close file descriptors rc = subprocess.call([sys.executable, "-c", "import sys; sys.exit(47)"], close_fds=True) self.assertEqual(rc, 47) def test_close_fds_with_stdio(self): import msvcrt fds = os.pipe() self.addCleanup(os.close, fds[0]) self.addCleanup(os.close, fds[1]) handles = [] for fd in fds: os.set_inheritable(fd, True) handles.append(msvcrt.get_osfhandle(fd)) p = subprocess.Popen([sys.executable, "-c", "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])], stdout=subprocess.PIPE, close_fds=False) stdout, stderr = p.communicate() self.assertEqual(p.returncode, 0) int(stdout.strip()) # Check that stdout is an integer p = subprocess.Popen([sys.executable, "-c", "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) stdout, stderr = p.communicate() self.assertEqual(p.returncode, 1) self.assertIn(b"OSError", stderr) # The same as the previous call, but with an empty handle_list handle_list = [] startupinfo = subprocess.STARTUPINFO() startupinfo.lpAttributeList = {"handle_list": handle_list} p = subprocess.Popen([sys.executable, "-c", "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])], stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, close_fds=True) stdout, stderr = p.communicate() self.assertEqual(p.returncode, 1) 
self.assertIn(b"OSError", stderr) # Check for a warning due to using handle_list and close_fds=False with warnings_helper.check_warnings((".*overriding close_fds", RuntimeWarning)): startupinfo = subprocess.STARTUPINFO() startupinfo.lpAttributeList = {"handle_list": handles[:]} p = subprocess.Popen([sys.executable, "-c", "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])], stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, close_fds=False) stdout, stderr = p.communicate() self.assertEqual(p.returncode, 0) def test_empty_attribute_list(self): startupinfo = subprocess.STARTUPINFO() startupinfo.lpAttributeList = {} subprocess.call(ZERO_RETURN_CMD, startupinfo=startupinfo) def test_empty_handle_list(self): startupinfo = subprocess.STARTUPINFO() startupinfo.lpAttributeList = {"handle_list": []} subprocess.call(ZERO_RETURN_CMD, startupinfo=startupinfo) def test_shell_sequence(self): # Run command through the shell (sequence) newenv = os.environ.copy() newenv["FRUIT"] = "physalis" p = subprocess.Popen(["set"], shell=1, stdout=subprocess.PIPE, env=newenv) with p: self.assertIn(b"physalis", p.stdout.read()) def test_shell_string(self): # Run command through the shell (string) newenv = os.environ.copy() newenv["FRUIT"] = "physalis" p = subprocess.Popen("set", shell=1, stdout=subprocess.PIPE, env=newenv) with p: self.assertIn(b"physalis", p.stdout.read()) def test_shell_encodings(self): # Run command through the shell (string) for enc in ['ansi', 'oem']: newenv = os.environ.copy() newenv["FRUIT"] = "physalis" p = subprocess.Popen("set", shell=1, stdout=subprocess.PIPE, env=newenv, encoding=enc) with p: self.assertIn("physalis", p.stdout.read(), enc) def test_call_string(self): # call() function with string argument on Windows rc = subprocess.call(sys.executable + ' -c "import sys; sys.exit(47)"') self.assertEqual(rc, 47) def _kill_process(self, method, *args): # Some win32 buildbot raises EOFError if stdin is inherited p = 
subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) with p: # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) _, stderr = p.communicate() self.assertEqual(stderr, b'') returncode = p.wait() self.assertNotEqual(returncode, 0) def _kill_dead_process(self, method, *args): p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() sys.exit(42) """], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) with p: # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) _, stderr = p.communicate() self.assertEqual(stderr, b'') rc = p.wait() self.assertEqual(rc, 42) def test_send_signal(self): self._kill_process('send_signal', signal.SIGTERM) def test_kill(self): self._kill_process('kill') def test_terminate(self): self._kill_process('terminate') def test_send_signal_dead(self): self._kill_dead_process('send_signal', signal.SIGTERM) def test_kill_dead(self): self._kill_dead_process('kill') def test_terminate_dead(self): self._kill_dead_process('terminate')
Win32ProcessTestCase
python
walkccc__LeetCode
solutions/1742. Maximum Number of Balls in a Box/1742.py
{ "start": 0, "end": 421 }
class ____: def countBalls(self, lowLimit: int, highLimit: int) -> int: maxDigitSum = 9 * 5 # 99999 ans = 0 count = [0] * (maxDigitSum + 1) for num in range(lowLimit, highLimit + 1): digitSum = self._getDigitSum(num) count[digitSum] += 1 ans = max(ans, count[digitSum]) return ans def _getDigitSum(self, num: int) -> int: return sum(int(digit) for digit in str(num))
Solution
python
ansible__ansible
lib/ansible/plugins/loader.py
{ "start": 57707, "end": 58222 }
class ____(PluginLoader): """Customized loader for cache plugins that wraps the requested plugin with an interposer that schema-qualifies keys and JSON encodes the values.""" def get(self, name: str, *args, **kwargs) -> BaseCacheModule: plugin = super().get(name, *args, **kwargs) if not plugin: raise AnsibleError(f'Unable to load the cache plugin {name!r}.') if plugin._persistent: return _cache.PluginInterposer(plugin) return plugin
_CacheLoader
python
kamyu104__LeetCode-Solutions
Python/check-if-all-the-integers-in-a-range-are-covered.py
{ "start": 662, "end": 1035 }
class ____(object): def isCovered(self, ranges, left, right): """ :type ranges: List[List[int]] :type left: int :type right: int :rtype: bool """ ranges.sort() for l, r in ranges: if l <= left <= r: left = r+1 return left > right # Time: O(n * r) # Space: O(1)
Solution2
python
Textualize__textual
src/textual/css/styles.py
{ "start": 44924, "end": 51104 }
class ____(StylesBase): """Presents a combined view of two Styles object: a base Styles and inline Styles.""" def __init__(self, node: DOMNode, base: Styles, inline_styles: Styles) -> None: self.node = node self._base_styles = base self._inline_styles = inline_styles self._animate: BoundAnimator | None = None self._updates: int = 0 self._rich_style: tuple[int, Style] | None = None self._gutter: tuple[int, Spacing] | None = None def __eq__(self, other: object) -> bool: if isinstance(other, RenderStyles): return ( self._base_styles._rules == other._base_styles._rules and self._inline_styles._rules == other._inline_styles._rules ) return NotImplemented @property def _cache_key(self) -> int: """A cache key, that changes when any style is changed. Returns: An opaque integer. """ return self._updates + self._base_styles._updates + self._inline_styles._updates @property def base(self) -> Styles: """Quick access to base (css) style.""" return self._base_styles @property def inline(self) -> Styles: """Quick access to the inline styles.""" return self._inline_styles @property def rich_style(self) -> Style: """Get a Rich style for this Styles object.""" assert self.node is not None return self.node.rich_style @property def gutter(self) -> Spacing: """Get space around widget (padding + border) Returns: Space around widget content. """ # This is (surprisingly) a bit of a bottleneck if self._gutter is not None: cache_key, gutter = self._gutter if cache_key == self._cache_key: return gutter gutter = self.padding + self.border.spacing self._gutter = (self._cache_key, gutter) return gutter def animate( self, attribute: str, value: str | float | Animatable, *, final_value: object = ..., duration: float | None = None, speed: float | None = None, delay: float = 0.0, easing: EasingFunction | str = DEFAULT_EASING, on_complete: CallbackType | None = None, level: AnimationLevel = "full", ) -> None: """Animate an attribute. Args: attribute: Name of the attribute to animate. 
value: The value to animate to. final_value: The final value of the animation. Defaults to `value` if not set. duration: The duration (in seconds) of the animation. speed: The speed of the animation. delay: A delay (in seconds) before the animation starts. easing: An easing method. on_complete: A callable to invoke when the animation is finished. level: Minimum level required for the animation to take place (inclusive). """ if self._animate is None: assert self.node is not None self._animate = self.node.app.animator.bind(self) assert self._animate is not None self._animate( attribute, value, final_value=final_value, duration=duration, speed=speed, delay=delay, easing=easing, on_complete=on_complete, level=level, ) def __rich_repr__(self) -> rich.repr.Result: yield self.node for rule_name in RULE_NAMES: if self.has_rule(rule_name): yield rule_name, getattr(self, rule_name) def refresh( self, *, layout: bool = False, children: bool = False, parent: bool = False, repaint: bool = True, ) -> None: self._inline_styles.refresh( layout=layout, children=children, parent=parent, repaint=repaint ) def merge(self, other: StylesBase) -> None: """Merge values from another Styles. Args: other: A Styles object. """ self._inline_styles.merge(other) def merge_rules(self, rules: RulesMap) -> None: self._inline_styles.merge_rules(rules) self._updates += 1 def reset(self) -> None: """Reset the rules to initial state.""" self._inline_styles.reset() self._updates += 1 def has_rule(self, rule_name: str) -> bool: """Check if a rule has been set.""" return self._inline_styles.has_rule(rule_name) or self._base_styles.has_rule( rule_name ) def has_any_rules(self, *rule_names: str) -> bool: """Check if any of the supplied rules have been set. Args: rule_names: Number of rules. Returns: `True` if any of the supplied rules have been set, `False` if none have. 
""" inline_has_rule = self._inline_styles.has_rule base_has_rule = self._base_styles.has_rule return any(inline_has_rule(name) or base_has_rule(name) for name in rule_names) def set_rule(self, rule_name: str, value: object | None) -> bool: return self._inline_styles.set_rule(rule_name, value) def get_rule(self, rule_name: str, default: object = None) -> object: if self._inline_styles.has_rule(rule_name): return self._inline_styles.get_rule(rule_name, default) return self._base_styles.get_rule(rule_name, default) def clear_rule(self, rule_name: str) -> bool: """Clear a rule (from inline).""" return self._inline_styles.clear_rule(rule_name) def get_rules(self) -> RulesMap: """Get rules as a dictionary""" rules = {**self._base_styles._rules, **self._inline_styles._rules} return cast(RulesMap, rules) @property def css(self) -> str: """Get the CSS for the combined styles.""" styles = Styles() styles.merge(self._base_styles) styles.merge(self._inline_styles) combined_css = styles.css return combined_css
RenderStyles
python
scrapy__scrapy
tests/test_utils_spider.py
{ "start": 164, "end": 214 }
class ____(Spider): name = "myspider1"
MySpider1
python
django__django
django/db/models/functions/math.py
{ "start": 4416, "end": 5040 }
class ____(NumericOutputFieldMixin, Func): function = "RANDOM" arity = 0 def as_mysql(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="RAND", **extra_context) def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, function="DBMS_RANDOM.VALUE", **extra_context ) def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="RAND", **extra_context) def get_group_by_cols(self): return []
Random
python
pytorch__pytorch
torch/_dynamo/bytecode_transformation.py
{ "start": 23502, "end": 67371 }
class ____: start: int end: int target: int depth: int lasti: bool def encode_exception_table_varint(n: int) -> list[int]: """ Similar to `encode_varint`, but the 6-bit chunks are ordered in reverse. """ assert n >= 0 b = [n & 63] n >>= 6 while n > 0: b.append(n & 63) n >>= 6 b.reverse() for i in range(len(b) - 1): b[i] |= 64 return b def decode_exception_table_varint(bytes_iter: Iterator[int]) -> int: """ Inverse of `encode_exception_table_varint`. """ b = next(bytes_iter) val = b & 63 while b & 64: val <<= 6 b = next(bytes_iter) val |= b & 63 return val def check_exception_table(tab: list[ExceptionTableEntry]) -> None: """ Verifies that a list of ExceptionTableEntries will make a well-formed jump table: entries are non-empty, sorted, and do not overlap. """ for i in range(len(tab) - 1): assert ( tab[i].start <= tab[i].end and tab[i].end < tab[i + 1].start and tab[i + 1].start <= tab[i + 1].end ) def parse_exception_table(exntab: bytes) -> list[ExceptionTableEntry]: """ Parse the exception table according to https://github.com/python/cpython/blob/3.11/Objects/exception_handling_notes.txt """ exntab_iter = iter(exntab) tab = [] try: while True: start = decode_exception_table_varint(exntab_iter) * 2 length = decode_exception_table_varint(exntab_iter) * 2 end = start + length - 2 target = decode_exception_table_varint(exntab_iter) * 2 dl = decode_exception_table_varint(exntab_iter) depth = dl >> 1 lasti = bool(dl & 1) tab.append(ExceptionTableEntry(start, end, target, depth, lasti)) except StopIteration: check_exception_table(tab) return tab def assemble_exception_table(tab: list[ExceptionTableEntry]) -> bytes: """ Inverse of parse_exception_table - encodes list of exception table entries into bytes. 
""" b = [] for entry in tab: first_entry = encode_exception_table_varint(entry.start // 2) first_entry[0] |= 1 << 7 b.extend(first_entry) length = entry.end - entry.start + 2 b.extend(encode_exception_table_varint(length // 2)) b.extend(encode_exception_table_varint(entry.target // 2)) dl = (entry.depth << 1) + entry.lasti b.extend(encode_exception_table_varint(dl)) return bytes(b) def assemble(instructions: list[Instruction], firstlineno: int) -> tuple[bytes, bytes]: """Do the opposite of dis.get_instructions()""" code: list[int] = [] if sys.version_info >= (3, 11): lnotab, update_lineno = linetable_311_writer(firstlineno) num_ext = 0 for i, inst in enumerate(instructions): if inst.opname == "EXTENDED_ARG": inst_size = 1 num_ext += 1 # copy positions from the actual instruction for j in (1, 2, 3): if instructions[i + j].opname != "EXTENDED_ARG": inst.positions = instructions[i + j].positions break else: inst_size = instruction_size(inst) // 2 + num_ext num_ext = 0 update_lineno(inst.positions, inst_size) num_ext = 0 arg = inst.arg or 0 code.extend((inst.opcode, arg & 0xFF)) for _ in range(instruction_size(inst) // 2 - 1): code.extend((0, 0)) else: lnotab, update_lineno, end = linetable_writer(firstlineno) for inst in instructions: if inst.starts_line is not None: update_lineno(inst.starts_line, len(code)) arg = inst.arg or 0 code.extend((inst.opcode, arg & 0xFF)) end(len(code)) return bytes(code), bytes(lnotab) def _get_instruction_by_offset( offset_to_inst: dict[int, Instruction], offset: int ) -> Optional[Instruction]: """ Get the instruction located at a given offset, accounting for EXTENDED_ARGs """ for n in (0, 2, 4, 6): if offset_to_inst[offset + n].opcode != dis.EXTENDED_ARG: return offset_to_inst[offset + n] return None def virtualize_jumps(instructions: Iterable[Instruction]) -> None: """Replace jump targets with pointers to make editing easier""" jump_targets = { inst.offset: inst for inst in instructions if inst.offset is not None } for inst in 
instructions: if inst.opcode in dis.hasjabs or inst.opcode in dis.hasjrel: inst.target = _get_instruction_by_offset(jump_targets, inst.argval) _REL_JUMPS = set(dis.hasjrel) def flip_jump_direction(instruction: Instruction) -> None: if sys.version_info < (3, 11): raise RuntimeError("Cannot flip jump direction in Python < 3.11") if "FORWARD" in instruction.opname: instruction.opname = instruction.opname.replace("FORWARD", "BACKWARD") elif "BACKWARD" in instruction.opname: instruction.opname = instruction.opname.replace("BACKWARD", "FORWARD") else: raise AttributeError("Instruction is not a forward or backward jump") instruction.opcode = dis.opmap[instruction.opname] assert instruction.opcode in _REL_JUMPS def _get_instruction_front(instructions: list[Instruction], idx: int) -> Instruction: """ i.e. get the first EXTENDED_ARG instruction (if any) when targeting instructions[idx] with a jump. """ target = instructions[idx] for offset in (1, 2, 3): if idx >= offset and instructions[idx - offset].opcode == dis.EXTENDED_ARG: target = instructions[idx - offset] else: break return target def devirtualize_jumps(instructions: list[Instruction]) -> None: """Fill in args for virtualized jump target after instructions may have moved""" jumps = set(dis.hasjabs).union(set(dis.hasjrel)) # check for negative jump args and fix them for inst in instructions: if inst.opcode in jumps: if inst.opcode not in dis.hasjabs: assert ( inst.target is not None and inst.target.offset is not None and inst.offset is not None ) if inst.target.offset < inst.offset: if sys.version_info < (3, 11): raise RuntimeError("Got negative jump offset for Python < 3.11") # forward jumps become backward if "FORWARD" in inst.opname: flip_jump_direction(inst) else: # backward jumps become forward if sys.version_info >= (3, 11) and "BACKWARD" in inst.opname: flip_jump_direction(inst) # jump instruction size may have changed due to flips update_offsets(instructions) indexof = get_indexof(instructions) # compute jump 
instruction arg for inst in instructions: if inst.opcode in jumps: assert inst.target is not None target = _get_instruction_front(instructions, indexof[inst.target]) if inst.opcode in dis.hasjabs: if sys.version_info < (3, 11): # `arg` is expected to be bytecode offset, whereas `offset` is byte offset. # Divide since bytecode is 2 bytes large. inst.arg = int(target.offset / 2) else: raise RuntimeError("Python 3.11+ should not have absolute jumps") else: # relative jump # byte offset between target and next instruction assert target.offset is not None and inst.offset is not None inst.arg = abs( int(target.offset - inst.offset - instruction_size(inst)) ) # pyrefly: ignore [unsupported-operation] inst.arg //= 2 inst.argval = target.offset inst.argrepr = f"to {target.offset}" def virtualize_exception_table( exn_tab_bytes: bytes, instructions: list[Instruction] ) -> None: """Replace exception table entries with pointers to make editing easier""" exn_tab = parse_exception_table(exn_tab_bytes) offset_to_inst = {cast(int, inst.offset): inst for inst in instructions} offsets = sorted(offset_to_inst.keys()) end_offset_idx = 0 exn_tab_iter = iter(exn_tab) try: def step() -> tuple[ExceptionTableEntry, InstructionExnTabEntry]: nonlocal end_offset_idx entry = next(exn_tab_iter) # find rightmost offset <= entry.end, since entry.end may not be # an actual instruction, e.g. if the end instruction is LOAD_GLOBAL, # which takes more than 2 bytes, then entry.end points to the end # of the LOAD_GLOBAL instruction, not the beginning. 
while ( end_offset_idx < len(offsets) and offsets[end_offset_idx] <= entry.end ): end_offset_idx += 1 assert end_offset_idx > 0 end_offset = offsets[end_offset_idx - 1] inst_entry = InstructionExnTabEntry( _get_instruction_by_offset(offset_to_inst, entry.start), # type: ignore[arg-type] _get_instruction_by_offset(offset_to_inst, end_offset), # type: ignore[arg-type] _get_instruction_by_offset(offset_to_inst, entry.target), # type: ignore[arg-type] entry.depth, entry.lasti, ) return entry, inst_entry entry, inst_entry = step() for inst in instructions: assert inst.offset is not None while inst.offset > entry.end: entry, inst_entry = step() if inst.offset >= entry.start: inst.exn_tab_entry = copy.copy(inst_entry) except StopIteration: pass def compute_exception_table( instructions: list[Instruction], ) -> list[ExceptionTableEntry]: """Compute exception table in list format from instructions with exn_tab_entries""" exn_dict: dict[tuple[int, int], tuple[int, int, bool]] = {} indexof = get_indexof(instructions) for inst in instructions: if inst.exn_tab_entry: # account for prefixed EXTENDED_ARGS start = _get_instruction_front( instructions, indexof[inst.exn_tab_entry.start] ).offset assert start is not None # point to the last 2 bytes of the end instruction end = ( cast(int, inst.exn_tab_entry.end.offset) + instruction_size(inst.exn_tab_entry.end) - 2 ) assert end is not None target = _get_instruction_front( instructions, indexof[inst.exn_tab_entry.target] ).offset assert target is not None key = (start, end) val = (target, inst.exn_tab_entry.depth, inst.exn_tab_entry.lasti) if key in exn_dict: assert exn_dict[key] == val exn_dict[key] = val # Dynamo may construct nested exception table entries for convenience, # but Python expects exception table entries to not overlap. # NOTE: below, "keys" refer to old instruction entries' starts and ends, # and "entries" refer to the generated exception table entries. 
# Sort keys by increasing start, then decreasing end keys_sorted = sorted(exn_dict.keys(), key=lambda t: (t[0], -t[1])) # smallest byte that the next exception table entry can start at nexti = 0 # stack of current nested keys key_stack: list[tuple[int, int]] = [] exn_tab: list[ExceptionTableEntry] = [] def pop() -> None: """ Pop the key_stack and append an exception table entry if possible. """ nonlocal nexti if key_stack: key = key_stack.pop() if nexti <= key[1]: exn_tab.append( ExceptionTableEntry(max(key[0], nexti), key[1], *exn_dict[key]) ) nexti = key[1] + 2 for key in keys_sorted: # pop keys that are no longer nested over the current key while key_stack and key_stack[-1][1] < key[0]: pop() if key_stack: # create an entry covering to the current key, if possible assert key_stack[-1][0] <= key[0] <= key[1] <= key_stack[-1][1] left = max(nexti, key_stack[-1][0]) if left < key[0]: exn_tab.append( ExceptionTableEntry(left, key[0] - 2, *exn_dict[key_stack[-1]]) ) nexti = key[0] key_stack.append(key) while key_stack: pop() check_exception_table(exn_tab) return exn_tab def check_inst_exn_tab_entries_nested( tab: list[InstructionExnTabEntry], indexof: dict[Instruction, int] ) -> None: """ Checks `tab` is a properly sorted list of nested InstructionExnTabEntry's, i.e. no entries partially overlap. "Properly sorted" means entries are sorted by increasing starts, then decreasing ends. """ entry_stack: list[tuple[int, int]] = [] for entry in tab: key = (indexof[entry.start], indexof[entry.end]) while entry_stack and entry_stack[-1][1] < key[0]: entry_stack.pop() if entry_stack: assert entry_stack[-1][0] <= key[0] <= key[1] <= entry_stack[-1][1] entry_stack.append(key) def propagate_inst_exn_table_entries(instructions: list[Instruction]) -> None: """ Copies exception table entries to all instructions in an entry's range. Supports nested exception table entries. 
""" indexof = get_indexof(instructions) entries: dict[tuple[int, int], InstructionExnTabEntry] = {} for inst in instructions: if inst.exn_tab_entry: key = ( indexof[inst.exn_tab_entry.start], indexof[inst.exn_tab_entry.end], ) if key in entries: assert inst.exn_tab_entry == entries[key] entries[key] = inst.exn_tab_entry sorted_entries = [ entries[key] for key in sorted(entries.keys(), key=lambda t: (t[0], -t[1])) ] check_inst_exn_tab_entries_nested(sorted_entries, indexof) # Propagation of nested entries works since nested entries come later # in sorted order. for entry in sorted_entries: for i in range(indexof[entry.start], indexof[entry.end] + 1): instructions[i].exn_tab_entry = copy.copy(entry) def check_inst_exn_tab_entries_valid(instructions: list[Instruction]) -> None: """ Checks that exn_tab_entries of instructions are valid. An entry's start, end, and target must be in instructions. Instructions with an exn_tab_entry are located within the entry's start and end instructions. Instructions do not share exn_tab_entries. Implicitly checks for no duplicate instructions. """ indexof = get_indexof(instructions) exn_tab_entry_set = set() for i, inst in enumerate(instructions): if inst.exn_tab_entry: assert sys.version_info >= (3, 11) assert id(inst.exn_tab_entry) not in exn_tab_entry_set exn_tab_entry_set.add(id(inst.exn_tab_entry)) entry = inst.exn_tab_entry assert entry.start in indexof assert entry.end in indexof assert entry.target in indexof assert indexof[entry.start] <= i <= indexof[entry.end] def strip_extended_args(instructions: list[Instruction]) -> None: instructions[:] = [i for i in instructions if i.opcode != dis.EXTENDED_ARG] # Overwrites old_inst with a sequence of new instructions. # This is necessary in order to preserve jump targets to the old # instruction, exception table entries, and positions. # Returns the modified sequence of instructions (including the modified # old instruction!) that can be manipulated elsewhere. 
def overwrite_instruction(
    old_inst: Instruction, new_insts: list[Instruction]
) -> list[Instruction]:
    # update old_inst.exn_tab_entry.end if necessary: if old_inst was the last
    # instruction of an exception table range, the range must now extend to
    # the last replacement instruction
    if (
        old_inst.exn_tab_entry
        and old_inst.exn_tab_entry.end is old_inst
        and len(new_insts) > 1
    ):
        old_inst.exn_tab_entry.end = new_insts[-1]
    # preserve exception table entries and positions
    for inst in new_insts[1:]:
        inst.exn_tab_entry = copy.copy(old_inst.exn_tab_entry)
        inst.positions = old_inst.positions
    # modify old_inst in-place to preserve jump target
    old_inst.opcode = new_insts[0].opcode
    old_inst.opname = new_insts[0].opname
    old_inst.arg = new_insts[0].arg
    old_inst.argval = new_insts[0].argval
    old_inst.target = new_insts[0].target
    return [old_inst] + new_insts[1:]


def remove_load_call_method(instructions: list[Instruction]) -> list[Instruction]:
    """LOAD_METHOD puts a NULL on the stack which causes issues, so remove it"""
    # pre-3.11 only; 3.11+ method-call bytecode is handled elsewhere
    assert sys.version_info < (3, 11)
    rewrites = {"LOAD_METHOD": "LOAD_ATTR", "CALL_METHOD": "CALL_FUNCTION"}
    for inst in instructions:
        if inst.opname in rewrites:
            inst.opname = rewrites[inst.opname]
            inst.opcode = dis.opmap[inst.opname]
    return instructions


def remove_jump_if_none(instructions: list[Instruction]) -> None:
    # Rewrite POP_JUMP_*_IF_(NOT_)NONE into LOAD_CONST None; IS_OP; POP_JUMP_*_IF_TRUE
    new_insts = []
    for inst in instructions:
        if "_NONE" in inst.opname:
            is_op = create_instruction("IS_OP", arg=int("NOT" in inst.opname))
            # need both argval and arg set correctly now (not later)
            is_op.argval = is_op.arg
            if sys.version_info < (3, 12):
                jump_op = create_instruction(
                    (
                        "POP_JUMP_FORWARD_IF_TRUE"
                        if "FORWARD" in inst.opname
                        else "POP_JUMP_BACKWARD_IF_TRUE"
                    ),
                    target=inst.target,
                )
            else:
                # 3.12 has a single direction-agnostic jump opcode
                jump_op = create_instruction("POP_JUMP_IF_TRUE", target=inst.target)
            replace_insts = [
                create_instruction("LOAD_CONST", argval=None),
                is_op,
                jump_op,
            ]
            new_insts.extend(overwrite_instruction(inst, replace_insts))
        else:
            new_insts.append(inst)
    instructions[:] = new_insts


def remove_binary_store_slice(instructions: list[Instruction]) -> None:
    new_insts = []
    for inst in instructions:
        new_insts.append(inst)
        if inst.opname in ("BINARY_SLICE", "STORE_SLICE"):
            # new instruction: the matching *_SUBSCR op that consumes the slice
            if sys.version_info >= (3, 14) and inst.opname == "BINARY_SLICE":
                subscr_inst = create_binary_subscr()
            else:
                subscr_inst = create_instruction(inst.opname.replace("SLICE", "SUBSCR"))
            if inst.exn_tab_entry and inst.exn_tab_entry.end is inst:
                inst.exn_tab_entry.end = subscr_inst
            subscr_inst.exn_tab_entry = copy.copy(inst.exn_tab_entry)
            subscr_inst.positions = inst.positions
            # modify inst in-place to preserve jump target
            inst.opcode = dis.opmap["BUILD_SLICE"]
            inst.opname = "BUILD_SLICE"
            inst.arg = 2
            inst.argval = 2
            new_insts.append(subscr_inst)
    instructions[:] = new_insts


# Maps 3.13+ fused (superinstruction) opnames to the pair of ops they combine.
FUSED_INSTS = {
    "LOAD_FAST_LOAD_FAST": ("LOAD_FAST", "LOAD_FAST"),
    "LOAD_FAST_BORROW_LOAD_FAST_BORROW": ("LOAD_FAST_BORROW", "LOAD_FAST_BORROW"),
    "STORE_FAST_STORE_FAST": ("STORE_FAST", "STORE_FAST"),
    "STORE_FAST_LOAD_FAST": ("STORE_FAST", "LOAD_FAST"),
}


def remove_fused_load_store(instructions: list[Instruction]) -> None:
    # Split each fused superinstruction back into its two component ops.
    new_insts = []
    for inst in instructions:
        if inst.opname in FUSED_INSTS:
            inst0, inst1 = FUSED_INSTS[inst.opname]
            # fused ops carry a pair of argvals, one per component
            argval0, argval1 = inst.argval
            replace_insts = [
                create_instruction(inst0, argval=argval0),
                create_instruction(inst1, argval=argval1),
            ]
            new_insts.extend(overwrite_instruction(inst, replace_insts))
        else:
            new_insts.append(inst)
    instructions[:] = new_insts


# adds GRAPH_BREAK_IF_LEAF (not a real instruction) before RETURN_* instructions
# for testing purposes
def add_graph_break_if_leaf_instructions(instructions: list[Instruction]) -> None:
    new_insts = []
    for inst in instructions:
        if "RETURN" in inst.opname:
            replace_insts = [
                create_instruction("NOP", argval="GRAPH_BREAK_IF_LEAF"),
                create_instruction(inst.opname, argval=inst.argval),
            ]
            new_insts.extend(overwrite_instruction(inst, replace_insts))
        else:
            new_insts.append(inst)
    instructions[:] = new_insts


def remove_graph_break_if_leaf_instructions(instructions: list[Instruction]) -> None:
    # Inverse of add_graph_break_if_leaf_instructions: drop the marker NOPs.
    new_insts = []
    for inst, next_inst in itertools.pairwise(instructions):
        if (
            inst.opname == "NOP"
            and inst.argval == "GRAPH_BREAK_IF_LEAF"
            and next_inst.opname.startswith("RETURN")
        ):
            # remove this instruction and update all other instructions' jump targets
            for i in range(len(instructions)):
                if instructions[i].target is inst:
                    instructions[i].target = next_inst
                if instructions[i].exn_tab_entry:
                    # linter is mistakenly complaining that None has no attribute "..."
                    # but this codepath only runs if instructions[i].exn_tab_entry is not None
                    if instructions[i].exn_tab_entry.start is inst:  # type: ignore[union-attr]
                        instructions[i].exn_tab_entry.start = next_inst  # type: ignore[union-attr]
                    if instructions[i].exn_tab_entry.end is inst:  # type: ignore[union-attr]
                        instructions[i].exn_tab_entry.end = next_inst  # type: ignore[union-attr]
                    if instructions[i].exn_tab_entry.target is inst:  # type: ignore[union-attr]
                        instructions[i].exn_tab_entry.target = next_inst  # type: ignore[union-attr]
        else:
            new_insts.append(inst)
    # pairwise stops one short of the end; the last instruction is always kept
    new_insts.append(instructions[-1])
    instructions[:] = new_insts


def explicit_super(code: types.CodeType, instructions: list[Instruction]) -> None:
    """convert super() with no args into explicit arg form"""
    cell_and_free = (code.co_cellvars or ()) + (code.co_freevars or ())
    if not len(code.co_varnames):
        # A function with no argument cannot contain a valid "super()" call
        return
    output = []
    for idx, inst in enumerate(instructions):
        output.append(inst)
        if inst.opname == "LOAD_GLOBAL" and inst.argval == "super":
            nexti = instructions[idx + 1]
            # only rewrite a zero-arg call to super(); the call opcode differs by version
            if nexti.arg == 0 and (
                (sys.version_info >= (3, 12) and nexti.opname == "CALL")
                or (
                    sys.version_info >= (3, 11)
                    and sys.version_info < (3, 12)
                    and nexti.opname == "PRECALL"
                )
                or (sys.version_info < (3, 11) and nexti.opname == "CALL_FUNCTION")
            ):
                assert "__class__" in cell_and_free
                output.append(create_instruction("LOAD_DEREF", argval="__class__"))
                first_var = code.co_varnames[0]
                if first_var in cell_and_free:
                    output.append(create_instruction("LOAD_DEREF", argval=first_var))
                else:
                    output.append(create_instruction("LOAD_FAST", argval=first_var))
                # the call now takes two explicit args: __class__ and self/cls
                nexti.arg = 2
                nexti.argval = 2
                if nexti.opname == "PRECALL":
                    # also update the following CALL instruction
                    call_inst = instructions[idx + 2]
                    call_inst.arg = 2
                    call_inst.argval = 2
    instructions[:] = output


def fix_extended_args(instructions: list[Instruction]) -> int:
    """Fill in correct argvals for EXTENDED_ARG ops"""
    output: list[Instruction] = []

    def maybe_pop_n(n: int) -> None:
        # drop up to n stale EXTENDED_ARGs preceding the current instruction
        for _ in range(n):
            if output and output[-1].opcode == dis.EXTENDED_ARG:
                output.pop()

    for inst in instructions:
        if inst.opcode == dis.EXTENDED_ARG:
            # Leave this instruction alone for now so we never shrink code
            inst.arg = 0
        elif inst.arg and inst.arg > 0xFFFFFF:
            # arg needs 4 bytes: emit three EXTENDED_ARG prefixes
            maybe_pop_n(3)
            output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 24))
            output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 16))
            output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8))
        elif inst.arg and inst.arg > 0xFFFF:
            maybe_pop_n(2)
            output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 16))
            output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8))
        elif inst.arg and inst.arg > 0xFF:
            maybe_pop_n(1)
            output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8))
        output.append(inst)
    added = len(output) - len(instructions)
    assert added >= 0
    instructions[:] = output
    # number of EXTENDED_ARG instructions added; nonzero means offsets changed
    return added


def instruction_size(inst: Instruction) -> int:
    # size in bytes, including 3.11+ inline cache entries (2 bytes each)
    import torch

    if sys.version_info >= (3, 11):
        return 2 * (torch._C._dynamo.eval_frame.py_opcode_caches[inst.opcode] + 1)
    return 2


def check_offsets(instructions: Sequence[Instruction]) -> None:
    # verify the recorded offsets match a fresh sequential layout
    offset = 0
    for inst in instructions:
        assert inst.offset == offset
        offset += instruction_size(inst)


def update_offsets(instructions: Sequence[Instruction]) -> None:
    # recompute byte offsets from scratch after insertions/removals
    offset = 0
    for inst in instructions:
        inst.offset = offset
        # pyrefly: ignore [unsupported-operation]
        offset += instruction_size(inst)


def debug_bytes(*args: bytes) -> str:
    # columnar dump of byte sequences plus a final row marking mismatches
    index = range(max(map(len, args)))
    result = [
        " ".join(f"{x:03}" for x in arg)
        for arg in [index]
        + list(args)
        + [[int(a != b) for a, b in zip(args[-1], args[-2])]]
    ]

    return "bytes mismatch\n" + "\n".join(result)


def debug_checks(code: types.CodeType) -> None:
    """Make sure our assembler produces same bytes as we start with"""
    dode, _ = transform_code_object(code, lambda x, y: None, safe=True)
    assert code.co_code == dode.co_code, debug_bytes(code.co_code, dode.co_code)
    assert code.co_lnotab == dode.co_lnotab, debug_bytes(code.co_lnotab, dode.co_lnotab)


HAS_LOCAL = set(dis.haslocal)
HAS_NAME = set(dis.hasname)
HAS_FREE = set(dis.hasfree)
HAS_CONST = set(dis.hasconst)


def get_const_index(code_options: dict[str, Any], val: Any) -> int:
    # find val in co_consts by identity, appending it if absent
    for i, v in enumerate(code_options["co_consts"]):
        # NOTE: stronger comparison is required, since we have
        # examples where two values compare equal but have
        # different semantic meaning in some cases, e.g.
        # 0.0 == -0.0 but have different effects in torch.copysign.
        if val is v:
            return i
    code_options["co_consts"] += (val,)
    return len(code_options["co_consts"]) - 1


def fix_vars(
    instructions: list[Instruction],
    code_options: dict[str, Any],
    varname_from_oparg: Optional[Callable[..., Any]] = None,
) -> None:
    # compute instruction arg from argval if arg is not provided
    names = {name: idx for idx, name in enumerate(code_options["co_names"])}

    def get_name_index(name: str) -> int:
        try:
            idx = names[name]
        except KeyError:
            # Add a missing item to co_names
            idx = names[name] = len(names)
            code_options["co_names"] = (*code_options["co_names"], name)
            assert len(code_options["co_names"]) == len(names)
        return idx

    if sys.version_info < (3, 11):
        assert varname_from_oparg is None
        varnames = {name: idx for idx, name in enumerate(code_options["co_varnames"])}
        freenames = {
            name: idx
            for idx, name in enumerate(
                code_options["co_cellvars"] + code_options["co_freevars"]
            )
        }
    else:
        # 3.11+ shares one oparg namespace for locals/cells/frees; recover it
        # from the temporary code object's _varname_from_oparg
        assert callable(varname_from_oparg)
        allnames = {}
        for idx in itertools.count():
            try:
                name = varname_from_oparg(idx)
                allnames[name] = idx
            except IndexError:
                break
        varnames = {name: allnames[name] for name in code_options["co_varnames"]}
        freenames = {
            name: allnames[name]
            for name in code_options["co_cellvars"] + code_options["co_freevars"]
        }
    for i in range(len(instructions)):

        def should_compute_arg() -> bool:
            # argval is prioritized over arg
            return instructions[i].argval is not _NotProvided

        if instructions[i].opname == "LOAD_GLOBAL":
            # 3.11 LOAD_GLOBAL requires both arg and argval - see create_instruction
            assert instructions[i].argval is not _NotProvided
            if sys.version_info >= (3, 11):
                assert instructions[i].arg is not None
                # low bit is the "push NULL" flag; name index goes in the high bits
                instructions[i].arg = (get_name_index(instructions[i].argval) << 1) + (
                    cast(int, instructions[i].arg) % 2
                )
            else:
                instructions[i].arg = get_name_index(instructions[i].argval)
        elif instructions[i].opname == "LOAD_ATTR":
            # 3.12 LOAD_ATTR requires both arg and argval, like LOAD_GLOBAL
            assert instructions[i].argval is not _NotProvided
            if sys.version_info >= (3, 12):
                assert instructions[i].arg is not None
                instructions[i].arg = (get_name_index(instructions[i].argval) << 1) + (
                    cast(int, instructions[i].arg) % 2
                )
            else:
                instructions[i].arg = get_name_index(instructions[i].argval)
        elif instructions[i].opname == "LOAD_SUPER_ATTR":
            assert instructions[i].arg is not None
            assert instructions[i].argval is not _NotProvided
            # Copy low bit, force second bit on for explicit super (the "+ 2")
            instructions[i].arg = (
                (get_name_index(instructions[i].argval) << 2)
                + (cast(int, instructions[i].arg) % 2)
                + 2
            )
        elif instructions[i].opname in FUSED_INSTS:
            assert sys.version_info >= (3, 13)
            assert isinstance(instructions[i].argval, tuple)
            assert len(instructions[i].argval) == 2
            arg_tuple = tuple(
                varnames[name] if name in varnames else freenames[name]
                for name in instructions[i].argval
            )
            # fused ops pack two 4-bit local indices into one oparg
            instructions[i].arg = (arg_tuple[0] << 4) + (arg_tuple[1] & 15)
        elif instructions[i].opcode in HAS_LOCAL:
            if should_compute_arg():
                if (
                    sys.version_info >= (3, 13)
                    and instructions[i].argval not in varnames
                ):
                    # instructions like LOAD_FAST used for both local and free vars
                    instructions[i].arg = freenames[instructions[i].argval]
                else:
                    instructions[i].arg = varnames[instructions[i].argval]
        elif instructions[i].opcode in HAS_NAME:
            if should_compute_arg():
                instructions[i].arg = get_name_index(instructions[i].argval)
        elif instructions[i].opcode in HAS_FREE:
            if should_compute_arg():
                instructions[i].arg = freenames[instructions[i].argval]
        elif instructions[i].opcode in HAS_CONST:
            # NOTE: only update argval if arg is not provided. This assumes
            # that any additions to co_consts are appended.
            if instructions[i].arg is None:
                # cannot use a dictionary since consts may not be hashable
                idx = get_const_index(code_options, instructions[i].argval)
                assert idx >= 0
                instructions[i].arg = idx


def clear_instruction_args(instructions: list[Instruction]) -> None:
    # Clear the instruction arg for instructions that have argvals.
    # Useful for using dis'd bytecode within generated bytecode.
    for inst in instructions:
        if (
            inst.argval is not _NotProvided
            and (
                inst.opcode in HAS_LOCAL
                or inst.opcode in HAS_NAME
                or inst.opcode in HAS_FREE
                or inst.opcode in HAS_CONST
            )
            and inst.opname not in ("LOAD_GLOBAL", "LOAD_ATTR", "LOAD_SUPER_ATTR")
        ):
            inst.arg = None


@functools.lru_cache
def get_code_keys() -> list[str]:
    # Python 3.11 changes to code keys are not fully documented.
    # See https://github.com/python/cpython/blob/3.11/Objects/clinic/codeobject.c.h#L24
    # for new format.
    # keys in the exact positional order expected by the CodeType constructor
    keys = ["co_argcount"]
    keys.append("co_posonlyargcount")
    keys.extend(
        [
            "co_kwonlyargcount",
            "co_nlocals",
            "co_stacksize",
            "co_flags",
            "co_code",
            "co_consts",
            "co_names",
            "co_varnames",
            "co_filename",
            "co_name",
        ]
    )
    if sys.version_info >= (3, 11):
        keys.append("co_qualname")
    keys.append("co_firstlineno")
    keys.append("co_linetable")
    if sys.version_info >= (3, 11):
        # not documented, but introduced in https://github.com/python/cpython/issues/84403
        keys.append("co_exceptiontable")
    keys.extend(
        [
            "co_freevars",
            "co_cellvars",
        ]
    )
    return keys


def transform_code_object(
    code: types.CodeType,
    transformations: Callable[
        [list[Instruction], dict[str, Any]], Optional["DynamoTracerOutput"]
    ],
    safe: bool = False,
) -> tuple[types.CodeType, Optional["DynamoTracerOutput"]]:
    # Decompile `code`, run `transformations` over the instruction list and
    # code_options in place, then reassemble into a new code object.
    keys = get_code_keys()
    code_options = {k: getattr(code, k) for k in keys}
    assert len(code_options["co_varnames"]) == code_options["co_nlocals"]

    instructions = cleaned_instructions(code, safe)
    # propagate line nums again for added instructions
    propagate_line_nums(instructions)

    tracer_output = transformations(instructions, code_options)

    _, bytecode = clean_and_assemble_instructions(instructions, keys, code_options)
    return bytecode, tracer_output


def clean_and_assemble_instructions(
    instructions: list[Instruction], keys: list[str], code_options: dict[str, Any]
) -> tuple[list[Instruction], types.CodeType]:
    remove_graph_break_if_leaf_instructions(instructions)

    # also implicitly checks for no duplicate instructions
    check_inst_exn_tab_entries_valid(instructions)

    code_options["co_nlocals"] = len(code_options["co_varnames"])
    varname_from_oparg = None
    if sys.version_info >= (3, 11):
        # temporary code object with updated names
        tmp_code = types.CodeType(*[code_options[k] for k in keys])
        varname_from_oparg = tmp_code._varname_from_oparg  # type: ignore[attr-defined]
    fix_vars(instructions, code_options, varname_from_oparg=varname_from_oparg)

    # fixed-point loop: adding EXTENDED_ARGs can shift offsets, which can in
    # turn change jump args and require more EXTENDED_ARGs
    dirty = True
    while dirty:
        update_offsets(instructions)
devirtualize_jumps(instructions) # this pass might change offsets, if so we need to try again dirty = bool(fix_extended_args(instructions)) remove_extra_line_nums(instructions) bytecode, lnotab = assemble(instructions, code_options["co_firstlineno"]) code_options["co_linetable"] = lnotab code_options["co_code"] = bytecode code_options["co_stacksize"] = stacksize_analysis(instructions) assert set(keys) - {"co_posonlyargcount"} == set(code_options.keys()) - { "co_posonlyargcount" } if sys.version_info >= (3, 11): code_options["co_exceptiontable"] = assemble_exception_table( compute_exception_table(instructions) ) return instructions, types.CodeType(*[code_options[k] for k in keys]) def populate_kw_names_argval(instructions: Sequence[Instruction], consts: Any) -> None: for inst in instructions: if inst.opname == "KW_NAMES": inst.argval = consts[inst.arg] # If safe=True, we do not make any bytecode modifications. # Mainly used for debugging bytecode_transformation (see debug_checks) def cleaned_instructions(code: types.CodeType, safe: bool = False) -> list[Instruction]: instructions = _cached_cleaned_instructions(code, safe) # We have a lot of code that implicitly mutates the instruction array. We # could do better here by making the copies explicit when necessary. return _clone_instructions(instructions) # Copy an instructions array, making sure to remap the individual instruction targets. def _clone_instructions(instructions: Sequence[Instruction]) -> list[Instruction]: # This is super hot and this is the fastest way to do this (tried copy.copy # and dataclasses.replace). copied = [ Instruction( i.opcode, i.opname, i.arg, i.argval, i.offset, i.starts_line, i.is_jump_target, i.positions, i.target, i.exn_tab_entry, i.argrepr, ) for i in instructions ] remap = dict(zip(instructions, copied)) # Handle `None` in the remapper so we don't need an extra `if`. 
remap[None] = None # type: ignore[index, assignment] for i in copied: i.target = remap[i.target] # type: ignore[index] if entry := i.exn_tab_entry: i.exn_tab_entry = InstructionExnTabEntry( remap[entry.start], remap[entry.end], remap[entry.target], entry.depth, entry.lasti, ) return copied @functools.lru_cache def _cached_cleaned_instructions( code: types.CodeType, safe: bool = False ) -> Sequence[Instruction]: instructions = list(map(convert_instruction, dis.get_instructions(code))) # propagate now in case we remove some instructions propagate_line_nums(instructions) check_offsets(instructions) if sys.version_info >= (3, 11): populate_kw_names_argval(instructions, code.co_consts) virtualize_exception_table(code.co_exceptiontable, instructions) virtualize_jumps(instructions) strip_extended_args(instructions) if not safe: if sys.version_info < (3, 11): remove_load_call_method(instructions) if sys.version_info < (3, 12): explicit_super(code, instructions) if sys.version_info >= (3, 11): remove_jump_if_none(instructions) if sys.version_info >= (3, 12): remove_binary_store_slice(instructions) if sys.version_info >= (3, 13): remove_fused_load_store(instructions) if config.debug_force_graph_break_on_leaf_return: add_graph_break_if_leaf_instructions(instructions) if sys.version_info >= (3, 11): update_offsets(instructions) devirtualize_jumps(instructions) return instructions _unique_id_counter = itertools.count() def unique_id(name: str, with_uuid: bool = False) -> str: ret = f"{name}_{next(_unique_id_counter)}" if with_uuid: ret += f"_{uuid.uuid4()}".replace("-", "_") return ret def is_generator(code: types.CodeType) -> bool: co_generator = 0x20 return (code.co_flags & co_generator) > 0 def bytecode_from_template( fn: Callable[..., Any], varname_map: Optional[Mapping[Any, Any]] = None, noreturn: bool = True, noprefix: bool = True, ) -> list[Instruction]: """Generates bytecode from a template function `fn` for use in dynamo bytecode generation. 
    For example, we can generate Python-version-independent bytecode
    for looping through a dictionary and copying the values to a new dictionary.

    def template(d1, d2):
        for k, v in d1.items():
            d2[k] = v

    or a try block:

    def template():
        try:
            dummy1
        except:
            dummy2
            raise
        dummy3

    Args:
        fn: a function template to generate bytecode from
        varname_map: a mapping of `fn`'s varnames to new names. This
            map will be applied to the generated bytecode's varnames.
            For example, local variables in `fn` can be replaced with
            new names that are generated by `OutputGraph.new_var`.
        noreturn: remove all RETURN_* bytecodes and replace them with a jump
            to the end of the bytecode. NOTE: any items pushed to the stack
            for return WILL remain on the stack! Append a POP_TOP if you don't want
            that item to be present.
        noprefix: remove prefix bytecodes (all bytecode before the first RESUME, inclusive).
    """
    insts = cleaned_instructions(fn.__code__)
    clear_instruction_args(insts)

    if noprefix:
        for i, inst in enumerate(insts):
            if inst.opname == "RESUME":
                insts = insts[i + 1 :]
                break

    for inst in insts:
        # If we don't reset starts_line, then the generated
        # bytecode's line number will be based on fn's.
        inst.starts_line = None
        inst.positions = None
        if varname_map and inst.argval in varname_map:
            inst.argval = varname_map[inst.argval]

    if noreturn:
        if sys.version_info >= (3, 12):
            # replace RETURN_CONST with LOAD_CONST RETURN_VALUE
            new_insts = []
            for inst in insts:
                if inst.opname == "RETURN_CONST":
                    inst.opcode = dis.opmap["LOAD_CONST"]
                    inst.opname = "LOAD_CONST"
                    new_insts.append(inst)
                    # no need to propagate target/exn table
                    new_insts.append(create_instruction("RETURN_VALUE"))
                else:
                    new_insts.append(inst)
            insts = new_insts

        returns = []
        for inst in insts:
            if inst.opname == "RETURN_VALUE":
                returns.append(inst)

        if len(returns) == 1 and returns[0] is insts[-1]:
            # only 1 return at the end - just pop it
            insts.pop(-1)
        elif len(returns) > 0:
            # create jump target - if the last inst is a return,
            # we can replace it with a NOP and make that the jump target.
            if insts[-1] is returns[-1]:
                insts[-1].opname = "NOP"
                insts[-1].opcode = dis.opmap["NOP"]
                insts[-1].arg = None
                insts[-1].argval = _NotProvided
                returns.pop(-1)
            else:
                insts.append(create_instruction("NOP"))

            # replace returns with jumps
            for inst in returns:
                # don't replace inst with new instruction
                # due to targeting/exn table/etc.
                jump_inst = create_jump_absolute(insts[-1])
                inst.opname = jump_inst.opname
                inst.opcode = jump_inst.opcode
                inst.arg = jump_inst.arg
                inst.argval = jump_inst.argval
                inst.target = jump_inst.target

    return insts
ExceptionTableEntry
python
kamyu104__LeetCode-Solutions
Python/find-the-index-of-permutation.py
{ "start": 68, "end": 1285 }
class ____(object): def getPermutationIndex(self, perm): """ :type perm: List[int] :rtype: int """ MOD = 10**9+7 class BIT(object): # 0-indexed. def __init__(self, n): self.__bit = [0]*(n+1) # Extra one for dummy node. def add(self, i, val): i += 1 # Extra one for dummy node. while i < len(self.__bit): self.__bit[i] = (self.__bit[i]+val) % MOD i += (i & -i) def query(self, i): i += 1 # Extra one for dummy node. ret = 0 while i > 0: ret = (ret+self.__bit[i]) % MOD i -= (i & -i) return ret fact = [0]*len(perm) fact[0] = 1 for i in xrange(len(fact)-1): fact[i+1] = ((i+1)*fact[i])%MOD result = 0 bit = BIT(len(perm)) for i, x in enumerate(perm): result = (result+(((((x-1)-bit.query((x-1)-1))%MOD)*fact[(len(perm)-1)-i])%MOD))%MOD bit.add(x-1, +1) return result # Time: O(nlogn) # Space: O(n) # bit, fenwick tree, combinatorics
Solution
python
astropy__astropy
astropy/io/fits/hdu/streaming.py
{ "start": 322, "end": 7586 }
class ____:
    """
    A class that provides the capability to stream data to a FITS file
    instead of requiring data to all be written at once.

    The following pseudocode illustrates its use::

        header = astropy.io.fits.Header()
        for all the cards you need in the header:
            header[key] = (value, comment)
        shdu = astropy.io.fits.StreamingHDU('filename.fits', header)
        for each piece of data:
            shdu.write(data)
        shdu.close()
    """

    def __init__(self, name, header):
        """
        Construct a `StreamingHDU` object given a file name and a header.

        Parameters
        ----------
        name : path-like or file-like
            The file to which the header and data will be streamed. If opened,
            the file object must be opened in a writeable binary mode such as
            'wb' or 'ab+'.

        header : `Header` instance
            The header object associated with the data to be written to the file.

        Notes
        -----
        The file will be opened and the header appended to the end of the file.
        If the file does not already exist, it will be created, and if the
        header represents a Primary header, it will be written to the beginning
        of the file. If the file does not exist and the provided header is not
        a Primary header, a default Primary HDU will be inserted at the
        beginning of the file and the provided header will be added as the
        first extension. If the file does already exist, but the provided
        header represents a Primary header, the header will be modified to an
        image extension header and appended to the end of the file.
        """
        if isinstance(name, gzip.GzipFile):
            raise TypeError("StreamingHDU not supported for GzipFile objects.")

        # copy so that the edits below never mutate the caller's header
        self._header = header.copy()

        # handle a file object instead of a file name
        filename = fileobj_name(name) or ""

        filename = os.path.expanduser(filename)

        # Check if the file already exists. If it does not, check to see
        # if we were provided with a Primary Header. If not we will need
        # to prepend a default PrimaryHDU to the file before writing the
        # given header.
        newfile = False

        if filename:
            if not os.path.exists(filename) or os.path.getsize(filename) == 0:
                newfile = True
        # NOTE(review): appears to treat a file-like object exposing a `len`
        # attribute of 0 as an empty (new) file — confirm against _File API
        elif hasattr(name, "len") and name.len == 0:
            newfile = True

        if newfile:
            if "SIMPLE" not in self._header:
                # not a Primary header: prepend a default Primary HDU first
                hdulist = HDUList([PrimaryHDU()])
                hdulist.writeto(name, "exception")
        else:
            # This will not be the first extension in the file so we
            # must change the Primary header provided into an image
            # extension header.

            if "SIMPLE" in self._header:
                self._header.set("XTENSION", "IMAGE", "Image extension", after="SIMPLE")
                del self._header["SIMPLE"]

            if "PCOUNT" not in self._header:
                dim = self._header["NAXIS"]
                dim = "" if dim == 0 else str(dim)
                # insert PCOUNT after the last NAXISn card (or NAXIS if 0-dim)
                self._header.set(
                    "PCOUNT", 0, "number of parameters", after=f"NAXIS{dim}"
                )

            if "GCOUNT" not in self._header:
                self._header.set("GCOUNT", 1, "number of groups", after="PCOUNT")

        self._ffo = _File(name, "append")

        # TODO : Fix this once the HDU writing API is cleaned up
        tmp_hdu = _BaseHDU()
        # Passing self._header as an argument to _BaseHDU() will cause its
        # values to be modified in undesired ways...need to have a better way
        # of doing this
        tmp_hdu._header = self._header
        # record offsets so write() can tell how much data has been streamed
        self._header_offset = tmp_hdu._writeheader(self._ffo)[0]
        self._data_offset = self._ffo.tell()
        self._size = self.size

        if self._size != 0:
            self.writecomplete = False
        else:
            self.writecomplete = True

    # Support the 'with' statement
    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def write(self, data):
        """
        Write the given data to the stream.

        Parameters
        ----------
        data : ndarray
            Data to stream to the file.

        Returns
        -------
        writecomplete : int
            Flag that when `True` indicates that all of the required data has
            been written to the stream.

        Notes
        -----
        Only the amount of data specified in the header provided to the class
        constructor may be written to the stream. If the provided data would
        cause the stream to overflow, an `OSError` exception is raised and the
        data is not written. Once sufficient data has been written to the
        stream to satisfy the amount specified in the header, the stream is
        padded to fill a complete FITS block and no more data will be accepted.
        An attempt to write more data after the stream has been filled will
        raise an `OSError` exception. If the dtype of the input data does not
        match what is expected by the header, a `TypeError` exception is
        raised.
        """
        # bytes already streamed since the data section began
        size = self._ffo.tell() - self._data_offset

        if self.writecomplete or size + data.nbytes > self._size:
            raise OSError(
                "Attempt to write more data to the stream than the header specified."
            )

        if BITPIX2DTYPE[self._header["BITPIX"]] != data.dtype.name:
            raise TypeError(
                "Supplied data does not match the type specified in the header."
            )

        if data.dtype.str[0] != ">":
            # byteswap little endian arrays before writing (FITS is big-endian)
            output = data.byteswap()
        else:
            output = data

        self._ffo.writearray(output)

        if self._ffo.tell() - self._data_offset == self._size:
            # the stream is full so pad the data to the next FITS block
            self._ffo.write(_pad_length(self._size) * "\0")
            self.writecomplete = True

        self._ffo.flush()

        return self.writecomplete

    @property
    def size(self):
        """
        Return the size (in bytes) of the data portion of the HDU.
        """
        size = 0
        naxis = self._header.get("NAXIS", 0)

        if naxis > 0:
            simple = self._header.get("SIMPLE", "F")
            random_groups = self._header.get("GROUPS", "F")

            # random-groups HDUs skip NAXIS1 in the size product
            if simple == "T" and random_groups == "T":
                groups = 1
            else:
                groups = 0

            size = 1

            for idx in range(groups, naxis):
                size = size * self._header["NAXIS" + str(idx + 1)]

            bitpix = self._header["BITPIX"]
            gcount = self._header.get("GCOUNT", 1)
            pcount = self._header.get("PCOUNT", 0)
            # standard FITS data size formula: |BITPIX|/8 * GCOUNT * (PCOUNT + prod(NAXISn))
            size = abs(bitpix) * gcount * (pcount + size) // 8

        return size

    def close(self):
        """
        Close the physical FITS file.
        """
        self._ffo.close()
StreamingHDU
python
allegroai__clearml
clearml/hyperdatasets/management.py
{ "start": 321, "end": 8319 }
class ____: @classmethod def get( cls: Type[HD], dataset_name: Optional[str] = None, version_name: Optional[str] = None, project_name: Optional[str] = None, *, dataset_id: Optional[str] = None, version_id: Optional[str] = None, ) -> HD: """ Return a `HyperDataset` handle bound to an existing dataset/version. :param dataset_name: Dataset collection name. Mutually exclusive with `dataset_id` :param version_name: Version name. Mutually exclusive with `version_id` :param project_name: Optional ClearML project filter when using `dataset_name` :param dataset_id: Dataset identifier. Mutually exclusive with `dataset_name` :param version_id: Version identifier. Mutually exclusive with `version_name` :return: `HyperDataset` instance pointing at the requested dataset/version """ if dataset_name and dataset_id: raise ValueError("Provide either dataset_name or dataset_id, not both") if version_name and version_id: raise ValueError("Provide either version_name or version_id, not both") if not dataset_name and not dataset_id: raise ValueError("dataset_name or dataset_id must be provided") if dataset_id: ds = HyperDatasetManagementBackend.get_dataset_by_id(dataset_id) if not ds: raise ValueError(f"Dataset not found: {dataset_id}") else: session = Session() project_id = get_existing_project(session, project_name) if project_name else None ds = HyperDatasetManagementBackend.get_dataset(name=dataset_name, project_id=project_id) if not ds: raise ValueError(f"Dataset not found: {dataset_name}") dataset_id = getattr(ds, "id", None) if not dataset_id: raise ValueError("Dataset has no identifier") if version_id: if not HyperDatasetManagementBackend.version_exists(dataset_id=dataset_id, version_id=version_id): raise ValueError(f"Version not found: {version_id}") resolved_version_id = version_id else: resolved_version_id = HyperDatasetManagementBackend.get_version( dataset_id=dataset_id, version_name=version_name ) if not resolved_version_id: raise ValueError( f"{'Version not found: ' + 
version_name if version_name else 'No versions found'} (dataset={dataset_name or dataset_id})" # noqa ) target_cls = cls._result_class() obj = target_cls.__new__(target_cls) # type: ignore[misc] obj._project_id = getattr(ds, "project", None) obj._dataset_id = dataset_id obj._version_id = resolved_version_id return obj # type: ignore[return-value] @classmethod def exists( cls, dataset_name: Optional[str] = None, version_name: Optional[str] = None, project_name: Optional[str] = None, *, dataset_id: Optional[str] = None, version_id: Optional[str] = None, ) -> bool: """ Check whether a dataset (and optionally a specific version) exists. :param dataset_name: Dataset collection name. Mutually exclusive with `dataset_id` :param version_name: Dataset version name. Mutually exclusive with `version_id` :param project_name: Optional project filter when searching by name :param dataset_id: Dataset identifier to query. Mutually exclusive with `dataset_name` :param version_id: Version identifier to query. 
Mutually exclusive with `version_name` :return: True when the dataset (and requested version) can be found """ if dataset_name and dataset_id: raise ValueError("Provide either dataset_name or dataset_id, not both") if version_name and version_id: raise ValueError("Provide either version_name or version_id, not both") if not dataset_name and not dataset_id: raise ValueError("dataset_name or dataset_id must be provided") if dataset_id: ds = HyperDatasetManagementBackend.get_dataset_by_id(dataset_id) if not ds: return False else: session = Session() project_id = get_existing_project(session, project_name) if project_name else None ds = HyperDatasetManagementBackend.get_dataset(name=dataset_name, project_id=project_id) if not ds: return False dataset_id = getattr(ds, "id", None) if not dataset_id: return False if version_id in (None, "*") and version_name is None: return True if version_id not in (None, "*"): return HyperDatasetManagementBackend.version_exists(dataset_id=dataset_id, version_id=version_id) version = HyperDatasetManagementBackend.get_version(dataset_id=dataset_id, version_name=version_name) return bool(version) @classmethod def list( cls, project_name: Optional[str] = None, partial_name: Optional[str] = None, tags: Optional[Sequence[str]] = None, ids: Optional[Sequence[str]] = None, recursive_project_search: bool = True, include_archived: bool = True, ) -> List[Dict[str, Any]]: """ List HyperDataset collections matching the provided filters. 
:param project_name: Optional project filter (matches project hierarchy when recursive) :param partial_name: Optional regex / partial dataset name filter :param tags: Optional list of tags to filter by :param ids: Optional list of dataset identifiers :param recursive_project_search: Include subprojects when filtering by project_name :param include_archived: Include archived datasets when True :return: List of dictionaries describing the matching datasets """ return HyperDatasetManagementBackend.list( dataset_project=project_name, partial_name=partial_name, tags=tags, ids=ids, recursive_project_search=recursive_project_search, include_archived=include_archived, ) @classmethod def delete( cls, dataset_name: str, version_name: Optional[str] = None, project_name: Optional[str] = None, *, force: bool = False, ) -> bool: """ Delete a dataset or a specific dataset version. :param dataset_name: Dataset name to delete (required) :param version_name: Version name to delete. When omitted, the entire dataset is removed :param project_name: Optional project context when resolving by name :param force: Force deletion even when there are protections :return: True when deletion completes successfully """ session = Session() project_id = get_existing_project(session, project_name) if project_name else None ds = HyperDatasetManagementBackend.get_dataset(name=dataset_name, project_id=project_id) if not ds: return False if version_name: version_id = HyperDatasetManagementBackend.get_version(dataset_id=ds.id, version_name=version_name) if not version_id: return False return HyperDatasetManagementBackend.delete_dataset_version(version_id=version_id, force=force) return HyperDatasetManagementBackend.delete_dataset(dataset_id=ds.id, delete_all_versions=True, force=force) @classmethod def _result_class(cls) -> Type["HyperDatasetManagement"]: if cls is HyperDatasetManagement: from .core import HyperDataset # Local import to avoid circular dependency return HyperDataset return cls
HyperDatasetManagement
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/oracle/vector.py
{ "start": 2504, "end": 5584 }
class ____: """Define the configuration for Oracle VECTOR Index. See :ref:`oracle_vector_datatype` for background. .. versionadded:: 2.0.41 :param index_type: Enum value from :class:`.VectorIndexType` Specifies the indexing method. For HNSW, this must be :attr:`.VectorIndexType.HNSW`. :param distance: Enum value from :class:`.VectorDistanceType` specifies the metric for calculating distance between VECTORS. :param accuracy: interger. Should be in the range 0 to 100 Specifies the accuracy of the nearest neighbor search during query execution. :param parallel: integer. Specifies degree of parallelism. :param hnsw_neighbors: interger. Should be in the range 0 to 2048. Specifies the number of nearest neighbors considered during the search. The attribute :attr:`.VectorIndexConfig.hnsw_neighbors` is HNSW index specific. :param hnsw_efconstruction: integer. Should be in the range 0 to 65535. Controls the trade-off between indexing speed and recall quality during index construction. The attribute :attr:`.VectorIndexConfig.hnsw_efconstruction` is HNSW index specific. :param ivf_neighbor_partitions: integer. Should be in the range 0 to 10,000,000. Specifies the number of partitions used to divide the dataset. The attribute :attr:`.VectorIndexConfig.ivf_neighbor_partitions` is IVF index specific. :param ivf_sample_per_partition: integer. Should be between 1 and ``num_vectors / neighbor partitions``. Specifies the number of samples used per partition. The attribute :attr:`.VectorIndexConfig.ivf_sample_per_partition` is IVF index specific. :param ivf_min_vectors_per_partition: integer. From 0 (no trimming) to the total number of vectors (results in 1 partition). Specifies the minimum number of vectors per partition. The attribute :attr:`.VectorIndexConfig.ivf_min_vectors_per_partition` is IVF index specific. 
""" index_type: VectorIndexType = VectorIndexType.HNSW distance: Optional[VectorDistanceType] = None accuracy: Optional[int] = None hnsw_neighbors: Optional[int] = None hnsw_efconstruction: Optional[int] = None ivf_neighbor_partitions: Optional[int] = None ivf_sample_per_partition: Optional[int] = None ivf_min_vectors_per_partition: Optional[int] = None parallel: Optional[int] = None def __post_init__(self): self.index_type = VectorIndexType(self.index_type) for field in [ "hnsw_neighbors", "hnsw_efconstruction", "ivf_neighbor_partitions", "ivf_sample_per_partition", "ivf_min_vectors_per_partition", "parallel", "accuracy", ]: value = getattr(self, field) if value is not None and not isinstance(value, int): raise TypeError( f"{field} must be an integer if" f"provided, got {type(value).__name__}" )
VectorIndexConfig
python
apache__airflow
providers/fab/tests/unit/fab/auth_manager/models/test_anonymous_user.py
{ "start": 908, "end": 1226 }
class ____: def test_roles(self): roles = ["role1"] user = AnonymousUser() user.roles = roles assert user.roles == roles def test_perms(self): perms = {"perms1"} user = AnonymousUser() user._perms = perms assert user.perms == perms
TestAnonymousUser
python
huggingface__transformers
src/transformers/models/informer/modular_informer.py
{ "start": 21781, "end": 30676 }
class ____(TimeSeriesTransformerModel): def __init__(self, config: InformerConfig): PreTrainedModel.__init__(self, config) if config.scaling == "mean" or config.scaling is True: self.scaler = InformerMeanScaler(config) elif config.scaling == "std": self.scaler = InformerStdScaler(config) else: self.scaler = InformerNOPScaler(config) if config.num_static_categorical_features > 0: self.embedder = InformerFeatureEmbedder( cardinalities=config.cardinality, embedding_dims=config.embedding_dimension, ) # transformer encoder-decoder and mask initializer self.encoder = InformerEncoder(config) self.decoder = InformerDecoder(config) # Initialize weights and apply final processing self.post_init() def forward(self, **super_kwargs): r""" past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`): Past values of the time series, that serve as context in order to predict the future. The sequence size of this tensor must be larger than the `context_length` of the model, since the model will use the larger size to construct lag features, i.e. additional values from the past which are added in order to serve as "extra context". The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of the past. The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags). Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`. For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of variates in the time series per time step. 
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`): Required time features, which the model internally will add to `past_values`. These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These could also be so-called "age" features, which basically help the model know "at which point in life" a time-series is. Age features have small values for distant past time steps and increase monotonically the more we approach the current time step. Holiday features are also a good example of time features. These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where the position encodings are learned from scratch internally as parameters of the model, the Time Series Transformer requires to provide additional time features. The Time Series Transformer only learns additional embeddings for `static_categorical_features`. Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features must but known at prediction time. The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`. past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*): Optional static categorical features for which the model will learn an embedding, which it will add to the values of the time series. Static categorical features are features which have the same value for all time steps (static over time). 
A typical example of a static categorical feature is a time series ID. static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*): Optional static real features which the model will add to the values of the time series. Static real features are features which have the same value for all time steps (static over time). A typical example of a static real feature is promotion information. future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*): Future values of the time series, that serve as labels for the model. The `future_values` is what the Transformer needs during training to learn to output, given the `past_values`. The sequence length here is equal to `prediction_length`. See the demo notebook and code snippets for details. Optionally, during training any missing values need to be replaced with zeros and indicated via the `future_observed_mask`. For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of variates in the time series per time step. future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`): Required time features for the prediction window, which the model internally will add to `future_values`. These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These could also be so-called "age" features, which basically help the model know "at which point in life" a time-series is. Age features have small values for distant past time steps and increase monotonically the more we approach the current time step. Holiday features are also a good example of time features. These features serve as the "positional encodings" of the inputs. 
So contrary to a model like BERT, where the position encodings are learned from scratch internally as parameters of the model, the Time Series Transformer requires to provide additional time features. The Time Series Transformer only learns additional embeddings for `static_categorical_features`. Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features must but known at prediction time. The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. Examples: ```python >>> from huggingface_hub import hf_hub_download >>> import torch >>> from transformers import InformerModel >>> file = hf_hub_download( ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset" ... ) >>> batch = torch.load(file) >>> model = InformerModel.from_pretrained("huggingface/informer-tourism-monthly") >>> # during training, one provides both past and future values >>> # as well as possible additional features >>> outputs = model( ... past_values=batch["past_values"], ... past_time_features=batch["past_time_features"], ... past_observed_mask=batch["past_observed_mask"], ... static_categorical_features=batch["static_categorical_features"], ... static_real_features=batch["static_real_features"], ... future_values=batch["future_values"], ... future_time_features=batch["future_time_features"], ... ) >>> last_hidden_state = outputs.last_hidden_state ```""" super().forward(**super_kwargs)
InformerModel
python
jina-ai__jina
jina/serve/monitoring.py
{ "start": 173, "end": 1739 }
class ____(Summary): """ This is a small wrapper around prometheus Summary that allow to deprecate an old metrics by renaming it. """ def __init__( self, name: str, documentation: str, labelnames: Iterable[str] = (), namespace: str = '', subsystem: str = '', unit: str = '', registry: Optional[CollectorRegistry] = REGISTRY, _labelvalues: Optional[Sequence[str]] = None, old_name: str = None, ): """ :param old_name: name of the metric you want to deprecat :param kwargs: the rest of argument for creating your Summary # noqa: DAR102 # noqa: DAR101 """ super().__init__( name, documentation, labelnames, namespace, subsystem, unit, registry, _labelvalues, ) self._old_name = ( _build_full_name(self._type, old_name, namespace, subsystem, unit) if old_name else None ) def collect(self): metric = self._get_metric() for suffix, labels, value, timestamp, exemplar in self._samples(): metric.add_sample(self._name + suffix, labels, value, timestamp, exemplar) if self._old_name: # here this is the hack to inject the old metrics names metric.add_sample( self._old_name + suffix, labels, value, timestamp, exemplar ) return [metric]
_SummaryDeprecated
python
ApeWorX__ape
src/ape/managers/converters.py
{ "start": 2454, "end": 2705 }
class ____(ConverterAPI): def is_convertible(self, value: Any) -> bool: return isinstance(value, str) and not is_0x_prefixed(value) and value.isnumeric() def convert(self, value: str) -> int: return int(value)
StringIntConverter
python
pytorch__pytorch
torch/_functorch/autograd_function.py
{ "start": 23162, "end": 27174 }
class ____(WrappedCtx): _pt_reserved_attrs = ( "_pt_saved_tensors_bdims", "_pt_current_level", *WrappedCtx._pt_reserved_attrs, ) def __init__(self, ctx, current_level): super().__init__(ctx) self._pt_saved_tensors_bdims = () self._pt_current_level = current_level def save_for_backward(self, *tensors): unwrapped_tensors, bdims = unwrap_batched(tensors, self._pt_current_level) self._pt_inner_ctx.save_for_backward(*unwrapped_tensors) self._pt_saved_tensors_bdims = bdims def save_for_forward(self, *tensors): unwrapped_tensors, bdims = unwrap_batched(tensors, self._pt_current_level) self._pt_inner_ctx.save_for_forward(*unwrapped_tensors) self._pt_saved_tensors_bdims = bdims def reductify( grad_input, grad_input_bdim, input_bdim, batch_size, target_shape_without_bdim_to_reduce_to=None, ): if not isinstance(grad_input, tuple): grad_input = (grad_input,) if not isinstance(grad_input_bdim, tuple): grad_input_bdim = (grad_input_bdim,) if not isinstance(input_bdim, tuple): input_bdim = (input_bdim,) if target_shape_without_bdim_to_reduce_to is None: target_shape_without_bdim_to_reduce_to = len(grad_input) * (None,) result = tuple( reductify_leaf(gi, gi_bdim, i_bdim, batch_size, maybe_ishape) for gi, gi_bdim, i_bdim, maybe_ishape in zip( grad_input, grad_input_bdim, input_bdim, target_shape_without_bdim_to_reduce_to, ) ) return result def reductify_leaf( grad_input, grad_input_bdim, input_bdim, batch_size, target_shape_without_bdim_to_reduce_to=None, ): if grad_input is None: return None if grad_input_bdim is None and input_bdim is None: return grad_input if grad_input_bdim is not None and input_bdim is None: return grad_input.sum(grad_input_bdim) # NOTE: [Why can't we rely on autograd to reduce expanded gradients?] # For reverse-mode AD, # given a grad_input and input, it is valid for the user to return a # grad_input that has a broadcasted shape when compared to the input. # In this situation, autograd automatically reduces the grad_input to # the shape of the input. 
# # However, when input_bdim is not None, we have problems. # # [example 1] # grad_input: Tensor[3, 4], input: Tensor[B, 4] # We can expand grad_input to Tensor[B, 3, 4], but that isn't broadcastable # from [B, 4]. # # [example 2] # grad_input: Tensor[3, B, 4], input: Tensor[B, 4] # We can swizzle grad_input to Tensor[B, 3, 4], but that isn't broadcastable # from [B, 4]. # # This means that we need to also reduce the grad_input to the shape of the # input. This behavior is controlled by the `target_shape_without_bdim_to_reduce_to` flag; # if not-None then we do the reducing manually, otherwise, we do not do a reduction. assert input_bdim is not None if grad_input_bdim is None: grad_input = grad_input.unsqueeze(input_bdim) new_shape = list(grad_input.shape) new_shape[input_bdim] = batch_size grad_input = grad_input.expand(new_shape) grad_input_bdim = input_bdim if target_shape_without_bdim_to_reduce_to is not None: return vmap( torch.Tensor.sum_to_size, in_dims=(grad_input_bdim, None), out_dims=input_bdim, )(grad_input, target_shape_without_bdim_to_reduce_to) if input_bdim != grad_input_bdim: grad_input = grad_input.movedim(grad_input_bdim, input_bdim) return grad_input def autograd_function_forward_rewritten(original_forward, original_setup_context): def new_forward(ctx, *args, **kwargs): output = original_forward(*args, **kwargs) original_setup_context(ctx, args, output) return output return new_forward
CtxCustomSave
python
justquick__django-activity-stream
actstream/feeds.py
{ "start": 581, "end": 3999 }
class ____: """ Abstract base class for all stream rendering. Supports hooks for fetching streams and formatting actions. """ def get_stream(self, *args, **kwargs): """ Returns a stream method to use. """ raise NotImplementedError def get_object(self, *args, **kwargs): """ Returns the object (eg user or actor) that the stream is for. """ raise NotImplementedError def items(self, *args, **kwargs): """ Returns a queryset of Actions to use based on the stream method and object. """ return self.get_stream()(self.get_object(*args, **kwargs)) def get_uri(self, action, obj=None, date=None): """ Returns an RFC3987 IRI ID for the given object, action and date. """ if date is None: date = action.timestamp date = date.strftime('%Y-%m-%d') return 'tag:{},{}:{}'.format(Site.objects.get_current().domain, date, self.get_url(action, obj, False)) def get_url(self, action, obj=None, domain=True): """ Returns an RFC3987 IRI for a HTML representation of the given object, action. If domain is true, the current site's domain will be added. """ if not obj: url = reverse('actstream_detail', None, (action.pk,)) elif hasattr(obj, 'get_absolute_url'): url = obj.get_absolute_url() else: ctype = ContentType.objects.get_for_model(obj) url = reverse('actstream_actor', None, (ctype.pk, obj.pk)) if domain: return add_domain(Site.objects.get_current().domain, url) return url def format(self, action): """ Returns a formatted dictionary for the given action. """ item = { 'id': self.get_uri(action), 'url': self.get_url(action), 'verb': action.verb, 'published': rfc3339_date(action.timestamp), 'actor': self.format_actor(action), 'title': str(action), } if action.description: item['content'] = action.description if action.target: item['target'] = self.format_target(action) if action.action_object: item['object'] = self.format_action_object(action) return item def format_item(self, action, item_type='actor'): """ Returns a formatted dictionary for an individual item based on the action and item_type. 
""" obj = getattr(action, item_type) return { 'id': self.get_uri(action, obj), 'url': self.get_url(action, obj), 'objectType': ContentType.objects.get_for_model(obj).name, 'displayName': str(obj) } def format_actor(self, action): """ Returns a formatted dictionary for the actor of the action. """ return self.format_item(action) def format_target(self, action): """ Returns a formatted dictionary for the target of the action. """ return self.format_item(action, 'target') def format_action_object(self, action): """ Returns a formatted dictionary for the action object of the action. """ return self.format_item(action, 'action_object')
AbstractActivityStream
python
ray-project__ray
rllib/utils/metrics/stats.py
{ "start": 427, "end": 44884 }
class ____: """A container class holding a number of values and executing reductions over them. The individual values in a Stats object may be of any type, for example python int or float, numpy arrays, or more complex structured (tuple, dict) and are stored in a list under `self.values`. This class is not meant to be interfaced with directly from application code. Instead, use `MetricsLogger` to log and manipulate Stats. Stats can be used to store metrics of the same type over time, for example a loss or a learning rate, and to reduce all stored values applying a certain reduction mechanism (for example "mean" or "sum"). Available reduction mechanisms are: - "mean" using EMA with a configurable EMA coefficient. - "mean" using a sliding window (over the last n stored values). - "max/min" with an optional sliding window (over the last n stored values). - "sum" with an optional sliding window (over the last n stored values). - None: Simply store all logged values to an ever-growing list. Through the `reduce()` API, one of the above-mentioned reduction mechanisms will be executed on `self.values`. """ def __init__( self, init_values: Optional[Any] = None, reduce: Optional[str] = "mean", percentiles: Union[List[int], bool] = False, reduce_per_index_on_aggregate: bool = False, window: Optional[Union[int, float]] = None, ema_coeff: Optional[float] = None, clear_on_reduce: bool = False, throughput: Union[bool, float] = False, throughput_ema_coeff: Optional[float] = None, ): """Initializes a Stats instance. Args: init_values: Optional initial values to be placed into `self.values`. If None, `self.values` will start empty. If percentiles is True, values must be ordered if provided. reduce: The name of the reduce method to be used. Allowed are "mean", "min", "max", and "sum". Use None to apply no reduction method (leave `self.values` as-is when reducing, except for shortening it to `window`). 
Note that if both `reduce` and `window` are None, the user of this Stats object needs to apply some caution over the values list not growing infinitely. percentiles: If reduce is `None`, we can compute the percentiles of the values list given by `percentiles`. Defaults to [0, 50, 75, 90, 95, 99, 100] if set to True. When using percentiles, a window must be provided. This window should be chosen carfully. RLlib computes exact percentiles and the computational complexity is O(m*n*log(n/m)) where n is the window size and m is the number of parallel metrics loggers invovled (for example, m EnvRunners). To be safe, choose a window < 1M and less than 1000 Stats objects to aggregate. See #52963 for more details. window: An optional window size to reduce over. If `window` is not None, then the reduction operation is only applied to the most recent `windows` items, and - after reduction - the values list is shortened to hold at most `window` items (the most recent ones). Must be None if `ema_coeff` is not None. If `window` is None (and `ema_coeff` is None), reduction must not be "mean". reduce_per_index_on_aggregate: If True, when merging Stats objects, we reduce incoming values per index such that the new value at index `n` will be the reduced value of all incoming values at index `n`. If False, when reducing `n` Stats, the first `n` merged values will be the reduced value of all incoming values at index `0`, the next `n` merged values will be the reduced values of all incoming values at index `1`, etc. ema_coeff: An optional EMA coefficient to use if reduce is "mean" and no `window` is provided. Note that if both `window` and `ema_coeff` are provided, an error is thrown. Also, if `ema_coeff` is provided, `reduce` must be "mean". The reduction formula for EMA performed by Stats is: EMA(t1) = (1.0 - ema_coeff) * EMA(t0) + ema_coeff * new_value clear_on_reduce: If True, the Stats object will reset its entire values list to an empty one after `self.reduce()` is called. 
However, it will then return from the `self.reduce()` call a new Stats object with the properly reduced (not completely emptied) new values. Setting this to True is useful for cases, in which the internal values list would otherwise grow indefinitely, for example if reduce is None and there is no `window` provided. throughput: If True, track a throughput estimate together with this Stats. This is only supported for `reduce=sum` and `clear_on_reduce=False` metrics (aka. "lifetime counts"). The `Stats` then keeps track of the time passed between two consecutive calls to `reduce()` and update its throughput estimate. The current throughput estimate can be obtained through: `throughput_per_sec = Stats.peek(throughput=True)`. If a float, track throughput and also set current throughput estimate to the given value. throughput_ema_coeff: An optional EMA coefficient to use for throughput tracking. Only used if throughput=True. """ # Thus far, we only support mean, max, min, and sum. if reduce not in [None, "mean", "min", "max", "sum", "percentiles"]: raise ValueError( "`reduce` must be one of `mean|min|max|sum|percentiles` or None!" ) # One or both window and ema_coeff must be None. if window is not None and ema_coeff is not None: raise ValueError("Only one of `window` or `ema_coeff` can be specified!") # If `ema_coeff` is provided, `reduce` must be "mean". if ema_coeff is not None and reduce != "mean": raise ValueError( "`ema_coeff` arg only allowed (not None) when `reduce=mean`!" ) if percentiles is not False: if reduce is not None: raise ValueError( "`reduce` must be `None` when `percentiles` is not `False`!" ) if window in (None, float("inf")): raise ValueError( "A window must be specified when reduce is 'percentiles'!" ) if reduce_per_index_on_aggregate is not False: raise ValueError( f"`reduce_per_index_on_aggregate` ({reduce_per_index_on_aggregate})" f" must be `False` when `percentiles` is not `False`!" 
) if percentiles is True: percentiles = [0, 50, 75, 90, 95, 99, 100] else: if type(percentiles) not in (bool, list): raise ValueError("`percentiles` must be a list or bool!") if isinstance(percentiles, list): if not all(isinstance(p, (int, float)) for p in percentiles): raise ValueError( "`percentiles` must contain only ints or floats!" ) if not all(0 <= p <= 100 for p in percentiles): raise ValueError( "`percentiles` must contain only values between 0 and 100!" ) self._percentiles = percentiles # If `window` is explicitly set to inf, `clear_on_reduce` must be True. self._inf_window = window in [None, float("inf")] # If `window` is set to inf, `clear_on_reduce` must be True. # Otherwise, we risk a memory leak. if self._inf_window and not clear_on_reduce and reduce is None: raise ValueError( "When using an infinite window without reduction, `clear_on_reduce` must " "be set to True!" ) # If reduce=mean AND window=ema_coeff=None, we use EMA by default with a coeff # of 0.01 (we do NOT support infinite window sizes for mean as that would mean # to keep data in the cache forever). if reduce == "mean" and self._inf_window and ema_coeff is None: ema_coeff = 0.01 self._reduce_method = reduce self._window = window self._ema_coeff = ema_coeff if ( self._reduce_method not in ["mean", "sum", "min", "max"] and reduce_per_index_on_aggregate ): raise ValueError( "reduce_per_index_on_aggregate is only supported for mean, sum, min, and max reduction!" ) self._reduce_per_index_on_aggregate = reduce_per_index_on_aggregate # Timing functionality (keep start times per thread). self._start_times = defaultdict(lambda: None) # Simply store ths flag for the user of this class. self._clear_on_reduce = clear_on_reduce self._has_returned_zero = False # On each `.reduce()` call, we store the result of this call in # self._last_reduce. self._last_reduced = [np.nan] # The ID of this Stats instance. 
self.id_ = str(uuid.uuid4()) self._prev_merge_values = defaultdict(int) self._throughput_ema_coeff = throughput_ema_coeff self._throughput_stats = None if throughput is not False: self._throughput_stats = Stats( # We have to check for bool here because in Python, bool is a subclass # of int. init_values=[throughput] if ( isinstance(throughput, (int, float)) and not isinstance(throughput, bool) ) else None, reduce="mean", ema_coeff=throughput_ema_coeff, window=None, clear_on_reduce=False, throughput=False, throughput_ema_coeff=None, ) if init_values is not None: self._last_throughput_measure_time = time.perf_counter() else: self._last_throughput_measure_time = ( -1 ) # Track last push time for throughput calculation # The actual, underlying data in this Stats object. self.values: Union[List, deque.Deque] = None self._set_values(force_list(init_values)) self._is_tensor = False # Track if new values were pushed since last reduce if init_values is not None: self._has_new_values = True else: self._has_new_values = False def check_value(self, value: Any) -> None: # If we have a reduce method, value should always be a scalar # If we don't reduce, we can keep track of value as it is if self._reduce_method is not None: if isinstance(value, np.ndarray) and value.shape == (): return elif torch and torch.is_tensor(value): self._is_tensor = True if tuple(value.shape) == (): return elif type(value) not in (list, tuple, deque): return raise ValueError( f"Value ({value}) is required to be a scalar when using a reduce " "method!" ) def push(self, value: Any) -> None: """Pushes a value into this Stats object. Args: value: The value to be pushed. Can be of any type. 
""" self.check_value(value) # If throughput tracking is enabled, calculate it based on time between pushes if self.has_throughput: self._recompute_throughput(value) # Handle different reduction methods if self._window is not None: # For windowed operations, append to values and trim if needed self.values.append(value) if len(self.values) > self._window: self.values.popleft() else: # For non-windowed operations, use _reduced_values if len(self.values) == 0: self._set_values([value]) else: self.values.append(value) _, values = self._reduced_values() self._set_values(values) # Mark that we have new values self._has_new_values = True def __enter__(self) -> "Stats": """Called when entering a context (with which users can measure a time delta). Returns: This Stats instance (self), unless another thread has already entered (and not exited yet), in which case a copy of `self` is returned. This way, the second thread(s) cannot mess with the original Stat's (self) time-measuring. This also means that only the first thread to __enter__ actually logs into `self` and the following threads' measurements are discarded (logged into a non-referenced shim-Stats object, which will simply be garbage collected). """ # In case another thread already is measuring this Stats (timing), simply ignore # the "enter request" and return a clone of `self`. thread_id = threading.get_ident() self._start_times[thread_id] = time.perf_counter() return self def __exit__(self, exc_type, exc_value, tb) -> None: """Called when exiting a context (with which users can measure a time delta).""" thread_id = threading.get_ident() assert self._start_times[thread_id] is not None time_delta_s = time.perf_counter() - self._start_times[thread_id] self.push(time_delta_s) del self._start_times[thread_id] def peek(self, compile: bool = True) -> Union[Any, List[Any]]: """Returns the result of reducing the internal values list. Note that this method does NOT alter the internal values list in this process. 
Thus, users can call this method to get an accurate look at the reduced value(s) given the current internal values list. Args: compile: If True, the result is compiled into a single value if possible. Returns: The result of reducing the internal values list. """ if self._has_new_values or (not compile and not self._inf_window): reduced_value, reduced_values = self._reduced_values() if not compile and not self._inf_window: return reduced_values if compile and self._reduce_method: return reduced_value[0] if compile and self._percentiles is not False: return compute_percentiles(reduced_values, self._percentiles) return reduced_value else: return_value = self._last_reduced if compile: # We don't need to check for self._reduce_method or percentiles here # because we only store the reduced value if there is a reduce method. return_value = return_value[0] return return_value @property def throughput(self) -> float: """Returns the current throughput estimate per second. Raises: ValueError: If throughput tracking is not enabled for this Stats object. Returns: The current throughput estimate per second. """ if not self.has_throughput: raise ValueError("Throughput tracking is not enabled for this Stats object") # We can always return the first value here because throughput is a single value return self._throughput_stats.peek() @property def has_throughput(self) -> bool: """Returns whether this Stats object tracks throughput. Returns: True if this Stats object has throughput tracking enabled, False otherwise. """ return self._throughput_stats is not None def reduce(self, compile: bool = True) -> Union[Any, List[Any]]: """Reduces the internal values list according to the constructor settings. Thereby, the internal values list is changed (note that this is different from `peek()`, where the internal list is NOT changed). 
See the docstring of this class for details on the reduction logic applied to the values list, based on the constructor settings, such as `window`, `reduce`, etc.. Args: compile: If True, the result is compiled into a single value if possible. If it is not possible, the result is a list of values. If False, the result is a list of one or more values. Returns: The reduced value (can be of any type, depending on the input values and reduction method). """ len_before_reduce = len(self) if self._has_new_values: # Only calculate and update history if there were new values pushed since # last reduce reduced, reduced_internal_values_list = self._reduced_values() # `clear_on_reduce` -> Clear the values list. if self._clear_on_reduce: self._set_values([]) else: self._set_values(reduced_internal_values_list) else: reduced_internal_values_list = None reduced = self._last_reduced reduced = self._numpy_if_necessary(reduced) # Shift historic reduced valued by one in our reduce_history. if self._reduce_method is not None: # It only makes sense to extend the history if we are reducing to a single # value. 
We need to make a copy here because the new_values_list is a # reference to the internal values list self._last_reduced = force_list(reduced.copy()) else: # If there is a window and no reduce method, we don't want to use the reduce # history to return reduced values in other methods self._has_new_values = True if compile and self._reduce_method is not None: assert ( len(reduced) == 1 ), f"Reduced values list must contain exactly one value, found {reduced}" reduced = reduced[0] if not compile and not self._inf_window: if reduced_internal_values_list is None: _, reduced_internal_values_list = self._reduced_values() return_values = self._numpy_if_necessary( reduced_internal_values_list ).copy() elif compile and self._percentiles is not False: if reduced_internal_values_list is None: _, reduced_internal_values_list = self._reduced_values() return_values = compute_percentiles( reduced_internal_values_list, self._percentiles ) else: return_values = reduced if compile: return return_values else: if len_before_reduce == 0: # return_values will be be 0 if we reduce a sum over zero elements # But we don't want to create such a zero out of nothing for our new # Stats object that we return here return Stats.similar_to(self) return Stats.similar_to(self, init_values=return_values) def merge_on_time_axis(self, other: "Stats") -> None: """Merges another Stats object's values into this one along the time axis. Args: other: The other Stats object to merge values from. """ self.values.extend(other.values) # Mark that we have new values since we modified the values list self._has_new_values = True def merge_in_parallel(self, *others: "Stats") -> None: """Merges all internal values of `others` into `self`'s internal values list. Thereby, the newly incoming values of `others` are treated equally with respect to each other as well as with respect to the internal values of self. 
Use this method to merge other `Stats` objects, which resulted from some parallelly executed components, into this one. For example: n Learner workers all returning a loss value in the form of `{"total_loss": [some value]}`. The following examples demonstrate the parallel merging logic for different reduce- and window settings: Args: others: One or more other Stats objects that need to be parallely merged into `self, meaning with equal weighting as the existing values in `self`. """ win = self._window or float("inf") # If any of the value lists have a length of 0 or if there is only one value and # it is nan, we skip stats_to_merge = [ s for s in [self, *others] if not ( len(s) == 0 or ( len(s) == 1 and np.all(np.isnan(self._numpy_if_necessary(s.values))) ) ) ] # If there is only one stat to merge, and it is the same as self, return. if len(stats_to_merge) == 0: # If none of the stats have values, return. return elif len(stats_to_merge) == 1: if stats_to_merge[0] == self: # If no incoming stats have values, return. return else: # If there is only one stat with values, and it's incoming, copy its # values. self.values = stats_to_merge[0].values return # Take turns stepping through `self` and `*others` values, thereby moving # backwards from last index to beginning and will up the resulting values list. # Stop as soon as we reach the window size. new_values = [] tmp_values = [] if self._percentiles is not False: # Use heapq to sort values (assumes that the values are already sorted) # and then pick the correct percentiles lists_to_merge = [list(self.values), *[list(o.values) for o in others]] merged = list(heapq.merge(*lists_to_merge)) self._set_values(merged) else: # Loop from index=-1 backward to index=start until our new_values list has # at least a len of `win`. for i in range(1, max(map(len, stats_to_merge)) + 1): # Per index, loop through all involved stats, including `self` and add # to `tmp_values`. 
for stats in stats_to_merge: if len(stats) < i: continue tmp_values.append(stats.values[-i]) # Now reduce across `tmp_values` based on the reduce-settings of this # Stats. if self._reduce_per_index_on_aggregate: n_values = 1 else: n_values = len(tmp_values) if self._ema_coeff is not None: new_values.extend([np.nanmean(tmp_values)] * n_values) elif self._reduce_method is None: new_values.extend(tmp_values) elif self._reduce_method == "sum": # We add [sum(tmp_values) / n_values] * n_values to the new values # list instead of tmp_values, because every incoming element should # have the same weight. added_sum = self._reduced_values(values=tmp_values)[0][0] new_values.extend([added_sum / n_values] * n_values) if self.has_throughput: self._recompute_throughput(added_sum) else: new_values.extend( self._reduced_values(values=tmp_values)[0] * n_values ) tmp_values.clear() if len(new_values) >= win: new_values = new_values[:win] break self._set_values(list(reversed(new_values))) # Mark that we have new values since we modified the values list self._has_new_values = True def clear_throughput(self) -> None: """Clears the throughput Stats, if applicable and `self` has throughput. Also resets `self._last_throughput_measure_time` to -1 such that the Stats object has to create a new timestamp first, before measuring any new throughput values. """ if self.has_throughput: self._throughput_stats._set_values([]) self._last_throughput_measure_time = -1 def _recompute_throughput(self, value) -> None: """Recomputes the current throughput value of this Stats instance.""" # Make sure this Stats object does measure throughput. assert self.has_throughput # Take the current time stamp. current_time = time.perf_counter() # Check, whether we have a previous timestamp (non -1). if self._last_throughput_measure_time >= 0: # Compute the time delta. time_diff = current_time - self._last_throughput_measure_time # Avoid divisions by zero. 
if time_diff > 0: # Push new throughput value into our throughput stats object. self._throughput_stats.push(value / time_diff) # Update the time stamp of the most recent throughput computation (this one). self._last_throughput_measure_time = current_time @staticmethod def _numpy_if_necessary(values): # Torch tensor handling. Convert to CPU/numpy first. if torch and len(values) > 0 and torch.is_tensor(values[0]): # Convert all tensors to numpy values. values = [v.cpu().numpy() for v in values] return values def __len__(self) -> int: """Returns the length of the internal values list.""" return len(self.values) def __repr__(self) -> str: win_or_ema = ( f"; win={self._window}" if self._window else f"; ema={self._ema_coeff}" if self._ema_coeff else "" ) return ( f"Stats({self.peek()}; len={len(self)}; " f"reduce={self._reduce_method}{win_or_ema})" ) def __int__(self): if self._reduce_method is None: raise ValueError( "Cannot convert Stats object with reduce method `None` to int because " "it can not be reduced to a single value." ) else: return int(self.peek()) def __float__(self): if self._reduce_method is None: raise ValueError( "Cannot convert Stats object with reduce method `None` to float " "because it can not be reduced to a single value." 
) else: return float(self.peek()) def __eq__(self, other): if self._reduce_method is None: self._comp_error("__eq__") else: return float(self) == float(other) def __le__(self, other): if self._reduce_method is None: self._comp_error("__le__") else: return float(self) <= float(other) def __ge__(self, other): if self._reduce_method is None: self._comp_error("__ge__") else: return float(self) >= float(other) def __lt__(self, other): if self._reduce_method is None: self._comp_error("__lt__") else: return float(self) < float(other) def __gt__(self, other): if self._reduce_method is None: self._comp_error("__gt__") else: return float(self) > float(other) def __add__(self, other): if self._reduce_method is None: self._comp_error("__add__") else: return float(self) + float(other) def __sub__(self, other): if self._reduce_method is None: self._comp_error("__sub__") else: return float(self) - float(other) def __mul__(self, other): if self._reduce_method is None: self._comp_error("__mul__") else: return float(self) * float(other) def __format__(self, fmt): if self._reduce_method is None: raise ValueError( "Cannot format Stats object with reduce method `None` because it can " "not be reduced to a single value." ) else: return f"{float(self):{fmt}}" def _comp_error(self, comp): raise ValueError( f"Cannot {comp} Stats object with reduce method `None` to other " "because it can not be reduced to a single value." ) def get_state(self) -> Dict[str, Any]: state = { # Make sure we don't return any tensors here. 
"values": convert_to_numpy(self.values), "reduce": self._reduce_method, "percentiles": self._percentiles, "reduce_per_index_on_aggregate": self._reduce_per_index_on_aggregate, "window": self._window, "ema_coeff": self._ema_coeff, "clear_on_reduce": self._clear_on_reduce, "_last_reduced": self._last_reduced, "_is_tensor": self._is_tensor, } if self._throughput_stats is not None: state["throughput_stats"] = self._throughput_stats.get_state() return state @staticmethod def from_state(state: Dict[str, Any]) -> "Stats": # If `values` could contain tensors, don't reinstate them (b/c we don't know # whether we are on a supported device). values = state["values"] if "_is_tensor" in state and state["_is_tensor"]: values = [] if "throughput_stats" in state: throughput_stats = Stats.from_state(state["throughput_stats"]) stats = Stats( values, reduce=state["reduce"], percentiles=state.get("percentiles", False), reduce_per_index_on_aggregate=state.get( "reduce_per_index_on_aggregate", False ), window=state["window"], ema_coeff=state["ema_coeff"], clear_on_reduce=state["clear_on_reduce"], throughput=throughput_stats.peek(), throughput_ema_coeff=throughput_stats._ema_coeff, ) elif state.get("_throughput", False): # Older checkpoints have a _throughput key that is boolean or # a float (throughput value). They don't have a throughput_ema_coeff # so we use a default of 0.05. # TODO(Artur): Remove this after a few Ray releases. 
stats = Stats( values, reduce=state["reduce"], percentiles=state.get("percentiles", False), window=state["window"], ema_coeff=state["ema_coeff"], clear_on_reduce=state["clear_on_reduce"], throughput=state["_throughput"], throughput_ema_coeff=0.05, ) else: stats = Stats( values, reduce=state["reduce"], percentiles=state.get("percentiles", False), window=state["window"], ema_coeff=state["ema_coeff"], clear_on_reduce=state["clear_on_reduce"], throughput=False, throughput_ema_coeff=None, ) # Compatibility to old checkpoints where a reduce sometimes resulted in a single # values instead of a list such that the history would be a list of integers # instead of a list of lists. if "_hist" in state: # TODO(Artur): Remove this after a few Ray releases. if not isinstance(state["_hist"][0], list): state["_hist"] = list(map(lambda x: [x], state["_hist"])) stats._last_reduced = state["_hist"][-1] else: stats._last_reduced = state.get("_last_reduced", [np.nan]) return stats @staticmethod def similar_to( other: "Stats", init_values: Optional[Any] = None, ) -> "Stats": """Returns a new Stats object that's similar to `other`. "Similar" here means it has the exact same settings (reduce, window, ema_coeff, etc..). The initial values of the returned `Stats` are empty by default, but can be set as well. Args: other: The other Stats object to return a similar new Stats equivalent for. init_value: The initial value to already push into the returned Stats. Returns: A new Stats object similar to `other`, with the exact same settings and maybe a custom initial value (if provided; otherwise empty). 
""" stats = Stats( init_values=init_values, reduce=other._reduce_method, percentiles=other._percentiles, reduce_per_index_on_aggregate=other._reduce_per_index_on_aggregate, window=other._window, ema_coeff=other._ema_coeff, clear_on_reduce=other._clear_on_reduce, throughput=other._throughput_stats.peek() if other.has_throughput else False, throughput_ema_coeff=other._throughput_ema_coeff, ) stats.id_ = other.id_ stats._last_reduced = other._last_reduced return stats def _set_values(self, new_values): # For stats with window, use a deque with maxlen=window. # This way, we never store more values than absolutely necessary. if not self._inf_window: self.values = deque(new_values, maxlen=self._window) # For infinite windows, use `new_values` as-is (a list). else: self.values = new_values self._has_new_values = True def _reduced_values(self, values=None) -> Tuple[Any, Any]: """Runs a non-committed reduction procedure on given values (or `self.values`). Note that this method does NOT alter any state of `self` or the possibly provided list of `values`. It only returns new values as they should be adopted after a possible, actual reduction step. Args: values: The list of values to reduce. If not None, use `self.values` Returns: A tuple containing 1) the reduced values and 2) the new internal values list to be used. If there is no reduciton method, the reduced values will be the same as the values. """ values = values if values is not None else self.values # No reduction method. Return list as-is OR reduce list to len=window. if self._reduce_method is None: if self._percentiles is not False: # Sort values values = list(values) # (Artur): Numpy can sort faster than Python's built-in sort for large lists. Howoever, if we convert to an array here # and then sort, this only slightly (<2x) improved the runtime of this method, even for an internal values list of 1M values. 
values.sort() return values, values # Special case: Internal values list is empty -> return NaN or 0.0 for sum. elif len(values) == 0: if self._reduce_method in ["min", "max", "mean"] or self._has_returned_zero: # We also return np.nan if we have returned zero before. # This helps with cases where stats are cleared on reduce, but we don't want to log 0's, except for the first time. return [np.nan], [] else: return [0], [] # Do EMA (always a "mean" reduction; possibly using a window). elif self._ema_coeff is not None: # Perform EMA reduction over all values in internal values list. mean_value = values[0] for v in values[1:]: mean_value = self._ema_coeff * v + (1.0 - self._ema_coeff) * mean_value if self._inf_window: return [mean_value], [mean_value] else: return [mean_value], values # Non-EMA reduction (possibly using a window). else: # Use the numpy/torch "nan"-prefix to ignore NaN's in our value lists. if torch and torch.is_tensor(values[0]): self._is_tensor = True # Only one item in the if len(values[0].shape) == 0: reduced = values[0] else: reduce_meth = getattr(torch, "nan" + self._reduce_method) reduce_in = torch.stack(list(values)) if self._reduce_method == "mean": reduce_in = reduce_in.float() reduced = reduce_meth(reduce_in) else: reduce_meth = getattr(np, "nan" + self._reduce_method) if np.all(np.isnan(values)): # This avoids warnings for taking a mean of an empty array. reduced = np.nan else: reduced = reduce_meth(values) def safe_isnan(value): if torch and isinstance(value, torch.Tensor): return torch.isnan(value) return np.isnan(value) # Convert from numpy to primitive python types, if original `values` are # python types. 
if ( not safe_isnan(reduced) and reduced.shape == () and isinstance(values[0], (int, float)) ): if reduced.dtype in [np.int32, np.int64, np.int8, np.int16]: reduced = int(reduced) else: reduced = float(reduced) # For window=None|inf (infinite window) and reduce != mean, we don't have to # keep any values, except the last (reduced) one. if self._inf_window and self._reduce_method != "mean": # TODO (sven): What if values are torch tensors? In this case, we # would have to do reduction using `torch` above (not numpy) and only # then return the python primitive AND put the reduced new torch # tensor in the new `self.values`. return [reduced], [reduced] else: # In all other cases, keep the values that were also used for the reduce # operation. return [reduced], values @DeveloperAPI def compute_percentiles(sorted_list, percentiles): """Compute percentiles from an already sorted list. Note that this will not raise an error if the list is not sorted to avoid overhead. Args: sorted_list: A list of numbers sorted in ascending order percentiles: A list of percentile values (0-100) Returns: A dictionary mapping percentile values to their corresponding data values """ n = len(sorted_list) if n == 0: return {p: None for p in percentiles} results = {} for p in percentiles: index = (p / 100) * (n - 1) if index.is_integer(): results[p] = sorted_list[int(index)] else: lower_index = int(index) upper_index = lower_index + 1 weight = index - lower_index results[p] = ( sorted_list[lower_index] * (1 - weight) + sorted_list[upper_index] * weight ) return results @DeveloperAPI def merge_stats(base_stats: Optional[Stats], incoming_stats: List[Stats]) -> Stats: """Merges Stats objects. If `base_stats` is None, we use the first incoming Stats object as the new base Stats object. If `base_stats` is not None, we merge all incoming Stats objects into the base Stats object. Args: base_stats: The base Stats object to merge into. incoming_stats: The list of Stats objects to merge. 
Returns: The merged Stats object. """ if base_stats is None: new_root_stats = True else: new_root_stats = False # Nothing to be merged if len(incoming_stats) == 0: return base_stats if new_root_stats: # We need to deepcopy here first because stats from incoming_stats may be altered in the future base_stats = copy.deepcopy(incoming_stats[0]) base_stats.clear_throughput() # Note that we may take a mean of means here, which is not the same as a # mean of all values. In the future, we could implement a weighted mean # of means here by introducing a new Stats object that counts samples # for each mean Stats object. if len(incoming_stats) > 1: base_stats.merge_in_parallel(*incoming_stats[1:]) if ( base_stats._reduce_method == "sum" and base_stats._inf_window and base_stats._clear_on_reduce is False ): for stat in incoming_stats: base_stats._prev_merge_values[stat.id_] = stat.peek() elif len(incoming_stats) > 0: # Special case: `base_stats` is a lifetime sum (reduce=sum, # clear_on_reduce=False) -> We subtract the previous value (from 2 # `reduce()` calls ago) from all to-be-merged stats, so we don't count # twice the older sum from before. # Also, for the merged, new throughput value, we need to find out what the # actual value-delta is between before the last reduce and the current one. added_sum = 0.0 # Used in `base_stats._recompute_throughput` if applicable. if ( base_stats._reduce_method == "sum" and base_stats._inf_window and base_stats._clear_on_reduce is False ): for stat in incoming_stats: # Subtract "lifetime counts" from the Stat's values to not count # older "lifetime counts" more than once. prev_reduction = base_stats._prev_merge_values[stat.id_] new_reduction = stat.peek(compile=True) base_stats.values[-1] -= prev_reduction # Keep track of how many counts we actually gained (for throughput # recomputation). 
added_sum += new_reduction - prev_reduction base_stats._prev_merge_values[stat.id_] = new_reduction parallel_merged_stat = copy.deepcopy(incoming_stats[0]) if len(incoming_stats) > 1: # There are more than one incoming parallel others -> Merge all of # them in parallel (equal importance). parallel_merged_stat.merge_in_parallel(*incoming_stats[1:]) # Merge incoming Stats object into base Stats object on time axis # (giving incoming ones priority). if base_stats._reduce_method == "mean" and not base_stats._clear_on_reduce: # If we don't clear values, values that are not cleared would contribute # to the mean multiple times. base_stats._set_values(parallel_merged_stat.values.copy()) else: base_stats.merge_on_time_axis(parallel_merged_stat) # Keep track of throughput through the sum of added counts. if base_stats.has_throughput: base_stats._recompute_throughput(added_sum) return base_stats
Stats
python
pytorch__pytorch
torch/cuda/_sanitizer.py
{ "start": 17523, "end": 19968 }
class ____: def __init__(self) -> None: self.dataptrs_read: set[DataPtr] = set() self.dataptrs_written: set[DataPtr] = set() self.tensor_aliases: dict[DataPtr, list[str]] = {} self.outputs: set[DataPtr] = set() def _handle_argument( self, value: Any, is_write: bool, metadata_only: bool, name: Optional[str] = None, is_output: bool = False, ) -> None: if isinstance(value, torch.Tensor) and value.is_cuda: data_ptr = value.data_ptr() if is_write: self.dataptrs_written.add(data_ptr) elif not metadata_only: self.dataptrs_read.add(data_ptr) self.tensor_aliases.setdefault(data_ptr, []) if name is not None: self.tensor_aliases[data_ptr].append(name) if is_output: self.outputs.add(data_ptr) def parse_inputs( self, schema: torch.FunctionSchema, args: tuple[Any, ...], kwargs: dict[str, Any], *, is_factory: bool, ) -> None: for argument, value in zip_arguments(schema, args, kwargs): is_write = argument.alias_info is not None and argument.alias_info.is_write # A change is metadata only if it is a view or a factory function that # reads only metadata metadata_only = is_factory or ( argument.alias_info is not None and not argument.alias_info.is_write ) pytree.tree_map_( functools.partial( self._handle_argument, is_write=is_write, name=argument.name, metadata_only=metadata_only, ), value, ) def parse_outputs( self, schema: torch.FunctionSchema, outputs: Any, *, is_factory: bool ) -> None: for res, value in zip(schema.returns, (outputs,)): metadata_only = is_factory or ( res.alias_info is not None and not res.alias_info.is_write ) pytree.tree_map_( functools.partial( self._handle_argument, is_write=not metadata_only, is_output=True, metadata_only=metadata_only, ), value, )
ArgumentHandler
python
plotly__plotly.py
plotly/graph_objs/violin/box/_line.py
{ "start": 233, "end": 2955 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "violin.box" _path_str = "violin.box.line" _valid_props = {"color", "width"} @property def color(self): """ Sets the inner box plot bounding line color. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def width(self): """ Sets the inner box plot bounding line width. The 'width' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["width"] @width.setter def width(self, val): self["width"] = val @property def _prop_descriptions(self): return """\ color Sets the inner box plot bounding line color. width Sets the inner box plot bounding line width. """ def __init__(self, arg=None, color=None, width=None, **kwargs): """ Construct a new Line object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.violin.box.Line` color Sets the inner box plot bounding line color. width Sets the inner box plot bounding line width. 
Returns ------- Line """ super().__init__("line") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.violin.box.Line constructor must be a dict or an instance of :class:`plotly.graph_objs.violin.box.Line`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("width", arg, width) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Line
python
bokeh__bokeh
src/bokeh/document/events.py
{ "start": 4960, "end": 5080 }
class ____: def _session_callback_removed(self, event: SessionCallbackRemoved) -> None: ...
SessionCallbackRemovedMixin
python
dagster-io__dagster
python_modules/dagster/dagster/_core/workspace/workspace.py
{ "start": 830, "end": 1363 }
class ____: origin: Annotated[ "CodeLocationOrigin", ImportFrom("dagster._core.remote_origin"), ] code_location: Optional[ Annotated[ "CodeLocation", ImportFrom("dagster._core.remote_representation.code_location"), ] ] load_error: Optional[SerializableErrorInfo] load_status: CodeLocationLoadStatus display_metadata: Mapping[str, str] update_timestamp: float version_key: str definitions_source: DefinitionsSource @record
CodeLocationEntry
python
mlflow__mlflow
mlflow/legacy_databricks_cli/configure/provider.py
{ "start": 14327, "end": 16881 }
class ____: def __init__( self, host, username, password, token, refresh_token=None, insecure=None, jobs_api_version=None, client_id=None, client_secret=None, auth_type=None, ): self.host = host self.username = username self.password = password self.token = token self.refresh_token = refresh_token self.insecure = insecure self.jobs_api_version = jobs_api_version self.client_id = client_id self.client_secret = client_secret self.auth_type = auth_type @classmethod def from_token(cls, host, token, refresh_token=None, insecure=None, jobs_api_version=None): return DatabricksConfig( host=host, username=None, password=None, token=token, refresh_token=refresh_token, insecure=insecure, jobs_api_version=jobs_api_version, ) @classmethod def from_password(cls, host, username, password, insecure=None, jobs_api_version=None): return DatabricksConfig( host=host, username=username, password=password, token=None, refresh_token=None, insecure=insecure, jobs_api_version=jobs_api_version, ) @classmethod def empty(cls): return DatabricksConfig( host=None, username=None, password=None, token=None, refresh_token=None, insecure=None, jobs_api_version=None, ) @property def is_valid_with_token(self): return self.host is not None and self.token is not None @property def is_valid_with_password(self): return self.host is not None and self.username is not None and self.password is not None @property def is_valid_with_client_id_secret(self): return self.host and self.client_id and self.client_secret @property def is_databricks_cli_auth_type(self): return self.auth_type == "databricks-cli" @property def is_azure_cli_auth_type(self): return self.auth_type == "azure-cli" @property def is_valid(self): return ( self.is_valid_with_token or self.is_valid_with_password or self.is_valid_with_client_id_secret or self.is_databricks_cli_auth_type or self.is_azure_cli_auth_type )
DatabricksConfig
python
doocs__leetcode
solution/1200-1299/1298.Maximum Candies You Can Get from Boxes/Solution.py
{ "start": 0, "end": 1014 }
class ____: def maxCandies( self, status: List[int], candies: List[int], keys: List[List[int]], containedBoxes: List[List[int]], initialBoxes: List[int], ) -> int: q = deque() has, took = set(initialBoxes), set() ans = 0 for box in initialBoxes: if status[box]: q.append(box) took.add(box) ans += candies[box] while q: box = q.popleft() for k in keys[box]: if not status[k]: status[k] = 1 if k in has and k not in took: q.append(k) took.add(k) ans += candies[k] for b in containedBoxes[box]: has.add(b) if status[b] and b not in took: q.append(b) took.add(b) ans += candies[b] return ans
Solution
python
bokeh__bokeh
src/bokeh/models/labeling.py
{ "start": 1520, "end": 1750 }
class ____(Model): """ Base class for labeling policies. """ # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs)
LabelingPolicy
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 695016, "end": 695552 }
class ____(sgqlc.types.Type, Node): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("actor", "base_ref_name", "created_at", "pull_request") actor = sgqlc.types.Field(Actor, graphql_name="actor") base_ref_name = sgqlc.types.Field(String, graphql_name="baseRefName") created_at = sgqlc.types.Field( sgqlc.types.non_null(DateTime), graphql_name="createdAt" ) pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
BaseRefDeletedEvent
python
spyder-ide__spyder
external-deps/spyder-kernels/spyder_kernels/comms/frontendcomm.py
{ "start": 990, "end": 7474 }
class ____(CommBase): """Mixin to implement the spyder_shell_api.""" def __init__(self, kernel): super(FrontendComm, self).__init__() # Comms self.kernel = kernel self.kernel.comm_manager.register_target( self._comm_name, self._comm_open) self.comm_lock = threading.Lock() self._cached_messages = {} self._pending_comms = {} def close(self, comm_id=None): """Close the comm and notify the other side.""" with self.comm_lock: return super(FrontendComm, self).close(comm_id) def _send_message(self, *args, **kwargs): """Publish custom messages to the other side.""" with self.comm_lock: return super(FrontendComm, self)._send_message(*args, **kwargs) def poll_one(self): """Receive one message from comm socket.""" out_stream = None if self.kernel.shell_streams: # If the message handler needs to send a reply, # use the regular shell stream. out_stream = self.kernel.shell_streams[0] try: ident, msg = self.kernel.session.recv( self.kernel.parent.control_socket, 0) except zmq.error.ContextTerminated: return except Exception: self.kernel.log.warning("Invalid Message:", exc_info=True) return msg_type = msg['header']['msg_type'] handler = self.kernel.control_handlers.get(msg_type, None) if handler is None: self.kernel.log.warning("Unknown message type: %r", msg_type) return try: asyncio.run(handler(out_stream, ident, msg)) except Exception: self.kernel.log.error( "Exception in message handler:", exc_info=True) finally: sys.stdout.flush() sys.stderr.flush() # Flush to ensure reply is sent if out_stream: out_stream.flush(zmq.POLLOUT) def remote_call(self, comm_id=None, blocking=False, callback=None, timeout=None, display_error=False): """Get a handler for remote calls.""" return super(FrontendComm, self).remote_call( blocking=blocking, comm_id=comm_id, callback=callback, timeout=timeout, display_error=display_error) def wait_until(self, condition, timeout=None): """Wait until condition is met. 
Returns False if timeout.""" if condition(): return True t_start = time.time() while not condition(): if timeout is not None and time.time() > t_start + timeout: return False if threading.current_thread() is self.kernel.parent.control_thread: # Wait for a reply on the comm channel. self.poll_one() else: # Wait 10ms for a reply time.sleep(0.01) return True def cache_message(self, comm_id, msg): """Message from a comm that might be opened later.""" if comm_id not in self._cached_messages: self._cached_messages[comm_id] = [] self._cached_messages[comm_id].append(msg) # --- Private -------- def _check_comm_reply(self): """ Send comm message to frontend to check if the iopub channel is ready """ # Make sure the length doesn't change during iteration pending_comms = list(self._pending_comms.values()) if len(pending_comms) == 0: return for comm in pending_comms: self._notify_comm_ready(comm) self.kernel.io_loop.call_later(1, self._check_comm_reply) def _notify_comm_ready(self, comm): """Send messages about comm readiness to frontend.""" self.remote_call( comm_id=comm.comm_id, callback=self._comm_ready_callback )._comm_ready() def _comm_ready_callback(self, ret): """A comm has replied, so process all cached messages related to it.""" comm = self._pending_comms.pop(self.calling_comm_id, None) if not comm: return # Cached messages for that comm if comm.comm_id in self._cached_messages: for msg in self._cached_messages[comm.comm_id]: comm.handle_msg(msg) self._cached_messages.pop(comm.comm_id) def _wait_reply(self, comm_id, call_id, call_name, timeout, retry=True): """Wait until the frontend replies to a request.""" def reply_received(): """The reply is there!""" return call_id in self._reply_inbox if not self.wait_until(reply_received): if retry: self._wait_reply(comm_id, call_id, call_name, timeout, False) return raise TimeoutError( "Timeout while waiting for '{}' reply.".format( call_name)) def _comm_open(self, comm, msg): """ A new comm is open! 
""" self.calling_comm_id = comm.comm_id self._register_comm(comm) # IOPub might not be connected yet, keep sending messages until a # reply is received. self._pending_comms[comm.comm_id] = comm self._notify_comm_ready(comm) self.kernel.io_loop.call_later(.3, self._check_comm_reply) def _comm_close(self, msg): """Close comm.""" comm_id = msg['content']['comm_id'] # Send back a close message confirmation # Fixes spyder-ide/spyder#15356 self.close(comm_id) def _async_error(self, error_wrapper): """ Send an async error back to the frontend to be displayed. """ self.remote_call()._async_error(error_wrapper.to_json()) def _register_comm(self, comm): """ Remove side effect ipykernel has. """ def handle_msg(msg): """Handle a comm_msg message""" if comm._msg_callback: comm._msg_callback(msg) comm.handle_msg = handle_msg super(FrontendComm, self)._register_comm(comm) def _remote_callback(self, call_name, call_args, call_kwargs): """Call the callback function for the remote call.""" with WriteContext(call_name): return super(FrontendComm, self)._remote_callback( call_name, call_args, call_kwargs)
FrontendComm
python
huggingface__transformers
src/transformers/models/data2vec/modeling_data2vec_audio.py
{ "start": 7625, "end": 11005 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[Data2VecAudioConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, # TODO: we need a refactor so that the different attention modules can get their specific kwargs # ATM, we have mixed things encoder, decoder, and encoder-decoder attn **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) # get query proj query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) current_states = key_value_states if 
is_cross_attention else hidden_states key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2) value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights, None
Data2VecAudioAttention
python
plotly__plotly.py
plotly/graph_objs/ohlc/hoverlabel/_font.py
{ "start": 233, "end": 17128 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "ohlc.hoverlabel" _path_str = "ohlc.hoverlabel.font" _valid_props = { "color", "colorsrc", "family", "familysrc", "lineposition", "linepositionsrc", "shadow", "shadowsrc", "size", "sizesrc", "style", "stylesrc", "textcase", "textcasesrc", "variant", "variantsrc", "weight", "weightsrc", } @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def colorsrc(self): """ Sets the source reference on Chart Studio Cloud for `color`. The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["colorsrc"] @colorsrc.setter def colorsrc(self, val): self["colorsrc"] = val @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["family"] @family.setter def family(self, val): self["family"] = val @property def familysrc(self): """ Sets the source reference on Chart Studio Cloud for `family`. 
The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["familysrc"] @familysrc.setter def familysrc(self, val): self["familysrc"] = val @property def lineposition(self): """ Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. The 'lineposition' property is a flaglist and may be specified as a string containing: - Any combination of ['under', 'over', 'through'] joined with '+' characters (e.g. 'under+over') OR exactly one of ['none'] (e.g. 'none') - A list or array of the above Returns ------- Any|numpy.ndarray """ return self["lineposition"] @lineposition.setter def lineposition(self, val): self["lineposition"] = val @property def linepositionsrc(self): """ Sets the source reference on Chart Studio Cloud for `lineposition`. The 'linepositionsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["linepositionsrc"] @linepositionsrc.setter def linepositionsrc(self, val): self["linepositionsrc"] = val @property def shadow(self): """ Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow for additional options. The 'shadow' property is a string and must be specified as: - A string - A number that will be converted to a string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["shadow"] @shadow.setter def shadow(self, val): self["shadow"] = val @property def shadowsrc(self): """ Sets the source reference on Chart Studio Cloud for `shadow`. 
The 'shadowsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["shadowsrc"] @shadowsrc.setter def shadowsrc(self, val): self["shadowsrc"] = val @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def sizesrc(self): """ Sets the source reference on Chart Studio Cloud for `size`. The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["sizesrc"] @sizesrc.setter def sizesrc(self, val): self["sizesrc"] = val @property def style(self): """ Sets whether a font should be styled with a normal or italic face from its family. The 'style' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'italic'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["style"] @style.setter def style(self, val): self["style"] = val @property def stylesrc(self): """ Sets the source reference on Chart Studio Cloud for `style`. The 'stylesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["stylesrc"] @stylesrc.setter def stylesrc(self, val): self["stylesrc"] = val @property def textcase(self): """ Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. 
The 'textcase' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'word caps', 'upper', 'lower'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["textcase"] @textcase.setter def textcase(self, val): self["textcase"] = val @property def textcasesrc(self): """ Sets the source reference on Chart Studio Cloud for `textcase`. The 'textcasesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["textcasesrc"] @textcasesrc.setter def textcasesrc(self, val): self["textcasesrc"] = val @property def variant(self): """ Sets the variant of the font. The 'variant' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'small-caps', 'all-small-caps', 'all-petite-caps', 'petite-caps', 'unicase'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["variant"] @variant.setter def variant(self, val): self["variant"] = val @property def variantsrc(self): """ Sets the source reference on Chart Studio Cloud for `variant`. The 'variantsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["variantsrc"] @variantsrc.setter def variantsrc(self, val): self["variantsrc"] = val @property def weight(self): """ Sets the weight (or boldness) of the font. The 'weight' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 1000] OR exactly one of ['normal', 'bold'] (e.g. 'bold') - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|numpy.ndarray """ return self["weight"] @weight.setter def weight(self, val): self["weight"] = val @property def weightsrc(self): """ Sets the source reference on Chart Studio Cloud for `weight`. 
The 'weightsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["weightsrc"] @weightsrc.setter def weightsrc(self, val): self["weightsrc"] = val @property def _prop_descriptions(self): return """\ color colorsrc Sets the source reference on Chart Studio Cloud for `color`. family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. familysrc Sets the source reference on Chart Studio Cloud for `family`. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. linepositionsrc Sets the source reference on Chart Studio Cloud for `lineposition`. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. shadowsrc Sets the source reference on Chart Studio Cloud for `shadow`. size sizesrc Sets the source reference on Chart Studio Cloud for `size`. style Sets whether a font should be styled with a normal or italic face from its family. stylesrc Sets the source reference on Chart Studio Cloud for `style`. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. textcasesrc Sets the source reference on Chart Studio Cloud for `textcase`. variant Sets the variant of the font. variantsrc Sets the source reference on Chart Studio Cloud for `variant`. weight Sets the weight (or boldness) of the font. weightsrc Sets the source reference on Chart Studio Cloud for `weight`. 
""" def __init__( self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, lineposition=None, linepositionsrc=None, shadow=None, shadowsrc=None, size=None, sizesrc=None, style=None, stylesrc=None, textcase=None, textcasesrc=None, variant=None, variantsrc=None, weight=None, weightsrc=None, **kwargs, ): """ Construct a new Font object Sets the font used in hover labels. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.ohlc.hoverlabel.Font` color colorsrc Sets the source reference on Chart Studio Cloud for `color`. family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. familysrc Sets the source reference on Chart Studio Cloud for `family`. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. linepositionsrc Sets the source reference on Chart Studio Cloud for `lineposition`. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. shadowsrc Sets the source reference on Chart Studio Cloud for `shadow`. size sizesrc Sets the source reference on Chart Studio Cloud for `size`. style Sets whether a font should be styled with a normal or italic face from its family. stylesrc Sets the source reference on Chart Studio Cloud for `style`. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. textcasesrc Sets the source reference on Chart Studio Cloud for `textcase`. variant Sets the variant of the font. 
variantsrc Sets the source reference on Chart Studio Cloud for `variant`. weight Sets the weight (or boldness) of the font. weightsrc Sets the source reference on Chart Studio Cloud for `weight`. Returns ------- Font """ super().__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.ohlc.hoverlabel.Font constructor must be a dict or an instance of :class:`plotly.graph_objs.ohlc.hoverlabel.Font`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("colorsrc", arg, colorsrc) self._set_property("family", arg, family) self._set_property("familysrc", arg, familysrc) self._set_property("lineposition", arg, lineposition) self._set_property("linepositionsrc", arg, linepositionsrc) self._set_property("shadow", arg, shadow) self._set_property("shadowsrc", arg, shadowsrc) self._set_property("size", arg, size) self._set_property("sizesrc", arg, sizesrc) self._set_property("style", arg, style) self._set_property("stylesrc", arg, stylesrc) self._set_property("textcase", arg, textcase) self._set_property("textcasesrc", arg, textcasesrc) self._set_property("variant", arg, variant) self._set_property("variantsrc", arg, variantsrc) self._set_property("weight", arg, weight) self._set_property("weightsrc", arg, weightsrc) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Font
python
fluentpython__example-code-2e
11-pythonic-obj/vector2d_v3_prophash.py
{ "start": 1863, "end": 3496 }
class ____: typecode = 'd' def __init__(self, x, y): self.__x = float(x) # <1> self.__y = float(y) @property # <2> def x(self): # <3> return self.__x # <4> @property # <5> def y(self): return self.__y def __iter__(self): return (i for i in (self.x, self.y)) # <6> # remaining methods: same as previous Vector2d # end::VECTOR2D_V3_PROP[] def __repr__(self): class_name = type(self).__name__ return '{}({!r}, {!r})'.format(class_name, *self) def __str__(self): return str(tuple(self)) def __bytes__(self): return (bytes([ord(self.typecode)]) + bytes(array(self.typecode, self))) def __eq__(self, other): return tuple(self) == tuple(other) # tag::VECTOR_V3_HASH[] def __hash__(self): return hash((self.x, self.y)) # end::VECTOR_V3_HASH[] def __abs__(self): return math.hypot(self.x, self.y) def __bool__(self): return bool(abs(self)) def angle(self): return math.atan2(self.y, self.x) def __format__(self, fmt_spec=''): if fmt_spec.endswith('p'): fmt_spec = fmt_spec[:-1] coords = (abs(self), self.angle()) outer_fmt = '<{}, {}>' else: coords = self outer_fmt = '({}, {})' components = (format(c, fmt_spec) for c in coords) return outer_fmt.format(*components) @classmethod def frombytes(cls, octets): typecode = chr(octets[0]) memv = memoryview(octets[1:]).cast(typecode) return cls(*memv)
Vector2d
python
huggingface__transformers
src/transformers/models/bart/modeling_bart.py
{ "start": 70348, "end": 70952 }
class ____(BartPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. """ def __init__(self, config): super().__init__(config) self.decoder = BartDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) @auto_docstring( custom_intro=""" BART decoder with a language modeling head on top (linear layer with weights tied to the input embeddings). """ )
BartDecoderWrapper
python
numba__numba
numba/tests/test_random.py
{ "start": 61223, "end": 63074 }
class ____(TestCase): # Enough iterations for: # 1. Mersenne-Twister state shuffles to occur (once every 624) # 2. Race conditions to be plausible # 3. Nice statistical properties to emerge _extract_iterations = 100000 def setUp(self): # Warm up, to avoid compiling in the threads args = (42, self._get_output(1)) py_extract_randomness(*args) np_extract_randomness(*args) def _get_output(self, size): return np.zeros(size, dtype=np.uint32) def check_output(self, out): """ Check statistical properties of output. """ # Output should follow a uniform distribution in [0, 1<<32) expected_avg = 1 << 31 expected_std = (1 << 32) / np.sqrt(12) rtol = 0.05 # given enough iterations np.testing.assert_allclose(out.mean(), expected_avg, rtol=rtol) np.testing.assert_allclose(out.std(), expected_std, rtol=rtol) def check_several_outputs(self, results, same_expected): # Outputs should have the expected statistical properties # (an uninitialized PRNG or a PRNG whose internal state was # corrupted by a race condition could produce bogus randomness) for out in results: self.check_output(out) # Check all threads gave either the same sequence or # distinct sequences if same_expected: expected_distinct = 1 else: expected_distinct = len(results) heads = {tuple(out[:5]) for out in results} tails = {tuple(out[-5:]) for out in results} sums = {out.sum() for out in results} self.assertEqual(len(heads), expected_distinct, heads) self.assertEqual(len(tails), expected_distinct, tails) self.assertEqual(len(sums), expected_distinct, sums)
ConcurrencyBaseTest
python
google__pytype
pytype/pytd/visitors.py
{ "start": 52645, "end": 53898 }
class ____(Visitor): """Expand to Cartesian product of parameter types. For example, this transforms def f(x: Union[int, float], y: Union[int, float]) -> Union[str, unicode] to def f(x: int, y: int) -> Union[str, unicode] def f(x: int, y: float) -> Union[str, unicode] def f(x: float, y: int) -> Union[str, unicode] def f(x: float, y: float) -> Union[str, unicode] The expansion by this class is typically *not* an optimization. But it can be the precursor for optimizations that need the expanded signatures, and it can simplify code generation, e.g. when generating type declarations for a type inferencer. """ def VisitFunction(self, f): """Rebuild the function with the new signatures. This is called after its children (i.e. when VisitSignature has already converted each signature into a list) and rebuilds the function using the new signatures. Arguments: f: A pytd.Function instance. Returns: Function with the new signatures. """ # flatten return value(s) from VisitSignature signatures = tuple(ex for s in f.signatures for ex in ExpandSignature(s)) # pylint: disable=g-complex-comprehension return f.Replace(signatures=signatures)
ExpandSignatures
python
huggingface__transformers
src/transformers/models/llama4/modeling_llama4.py
{ "start": 3504, "end": 4202 }
class ____(nn.Module): def __init__(self, config, intermediate_size=None): super().__init__() if intermediate_size is None: intermediate_size = config.intermediate_size self.config = config self.gate_proj = nn.Linear(config.hidden_size, intermediate_size, bias=False) self.up_proj = nn.Linear(config.hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, config.hidden_size, bias=False) self.activation_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.activation_fn(self.gate_proj(x)) * self.up_proj(x) return self.down_proj(down_proj)
Llama4TextMLP
python
getsentry__sentry
src/sentry/backup/services/import_export/model.py
{ "start": 8253, "end": 9091 }
class ____(RpcModel, Finding): """ A Pydantic and RPC friendly error container that also inherits from the base `Finding` class. """ is_err: Literal[True] = True kind: RpcExportErrorKind = RpcExportErrorKind.Unknown # Include fields from `Finding` in this `RpcModel` derivative. on: InstanceID left_pk: int | None = None right_pk: int | None = None reason: str = "" def get_kind(self) -> RpcExportErrorKind: return RpcExportErrorKind(self.kind) def pretty(self) -> str: return f"RpcExportError(\n kind: {self.get_kind().value},{self._pretty_inner()}\n)" def to_dict(self) -> dict[str, Any]: d = dict(self) del d["is_err"] return d RpcExportResult = Annotated[Union[RpcExportOk, RpcExportError], Field(discriminator="is_err")]
RpcExportError
python
PyCQA__pylint
tests/functional/a/abstract/abstract_class_instantiated.py
{ "start": 1435, "end": 1550 }
class ____(Structure): @abc.abstractmethod def length(self): pass __len__ = length
AbstractSizable
python
huggingface__transformers
src/transformers/trainer_utils.py
{ "start": 29723, "end": 36806 }
class ____: """Wrap the data collator to remove unused columns before they are passed to the collator.""" def __init__( self, data_collator, signature_columns, logger=None, model_name: str | None = None, description: str | None = None, ): self.data_collator = data_collator self.signature_columns = signature_columns self.logger = logger self.description = description self.model_name = model_name self.message_logged = False def _remove_columns(self, feature: dict) -> dict: if not isinstance(feature, dict): return feature if not self.message_logged and self.logger and self.model_name: ignored_columns = list(set(feature.keys()) - set(self.signature_columns)) if len(ignored_columns) > 0: dset_description = "" if self.description is None else f"in the {self.description} set" self.logger.info( f"The following columns {dset_description} don't have a corresponding argument in " f"`{self.model_name}.forward` and have been ignored: {', '.join(ignored_columns)}." f" If {', '.join(ignored_columns)} are not expected by `{self.model_name}.forward`, " " you can safely ignore this message." ) self.message_logged = True return {k: v for k, v in feature.items() if k in self.signature_columns} def __call__(self, features: list[dict]): features = [self._remove_columns(feature) for feature in features] return self.data_collator(features) def check_target_module_exists(optim_target_modules, key: str, return_is_regex: bool = False): """A helper method to check if the passed module's key name matches any of the target modules in the optim_target_modules. Args: optim_target_modules (`Union[str, list[str]]`): A list of strings to try to match. Can be also a full string. key (`str`): A key to search any matches in optim_target_modules return_is_regex (`bool`): If set to `True`, the method will return whether the passed `optim_target_modules` is a regex or not. 
Returns: `bool` : True of match object if key matches any target modules from config, False or None if no match found `bool` : If the matched target module is a regex to silence out the warnings in Trainer for extra modules being found (only if `target_module_found=True` for an array of regex). """ target_module_found = False is_regex = False if isinstance(optim_target_modules, str): target_module_found = bool(re.fullmatch(optim_target_modules, key)) is_regex = optim_target_modules != key elif key in optim_target_modules: # from here, target_module_found must be a list of str # this module is specified directly in target_modules target_module_found = True elif any(target_key in key for target_key in optim_target_modules): target_module_found = True elif any(bool(re.fullmatch(optim_target_module, key)) for optim_target_module in optim_target_modules): target_module_found = True is_regex = True if return_is_regex: return target_module_found, is_regex return target_module_found def load_sharded_checkpoint(model, folder, strict=True, prefer_safe=True): """ This is the same as [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict) but for a sharded checkpoint. This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model. Args: model (`torch.nn.Module`): The model in which to load the checkpoint. folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint. strict (`bool`, *optional*, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. prefer_safe (`bool`, *optional*, defaults to `False`): If both safetensors and PyTorch save files are present in checkpoint and `prefer_safe` is True, the safetensors files will be loaded. Otherwise, PyTorch files are always loaded when possible. 
Returns: `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields - `missing_keys` is a list of str containing the missing keys - `unexpected_keys` is a list of str containing the unexpected keys """ # Load the index index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) safe_index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME) index_present = os.path.isfile(index_file) safe_index_present = os.path.isfile(safe_index_file) if not index_present and not safe_index_present: filenames = (WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_INDEX_NAME) raise ValueError(f"Can't find a checkpoint index ({' or '.join(filenames)}) in {folder}.") load_safe = safe_index_present and (prefer_safe or not index_present) load_index = safe_index_file if load_safe else index_file with open(load_index, "r", encoding="utf-8") as f: index = json.load(f) shard_files = list(set(index["weight_map"].values())) # If strict=True, error before loading any of the state dicts. # TODO: Here, update the weigth map with the config.dynamic_weight_conversion loaded_keys = index["weight_map"].keys() model_keys = model.state_dict().keys() missing_keys = [key for key in model_keys if key not in loaded_keys] unexpected_keys = [key for key in loaded_keys if key not in model_keys] if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}" if len(missing_keys) > 0: str_missing_keys = ",".join([f'"{k}"' for k in missing_keys]) error_message += f"\nMissing key(s): {str_missing_keys}." if len(unexpected_keys) > 0: str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys]) error_message += f"\nMissing key(s): {str_unexpected_keys}." 
raise RuntimeError(error_message) if load_safe: loader = safe_load_file else: check_torch_load_is_safe() loader = partial(torch.load, map_location="cpu", weights_only=True) for shard_file in shard_files: state_dict = loader(os.path.join(folder, shard_file)) model.load_state_dict(state_dict, strict=False) # Make sure memory is freed before we load the next state dict. del state_dict gc.collect() # Return the same thing as PyTorch load_state_dict function. return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys)
RemoveColumnsCollator
python
chardet__chardet
chardet/jpcntx.py
{ "start": 26325, "end": 27089 }
class ____(JapaneseContextAnalysis): def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]: # type: ignore[reportIncompatibleMethodOverride] if not byte_str: return -1, 1 # find out current char's byte length first_char = byte_str[0] if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): char_len = 2 elif first_char == 0x8F: char_len = 3 else: char_len = 1 # return its order if it is hiragana if len(byte_str) > 1: second_char = byte_str[1] if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): return second_char - 0xA1, char_len return -1, char_len
EUCJPContextAnalysis
python
coleifer__peewee
tests/extra_fields.py
{ "start": 191, "end": 267 }
class ____(TestModel): key = TextField() data = CompressedField()
Comp
python
milvus-io__pymilvus
pymilvus/exceptions.py
{ "start": 3548, "end": 3629 }
class ____(MilvusException): """Raise when autoID is invalid"""
AutoIDException
python
kamyu104__LeetCode-Solutions
Python/kth-largest-sum-in-a-binary-tree.py
{ "start": 45, "end": 158 }
class ____(object): def __init__(self, val=0, left=None, right=None): pass # bfs, quick select
TreeNode
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_bugbear/class_as_data_structure.py
{ "start": 1115, "end": 1257 }
class ____: spam = "ham" def __init__(self, foo:int, bar:list): self.foo = foo self.bar = bar
NoWarningsClassAttributes
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_unittest.py
{ "start": 1588, "end": 2336 }
class ____(unittest.TestCase): @given(s=st.text()) @settings(deadline=None) def test_subtest(self, s): with self.subTest(text=s): self.assertIsInstance(s, str) if __name__ == "__main__": unittest.main() """ @skipif_emscripten @pytest.mark.parametrize("err", [[], ["-Werror"]]) def test_subTest_no_self(testdir, err): # https://github.com/HypothesisWorks/hypothesis/issues/2462 # for some reason this issue happens only when running unittest from commandline fname = testdir.makepyfile(tests=SUBTEST_SUITE) result = testdir.run(sys.executable, *err, str(fname)) expected = pytest.ExitCode.TESTS_FAILED if err else pytest.ExitCode.OK assert result.ret == expected, result.stderr.str()
MyTest
python
huggingface__transformers
src/transformers/tokenization_python.py
{ "start": 10825, "end": 15034 }
class ____(Trie): def __init__(self, *args): super().__init__(*args) def extensions(self, prefix: str): """ Generates all extensions of a given prefix token in the Trie. Example: ```python >>> trie = Trie() >>> trie.add("apple") >>> trie.add("app") >>> trie.add("application") >>> trie.extensions("app") ['app', 'apple', 'application'] ``` """ prefix_node = self._get_node(prefix) ret = self._collect_tokens(prefix_node) return [prefix + token for token in ret] def _get_node(self, token: str) -> dict: """ Retrieves the node corresponding to the given token in the Trie. Args: token (str): The token for which the corresponding node needs to be retrieved. Returns: dict: The node in the Trie corresponding to the given token. """ node = self.data for char in token: if char not in node: break node = node[char] return node def _collect_tokens(self, node: dict) -> list: """ Generates all tokens in the Trie starting from a given node. Args: node (dict): The node in the Trie from which tokens need to be generated. Returns: list: List of tokens generated from the given node. """ tokens = [self._termination_char] if self._termination_char in node else [] for token, subtrie_head in node.items(): if token != self._termination_char: subtokens = self._collect_tokens(subtrie_head) tokens.extend([token + subtoken for subtoken in subtokens]) return tokens def _is_whitespace(char): """Checks whether `char` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `char` is a control character.""" # These are technically control characters but we count them as whitespace # characters. 
if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `char` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False def _is_end_of_word(text): """Checks whether the last character in text is one of a punctuation, control or whitespace character.""" last_char = text[-1] return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char)) def _is_start_of_word(text): """Checks whether the first character in text is one of a punctuation, control or whitespace character.""" first_char = text[0] return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char)) def _insert_one_token_to_ordered_list(token_list: list[str], new_token: str): """ Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted. """ insertion_idx = bisect.bisect_left(token_list, new_token) # Checks if new_token is already in the ordered token_list if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token: # new_token is in token_list, don't add return else: token_list.insert(insertion_idx, new_token) @add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
ExtensionsTrie
python
Lightning-AI__lightning
src/lightning/fabric/utilities/types.py
{ "start": 2287, "end": 2620 }
class ____(Steppable, Protocol): """To structurally type ``optimizer``""" param_groups: list[dict[Any, Any]] defaults: dict[Any, Any] state: defaultdict[Tensor, Any] def state_dict(self) -> dict[str, dict[Any, Any]]: ... def load_state_dict(self, state_dict: dict[str, dict[Any, Any]]) -> None: ...
Optimizable
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/sqlite/aiosqlite.py
{ "start": 3873, "end": 3960 }
class ____(AsyncAdapt_dbapi_ss_cursor): __slots__ = ()
AsyncAdapt_aiosqlite_ss_cursor
python
scikit-learn__scikit-learn
sklearn/linear_model/_stochastic_gradient.py
{ "start": 31423, "end": 50449 }
class ____(BaseSGDClassifier): """Linear classifiers (SVM, logistic regression, etc.) with SGD training. This estimator implements regularized linear models with stochastic gradient descent (SGD) learning: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). SGD allows minibatch (online/out-of-core) learning via the `partial_fit` method. For best results using the default learning rate schedule, the data should have zero mean and unit variance. This implementation works with data represented as dense or sparse arrays of floating point values for the features. The model it fits can be controlled with the loss parameter; by default, it fits a linear support vector machine (SVM). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : {'hinge', 'log_loss', 'modified_huber', 'squared_hinge',\ 'perceptron', 'squared_error', 'huber', 'epsilon_insensitive',\ 'squared_epsilon_insensitive'}, default='hinge' The loss function to be used. - 'hinge' gives a linear SVM. - 'log_loss' gives logistic regression, a probabilistic classifier. - 'modified_huber' is another smooth loss that brings tolerance to outliers as well as probability estimates. - 'squared_hinge' is like hinge but is quadratically penalized. - 'perceptron' is the linear loss used by the perceptron algorithm. 
- The other losses, 'squared_error', 'huber', 'epsilon_insensitive' and 'squared_epsilon_insensitive' are designed for regression but can be useful in classification as well; see :class:`~sklearn.linear_model.SGDRegressor` for a description. More details about the losses formulas can be found in the :ref:`User Guide <sgd_mathematical_formulation>` and you can find a visualisation of the loss functions in :ref:`sphx_glr_auto_examples_linear_model_plot_sgd_loss_functions.py`. penalty : {'l2', 'l1', 'elasticnet', None}, default='l2' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. No penalty is added when set to `None`. You can see a visualisation of the penalties in :ref:`sphx_glr_auto_examples_linear_model_plot_sgd_penalties.py`. alpha : float, default=0.0001 Constant that multiplies the regularization term. The higher the value, the stronger the regularization. Also used to compute the learning rate when `learning_rate` is set to 'optimal'. Values must be in the range `[0.0, inf)`. l1_ratio : float, default=0.15 The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Only used if `penalty` is 'elasticnet'. Values must be in the range `[0.0, 1.0]` or can be `None` if `penalty` is not `elasticnet`. .. versionchanged:: 1.7 `l1_ratio` can be `None` when `penalty` is not "elasticnet". fit_intercept : bool, default=True Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. max_iter : int, default=1000 The maximum number of passes over the training data (aka epochs). It only impacts the behavior in the ``fit`` method, and not the :meth:`partial_fit` method. Values must be in the range `[1, inf)`. .. versionadded:: 0.19 tol : float or None, default=1e-3 The stopping criterion. 
If it is not None, training will stop when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive epochs. Convergence is checked against the training loss or the validation loss depending on the `early_stopping` parameter. Values must be in the range `[0.0, inf)`. .. versionadded:: 0.19 shuffle : bool, default=True Whether or not the training data should be shuffled after each epoch. verbose : int, default=0 The verbosity level. Values must be in the range `[0, inf)`. epsilon : float, default=0.1 Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. Values must be in the range `[0.0, inf)`. n_jobs : int, default=None The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. random_state : int, RandomState instance, default=None Used for shuffling the data, when ``shuffle`` is set to ``True``. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Integer values must be in the range `[0, 2**32 - 1]`. learning_rate : str, default='optimal' The learning rate schedule: - 'constant': `eta = eta0` - 'optimal': `eta = 1.0 / (alpha * (t + t0))` where `t0` is chosen by a heuristic proposed by Leon Bottou. - 'invscaling': `eta = eta0 / pow(t, power_t)` - 'adaptive': `eta = eta0`, as long as the training keeps decreasing. 
Each time n_iter_no_change consecutive epochs fail to decrease the training loss by tol or fail to increase validation score by tol if `early_stopping` is `True`, the current learning rate is divided by 5. - 'pa1': passive-aggressive algorithm 1, see [1]_. Only with `loss='hinge'`. Update is `w += eta y x` with `eta = min(eta0, loss/||x||**2)`. - 'pa2': passive-aggressive algorithm 2, see [1]_. Only with `loss='hinge'`. Update is `w += eta y x` with `eta = hinge_loss / (||x||**2 + 1/(2 eta0))`. .. versionadded:: 0.20 Added 'adaptive' option. .. versionadded:: 1.8 Added options 'pa1' and 'pa2' eta0 : float, default=0.01 The initial learning rate for the 'constant', 'invscaling' or 'adaptive' schedules. The default value is 0.01, but note that eta0 is not used by the default learning rate 'optimal'. Values must be in the range `(0.0, inf)`. For PA-1 (`learning_rate=pa1`) and PA-II (`pa2`), it specifies the aggressiveness parameter for the passive-agressive algorithm, see [1] where it is called C: - For PA-I it is the maximum step size. - For PA-II it regularizes the step size (the smaller `eta0` the more it regularizes). As a general rule-of-thumb for PA, `eta0` should be small when the data is noisy. power_t : float, default=0.5 The exponent for inverse scaling learning rate. Values must be in the range `[0.0, inf)`. .. deprecated:: 1.8 Negative values for `power_t` are deprecated in version 1.8 and will raise an error in 1.10. Use values in the range [0.0, inf) instead. early_stopping : bool, default=False Whether to use early stopping to terminate training when validation score is not improving. If set to `True`, it will automatically set aside a stratified fraction of training data as validation and terminate training when validation score returned by the `score` method is not improving by at least tol for n_iter_no_change consecutive epochs. See :ref:`sphx_glr_auto_examples_linear_model_plot_sgd_early_stopping.py` for an example of the effects of early stopping. 
.. versionadded:: 0.20 Added 'early_stopping' option validation_fraction : float, default=0.1 The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if `early_stopping` is True. Values must be in the range `(0.0, 1.0)`. .. versionadded:: 0.20 Added 'validation_fraction' option n_iter_no_change : int, default=5 Number of iterations with no improvement to wait before stopping fitting. Convergence is checked against the training loss or the validation loss depending on the `early_stopping` parameter. Integer values must be in the range `[1, max_iter)`. .. versionadded:: 0.20 Added 'n_iter_no_change' option class_weight : dict, {class_label: weight} or "balanced", default=None Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``. warm_start : bool, default=False When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. See :term:`the Glossary <warm_start>`. Repeatedly calling fit or partial_fit when warm_start is True can result in a different solution than when calling fit a single time because of the way the data is shuffled. If a dynamic learning rate is used, the learning rate is adapted depending on the number of samples already seen. Calling ``fit`` resets this counter, while ``partial_fit`` will result in increasing the existing counter. average : bool or int, default=False When set to `True`, computes the averaged SGD weights across all updates and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches `average`. So ``average=10`` will begin averaging after seeing 10 samples. 
Integer values must be in the range `[1, n_samples]`. Attributes ---------- coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \ (n_classes, n_features) Weights assigned to the features. intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) Constants in decision function. n_iter_ : int The actual number of iterations before reaching the stopping criterion. For multiclass fits, it is the maximum over every binary fit. classes_ : array of shape (n_classes,) t_ : int Number of weight updates performed during training. Same as ``(n_iter_ * n_samples + 1)``. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- sklearn.svm.LinearSVC : Linear support vector classification. LogisticRegression : Logistic regression. Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to ``SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", penalty=None)``. References ---------- .. [1] Online Passive-Aggressive Algorithms <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf> K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006) Examples -------- >>> import numpy as np >>> from sklearn.linear_model import SGDClassifier >>> from sklearn.preprocessing import StandardScaler >>> from sklearn.pipeline import make_pipeline >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> Y = np.array([1, 1, 2, 2]) >>> # Always scale the input. The most convenient way is to use a pipeline. >>> clf = make_pipeline(StandardScaler(), ... 
SGDClassifier(max_iter=1000, tol=1e-3)) >>> clf.fit(X, Y) Pipeline(steps=[('standardscaler', StandardScaler()), ('sgdclassifier', SGDClassifier())]) >>> print(clf.predict([[-0.8, -1]])) [1] """ _parameter_constraints: dict = { **BaseSGDClassifier._parameter_constraints, "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], "alpha": [Interval(Real, 0, None, closed="left")], "l1_ratio": [Interval(Real, 0, 1, closed="both"), None], "power_t": [Interval(Real, None, None, closed="neither")], "epsilon": [Interval(Real, 0, None, closed="left")], "learning_rate": [ StrOptions({"constant", "optimal", "invscaling", "adaptive", "pa1", "pa2"}), ], } def __init__( self, loss="hinge", *, penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None, random_state=None, learning_rate="optimal", eta0=0.01, power_t=0.5, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False, average=False, ): super().__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, class_weight=class_weight, warm_start=warm_start, average=average, ) def _check_proba(self): if self.loss not in ("log_loss", "modified_huber"): raise AttributeError( "probability estimates are not available for loss=%r" % self.loss ) return True @available_if(_check_proba) def predict_proba(self, X): """Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. 
Binary probability estimates for loss="modified_huber" are given by (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions it is necessary to perform proper probability calibration by wrapping the classifier with :class:`~sklearn.calibration.CalibratedClassifierCV` instead. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data for prediction. Returns ------- ndarray of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. References ---------- Zadrozny and Elkan, "Transforming classifier scores into multiclass probability estimates", SIGKDD'02, https://dl.acm.org/doi/pdf/10.1145/775047.775151 The justification for the formula in the loss="modified_huber" case is in the appendix B in: http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf """ check_is_fitted(self) if self.loss == "log_loss": return self._predict_proba_lr(X) elif self.loss == "modified_huber": binary = len(self.classes_) == 2 scores = self.decision_function(X) if binary: prob2 = np.ones((scores.shape[0], 2)) prob = prob2[:, 1] else: prob = scores np.clip(scores, -1, 1, prob) prob += 1.0 prob /= 2.0 if binary: prob2[:, 0] -= prob prob = prob2 else: # the above might assign zero to all classes, which doesn't # normalize neatly; work around this to produce uniform # probabilities prob_sum = prob.sum(axis=1) all_zero = prob_sum == 0 if np.any(all_zero): prob[all_zero, :] = 1 prob_sum[all_zero] = len(self.classes_) # normalize prob /= prob_sum.reshape((prob.shape[0], -1)) return prob else: raise NotImplementedError( "predict_(log_)proba only supported when" " loss='log_loss' or loss='modified_huber' " "(%r given)" % self.loss ) @available_if(_check_proba) def predict_log_proba(self, X): """Log of probability estimates. This method is only available for log loss and modified Huber loss. 
When loss="modified_huber", probability estimates may be hard zeros and ones, so taking the logarithm is not possible. See ``predict_proba`` for details. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data for prediction. Returns ------- T : array-like, shape (n_samples, n_classes) Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. """ return np.log(self.predict_proba(X))
SGDClassifier
python
pyca__cryptography
tests/x509/test_x509_ext.py
{ "start": 207973, "end": 212777 }
class ____: def test_eq(self, backend): sct = ( _load_cert( os.path.join("x509", "badssl-sct.pem"), x509.load_pem_x509_certificate, ) .extensions.get_extension_for_class( x509.PrecertificateSignedCertificateTimestamps ) .value[0] ) sct2 = ( _load_cert( os.path.join("x509", "badssl-sct.pem"), x509.load_pem_x509_certificate, ) .extensions.get_extension_for_class( x509.PrecertificateSignedCertificateTimestamps ) .value[0] ) assert sct == sct2 def test_ne(self, backend): sct = ( _load_cert( os.path.join("x509", "badssl-sct.pem"), x509.load_pem_x509_certificate, ) .extensions.get_extension_for_class( x509.PrecertificateSignedCertificateTimestamps ) .value[0] ) sct2 = ( _load_cert( os.path.join("x509", "cryptography-scts.pem"), x509.load_pem_x509_certificate, ) .extensions.get_extension_for_class( x509.PrecertificateSignedCertificateTimestamps ) .value[0] ) assert sct != sct2 assert sct != object() def test_hash(self, backend): sct = ( _load_cert( os.path.join("x509", "badssl-sct.pem"), x509.load_pem_x509_certificate, ) .extensions.get_extension_for_class( x509.PrecertificateSignedCertificateTimestamps ) .value[0] ) sct2 = ( _load_cert( os.path.join("x509", "badssl-sct.pem"), x509.load_pem_x509_certificate, ) .extensions.get_extension_for_class( x509.PrecertificateSignedCertificateTimestamps ) .value[0] ) sct3 = ( _load_cert( os.path.join("x509", "cryptography-scts.pem"), x509.load_pem_x509_certificate, ) .extensions.get_extension_for_class( x509.PrecertificateSignedCertificateTimestamps ) .value[0] ) assert hash(sct) == hash(sct2) assert hash(sct) != hash(sct3) def test_public_bytes(self, backend): ext = ( load_vectors_from_file( os.path.join("x509", "ocsp", "resp-sct-extension.der"), lambda data: ocsp.load_der_ocsp_response(data.read()), mode="rb", ) .single_extensions.get_extension_for_class( x509.SignedCertificateTimestamps ) .value ) assert ext.public_bytes() == ( b"\x04\x82\x01\xe6\x01\xe4\x00w\x00D\x94e.\xb0\xee\xce\xaf\xc4" 
b"@\x07\xd8\xa8\xfe(\xc0\xda\xe6\x82\xbe\xd8\xcb1\xb5?\xd33" b"\x96\xb5\xb6\x81\xa8\x00\x00\x01no\xc33h\x00\x00\x04\x03\x00" b"H0F\x02!\x00\xa0}J\xa7\xb1Y\xb4\x15P\xd7\x95Y\x12\xfb\xa1" b"\xdfh\x96u\xa3\x0f_\x01\xf2\xfd\xcbMI\x9bt\xe2\xfe\x02!\x00" b"\x89E\xd7\x86N<>\xe8\x07\xc4\xca\xdbO:\xb7\x9f]E\xbc\x1az" b"\xe5h\xab%\xdaukT\x8a\xf7\xc1\x00w\x00oSv\xac1\xf01\x19\xd8" b"\x99\x00\xa4Q\x15\xffw\x15\x1c\x11\xd9\x02\xc1\x00)\x06\x8d" b"\xb2\x08\x9a7\xd9\x13\x00\x00\x01no\xc33m\x00\x00\x04\x03" b"\x00H0F\x02!\x00\xd4\xe06\xd2\xed~{\x9fs-E2\xd8\xd2\xb41\xc6" b"v\x8b3\xf2\tS\x1d\xd8SUe\xe1\xcf\xfc;\x02!\x00\xd9cF[\x8e\xac" b'4\x02@\xd6\x8a\x10y\x98\x92\xbee\xf4\n\x11L\xbfpI(Y"O\x1al' b"\xe9g\x00w\x00\xbb\xd9\xdf\xbc\x1f\x8aq\xb5\x93\x94#\x97\xaa" b"\x92{G8W\x95\n\xabR\xe8\x1a\x90\x96d6\x8e\x1e\xd1\x85\x00" b"\x00\x01no\xc34g\x00\x00\x04\x03\x00H0F\x02!\x00\xf4:\xec" b"\x1b\xdeQ\r\xf8S\x9c\xf2\xeee<\xcf\xc5:\x0f\x0f\xeb\x8bv\x9f" b'8d.z\x9c"K\x9b\x11\x02!\x00\xe7`\xe9Ex\xf7)B<\xf7\xd62b\xfa' b"\xa2\xc7!\xc4\xbau\xcb\xad\x0ezEZ\x11\x13\xa1+\x89J\x00w\x00" b"\xeeK\xbd\xb7u\xce`\xba\xe1Bi\x1f\xab\xe1\x9ef\xa3\x0f~_\xb0" b"r\xd8\x83\x00\xc4{\x89z\xa8\xfd\xcb\x00\x00\x01no\xc32\xdd" b"\x00\x00\x04\x03\x00H0F\x02!\x00\x95Y\x81\x7f\xa4\xe5\x17o" b"\x06}\xac\xcdt-\xb0\xb8L\x18H\xecB\xcc-\xe5\x13>\x07\xba\xc0" b"}\xa3\xe6\x02!\x00\xbf\xc8\x88\x93m\x8d\xc3(GS\xaf=4}\x97" b"\xe6\xc2\x1djQ\x0e0\x8c\xcc\x9d\xc2\xc7\xc3\xb1\x0f\xec\x98" )
TestSignedCertificateTimestamps
python
GoogleCloudPlatform__python-docs-samples
functions/v2/typed/greeting/main.py
{ "start": 997, "end": 1355 }
class ____: message: str # Required to serialize the response def to_dict(self) -> dict: return { "message": self.message, } @functions_framework.typed def greeting(req: GreetingRequest): return GreetingResponse(message=f"Hello {req.first_name} {req.last_name}!") # [END functions_typed_greeting]
GreetingResponse
python
numpy__numpy
numpy/_core/tests/test_unicode.py
{ "start": 8098, "end": 8255 }
class ____(AssignValues): """Check the assignment of valued arrays (size 1, UCS2 values)""" ulen = 1 ucs_value = ucs2_value
TestAssignValues_1_UCS2
python
apache__airflow
providers/http/tests/unit/http/operators/test_http.py
{ "start": 1536, "end": 14215 }
class ____: @pytest.fixture(autouse=True) def setup_connections(self, create_connection_without_db): create_connection_without_db( Connection( conn_id="http_default", conn_type="http", host="test:8080/", extra='{"bearer": "test"}' ) ) def test_response_in_logs(self, requests_mock): """ Test that when using HttpOperator with 'GET', the log contains 'Example Domain' in it """ requests_mock.get("http://www.example.com", text="Example.com fake response") operator = HttpOperator( task_id="test_HTTP_op", method="GET", endpoint="/", http_conn_id="HTTP_EXAMPLE", log_response=True, ) result = operator.execute("Example.com fake response") assert result == "Example.com fake response" def test_response_in_logs_after_failed_check(self, requests_mock): """ Test that when using HttpOperator with log_response=True, the response is logged even if request_check fails """ def response_check(response): return response.text != "invalid response" requests_mock.get("http://www.example.com", text="invalid response") operator = HttpOperator( task_id="test_HTTP_op", method="GET", endpoint="/", http_conn_id="HTTP_EXAMPLE", log_response=True, response_check=response_check, ) with mock.patch.object(operator.log, "info") as mock_info: with pytest.raises(AirflowException): operator.execute({}) calls = [mock.call("Calling HTTP method"), mock.call("invalid response")] mock_info.assert_has_calls(calls, any_order=True) def test_filters_response(self, requests_mock): requests_mock.get("http://www.example.com", json={"value": 5}) operator = HttpOperator( task_id="test_HTTP_op", method="GET", endpoint="/", http_conn_id="HTTP_EXAMPLE", response_filter=lambda response: response.json(), ) result = operator.execute({}) assert result == {"value": 5} def test_async_defer_successfully(self, requests_mock): operator = HttpOperator( task_id="test_HTTP_op", deferrable=True, ) with pytest.raises(TaskDeferred) as exc: operator.execute({}) assert isinstance(exc.value.trigger, HttpTrigger), "Trigger is not a 
HttpTrigger" def test_async_execute_successfully(self, requests_mock): operator = HttpOperator( task_id="test_HTTP_op", deferrable=True, ) response = Response() response._content = b"content" result = operator.execute_complete( context={}, event={ "status": "success", "response": base64.standard_b64encode(pickle.dumps(response)).decode("ascii"), }, ) assert result == "content" @pytest.mark.parametrize( ( "data", "headers", "extra_options", "pagination_data", "pagination_headers", "pagination_extra_options", ), [ ({"data": 1}, {"x-head": "1"}, {"verify": False}, {"data": 2}, {"x-head": "0"}, {"verify": True}), ("data foo", {"x-head": "1"}, {"verify": False}, {"data": 2}, {"x-head": "0"}, {"verify": True}), ("data foo", {"x-head": "1"}, {"verify": False}, "data bar", {"x-head": "0"}, {"verify": True}), ({"data": 1}, {"x-head": "1"}, {"verify": False}, "data foo", {"x-head": "0"}, {"verify": True}), ], ) def test_pagination( self, requests_mock, data, headers, extra_options, pagination_data, pagination_headers, pagination_extra_options, ): """ Test that the HttpOperator calls repetitively the API when a pagination_function is provided, and as long as this function returns a dictionary that override previous' call parameters. 
""" is_second_call: bool = False extra_options_verify = extra_options["verify"] def pagination_function(response: Response) -> dict | None: """Paginated function which returns None at the second call.""" nonlocal is_second_call if not is_second_call: is_second_call = True return dict( endpoint=response.json()["endpoint"], data=pagination_data, headers=pagination_headers, extra_options=pagination_extra_options, ) return None first_endpoint = requests_mock.post("http://www.example.com/1", json={"value": 5, "endpoint": "2"}) second_endpoint = requests_mock.post("http://www.example.com/2", json={"value": 10, "endpoint": "3"}) operator = HttpOperator( task_id="test_HTTP_op", method="POST", endpoint="/1", data=data, headers=headers, extra_options=extra_options, http_conn_id="HTTP_EXAMPLE", pagination_function=pagination_function, response_filter=lambda resp: [entry.json()["value"] for entry in resp], ) result = operator.execute({}) # Ensure the initial call is made with parameters passed to the Operator first_call = first_endpoint.request_history[0] assert first_call.headers.items() >= headers.items() assert first_call.body == RequestEncodingMixin._encode_params(data) assert first_call.verify == extra_options_verify # Ensure the second - paginated - call is made with parameters merged from the pagination function second_call = second_endpoint.request_history[0] assert second_call.headers.items() >= pagination_headers.items() assert second_call.body == RequestEncodingMixin._encode_params(pagination_data) assert second_call.verify is pagination_extra_options["verify"] assert result == [5, 10] def test_async_pagination(self, requests_mock): """ Test that the HttpOperator calls asynchronously and repetitively the API when a pagination_function is provided, and as long as this function returns a dictionary that override previous' call parameters. 
""" def make_response_object() -> Response: response = Response() response._content = b'{"value": 5}' return response def create_resume_response_parameters() -> dict: response = make_response_object() return dict( context={}, event={ "status": "success", "response": base64.standard_b64encode(pickle.dumps(response)).decode("ascii"), }, ) has_returned: bool = False def pagination_function(response: Response) -> dict | None: """Paginated function which returns None at the second call.""" nonlocal has_returned if not has_returned: has_returned = True return dict(endpoint="/") return None operator = HttpOperator( task_id="test_HTTP_op", pagination_function=pagination_function, deferrable=True, ) # Do two calls: On the first one, the pagination_function creates a new # deferrable trigger. On the second one, the pagination_function returns # None, which ends the execution of the Operator with contextlib.suppress(TaskDeferred): operator.execute_complete(**create_resume_response_parameters()) result = operator.execute_complete( **create_resume_response_parameters(), paginated_responses=[make_response_object()] ) assert result == ['{"value": 5}', '{"value": 5}'] @patch.object(HttpHook, "run_with_advanced_retry") def test_retry_args(self, mock_run_with_advanced_retry, requests_mock): requests_mock.get("http://www.example.com", exc=Exception("Example Exception")) retry_args = dict( wait=tenacity.wait_none(), stop=tenacity.stop_after_attempt(5), retry=tenacity.retry_if_exception_type(Exception), ) operator = HttpOperator( task_id="test_HTTP_op", method="GET", endpoint="/", http_conn_id="HTTP_EXAMPLE", retry_args=retry_args, ) operator.execute({}) mock_run_with_advanced_retry.assert_called_with(retry_args, "/", {}, {}, {}) assert mock_run_with_advanced_retry.call_count == 1 @patch.object(HttpHook, "run_with_advanced_retry") def test_pagination_retry_args( self, mock_run_with_advanced_retry, requests_mock, ): is_second_call: bool = False def pagination_function(response: 
Response) -> dict | None: """Paginated function which returns None at the second call.""" nonlocal is_second_call if not is_second_call: is_second_call = True return dict( endpoint=response.json()["endpoint"], ) return None retry_args = dict( wait=tenacity.wait_none(), stop=tenacity.stop_after_attempt(5), retry=tenacity.retry_if_exception_type(Exception), ) operator = HttpOperator( task_id="test_HTTP_op", method="GET", endpoint="/", http_conn_id="HTTP_EXAMPLE", pagination_function=pagination_function, retry_args=retry_args, ) response = Response() response.status_code = 200 response._content = json.dumps({"value": 5, "endpoint": "/"}).encode("utf-8") response.headers["Content-Type"] = "application/json" mock_run_with_advanced_retry.return_value = response operator.execute({}) mock_run_with_advanced_retry.assert_has_calls( [ call(retry_args, "/", {}, {}, {}), call(retry_args, endpoint="/", data={}, headers={}, extra_options={}), ] ) assert mock_run_with_advanced_retry.call_count == 2 def _capture_defer(self, monkeypatch): captured = {} def _fake_defer(self, *, trigger, method_name, **kwargs): captured["trigger"] = trigger captured["kwargs"] = kwargs monkeypatch.setattr(HttpOperator, "defer", _fake_defer) return captured @pytest.mark.parametrize( ("login", "password", "auth_type", "expect_cls"), [ ("user", "password", None, BasicAuth), (None, None, None, type(None)), ("user", "password", BasicAuth, BasicAuth), ], ) def test_auth_type_is_serialised_as_string(self, monkeypatch, login, password, auth_type, expect_cls): monkeypatch.setattr( base.BaseHook, "get_connection", lambda _cid: SimpleNamespace(login=login, password=password) ) captured = self._capture_defer(monkeypatch) HttpOperator(task_id="test_HTTP_op", deferrable=True, auth_type=auth_type).execute(context={}) trigger = captured["trigger"] kwargs = captured["trigger"].serialize()[1] expected_str = serialize_auth_type(expect_cls) if expect_cls is not type(None) else None assert kwargs["auth_type"] == 
expected_str assert trigger.auth_type == expect_cls or (trigger.auth_type is None and expect_cls is type(None)) def test_resolve_auth_type_variants(self, monkeypatch): monkeypatch.setattr( base.BaseHook, "get_connection", lambda _cid: SimpleNamespace(login="user", password="password") ) assert HttpOperator(task_id="test_HTTP_op_1")._resolve_auth_type() is BasicAuth class DummyAuth: def __init__(self, *_, **__): ... assert HttpOperator(task_id="test_HTTP_op_2", auth_type=DummyAuth)._resolve_auth_type() is DummyAuth monkeypatch.setattr( base.BaseHook, "get_connection", lambda _cid: SimpleNamespace(login=None, password=None) ) assert HttpOperator(task_id="test_HTTP_op_3")._resolve_auth_type() is None
TestHttpOperator
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_image05.py
{ "start": 315, "end": 1041 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("image05.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.insert_image("A1", self.image_dir + "blue.png") worksheet.insert_image("B3", self.image_dir + "red.jpg") worksheet.insert_image("D5", self.image_dir + "yellow.jpg") worksheet.insert_image("F9", self.image_dir + "grey.png") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
fastapi__sqlmodel
docs_src/tutorial/offset_and_limit/tutorial003.py
{ "start": 100, "end": 1628 }
class ____(SQLModel, table=True): id: Optional[int] = Field(default=None, primary_key=True) name: str = Field(index=True) secret_name: str age: Optional[int] = Field(default=None, index=True) sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" engine = create_engine(sqlite_url, echo=True) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def create_heroes(): hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson") hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador") hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48) hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32) hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35) hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36) hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93) with Session(engine) as session: session.add(hero_1) session.add(hero_2) session.add(hero_3) session.add(hero_4) session.add(hero_5) session.add(hero_6) session.add(hero_7) session.commit() def select_heroes(): with Session(engine) as session: statement = select(Hero).offset(6).limit(3) results = session.exec(statement) heroes = results.all() print(heroes) def main(): create_db_and_tables() create_heroes() select_heroes() if __name__ == "__main__": main()
Hero
python
getsentry__sentry
src/sentry/identity/services/identity/model.py
{ "start": 1065, "end": 1238 }
class ____(TypedDict, total=False): id: int user_id: int identity_ext_id: str provider_id: int provider_ext_id: str provider_type: str
IdentityFilterArgs
python
huggingface__transformers
tests/models/clip/test_modeling_clip.py
{ "start": 1751, "end": 5781 }
class ____: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return CLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = CLIPVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * 
(image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_projection(self, config, pixel_values): model = CLIPVisionModelWithProjection(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.image_embeds.shape, (self.batch_size, self.projection_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) def test_eager_matches_sdpa_inference(self, *args): return getattr(ModelTesterMixin, self._testMethodName)(self)
CLIPVisionModelTester
python
facebook__pyre-check
scripts/setup.py
{ "start": 1203, "end": 15729 }
class ____(Enum): EXTERNAL = "external" FACEBOOK = "facebook" def _custom_linker_option(pyre_directory: Path, build_type: BuildType) -> str: # HACK: This is a temporary workaround for inconsistent OS installations # in FB-internal CI. Can be removed once all fleets are upgraded. if build_type == BuildType.FACEBOOK and sys.platform == "linux": return ( (pyre_directory / "facebook" / "scripts" / "custom_linker_options.txt") .read_text() .rstrip() ) else: return "" def detect_opam_version() -> Tuple[int, ...]: LOG.info(["opam", "--version"]) version = subprocess.check_output( ["opam", "--version"], universal_newlines=True ).strip() try: version_semver = version.split("~")[0] version = tuple(map(int, version_semver.split("."))) except ValueError as error: message = f"Failed to parse output of `opam --version`: `{version}`" raise OpamVersionParseError(message) from error LOG.info(f"Found opam version {'.'.join(map(str, version))}") if version[0] != 2: LOG.error( "Pyre only supports opam 2.0.0 and above, please update your " + "opam version." 
) raise OldOpam return version def _run_command( command: List[str], current_working_directory: Optional[Path] = None, add_environment_variables: Optional[Mapping[str, str]] = None, ) -> str: if add_environment_variables: environment_variables = { **os.environ, **add_environment_variables, } else: environment_variables = None LOG.info(command) try: output = subprocess.check_output( command, universal_newlines=True, cwd=current_working_directory, env=environment_variables, ) except CalledProcessError as called_process_error: LOG.info( f"Command: {command} returned non zero exit code.\n" f"stdout: {called_process_error.stdout}\n" f"stderr: {called_process_error.stderr}" ) raise called_process_error if output.endswith("\n"): return output[:-1] else: return output def _switch_name() -> str: return f"pyre-{COMPILER_VERSION}" def _compiler_specification() -> str: """ Command-line argument to set the compiler version in `opam switch create ...` The format for how to specify this changed in 4.12.0, see https://discuss.ocaml.org/t/experimental-new-layout-for-the-ocaml-variants-packages-in-opam-repository/6779 """ return ",".join( [ f"--packages=ocaml-variants.{COMPILER_VERSION}+options", "ocaml-option-flambda", ] ) def _opam_command(opam_version: Tuple[int, ...]) -> List[str]: command = ["opam"] # We need to explicitly set the opam cli version we are using, # otherwise it automatically uses `2.0` which means we can't use # some options from 2.1 such as `--assume-depexts`. 
if opam_version >= (2, 1): command.append("--cli=2.1") if opam_version >= (2, 2): command.append("--cli=2.2") return command def produce_root_dune_file(pyre_directory: Path, build_type: BuildType) -> None: # lint-ignore: NoUnsafeFilesystemRule with open(pyre_directory / "source" / "dune.in") as dune_in: # lint-ignore: NoUnsafeFilesystemRule with open(pyre_directory / "source" / "dune", "w") as dune: dune_data = dune_in.read() dune.write( "; WARNING: This file is generated from dune.in when invoking 'make'.\n" ) dune.write("; Please edit the original file.\n\n") dune.write( dune_data.replace("%VERSION%", build_type.value).replace( "%CUSTOM_LINKER_OPTION%", _custom_linker_option(pyre_directory, build_type), ) ) def produce_taint_test_dune_file(pyre_directory: Path) -> None: directory = pyre_directory / "source/interprocedural_analyses/taint/test" # lint-ignore: NoUnsafeFilesystemRule with open(directory / "dune.in") as dune_in: # lint-ignore: NoUnsafeFilesystemRule with open(directory / "dune", "w") as dune: dune.write( "; WARNING: This file is generated from dune.in when invoking 'make'.\n" ) dune.write("; Please edit the original file.\n\n") dune.write(dune_in.read()) for test_file in (directory / "integration").glob("*.py"): dune.write("\n") dune.write("(rule\n") dune.write(" (alias runtest)\n") dependencies = [ f"integration/{test_file.name}", f"integration/{test_file.name}.models", f"integration/{test_file.name}.cg", f"integration/{test_file.name}.hofcg", f"integration/{test_file.name}.config", f"integration/{test_file.name}.overrides", f"integration/{test_file.name}.pysa", ] dependencies = [ path for path in dependencies if (directory / path).exists() ] dune.write( f" (deps\n{textwrap.indent('\n'.join(dependencies), prefix=' ')})\n" ) action = "(run ./integrationTest.exe)" action = f"(setenv\n PYSA_INTEGRATION_TEST\n {test_file.name}\n{textwrap.indent(action, prefix=' ')})" if test_file.name != "sanitize_tito_shaping.py": # Enable invariant checking, except for 
the test above. action = f"(setenv\n PYSA_CHECK_INVARIANTS\n 1\n{textwrap.indent(action, prefix=' ')})" dune.write(f" (action\n{textwrap.indent(action, prefix=' ')}))\n") def _get_opam_environment_variables( opam_root: Path, opam_version: Tuple[int, ...] ) -> Dict[str, str]: LOG.info("Activating opam") opam_env_result = _run_command( _opam_command(opam_version) + [ "env", "--yes", "--switch", _switch_name(), "--root", opam_root.as_posix(), "--set-root", "--set-switch", "--shell=bash", ] ) opam_environment_variables: Dict[str, str] = {} # `opam env` produces lines of two forms: # - comments like ": this comment, starts with a colon;" # - lines defining and exporting env vars like "ENV_VAR=value; export ENV_VAR;" for line in opam_env_result.split("\n"): if not line.startswith(":"): environment_variable, quoted_value = line.split(";")[0].split("=") value = quoted_value[1:-1] LOG.info(f'{environment_variable}="{value}"') # noqa: B907 opam_environment_variables[environment_variable] = value return opam_environment_variables def opam_update( opam_root: Path, opam_version: Tuple[int, ...], add_environment_variables: Optional[Mapping[str, str]] = None, ) -> None: _run_command( _opam_command(opam_version) + [ "update", "--root", opam_root.as_posix(), ], add_environment_variables=add_environment_variables, ) def _initialize_opam_if_needed( opam_root: Path, opam_version: Tuple[int, ...], add_environment_variables: Optional[Mapping[str, str]] = None, ) -> None: # `opam init` is a noop if opam is already initialized, so it's safe to run # this unconditionally. 
_run_command( _opam_command(opam_version) + [ "init", "--bare", "--yes", "--disable-sandboxing", "--root", opam_root.as_posix(), "default", "https://opam.ocaml.org", ], add_environment_variables=add_environment_variables, ) def _select_or_create_switch( opam_root: Path, opam_version: Tuple[int, ...], add_environment_variables: Optional[Mapping[str, str]] = None, ) -> None: switch_name = _switch_name() switch_root = opam_root / switch_name if switch_root.is_dir(): _run_command( _opam_command(opam_version) + [ "switch", "set", _switch_name(), "--root", opam_root.as_posix(), ] ) else: _run_command( _opam_command(opam_version) + [ "switch", "create", _switch_name(), _compiler_specification(), "--yes", "--root", opam_root.as_posix(), ], add_environment_variables=add_environment_variables, ) def _install_dependencies( opam_root: Path, opam_version: Tuple[int, ...], add_environment_variables: Optional[Mapping[str, str]] = None, rust_path: Optional[Path] = None, ) -> Mapping[str, str]: environment_variables = { **({} if add_environment_variables is None else add_environment_variables), **_get_opam_environment_variables(opam_root, opam_version), } if rust_path is not None: environment_variables["PATH"] = ( str(rust_path) + ":" + environment_variables["PATH"] ) opam_install_command = _opam_command(opam_version) + ["install", "--yes"] if sys.platform == "linux": # osx fails on sandcastle with exit status 2 (illegal argument) with this. # unable to repro locally on osx. 
opam_install_command.append("--assume-depexts") opam_install_command += DEPENDENCIES _run_command(opam_install_command, add_environment_variables=environment_variables) return environment_variables def initialize_opam_switch( opam_root: Path, opam_version: Tuple[int, ...], release: bool = False, add_environment_variables: Optional[Mapping[str, str]] = None, rust_path: Optional[Path] = None, ) -> None: _initialize_opam_if_needed(opam_root, opam_version, add_environment_variables) opam_update(opam_root, opam_version, add_environment_variables) _select_or_create_switch(opam_root, opam_version, add_environment_variables) _install_dependencies(opam_root, opam_version, add_environment_variables, rust_path) def full_setup( opam_root: Path, opam_version: Tuple[int, ...], pyre_directory: Path, *, release: bool = False, run_tests: bool = False, run_clean: bool = False, build_type: BuildType, add_environment_variables: Optional[Mapping[str, str]] = None, rust_path: Optional[Path] = None, ) -> None: opam_environment_variables: Mapping[str, str] = _install_dependencies( opam_root, opam_version, add_environment_variables=add_environment_variables, rust_path=rust_path, ) def run_in_opam_environment(command: List[str]) -> None: _run_command( command, current_working_directory=pyre_directory / "source", add_environment_variables=opam_environment_variables, ) produce_root_dune_file(pyre_directory, build_type) produce_taint_test_dune_file(pyre_directory) if run_clean: # Note: we do not run `make clean` because we want the result of the # explicit `produce_root_dune_file` to remain. # Dune 3.7 runs into `rmdir` failure when cleaning the `_build` directory # for some reason. Manually clean the dir to work around the issue. run_in_opam_environment(["rm", "-rf", "_build"]) if release: LOG.info("Running a release build. 
This may take a while.") run_in_opam_environment(["make", "release"]) if run_tests: run_in_opam_environment(["make", "release_test"]) else: run_in_opam_environment(["make", "dev"]) if run_tests: run_in_opam_environment(["make", "test"]) def _make_opam_root(local: bool) -> Path: home = Path.home() home_opam = home / ".opam" if local and not home_opam.is_dir(): local_opam = home / "local" / "opam" local_opam.parent.mkdir(parents=True, exist_ok=True) local_opam.symlink_to(home_opam, target_is_directory=True) return home_opam def _infer_build_type_from_filesystem(pyre_directory: Path) -> BuildType: if (pyre_directory / "facebook").is_dir(): return BuildType.FACEBOOK else: return BuildType.EXTERNAL def setup( add_environment_variables: Optional[Mapping[str, str]] = None, ) -> None: # lint-ignore: NoCustomLogRule logging.basicConfig( level=logging.INFO, format="[%(asctime)s] [%(levelname)s] %(message)s" ) parser = argparse.ArgumentParser(description="Set up Pyre.") parser.add_argument("--pyre-directory", type=Path) parser.add_argument("--local", action="store_true") parser.add_argument("--configure", action="store_true") parser.add_argument("--release", action="store_true") parser.add_argument("--build-type", type=BuildType) parser.add_argument("--no-tests", action="store_true") parser.add_argument("--rust-path", type=Path) parsed = parser.parse_args() pyre_directory = parsed.pyre_directory if not pyre_directory: pyre_directory = Path(__file__).parent.parent.absolute() opam_root = _make_opam_root(parsed.local) build_type = parsed.build_type or _infer_build_type_from_filesystem(pyre_directory) opam_version = detect_opam_version() release = parsed.release if parsed.configure: produce_root_dune_file(pyre_directory, build_type) produce_taint_test_dune_file(pyre_directory) else: initialize_opam_switch( opam_root, opam_version, release, add_environment_variables, parsed.rust_path, ) full_setup( opam_root, opam_version, pyre_directory, release=release, run_tests=not 
parsed.no_tests, build_type=build_type, add_environment_variables=add_environment_variables, rust_path=parsed.rust_path, ) if __name__ == "__main__": setup(add_environment_variables=None)
BuildType
python
astropy__astropy
astropy/table/groups.py
{ "start": 10558, "end": 13971 }
class ____(BaseGroups): def __init__(self, parent_table, indices=None, keys=None): self.parent_table = parent_table # parent Table self._indices = indices self._keys = keys @property def key_colnames(self): """ Return the names of columns in the parent table that were used for grouping. """ # If the table was grouped by key columns *in* the table then treat those columns # differently in aggregation. In this case keys will be a Table with # keys.meta['grouped_by_table_cols'] == True. Keys might not be a Table so we # need to handle this. grouped_by_table_cols = getattr(self.keys, "meta", {}).get( "grouped_by_table_cols", False ) return self.keys.colnames if grouped_by_table_cols else () @property def indices(self): if self._indices is None: return np.array([0, len(self.parent_table)]) else: return self._indices def aggregate(self, func): """ Aggregate each group in the Table into a single row by applying the reduction function ``func`` to group values in each column. Parameters ---------- func : function Function that reduces an array of values to a single value Returns ------- out : Table New table with the aggregated rows. """ i0s = self.indices[:-1] out_cols = [] parent_table = self.parent_table for col in parent_table.columns.values(): # For key columns just pick off first in each group since they are identical if col.info.name in self.key_colnames: new_col = col.take(i0s) else: try: new_col = col.info.groups.aggregate(func) except TypeError as err: warnings.warn(str(err), AstropyUserWarning) continue out_cols.append(new_col) return parent_table.__class__(out_cols, meta=parent_table.meta) def filter(self, func): """ Filter groups in the Table based on evaluating function ``func`` on each group sub-table. The function which is passed to this method must accept two arguments: - ``table`` : `Table` object - ``key_colnames`` : tuple of column names in ``table`` used as keys for grouping It must then return either `True` or `False`. 
As an example, the following will select all table groups with only positive values in the non-key columns:: def all_positive(table, key_colnames): colnames = [name for name in table.colnames if name not in key_colnames] for colname in colnames: if np.any(table[colname] < 0): return False return True Parameters ---------- func : function Filter function Returns ------- out : Table New table with the aggregated rows. """ mask = np.empty(len(self), dtype=bool) key_colnames = self.key_colnames for i, group_table in enumerate(self): mask[i] = func(group_table, key_colnames) return self[mask] @property def keys(self): return self._keys
TableGroups
python
google__pytype
pytype/imports/pickle_utils.py
{ "start": 1593, "end": 5316 }
class ____(Exception): """Errors when loading a pickled pytd file.""" def __init__(self, filename: Path): self.filename = os.fspath(filename) msg = f"Error loading pickle file: {self.filename}" super().__init__(msg) Encoder = msgspec.msgpack.Encoder(order="deterministic") AstDecoder = msgspec.msgpack.Decoder(type=serialize_ast.SerializableAst) BuiltinsDecoder = msgspec.msgpack.Decoder(type=serialize_ast.ModuleBundle) _DecT = TypeVar( "_DecT", serialize_ast.SerializableAst, serialize_ast.ModuleBundle ) _Dec = msgspec.msgpack.Decoder _Serializable = Union[serialize_ast.SerializableAst, serialize_ast.ModuleBundle] def _Load( dec: "_Dec[_DecT]", filename: Path, compress: bool = False, open_function=open, ) -> _DecT: """Loads a serialized file. Args: dec: The msgspec.Decoder to use. filename: The file to read. compress: if True, the file will be opened using gzip. open_function: The function to open the file with. Returns: The decoded object. Raises: LoadPickleError, if there is an OSError, gzip error, or msgspec error. 
""" try: with open_function(filename, "rb") as fi: if compress: with gzip.GzipFile(fileobj=fi) as zfi: data = zfi.read() else: data = fi.read() return dec.decode(data) except ( OSError, gzip.BadGzipFile, msgspec.DecodeError, msgspec.ValidationError, ) as e: raise LoadPickleError(filename) from e def DecodeAst(data: bytes) -> serialize_ast.SerializableAst: return AstDecoder.decode(data) def LoadAst( filename: Path, compress: bool = False, open_function=open ) -> serialize_ast.SerializableAst: return _Load( AstDecoder, filename, compress, open_function ) # pytype: disable=bad-return-type def DecodeBuiltins(data: bytes) -> serialize_ast.ModuleBundle: return BuiltinsDecoder.decode(data) def LoadBuiltins( filename: Path, compress: bool = False, open_function=open ) -> serialize_ast.ModuleBundle: return _Load(BuiltinsDecoder, filename, compress, open_function) def Encode(obj: _Serializable) -> bytes: return Encoder.encode(obj) def Save( obj: _Serializable, filename: Path, compress: bool = False, open_function=open, ) -> None: """Saves a serializable object to a file. Args: obj: The object to serialize. filename: filename to write to. compress: if True, the data will be compressed using gzip. The given filename will be used, unaltered. open_function: The function to use to open files. Defaults to the builtin open() function. """ with open_function(filename, "wb") as fi: if compress: # We blank the filename and set the mtime explicitly to produce # deterministic gzip files. 
with gzip.GzipFile(filename="", mode="wb", fileobj=fi, mtime=1.0) as zfi: zfi.write(Encode(obj)) else: fi.write(Encode(obj)) def Serialize( ast: pytd.TypeDeclUnit, src_path: str | None = None, metadata=None ) -> bytes: out = serialize_ast.SerializeAst(ast, src_path, metadata) return Encode(out) def SerializeAndSave( ast: pytd.TypeDeclUnit, filename: Path, *, compress: bool = False, open_function=open, src_path: str | None = None, metadata=None, ) -> None: out = serialize_ast.SerializeAst(ast, src_path, metadata) Save(out, filename, compress, open_function) def PrepareModuleBundle( modules: Iterable[tuple[str, str, pytd.TypeDeclUnit]], ) -> serialize_ast.ModuleBundle: raw = lambda ast, filename: msgspec.Raw(Serialize(ast, src_path=filename)) return tuple( ((name, raw(module, filename)) for name, filename, module in modules) )
LoadPickleError
python
gevent__gevent
src/gevent/tests/test__api_timeout.py
{ "start": 1404, "end": 1685 }
class ____(object): update_now_calls = 0 def __init__(self, loop): self.loop = loop def __getattr__(self, name): return getattr(self.loop, name) def update_now(self): self.update_now_calls += 1 self.loop.update_now()
_UpdateNowProxy
python
doocs__leetcode
solution/0300-0399/0393.UTF-8 Validation/Solution.py
{ "start": 0, "end": 524 }
class ____: def validUtf8(self, data: List[int]) -> bool: cnt = 0 for v in data: if cnt > 0: if v >> 6 != 0b10: return False cnt -= 1 elif v >> 7 == 0: cnt = 0 elif v >> 5 == 0b110: cnt = 1 elif v >> 4 == 0b1110: cnt = 2 elif v >> 3 == 0b11110: cnt = 3 else: return False return cnt == 0
Solution
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_grad_test.py
{ "start": 1802, "end": 6324 }
class ____(test.TestCase): @classmethod def setUpClass(cls): super(CSRSparseMatrixGradTest, cls).setUpClass() cls._gpu_available = test_util.is_gpu_available() # TODO(penporn): Make these tests runnable on eager mode. # (tf.gradients and gradient_checker only run in graph mode.) @test_util.run_deprecated_v1 def testLargeBatchConversionGrad(self): if not self._gpu_available: return sparsify = lambda m: m * (m > 0) for dense_shape in ([53, 65, 127], [127, 65]): mats_val = sparsify(np.random.randn(*dense_shape)) with self.test_session() as sess: mats = math_ops.cast(mats_val, dtype=dtypes.float32) sparse_mats = dense_to_csr_sparse_matrix(mats) dense_mats = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense( sparse_mats, dtypes.float32) grad_vals = np.random.randn(*dense_shape).astype(np.float32) grad_out = gradients_impl.gradients([dense_mats], [mats], [grad_vals])[0] self.assertEqual(grad_out.dtype, dtypes.float32) self.assertEqual(grad_out.shape, dense_shape) grad_out_value = sess.run(grad_out) tf_logging.info("testLargeBatchConversionGrad: Testing shape %s" % dense_shape) nonzero_indices = abs(mats_val) > 0.0 self.assertAllEqual(grad_out_value[nonzero_indices], grad_vals[nonzero_indices]) self.assertTrue( np.all(grad_out_value[np.logical_not(nonzero_indices)] == 0.0)) @test_util.run_deprecated_v1 def testLargeBatchSparseConversionGrad(self): sparsify = lambda m: m * (m > 0) for dense_shape in ([53, 65, 127], [127, 65]): mats_val = sparsify(np.random.randn(*dense_shape)) with self.session(use_gpu=True) as sess: indices = array_ops.where_v2( math_ops.not_equal(mats_val, array_ops.zeros_like(mats_val))) values = math_ops.cast( array_ops.gather_nd(mats_val, indices), dtype=dtypes.float32) grad_vals = np.random.randn(*sess.run(values).shape).astype(np.float32) csr_matrix = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( indices, values, dense_shape) new_coo_tensor = ( sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor( csr_matrix, type=dtypes.float32)) 
grad_out = gradients_impl.gradients([new_coo_tensor.values], [values], [grad_vals])[0] self.assertEqual(grad_out.dtype, dtypes.float32) grad_out_vals = sess.run(grad_out) self.assertAllClose(grad_vals, grad_out_vals) @test_util.run_deprecated_v1 def testLargeBatchSparseMatrixAddGrad(self): if not self._gpu_available: return sparsify = lambda m: m * (m > 0) for dense_shape in ([53, 65, 127], [127, 65]): a_mats_val = sparsify(np.random.randn(*dense_shape)) b_mats_val = sparsify(np.random.randn(*dense_shape)) alpha = np.float32(0.5) beta = np.float32(-1.5) grad_vals = np.random.randn(*dense_shape).astype(np.float32) expected_a_grad = alpha * grad_vals expected_b_grad = beta * grad_vals expected_a_grad[abs(a_mats_val) == 0.0] = 0.0 expected_b_grad[abs(b_mats_val) == 0.0] = 0.0 with self.test_session() as sess: a_mats = math_ops.cast(a_mats_val, dtype=dtypes.float32) b_mats = math_ops.cast(b_mats_val, dtype=dtypes.float32) a_sm = dense_to_csr_sparse_matrix(a_mats) b_sm = dense_to_csr_sparse_matrix(b_mats) c_sm = sparse_csr_matrix_ops.sparse_matrix_add( a_sm, b_sm, alpha=alpha, beta=beta) c_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense( c_sm, dtypes.float32) a_grad, b_grad = gradients_impl.gradients([c_dense], [a_mats, b_mats], [grad_vals]) self.assertEqual(a_grad.dtype, dtypes.float32) self.assertEqual(b_grad.dtype, dtypes.float32) self.assertEqual(a_grad.shape, dense_shape) self.assertEqual(b_grad.shape, dense_shape) a_grad_value, b_grad_value = sess.run((a_grad, b_grad)) tf_logging.info("testLargeBatchConversionGrad: Testing shape %s" % dense_shape) self.assertAllEqual(expected_a_grad, a_grad_value) self.assertAllEqual(expected_b_grad, b_grad_value) if __name__ == "__main__": test.main()
CSRSparseMatrixGradTest
python
getsentry__sentry
tests/sentry/autofix/test_utils.py
{ "start": 4154, "end": 9254 }
class ____(TestCase): @patch("requests.post") def test_get_autofix_state_success_with_group_id(self, mock_post: MagicMock) -> None: # Setup mock response mock_response = mock_post.return_value mock_response.raise_for_status = lambda: None mock_response.json.return_value = { "group_id": 123, "state": { "run_id": 456, "request": { "project_id": 789, "organization_id": 999, "issue": {"id": 123, "title": "Test Issue"}, "repos": [], }, "updated_at": "2023-07-18T12:00:00Z", "status": "PROCESSING", }, } # Call the function result = get_autofix_state(group_id=123, organization_id=999) # Assertions assert isinstance(result, AutofixState) assert result.run_id == 456 assert result.request == { "project_id": 789, "organization_id": 999, "issue": {"id": 123, "title": "Test Issue"}, "repos": [], } assert result.updated_at == datetime(2023, 7, 18, 12, 0, tzinfo=timezone.utc) assert result.status == AutofixStatus.PROCESSING mock_post.assert_called_once_with( f"{settings.SEER_AUTOFIX_URL}/v1/automation/autofix/state", data=b'{"group_id":123,"run_id":null,"check_repo_access":false,"is_user_fetching":false}', headers={"content-type": "application/json;charset=utf-8"}, ) @patch("requests.post") def test_get_autofix_state_success_with_run_id(self, mock_post: MagicMock) -> None: # Setup mock response mock_response = mock_post.return_value mock_response.raise_for_status = lambda: None mock_response.json.return_value = { "run_id": 456, "state": { "run_id": 456, "request": { "project_id": 789, "organization_id": 999, "issue": {"id": 123, "title": "Test Issue"}, "repos": [], }, "updated_at": "2023-07-18T12:00:00Z", "status": "COMPLETED", }, } # Call the function result = get_autofix_state(run_id=456, organization_id=999) # Assertions assert isinstance(result, AutofixState) assert result.run_id == 456 assert result.request == { "project_id": 789, "organization_id": 999, "issue": {"id": 123, "title": "Test Issue"}, "repos": [], } assert result.updated_at == datetime(2023, 7, 18, 12, 0, 
tzinfo=timezone.utc) assert result.status == AutofixStatus.COMPLETED mock_post.assert_called_once_with( f"{settings.SEER_AUTOFIX_URL}/v1/automation/autofix/state", data=b'{"group_id":null,"run_id":456,"check_repo_access":false,"is_user_fetching":false}', headers={"content-type": "application/json;charset=utf-8"}, ) @patch("requests.post") def test_get_autofix_state_no_result(self, mock_post: MagicMock) -> None: # Setup mock response mock_response = mock_post.return_value mock_response.raise_for_status = lambda: None mock_response.json.return_value = {} # Call the function result = get_autofix_state(group_id=123, organization_id=999) # Assertions assert result is None @patch("requests.post") def test_get_autofix_state_http_error(self, mock_post: MagicMock) -> None: # Setup mock response to raise HTTP error mock_response = mock_post.return_value mock_response.raise_for_status.side_effect = Exception("HTTP Error") # Call the function and expect an exception with pytest.raises(Exception) as context: get_autofix_state(group_id=123, organization_id=999) # Assertions assert "HTTP Error" in str(context.value) @patch("requests.post") def test_get_autofix_state_raises_on_org_id_mismatch(self, mock_post: MagicMock) -> None: # Setup mock response where returned state has a different organization_id mock_response = mock_post.return_value mock_response.raise_for_status = lambda: None mock_response.json.return_value = { "group_id": 123, "state": { "run_id": 456, "request": { "project_id": 789, "organization_id": 111, # mismatched org id "issue": {"id": 123, "title": "Test Issue"}, "repos": [], }, "updated_at": "2023-07-18T12:00:00Z", "status": "PROCESSING", }, } # Expect SeerPermissionError due to org id mismatch with pytest.raises(SeerPermissionError): get_autofix_state(group_id=123, organization_id=999)
TestGetAutofixState
python
dagster-io__dagster
python_modules/libraries/dagster-deltalake/dagster_deltalake/config.py
{ "start": 4451, "end": 6545 }
class ____(Config): """Configuration for http client interacting with storage APIs.""" allow_http: Optional[bool] = None """Allow non-TLS, i.e. non-HTTPS connections""" allow_invalid_certificates: Optional[bool] = None """Skip certificate validation on https connections. ## Warning You should think very carefully before using this method. If invalid certificates are trusted, any certificate for any site will be trusted for use. This includes expired certificates. This introduces significant vulnerabilities, and should only be used as a last resort or for testing """ connect_timeout: Optional[int] = None """Timeout for only the connect phase of a Client""" default_content_type: Optional[str] = None """default CONTENT_TYPE for uploads""" http1_only: Optional[bool] = None """Only use http1 connections""" http2_keep_alive_interval: Optional[int] = None """Interval for HTTP2 Ping frames should be sent to keep a connection alive.""" http2_keep_alive_timeout: Optional[int] = None """Timeout for receiving an acknowledgement of the keep-alive ping.""" http2_keep_alive_while_idle: Optional[int] = None """Enable HTTP2 keep alive pings for idle connections""" http2_only: Optional[bool] = None """Only use http2 connections""" pool_idle_timeout: Optional[int] = None """The pool max idle timeout This is the length of time an idle connection will be kept alive """ pool_max_idle_per_host: Optional[int] = None """maximum number of idle connections per host""" proxy_url: Optional[str] = None """HTTP proxy to use for requests""" timeout: Optional[str] = None """Request timeout (e.g. "120s") The timeout is applied from when the request starts connecting until the response body has finished """ user_agent: Optional[str] = None """User-Agent header to be used by this client""" def str_dict(self) -> dict[str, str]: """Storage options as str dict.""" return _to_str_dict(self.dict())
ClientConfig
python
matplotlib__matplotlib
lib/mpl_toolkits/axes_grid1/axes_size.py
{ "start": 5253, "end": 5464 }
class ____(MaxExtent): """ Size whose absolute part is the largest height of the given *artist_list*. """ def __init__(self, artist_list): super().__init__(artist_list, "height")
MaxHeight
python
fluentpython__example-code
10-seq-hacking/vector_v5.py
{ "start": 4470, "end": 7188 }
class ____: typecode = 'd' def __init__(self, components): self._components = array(self.typecode, components) def __iter__(self): return iter(self._components) def __repr__(self): components = reprlib.repr(self._components) components = components[components.find('['):-1] return 'Vector({})'.format(components) def __str__(self): return str(tuple(self)) def __bytes__(self): return (bytes([ord(self.typecode)]) + bytes(self._components)) def __eq__(self, other): return (len(self) == len(other) and all(a == b for a, b in zip(self, other))) def __hash__(self): hashes = (hash(x) for x in self) return functools.reduce(operator.xor, hashes, 0) def __abs__(self): return math.sqrt(sum(x * x for x in self)) def __bool__(self): return bool(abs(self)) def __len__(self): return len(self._components) def __getitem__(self, index): cls = type(self) if isinstance(index, slice): return cls(self._components[index]) elif isinstance(index, numbers.Integral): return self._components[index] else: msg = '{.__name__} indices must be integers' raise TypeError(msg.format(cls)) shortcut_names = 'xyzt' def __getattr__(self, name): cls = type(self) if len(name) == 1: pos = cls.shortcut_names.find(name) if 0 <= pos < len(self._components): return self._components[pos] msg = '{.__name__!r} object has no attribute {!r}' raise AttributeError(msg.format(cls, name)) def angle(self, n): # <2> r = math.sqrt(sum(x * x for x in self[n:])) a = math.atan2(r, self[n-1]) if (n == len(self) - 1) and (self[-1] < 0): return math.pi * 2 - a else: return a def angles(self): # <3> return (self.angle(n) for n in range(1, len(self))) def __format__(self, fmt_spec=''): if fmt_spec.endswith('h'): # hyperspherical coordinates fmt_spec = fmt_spec[:-1] coords = itertools.chain([abs(self)], self.angles()) # <4> outer_fmt = '<{}>' # <5> else: coords = self outer_fmt = '({})' # <6> components = (format(c, fmt_spec) for c in coords) # <7> return outer_fmt.format(', '.join(components)) # <8> @classmethod def frombytes(cls, 
octets): typecode = chr(octets[0]) memv = memoryview(octets[1:]).cast(typecode) return cls(memv) # END VECTOR_V5
Vector
python
ray-project__ray
python/ray/util/state/common.py
{ "start": 39870, "end": 40600 }
class ____: #: The name of this task group name: str #: A unique identifier for this group key: str #: The type of the class. Equivalent to protobuf TaskType, #: "ACTOR" if it represents an Actor, or "GROUP" if it's a grouping of tasks. type: str #: Unix timestamp to use to sort the task group. timestamp: Optional[int] = None #: State name to the count dict. State name is equivalent to #: the protobuf TaskStatus. state_counts: Dict[TypeTaskStatus, int] = field(default_factory=dict) #: The child children: List["NestedTaskSummary"] = field(default_factory=list) #: A link to more details about this summary. link: Optional[Link] = None @dataclass
NestedTaskSummary
python
getsentry__sentry
src/sentry/notifications/platform/templates/sample.py
{ "start": 7519, "end": 9408 }
class ____(NotificationTemplate[SlowLoadMetricAlertData]): category = NotificationCategory.DEBUG example_data = SlowLoadMetricAlertData( alert_type="Slow Product Load", severity="critical", project_name="example-app", measurement="5152.0 p50(measurements.lc)", threshold="static", start_time="2024-01-15 14:30:22 UTC", chart_url="https://storage.googleapis.com/sentryio-chartcuterie-bucket/b8c05163a9474cf0ae0c6e8797e768ee.png", acknowledge_url="https://example.com/acknowledge", escalate_url="https://example.com/escalate", alert_url="https://example.com/alert", ) def render(self, data: SlowLoadMetricAlertData) -> NotificationRenderedTemplate: return NotificationRenderedTemplate( subject=f"{data.severity.upper()}: {data.alert_type} in {data.project_name}", body=[ ParagraphBlock( type=NotificationBodyFormattingBlockType.PARAGRAPH, blocks=[ PlainTextBlock( type=NotificationBodyTextBlockType.PLAIN_TEXT, text=f"{data.measurement} since {data.start_time}", ) ], ), ], chart=NotificationRenderedImage( url=data.chart_url, alt_text="Metric alert chart", ), actions=[ NotificationRenderedAction( label="Acknowledge", link="https://example.com/acknowledge" ), NotificationRenderedAction(label="Escalate", link="https://example.com/escalate"), ], footer=f"Threshold: {data.threshold} | Triggered alert: {data.alert_url}", ) @dataclass(frozen=True)
SlowLoadMetricAlertNotificationTemplate
python
huggingface__transformers
src/transformers/models/qwen3_vl/modular_qwen3_vl.py
{ "start": 15204, "end": 15594 }
class ____(Qwen2_5_VLVisionBlock): def __init__(self, config, attn_implementation: str = "sdpa") -> None: super().__init__() self.norm1 = nn.LayerNorm(config.hidden_size, eps=1e-6) self.norm2 = nn.LayerNorm(config.hidden_size, eps=1e-6) self.attn = Qwen3VLVisionAttention(config=config) self.mlp = Qwen3VLVisionMLP(config=config)
Qwen3VLVisionBlock
python
google__jax
tests/pallas/tpu_pallas_memory_space_test.py
{ "start": 3773, "end": 8280 }
class ____(jtu.JaxTestCase): def setUp(self): super().setUp() if not jtu.is_device_tpu_at_least(5): self.skipTest('Needs a newer TPU') @parameterized.parameters( (pltpu.VMEM, 1), (pltpu.SMEM, 4), (pltpu.HBM, 0), (pltpu.ANY, None), ) def test_basic_ref_memory_space_constraint(self, memory_space, color): @jax.jit def f(x): x_ref = jax.new_ref(x, memory_space=memory_space) y_ref = jax.new_ref(pl.empty_like(x), memory_space=memory_space) self.assertEqual(jax.typeof(x_ref).memory_space, memory_space) self.assertEqual(jax.typeof(y_ref).memory_space, memory_space) @pl.core_map(mesh=pltpu.create_tensorcore_mesh('core')) def _(): if jax.typeof(x_ref).memory_space is pltpu.VMEM: y_ref[...] = x_ref[...] else: pltpu.sync_copy(x_ref, y_ref) return y_ref[...] x = jnp.arange(1024, dtype=jnp.float32).reshape((8, 128)) num_cores = jax.devices()[0].num_cores if num_cores > 1 and memory_space == pltpu.VMEM: with self.assertRaisesRegex( NotImplementedError, 'TensorCoreMesh does not support VMEM inputs/outputs when there are' ' >1 cores. Use HBM or ANY instead.', ): f.lower(x).compile() return lowered = f.lower(x) compiled = lowered.compile() hlo = compiled.as_text() if color is None or memory_space == pltpu.SMEM: self.assertIn('"input_memory_space_colors":[]', hlo) else: self.assertIn( f'"input_memory_space_colors":[{{"operand_index":"0","color":"{color}","shape_index":[]}},{{"operand_index":"1","color":"{color}","shape_index":[]}}]', hlo, ) y = compiled(x) np.testing.assert_array_equal(y, x) def test_smem_copy(self): mesh = pltpu.create_tensorcore_mesh('core') if len(mesh.devices) > 1: self.skipTest('Only one core is supported for this test.') kernel = pl.core_map(mesh=mesh) @jax.jit def f(): y_ref = pl.empty_ref_like(pltpu.SMEM((8,), jnp.int32)) @kernel def _(): for i in range(y_ref.shape[0]): y_ref[i] = i @kernel def _(): for i in range(y_ref.shape[0]): y_ref[i] = y_ref[i] + 1 return y_ref[...] 
np.testing.assert_array_equal(f(), np.arange(8) + 1) def test_smem_async_copy(self): mesh = pltpu.create_tensorcore_mesh('core') if len(mesh.devices) > 1: self.skipTest('Only one core is supported for this test.') kernel = pl.core_map(mesh=mesh) @jax.jit def f(): y_ref = pl.empty_ref_like(pltpu.SMEM((8,), jnp.int32)) @kernel def _(): for i in range(y_ref.shape[0]): y_ref[i] = i @kernel def _(): for i in range(y_ref.shape[0]): y_ref[i] = y_ref[i] + 1 y_out_ref = pl.empty_ref_like(pltpu.HBM((8,), jnp.int32)) sem = pl.empty_ref_like(pltpu.SemaphoreType.DMA(())) @kernel def _(): pltpu.make_async_copy(y_ref, y_out_ref, sem).start() @kernel def _(): pltpu.make_async_copy(y_ref, y_out_ref, sem).wait() return y_out_ref[...] np.testing.assert_array_equal(f(), np.arange(8) + 1) def test_smem_async_copy_megacore(self): mesh = pltpu.create_tensorcore_mesh('core') num_cores = len(mesh.devices) if num_cores == 1: self.skipTest('Only megacore is supported for this test.') kernel = pl.core_map(mesh=mesh) n = 256 @jax.jit def f(): y_ref = pl.empty_ref_like(pltpu.SMEM((1, n), jnp.int32)) @kernel def _(): core_i = jax.lax.axis_index('core') for i in range(n): y_ref[0, i] = i + core_i * n @kernel def _(): for i in range(n): y_ref[0, i] = y_ref[0, i] + 1 y_out_ref = pl.empty_ref_like(pltpu.HBM((num_cores, 1, n), jnp.int32)) sem = pl.empty_ref_like(pltpu.SemaphoreType.DMA(())) @kernel def _(): core_i = jax.lax.axis_index('core') pltpu.make_async_copy(y_ref, y_out_ref.at[core_i, ...], sem).start() @kernel def _(): core_i = jax.lax.axis_index('core') pltpu.make_async_copy(y_ref, y_out_ref.at[core_i, ...], sem).wait() return y_out_ref[...] np.testing.assert_array_equal( f(), np.arange(num_cores * n).reshape((num_cores, 1, n)) + 1 ) if __name__ == '__main__': absltest.main(testLoader=jtu.JaxTestLoader())
TPUCoreMapMemorySpaceTest
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_state.py
{ "start": 337, "end": 3197 }
class ____(FSDPTestMultiThread): @property def world_size(self) -> int: return 1 @skip_if_lt_x_gpu(1) def test_fully_shard_state(self): """ Tests the ability to get the state object from a fully sharded module. """ num_mlps = 3 model = nn.Sequential(*[MLP(8) for _ in range(num_mlps)]) for mlp in model: fully_shard(mlp) fully_shard(model) root_state = fully_shard.state(model) self.assertTrue(root_state is not None) all_states = [root_state] + [fully_shard.state(mlp) for mlp in model] # Check that each `fully_shard` call constructs a distinct state object self.assertEqual(len(set(all_states)), num_mlps + 1) @skip_if_lt_x_gpu(1) def test_fully_shard_reapply(self): model = MLP(8) fully_shard(model) with self.assertRaisesRegex( AssertionError, "Each distinct composable distributed API can only be applied to a module once.", ): fully_shard(model) @skip_if_lt_x_gpu(1) def test_fully_shard_cls(self): # Check that we only swap class for the module passed to `fully_shard` model = MLP(8) fully_shard(model) self.assertTrue(isinstance(model, MLP)) self.assertTrue(isinstance(model, FSDPModule)) self.assertEqual(model.__class__.__name__, "FSDPMLP") for module in model.modules(): if module is model: continue self.assertFalse(isinstance(module, FSDPModule)) # Check that slicing into a `Sequential` does not preserve FSDP model = nn.Sequential(*[MLP(8) for _ in range(3)]) fully_shard(model) self.assertTrue(isinstance(model, nn.Sequential)) self.assertTrue(isinstance(model, FSDPModule)) self.assertEqual(model.__class__.__name__, "FSDPSequential") sliced_model = model[:2] self.assertTrue(isinstance(sliced_model, nn.Sequential)) self.assertFalse(isinstance(sliced_model, FSDPModule)) @skip_if_lt_x_gpu(1) def test_fully_shard_unsupported_module_cls(self): regex = ( r"fully\_shard does not support containers that do not implement forward" ) model = nn.ModuleList([MLP(8) for _ in range(3)]) with self.assertRaisesRegex(ValueError, regex): fully_shard(model) model = nn.ModuleDict({"1": 
MLP(8), "2": MLP(8)}) with self.assertRaisesRegex(ValueError, regex): fully_shard(model) @skip_if_lt_x_gpu(1) def test_fully_shard_deepcopy(self): model = MLP(8) fully_shard(model) with self.assertRaisesRegex(AssertionError, "FSDP does not support deepcopy"): copy.deepcopy(model) if __name__ == "__main__": run_tests()
TestFullyShardState
python
django-haystack__django-haystack
test_haystack/core/models.py
{ "start": 228, "end": 579 }
class ____(models.Model): author = models.CharField(max_length=255) foo = models.CharField(max_length=255, blank=True) pub_date = models.DateTimeField(default=datetime.datetime.now) tag = models.ForeignKey(MockTag, models.CASCADE) def __str__(self): return self.author def hello(self): return "World!"
MockModel
python
celery__celery
t/unit/utils/test_saferepr.py
{ "start": 2045, "end": 2135 }
class ____(frozenset): def __repr__(self): return super().__repr__()
frozenset3
python
django__django
tests/requests_tests/tests.py
{ "start": 1138, "end": 39706 }
class ____(SimpleTestCase): def test_httprequest(self): request = HttpRequest() self.assertEqual(list(request.GET), []) self.assertEqual(list(request.POST), []) self.assertEqual(list(request.COOKIES), []) self.assertEqual(list(request.META), []) # .GET and .POST should be QueryDicts self.assertEqual(request.GET.urlencode(), "") self.assertEqual(request.POST.urlencode(), "") # and FILES should be MultiValueDict self.assertEqual(request.FILES.getlist("foo"), []) self.assertIsNone(request.content_type) self.assertIsNone(request.content_params) def test_httprequest_full_path(self): request = HttpRequest() request.path_info = "/;some/?awful/=path/foo:bar/" request.path = "/prefix" + request.path_info request.META["QUERY_STRING"] = ";some=query&+query=string" expected = "/%3Bsome/%3Fawful/%3Dpath/foo:bar/?;some=query&+query=string" self.assertEqual(request.get_full_path_info(), expected) self.assertEqual(request.get_full_path(), "/prefix" + expected) def test_httprequest_full_path_with_query_string_and_fragment(self): request = HttpRequest() request.path_info = "/foo#bar" request.path = "/prefix" + request.path_info request.META["QUERY_STRING"] = "baz#quux" self.assertEqual(request.get_full_path_info(), "/foo%23bar?baz#quux") self.assertEqual(request.get_full_path(), "/prefix/foo%23bar?baz#quux") def test_httprequest_repr(self): request = HttpRequest() request.path = "/somepath/" request.method = "GET" request.GET = {"get-key": "get-value"} request.POST = {"post-key": "post-value"} request.COOKIES = {"post-key": "post-value"} request.META = {"post-key": "post-value"} self.assertEqual(repr(request), "<HttpRequest: GET '/somepath/'>") def test_httprequest_repr_invalid_method_and_path(self): request = HttpRequest() self.assertEqual(repr(request), "<HttpRequest>") request = HttpRequest() request.method = "GET" self.assertEqual(repr(request), "<HttpRequest>") request = HttpRequest() request.path = "" self.assertEqual(repr(request), "<HttpRequest>") def test_wsgirequest(self): 
request = WSGIRequest( { "PATH_INFO": "bogus", "REQUEST_METHOD": "bogus", "CONTENT_TYPE": "text/html; charset=utf8", "wsgi.input": BytesIO(b""), } ) self.assertEqual(list(request.GET), []) self.assertEqual(list(request.POST), []) self.assertEqual(list(request.COOKIES), []) self.assertEqual( set(request.META), { "PATH_INFO", "REQUEST_METHOD", "SCRIPT_NAME", "CONTENT_TYPE", "wsgi.input", }, ) self.assertEqual(request.META["PATH_INFO"], "bogus") self.assertEqual(request.META["REQUEST_METHOD"], "bogus") self.assertEqual(request.META["SCRIPT_NAME"], "") self.assertEqual(request.content_type, "text/html") self.assertEqual(request.content_params, {"charset": "utf8"}) def test_wsgirequest_with_script_name(self): """ The request's path is correctly assembled, regardless of whether or not the SCRIPT_NAME has a trailing slash (#20169). """ # With trailing slash request = WSGIRequest( { "PATH_INFO": "/somepath/", "SCRIPT_NAME": "/PREFIX/", "REQUEST_METHOD": "get", "wsgi.input": BytesIO(b""), } ) self.assertEqual(request.path, "/PREFIX/somepath/") # Without trailing slash request = WSGIRequest( { "PATH_INFO": "/somepath/", "SCRIPT_NAME": "/PREFIX", "REQUEST_METHOD": "get", "wsgi.input": BytesIO(b""), } ) self.assertEqual(request.path, "/PREFIX/somepath/") def test_wsgirequest_script_url_double_slashes(self): """ WSGI squashes multiple successive slashes in PATH_INFO, WSGIRequest should take that into account when populating request.path and request.META['SCRIPT_NAME'] (#17133). """ request = WSGIRequest( { "SCRIPT_URL": "/mst/milestones//accounts/login//help", "PATH_INFO": "/milestones/accounts/login/help", "REQUEST_METHOD": "get", "wsgi.input": BytesIO(b""), } ) self.assertEqual(request.path, "/mst/milestones/accounts/login/help") self.assertEqual(request.META["SCRIPT_NAME"], "/mst") def test_wsgirequest_with_force_script_name(self): """ The FORCE_SCRIPT_NAME setting takes precedence over the request's SCRIPT_NAME environment parameter (#20169). 
""" with override_settings(FORCE_SCRIPT_NAME="/FORCED_PREFIX/"): request = WSGIRequest( { "PATH_INFO": "/somepath/", "SCRIPT_NAME": "/PREFIX/", "REQUEST_METHOD": "get", "wsgi.input": BytesIO(b""), } ) self.assertEqual(request.path, "/FORCED_PREFIX/somepath/") def test_wsgirequest_path_with_force_script_name_trailing_slash(self): """ The request's path is correctly assembled, regardless of whether or not the FORCE_SCRIPT_NAME setting has a trailing slash (#20169). """ # With trailing slash with override_settings(FORCE_SCRIPT_NAME="/FORCED_PREFIX/"): request = WSGIRequest( { "PATH_INFO": "/somepath/", "REQUEST_METHOD": "get", "wsgi.input": BytesIO(b""), } ) self.assertEqual(request.path, "/FORCED_PREFIX/somepath/") # Without trailing slash with override_settings(FORCE_SCRIPT_NAME="/FORCED_PREFIX"): request = WSGIRequest( { "PATH_INFO": "/somepath/", "REQUEST_METHOD": "get", "wsgi.input": BytesIO(b""), } ) self.assertEqual(request.path, "/FORCED_PREFIX/somepath/") def test_wsgirequest_repr(self): request = WSGIRequest({"REQUEST_METHOD": "get", "wsgi.input": BytesIO(b"")}) self.assertEqual(repr(request), "<WSGIRequest: GET '/'>") request = WSGIRequest( { "PATH_INFO": "/somepath/", "REQUEST_METHOD": "get", "wsgi.input": BytesIO(b""), } ) request.GET = {"get-key": "get-value"} request.POST = {"post-key": "post-value"} request.COOKIES = {"post-key": "post-value"} request.META = {"post-key": "post-value"} self.assertEqual(repr(request), "<WSGIRequest: GET '/somepath/'>") def test_wsgirequest_path_info(self): def wsgi_str(path_info, encoding="utf-8"): path_info = path_info.encode( encoding ) # Actual URL sent by the browser (bytestring) path_info = path_info.decode( "iso-8859-1" ) # Value in the WSGI environ dict (native string) return path_info # Regression for #19468 request = WSGIRequest( { "PATH_INFO": wsgi_str("/سلام/"), "REQUEST_METHOD": "get", "wsgi.input": BytesIO(b""), } ) self.assertEqual(request.path, "/سلام/") # The URL may be incorrectly encoded in a non-UTF-8 
encoding (#26971) request = WSGIRequest( { "PATH_INFO": wsgi_str("/café/", encoding="iso-8859-1"), "REQUEST_METHOD": "get", "wsgi.input": BytesIO(b""), } ) # Since it's impossible to decide the (wrong) encoding of the URL, it's # left percent-encoded in the path. self.assertEqual(request.path, "/caf%E9/") def test_wsgirequest_copy(self): request = WSGIRequest({"REQUEST_METHOD": "get", "wsgi.input": BytesIO(b"")}) request_copy = copy.copy(request) self.assertIs(request_copy.environ, request.environ) def test_limited_stream(self): # Read all of a limited stream stream = LimitedStream(BytesIO(b"test"), 2) self.assertEqual(stream.read(), b"te") # Reading again returns nothing. self.assertEqual(stream.read(), b"") # Read a number of characters greater than the stream has to offer stream = LimitedStream(BytesIO(b"test"), 2) self.assertEqual(stream.read(5), b"te") # Reading again returns nothing. self.assertEqual(stream.readline(5), b"") # Read sequentially from a stream stream = LimitedStream(BytesIO(b"12345678"), 8) self.assertEqual(stream.read(5), b"12345") self.assertEqual(stream.read(5), b"678") # Reading again returns nothing. self.assertEqual(stream.readline(5), b"") # Read lines from a stream stream = LimitedStream(BytesIO(b"1234\n5678\nabcd\nefgh\nijkl"), 24) # Read a full line, unconditionally self.assertEqual(stream.readline(), b"1234\n") # Read a number of characters less than a line self.assertEqual(stream.readline(2), b"56") # Read the rest of the partial line self.assertEqual(stream.readline(), b"78\n") # Read a full line, with a character limit greater than the line length self.assertEqual(stream.readline(6), b"abcd\n") # Read the next line, deliberately terminated at the line end self.assertEqual(stream.readline(4), b"efgh") # Read the next line... just the line end self.assertEqual(stream.readline(), b"\n") # Read everything else. 
self.assertEqual(stream.readline(), b"ijkl") # Regression for #15018 # If a stream contains a newline, but the provided length # is less than the number of provided characters, the newline # doesn't reset the available character count stream = LimitedStream(BytesIO(b"1234\nabcdef"), 9) self.assertEqual(stream.readline(10), b"1234\n") self.assertEqual(stream.readline(3), b"abc") # Now expire the available characters self.assertEqual(stream.readline(3), b"d") # Reading again returns nothing. self.assertEqual(stream.readline(2), b"") # Same test, but with read, not readline. stream = LimitedStream(BytesIO(b"1234\nabcdef"), 9) self.assertEqual(stream.read(6), b"1234\na") self.assertEqual(stream.read(2), b"bc") self.assertEqual(stream.read(2), b"d") self.assertEqual(stream.read(2), b"") self.assertEqual(stream.read(), b"") def test_stream_read(self): payload = FakePayload("name=value") request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/x-www-form-urlencoded", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, }, ) self.assertEqual(request.read(), b"name=value") def test_stream_readline(self): payload = FakePayload("name=value\nother=string") request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/x-www-form-urlencoded", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, }, ) self.assertEqual(request.readline(), b"name=value\n") self.assertEqual(request.readline(), b"other=string") def test_read_after_value(self): """ Reading from request is allowed after accessing request contents as POST or body. 
""" payload = FakePayload("name=value") request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/x-www-form-urlencoded", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual(request.POST, {"name": ["value"]}) self.assertEqual(request.body, b"name=value") self.assertEqual(request.read(), b"name=value") def test_value_after_read(self): """ Construction of POST or body is not allowed after reading from request. """ payload = FakePayload("name=value") request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/x-www-form-urlencoded", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual(request.read(2), b"na") with self.assertRaises(RawPostDataException): request.body self.assertEqual(request.POST, {}) def test_non_ascii_POST(self): payload = FakePayload(urlencode({"key": "España"})) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_LENGTH": len(payload), "CONTENT_TYPE": "application/x-www-form-urlencoded", "wsgi.input": payload, } ) self.assertEqual(request.POST, {"key": ["España"]}) def test_non_utf8_charset_POST_bad_request(self): payload = FakePayload(urlencode({"key": "España".encode("latin-1")})) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_LENGTH": len(payload), "CONTENT_TYPE": "application/x-www-form-urlencoded; charset=iso-8859-1", "wsgi.input": payload, } ) msg = ( "HTTP requests with the 'application/x-www-form-urlencoded' content type " "must be UTF-8 encoded." 
) with self.assertRaisesMessage(BadRequest, msg): request.POST with self.assertRaisesMessage(BadRequest, msg): request.FILES def test_utf8_charset_POST(self): for charset in ["utf-8", "UTF-8"]: with self.subTest(charset=charset): payload = FakePayload(urlencode({"key": "España"})) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_LENGTH": len(payload), "CONTENT_TYPE": ( f"application/x-www-form-urlencoded; charset={charset}" ), "wsgi.input": payload, } ) self.assertEqual(request.POST, {"key": ["España"]}) def test_body_after_POST_multipart_form_data(self): """ Reading body after parsing multipart/form-data is not allowed """ # Because multipart is used for large amounts of data i.e. file # uploads, we don't want the data held in memory twice, and we don't # want to silence the error by setting body = '' either. payload = FakePayload( "\r\n".join( [ "--boundary", 'Content-Disposition: form-data; name="name"', "", "value", "--boundary--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "multipart/form-data; boundary=boundary", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual(request.POST, {"name": ["value"]}) with self.assertRaises(RawPostDataException): request.body def test_malformed_multipart_header(self): for header in [ 'Content-Disposition : form-data; name="name"', 'Content-Disposition:form-data; name="name"', 'Content-Disposition :form-data; name="name"', ]: with self.subTest(header): payload = FakePayload( "\r\n".join( [ "--boundary", header, "", "value", "--boundary--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "multipart/form-data; boundary=boundary", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual(request.POST, {"name": ["value"]}) def test_body_after_POST_multipart_related(self): """ Reading body after parsing multipart that isn't form-data is allowed """ # Ticket #9054 # There are cases in which the multipart data is related instead 
of # being a binary upload, in which case it should still be accessible # via body. payload_data = b"\r\n".join( [ b"--boundary", b'Content-ID: id; name="name"', b"", b"value", b"--boundary--", ] ) payload = FakePayload(payload_data) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "multipart/related; boundary=boundary", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual(request.POST, {}) self.assertEqual(request.body, payload_data) def test_POST_multipart_with_content_length_zero(self): """ Multipart POST requests with Content-Length >= 0 are valid and need to be handled. """ # According to RFC 9110 Section 8.6 every POST with Content-Length >= 0 # is a valid request, so ensure that we handle Content-Length == 0. payload = FakePayload( "\r\n".join( [ "--boundary", 'Content-Disposition: form-data; name="name"', "", "value", "--boundary--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "multipart/form-data; boundary=boundary", "CONTENT_LENGTH": 0, "wsgi.input": payload, } ) self.assertEqual(request.POST, {}) @override_settings( FILE_UPLOAD_HANDLERS=["requests_tests.tests.ErrorFileUploadHandler"] ) def test_POST_multipart_handler_error(self): payload = FakePayload( "\r\n".join( [ f"--{BOUNDARY}", 'Content-Disposition: form-data; name="name"', "", "value", f"--{BOUNDARY}--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": MULTIPART_CONTENT, "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) with self.assertRaises(ValueError): request.POST @override_settings( FILE_UPLOAD_HANDLERS=["requests_tests.tests.CustomFileUploadHandler"] ) def test_POST_multipart_handler_parses_input(self): payload = FakePayload( "\r\n".join( [ f"--{BOUNDARY}", 'Content-Disposition: form-data; name="name"', "", "value", f"--{BOUNDARY}--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": MULTIPART_CONTENT, "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) 
self.assertEqual(request.POST, "_POST") self.assertEqual(request.FILES, "_FILES") def test_request_methods_with_content(self): for method in ["GET", "PUT", "DELETE"]: with self.subTest(method=method): payload = FakePayload(urlencode({"key": "value"})) request = WSGIRequest( { "REQUEST_METHOD": method, "CONTENT_LENGTH": len(payload), "CONTENT_TYPE": "application/x-www-form-urlencoded", "wsgi.input": payload, } ) self.assertEqual(request.POST, {}) def test_POST_content_type_json(self): payload = FakePayload( "\r\n".join( [ '{"pk": 1, "model": "store.book", "fields": {"name": "Mostly Ha', 'rmless", "author": ["Douglas", Adams"]}}', ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/json", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual(request.POST, {}) self.assertEqual(request.FILES, {}) _json_payload = [ 'Content-Disposition: form-data; name="JSON"', "Content-Type: application/json", "", '{"pk": 1, "model": "store.book", "fields": {"name": "Mostly Harmless", ' '"author": ["Douglas", Adams"]}}', ] def test_POST_form_data_json(self): payload = FakePayload( "\r\n".join([f"--{BOUNDARY}", *self._json_payload, f"--{BOUNDARY}--"]) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": MULTIPART_CONTENT, "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual( request.POST, { "JSON": [ '{"pk": 1, "model": "store.book", "fields": {"name": "Mostly ' 'Harmless", "author": ["Douglas", Adams"]}}' ], }, ) def test_POST_multipart_json(self): payload = FakePayload( "\r\n".join( [ f"--{BOUNDARY}", 'Content-Disposition: form-data; name="name"', "", "value", f"--{BOUNDARY}", *self._json_payload, f"--{BOUNDARY}--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": MULTIPART_CONTENT, "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual( request.POST, { "name": ["value"], "JSON": [ '{"pk": 1, "model": "store.book", "fields": {"name": "Mostly ' 
'Harmless", "author": ["Douglas", Adams"]}}' ], }, ) def test_POST_multipart_json_csv(self): payload = FakePayload( "\r\n".join( [ f"--{BOUNDARY}", 'Content-Disposition: form-data; name="name"', "", "value", f"--{BOUNDARY}", *self._json_payload, f"--{BOUNDARY}", 'Content-Disposition: form-data; name="CSV"', "Content-Type: text/csv", "", "Framework,ID.Django,1.Flask,2.", f"--{BOUNDARY}--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": MULTIPART_CONTENT, "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual( request.POST, { "name": ["value"], "JSON": [ '{"pk": 1, "model": "store.book", "fields": {"name": "Mostly ' 'Harmless", "author": ["Douglas", Adams"]}}' ], "CSV": ["Framework,ID.Django,1.Flask,2."], }, ) def test_POST_multipart_with_file(self): payload = FakePayload( "\r\n".join( [ f"--{BOUNDARY}", 'Content-Disposition: form-data; name="name"', "", "value", f"--{BOUNDARY}", *self._json_payload, f"--{BOUNDARY}", 'Content-Disposition: form-data; name="File"; filename="test.csv"', "Content-Type: application/octet-stream", "", "Framework,ID", "Django,1", "Flask,2", f"--{BOUNDARY}--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": MULTIPART_CONTENT, "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual( request.POST, { "name": ["value"], "JSON": [ '{"pk": 1, "model": "store.book", "fields": {"name": "Mostly ' 'Harmless", "author": ["Douglas", Adams"]}}' ], }, ) self.assertEqual(len(request.FILES), 1) self.assertIsInstance((request.FILES["File"]), InMemoryUploadedFile) def test_base64_invalid_encoding(self): payload = FakePayload( "\r\n".join( [ f"--{BOUNDARY}", 'Content-Disposition: form-data; name="file"; filename="test.txt"', "Content-Type: application/octet-stream", "Content-Transfer-Encoding: base64", "", f"\r\nZsg£\r\n--{BOUNDARY}--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": MULTIPART_CONTENT, "CONTENT_LENGTH": len(payload), 
"wsgi.input": payload, } ) msg = "Could not decode base64 data." with self.assertRaisesMessage(MultiPartParserError, msg): request.POST def test_POST_binary_only(self): payload = b"\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@" environ = { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/octet-stream", "CONTENT_LENGTH": len(payload), "wsgi.input": BytesIO(payload), } request = WSGIRequest(environ) self.assertEqual(request.POST, {}) self.assertEqual(request.FILES, {}) self.assertEqual(request.body, payload) # Same test without specifying content-type environ.update({"CONTENT_TYPE": "", "wsgi.input": BytesIO(payload)}) request = WSGIRequest(environ) self.assertEqual(request.POST, {}) self.assertEqual(request.FILES, {}) self.assertEqual(request.body, payload) def test_read_by_lines(self): payload = FakePayload("name=value") request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/x-www-form-urlencoded", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual(list(request), [b"name=value"]) def test_POST_after_body_read(self): """ POST should be populated even if body is read first """ payload = FakePayload("name=value") request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/x-www-form-urlencoded", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) request.body # evaluate self.assertEqual(request.POST, {"name": ["value"]}) def test_POST_after_body_read_and_stream_read(self): """ POST should be populated even if body is read first, and then the stream is read second. 
""" payload = FakePayload("name=value") request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/x-www-form-urlencoded", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) request.body # evaluate self.assertEqual(request.read(1), b"n") self.assertEqual(request.POST, {"name": ["value"]}) def test_multipart_post_field_with_base64(self): payload = FakePayload( "\r\n".join( [ f"--{BOUNDARY}", 'Content-Disposition: form-data; name="name"', "Content-Transfer-Encoding: base64", "", "dmFsdWU=", f"--{BOUNDARY}--", "", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": MULTIPART_CONTENT, "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) request.body # evaluate self.assertEqual(request.POST, {"name": ["value"]}) def test_multipart_post_field_with_invalid_base64(self): payload = FakePayload( "\r\n".join( [ f"--{BOUNDARY}", 'Content-Disposition: form-data; name="name"', "Content-Transfer-Encoding: base64", "", "123", f"--{BOUNDARY}--", "", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": MULTIPART_CONTENT, "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) request.body # evaluate self.assertEqual(request.POST, {"name": ["123"]}) def test_POST_after_body_read_and_stream_read_multipart(self): """ POST should be populated even if body is read first, and then the stream is read second. Using multipart/form-data instead of urlencoded. 
""" payload = FakePayload( "\r\n".join( [ "--boundary", 'Content-Disposition: form-data; name="name"', "", "value", "--boundary--" "", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "multipart/form-data; boundary=boundary", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) request.body # evaluate # Consume enough data to mess up the parsing: self.assertEqual(request.read(13), b"--boundary\r\nC") self.assertEqual(request.POST, {"name": ["value"]}) def test_POST_immutable_for_multipart(self): """ MultiPartParser.parse() leaves request.POST immutable. """ payload = FakePayload( "\r\n".join( [ "--boundary", 'Content-Disposition: form-data; name="name"', "", "value", "--boundary--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "multipart/form-data; boundary=boundary", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertFalse(request.POST._mutable) def test_multipart_without_boundary(self): request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "multipart/form-data;", "CONTENT_LENGTH": 0, "wsgi.input": FakePayload(), } ) with self.assertRaisesMessage( MultiPartParserError, "Invalid boundary in multipart: None" ): request.POST def test_multipart_non_ascii_content_type(self): request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "multipart/form-data; boundary = \xe0", "CONTENT_LENGTH": 0, "wsgi.input": FakePayload(), } ) msg = ( "Invalid non-ASCII Content-Type in multipart: multipart/form-data; " "boundary = à" ) with self.assertRaisesMessage(MultiPartParserError, msg): request.POST def test_multipart_with_header_fields_too_large(self): payload = FakePayload( "\r\n".join( [ "--boundary", 'Content-Disposition: form-data; name="name"', "X-Long-Header: %s" % ("-" * (MAX_TOTAL_HEADER_SIZE + 1)), "", "value", "--boundary--", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "multipart/form-data; boundary=boundary", "CONTENT_LENGTH": len(payload), 
"wsgi.input": payload, } ) msg = "Request max total header size exceeded." with self.assertRaisesMessage(MultiPartParserError, msg): request.POST def test_POST_connection_error(self): """ If wsgi.input.read() raises an exception while trying to read() the POST, the exception is identifiable (not a generic OSError). """ class ExplodingBytesIO(BytesIO): def read(self, size=-1, /): raise OSError("kaboom!") payload = b"name=value" request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/x-www-form-urlencoded", "CONTENT_LENGTH": len(payload), "wsgi.input": ExplodingBytesIO(payload), } ) with self.assertRaises(UnreadablePostError): request.body def test_set_encoding_clears_POST(self): payload = FakePayload( "\r\n".join( [ f"--{BOUNDARY}", 'Content-Disposition: form-data; name="name"', "", "Hello Günter", f"--{BOUNDARY}--", "", ] ) ) request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": MULTIPART_CONTENT, "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) self.assertEqual(request.POST, {"name": ["Hello Günter"]}) request.encoding = "iso-8859-16" # FIXME: POST should be accessible after changing the encoding # (refs #14035). # self.assertEqual(request.POST, {"name": ["Hello GĂŒnter"]}) def test_set_encoding_clears_GET(self): payload = FakePayload("") request = WSGIRequest( { "REQUEST_METHOD": "GET", "wsgi.input": payload, "QUERY_STRING": "name=Hello%20G%C3%BCnter", } ) self.assertEqual(request.GET, {"name": ["Hello Günter"]}) request.encoding = "iso-8859-16" self.assertEqual(request.GET, {"name": ["Hello G\u0102\u0152nter"]}) def test_FILES_connection_error(self): """ If wsgi.input.read() raises an exception while trying to read() the FILES, the exception is identifiable (not a generic OSError). 
""" class ExplodingBytesIO(BytesIO): def read(self, size=-1, /): raise OSError("kaboom!") payload = b"x" request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "multipart/form-data; boundary=foo_", "CONTENT_LENGTH": len(payload), "wsgi.input": ExplodingBytesIO(payload), } ) with self.assertRaises(UnreadablePostError): request.FILES def test_copy(self): request = HttpRequest() request_copy = copy.copy(request) self.assertIs(request_copy.resolver_match, request.resolver_match) def test_deepcopy(self): request = RequestFactory().get("/") request.session = {} request_copy = copy.deepcopy(request) request.session["key"] = "value" self.assertEqual(request_copy.session, {})
RequestsTests
python
dagster-io__dagster
python_modules/dagster/dagster/_core/remote_representation/external_data.py
{ "start": 23076, "end": 23614 }
class ____: """A definition of a directed edge in the logical asset graph. An upstream asset that's depended on, and the corresponding input name in the downstream asset that depends on it. """ parent_asset_key: AssetKey input_name: Optional[str] = None output_name: Optional[str] = None partition_mapping: Optional[PartitionMapping] = None @whitelist_for_serdes( storage_name="ExternalAssetDependedBy", storage_field_names={"child_asset_key": "downstream_asset_key"}, ) @record
AssetParentEdgeSnap
python
pyenv__pyenv
plugins/python-build/scripts/add_miniconda.py
{ "start": 4158, "end": 6806 }
class ____(NamedTuple): flavor: Flavor suffix: Suffix version_str: VersionStr py_version: Optional[PyVersion] @classmethod def from_str(cls, s): """ Convert a string of the form "miniconda_n-ver" or "miniconda_n-py_ver-ver" to a :class:`CondaVersion` object. """ miniconda_n, _, remainder = s.partition("-") suffix = miniconda_n[-1] if suffix in string.digits: flavor = miniconda_n[:-1] else: flavor = miniconda_n suffix = "" components = remainder.split("-") if flavor == Flavor.MINICONDA and len(components) >= 2: py_ver, *ver_parts = components py_ver = PyVersion(f"py{py_ver.replace('.', '')}") ver = "-".join(ver_parts) else: ver = "-".join(components) py_ver = None return CondaVersion(Flavor(flavor), Suffix(suffix), VersionStr(ver), py_ver) def to_filename(self): if self.py_version: return f"{self.flavor}{self.suffix}-{self.py_version.version()}-{self.version_str}" else: return f"{self.flavor}{self.suffix}-{self.version_str}" def default_py_version(self): """ :class:`PyVersion` of Python used with this Miniconda version """ if self.py_version: return self.py_version elif self.suffix == Suffix.TWO: return PyVersion.PY27 v = self.version_str.info() if self.flavor == "miniconda": # https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-python.html if v < (4, 7): return PyVersion.PY36 if v < (4, 8): return PyVersion.PY37 else: # since 4.8, Miniconda specifies versions explicitly in the file name raise ValueError("Miniconda 4.8+ is supposed to specify a Python version explicitly") if self.flavor == "anaconda": # https://docs.anaconda.com/free/anaconda/reference/release-notes/ if v >= (2024,6): return PyVersion.PY312 if v >= (2023,7): return PyVersion.PY311 if v >= (2023,3): return PyVersion.PY310 if v >= (2021,11): return PyVersion.PY39 if v >= (2020,7): return PyVersion.PY38 if v >= (2020,2): return PyVersion.PY37 if v >= (5,3,0): return PyVersion.PY37 return PyVersion.PY36 raise ValueError(self.flavor)
CondaVersion
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol53.py
{ "start": 1058, "end": 1131 }
class ____(Proto_CoSelf): def m(self) -> Self: ...
Impl_CoSelfExplicit2
python
huggingface__transformers
src/transformers/models/vilt/modeling_vilt.py
{ "start": 1462, "end": 2584 }
class ____(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`list[tuple(torch.FloatTensor)]`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): List of tuples of `torch.FloatTensor` (one for each image-text pair, each tuple containing the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[list[tuple[torch.FloatTensor]]] = None attentions: Optional[list[tuple[torch.FloatTensor]]] = None
ViltForImagesAndTextClassificationOutput
python
getsentry__sentry
src/sentry/models/organization.py
{ "start": 2315, "end": 3368 }
class ____(IntEnum): ACTIVE = 0 PENDING_DELETION = 1 DELETION_IN_PROGRESS = 2 RELOCATION_PENDING_APPROVAL = 3 # alias for OrganizationStatus.ACTIVE VISIBLE = 0 def __str__(self) -> str: return self.name @property def label(self): return OrganizationStatus_labels[self] @classmethod def as_choices(cls): result = [] for name, member in cls.__members__.items(): # an alias if name != member.name: continue # realistically Enum shouldn't even creating these, but alas if name.startswith("_"): continue result.append((member.value, str(member.label))) return tuple(result) OrganizationStatus_labels = { OrganizationStatus.ACTIVE: "active", OrganizationStatus.PENDING_DELETION: "pending deletion", OrganizationStatus.DELETION_IN_PROGRESS: "deletion in progress", OrganizationStatus.RELOCATION_PENDING_APPROVAL: "relocation pending approval", }
OrganizationStatus
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/overloadCall6.py
{ "start": 2507, "end": 2811 }
class ____(Generic[_T]): @overload def m1(self: "ClassA[int]") -> "ClassA[int]": ... @overload def m1(self: "ClassA[str]") -> "ClassA[str]": ... def m1(self) -> "ClassA[Any]": return self def func7(a: ClassA[Any]): reveal_type(a.m1(), expected_text="ClassA[int]")
ClassA
python
readthedocs__readthedocs.org
readthedocs/core/forms.py
{ "start": 2255, "end": 2376 }
class ____(forms.ModelForm): class Meta: model = UserProfile fields = ["allow_ads"]
UserAdvertisingForm
python
pytorch__pytorch
torch/utils/data/distributed.py
{ "start": 293, "end": 6451 }
class ____(Sampler[_T_co]): r"""Sampler that restricts data loading to a subset of the dataset. It is especially useful in conjunction with :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each process can pass a :class:`~torch.utils.data.DistributedSampler` instance as a :class:`~torch.utils.data.DataLoader` sampler, and load a subset of the original dataset that is exclusive to it. .. note:: Dataset is assumed to be of constant size and that any instance of it always returns the same elements in the same order. Args: dataset: Dataset used for sampling. num_replicas (int, optional): Number of processes participating in distributed training. By default, :attr:`world_size` is retrieved from the current distributed group. rank (int, optional): Rank of the current process within :attr:`num_replicas`. By default, :attr:`rank` is retrieved from the current distributed group. shuffle (bool, optional): If ``True`` (default), sampler will shuffle the indices. seed (int, optional): random seed used to shuffle the sampler if :attr:`shuffle=True`. This number should be identical across all processes in the distributed group. Default: ``0``. drop_last (bool, optional): if ``True``, then the sampler will drop the tail of the data to make it evenly divisible across the number of replicas. If ``False``, the sampler will add extra indices to make the data evenly divisible across the replicas. Default: ``False``. .. warning:: In distributed mode, calling the :meth:`set_epoch` method at the beginning of each epoch **before** creating the :class:`DataLoader` iterator is necessary to make shuffling work properly across multiple epochs. Otherwise, the same ordering will be always used. Example:: >>> # xdoctest: +SKIP >>> sampler = DistributedSampler(dataset) if is_distributed else None >>> loader = DataLoader(dataset, shuffle=(sampler is None), ... sampler=sampler) >>> for epoch in range(start_epoch, n_epochs): ... if is_distributed: ... sampler.set_epoch(epoch) ... 
train(loader) """ def __init__( self, dataset: Dataset, num_replicas: int | None = None, rank: int | None = None, shuffle: bool = True, seed: int = 0, drop_last: bool = False, ) -> None: if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() if rank >= num_replicas or rank < 0: raise ValueError( f"Invalid rank {rank}, rank should be in the interval [0, {num_replicas - 1}]" ) self.dataset = dataset self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.drop_last = drop_last # If the dataset length is evenly divisible by # of replicas, then there # is no need to drop any data, since the dataset will be split equally. if self.drop_last and len(self.dataset) % self.num_replicas != 0: # type: ignore[arg-type] # Split to nearest available length that is evenly divisible. # This is to ensure each rank receives the same amount of data when # using this Sampler. 
self.num_samples = math.ceil( (len(self.dataset) - self.num_replicas) / self.num_replicas # type: ignore[arg-type] ) else: self.num_samples = math.ceil(len(self.dataset) / self.num_replicas) # type: ignore[arg-type] self.total_size = self.num_samples * self.num_replicas self.shuffle = shuffle self.seed = seed def __iter__(self) -> Iterator[_T_co]: if self.shuffle: # deterministically shuffle based on epoch and seed g = torch.Generator() g.manual_seed(self.seed + self.epoch) indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type] else: indices = list(range(len(self.dataset))) # type: ignore[arg-type] if not self.drop_last: # add extra samples to make it evenly divisible padding_size = self.total_size - len(indices) if padding_size <= len(indices): indices += indices[:padding_size] else: indices += (indices * math.ceil(padding_size / len(indices)))[ :padding_size ] else: # remove tail of data to make it evenly divisible. indices = indices[: self.total_size] if len(indices) != self.total_size: raise AssertionError( f"Number of indices ({len(indices)}) does not match total_size ({self.total_size})" ) # subsample indices = indices[self.rank : self.total_size : self.num_replicas] if len(indices) != self.num_samples: raise AssertionError( f"Number of subsampled indices ({len(indices)}) does not match num_samples ({self.num_samples})" ) # pyrefly: ignore [bad-return] return iter(indices) def __len__(self) -> int: return self.num_samples def set_epoch(self, epoch: int) -> None: r""" Set the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas use a different random ordering for each epoch. Otherwise, the next iteration of this sampler will yield the same ordering. Args: epoch (int): Epoch number. """ self.epoch = epoch
DistributedSampler
python
plotly__plotly.py
plotly/graph_objs/indicator/delta/_increasing.py
{ "start": 233, "end": 3044 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "indicator.delta" _path_str = "indicator.delta.increasing" _valid_props = {"color", "symbol"} @property def color(self): """ Sets the color for increasing value. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def symbol(self): """ Sets the symbol to display for increasing value The 'symbol' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["symbol"] @symbol.setter def symbol(self, val): self["symbol"] = val @property def _prop_descriptions(self): return """\ color Sets the color for increasing value. symbol Sets the symbol to display for increasing value """ def __init__(self, arg=None, color=None, symbol=None, **kwargs): """ Construct a new Increasing object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.indicator.delta.Increasing` color Sets the color for increasing value. 
symbol Sets the symbol to display for increasing value Returns ------- Increasing """ super().__init__("increasing") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.indicator.delta.Increasing constructor must be a dict or an instance of :class:`plotly.graph_objs.indicator.delta.Increasing`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("symbol", arg, symbol) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Increasing
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 265827, "end": 266102 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("client_mutation_id",) client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
DeleteBranchProtectionRulePayload
python
tensorflow__tensorflow
tensorflow/python/training/input_test.py
{ "start": 85855, "end": 104649 }
class ____(test_lib.TestCase): def _testTwoThreadsHelper(self, use_dict): with ops.Graph().as_default(), self.cached_session(): # Two threads, the first generates (0..24, "a"). num_a = 25 zero64 = constant_op.constant(0, dtype=dtypes.int64) examples = variables.Variable(zero64) counter = examples.count_up_to(num_a) sparse_counter = sparse_tensor.SparseTensor( indices=array_ops.reshape(zero64, [1, 1]), values=array_ops_stack.stack( [math_ops.cast(counter, dtypes.float32)]), dense_shape=[1]) # The second generates (99, "b") 35 times and then stops. num_b = 35 ninety_nine = inp.limit_epochs( constant_op.constant( 99, dtype=dtypes.int64), num_b) sparse_ninety_nine = sparse_tensor.SparseTensor( indices=array_ops.reshape(zero64, [1, 1]), values=array_ops_stack.stack( [math_ops.cast(ninety_nine, dtypes.float32)]), dense_shape=[1]) # These get joined together and grouped into batches of 5. batch_size = 5 if use_dict: batched = inp.shuffle_batch_join( [{ "c": counter, "s": sparse_counter, "S": "a" }, { "c": ninety_nine, "s": sparse_ninety_nine, "S": "b" }], batch_size=batch_size, capacity=32, min_after_dequeue=16, seed=223607) batched_fetch = [batched["c"], batched["s"], batched["S"]] else: batched = inp.shuffle_batch_join( [[counter, sparse_counter, "a"], [ninety_nine, sparse_ninety_nine, "b"]], batch_size=batch_size, capacity=32, min_after_dequeue=16, seed=223607) batched_fetch = batched # Shapes. 
self.assertEqual(3, len(batched_fetch)) self.assertAllEqual((batch_size,), batched_fetch[0].get_shape().as_list()) self.assertAllEqual((None, 2), batched_fetch[1].indices.get_shape().as_list()) self.assertAllEqual((None,), batched_fetch[1].values.get_shape().as_list()) self.assertAllEqual((2,), batched_fetch[1].dense_shape.get_shape().as_list()) self.assertAllEqual((batch_size,), batched_fetch[2].get_shape().as_list()) self.evaluate(variables.global_variables_initializer()) variables.local_variables_initializer().run() threads = queue_runner_impl.start_queue_runners() # Should see the "a" and "b" threads mixed together. all_a = [] seen_b = 0 saw_both = 0 num_batches = (num_a + num_b) // batch_size for i in range(num_batches): results = self.evaluate(batched_fetch) self.assertEqual(3, len(results)) self.assertEqual(len(results[0]), batch_size) self.assertEqual(len(results[2]), batch_size) self.assertAllEqual(results[0], results[1].values) self.assertAllEqual( results[1].indices, np.vstack((np.arange(batch_size), np.zeros(batch_size))).T) self.assertAllEqual(results[1].dense_shape, [batch_size, 1]) which_a = [i for i, s in enumerate(results[2]) if s == b"a"] which_b = [i for i, s in enumerate(results[2]) if s == b"b"] self.assertEqual(len(which_a) + len(which_b), batch_size) if which_a and which_b: saw_both += 1 all_a.extend(results[0][i] for i in which_a) seen_b += len(which_b) self.assertAllEqual([99] * len(which_b), [results[0][i] for i in which_b]) # Some minimum level of mixing of the results of both threads. self.assertGreater(saw_both, 1) # Saw all the items from "a", but scrambled. self.assertItemsEqual(all_a, range(num_a)) deltas = [all_a[i + 1] - all_a[i] for i in range(len(all_a) - 1)] self.assertFalse(all(d == deltas[0] for d in deltas)) self.assertEqual(seen_b, num_b) # Reached the limit. 
with self.assertRaises(errors_impl.OutOfRangeError): self.evaluate(batched_fetch) for thread in threads: thread.join() def testTwoThreads(self): self._testTwoThreadsHelper(use_dict=False) def testTwoThreadsDict(self): self._testTwoThreadsHelper(use_dict=True) def testTwoThreadsSmallerBatch(self): with ops.Graph().as_default(), self.cached_session(): # Two threads, the first generates (0..26, "a"). extra_elements = 2 num_a = 25 + extra_elements zero64 = constant_op.constant(0, dtype=dtypes.int64) examples = variables.Variable(zero64) counter = examples.count_up_to(num_a) sparse_counter = sparse_tensor.SparseTensor( indices=array_ops.reshape(zero64, [1, 1]), values=array_ops_stack.stack( [math_ops.cast(counter, dtypes.float32)]), dense_shape=[1]) # The second generates (99, "b") 37 times and then stops. num_b = 35 + extra_elements ninety_nine = inp.limit_epochs( constant_op.constant( 99, dtype=dtypes.int64), num_b) sparse_ninety_nine = sparse_tensor.SparseTensor( indices=array_ops.reshape(zero64, [1, 1]), values=array_ops_stack.stack( [math_ops.cast(ninety_nine, dtypes.float32)]), dense_shape=[1]) # These get joined together and grouped into batches of 5. batch_size = 5 batched = inp.shuffle_batch_join( [[counter, sparse_counter, "a"], [ninety_nine, sparse_ninety_nine, "b"]], batch_size=batch_size, capacity=32, min_after_dequeue=16, seed=223607, allow_smaller_final_batch=True) # Shapes. self.assertEqual(3, len(batched)) self.assertAllEqual((None,), batched[0].get_shape().as_list()) self.assertAllEqual((None, 2), batched[1].indices.get_shape().as_list()) self.assertAllEqual((None,), batched[1].values.get_shape().as_list()) self.assertAllEqual((2,), batched[1].dense_shape.get_shape().as_list()) self.assertAllEqual((None,), batched[2].get_shape().as_list()) self.evaluate(variables.global_variables_initializer()) variables.local_variables_initializer().run() threads = queue_runner_impl.start_queue_runners() # Should see the "a" and "b" threads mixed together. 
all_a = [] seen_b = 0 saw_both = 0 num_batches = (num_a + num_b) // batch_size for i in range(num_batches): results = self.evaluate(batched) tf_logging.info("Batch %d: %s", i, results[0]) self.assertEqual(len(results[0]), batch_size) self.assertEqual(len(results[2]), batch_size) self.assertAllEqual(results[0], results[1].values) self.assertAllEqual( results[1].indices, np.vstack((np.arange(batch_size), np.zeros(batch_size))).T) self.assertAllEqual(results[1].dense_shape, [batch_size, 1]) which_a = [i for i, s in enumerate(results[2]) if s == b"a"] which_b = [i for i, s in enumerate(results[2]) if s == b"b"] self.assertEqual(len(which_a) + len(which_b), batch_size) if which_a and which_b: saw_both += 1 all_a.extend(results[0][i] for i in which_a) seen_b += len(which_b) self.assertAllEqual([99] * len(which_b), [results[0][i] for i in which_b]) # Reached end with 2 * extra_elements left results = self.evaluate(batched) self.assertEqual(len(results[0]), 2 * extra_elements) self.assertAllEqual(results[1].dense_shape, [2 * extra_elements, 1]) self.assertEqual(len(results[2]), 2 * extra_elements) self.assertAllEqual(results[0], results[1].values) self.assertAllEqual(results[1].indices, np.vstack((np.arange(2 * extra_elements), np.zeros(2 * extra_elements))).T) which_a = [i for i, s in enumerate(results[2]) if s == b"a"] which_b = [i for i, s in enumerate(results[2]) if s == b"b"] self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements) if which_a and which_b: saw_both += 1 all_a.extend(results[0][i] for i in which_a) seen_b += len(which_b) # Some minimum level of mixing of the results of both threads. self.assertGreater(saw_both, 1) # Saw all the items from "a", but scrambled, including extras. self.assertItemsEqual(all_a, range(num_a)) deltas = [all_a[i + 1] - all_a[i] for i in range(len(all_a) - 1)] self.assertFalse(all(d == deltas[0] for d in deltas)) self.assertEqual(seen_b, num_b) # Reached the limit. 
with self.assertRaises(errors_impl.OutOfRangeError): self.evaluate(batched) for thread in threads: thread.join() def testMismatchedDictKeys(self): with ops.Graph().as_default(), self.assertRaisesRegex( ValueError, "must have the same keys"): inp.shuffle_batch_join( [{ "c": 12, "s": 123, "S": "a" }, { "cool": -12, "s": 99, "S": "b" }], batch_size=8, capacity=32, min_after_dequeue=16, seed=223607) def testSharedName(self): with ops.Graph().as_default(), self.cached_session(): batch_size = 10 num_batches = 3 zero64 = constant_op.constant(0, dtype=dtypes.int64) examples = variables.Variable(zero64) counter = examples.count_up_to(num_batches * batch_size) batched = inp.shuffle_batch_join( [[counter, "string"]], batch_size=batch_size, capacity=32, min_after_dequeue=10, shared_name="SHARED_NAME_XYZ", name="Q") # Shapes. self.assertEqual(2, len(batched)) self.assertAllEqual((batch_size,), batched[0].get_shape().as_list()) self.assertAllEqual((batch_size,), batched[1].get_shape().as_list()) self.assertProtoEquals( "s: 'SHARED_NAME_XYZ'", batched[0].op.inputs[0].op.node_def.attr["shared_name"]) def _testKeepInputHelper(self, num_threads, enqueue_many, keep_input_vector=False): with ops.Graph().as_default(), self.cached_session(): batch_size = 5 num_batches = 4 examples = variables.Variable(0) counter = examples.count_up_to(num_batches * batch_size * 2) sparse_counter = sparse_tensor.SparseTensor( indices=array_ops.zeros( [1, 1], dtype=dtypes.int64), values=array_ops_stack.stack( [math_ops.cast(counter, dtypes.float32)]), dense_shape=[1]) to_batch = [counter, sparse_counter, "string"] if enqueue_many: to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1) keep_input = array_ops.squeeze( math_ops.equal(0, math_ops.mod(to_batch[0], 2))) batched = inp.maybe_shuffle_batch_join( [to_batch] * num_threads, batch_size, 10, 1, keep_input, enqueue_many=enqueue_many) variables.initialize_all_variables().run() variables.initialize_local_variables().run() threads = 
queue_runner_impl.start_queue_runners() for _ in range(num_batches): results = self.evaluate(batched) self.assertAllEqual([0] * batch_size, np.mod(results[0], 2)) self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2)) self.assertAllEqual([b"string"] * batch_size, results[2]) # Reached the limit. with self.assertRaises(errors_impl.OutOfRangeError): self.evaluate(batched) for thread in threads: thread.join() @test_util.run_v1_only("Input pipelines based on Queues are not supported " "when eager execution is enabled. TF2 uses tf.data " "instead.") def testSingleThreadKeepInput(self): self._testKeepInputHelper(1, False) @test_util.run_v1_only("Input pipelines based on Queues are not supported " "when eager execution is enabled. TF2 uses tf.data " "instead.") def testSingleThreadKeepInputEnqueueMany(self): self._testKeepInputHelper(1, True) @test_util.run_v1_only("Input pipelines based on Queues are not supported " "when eager execution is enabled. TF2 uses tf.data " "instead.") def testMultipleThreadKeepInput(self): self._testKeepInputHelper(5, False) @test_util.run_v1_only("Input pipelines based on Queues are not supported " "when eager execution is enabled. TF2 uses tf.data " "instead.") def testMultipleThreadKeepInputEnqueueMany(self): self._testKeepInputHelper(5, True) def testSingleThreadKeepInputPerExample(self): self._testKeepInputHelper(1, True, keep_input_vector=True) def testMultipleThreadKeepInputPerExample(self): self._testKeepInputHelper(5, True, keep_input_vector=True) def testInvalidKeepInputVector(self): with ops.Graph().as_default(): # Can't have vector `keep_input` with `enqueue_many=False`. with self.assertRaisesRegex(ValueError, "`keep_input` cannot be a vector"): inp.maybe_shuffle_batch_join([[array_ops.zeros(5)]], 1, 10, 1, keep_input=constant_op.constant( [True, False]), enqueue_many=False) # Can't have `keep_input` with more than one dimension. 
with self.assertRaisesRegex(ValueError, "must be 0 or 1 dimensions"): inp.maybe_shuffle_batch_join([[array_ops.zeros(5)]], 1, 10, 1, keep_input=constant_op.constant([[True]]), enqueue_many=True) # `keep_input` must have dimensions determined at graph construction. with self.assertRaisesRegex(ValueError, "must be known at graph construction"): inp.maybe_shuffle_batch_join([[array_ops.zeros(5)]], 1, 10, 1, keep_input=array_ops.placeholder( dtypes.bool), enqueue_many=True) def testMaybeBatchedSparseTensorInferredShape(self): with ops.Graph().as_default(): sparse = sparse_tensor.SparseTensor( indices=[[0]], values=[1.0], dense_shape=[1]) self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list()) batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, True) self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list()) def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self): with ops.Graph().as_default(): sparse = sparse_tensor.SparseTensor( indices=[[0]], values=[1.0], dense_shape=[1]) self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list()) batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, True, enqueue_many=True) self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list()) def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self): with ops.Graph().as_default(): sparse = sparse_tensor.SparseTensor( indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2]) self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list()) batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, [True, False], enqueue_many=True) self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list()) def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self): with ops.Graph().as_default(): sparse = sparse_tensor.SparseTensor( indices=array_ops.placeholder(dtypes.int64), values=array_ops.placeholder(dtypes.float32), dense_shape=array_ops.placeholder(dtypes.int64)) self.assertIs(None, 
sparse.dense_shape.get_shape().num_elements()) batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, True) self.assertIs(None, batched.dense_shape.get_shape().num_elements()) def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self): with ops.Graph().as_default(): sparse = sparse_tensor.SparseTensor( indices=array_ops.placeholder(dtypes.int64), values=array_ops.placeholder(dtypes.float32), dense_shape=array_ops.placeholder(dtypes.int64)) self.assertIs(None, sparse.dense_shape.get_shape().num_elements()) batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, True, enqueue_many=True) self.assertIs(None, batched.dense_shape.get_shape().num_elements()) def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self): with ops.Graph().as_default(): sparse = sparse_tensor.SparseTensor( indices=array_ops.placeholder(dtypes.int64), values=array_ops.placeholder(dtypes.float32), dense_shape=array_ops.placeholder(dtypes.int64)) self.assertIs(None, sparse.dense_shape.get_shape().num_elements()) batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, [True, False], enqueue_many=True) self.assertIs(None, batched.dense_shape.get_shape().num_elements()) if __name__ == "__main__": test_lib.main()
ShuffleBatchJoinTest
python
apache__airflow
providers/google/tests/unit/google/marketing_platform/operators/test_campaign_manager.py
{ "start": 7424, "end": 9082 }
class ____: @mock.patch( "airflow.providers.google.marketing_platform.operators.campaign_manager.GoogleCampaignManagerHook" ) @mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.BaseOperator") def test_execute(self, mock_base_op, hook_mock): report = {"report": "test"} mock_context = {"task_instance": mock.Mock()} hook_mock.return_value.insert_report.return_value = {"id": REPORT_ID} op = GoogleCampaignManagerInsertReportOperator( profile_id=PROFILE_ID, report=report, api_version=API_VERSION, task_id="test_task", ) op.execute(context=mock_context) hook_mock.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, api_version=API_VERSION, impersonation_chain=None, ) hook_mock.return_value.insert_report.assert_called_once_with(profile_id=PROFILE_ID, report=report) mock_context["task_instance"].xcom_push.assert_called_once_with(key="report_id", value=REPORT_ID) def test_prepare_template(self): report = {"key": "value"} with NamedTemporaryFile("w+", suffix=".json") as f: f.write(json.dumps(report)) f.flush() op = GoogleCampaignManagerInsertReportOperator( profile_id=PROFILE_ID, report=f.name, api_version=API_VERSION, task_id="test_task", ) op.prepare_template() assert isinstance(op.report, dict) assert op.report == report
TestGoogleCampaignManagerInsertReportOperator
python
google__pytype
pytype/overlays/special_builtins.py
{ "start": 19334, "end": 21054 }
class ____(BuiltinFunction): """For debugging. assert_type(x, t) asserts that the type of "x" is "t".""" # Minimal signature, only used for constructing exceptions. _SIGNATURE = function.Signature.from_param_names( "assert_type", ("variable", "type") ) _NAME = "assert_type" def call(self, node, func, args, alias_map=None): if len(args.posargs) == 2: var, typ = args.posargs else: raise error_types.WrongArgCount(self._SIGNATURE, args, self.ctx) # Convert both args to strings and compare them pp = self.ctx.errorlog.pretty_printer actual = pp.print_var_type(var, node) # If the 'expected' arg is an actual string, try to check it as is. # This is very fragile and not supported by other type checkers, but in # pytype historically this was the main mode of operation. try: expected = abstract_utils.get_atomic_python_constant(typ, str) matched_as_string = actual == expected except abstract_utils.ConversionError: matched_as_string = False # Regardless of whether there was a mismatch or 'expected' isn't a string, # we try to parse it in the context and convert it to a type string again. # This *should* be the main mode of operation, but it may still have issues. if not matched_as_string: typ = self.ctx.annotation_utils.extract_annotation( node, typ, "assert_type", self.ctx.vm.simple_stack() ) node, instance = self.ctx.vm.init_class_and_forward_node(node, typ) expected = pp.print_var_type(instance, node) if actual != expected: self.ctx.errorlog.assert_type(self.ctx.vm.frames, actual, expected) return node, self.ctx.convert.build_none(node)
AssertType
python
rapidsai__cudf
python/cudf/cudf/core/column/numerical.py
{ "start": 1679, "end": 40840 }
class ____(NumericalBaseColumn): """ A Column object for Numeric types. Parameters ---------- data : Buffer dtype : np.dtype The dtype associated with the data Buffer mask : Buffer, optional """ _VALID_BINARY_OPERATIONS = BinaryOperand._SUPPORTED_BINARY_OPERATIONS _VALID_PLC_TYPES = { plc.TypeId.INT8, plc.TypeId.INT16, plc.TypeId.INT32, plc.TypeId.INT64, plc.TypeId.UINT8, plc.TypeId.UINT16, plc.TypeId.UINT32, plc.TypeId.UINT64, plc.TypeId.FLOAT32, plc.TypeId.FLOAT64, plc.TypeId.BOOL8, } def __init__( self, plc_column: plc.Column, size: int, dtype: np.dtype, offset: int, null_count: int, exposed: bool, ) -> None: if ( cudf.get_option("mode.pandas_compatible") and dtype.kind not in "iufb" ) or ( not cudf.get_option("mode.pandas_compatible") and not (isinstance(dtype, np.dtype) and dtype.kind in "iufb") ): raise ValueError( f"dtype must be a floating, integer or boolean dtype. Got: {dtype}" ) super().__init__( plc_column=plc_column, size=size, dtype=dtype, offset=offset, null_count=null_count, exposed=exposed, ) def _clear_cache(self) -> None: super()._clear_cache() try: del self.nan_count except AttributeError: pass def __contains__(self, item: ScalarLike) -> bool: """ Returns True if column contains item, else False. """ # Handles improper item types # Fails if item is of type None, so the handler. try: search_item = self.dtype.type(item) if search_item != item and self.dtype.kind != "f": return False except (TypeError, ValueError): return False # TODO: Use `scalar`-based `contains` wrapper return self.contains( as_column( [search_item], dtype=self.dtype, nan_as_null=not cudf.get_option("mode.pandas_compatible"), ), ).any() @property def values(self) -> cp.ndarray: """ Return a CuPy representation of the NumericalColumn. 
""" dtype = self.dtype if is_pandas_nullable_extension_dtype(dtype): dtype = getattr(dtype, "numpy_dtype", dtype) if len(self) == 0: return cp.empty(0, dtype=dtype) col = self if col.has_nulls(): if dtype.kind == "b": raise ValueError( f"Column must have no nulls for dtype={col.dtype}" ) elif dtype.kind != "f": dtype = np.dtype(np.float64) col = col.astype(dtype) # type: ignore[assignment] col = col.fillna(np.nan) return cp.asarray(col.data).view(dtype) def indices_of(self, value: ScalarLike) -> NumericalColumn: if isinstance(value, (bool, np.bool_)) and self.dtype.kind != "b": raise ValueError( f"Cannot use a {type(value).__name__} to find an index of " f"a {self.dtype} Index." ) elif ( self.dtype.kind in {"c", "f"} and isinstance(value, (float, np.floating)) and np.isnan(value) ): return self.isnan().indices_of(True) else: return super().indices_of(value) def has_nulls(self, include_nan: bool = False) -> bool: return bool(self.null_count != 0) or ( include_nan and bool(self.nan_count != 0) ) def element_indexing(self, index: int) -> ScalarLike | None: result = super().element_indexing(index) if isinstance(result, pa.Scalar): return self.dtype.type(result.as_py()) return result def _cast_setitem_value(self, value: Any) -> plc.Scalar | ColumnBase: if is_scalar(value): if value is cudf.NA or value is None: scalar = pa.scalar( None, type=cudf_dtype_to_pa_type(self.dtype) ) else: try: scalar = pa.scalar(value) except ValueError as err: raise TypeError( f"Cannot set value of type {type(value)} to column of type {self.dtype}" ) from err is_scalar_bool = pa.types.is_boolean(scalar.type) if (is_scalar_bool and self.dtype.kind != "b") or ( not is_scalar_bool and self.dtype.kind == "b" ): raise TypeError( f"Invalid value {value} for dtype {self.dtype}" ) return pa_scalar_to_plc_scalar( scalar.cast(cudf_dtype_to_pa_type(self.dtype)) ) else: col = as_column(value) if col.dtype.kind == "b" and self.dtype.kind != "b": raise TypeError( f"Invalid value {value} for dtype 
{self.dtype}" ) return col.astype(self.dtype) def __invert__(self) -> ColumnBase: if self.dtype.kind in "ui": return self.unary_operator("invert") elif self.dtype.kind == "b": return self.unary_operator("not") else: return super().__invert__() def _binaryop(self, other: ColumnBinaryOperand, op: str) -> ColumnBase: int_float_dtype_mapping = { np.int8: np.float32, np.int16: np.float32, np.int32: np.float32, np.int64: np.float64, np.uint8: np.float32, np.uint16: np.float32, np.uint32: np.float64, np.uint64: np.float64, np.bool_: np.float32, } if cudf.get_option("mode.pandas_compatible"): int_float_dtype_mapping = { np.int8: np.float64, np.int16: np.float64, np.int32: np.float64, np.int64: np.float64, np.uint8: np.float64, np.uint16: np.float64, np.uint32: np.float64, np.uint64: np.float64, np.bool_: np.float64, } cmp_ops = { "__lt__", "__gt__", "__le__", "__ge__", "__eq__", "__ne__", } out_dtype = None if op in {"__truediv__", "__rtruediv__"}: # Division with integer types results in a suitable float. if truediv_type := int_float_dtype_mapping.get( self.dtype.numpy_dtype.type if is_pandas_nullable_extension_dtype(self.dtype) else self.dtype.type ): return self.astype( get_dtype_of_same_kind(self.dtype, np.dtype(truediv_type)) )._binaryop(other, op) elif op in cmp_ops: out_dtype = get_dtype_of_same_kind(self.dtype, np.dtype(np.bool_)) # If `other` is a Python integer and it is out-of-bounds # promotion could fail but we can trivially define the result # in terms of `notnull` or `NULL_NOT_EQUALS`. 
if type(other) is int and self.dtype.kind in "iu": truthiness = None iinfo = np.iinfo(self.dtype) if iinfo.min > other: truthiness = op in {"__ne__", "__gt__", "__ge__"} elif iinfo.max < other: truthiness = op in {"__ne__", "__lt__", "__le__"} # Compare with minimum value so that the result is true/false if truthiness is True: other = iinfo.min op = "__ge__" elif truthiness is False: other = iinfo.min op = "__lt__" elif op in {"NULL_EQUALS", "NULL_NOT_EQUALS"}: out_dtype = get_dtype_of_same_kind(self.dtype, np.dtype(np.bool_)) reflect, op = self._check_reflected_op(op) if (other := self._normalize_binop_operand(other)) is NotImplemented: return NotImplemented other_cudf_dtype = ( cudf_dtype_from_pa_type(other.type) if isinstance(other, pa.Scalar) else other.dtype ) if out_dtype is None: out_dtype = find_common_type((self.dtype, other_cudf_dtype)) if op in {"__mod__", "__floordiv__"}: tmp = self if reflect else other tmp_dtype = self.dtype if reflect else other_cudf_dtype # Guard against division by zero for integers. 
if tmp_dtype.kind in "iu" and ( (isinstance(tmp, NumericalColumn) and 0 in tmp) or (isinstance(tmp, pa.Scalar) and tmp.as_py() == 0) ): out_dtype = get_dtype_of_same_kind( out_dtype, np.dtype(np.float64) ) if op in {"__and__", "__or__", "__xor__"}: if self.dtype.kind == "f" or other_cudf_dtype.kind == "f": raise TypeError( f"Operation 'bitwise {op[2:-2]}' not supported between " f"{self.dtype.type.__name__} and " f"{other_cudf_dtype.type.__name__}" ) if self.dtype.kind == "b" and other_cudf_dtype.kind == "b": out_dtype = get_dtype_of_same_kind( self.dtype, np.dtype(np.bool_) ) elif self.dtype.kind == "b" or other_cudf_dtype.kind == "b": out_dtype = get_dtype_of_same_kind( out_dtype, np.dtype(np.bool_) ) elif ( op == "__pow__" and self.dtype.kind in "iu" and (other_cudf_dtype.kind in "iu") ): op = "INT_POW" lhs_dtype, rhs_dtype = ( (other_cudf_dtype, self.dtype) if reflect else (self.dtype, other_cudf_dtype) ) lhs, rhs = (other, self) if reflect else (self, other) if out_dtype.kind == "f" and is_pandas_nullable_extension_dtype( out_dtype ): if ( not is_pandas_nullable_extension_dtype(lhs_dtype) and lhs_dtype.kind == "f" and isinstance(lhs, NumericalColumn) ): lhs = lhs.nans_to_nulls() if ( not is_pandas_nullable_extension_dtype(rhs_dtype) and rhs_dtype.kind == "f" and isinstance(rhs, NumericalColumn) ): rhs = rhs.nans_to_nulls() lhs_binaryop: plc.Scalar | ColumnBase = ( pa_scalar_to_plc_scalar(lhs) if isinstance(lhs, pa.Scalar) else lhs ) rhs_binaryop: plc.Scalar | ColumnBase = ( pa_scalar_to_plc_scalar(rhs) if isinstance(rhs, pa.Scalar) else rhs ) res = binaryop.binaryop(lhs_binaryop, rhs_binaryop, op, out_dtype) if ( is_pandas_nullable_extension_dtype(out_dtype) and out_dtype.kind == "f" ): # If the output dtype is a pandas nullable extension type, # we need to ensure that the result is a NumericalColumn. 
res = res.nans_to_nulls() if op in {"__mod__", "__floordiv__"} and tmp_dtype.kind == "b": res = res.astype( get_dtype_of_same_kind(out_dtype, np.dtype(np.int8)) ) elif op == "INT_POW" and res.null_count: if ( isinstance(lhs_binaryop, plc.Scalar) and lhs_binaryop.to_py() == 1 and isinstance(rhs_binaryop, ColumnBase) and rhs_binaryop.null_count > 0 ): res = res.fillna(lhs_binaryop.to_py()) elif ( cudf.get_option("mode.pandas_compatible") and op in cmp_ops and not is_pandas_nullable_extension_dtype(self.dtype) ): res = res.fillna(op == "__ne__") return res def nans_to_nulls(self: Self) -> Self: # Only floats can contain nan. if self.dtype.kind != "f" or self.nan_count == 0: return self with acquire_spill_lock(): mask, _ = plc.transform.nans_to_nulls(self.plc_column) return self.set_mask(as_buffer(mask)) def _normalize_binop_operand(self, other: Any) -> pa.Scalar | ColumnBase: if isinstance(other, ColumnBase): if not isinstance(other, type(self)): return NotImplemented return other # TODO: cupy scalars are just aliases for numpy scalars, so extracting a scalar # from a cupy array would always require a D2H copy. As a result, cupy does not # produce scalars without explicit casting requests # https://docs.cupy.dev/en/stable/user_guide/difference.html#zero-dimensional-array # The below logic for type inference relies on numpy, however, so we need to go # that route for now. If possible we should find a way to avoid this. if isinstance(other, cp.ndarray) and other.ndim == 0: other = cp.asnumpy(other)[()] elif isinstance(other, np.ndarray) and other.ndim == 0: other = other[()] if is_scalar(other): if is_na_like(other): return pa.scalar(None, type=cudf_dtype_to_pa_type(self.dtype)) if not isinstance(other, (int, float, complex)): # Go via NumPy to get the value other = np.array(other) if other.dtype.kind in "uifc": other = other.item() # Try and match pandas and hence numpy. 
Deduce the common # dtype via the _value_ of other, and the dtype of self on NumPy 1.x # with NumPy 2, we force weak promotion even for our/NumPy scalars # to match pandas 2.2. # Weak promotion is not at all simple: # np.result_type(0, np.uint8) # => np.uint8 # np.result_type(np.asarray([0], dtype=np.int64), np.uint8) # => np.int64 # np.promote_types(np.int64(0), np.uint8) # => np.int64 # np.promote_types(np.asarray([0], dtype=np.int64).dtype, np.uint8) # => np.int64 if is_pandas_nullable_extension_dtype(self.dtype): if isinstance(self.dtype, pd.ArrowDtype): common_dtype = cudf.utils.dtypes.find_common_type( [self.dtype, other] ) else: common_dtype = get_dtype_of_same_kind( self.dtype, np.result_type(self.dtype.numpy_dtype, other), # noqa: TID251 ) else: common_dtype = np.result_type(self.dtype, other) # noqa: TID251 if common_dtype.kind in {"b", "i", "u", "f"}: # type: ignore[union-attr] if self.dtype.kind == "b" and not isinstance(other, bool): common_dtype = min_signed_type(other) return pa.scalar( other, type=cudf_dtype_to_pa_type(common_dtype) ) else: return NotImplemented else: return NotImplemented @acquire_spill_lock() def int2ip(self) -> StringColumn: if self.dtype != np.dtype(np.uint32): raise TypeError("Only uint32 type can be converted to ip") plc_column = plc.strings.convert.convert_ipv4.integers_to_ipv4( self.plc_column ) return type(self).from_pylibcudf(plc_column) # type: ignore[return-value] def as_string_column(self, dtype: DtypeObj) -> StringColumn: col = self if ( cudf.get_option("mode.pandas_compatible") and isinstance(dtype, np.dtype) and dtype.kind == "O" ): raise ValueError( "Cannot convert numerical column to string column " "when dtype is an object dtype in pandas compatibility mode." 
) if len(self) == 0: return cast( cudf.core.column.StringColumn, column_empty(0, dtype=CUDF_STRING_DTYPE), ) conv_func: Callable[[plc.Column], plc.Column] if self.dtype.kind == "b": conv_func = functools.partial( plc.strings.convert.convert_booleans.from_booleans, true_string=pa_scalar_to_plc_scalar(pa.scalar("True")), false_string=pa_scalar_to_plc_scalar(pa.scalar("False")), ) elif self.dtype.kind in {"i", "u"}: conv_func = plc.strings.convert.convert_integers.from_integers elif self.dtype.kind == "f": if cudf.get_option( "mode.pandas_compatible" ) and is_pandas_nullable_extension_dtype(dtype): # In pandas compatibility mode, we convert nans to nulls col = self.nans_to_nulls() conv_func = plc.strings.convert.convert_floats.from_floats else: raise ValueError(f"No string conversion from type {self.dtype}") with acquire_spill_lock(): return ( type(self) .from_pylibcudf( # type: ignore[return-value] conv_func(col.plc_column) ) ._with_type_metadata(dtype) ) def _as_temporal_column(self, dtype: np.dtype) -> plc.Column: """Convert Self to a temporal pylibcudf Column for as_datetime_column and as_timedelta_column""" return plc.Column( data_type=dtype_to_pylibcudf_type(dtype), size=self.size, data=plc.gpumemoryview(self.astype(np.dtype(np.int64)).base_data), mask=plc.gpumemoryview(self.base_mask) if self.base_mask is not None else None, null_count=self.null_count, offset=self.offset, children=[], ) def as_datetime_column(self, dtype: np.dtype) -> DatetimeColumn: return ( type(self) # type: ignore[return-value] .from_pylibcudf(self._as_temporal_column(dtype)) ._with_type_metadata(dtype) ) def as_timedelta_column(self, dtype: np.dtype) -> TimeDeltaColumn: return ( type(self) # type: ignore[return-value] .from_pylibcudf(self._as_temporal_column(dtype)) ._with_type_metadata(dtype) ) def as_decimal_column(self, dtype: DecimalDtype) -> DecimalBaseColumn: return self.cast(dtype=dtype) # type: ignore[return-value] def as_numerical_column(self, dtype: DtypeObj) -> NumericalColumn: 
if dtype == self.dtype: return self if cudf.get_option("mode.pandas_compatible"): if ( is_pandas_nullable_extension_dtype(self.dtype) and isinstance(dtype, np.dtype) and self.null_count > 0 ): if dtype.kind in "iu": raise ValueError("cannot convert NA to integer") elif dtype.kind == "b": raise ValueError("cannot convert float NaN to bool") if ( not is_pandas_nullable_extension_dtype(self.dtype) and is_pandas_nullable_extension_dtype(dtype) and dtype.kind == "f" ): res = self.nans_to_nulls().cast(dtype=dtype) res._dtype = dtype return res # type: ignore[return-value] if dtype_to_pylibcudf_type(dtype) == dtype_to_pylibcudf_type( self.dtype ): # Short-circuit the cast if the dtypes are equivalent # but not the same type object. if ( is_pandas_nullable_extension_dtype(dtype) and isinstance(self.dtype, np.dtype) and self.dtype.kind == "f" ): # If the dtype is a pandas nullable extension type, we need to # float column doesn't have any NaNs. res = self.nans_to_nulls() res._dtype = dtype # type: ignore[has-type] return res else: self._dtype = dtype return self if self.dtype.kind == "f" and dtype.kind in "iu": if ( not is_pandas_nullable_extension_dtype(dtype) and self.nan_count > 0 ): raise TypeError( "Cannot convert non-finite values (NA or inf) to integer" ) # If casting from float to int, we need to convert nans to nulls res = self.nans_to_nulls().cast(dtype=dtype) res._dtype = dtype return res # type: ignore[return-value] return self.cast(dtype=dtype) # type: ignore[return-value] def all(self, skipna: bool = True) -> bool: # If all entries are null the result is True, including when the column # is empty. result_col = self.nans_to_nulls() if skipna else self return super(type(self), result_col).all(skipna=skipna) def any(self, skipna: bool = True) -> bool: # Early exit for fast cases. 
result_col = self.nans_to_nulls() if skipna else self return super(type(self), result_col).any(skipna=skipna) @functools.cached_property def nan_count(self) -> int: if self.dtype.kind != "f": return super().nan_count return self.isnan().sum() def _process_values_for_isin( self, values: Sequence ) -> tuple[ColumnBase, ColumnBase]: try: lhs, rhs = super()._process_values_for_isin(values) except TypeError: # Can remove once dask 25.04 is the minimum version # https://github.com/dask/dask/pull/11869 if isinstance(values, np.ndarray) and values.dtype.kind == "O": return super()._process_values_for_isin(values.tolist()) else: raise if lhs.dtype != rhs.dtype and rhs.dtype != CUDF_STRING_DTYPE: if rhs.can_cast_safely(lhs.dtype): rhs = rhs.astype(lhs.dtype) elif lhs.can_cast_safely(rhs.dtype): lhs = lhs.astype(rhs.dtype) return lhs, rhs def _can_return_nan(self, skipna: bool | None = None) -> bool: return not skipna and self.has_nulls(include_nan=True) def _min_column_type(self, expected_type: np.dtype) -> np.dtype: """ Return the smallest dtype which can represent all elements of self. 
""" if self.null_count == len(self): return self.dtype min_value, max_value = self.min(), self.max() either_is_inf = np.isinf(min_value) or np.isinf(max_value) if not either_is_inf and expected_type.kind == "i": max_bound_dtype = min_signed_type(max_value) min_bound_dtype = min_signed_type(min_value) return np.promote_types(max_bound_dtype, min_bound_dtype) elif not either_is_inf and expected_type.kind == "u": max_bound_dtype = min_unsigned_type(max_value) min_bound_dtype = min_unsigned_type(min_value) return np.promote_types(max_bound_dtype, min_bound_dtype) elif self.dtype.kind == "f" or expected_type.kind == "f": return np.promote_types( expected_type, np.promote_types( np.min_scalar_type(float(max_value)), np.min_scalar_type(float(min_value)), ), ) else: return self.dtype def find_and_replace( self, to_replace: ColumnBase | list, replacement: ColumnBase | list, all_nan: bool = False, ) -> Self: """ Return col with *to_replace* replaced with *value*. """ # TODO: all_nan and list arguments only used for this # this subclass, try to factor these cases out of this method # If all of `to_replace`/`replacement` are `None`, # dtype of `to_replace_col`/`replacement_col` # is inferred as `string`, but this is a valid # float64 column too, Hence we will need to type-cast # to self.dtype. 
to_replace_col = as_column(to_replace) if to_replace_col.null_count == len(to_replace_col): to_replace_col = to_replace_col.astype(self.dtype) replacement_col = as_column(replacement) if replacement_col.null_count == len(replacement_col): replacement_col = replacement_col.astype(self.dtype) if not isinstance(to_replace_col, type(replacement_col)): raise TypeError( f"to_replace and value should be of same types," f"got to_replace dtype: {to_replace_col.dtype} and " f"value dtype: {replacement_col.dtype}" ) if not isinstance(to_replace_col, NumericalColumn) and not isinstance( replacement_col, NumericalColumn ): return self.copy() try: to_replace_col = _normalize_find_and_replace_input( self.dtype, to_replace ) except TypeError: # if `to_replace` cannot be normalized to the current dtype, # that means no value of `to_replace` is present in self, # Hence there is no point of proceeding further. return self.copy() if all_nan: replacement_col = as_column(replacement, dtype=self.dtype) else: try: replacement_col = _normalize_find_and_replace_input( self.dtype, replacement ) except TypeError: # Some floating values can never be converted into signed or unsigned integers # for those cases, we just need a column of `replacement` constructed # with its own type for the final type determination below at `find_common_type` # call. 
replacement_col = as_column( replacement, dtype=self.dtype if len(replacement) <= 0 else None, ) common_type = find_common_type( (to_replace_col.dtype, replacement_col.dtype, self.dtype) ) if len(replacement_col) == 1 and len(to_replace_col) > 1: replacement_col = replacement_col.repeat(len(to_replace_col)) elif len(replacement_col) == 1 and len(to_replace_col) == 0: return self.copy() replaced = cast(Self, self.astype(common_type)) df = cudf.DataFrame._from_data( { "old": to_replace_col.astype(common_type), "new": replacement_col.astype(common_type), } ) df = df.drop_duplicates(subset=["old"], keep="last", ignore_index=True) if df._data["old"].null_count == 1: replaced = replaced.fillna( df._data["new"] .apply_boolean_mask(df._data["old"].isnull()) .element_indexing(0) ) df = df.dropna(subset=["old"]) return replaced.replace(df._data["old"], df._data["new"]) def _validate_fillna_value( self, fill_value: ScalarLike | ColumnLike ) -> plc.Scalar | ColumnBase: """Align fill_value for .fillna based on column type.""" if is_scalar(fill_value): cudf_obj = ColumnBase.from_pylibcudf( plc.Column.from_scalar( pa_scalar_to_plc_scalar(pa.scalar(fill_value)), 1 ) ) if not cudf_obj.can_cast_safely(self.dtype): raise TypeError( f"Cannot safely cast non-equivalent " f"{type(fill_value).__name__} to {self.dtype.name}" ) return super()._validate_fillna_value(fill_value) else: cudf_obj = as_column(fill_value, nan_as_null=False) if not cudf_obj.can_cast_safely(self.dtype): raise TypeError( f"Cannot safely cast non-equivalent " f"{cudf_obj.dtype.type.__name__} to " f"{self.dtype.type.__name__}" ) return cudf_obj.astype(self.dtype) def can_cast_safely(self, to_dtype: DtypeObj) -> bool: """ Returns true if all the values in self can be safely cast to dtype """ # Convert potential pandas extension dtypes to numpy dtypes # For example, convert Int32Dtype to np.dtype('int32') self_dtype_numpy = ( np.dtype(self.dtype.numpy_dtype) if hasattr(self.dtype, "numpy_dtype") else self.dtype ) 
to_dtype_numpy = ( np.dtype(to_dtype.numpy_dtype) if hasattr(to_dtype, "numpy_dtype") else to_dtype ) if self_dtype_numpy.kind == to_dtype_numpy.kind: # Check if self dtype can be safely cast to to_dtype # For same kinds, we can compare the sizes if self_dtype_numpy <= to_dtype_numpy: return True else: if self_dtype_numpy.kind == "f": # Exclude 'np.inf', '-np.inf' not_inf = (self != np.inf) & (self != -np.inf) col = self.apply_boolean_mask(not_inf) else: col = self min_ = col.min() # TODO: depending on implementation of cudf scalar and future # refactor of min/max, change the test method if np.isnan(min_): # Column contains only infs return True # Kinds are the same but to_dtype is smaller if "float" in to_dtype_numpy.name: finfo = np.finfo(to_dtype_numpy) lower_: int | float upper_: int | float lower_, upper_ = finfo.min, finfo.max # type: ignore[assignment] # Check specifically for np.pi values when casting to lower precision if self_dtype_numpy.itemsize > to_dtype_numpy.itemsize: # Check if column contains pi value if len(col) > 0: # Create a simple column with pi to test if the precision matters pi_col = self == np.pi # Test if pi can be correctly represented after casting if pi_col.any(): # If pi is present, we cannot safely cast to lower precision return False elif "int" in to_dtype_numpy.name: iinfo = np.iinfo(to_dtype_numpy) lower_, upper_ = iinfo.min, iinfo.max return (min_ >= lower_) and (col.max() < upper_) # want to cast int to uint elif self_dtype_numpy.kind == "i" and to_dtype_numpy.kind == "u": i_max_ = np.iinfo(self_dtype_numpy).max u_max_ = np.iinfo(to_dtype_numpy).max return (self.min() >= 0) and ( (i_max_ <= u_max_) or (self.max() < u_max_) ) # want to cast uint to int elif self_dtype_numpy.kind == "u" and to_dtype_numpy.kind == "i": u_max_ = np.iinfo(self_dtype_numpy).max i_max_ = np.iinfo(to_dtype_numpy).max return (u_max_ <= i_max_) or (self.max() < i_max_) # want to cast int to float elif ( self_dtype_numpy.kind in {"i", "u"} and 
to_dtype_numpy.kind == "f" ): info = np.finfo(to_dtype_numpy) biggest_exact_int = 2 ** (info.nmant + 1) if (self.min() >= -biggest_exact_int) and ( self.max() <= biggest_exact_int ): return True else: filled = self.fillna(0) return ( filled.astype(to_dtype).astype(filled.dtype) == filled ).all() # want to cast float to int: elif self_dtype_numpy.kind == "f" and to_dtype_numpy.kind in { "i", "u", }: if self.nan_count > 0: return False iinfo = np.iinfo(to_dtype_numpy) min_, max_ = iinfo.min, iinfo.max # best we can do is hope to catch it here and avoid compare # Use Python floats, which have precise comparison for float64. # NOTE(seberg): it would make sense to limit to the mantissa range. if (float(self.min()) >= min_) and (float(self.max()) <= max_): filled = self.fillna(0) return (filled % 1 == 0).all() else: return False return False def _with_type_metadata( self: Self, dtype: DtypeObj, ) -> ColumnBase: if isinstance(dtype, CategoricalDtype): codes_dtype = min_unsigned_type(len(dtype.categories)) codes = cast(NumericalColumn, self.astype(codes_dtype)) return CategoricalColumn( plc_column=codes.to_pylibcudf(mode="read"), size=codes.size, dtype=dtype, offset=codes.offset, null_count=codes.null_count, exposed=False, ) if cudf.get_option("mode.pandas_compatible"): res_dtype = get_dtype_of_same_type(dtype, self.dtype) if ( is_pandas_nullable_extension_dtype(res_dtype) and isinstance(self.dtype, np.dtype) and self.dtype.kind == "f" ): # If the dtype is a pandas nullable extension type, we need to # float column doesn't have any NaNs. 
res = self.nans_to_nulls() res._dtype = res_dtype return res self._dtype = res_dtype return self def _reduction_result_dtype(self, reduction_op: str) -> Dtype: if reduction_op in {"sum", "product"}: if self.dtype.kind == "f": return self.dtype elif self.dtype.kind == "u": return np.dtype("uint64") return np.dtype("int64") elif reduction_op == "sum_of_squares": return find_common_type((self.dtype, np.dtype(np.uint64))) elif reduction_op in {"var", "std", "mean"}: if self.dtype.kind == "f": return self.dtype else: return np.dtype("float64") return super()._reduction_result_dtype(reduction_op) @acquire_spill_lock() def digitize(self, bins: np.ndarray, right: bool = False) -> Self: """Return the indices of the bins to which each value in column belongs. Parameters ---------- bins : np.ndarray 1-D column-like object of bins with same type as `column`, should be monotonically increasing. right : bool Indicates whether interval contains the right or left bin edge. Returns ------- A column containing the indices """ if self.dtype != bins.dtype: raise ValueError( "digitize() expects bins and input column have the same dtype." 
) bin_col = as_column(bins, dtype=bins.dtype) if bin_col.nullable: raise ValueError("`bins` cannot contain null entries.") return type(self).from_pylibcudf( getattr(plc.search, "lower_bound" if right else "upper_bound")( plc.Table([bin_col.plc_column]), plc.Table([self.plc_column]), [plc.types.Order.ASCENDING], [plc.types.NullOrder.BEFORE], ) ) def _normalize_find_and_replace_input( input_column_dtype: DtypeObj, col_to_normalize: ColumnBase | list ) -> ColumnBase: normalized_column = as_column( col_to_normalize, dtype=input_column_dtype if len(col_to_normalize) <= 0 else None, ) col_to_normalize_dtype = normalized_column.dtype if isinstance(col_to_normalize, list): if normalized_column.null_count == len(normalized_column): normalized_column = normalized_column.astype(input_column_dtype) if normalized_column.can_cast_safely(input_column_dtype): return normalized_column.astype(input_column_dtype) col_to_normalize_dtype = normalized_column._min_column_type( # type: ignore[attr-defined] input_column_dtype ) # Scalar case if len(col_to_normalize) == 1: if _is_null_host_scalar(col_to_normalize[0]): return normalized_column.astype(input_column_dtype) if np.isinf(col_to_normalize[0]): return normalized_column col_to_normalize_casted = np.array(col_to_normalize[0]).astype( col_to_normalize_dtype ) if not np.isnan(col_to_normalize_casted) and ( col_to_normalize_casted != col_to_normalize[0] ): raise TypeError( f"Cannot safely cast non-equivalent " f"{col_to_normalize[0]} " f"to {input_column_dtype.name}" ) if normalized_column.can_cast_safely(col_to_normalize_dtype): return normalized_column.astype(col_to_normalize_dtype) elif hasattr(col_to_normalize, "dtype"): col_to_normalize_dtype = col_to_normalize.dtype else: raise TypeError(f"Type {type(col_to_normalize)} not supported") if ( col_to_normalize_dtype.kind == "f" and input_column_dtype.kind in {"i", "u"} ) or (col_to_normalize_dtype.num > input_column_dtype.num): raise TypeError( f"Potentially unsafe cast for 
non-equivalent " f"{col_to_normalize_dtype.name} " f"to {input_column_dtype.name}" ) if not normalized_column.can_cast_safely(input_column_dtype): return normalized_column return normalized_column.astype(input_column_dtype)
NumericalColumn
python
neetcode-gh__leetcode
python/0230-kth-smallest-element-in-a-bst.py
{ "start": 164, "end": 534 }
class ____: def kthSmallest(self, root: TreeNode, k: int) -> int: stack = [] curr = root while stack or curr: while curr: stack.append(curr) curr = curr.left curr = stack.pop() k -= 1 if k == 0: return curr.val curr = curr.right
Solution
python
django__django
django/db/migrations/questioner.py
{ "start": 3478, "end": 11902 }
class ____(MigrationQuestioner):
    """Questioner that resolves migration questions by prompting the user
    on the console.

    Prompts are written to ``prompt_output`` (a wrapper around
    ``sys.stdout`` by default) and answers are read with ``input()``.
    """

    def __init__(
        self, defaults=None, specified_apps=None, dry_run=None, prompt_output=None
    ):
        super().__init__(
            defaults=defaults, specified_apps=specified_apps, dry_run=dry_run
        )
        # Fall back to stdout when no explicit output wrapper is given.
        self.prompt_output = prompt_output or OutputWrapper(sys.stdout)

    def _boolean_input(self, question, default=None):
        """Ask a yes/no question and return True for answers starting with 'y'.

        An empty answer returns ``default`` when one is provided; otherwise
        the user is re-prompted until the answer starts with 'y' or 'n'
        (case-insensitive).
        """
        self.prompt_output.write(f"{question} ", ending="")
        result = input()
        if not result and default is not None:
            return default
        while not result or result[0].lower() not in "yn":
            self.prompt_output.write("Please answer yes or no: ", ending="")
            result = input()
        return result[0].lower() == "y"

    def _choice_input(self, question, choices):
        """Present a numbered menu and return the 1-based index chosen."""
        self.prompt_output.write(f"{question}")
        for i, choice in enumerate(choices):
            self.prompt_output.write(" %s) %s" % (i + 1, choice))
        self.prompt_output.write("Select an option: ", ending="")
        while True:
            try:
                result = input()
                value = int(result)
            except ValueError:
                # Non-numeric input: fall through to the re-prompt below.
                pass
            except KeyboardInterrupt:
                self.prompt_output.write("\nCancelled.")
                sys.exit(1)
            else:
                if 0 < value <= len(choices):
                    return value
            self.prompt_output.write("Please select a valid option: ", ending="")

    def _ask_default(self, default=""):
        """
        Prompt for a default value.

        The ``default`` argument allows providing a custom default value (as a
        string) which will be shown to the user and used as the return value
        if the user doesn't provide any other input.
        """
        self.prompt_output.write("Please enter the default value as valid Python.")
        if default:
            self.prompt_output.write(
                f"Accept the default '{default}' by pressing 'Enter' or "
                f"provide another value."
            )
        self.prompt_output.write(
            "The datetime and django.utils.timezone modules are available, so "
            "it is possible to provide e.g. timezone.now as a value."
        )
        self.prompt_output.write("Type 'exit' to exit this prompt")
        while True:
            if default:
                prompt = "[default: {}] >>> ".format(default)
            else:
                prompt = ">>> "
            self.prompt_output.write(prompt, ending="")
            try:
                code = input()
            except KeyboardInterrupt:
                self.prompt_output.write("\nCancelled.")
                sys.exit(1)
            if not code and default:
                code = default
            if not code:
                self.prompt_output.write(
                    "Please enter some code, or 'exit' (without quotes) to exit."
                )
            elif code == "exit":
                sys.exit(1)
            else:
                try:
                    # Deliberate interactive eval: only datetime and timezone
                    # are exposed to the user's expression.
                    return eval(code, {}, {"datetime": datetime, "timezone": timezone})
                except Exception as e:
                    # Show the error and loop so the user can try again.
                    self.prompt_output.write(f"{e.__class__.__name__}: {e}")

    def ask_not_null_addition(self, field_name, model_name):
        """Adding a NOT NULL field to a model."""
        if not self.dry_run:
            choice = self._choice_input(
                f"It is impossible to add a non-nullable field '{field_name}' "
                f"to {model_name} without specifying a default. This is "
                f"because the database needs something to populate existing "
                f"rows.\n"
                f"Please select a fix:",
                [
                    (
                        "Provide a one-off default now (will be set on all existing "
                        "rows with a null value for this column)"
                    ),
                    "Quit and manually define a default value in models.py.",
                ],
            )
            if choice == 2:
                # User chose to quit and edit models.py instead.
                sys.exit(3)
            else:
                return self._ask_default()
        return None

    def ask_not_null_alteration(self, field_name, model_name):
        """Changing a NULL field to NOT NULL."""
        if not self.dry_run:
            choice = self._choice_input(
                f"It is impossible to change a nullable field '{field_name}' "
                f"on {model_name} to non-nullable without providing a "
                f"default. This is because the database needs something to "
                f"populate existing rows.\n"
                f"Please select a fix:",
                [
                    (
                        "Provide a one-off default now (will be set on all existing "
                        "rows with a null value for this column)"
                    ),
                    "Ignore for now. Existing rows that contain NULL values "
                    "will have to be handled manually, for example with a "
                    "RunPython or RunSQL operation.",
                    "Quit and manually define a default value in models.py.",
                ],
            )
            if choice == 2:
                # "Ignore for now": report that no default was provided.
                return NOT_PROVIDED
            elif choice == 3:
                # User chose to quit and edit models.py instead.
                sys.exit(3)
            else:
                return self._ask_default()
        return None

    def ask_rename(self, model_name, old_name, new_name, field_instance):
        """Was this field really renamed?"""
        msg = "Was %s.%s renamed to %s.%s (a %s)? [y/N]"
        return self._boolean_input(
            msg
            % (
                model_name,
                old_name,
                model_name,
                new_name,
                field_instance.__class__.__name__,
            ),
            False,
        )

    def ask_rename_model(self, old_model_state, new_model_state):
        """Was this model really renamed?"""
        msg = "Was the model %s.%s renamed to %s? [y/N]"
        return self._boolean_input(
            msg
            % (old_model_state.app_label, old_model_state.name, new_model_state.name),
            False,
        )

    def ask_merge(self, app_label):
        # Defaults to "no": the user must confirm the branches don't conflict.
        return self._boolean_input(
            "\nMerging will only work if the operations printed above do not conflict\n"
            + "with each other (working on different fields or models)\n"
            + "Should these migration branches be merged? [y/N]",
            False,
        )

    def ask_auto_now_add_addition(self, field_name, model_name):
        """Adding an auto_now_add field to a model."""
        if not self.dry_run:
            choice = self._choice_input(
                f"It is impossible to add the field '{field_name}' with "
                f"'auto_now_add=True' to {model_name} without providing a "
                f"default. This is because the database needs something to "
                f"populate existing rows.\n",
                [
                    "Provide a one-off default now which will be set on all "
                    "existing rows",
                    "Quit and manually define a default value in models.py.",
                ],
            )
            if choice == 2:
                # User chose to quit and edit models.py instead.
                sys.exit(3)
            else:
                # Offer timezone.now as the suggested one-off default.
                return self._ask_default(default="timezone.now")
        return None

    def ask_unique_callable_default_addition(self, field_name, model_name):
        """Adding a unique field with a callable default."""
        if not self.dry_run:
            version = get_docs_version()
            choice = self._choice_input(
                f"Callable default on unique field {model_name}.{field_name} "
                f"will not generate unique values upon migrating.\n"
                f"Please choose how to proceed:\n",
                [
                    f"Continue making this migration as the first step in "
                    f"writing a manual migration to generate unique values "
                    f"described here: "
                    f"https://docs.djangoproject.com/en/{version}/howto/"
                    f"writing-migrations/#migrations-that-add-unique-fields.",
                    "Quit and edit field options in models.py.",
                ],
            )
            if choice == 2:
                # User chose to quit and edit models.py instead.
                sys.exit(3)
        return None
InteractiveMigrationQuestioner
python
bottlepy__bottle
test/test_router.py
{ "start": 73, "end": 6562 }
class ____(unittest.TestCase):
    """Exercise bottle.Router: rule syntax, filters, matching, URL building."""

    # When True, match() adds 'wsgi.run_once' to the environ — presumably to
    # exercise bottle's CGI code path in subclasses; confirm in Router.match.
    CGI = False

    def setUp(self):
        self.r = bottle.Router()

    def add(self, path, target, method='GET', **ka):
        # Register a route while silencing warnings (e.g. for legacy syntax).
        # NOTE(review): without record=True, 'as r' binds None; r is unused.
        with warnings.catch_warnings() as r:
            warnings.simplefilter("ignore")
            self.r.add(path, method, target, **ka)

    def match(self, path, method='GET'):
        # Match against the router using a minimal WSGI environ dict.
        env = {'PATH_INFO': path, 'REQUEST_METHOD': method}
        if self.CGI:
            env['wsgi.run_once'] = 'true'
        return self.r.match(env)

    def assertMatches(self, rule, url, method='GET', **args):
        # Add `rule` (target == rule string), then assert `url` matches it
        # and extracts exactly `args` as wildcard values.
        self.add(rule, rule, method)
        target, urlargs = self.match(url, method)
        self.assertEqual(rule, target)
        self.assertEqual(args, urlargs)

    def testBasic(self):
        # Old-style ':name' / ':name#regex#' wildcard syntax.
        self.assertMatches('/static', '/static')
        self.assertMatches('/\\:its/:#.+#/:test/:name#[a-z]+#/',
                           '/:its/a/cruel/world/', test='cruel', name='world')
        self.assertMatches('/:test', '/test', test='test')  # No tail
        self.assertMatches(':test/', 'test/', test='test')  # No head
        self.assertMatches('/:test/', '/test/', test='test')  # Middle
        self.assertMatches(':test', 'test', test='test')  # Full wildcard
        self.assertMatches('/:#anon#/match', '/anon/match')  # Anon wildcards
        self.assertRaises(bottle.HTTPError, self.match, '//no/m/at/ch/')

    def testNewSyntax(self):
        # New-style '<name:filter:config>' wildcard syntax.
        self.assertMatches('/static', '/static')
        self.assertMatches('/\\<its>/<:re:.+>/<test>/<name:re:[a-z]+>/',
                           '/<its>/a/cruel/world/', test='cruel', name='world')
        self.assertMatches('/<test>', '/test', test='test')  # No tail
        self.assertMatches('<test>/', 'test/', test='test')  # No head
        self.assertMatches('/<test>/', '/test/', test='test')  # Middle
        self.assertMatches('<test>', 'test', test='test')  # Full wildcard
        self.assertMatches('/<:re:anon>/match', '/anon/match')  # Anon wildcards
        self.assertRaises(bottle.HTTPError, self.match, '//no/m/at/ch/')

    def testUnicode(self):
        # Non-ASCII path segments must round-trip through matching.
        self.assertMatches('/uni/<x>', '/uni/瓶', x='瓶')

    def testValueErrorInFilter(self):
        # The in_filter here is int(); int('noint') raises ValueError, which
        # must surface as an HTTPError (no match), not a crash.
        self.r.add_filter('test', lambda x: ('.*', int, int))
        self.assertMatches('/int/<i:test>', '/int/5', i=5)  # No tail
        self.assertRaises(bottle.HTTPError, self.match, '/int/noint')

    def testIntFilter(self):
        self.assertMatches('/object/<id:int>', '/object/567', id=567)
        self.assertRaises(bottle.HTTPError, self.match, '/object/abc')

    def testFloatFilter(self):
        # Accepts '1', '1.1', '.1' and '1.'; rejects non-numeric and bare '.'.
        self.assertMatches('/object/<id:float>', '/object/1', id=1)
        self.assertMatches('/object/<id:float>', '/object/1.1', id=1.1)
        self.assertMatches('/object/<id:float>', '/object/.1', id=0.1)
        self.assertMatches('/object/<id:float>', '/object/1.', id=1)
        self.assertRaises(bottle.HTTPError, self.match, '/object/abc')
        self.assertRaises(bottle.HTTPError, self.match, '/object/')
        self.assertRaises(bottle.HTTPError, self.match, '/object/.')

    def testPathFilter(self):
        self.assertMatches('/<id:path>/:f', '/a/b', id='a', f='b')
        self.assertMatches('/<id:path>', '/a', id='a')

    def testWildcardNames(self):
        # Wildcard names that coincide with words like 'md5' are still
        # treated as plain names, not filters.
        self.assertMatches('/alpha/:abc', '/alpha/alpha', abc='alpha')
        self.assertMatches('/alnum/:md5', '/alnum/sha1', md5='sha1')

    def testParentheses(self):
        # Literal parentheses in rules must not break the compiled pattern.
        self.assertMatches('/func(:param)', '/func(foo)', param='foo')
        self.assertMatches('/func2(:param#(foo|bar)#)', '/func2(foo)', param='foo')
        self.assertMatches('/func2(:param#(foo|bar)#)', '/func2(bar)', param='bar')
        self.assertRaises(bottle.HTTPError, self.match, '/func2(baz)')

    def testErrorInPattern(self):
        # An unbalanced regex inside a rule must raise when the rule is added.
        self.assertRaises(Exception, self.assertMatches, '/:bug#(#/', '/foo/')
        self.assertRaises(Exception, self.assertMatches, '/<:re:(>/', '/foo/')

    def testBuild(self):
        add, build = self.add, self.r.build
        add('/:test/:name#[a-z]+#/', 'handler', name='testroute')
        url = build('testroute', test='hello', name='world')
        self.assertEqual('/hello/world/', url)
        # Extra keyword arguments become the query string.
        url = build('testroute', test='hello', name='world', q='value')
        self.assertEqual('/hello/world/?q=value', url)
        # RouteBuildError: Missing URL argument: 'test'
        self.assertRaises(bottle.RouteBuildError, build, 'test')

    def testBuildAnon(self):
        # Anonymous wildcards are filled from positional build() arguments.
        add, build = self.add, self.r.build
        add('/anon/:#.#', 'handler', name='anonroute')
        url = build('anonroute', 'hello')
        self.assertEqual('/anon/hello', url)
        url = build('anonroute', 'hello', q='value')
        self.assertEqual('/anon/hello?q=value', url)
        # RouteBuildError: Missing URL argument: anon0.
        self.assertRaises(bottle.RouteBuildError, build, 'anonroute')

    def testBuildFilter(self):
        # Building with a filtered wildcard applies the filter's out-conversion.
        add, build = self.add, self.r.build
        add('/int/<:int>', 'handler', name='introute')
        url = build('introute', '5')
        self.assertEqual('/int/5', url)
        # RouteBuildError: Missing URL argument: anon0.
        self.assertRaises(ValueError, build, 'introute', 'hello')

    def test_dynamic_before_static_any(self):
        ''' Static ANY routes have lower priority than dynamic GET routes. '''
        # Order of add() calls matters: the dynamic GET route added second
        # must win over the earlier static ANY route.
        self.add('/foo', 'foo', 'ANY')
        self.assertEqual(self.match('/foo')[0], 'foo')
        self.add('/<:>', 'bar', 'GET')
        self.assertEqual(self.match('/foo')[0], 'bar')

    def test_any_static_before_dynamic(self):
        ''' Static ANY routes have higher priority than dynamic ANY routes. '''
        self.add('/<:>', 'bar', 'ANY')
        self.assertEqual(self.match('/foo')[0], 'bar')
        self.add('/foo', 'foo', 'ANY')
        self.assertEqual(self.match('/foo')[0], 'foo')

    def test_dynamic_any_if_method_exists(self):
        ''' Check dynamic ANY routes if the matching method is known,
            but not matched.'''
        self.add('/bar<:>', 'bar', 'GET')
        self.assertEqual(self.match('/barx')[0], 'bar')
        self.add('/foo<:>', 'foo', 'ANY')
        self.assertEqual(self.match('/foox')[0], 'foo')

    def test_lots_of_routes(self):
        # Exceed _MAX_GROUPS_PER_PATTERN, presumably forcing the router to
        # spread rules over multiple compiled regexes — confirm in Router.
        n = bottle.Router._MAX_GROUPS_PER_PATTERN+10
        for i in range(n):
            self.add('/<:>/'+str(i), str(i), 'GET')
        self.assertEqual(self.match('/foo/'+str(n-1))[0], str(n-1))
TestRouter