_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q274300
NetworkRunner._extract_subruns
test
def _extract_subruns(self, traj, pre_run=False):
    """Extracts subruns from the trajectory.

    Subruns are duration parameters annotated with an integer ``order``
    annotation; the returned list is sorted by that order.

    :param traj: Trajectory container
    :param pre_run: Boolean whether current run is regular or a pre-run
    :raises: RuntimeError if orders are duplicates or even missing
    :return: List of duration parameters sorted by their order annotation
    """
    # Pre-runs read their durations from a separate parameter group.
    if pre_run:
        durations_list = traj.f_get_all(self._pre_durations_group_name)
    else:
        durations_list = traj.f_get_all(self._durations_group_name)
    subruns = {}
    orders = []
    for durations in durations_list:
        for duration_param in durations.f_iter_leaves(with_links=False):
            if 'order' in duration_param.v_annotations:
                order = duration_param.v_annotations.order
            else:
                raise RuntimeError('Your duration parameter %s has no order. Please add '
                                   'an order in `v_annotations.order`.' %
                                   duration_param.v_full_name)
            # Orders must be unique across all duration parameters.
            if order in subruns:
                raise RuntimeError('Your durations must differ in their order, there are two '
                                   'with order %d.' % order)
            else:
                subruns[order] = duration_param
                orders.append(order)
    return [subruns[order] for order in sorted(orders)]
python
{ "resource": "" }
q274301
NetworkRunner._execute_network_run
test
def _execute_network_run(self, traj, network, network_dict, component_list, analyser_list, pre_run=False): """Generic `execute_network_run` function, handles experimental runs as well as pre-runs. See also :func:`~pypet.brian2.network.NetworkRunner.execute_network_run` and :func:`~pypet.brian2.network.NetworkRunner.execute_network_pre_run`. """ # Initially extract the `subrun_list` subrun_list = self._extract_subruns(traj, pre_run=pre_run) # counter for subruns subrun_number = 0 # Execute all subruns in order while len(subrun_list) > 0: # Get the next subrun current_subrun = subrun_list.pop(0) # 1. Call `add` of all normal components for component in component_list: component.add_to_network(traj, network, current_subrun, subrun_list, network_dict) # 2. Call `add` of all analyser components for analyser in analyser_list: analyser.add_to_network(traj, network, current_subrun, subrun_list, network_dict) # 3. Call `add` of the network runner itself self.add_to_network(traj, network, current_subrun, subrun_list, network_dict) # 4. Run the network self._logger.info('STARTING subrun `%s` (#%d) lasting %s.' % (current_subrun.v_name, subrun_number, str(current_subrun.f_get()))) network.run(duration=current_subrun.f_get(), report=self._report, report_period=self._report_period) # 5. Call `analyse` of all analyser components for analyser in analyser_list: analyser.analyse(traj, network, current_subrun, subrun_list, network_dict) # 6. Call `remove` of the network runner itself self.remove_from_network(traj, network, current_subrun, subrun_list, network_dict) # 7. Call `remove` for all analyser components for analyser in analyser_list: analyser.remove_from_network(traj, network, current_subrun, subrun_list, network_dict) # 8. Call `remove` for all normal components for component in component_list: component.remove_from_network(traj, network, current_subrun, subrun_list, network_dict) subrun_number += 1
python
{ "resource": "" }
q274302
NetworkManager.add_parameters
test
def add_parameters(self, traj): """Adds parameters for a network simulation. Calls :func:`~pypet.brian2.network.NetworkComponent.add_parameters` for all components, analyser, and the network runner (in this order). :param traj: Trajectory container """ self._logger.info('Adding Parameters of Components') for component in self.components: component.add_parameters(traj) if self.analysers: self._logger.info('Adding Parameters of Analysers') for analyser in self.analysers: analyser.add_parameters(traj) self._logger.info('Adding Parameters of Runner') self.network_runner.add_parameters(traj)
python
{ "resource": "" }
q274303
NetworkManager.pre_run_network
test
def pre_run_network(self, traj):
    """Starts a network run before the individual run.

    Useful if a network needs an initial run that can be shared by all
    individual experimental runs during parameter exploration.

    Needs to be called by the user. If `pre_run_network` is started by the
    user, :func:`~pypet.brian2.network.NetworkManager.pre_build` will be
    automatically called from this function.

    This function will create a new BRIAN2 network which is run by the
    :class:`~pypet.brian2.network.NetworkRunner` and it's
    :func:`~pypet.brian2.network.NetworkRunner.execute_network_pre_run`.

    To see how a network run is structured also take a look at
    :func:`~pypet.brian2.network.NetworkRunner.run_network`.

    :param traj: Trajectory container
    """
    self.pre_build(traj)
    self._logger.info('\n------------------------\n'
                      'Pre-Running the Network\n'
                      '------------------------')
    self._network = self._network_constructor(*self._brian_list)
    self.network_runner.execute_network_pre_run(traj, self._network,
                                                self._network_dict,
                                                self.components,
                                                self.analysers)
    self._logger.info('\n-----------------------------\n'
                      'Network Simulation successful\n'
                      '-----------------------------')
    self._pre_run = True
    # Snapshot the post-pre-run state so individual runs can restore it
    # (BRIAN2 networks expose `store`/`restore`).
    if hasattr(self._network, 'store'):
        self._network.store('pre_run')
python
{ "resource": "" }
q274304
NetworkManager.run_network
test
def run_network(self, traj): """Top-level simulation function, pass this to the environment Performs an individual network run during parameter exploration. `run_network` does not need to be called by the user. If this method (not this one of the NetworkManager) is passed to an :class:`~pypet.environment.Environment` with this NetworkManager, `run_network` and :func:`~pypet.brian2.network.NetworkManager.build` are automatically called for each individual experimental run. This function will create a new BRIAN2 network in case one was not pre-run. The execution of the network run is carried out by the :class:`~pypet.brian2.network.NetworkRunner` and it's :func:`~pypet.brian2.network.NetworkRunner.execute_network_run` (also take a look at this function's documentation to see the structure of a network run). :param traj: Trajectory container """ # Check if the network was pre-built if self._pre_built: if self._pre_run and hasattr(self._network, 'restore'): self._network.restore('pre_run') # Temprorary fix for https://github.com/brian-team/brian2/issues/681 self._network.store('pre_run') self._run_network(traj) else: self._run_network(traj)
python
{ "resource": "" }
q274305
NetworkManager._run_network
test
def _run_network(self, traj):
    """Starts a single run carried out by a NetworkRunner.

    Called from the public function
    :func:`~pypet.brian2.network.NetworkManger.run_network`.

    :param traj: Trajectory container
    """
    self.build(traj)
    self._pretty_print_explored_parameters(traj)
    # We need to construct a network object in case one was not pre-run
    if not self._pre_run:
        self._network = self._network_constructor(*self._brian_list)
    # Start the experimental run
    self.network_runner.execute_network_run(traj, self._network,
                                            self._network_dict,
                                            self.components,
                                            self.analysers)
    self._logger.info('\n-----------------------------\n'
                      'Network Simulation successful\n'
                      '-----------------------------')
python
{ "resource": "" }
q274306
make_filename
test
def make_filename(traj):
    """Build a generic ``.png`` filename from the explored parameters.

    Each explored parameter contributes a ``name_value`` segment; segments
    are joined with double underscores.

    :param traj: Trajectory container
    :return: Filename string ending in ``.png``
    """
    explored_parameters = traj.f_get_explored_parameters()
    segments = ['%s_%s__' % (param.v_name, str(param.f_get()))
                for param in explored_parameters.values()]
    # Drop the trailing separator underscores before adding the extension.
    return ''.join(segments)[:-2] + '.png'
python
{ "resource": "" }
q274307
IteratorChain.next
test
def next(self): """Returns next element from chain. More precisely, it returns the next element of the foremost iterator. If this iterator is empty it moves iteratively along the chain of available iterators to pick the new foremost one. Raises StopIteration if there are no elements left. """ while True: # We need this loop because some iterators may already be empty. # We keep on popping from the left until next succeeds and as long # as there are iterators available try: return next(self._current) except StopIteration: try: self._current = iter(self._chain.popleft()) except IndexError: # If we run out of iterators we are sure that # there can be no more element raise StopIteration('Reached end of iterator chain')
python
{ "resource": "" }
q274308
merge_all_in_folder
test
def merge_all_in_folder(folder, ext='.hdf5',
                        dynamic_imports=None,
                        storage_service=None, force=False,
                        ignore_data=(),
                        move_data=False,
                        delete_other_files=False,
                        keep_info=True,
                        keep_other_trajectory_info=True,
                        merge_config=True,
                        backup=True):
    """Merges all files in a given folder.

    IMPORTANT: Does not check if there are more than 1 trajectory in a file.
    Always uses the last trajectory in file and ignores the other ones.

    Trajectories are merged according to the alphabetical order of the
    files, i.e. the resulting merged trajectory is found in the first file
    (according to lexicographic ordering).

    :param folder: folder (not recursive) where to look for files
    :param ext: only files with the given extension are used
    :param dynamic_imports: Dynamic imports for loading
    :param storage_service: storage service to use, leave `None` to use the
        default one
    :param force: If loading should be forced.
    :param delete_other_files: Deletes files of merged trajectories

    All other parameters as in `f_merge_many` of the trajectory.

    :return: The merged traj
    """
    in_dir = os.listdir(folder)
    all_files = []
    # Find all files with matching extension.
    # Fix: use `fname` instead of `file`, which shadowed the builtin.
    for fname in in_dir:
        full_file = os.path.join(folder, fname)
        if os.path.isfile(full_file):
            _, extension = os.path.splitext(full_file)
            if extension == ext:
                all_files.append(full_file)
    # Lexicographic order determines the merge target (first file).
    all_files = sorted(all_files)

    # Open the last trajectory of every matching file (no data loaded yet)
    trajs = []
    for full_file in all_files:
        traj = load_trajectory(index=-1,
                               storage_service=storage_service,
                               filename=full_file,
                               load_data=0,
                               force=force,
                               dynamic_imports=dynamic_imports)
        trajs.append(traj)

    # Merge all trajectories into the first one
    first_traj = trajs.pop(0)
    first_traj.f_merge_many(trajs,
                            ignore_data=ignore_data,
                            move_data=move_data,
                            delete_other_trajectory=False,
                            keep_info=keep_info,
                            keep_other_trajectory_info=keep_other_trajectory_info,
                            merge_config=merge_config,
                            backup=backup)
    if delete_other_files:
        # Delete all but the first file
        for merged_file in all_files[1:]:
            os.remove(merged_file)
    return first_traj
python
{ "resource": "" }
q274309
_SigintHandler._handle_sigint
test
def _handle_sigint(self, signum, frame):
    """Handler of SIGINT

    Does nothing if SIGINT is encountered once but raises a
    KeyboardInterrupt immediately in case it is encountered twice.

    :param signum: Signal number (unused; signature required by `signal`)
    :param frame: Current stack frame (unused)
    """
    if self.hit:
        prompt = 'Exiting immediately!'
        raise KeyboardInterrupt(prompt)
    else:
        # First SIGINT: remember it and warn the user on stderr.
        self.hit = True
        prompt = ('\nYou killed the process(es) via `SIGINT` (`CTRL+C`). '
                  'I am trying to exit '
                  'gracefully. Using `SIGINT` (`CTRL+C`) '
                  'again will cause an immediate exit.\n')
        sys.stderr.write(prompt)
python
{ "resource": "" }
q274310
config_from_file
test
def config_from_file(filename, config=None):
    '''Small configuration file management function.

    If ``config`` is truthy it is written to ``filename`` as JSON and the
    function returns ``True`` on success, ``False`` on a write error.
    Otherwise the file is read: returns the parsed content, ``{}`` if the
    file does not exist, or ``False`` on a read error.
    '''
    if config:
        # We're writing configuration
        try:
            with open(filename, 'w') as fdesc:
                fdesc.write(json.dumps(config))
        except IOError as error:
            logger.exception(error)
            return False
        return True
    else:
        # We're reading config
        if os.path.isfile(filename):
            try:
                with open(filename, 'r') as fdesc:
                    return json.loads(fdesc.read())
            except IOError as error:
                # Fix: log the failure instead of silently returning False,
                # mirroring the write branch above.
                logger.exception(error)
                return False
        else:
            return {}
python
{ "resource": "" }
q274311
Ecobee.request_pin
test
def request_pin(self):
    '''Method to request a PIN from ecobee for authorization.

    On success stores the authorization code and PIN on the instance and
    logs instructions for the user; on a connectivity problem logs a
    warning and returns without changing state.
    '''
    url = 'https://api.ecobee.com/authorize'
    params = {'response_type': 'ecobeePin',
              'client_id': self.api_key, 'scope': 'smartWrite'}
    try:
        request = requests.get(url, params=params)
    except RequestException:
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning("Error connecting to Ecobee. Possible connectivity outage."
                       "Could not request pin.")
        return
    self.authorization_code = request.json()['code']
    self.pin = request.json()['ecobeePin']
    # Fix: this is user guidance, not an error condition -> log at INFO.
    logger.info('Please authorize your ecobee developer app with PIN code '
                + self.pin + '\nGoto https://www.ecobee.com/consumerportal'
                '/index.html, click\nMy Apps, Add application, Enter Pin'
                ' and click Authorize.\nAfter authorizing, call request_'
                'tokens() method.')
python
{ "resource": "" }
q274312
Ecobee.request_tokens
test
def request_tokens(self):
    '''Method to request API tokens from ecobee.

    Requires ``self.authorization_code`` from a prior ``request_pin`` call.
    On success stores the access/refresh tokens, persists them and clears
    the PIN; otherwise logs a warning.
    '''
    url = 'https://api.ecobee.com/token'
    params = {'grant_type': 'ecobeePin', 'code': self.authorization_code,
              'client_id': self.api_key}
    try:
        request = requests.post(url, params=params)
    except RequestException:
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning("Error connecting to Ecobee. Possible connectivity outage."
                       "Could not request token.")
        return
    if request.status_code == requests.codes.ok:
        self.access_token = request.json()['access_token']
        self.refresh_token = request.json()['refresh_token']
        self.write_tokens_to_file()
        self.pin = None
    else:
        logger.warning('Error while requesting tokens from ecobee.com.'
                       ' Status code: ' + str(request.status_code))
        return
python
{ "resource": "" }
q274313
Ecobee.refresh_tokens
test
def refresh_tokens(self):
    '''Method to refresh API tokens from ecobee.

    :return: ``True`` on success; ``False`` otherwise (after falling back
        to requesting a new PIN), so callers can truth-test the result.
    '''
    url = 'https://api.ecobee.com/token'
    params = {'grant_type': 'refresh_token',
              'refresh_token': self.refresh_token,
              'client_id': self.api_key}
    try:
        request = requests.post(url, params=params)
    except RequestException:
        # Consistency fix: guard against connectivity outages like the
        # other API helpers instead of letting the exception escape to
        # callers such as get_thermostats().
        logger.warning("Error connecting to Ecobee. Possible connectivity "
                       "outage. Could not refresh tokens.")
        return False
    if request.status_code == requests.codes.ok:
        self.access_token = request.json()['access_token']
        self.refresh_token = request.json()['refresh_token']
        self.write_tokens_to_file()
        return True
    else:
        self.request_pin()
        # Fix: explicit False instead of an implicit None return.
        return False
python
{ "resource": "" }
q274314
Ecobee.get_thermostats
test
def get_thermostats(self):
    ''' Set self.thermostats to a json list of thermostats from ecobee '''
    url = 'https://api.ecobee.com/1/thermostat'
    header = {'Content-Type': 'application/json;charset=UTF-8',
              'Authorization': 'Bearer ' + self.access_token}
    # Request runtime, sensor, program, equipment-status, event, weather
    # and settings data for every registered thermostat.
    params = {'json': ('{"selection":{"selectionType":"registered",'
                       '"includeRuntime":"true",'
                       '"includeSensors":"true",'
                       '"includeProgram":"true",'
                       '"includeEquipmentStatus":"true",'
                       '"includeEvents":"true",'
                       '"includeWeather":"true",'
                       '"includeSettings":"true"}}')}
    try:
        request = requests.get(url, headers=header, params=params)
    except RequestException:
        logger.warn("Error connecting to Ecobee. Possible connectivity outage.")
        return None
    if request.status_code == requests.codes.ok:
        self.authenticated = True
        self.thermostats = request.json()['thermostatList']
        return self.thermostats
    else:
        # Likely an expired access token: refresh and retry. The retry is
        # bounded because it only recurses when refresh_tokens() succeeds.
        self.authenticated = False
        logger.info("Error connecting to Ecobee while attempting to get "
                    "thermostat data. Refreshing tokens and trying again.")
        if self.refresh_tokens():
            return self.get_thermostats()
        else:
            return None
python
{ "resource": "" }
q274315
Ecobee.write_tokens_to_file
test
def write_tokens_to_file(self): ''' Write api tokens to a file ''' config = dict() config['API_KEY'] = self.api_key config['ACCESS_TOKEN'] = self.access_token config['REFRESH_TOKEN'] = self.refresh_token config['AUTHORIZATION_CODE'] = self.authorization_code if self.file_based_config: config_from_file(self.config_filename, config) else: self.config = config
python
{ "resource": "" }
q274316
Ecobee.set_hvac_mode
test
def set_hvac_mode(self, index, hvac_mode): ''' possible hvac modes are auto, auxHeatOnly, cool, heat, off ''' body = {"selection": {"selectionType": "thermostats", "selectionMatch": self.thermostats[index]['identifier']}, "thermostat": { "settings": { "hvacMode": hvac_mode } }} log_msg_action = "set HVAC mode" return self.make_request(body, log_msg_action)
python
{ "resource": "" }
q274317
Ecobee.set_fan_min_on_time
test
def set_fan_min_on_time(self, index, fan_min_on_time): ''' The minimum time, in minutes, to run the fan each hour. Value from 1 to 60 ''' body = {"selection": {"selectionType": "thermostats", "selectionMatch": self.thermostats[index]['identifier']}, "thermostat": { "settings": { "fanMinOnTime": fan_min_on_time } }} log_msg_action = "set fan minimum on time." return self.make_request(body, log_msg_action)
python
{ "resource": "" }
q274318
Ecobee.set_hold_temp
test
def set_hold_temp(self, index, cool_temp, heat_temp, hold_type="nextTransition"): ''' Set a hold ''' body = {"selection": { "selectionType": "thermostats", "selectionMatch": self.thermostats[index]['identifier']}, "functions": [{"type": "setHold", "params": { "holdType": hold_type, "coolHoldTemp": int(cool_temp * 10), "heatHoldTemp": int(heat_temp * 10) }}]} log_msg_action = "set hold temp" return self.make_request(body, log_msg_action)
python
{ "resource": "" }
q274319
Ecobee.set_climate_hold
test
def set_climate_hold(self, index, climate, hold_type="nextTransition"): ''' Set a climate hold - ie away, home, sleep ''' body = {"selection": { "selectionType": "thermostats", "selectionMatch": self.thermostats[index]['identifier']}, "functions": [{"type": "setHold", "params": { "holdType": hold_type, "holdClimateRef": climate }}]} log_msg_action = "set climate hold" return self.make_request(body, log_msg_action)
python
{ "resource": "" }
q274320
Ecobee.delete_vacation
test
def delete_vacation(self, index, vacation): ''' Delete the vacation with name vacation ''' body = {"selection": { "selectionType": "thermostats", "selectionMatch": self.thermostats[index]['identifier']}, "functions": [{"type": "deleteVacation", "params": { "name": vacation }}]} log_msg_action = "delete a vacation" return self.make_request(body, log_msg_action)
python
{ "resource": "" }
q274321
Ecobee.resume_program
test
def resume_program(self, index, resume_all=False): ''' Resume currently scheduled program ''' body = {"selection": { "selectionType": "thermostats", "selectionMatch": self.thermostats[index]['identifier']}, "functions": [{"type": "resumeProgram", "params": { "resumeAll": resume_all }}]} log_msg_action = "resume program" return self.make_request(body, log_msg_action)
python
{ "resource": "" }
q274322
Ecobee.send_message
test
def send_message(self, index, message="Hello from python-ecobee!"): ''' Send a message to the thermostat ''' body = {"selection": { "selectionType": "thermostats", "selectionMatch": self.thermostats[index]['identifier']}, "functions": [{"type": "sendMessage", "params": { "text": message[0:500] }}]} log_msg_action = "send message" return self.make_request(body, log_msg_action)
python
{ "resource": "" }
q274323
Ecobee.set_humidity
test
def set_humidity(self, index, humidity): ''' Set humidity level''' body = {"selection": {"selectionType": "thermostats", "selectionMatch": self.thermostats[index]['identifier']}, "thermostat": { "settings": { "humidity": int(humidity) } }} log_msg_action = "set humidity level" return self.make_request(body, log_msg_action)
python
{ "resource": "" }
q274324
gen_delay_selecting
test
def gen_delay_selecting():
    """Generate the delay in seconds in which the DISCOVER will be sent.

    [:rfc:`2131#section-4.4.1`]::

        The client SHOULD wait a random time between one and ten seconds to
        desynchronize the use of DHCP at startup.

    :return: float, a whole number of seconds in [0, MAX_DELAY_SELECTING].
    """
    # NOTE(review): randint(0, ...) may return 0, while the RFC suggests at
    # least one second — confirm this is intended.
    delay = float(random.randint(0, MAX_DELAY_SELECTING))
    logger.debug('Delay to enter in SELECTING %s.', delay)
    logger.debug('SELECTING will happen on %s',
                 future_dt_str(nowutc(), delay))
    return delay
python
{ "resource": "" }
q274325
gen_timeout_resend
test
def gen_timeout_resend(attempts):
    """Generate the time in seconds in which DHCPDISCOVER wil be retransmited.

    [:rfc:`2131#section-3.1`]::

        might retransmit the DHCPREQUEST message four times, for a total
        delay of 60 seconds

    [:rfc:`2131#section-4.1`]::

        For example, in a 10Mb/sec Ethernet internetwork, the delay before
        the first retransmission SHOULD be 4 seconds randomized by the value
        of a uniform random number chosen from the range -1 to +1 [...]
        The delay before the next retransmission SHOULD be 8 seconds
        randomized by the value of a uniform number chosen from the range
        -1 to +1.  The retransmission delay SHOULD be doubled with
        subsequent retransmissions up to a maximum of 64 seconds.

    :param attempts: number of attempts already made
    :return: float seconds; 2**(attempts+1) gives 4s after the first
        attempt, doubling thereafter, plus +/-1s of jitter.
    """
    timeout = 2 ** (attempts + 1) + random.uniform(-1, +1)
    logger.debug('next timeout resending will happen on %s',
                 future_dt_str(nowutc(), timeout))
    return timeout
python
{ "resource": "" }
q274326
gen_timeout_request_renew
test
def gen_timeout_request_renew(lease):
    """Return the delay in seconds before retransmitting DHCPREQUEST.

    [:rfc:`2131#section-4..4.5`]::

        In both RENEWING and REBINDING states, if the client receives no
        response to its DHCPREQUEST message, the client SHOULD wait
        one-half of the remaining time until T2 (in RENEWING state) and
        one-half of the remaining lease time (in REBINDING state), down to
        a minimum of 60 seconds, before retransmitting the DHCPREQUEST
        message.

    :param lease: lease object with renewing/rebinding times
    :return: seconds to wait, floored at 60
    """
    remaining = (lease.rebinding_time - lease.renewing_time) * RENEW_PERC
    time_left = max(remaining, 60)
    logger.debug('Next request in renew will happen on %s',
                 future_dt_str(nowutc(), time_left))
    return time_left
python
{ "resource": "" }
q274327
gen_renewing_time
test
def gen_renewing_time(lease_time, elapsed=0):
    """Generate RENEWING time (T1) with random fuzz.

    [:rfc:`2131#section-4.4.5`]::

        T1 defaults to (0.5 * duration_of_lease).  T2 defaults to (0.875 *
        duration_of_lease).  Times T1 and T2 SHOULD be chosen with some
        random "fuzz" around a fixed value, to avoid synchronization of
        client reacquisition.

    :param lease_time: lease duration in seconds
    :param elapsed: seconds already elapsed, subtracted from T1
    :return: fuzzed renewing time in seconds
    """
    renewing_time = int(lease_time) * RENEW_PERC - elapsed
    # FIXME:80 [:rfc:`2131#section-4.4.5`]: the chosen "fuzz" could
    # fingerprint the implementation
    # NOTE: here using same "fuzz" as systemd?
    # Fuzz range spans from T1 up to T2 (REBIND_PERC of the lease).
    range_fuzz = int(lease_time) * REBIND_PERC - renewing_time
    logger.debug('rebinding fuzz range %s', range_fuzz)
    fuzz = random.uniform(-(range_fuzz),
                          +(range_fuzz))
    renewing_time += fuzz
    logger.debug('Renewing time %s.', renewing_time)
    return renewing_time
python
{ "resource": "" }
q274328
DHCPCAPFSM.dict_self
test
def dict_self(self):
    """Return the self object attributes not inherited as dict."""
    attrs = self.__dict__
    return {name: attrs[name] for name in attrs if name in FSM_ATTRS}
python
{ "resource": "" }
q274329
DHCPCAPFSM.reset
test
def reset(self, iface=None, client_mac=None, xid=None, scriptfile=None):
    """Reset object attributes when state is INIT.

    :param iface: network interface; defaults to scapy's `conf.iface`
    :param client_mac: MAC address; derived from the interface if omitted
    :param xid: DHCP transaction id passed through to DHCPCAP
    :param scriptfile: optional hook script wrapped in ClientScript
    """
    logger.debug('Reseting attributes.')
    if iface is None:
        iface = conf.iface
    if client_mac is None:
        # scapy for python 3 returns byte, not tuple
        tempmac = get_if_raw_hwaddr(iface)
        if isinstance(tempmac, tuple) and len(tempmac) == 2:
            mac = tempmac[1]
        else:
            mac = tempmac
        client_mac = str2mac(mac)
    self.client = DHCPCAP(iface=iface, client_mac=client_mac, xid=xid)
    if scriptfile is not None:
        self.script = ClientScript(scriptfile)
    else:
        self.script = None
    # Per-run bookkeeping: retransmission counters and collected offers.
    self.time_sent_request = None
    self.discover_attempts = 0
    self.request_attempts = 0
    self.current_state = STATE_PREINIT
    self.offers = list()
python
{ "resource": "" }
q274330
DHCPCAPFSM.get_timeout
test
def get_timeout(self, state, function):
    """Workaround to get timeout in the ATMT.timeout class method.

    :param state: numeric state, translated via STATES2NAMES
    :param function: timeout condition whose configured delay is wanted
    :return: the timeout value, or None when no entry matches
    """
    state_name = STATES2NAMES[state]
    target = function.atmt_condname
    # Entries are (timeout, condition_function) pairs; the sentinel entry
    # is (None, None).
    for current_timeout, timeout_fn in self.timeout[state_name]:
        if timeout_fn is not None and timeout_fn.atmt_condname == target:
            logger.debug('Timeout for state %s, function %s, is %s',
                         state_name, function.atmt_condname,
                         current_timeout)
            return current_timeout
    return None
python
{ "resource": "" }
q274331
DHCPCAPFSM.set_timeout
test
def set_timeout(self, state, function, newtimeout):
    """ Workaround to change timeout values in the ATMT.timeout class
    method.

    self.timeout format is::

        {'STATE': [
            (TIMEOUT0, <function foo>),
            (TIMEOUT1, <function bar>)),
            (None, None)
            ],
        }

    :param state: numeric state, translated via STATES2NAMES
    :param function: timeout condition whose delay should be replaced
    :param newtimeout: the new timeout value
    """
    state = STATES2NAMES[state]
    for timeout_fn_t in self.timeout[state]:
        # access the function name
        if timeout_fn_t[1] is not None and \
                timeout_fn_t[1].atmt_condname == function.atmt_condname:
            # convert the tuple to a list so it can be modified
            timeout_l = list(timeout_fn_t)
            # modify the timeout
            timeout_l[0] = newtimeout
            # write the new timeout back into self.timeout
            i = self.timeout[state].index(timeout_fn_t)
            self.timeout[state][i] = tuple(timeout_l)
            logger.debug('Set state %s, function %s, to timeout %s',
                         state, function.atmt_condname, newtimeout)
python
{ "resource": "" }
q274332
DHCPCAPFSM.send_discover
test
def send_discover(self):
    """Send a DHCPDISCOVER and reschedule the SELECTING timeout.

    Only valid in the INIT or SELECTING states. Increments the discover
    counter (capped at MAX_ATTEMPTS_DISCOVER) so the resend timeout backs
    off exponentially via gen_timeout_resend.
    """
    assert self.client
    assert self.current_state == STATE_INIT or \
        self.current_state == STATE_SELECTING
    pkt = self.client.gen_discover()
    sendp(pkt)
    # FIXME:20 check that this is correct: all or only discover?
    if self.discover_attempts < MAX_ATTEMPTS_DISCOVER:
        self.discover_attempts += 1
    timeout = gen_timeout_resend(self.discover_attempts)
    self.set_timeout(self.current_state, self.timeout_selecting, timeout)
python
{ "resource": "" }
q274333
DHCPCAPFSM.select_offer
test
def select_offer(self):
    """Select an offer from the offers received.

    [:rfc:`2131#section-4.2`]::

        DHCP clients are free to use any strategy in selecting a DHCP
        server among those from which the client receives a DHCPOFFER.

    [:rfc:`2131#section-4.4.1`]::

        The time over which the client collects messages and the mechanism
        used to select one DHCPOFFER are implementation dependent.

    Nor [:rfc:`7844`] nor [:rfc:`2131`] specify the algorithm. Here,
    currently the first offer is selected.

    .. todo::

        - Check other implementations algorithm to select offer.
    """
    logger.debug('Selecting offer.')
    chosen = self.offers[0]
    self.client.handle_offer(chosen)
python
{ "resource": "" }
q274334
DHCPCAPFSM.send_request
test
def send_request(self):
    """Send a DHCPREQUEST and schedule the state-appropriate retry timeout.

    [:rfc:`2131#section-3.1`]::

        a client retransmitting as described in section 4.1 might
        retransmit the DHCPREQUEST message four times, for a total delay
        of 60 seconds

    .. todo::

        - The maximum number of retransmitted REQUESTs is per state or in
          total?
        - Are the retransmitted REQUESTs independent to the retransmitted
          DISCOVERs?
    """
    assert self.client
    # Renew from BOUND goes unicast straight to the known server.
    if self.current_state == STATE_BOUND:
        pkt = self.client.gen_request_unicast()
    else:
        pkt = self.client.gen_request()
    sendp(pkt)
    logger.debug('Modifying FSM obj, setting time_sent_request.')
    self.time_sent_request = nowutc()
    logger.info('DHCPREQUEST of %s on %s to %s port %s',
                self.client.iface, self.client.client_ip,
                self.client.server_ip, self.client.server_port)
    # NOTE: see previous TODO, maybe the MAX_ATTEMPTS_REQUEST needs to be
    # calculated per state.
    if self.request_attempts < MAX_ATTEMPTS_REQUEST:
        # Fix: the counter starts at 0 (see `reset`), so the original
        # `*= 2` never changed it and the retry cap in timeout_requesting
        # could never trigger. Count attempts one by one; the timeout
        # itself already doubles inside gen_timeout_resend.
        self.request_attempts += 1
        logger.debug('Increased request attempts to %s',
                     self.request_attempts)
    if self.current_state == STATE_RENEWING:
        timeout_renewing = gen_timeout_request_renew(self.client.lease)
        self.set_timeout(self.current_state,
                         self.timeout_request_renewing,
                         timeout_renewing)
    elif self.current_state == STATE_REBINDING:
        timeout_rebinding = gen_timeout_request_rebind(self.client.lease)
        self.set_timeout(self.current_state,
                         self.timeout_request_rebinding,
                         timeout_rebinding)
    else:
        timeout_requesting = \
            gen_timeout_resend(self.request_attempts)
        self.set_timeout(self.current_state,
                         self.timeout_requesting,
                         timeout_requesting)
python
{ "resource": "" }
q274335
DHCPCAPFSM.set_timers
test
def set_timers(self):
    """Arm the renewal (T1) and rebinding (T2) timers from the lease."""
    logger.debug('setting timeouts')
    lease = self.client.lease
    self.set_timeout(self.current_state,
                     self.renewing_time_expires,
                     lease.renewal_time)
    self.set_timeout(self.current_state,
                     self.rebinding_time_expires,
                     lease.rebinding_time)
python
{ "resource": "" }
q274336
DHCPCAPFSM.process_received_ack
test
def process_received_ack(self, pkt):
    """Process a received ACK packet.

    Not specifiyed in [:rfc:`7844`].

    Probe the offered IP in [:rfc:`2131#section-2.2.`]::

        the allocating server SHOULD probe the reused address before
        allocating the address, e.g., with an ICMP echo request, and the
        client SHOULD probe the newly received address, e.g., with ARP.
        The client SHOULD broadcast an ARP reply to announce the client's
        new IP address and clear any outdated ARP cache entries in hosts
        on the client's subnet.

    It is also not specifiyed in [:rfc:`7844`] nor [:rfc:`2131`] how to
    check that the offered IP is valid.

    .. todo::

        - Check that nor ``dhclient`` nor ``systemd-networkd`` send an ARP.
        - Check how other implementations check that the ACK paremeters
          are valid, ie, if the ACK fields match the fields in the OFFER.
        - Check to which state the client should go back to when the
          offered parameters are not valid.

    :param pkt: a sniffed packet
    :return: True if the packet was a valid ACK, False otherwise
    """
    if isack(pkt):
        try:
            self.event = self.client.handle_ack(pkt,
                                                self.time_sent_request)
        except AddrFormatError as err:
            # Malformed address in the ACK: restart offer selection.
            logger.error(err)
            # NOTE: see previous TODO, maybe should go back to other state.
            raise self.SELECTING()
        # NOTE: see previous TODO, not checking address with ARP.
        logger.info('DHCPACK of %s from %s' % (self.client.client_ip,
                                               self.client.server_ip))
        return True
    return False
python
{ "resource": "" }
q274337
DHCPCAPFSM.process_received_nak
test
def process_received_nak(self, pkt):
    """Return True when pkt is a DHCPNAK (after logging it), else False."""
    if not isnak(pkt):
        return False
    logger.info('DHCPNAK of %s from %s',
                self.client.client_ip, self.client.server_ip)
    return True
python
{ "resource": "" }
q274338
DHCPCAPFSM.INIT
test
def INIT(self): """INIT state. [:rfc:`2131#section-4.4.1`]:: The client SHOULD wait a random time between one and ten seconds to desynchronize the use of DHCP at startup .. todo:: - The initial delay is implemented, but probably is not in other implementations. Check what other implementations do. """ # NOTE: in case INIT is reached from other state, initialize attributes # reset all variables. logger.debug('In state: INIT') if self.current_state is not STATE_PREINIT: self.reset() self.current_state = STATE_INIT # NOTE: see previous TODO, maybe this is not needed. if self.delay_selecting: if self.delay_before_selecting is None: delay_before_selecting = gen_delay_selecting() else: delay_before_selecting = self.delay_before_selecting else: delay_before_selecting = 0 self.set_timeout(self.current_state, self.timeout_delay_before_selecting, delay_before_selecting) if self.timeout_select is not None: self.set_timeout(STATE_SELECTING, self.timeout_selecting, self.timeout_select)
python
{ "resource": "" }
q274339
DHCPCAPFSM.BOUND
test
def BOUND(self):
    """BOUND state.

    Logs the state change, records the lease, and applies it either via
    the user script (when configured) or by configuring the network
    directly with set_net.
    """
    logger.debug('In state: BOUND')
    logger.info('(%s) state changed %s -> bound', self.client.iface,
                STATES2NAMES[self.current_state])
    self.current_state = STATE_BOUND
    self.client.lease.info_lease()
    if self.script is not None:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
    else:
        try:
            set_net(self.client.lease)
        except Exception:
            # Fix: dropped the unused `as e` binding; the traceback is
            # already captured via exc_info. The broad catch is deliberate
            # so a network-configuration failure does not kill the FSM.
            logger.error('Can not set IP', exc_info=True)
python
{ "resource": "" }
q274340
DHCPCAPFSM.RENEWING
test
def RENEWING(self):
    """RENEWING state: reapply the lease via script or set_net."""
    logger.debug('In state: RENEWING')
    self.current_state = STATE_RENEWING
    if self.script is None:
        set_net(self.client.lease)
    else:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
python
{ "resource": "" }
q274341
DHCPCAPFSM.REBINDING
test
def REBINDING(self):
    """REBINDING state: reapply the lease via script or set_net."""
    logger.debug('In state: REBINDING')
    self.current_state = STATE_REBINDING
    if self.script is None:
        set_net(self.client.lease)
    else:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
python
{ "resource": "" }
q274342
DHCPCAPFSM.END
test
def END(self):
    """END state: final lease handling via script or set_net."""
    logger.debug('In state: END')
    self.current_state = STATE_END
    if self.script is None:
        set_net(self.client.lease)
    else:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
python
{ "resource": "" }
q274343
DHCPCAPFSM.ERROR
test
def ERROR(self):
    """ERROR state: notify, apply lease state, then restart the FSM."""
    logger.debug('In state: ERROR')
    self.current_state = STATE_ERROR
    if self.script is not None:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
    # NOTE(review): unlike BOUND/RENEWING/REBINDING there is no `else`
    # here — set_net runs even when a script handled the lease. Confirm
    # this is intentional.
    set_net(self.client.lease)
    # Restart the state machine from scratch.
    raise self.INIT()
python
{ "resource": "" }
q274344
DHCPCAPFSM.timeout_selecting
test
def timeout_selecting(self):
    """Timeout while waiting for OFFERs in SELECTING state.

    Not specifiyed in [:rfc:`7844`].  See comments in
    :func:`dhcpcapfsm.DHCPCAPFSM.timeout_request`.
    """
    logger.debug('C2.1: T In %s, timeout receiving response to select.',
                 self.current_state)
    if len(self.offers) >= MAX_OFFERS_COLLECTED:
        logger.debug('C2.2: T Maximum number of offers reached, '
                     'raise REQUESTING.')
        raise self.REQUESTING()
    if self.discover_attempts < MAX_ATTEMPTS_DISCOVER:
        # Still allowed to retry the DISCOVER.
        logger.debug('C2.2: F. Still not received all OFFERS, but not '
                     'max # attemps reached, raise SELECTING.')
        raise self.SELECTING()
    logger.debug('C2.3: T Maximum number of discover retries is %s'
                 ' and already sent %s.',
                 MAX_ATTEMPTS_DISCOVER, self.discover_attempts)
    if self.offers:
        logger.debug('C2.4: F. But there is some OFFERS, '
                     'raise REQUESTING.')
        raise self.REQUESTING()
    logger.debug('C2.4: T. But no OFFERS where received, '
                 'raise ERROR.')
    raise self.ERROR()
python
{ "resource": "" }
q274345
DHCPCAPFSM.timeout_requesting
test
def timeout_requesting(self):
    """Timeout waiting for a reply to REQUEST in REQUESTING state.

    Not specifiyed in [:rfc:`7844`].

    [:rfc:`2131#section-3.1`]::

        might retransmit the DHCPREQUEST message four times, for a
        total delay of 60 seconds

    Raises ERROR once the retry budget is exhausted, otherwise
    retries by raising REQUESTING.
    """
    logger.debug("C3.2: T. In %s, timeout receiving response to request, ",
                 self.current_state)
    # BUG FIX: the original compared ``self.discover_requests`` and logged
    # ``self.disover_requests`` -- neither attribute exists.  The sibling
    # timeout handlers (renewing/rebinding) count REQUEST retries in
    # ``self.request_attempts``, so use that counter here as well.
    if self.request_attempts >= MAX_ATTEMPTS_REQUEST:
        logger.debug('C2.3: T. Maximum number %s of REQUESTs '
                     'reached, already sent %s, raise ERROR.',
                     MAX_ATTEMPTS_REQUEST, self.request_attempts)
        raise self.ERROR()
    logger.debug("C2.3: F. Maximum number of REQUESTs retries not reached,"
                 "raise REQUESTING.")
    raise self.REQUESTING()
python
{ "resource": "" }
q274346
DHCPCAPFSM.timeout_request_renewing
test
def timeout_request_renewing(self):
    """Timeout waiting for a reply to REQUEST in RENEWING state.

    Same comments as in
    :func:`dhcpcapfsm.DHCPCAPFSM.timeout_requesting`.  On exhaustion of
    the retries the FSM waits for the rebinding timer instead of
    raising ERROR.
    """
    logger.debug("C5.2:T In %s, timeout receiving response to request.",
                 self.current_state)
    if self.request_attempts >= MAX_ATTEMPTS_REQUEST:
        # BUG FIX: the log argument referenced the non-existent
        # ``self.disover_requests`` attribute (AttributeError); log the
        # counter that is actually compared.
        logger.debug('C2.3: T Maximum number %s of REQUESTs '
                     'reached, already sent %s, wait to rebinding time.',
                     MAX_ATTEMPTS_REQUEST, self.request_attempts)
        # raise self.ERROR()
    logger.debug("C2.3: F. Maximum number of REQUESTs retries not reached,"
                 "raise RENEWING.")
    raise self.RENEWING()
python
{ "resource": "" }
q274347
DHCPCAPFSM.timeout_request_rebinding
test
def timeout_request_rebinding(self):
    """Timeout waiting for a reply to REQUEST in REBINDING state.

    Same comments as in
    :func:`dhcpcapfsm.DHCPCAPFSM.timeout_requesting`.  On exhaustion of
    the retries the FSM waits for the lease to expire instead of
    raising ERROR.
    """
    logger.debug("C6.2:T In %s, timeout receiving response to request.",
                 self.current_state)
    if self.request_attempts >= MAX_ATTEMPTS_REQUEST:
        # BUG FIX: the log argument referenced the non-existent
        # ``self.disover_requests`` attribute (AttributeError); log the
        # counter that is actually compared.
        logger.debug('C.2.3: T. Maximum number %s of REQUESTs '
                     'reached, already sent %s, wait lease time expires.',
                     MAX_ATTEMPTS_REQUEST, self.request_attempts)
        # raise self.ERROR()
    logger.debug("C2.3: F. Maximum number of REQUESTs retries not reached,"
                 "raise REBINDING.")
    raise self.REBINDING()
python
{ "resource": "" }
q274348
DHCPCAPFSM.receive_offer
test
def receive_offer(self, pkt):
    """Collect OFFERs in SELECTING; raise REQUESTING once enough arrived."""
    logger.debug("C2. Received OFFER?, in SELECTING state.")
    if not isoffer(pkt):
        logger.debug("C2.5: F, raise SELECTING.")
        raise self.SELECTING()
    logger.debug("C2: T, OFFER received")
    self.offers.append(pkt)
    if len(self.offers) < MAX_OFFERS_COLLECTED:
        logger.debug("C2.5: F, raise SELECTING.")
        raise self.SELECTING()
    logger.debug("C2.5: T, raise REQUESTING.")
    self.select_offer()
    raise self.REQUESTING()
python
{ "resource": "" }
q274349
DHCPCAPFSM.receive_ack_requesting
test
def receive_ack_requesting(self, pkt):
    """Handle a possible ACK while in REQUESTING state."""
    logger.debug("C3. Received ACK?, in REQUESTING state.")
    if not self.process_received_ack(pkt):
        return
    logger.debug("C3: T. Received ACK, in REQUESTING state, "
                 "raise BOUND.")
    raise self.BOUND()
python
{ "resource": "" }
q274350
DHCPCAPFSM.receive_nak_requesting
test
def receive_nak_requesting(self, pkt):
    """Handle a possible NAK while in REQUESTING state."""
    logger.debug("C3.1. Received NAK?, in REQUESTING state.")
    if not self.process_received_nak(pkt):
        return
    logger.debug("C3.1: T. Received NAK, in REQUESTING state, "
                 "raise INIT.")
    raise self.INIT()
python
{ "resource": "" }
q274351
DHCPCAPFSM.receive_ack_renewing
test
def receive_ack_renewing(self, pkt):
    """Handle a possible ACK while in RENEWING state."""
    logger.debug("C3. Received ACK?, in RENEWING state.")
    if not self.process_received_ack(pkt):
        return
    logger.debug("C3: T. Received ACK, in RENEWING state, "
                 "raise BOUND.")
    raise self.BOUND()
python
{ "resource": "" }
q274352
DHCPCAPFSM.receive_nak_renewing
test
def receive_nak_renewing(self, pkt):
    """Handle a possible NAK while in RENEWING state."""
    logger.debug("C3.1. Received NAK?, in RENEWING state.")
    if not self.process_received_nak(pkt):
        return
    logger.debug("C3.1: T. Received NAK, in RENEWING state, "
                 " raise INIT.")
    raise self.INIT()
python
{ "resource": "" }
q274353
DHCPCAPFSM.receive_ack_rebinding
test
def receive_ack_rebinding(self, pkt):
    """Handle a possible ACK while in REBINDING state."""
    logger.debug("C3. Received ACK?, in REBINDING state.")
    if not self.process_received_ack(pkt):
        return
    logger.debug("C3: T. Received ACK, in REBINDING state, "
                 "raise BOUND.")
    raise self.BOUND()
python
{ "resource": "" }
q274354
DHCPCAPFSM.receive_nak_rebinding
test
def receive_nak_rebinding(self, pkt):
    """Handle a possible NAK while in REBINDING state."""
    # BUG FIX: the log messages said "RENEWING state" (copy-paste from the
    # renewing handler) although this callback runs in REBINDING state.
    logger.debug("C3.1. Received NAK?, in REBINDING state.")
    if self.process_received_nak(pkt):
        logger.debug("C3.1: T. Received NAK, in REBINDING state, "
                     "raise INIT.")
        raise self.INIT()
python
{ "resource": "" }
q274355
DHCPCAPFSM.on_renewing
test
def on_renewing(self):
    """React to a successful renewal.

    The lease itself is not re-recorded; only its derived values are
    sanitized and the T1/T2/lease timers are restarted.
    """
    lease = self.client.lease
    lease.sanitize_net_values()
    lease.set_times(self.time_sent_request)
    self.set_timers()
python
{ "resource": "" }
q274356
Qurl.set
test
def set(self, name, value):
    """Assign *value* to parameter *name*; a value of None removes it."""
    clone = self._clone()
    if django.VERSION[0] <= 1 and django.VERSION[1] <= 4:
        # Django <= 1.4: falsy values are treated as removal as well.
        value = value or None
    clone._qsl = [pair for pair in self._qsl if pair[0] != name]
    if value is not None:
        clone._qsl.append((name, value))
    return clone
python
{ "resource": "" }
q274357
Qurl.add
test
def add(self, name, value):
    """Append *value* to a multi-valued parameter, avoiding duplicates."""
    clone = self._clone()
    pair = (name, value)
    clone._qsl = [existing for existing in self._qsl if existing != pair]
    clone._qsl.append(pair)
    return clone
python
{ "resource": "" }
q274358
Qurl.remove
test
def remove(self, name, value):
    """Drop one (name, value) entry from a multi-valued parameter."""
    clone = self._clone()
    target = (name, str(value))
    clone._qsl = [pair for pair in self._qsl if pair != target]
    return clone
python
{ "resource": "" }
q274359
get_status
test
def get_status(options):
    """
    Get programs statuses.

    :param options: parsed commandline arguments.
    :type options: optparse.Values.
    :return: supervisord XML-RPC call result.
    :rtype: dict.
    """
    payload = {  # server connection URI formatted string payload
        "username": options.username,
        "password": options.password,
        "server": options.server,
        "port": options.port,
    }

    try:
        if options.server.startswith("/") and stat.S_ISSOCK(os.stat(options.server).st_mode):
            # communicate with server via unix socket (simple check is server address is path and path is unix socket)
            try:
                # optional dependency: only needed for the socket transport
                import supervisor.xmlrpc
            except ImportError as error:
                sys.stderr.write("ERROR: Couldn't load module. {error}\n".format(error=error))
                sys.stderr.write("ERROR: Unix socket support not available! Please install nagios-check-supervisord with unix socket support: 'nagios-check-supervisord[unix-socket-support]' or install 'supervisor' separately.\n")
                sys.exit(-1)
            if all([options.username, options.password, ]):  # with auth
                connection = xmlrpclib.ServerProxy("https://", transport=supervisor.xmlrpc.SupervisorTransport(options.username, options.password, serverurl=URI[URI_TPL_SOCKET].format(**payload)))
            else:
                connection = xmlrpclib.ServerProxy("https://", transport=supervisor.xmlrpc.SupervisorTransport(None, None, serverurl=URI[URI_TPL_SOCKET].format(**payload)))
        else:  # communicate with server via http
            if all([options.username, options.password, ]):  # with auth
                connection = xmlrpclib.Server(URI[URI_TPL_HTTP_AUTH].format(**payload))
            else:
                connection = xmlrpclib.Server(URI[URI_TPL_HTTP].format(**payload))

        return connection.supervisor.getAllProcessInfo()
    except Exception as error:
        # any transport/protocol failure is mapped to the configured
        # exit code for network errors
        if not options.quiet:
            sys.stdout.write("ERROR: Server communication problem. {error}\n".format(error=error))
        sys.exit(EXIT_CODES.get(options.network_errors_exit_code, EXIT_CODE_UNKNOWN))
python
{ "resource": "" }
q274360
create_output
test
def create_output(data, options):
    """
    Create Nagios and human readable supervisord statuses.

    :param data: supervisord XML-RPC call result.
    :type data: dict.
    :param options: parsed commandline arguments.
    :type options: optparse.Values.
    :return: Nagios and human readable supervisord statuses and exit code.
    :rtype: (str, int).
    """
    output = {}
    # BUG FIX: the original used the py2-only bare ``strip`` callable and
    # subscripted a ``filter`` object (``filter(...)[0]``), which raises
    # TypeError on Python 3.  Rewritten with comprehensions.
    if options.programs:
        programs = [name.strip() for name in options.programs.strip().split(",")]
    else:
        programs = [item["name"] for item in data]

    for program in programs:
        matches = [item for item in data if item["name"] == program]
        if matches:
            program_data = matches[0]
            output.update({
                program: {
                    "name": program,
                    "template": STATE2TEMPLATE[program_data["statename"]],
                    "status": program_data["spawnerr"] if program_data["spawnerr"] else program_data["statename"],
                }
            })
        else:
            # program configured for the check but unknown to supervisord
            output.update({
                program: {
                    "name": program,
                    "template": "unknown",
                    "status": "",
                }
            })

    # getting main status for check (for multiple check need to get main status by priority)
    statuses = [status[0] for status in sorted([(status, OUTPUT_TEMPLATES[status]["priority"]) for status in list(set([output[d]["template"] for d in output.keys()]))], key=lambda x: x[1])]
    # if no programs found or configured by supervisord set status ok and custom message
    status = statuses[0] if statuses else EXIT_CODE_OK
    text = ", ".join([OUTPUT_TEMPLATES[output[program]["template"]]["text"].format(**output[program]) for program in sorted(output.keys(), key=lambda x: OUTPUT_TEMPLATES[output[x]["template"]]["priority"])]) if statuses else "No program configured/found"
    # create exit code (unknown if something happened wrong)
    code = EXIT_CODES.get(status, EXIT_CODE_UNKNOWN)

    # return full status string with main status for multiple programs and all programs states
    return "{status}: {output}\n".format(**{
        "status": status.upper(),
        "output": text,
    }), code
python
{ "resource": "" }
q274361
main
test
def main():
    """Program entry point: query supervisord, print status, exit."""
    options = parse_options()
    status_data = get_status(options)
    output, code = create_output(status_data, options)
    sys.stdout.write(output)
    sys.exit(code)
python
{ "resource": "" }
q274362
validate
test
def validate(
    message,
    get_certificate=lambda url: urlopen(url).read(),
    certificate_url_regex=DEFAULT_CERTIFICATE_URL_REGEX,
    max_age=DEFAULT_MAX_AGE
):
    """
    Validate a decoded SNS message.

    Parameters:

    message:
        Decoded SNS message.

    get_certificate:
        Function that receives a URL, and returns the certificate from
        that URL as a string.  The default doesn't implement caching.

    certificate_url_regex:
        Regex that validates the signing certificate URL.  Default value
        checks it's hosted on an AWS-controlled domain.

    max_age:
        Maximum age of an SNS message before it fails validation,
        expressed as a `datetime.timedelta`.  Defaults to one hour, the
        max. lifetime of an SNS message.
    """
    # 1. The certificate must be hosted at a trusted URL.
    SigningCertURLValidator(certificate_url_regex).validate(message)

    # 2. The message must be recent enough.
    if not isinstance(max_age, datetime.timedelta):
        raise ValueError("max_age must be None or a timedelta object")
    MessageAgeValidator(max_age).validate(message)

    # 3. Fetch the (already URL-validated) certificate and check the
    # cryptographic signature.
    certificate = get_certificate(message["SigningCertURL"])
    SignatureValidator(certificate).validate(message)
python
{ "resource": "" }
q274363
read_tdms
test
def read_tdms(tdms_file):
    """Read a tdms file; return the channel names and channel data."""
    handle = nptdms.TdmsFile(tdms_file)
    names = []
    channels = []
    for obj in handle.objects.values():
        if obj.data is None or not len(obj.data):
            continue
        label = obj.path.split('/')[-1].strip("'")
        if "unit_string" in obj.properties:
            # append the physical unit to the channel label
            label = "{} [{}]".format(label, obj.properties["unit_string"])
        names.append(label)
        channels.append(obj.data)
    return names, channels
python
{ "resource": "" }
q274364
add_deformation
test
def add_deformation(chn_names, data):
    """Derive the deformation channel (1 - circularity) if it is missing.

    Useful for RT-DC data sets that contain the circularity but not the
    deformation.  Mutates and returns the given lists.
    """
    if "deformation" in chn_names:
        return chn_names, data
    for idx, name in enumerate(chn_names):
        if name == "circularity":
            chn_names.append("deformation")
            data.append(1 - data[idx])
    return chn_names, data
python
{ "resource": "" }
q274365
tdms2fcs
test
def tdms2fcs(tdms_file):
    """Convert a tdms file into an fcs file stored alongside it."""
    chn_names, data = read_tdms(tdms_file)
    chn_names, data = add_deformation(chn_names, data)
    out_name = tdms_file[:-4] + "fcs"  # swap the "tdms" suffix for "fcs"
    fcswrite.write_fcs(filename=out_name,
                       chn_names=chn_names,
                       data=np.array(data).transpose())
python
{ "resource": "" }
q274366
Diff.equal
test
def equal(self, cwd):
    """Return True if the left and right files have identical content."""
    cmd = ["diff", "-q", self.left.get_name(), self.right.get_name()]
    try:
        Process(cmd).run(cwd=cwd, suppress_output=True)
    except SubprocessError as e:
        # diff exits with 1 when the files differ; any other non-zero
        # status is a real failure.
        if e.get_returncode() == 1:
            return False
        raise e
    return True
python
{ "resource": "" }
q274367
New.create
test
def create(self, patchname):
    """ Adds a new patch with patchname to the queue

    The new patch will be added as the topmost applied patch.

    :raises PatchAlreadyExists: if the series already contains a
        patch with that name.
    """
    patch = Patch(patchname)
    if self.series.is_patch(patch):
        raise PatchAlreadyExists(self.series, patchname)

    patch_dir = self.quilt_patches
    patch_dir.create()
    # create an empty patch file so the new patch can be refreshed later
    patchfile = patch_dir + File(patchname)
    patchfile.touch()

    pc_dir = self.quilt_pc + patchname
    if pc_dir.exists():
        # be sure that the directory is clear
        pc_dir.delete()

    # create empty .pc/<patchname> directory as quilt does too
    pc_dir.create()

    top = self.db.top_patch()
    # add new patch after the current topmost applied patch
    self.series.add_patches([patch], top)
    # "apply" patch
    self.db.add_patch(patch)
    # create patches/series files
    self.series.save()
    # create .pc/.version and .pc/applied-patches files
    self.db.save()
    self.patch_created(patch)
python
{ "resource": "" }
q274368
Delete.delete_next
test
def delete_next(self, remove=False, backup=False):
    """ Delete the next unapplied patch.

    If remove is True the patch file will also be removed.  If remove
    and backup are True a copy of the deleted patch file will be made.
    """
    top = self.db.top_patch()
    if top:
        target = self.series.patch_after(top)
    else:
        target = self.series.first_patch()
    if not target:
        raise QuiltError("No next patch")
    self._delete_patch(target, remove=remove, backup=backup)
python
{ "resource": "" }
q274369
Delete.delete_patch
test
def delete_patch(self, patch_name=None, remove=False, backup=False):
    """ Delete the named patch (or the topmost applied one) from the
    series.

    If remove is True the patch file will also be removed.  If remove
    and backup are True a copy of the deleted patch file will be made.
    """
    patch = Patch(patch_name) if patch_name else self.db.top_patch()
    if not patch:
        raise NoAppliedPatch(self.db)
    self._delete_patch(patch, remove=remove, backup=backup)
python
{ "resource": "" }
q274370
Add._file_in_patch
test
def _file_in_patch(self, filename, patch, ignore):
    """ Return True if the patch already holds a backup of *filename*.

    Raises QuiltError when the file is present and *ignore* is False.
    """
    backup = self.quilt_pc + File(os.path.join(patch.get_name(), filename))
    if not backup.exists():
        return False
    if not ignore:
        raise QuiltError("File %s is already in patch %s" % (filename,
                         patch.get_name()))
    return True
python
{ "resource": "" }
q274371
Add._backup_file
test
def _backup_file(self, file, patch):
    """ Save a pristine copy of *file* under .pc/<patch>/. """
    dest_dir = self.quilt_pc + patch.get_name()
    subdir = file.get_directory()
    if subdir:
        #TODO get relative path
        dest_dir = dest_dir + subdir
    Backup().backup_file(file, dest_dir, copy_empty=True)
python
{ "resource": "" }
q274372
Add.add_file
test
def add_file(self, filename, patch_name=None, ignore=False):
    """ Add file to the patch with patch_name.

    If patch_name is None or empty the topmost patch will be used.
    Adding an already added patch will raise an QuiltError if ignore
    is False.
    """
    file = File(filename)

    if patch_name:
        patch = Patch(patch_name)
    else:
        patch = self.db.top_patch()

    if not patch:
        raise NoAppliedPatch(self.db)
    # check whether the file is already tracked by this patch
    exists = self._file_in_patch(filename, patch, ignore)

    if exists:
        return

    # the file must not belong to any later patch in the stack
    self._file_in_next_patches(filename, patch)

    if file.is_link():
        raise QuiltError("Cannot add symbolic link %s" % filename)

    # snapshot the pristine content before the user modifies it
    self._backup_file(file, patch)

    if file.exists():
        # be sure user can write original file
        os.chmod(filename, file.get_mode() | stat.S_IWUSR | stat.S_IRUSR)

    self.file_added(file, patch)
python
{ "resource": "" }
q274373
Process.run
test
def run(self, suppress_output=False, inputdata=None, **kw):
    """Run command as a subprocess and wait until it is finished.

    The command should be given as a list of strings to avoid
    problems with shell quoting. If the command exits with a return
    code other than 0, a SubprocessError is raised.
    """
    if inputdata is not None:
        # We will feed data to the child's stdin, so request a pipe.
        kw["stdin"] = subprocess.PIPE
    if suppress_output:
        # Route both stdout and stderr to the null device.
        kw["stdout"] = open(os.devnull, "w")
        kw["stderr"] = kw["stdout"]
    try:
        try:
            process = subprocess.Popen(self.cmd, **kw)
        finally:
            if suppress_output:
                # Popen duplicated the descriptor; close our handle
                # whether or not Popen succeeded.
                kw["stdout"].close()
    except OSError as e:
        msg = "Failed starting command {!r}: {}".format(self.cmd, e)
        raise QuiltError(msg)
    if inputdata is not None:
        # NOTE(review): inputdata is written as-is; on Python 3 callers
        # presumably pass bytes -- confirm.
        process.stdin.write(inputdata)
        process.stdin.close()
    ret = process.wait()
    if ret != 0:
        raise SubprocessError(self.cmd, ret)
python
{ "resource": "" }
q274374
Directory.create
test
def create(self):
    """ Create the directory and any missing parent directories,
    unless it already exists. """
    name = self.dirname
    if name and not os.path.exists(name):
        os.makedirs(name)
python
{ "resource": "" }
q274375
Directory.copy
test
def copy(self, dest, symlinks=False):
    """ Copy to destination directory recursively.

    If symlinks is true, symbolic links in the source tree are
    represented as symbolic links in the new tree, but the metadata
    of the original links is NOT copied; if false or omitted, the
    contents and metadata of the linked files are copied to the new
    tree.
    """
    if isinstance(dest, Directory):
        dest = dest.get_name()
    # BUG FIX: ``symlinks`` was documented but never forwarded, so links
    # were always dereferenced; pass it through to shutil.copytree.
    shutil.copytree(self.dirname, dest, symlinks=symlinks)
python
{ "resource": "" }
q274376
File.link
test
def link(self, link):
    """ Create a hard link named *link* that points at this file. """
    target = link.filename if isinstance(link, File) else link
    os.link(self.filename, target)
python
{ "resource": "" }
q274377
File.copy
test
def copy(self, dest):
    """ Copy this file to *dest* (a File, a Directory, or a path). """
    if isinstance(dest, File):
        # ensure the target's parent directory exists first
        dest.get_directory().create()
        dest = dest.filename
    elif isinstance(dest, Directory):
        dest = dest.dirname
    shutil.copy2(self.filename, dest)
python
{ "resource": "" }
q274378
File.get_directory
test
def get_directory(self): """ Returns the directory where the file is placed in or None if the path to the file doesn't contain a directory """ dirname = os.path.dirname(self.filename) if dirname: return Directory(dirname) else: return None
python
{ "resource": "" }
q274379
Backup.backup_file
test
def backup_file(self, file, dest_dir, copy_empty=False):
    """ Backup file in dest_dir Directory.

    The return value is a File object pointing to the copied file in
    the destination directory or None if no file is copied.

    If file exists and it is not empty it is copied to dest_dir.
    If file exists and it is empty the file is copied only if copy_empty
    is True.
    If file does not exist and copy_empty is True a new file in dest_dir
    will be created.
    In all other cases no file will be copied and None is returned.
    """
    if file.exists():
        if not copy_empty and file.is_empty():
            return None
        dest_dir.create()
        file.copy(dest_dir)
        return dest_dir + file.get_basefile()
    elif copy_empty:
        # create new file in dest_dir
        # NOTE(review): unlike the branch above, the file's own directory
        # is appended to dest_dir here -- confirm the asymmetry is
        # intentional.
        dest_dir = dest_dir + file.get_directory()
        dest_dir.create()
        dest_file = dest_dir + file.get_basefile()
        dest_file.touch()
        return dest_file
    else:
        return None
python
{ "resource": "" }
q274380
Refresh.refresh
test
def refresh(self, patch_name=None, edit=False):
    """ Refresh patch with patch_name or applied top patch if
    patch_name is None

    Regenerates the patch file by diffing the backed-up originals in
    .pc/<patch>/ against the working tree; raises QuiltError when
    there is nothing to refresh.
    """
    if patch_name:
        patch = Patch(patch_name)
    else:
        patch = self.db.top_patch()

    if not patch:
        raise QuiltError("No patch applied. Nothing to refresh.")

    pc_dir = self.quilt_pc + patch.get_name()
    patch_file = self.quilt_patches + File(patch.get_name())
    # second element of content() holds the file names backed up for
    # this patch
    files = pc_dir.content()[1]

    with TmpFile(prefix="pquilt-") as tmpfile:
        f = tmpfile.open()
        if patch_file.exists():
            # keep the existing patch header in the regenerated file
            header = patch.get_header(self.quilt_patches)
            tmpfile.write(header)

        for file_name in files:
            if file_name == ".timestamp":
                continue
            orig_file = pc_dir + File(file_name)
            new_file = File(file_name)
            left_label, right_label, index = self._get_labels(file_name,
                                                              orig_file,
                                                              new_file)
            self._write_index(tmpfile, index)
            diff = Diff(orig_file, new_file)
            diff.run(self.cwd, fd=f, left_label=left_label,
                     right_label=right_label)

        if tmpfile.is_empty():
            raise QuiltError("Nothing to refresh.")

        if edit:
            self.edit_patch(tmpfile)
            # dry-run the edited patch to make sure it still applies
            tpatch = Patch(tmpfile.get_name())
            tpatch.run(pc_dir.get_name(), dry_run=True, quiet=True)

        if patch_file.exists():
            # avoid rewriting an unchanged patch file
            diff = Diff(patch_file, tmpfile)
            if diff.equal(self.cwd):
                raise QuiltError("Nothing to refresh.")

        tmpfile.copy(patch_file)

    timestamp = pc_dir + File(".timestamp")
    timestamp.touch()

    # a pending ~refresh marker is now obsolete
    refresh = self.quilt_pc + File(patch.get_name() + "~refresh")
    refresh.delete_if_exists()

    self.refreshed(patch)
python
{ "resource": "" }
q274381
Pop.unapply_patch
test
def unapply_patch(self, patch_name, force=False):
    """ Unapply patches until *patch_name* becomes the topmost
    applied patch. """
    self._check(force)
    to_remove = self.db.patches_after(Patch(patch_name))
    for patch in reversed(to_remove):
        self._unapply_patch(patch)
    self.db.save()
    self.unapplied(self.db.top_patch())
python
{ "resource": "" }
q274382
Pop.unapply_top_patch
test
def unapply_top_patch(self, force=False):
    """ Unapply only the topmost applied patch. """
    self._check(force)
    self._unapply_patch(self.db.top_patch())
    self.db.save()
    self.unapplied(self.db.top_patch())
python
{ "resource": "" }
q274383
Pop.unapply_all
test
def unapply_all(self, force=False):
    """ Unapply every currently applied patch. """
    self._check(force)
    remaining = list(self.db.applied_patches())
    # pop from the end so the patches are removed top-down
    while remaining:
        self._unapply_patch(remaining.pop())
    self.db.save()
    self.unapplied(self.db.top_patch())
python
{ "resource": "" }
q274384
Push.apply_patch
test
def apply_patch(self, patch_name, force=False, quiet=False):
    """ Apply all patches up to and including patch_name. """
    self._check()

    target = Patch(patch_name)
    patches = self.series.patches_until(target)[:]

    # skip patches that are already applied
    applied = self.db.applied_patches()
    for patch in applied:
        if patch in patches:
            patches.remove(patch)

    if not patches:
        raise AllPatchesApplied(self.series, self.db.top_patch())

    try:
        for cur_patch in patches:
            # BUG FIX: the original called self.applying(patch) once
            # before the loop, with the stale variable left over from the
            # "applied" loop above.  Announce each patch as it is applied,
            # matching apply_all/apply_next_patch.
            self.applying(cur_patch)
            self._apply_patch(cur_patch, force, quiet)
    finally:
        self.db.save()

    self.applied(self.db.top_patch())
python
{ "resource": "" }
q274385
Push.apply_next_patch
test
def apply_next_patch(self, force=False, quiet=False):
    """ Apply the next unapplied patch from the series file. """
    self._check()
    top = self.db.top_patch()
    if top:
        patch = self.series.patch_after(top)
    else:
        patch = self.series.first_patch()
    if not patch:
        raise AllPatchesApplied(self.series, top)
    self.applying(patch)
    self._apply_patch(patch, force, quiet)
    self.db.save()
    self.applied(self.db.top_patch())
python
{ "resource": "" }
q274386
Push.apply_all
test
def apply_all(self, force=False, quiet=False):
    """ Apply every remaining patch in the series file. """
    self._check()
    top = self.db.top_patch()
    pending = self.series.patches_after(top) if top else self.series.patches()
    if not pending:
        raise AllPatchesApplied(self.series, top)
    try:
        for patch in pending:
            self.applying(patch)
            self._apply_patch(patch, force, quiet)
    finally:
        # persist whatever was applied, even on failure
        self.db.save()
    self.applied(self.db.top_patch())
python
{ "resource": "" }
q274387
PatchSeries.read
test
def read(self):
    """ (Re)load all patch lines from the series file. """
    self.patchlines = []
    self.patch2line = dict()
    if not self.exists():
        return
    with open(self.series_file, "r") as series:
        for line in series:
            self.add_patch(line)
python
{ "resource": "" }
q274388
PatchSeries.save
test
def save(self):
    """ Write the current patch list back to the series file. """
    with open(self.series_file, "wb") as out:
        for patchline in self.patchlines:
            out.write(_encode_str(str(patchline)) + b"\n")
python
{ "resource": "" }
q274389
PatchSeries.add_patch
test
def add_patch(self, patch):
    """ Append *patch* (a patch, name or raw series line) to the list. """
    patchline = PatchLine(patch)
    parsed = patchline.get_patch()
    if parsed:
        # comment-only lines have no patch and are not indexed
        self.patch2line[parsed] = patchline
    self.patchlines.append(patchline)
python
{ "resource": "" }
q274390
PatchSeries.insert_patches
test
def insert_patches(self, patches):
    """ Insert the given patches at the front of the current list. """
    new_lines = []
    for patch_name in patches:
        patchline = PatchLine(patch_name)
        patch = patchline.get_patch()
        if patch:
            self.patch2line[patch] = patchline
        new_lines.append(patchline)
    self.patchlines = new_lines + self.patchlines
python
{ "resource": "" }
q274391
PatchSeries.add_patches
test
def add_patches(self, patches, after=None):
    """ Add a list of patches to the patches list

    :param patches: patches to insert
    :param after: existing patch to insert the new patches behind, or
        None to insert them at the front of the series
    """
    if after is None:
        self.insert_patches(patches)
    else:
        self._check_patch(after)
        # rebuild the line list: lines before `after`, `after` itself,
        # then the new patches, then the remaining lines
        patchlines = self._patchlines_before(after)
        patchlines.append(self.patch2line[after])
        for patch in patches:
            patchline = PatchLine(patch)
            patchlines.append(patchline)
            self.patch2line[patchline.get_patch()] = patchline
        patchlines.extend(self._patchlines_after(after))
        self.patchlines = patchlines
python
{ "resource": "" }
q274392
PatchSeries.remove_patch
test
def remove_patch(self, patch):
    """ Remove *patch* from the patches list. """
    self._check_patch(patch)
    patchline = self.patch2line.pop(patch)
    self.patchlines.remove(patchline)
python
{ "resource": "" }
q274393
PatchSeries.patches_after
test
def patches_after(self, patch):
    """ List the patches that come after *patch* in the series. """
    result = []
    for line in self._patchlines_after(patch):
        found = line.get_patch()
        if found:
            result.append(found)
    return result
python
{ "resource": "" }
q274394
PatchSeries.patches_before
test
def patches_before(self, patch):
    """ List the patches that precede *patch* in the series. """
    result = []
    for line in self._patchlines_before(patch):
        candidate = line.get_patch()
        if candidate:
            result.append(candidate)
    return result
python
{ "resource": "" }
q274395
PatchSeries.patches_until
test
def patches_until(self, patch):
    """ List the patches up to and including *patch*. """
    result = []
    for line in self._patchlines_until(patch):
        candidate = line.get_patch()
        if candidate:
            result.append(candidate)
    return result
python
{ "resource": "" }
q274396
PatchSeries.replace
test
def replace(self, old_patch, new_patch):
    """ Replace old_patch with new_patch

    The method only replaces the patch and doesn't change any
    comments.
    """
    self._check_patch(old_patch)
    old_line = self.patch2line.pop(old_patch)
    position = self.patchlines.index(old_line)
    new_line = PatchLine(new_patch)
    new_line.set_comment(old_line.get_comment())
    # swap in place, preserving the patch's position in the series
    self.patchlines[position] = new_line
    self.patch2line[new_patch] = new_line
python
{ "resource": "" }
q274397
Db.create
test
def create(self):
    """ Create the database directory together with its .version file. """
    target = self.dirname
    if not os.path.exists(target):
        os.makedirs(target)
    self._create_version(self.version_file)
python
{ "resource": "" }
q274398
Db.check_version
test
def check_version(self, version_file):
    """ Verify that the on-disk meta-data version equals DB_VERSION.

    The file contains a version number as a decimal integer,
    optionally followed by a newline; anything else raises DBError.
    """
    with open(version_file, "r") as handle:
        # a valid version is well under 10 characters long
        content = handle.read(10)
    version = content.rstrip("\r\n")
    if len(version) >= 10 or version != str(DB_VERSION):
        raise DBError("The quilt meta-data version of %s is not supported "
                      "by python-quilt. python-quilt only supports "
                      "version %s." % (version, DB_VERSION))
python
{ "resource": "" }
q274399
ArgumentGroup.add_to_parser
test
def add_to_parser(self, parser):
    """ Register this group and all of its arguments on the parser.

    @param parser A argparse.ArgumentParser instance
    """
    self.group = parser.add_argument_group(self.title, self.description)
    for argument in self.arguments:
        argument.add_to_parser(self.group)
python
{ "resource": "" }